Changes device of given mxnet arguments :param arg_params: arguments :param aux_params: auxiliary parameters :param ctx: new device context :return: arguments and auxiliary parameters on new device
def _ch_dev(arg_params, aux_params, ctx): """ Changes device of given mxnet arguments :param arg_params: arguments :param aux_params: auxiliary parameters :param ctx: new device context :return: arguments and auxiliary parameters on new device """ new_args = dict() new_auxs = dict() for k, v in arg_params.items(): new_args[k] = v.as_in_context(ctx) for k, v in aux_params.items(): new_auxs[k] = v.as_in_context(ctx) return new_args, new_auxs
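A minimal usage sketch, assuming a checkpoint saved under the hypothetical prefix 'model' and an available GPU:

import mxnet as mx

# Load a saved checkpoint (prefix and epoch are illustrative) and move every
# parameter array onto GPU 0 before binding an executor there.
sym, arg_params, aux_params = mx.model.load_checkpoint('model', 10)
arg_params, aux_params = _ch_dev(arg_params, aux_params, mx.gpu(0))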
Run the layer comparison on a caffe model, given its prototxt, weights and mean. The comparison is done by inferring on a given image using both caffe and mxnet model :param image_url: image file or url to run inference on :param gpu: gpu to use, -1 for cpu :param caffe_prototxt_path: path to caffe prototxt :param caffe_model_path: path to caffe weights :param caffe_mean: path to caffe mean file
def convert_and_compare_caffe_to_mxnet(image_url, gpu, caffe_prototxt_path, caffe_model_path, caffe_mean, mean_diff_allowed, max_diff_allowed): """ Run the layer comparison on a caffe model, given its prototxt, weights and mean. The comparison is done by inferring on a given image using both caffe and mxnet model :param image_url: image file or url to run inference on :param gpu: gpu to use, -1 for cpu :param caffe_prototxt_path: path to caffe prototxt :param caffe_model_path: path to caffe weights :param caffe_mean: path to caffe mean file """ import caffe from caffe_proto_utils import read_network_dag, process_network_proto, read_caffe_mean from convert_model import convert_model if isinstance(caffe_mean, str): caffe_mean = read_caffe_mean(caffe_mean) elif caffe_mean is None: pass elif len(caffe_mean) == 3: # swap channels from Caffe BGR to RGB caffe_mean = caffe_mean[::-1] # get caffe root location, this is needed to run the upgrade network utility, so we only need # to support parsing of latest caffe caffe_root = os.path.dirname(os.path.dirname(caffe.__path__[0])) caffe_prototxt_path = process_network_proto(caffe_root, caffe_prototxt_path) _, layer_name_to_record, top_to_layers = read_network_dag(caffe_prototxt_path) caffe.set_mode_cpu() caffe_net = caffe.Net(caffe_prototxt_path, caffe_model_path, caffe.TEST) image_dims = tuple(caffe_net.blobs['data'].shape)[2:4] logging.info('getting image %s', image_url) img_rgb = read_image(image_url, image_dims, caffe_mean) img_bgr = img_rgb[:, ::-1, :, :] caffe_net.blobs['data'].reshape(*img_bgr.shape) caffe_net.blobs['data'].data[...] = img_bgr _ = caffe_net.forward() # read sym and add all outputs sym, arg_params, aux_params, _ = convert_model(caffe_prototxt_path, caffe_model_path) sym = sym.get_internals() # now mxnet if gpu < 0: ctx = mx.cpu(0) else: ctx = mx.gpu(gpu) arg_params, aux_params = _ch_dev(arg_params, aux_params, ctx) arg_params["data"] = mx.nd.array(img_rgb, ctx) arg_params["prob_label"] = mx.nd.empty((1,), ctx) exe = sym.bind(ctx, arg_params, args_grad=None, grad_req="null", aux_states=aux_params) exe.forward(is_train=False) compare_layers_from_nets(caffe_net, arg_params, aux_params, exe, layer_name_to_record, top_to_layers, mean_diff_allowed, max_diff_allowed) return
Implementation of Breadth-first search (BFS) on caffe network DAG :param root_node: root node of caffe network DAG :param process_node: function to run on each node
def _bfs(root_node, process_node): """ Implementation of Breadth-first search (BFS) on caffe network DAG :param root_node: root node of caffe network DAG :param process_node: function to run on each node """ from collections import deque seen_nodes = set() next_nodes = deque() seen_nodes.add(root_node) next_nodes.append(root_node) while next_nodes: current_node = next_nodes.popleft() # process current node process_node(current_node) for child_node in current_node.children: if child_node not in seen_nodes: seen_nodes.add(child_node) next_nodes.append(child_node)
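An illustrative traversal, using a namedtuple stand-in for the records produced by read_network_dag (only a name and a children sequence are needed):

from collections import namedtuple

# Hypothetical stand-in for a caffe layer record.
Node = namedtuple('Node', ['name', 'children'])
conv1 = Node('conv1', ())
pool1 = Node('pool1', ())
root = Node('data', (conv1, pool1))

order = []
_bfs(root, lambda node: order.append(node.name))
print(order)  # ['data', 'conv1', 'pool1']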
Compare layer by layer of a caffe network with mxnet network :param caffe_net: loaded caffe network :param arg_params: arguments :param aux_params: auxiliary parameters :param exe: mxnet model :param layer_name_to_record: map between caffe layer and information record :param top_to_layers: map between caffe blob name to layers which outputs it (including inplace) :param mean_diff_allowed: mean difference allowed between caffe blob and mxnet blob :param max_diff_allowed: max difference allowed between caffe blob and mxnet blob
def compare_layers_from_nets(caffe_net, arg_params, aux_params, exe, layer_name_to_record, top_to_layers, mean_diff_allowed, max_diff_allowed): """ Compare layer by layer of a caffe network with mxnet network :param caffe_net: loaded caffe network :param arg_params: arguments :param aux_params: auxiliary parameters :param exe: mxnet model :param layer_name_to_record: map between caffe layer and information record :param top_to_layers: map between caffe blob name to layers which outputs it (including inplace) :param mean_diff_allowed: mean difference allowed between caffe blob and mxnet blob :param max_diff_allowed: max difference allowed between caffe blob and mxnet blob """ import re log_format = ' {0:<40} {1:<40} {2:<8} {3:>10} {4:>10} {5:<1}' compare_layers_from_nets.is_first_convolution = True def _compare_blob(caf_blob, mx_blob, caf_name, mx_name, blob_type, note): diff = np.abs(mx_blob - caf_blob) diff_mean = diff.mean() diff_max = diff.max() logging.info(log_format.format(caf_name, mx_name, blob_type, '%4.5f' % diff_mean, '%4.5f' % diff_max, note)) assert diff_mean < mean_diff_allowed assert diff_max < max_diff_allowed def _process_layer_parameters(layer): logging.debug('processing layer %s of type %s', layer.name, layer.type) normalized_layer_name = re.sub('[-/]', '_', layer.name) # handle weight and bias of convolution and fully-connected layers if layer.name in caffe_net.params and layer.type in ['Convolution', 'InnerProduct', 'Deconvolution']: has_bias = len(caffe_net.params[layer.name]) > 1 mx_name_weight = '{}_weight'.format(normalized_layer_name) mx_beta = arg_params[mx_name_weight].asnumpy() # first convolution should change from BGR to RGB if layer.type == 'Convolution' and compare_layers_from_nets.is_first_convolution: compare_layers_from_nets.is_first_convolution = False # if RGB or RGBA if mx_beta.shape[1] == 3 or mx_beta.shape[1] == 4: # Swapping BGR of caffe into RGB in mxnet mx_beta[:, [0, 2], :, :] = mx_beta[:, [2, 0], :, :] caf_beta = caffe_net.params[layer.name][0].data _compare_blob(caf_beta, mx_beta, layer.name, mx_name_weight, 'weight', '') if has_bias: mx_name_bias = '{}_bias'.format(normalized_layer_name) mx_gamma = arg_params[mx_name_bias].asnumpy() caf_gamma = caffe_net.params[layer.name][1].data _compare_blob(caf_gamma, mx_gamma, layer.name, mx_name_bias, 'bias', '') elif layer.name in caffe_net.params and layer.type == 'Scale': if 'scale' in normalized_layer_name: bn_name = normalized_layer_name.replace('scale', 'bn') elif 'sc' in normalized_layer_name: bn_name = normalized_layer_name.replace('sc', 'bn') else: assert False, 'Unknown name convention for bn/scale' beta_name = '{}_beta'.format(bn_name) gamma_name = '{}_gamma'.format(bn_name) mx_beta = arg_params[beta_name].asnumpy() caf_beta = caffe_net.params[layer.name][1].data _compare_blob(caf_beta, mx_beta, layer.name, beta_name, 'mov_mean', '') mx_gamma = arg_params[gamma_name].asnumpy() caf_gamma = caffe_net.params[layer.name][0].data _compare_blob(caf_gamma, mx_gamma, layer.name, gamma_name, 'mov_var', '') elif layer.name in caffe_net.params and layer.type == 'BatchNorm': mean_name = '{}_moving_mean'.format(normalized_layer_name) var_name = '{}_moving_var'.format(normalized_layer_name) caf_rescale_factor = caffe_net.params[layer.name][2].data mx_mean = aux_params[mean_name].asnumpy() caf_mean = caffe_net.params[layer.name][0].data / caf_rescale_factor _compare_blob(caf_mean, mx_mean, layer.name, mean_name, 'mean', '') mx_var = aux_params[var_name].asnumpy() caf_var = 
caffe_net.params[layer.name][1].data / caf_rescale_factor _compare_blob(caf_var, mx_var, layer.name, var_name, 'var', 'expect 1e-04 change due to cudnn eps') elif layer.type in ['Input', 'Pooling', 'ReLU', 'Eltwise', 'Softmax', 'LRN', 'Concat', 'Dropout', 'Crop']: # no parameters to check for these layers pass else: warnings.warn('No handling for layer %s of type %s, should we ignore it?', layer.name, layer.type) return def _process_layer_output(caffe_blob_name): logging.debug('processing blob %s', caffe_blob_name) # skip blobs not originating from actual layers, e.g. artificial split layers added by caffe if caffe_blob_name not in top_to_layers: return caf_blob = caffe_net.blobs[caffe_blob_name].data # data should change from BGR to RGB if caffe_blob_name == 'data': # if RGB or RGBA if caf_blob.shape[1] == 3 or caf_blob.shape[1] == 4: # Swapping BGR of caffe into RGB in mxnet caf_blob[:, [0, 2], :, :] = caf_blob[:, [2, 0], :, :] mx_name = 'data' else: # get last layer name which outputs this blob name last_layer_name = top_to_layers[caffe_blob_name][-1] normalized_last_layer_name = re.sub('[-/]', '_', last_layer_name) mx_name = '{}_output'.format(normalized_last_layer_name) if 'scale' in mx_name: mx_name = mx_name.replace('scale', 'bn') elif 'sc' in mx_name: mx_name = mx_name.replace('sc', 'bn') if mx_name not in exe.output_dict: logging.error('mxnet blob %s is missing, time to extend the compare tool..', mx_name) return mx_blob = exe.output_dict[mx_name].asnumpy() _compare_blob(caf_blob, mx_blob, caffe_blob_name, mx_name, 'output', '') return # check layer parameters logging.info('\n***** Network Parameters '.ljust(140, '*')) logging.info(log_format.format('CAFFE', 'MXNET', 'Type', 'Mean(diff)', 'Max(diff)', 'Note')) first_layer_name = layer_name_to_record.keys()[0] _bfs(layer_name_to_record[first_layer_name], _process_layer_parameters) # check layer output logging.info('\n***** Network Outputs '.ljust(140, '*')) logging.info(log_format.format('CAFFE', 'MXNET', 'Type', 'Mean(diff)', 'Max(diff)', 'Note')) for caffe_blob_name in caffe_net.blobs.keys(): _process_layer_output(caffe_blob_name) return
Entrypoint for compare_layers
def main(): """Entrypoint for compare_layers""" parser = argparse.ArgumentParser( description='Tool for testing caffe to mxnet conversion layer by layer') parser.add_argument('--image_url', type=str, default='https://github.com/dmlc/web-data/raw/master/mxnet/doc/'\ 'tutorials/python/predict_image/cat.jpg', help='input image to test inference, can be either file path or url') parser.add_argument('--caffe_prototxt_path', type=str, default='./model.prototxt', help='path to caffe prototxt') parser.add_argument('--caffe_model_path', type=str, default='./model.caffemodel', help='path to caffe weights') parser.add_argument('--caffe_mean', type=str, default='./model_mean.binaryproto', help='path to caffe mean file') parser.add_argument('--mean_diff_allowed', type=int, default=1e-03, help='mean difference allowed between caffe blob and mxnet blob') parser.add_argument('--max_diff_allowed', type=int, default=1e-01, help='max difference allowed between caffe blob and mxnet blob') parser.add_argument('--gpu', type=int, default=-1, help='the gpu id used for predict') args = parser.parse_args() convert_and_compare_caffe_to_mxnet(args.image_url, args.gpu, args.caffe_prototxt_path, args.caffe_model_path, args.caffe_mean, args.mean_diff_allowed, args.max_diff_allowed)
Get executor to Stochastic Gradient Langevin Dynamics and/or Bayesian Dark Knowledge
def get_executor(sym, ctx, data_inputs, initializer=None): """Get executor to Stochastic Gradient Langevin Dynamics and/or Bayesian Dark Knowledge""" data_shapes = {k: v.shape for k, v in data_inputs.items()} arg_names = sym.list_arguments() aux_names = sym.list_auxiliary_states() param_names = list(set(arg_names) - set(data_inputs.keys())) arg_shapes, output_shapes, aux_shapes = sym.infer_shape(**data_shapes) arg_name_shape = {k: s for k, s in zip(arg_names, arg_shapes)} params = {n: nd.empty(arg_name_shape[n], ctx=ctx) for n in param_names} params_grad = {n: nd.empty(arg_name_shape[n], ctx=ctx) for n in param_names} aux_states = {k: nd.empty(s, ctx=ctx) for k, s in zip(aux_names, aux_shapes)} exe = sym.bind(ctx=ctx, args=dict(params, **data_inputs), args_grad=params_grad, aux_states=aux_states) if initializer is not None: for k, v in params.items(): initializer(k, v) return exe, params, params_grad, aux_states
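A small sketch of a typical call; the network, batch size and layer sizes are illustrative:

import mxnet as mx
from mxnet import nd

data = mx.symbol.Variable('data')
net = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=64)
net = mx.symbol.SoftmaxOutput(data=net, name='softmax')
# Provide one NDArray per data/label input; parameter arrays are allocated inside.
data_inputs = {'data': nd.zeros((32, 100), ctx=mx.cpu()),
               'softmax_label': nd.zeros((32,), ctx=mx.cpu())}
exe, params, params_grad, aux_states = get_executor(
    net, mx.cpu(), data_inputs, initializer=mx.init.Xavier())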
Create copy of parameters
def copy_param(exe, new_param=None): """Create copy of parameters""" if new_param is None: new_param = {k: nd.empty(v.shape, ctx=mx.cpu()) for k, v in exe.arg_dict.items()} for k, v in new_param.items(): exe.arg_dict[k].copyto(v) return new_param
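For example, to keep a CPU snapshot of the parameters of the executor built in the get_executor sketch above (e.g. one posterior sample during sampling):

snapshot = copy_param(exe)           # allocates fresh CPU arrays and copies into them
copy_param(exe, new_param=snapshot)  # later copies can reuse the same buffers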
Parse command line arguments
def parse_args(): """Parse command line arguments""" parser = argparse.ArgumentParser() parser.add_argument("font_path", help="Path to ttf font file or directory containing ttf files") parser.add_argument("--loss", help="'ctc' or 'warpctc' loss [Default 'ctc']", default='ctc') parser.add_argument("--cpu", help="Number of CPUs for training [Default 8]. Ignored if --gpu is specified.", type=int, default=8) parser.add_argument("--gpu", help="Number of GPUs for training [Default 0]", type=int) parser.add_argument("--num_proc", help="Number CAPTCHA generating processes [Default 4]", type=int, default=4) parser.add_argument("--prefix", help="Checkpoint prefix [Default 'ocr']", default='ocr') return parser.parse_args()
Program entry point
def main(): """Program entry point""" args = parse_args() if not any(args.loss == s for s in ['ctc', 'warpctc']): raise ValueError("Invalid loss '{}' (must be 'ctc' or 'warpctc')".format(args.loss)) hp = Hyperparams() # Start a multiprocessor captcha image generator mp_captcha = MPDigitCaptcha( font_paths=get_fonts(args.font_path), h=hp.seq_length, w=30, num_digit_min=3, num_digit_max=4, num_processes=args.num_proc, max_queue_size=hp.batch_size * 2) try: # Must call start() before any call to mxnet module (https://github.com/apache/incubator-mxnet/issues/9213) mp_captcha.start() if args.gpu: contexts = [mx.context.gpu(i) for i in range(args.gpu)] else: contexts = [mx.context.cpu(i) for i in range(args.cpu)] init_states = lstm.init_states(hp.batch_size, hp.num_lstm_layer, hp.num_hidden) data_train = OCRIter( hp.train_epoch_size // hp.batch_size, hp.batch_size, init_states, captcha=mp_captcha, name='train') data_val = OCRIter( hp.eval_epoch_size // hp.batch_size, hp.batch_size, init_states, captcha=mp_captcha, name='val') symbol = lstm.lstm_unroll( num_lstm_layer=hp.num_lstm_layer, seq_len=hp.seq_length, num_hidden=hp.num_hidden, num_label=hp.num_label, loss_type=args.loss) head = '%(asctime)-15s %(message)s' logging.basicConfig(level=logging.DEBUG, format=head) module = mx.mod.Module( symbol, data_names=['data', 'l0_init_c', 'l0_init_h', 'l1_init_c', 'l1_init_h'], label_names=['label'], context=contexts) metrics = CtcMetrics(hp.seq_length) module.fit(train_data=data_train, eval_data=data_val, # use metrics.accuracy or metrics.accuracy_lcs eval_metric=mx.metric.np(metrics.accuracy, allow_extra_outputs=True), optimizer='sgd', optimizer_params={'learning_rate': hp.learning_rate, 'momentum': hp.momentum, 'wd': 0.00001, }, initializer=mx.init.Xavier(factor_type="in", magnitude=2.34), num_epoch=hp.num_epoch, batch_end_callback=mx.callback.Speedometer(hp.batch_size, 50), epoch_end_callback=mx.callback.do_checkpoint(args.prefix), ) except KeyboardInterrupt: print("W: interrupt received, stopping...") finally: # Reset multiprocessing captcha generator to stop processes mp_captcha.reset()
Gatys et al. CVPR 2017 ref: Image Style Transfer Using Convolutional Neural Networks
def optimize(args): """ Gatys et al. CVPR 2017 ref: Image Style Transfer Using Convolutional Neural Networks """ if args.cuda: ctx = mx.gpu(0) else: ctx = mx.cpu(0) # load the content and style target content_image = utils.tensor_load_rgbimage(args.content_image,ctx, size=args.content_size, keep_asp=True) content_image = utils.subtract_imagenet_mean_preprocess_batch(content_image) style_image = utils.tensor_load_rgbimage(args.style_image, ctx, size=args.style_size) style_image = utils.subtract_imagenet_mean_preprocess_batch(style_image) # load the pre-trained vgg-16 and extract features vgg = net.Vgg16() utils.init_vgg_params(vgg, 'models', ctx=ctx) # content feature f_xc_c = vgg(content_image)[1] # style feature features_style = vgg(style_image) gram_style = [net.gram_matrix(y) for y in features_style] # output output = Parameter('output', shape=content_image.shape) output.initialize(ctx=ctx) output.set_data(content_image) # optimizer trainer = gluon.Trainer([output], 'adam', {'learning_rate': args.lr}) mse_loss = gluon.loss.L2Loss() # optimizing the images for e in range(args.iters): utils.imagenet_clamp_batch(output.data(), 0, 255) # fix BN for pre-trained vgg with autograd.record(): features_y = vgg(output.data()) content_loss = 2 * args.content_weight * mse_loss(features_y[1], f_xc_c) style_loss = 0. for m in range(len(features_y)): gram_y = net.gram_matrix(features_y[m]) gram_s = gram_style[m] style_loss = style_loss + 2 * args.style_weight * mse_loss(gram_y, gram_s) total_loss = content_loss + style_loss total_loss.backward() trainer.step(1) if (e + 1) % args.log_interval == 0: print('loss:{:.2f}'.format(total_loss.asnumpy()[0])) # save the image output = utils.add_imagenet_mean_batch(output.data()) utils.tensor_save_bgrimage(output[0], args.output_image, args.cuda)
Get symbol of mnist
def get_mnist_sym(output_op=None, num_hidden=400): """Get symbol of mnist""" net = mx.symbol.Variable('data') net = mx.symbol.FullyConnected(data=net, name='mnist_fc1', num_hidden=num_hidden) net = mx.symbol.Activation(data=net, name='mnist_relu1', act_type="relu") net = mx.symbol.FullyConnected(data=net, name='mnist_fc2', num_hidden=num_hidden) net = mx.symbol.Activation(data=net, name='mnist_relu2', act_type="relu") net = mx.symbol.FullyConnected(data=net, name='mnist_fc3', num_hidden=10) if output_op is None: net = mx.symbol.SoftmaxOutput(data=net, name='softmax') else: net = output_op(data=net, name='softmax') return net
Get synthetic gradient value
def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None): """Get synthetic gradient value""" if grad is None: grad = nd.empty(theta.shape, theta.context) theta1 = theta.asnumpy()[0] theta2 = theta.asnumpy()[1] v1 = sigma1 ** 2 v2 = sigma2 ** 2 vx = sigmax ** 2 denominator = numpy.exp(-(X - theta1) ** 2 / (2 * vx)) + numpy.exp( -(X - theta1 - theta2) ** 2 / (2 * vx)) grad_npy = numpy.zeros(theta.shape) grad_npy[0] = -rescale_grad * ((numpy.exp(-(X - theta1) ** 2 / (2 * vx)) * (X - theta1) / vx + numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) * (X - theta1 - theta2) / vx) / denominator).sum() + theta1 / v1 grad_npy[1] = -rescale_grad * ((numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) * (X - theta1 - theta2) / vx) / denominator).sum() + theta2 / v2 grad[:] = grad_npy return grad
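An illustrative call with the hyper-parameters used in run_synthetic_SGLD below; the single data point 1.5 is arbitrary:

import numpy
import mxnet as mx

theta = mx.nd.array([0.0, 1.0])
g = synthetic_grad(numpy.array([1.5]), theta,
                   sigma1=numpy.sqrt(10), sigma2=1.0, sigmax=numpy.sqrt(2),
                   rescale_grad=100.0)  # dataset size (100) / minibatch size (1)
print(g.asnumpy())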
Get toy symbol
def get_toy_sym(teacher=True, teacher_noise_precision=None): """Get toy symbol""" if teacher: net = mx.symbol.Variable('data') net = mx.symbol.FullyConnected(data=net, name='teacher_fc1', num_hidden=100) net = mx.symbol.Activation(data=net, name='teacher_relu1', act_type="relu") net = mx.symbol.FullyConnected(data=net, name='teacher_fc2', num_hidden=1) net = mx.symbol.LinearRegressionOutput(data=net, name='teacher_output', grad_scale=teacher_noise_precision) else: net = mx.symbol.Variable('data') net = mx.symbol.FullyConnected(data=net, name='student_fc1', num_hidden=100) net = mx.symbol.Activation(data=net, name='student_relu1', act_type="relu") student_mean = mx.symbol.FullyConnected(data=net, name='student_mean', num_hidden=1) student_var = mx.symbol.FullyConnected(data=net, name='student_var', num_hidden=1) net = mx.symbol.Group([student_mean, student_var]) return net
Run DistilledSGLD on mnist dataset
def run_mnist_DistilledSGLD(num_training=50000, gpu_id=None): """Run DistilledSGLD on mnist dataset""" X, Y, X_test, Y_test = load_mnist(num_training) minibatch_size = 100 if num_training >= 10000: num_hidden = 800 total_iter_num = 1000000 teacher_learning_rate = 1E-6 student_learning_rate = 0.0001 teacher_prior = 1 student_prior = 0.1 perturb_deviation = 0.1 else: num_hidden = 400 total_iter_num = 20000 teacher_learning_rate = 4E-5 student_learning_rate = 0.0001 teacher_prior = 1 student_prior = 0.1 perturb_deviation = 0.001 teacher_net = get_mnist_sym(num_hidden=num_hidden) logsoftmax = LogSoftmax() student_net = get_mnist_sym(output_op=logsoftmax, num_hidden=num_hidden) data_shape = (minibatch_size,) + X.shape[1::] teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)), 'softmax_label': nd.zeros((minibatch_size,), ctx=dev(gpu_id))} student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)), 'softmax_label': nd.zeros((minibatch_size, 10), ctx=dev(gpu_id))} teacher_initializer = BiasXavier(factor_type="in", magnitude=1) student_initializer = BiasXavier(factor_type="in", magnitude=1) student_exe, student_params, _ = \ DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net, teacher_data_inputs=teacher_data_inputs, student_data_inputs=student_data_inputs, X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=total_iter_num, student_initializer=student_initializer, teacher_initializer=teacher_initializer, student_optimizing_algorithm="adam", teacher_learning_rate=teacher_learning_rate, student_learning_rate=student_learning_rate, teacher_prior_precision=teacher_prior, student_prior_precision=student_prior, perturb_deviation=perturb_deviation, minibatch_size=100, dev=dev(gpu_id))
Run SGLD on toy dataset
def run_toy_SGLD(gpu_id=None): """Run SGLD on toy dataset""" X, Y, X_test, Y_test = load_toy() minibatch_size = 1 teacher_noise_precision = 1.0 / 9.0 net = get_toy_sym(True, teacher_noise_precision) data_shape = (minibatch_size,) + X.shape[1::] data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)), 'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev(gpu_id))} initializer = mx.init.Uniform(0.07) exe, params, _ = SGLD(sym=net, data_inputs=data_inputs, X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=50000, initializer=initializer, learning_rate=1E-4, # lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5), prior_precision=0.1, burn_in_iter_num=1000, thin_interval=10, task='regression', minibatch_size=minibatch_size, dev=dev(gpu_id))
Run DistilledSGLD on toy dataset
def run_toy_DistilledSGLD(gpu_id): """Run DistilledSGLD on toy dataset""" X, Y, X_test, Y_test = load_toy() minibatch_size = 1 teacher_noise_precision = 1.0 teacher_net = get_toy_sym(True, teacher_noise_precision) student_net = get_toy_sym(False) data_shape = (minibatch_size,) + X.shape[1::] teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)), 'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev(gpu_id))} student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id))} teacher_initializer = mx.init.Uniform(0.07) student_initializer = mx.init.Uniform(0.07) student_grad_f = lambda student_outputs, teacher_pred: \ regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision) student_exe, student_params, _ = \ DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net, teacher_data_inputs=teacher_data_inputs, student_data_inputs=student_data_inputs, X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=80000, teacher_initializer=teacher_initializer, student_initializer=student_initializer, teacher_learning_rate=1E-4, student_learning_rate=0.01, # teacher_lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5), student_lr_scheduler=mx.lr_scheduler.FactorScheduler(8000, 0.8), student_grad_f=student_grad_f, teacher_prior_precision=0.1, student_prior_precision=0.001, perturb_deviation=0.1, minibatch_size=minibatch_size, task='regression', dev=dev(gpu_id))
Run HMC on toy dataset
def run_toy_HMC(gpu_id=None): """Run HMC on toy dataset""" X, Y, X_test, Y_test = load_toy() minibatch_size = Y.shape[0] noise_precision = 1 / 9.0 net = get_toy_sym(True, noise_precision) data_shape = (minibatch_size,) + X.shape[1::] data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)), 'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev(gpu_id))} initializer = mx.init.Uniform(0.07) sample_pool = HMC(net, data_inputs=data_inputs, X=X, Y=Y, X_test=X_test, Y_test=Y_test, sample_num=300000, initializer=initializer, prior_precision=1.0, learning_rate=1E-3, L=10, dev=dev(gpu_id))
Run synthetic SGLD
def run_synthetic_SGLD(): """Run synthetic SGLD""" theta1 = 0 theta2 = 1 sigma1 = numpy.sqrt(10) sigma2 = 1 sigmax = numpy.sqrt(2) X = load_synthetic(theta1=theta1, theta2=theta2, sigmax=sigmax, num=100) minibatch_size = 1 total_iter_num = 1000000 lr_scheduler = SGLDScheduler(begin_rate=0.01, end_rate=0.0001, total_iter_num=total_iter_num, factor=0.55) optimizer = mx.optimizer.create('sgld', learning_rate=None, rescale_grad=1.0, lr_scheduler=lr_scheduler, wd=0) updater = mx.optimizer.get_updater(optimizer) theta = mx.random.normal(0, 1, (2,), mx.cpu()) grad = nd.empty((2,), mx.cpu()) samples = numpy.zeros((2, total_iter_num)) start = time.time() for i in range(total_iter_num): if (i + 1) % 100000 == 0: end = time.time() print("Iter:%d, Time spent: %f" % (i + 1, end - start)) start = time.time() ind = numpy.random.randint(0, X.shape[0]) synthetic_grad(X[ind], theta, sigma1, sigma2, sigmax, rescale_grad=X.shape[0] / float(minibatch_size), grad=grad) updater('theta', grad, theta) samples[:, i] = theta.asnumpy() plt.hist2d(samples[0, :], samples[1, :], (200, 200), cmap=plt.cm.jet) plt.colorbar() plt.show()
Wrapper function for loading the Pascal VOC dataset Parameters: ---------- image_set : str train, trainval... year : str 2007, 2012 or combinations split by comma devkit_path : str root directory of dataset shuffle : bool whether to shuffle the initial list Returns: ---------- Imdb
def load_pascal(image_set, year, devkit_path, shuffle=False): """ wrapper function for loading pascal voc dataset Parameters: ---------- image_set : str train, trainval... year : str 2007, 2012 or combinations splitted by comma devkit_path : str root directory of dataset shuffle : bool whether to shuffle initial list Returns: ---------- Imdb """ image_set = [y.strip() for y in image_set.split(',')] assert image_set, "No image_set specified" year = [y.strip() for y in year.split(',')] assert year, "No year specified" # make sure (# sets == # years) if len(image_set) > 1 and len(year) == 1: year = year * len(image_set) if len(image_set) == 1 and len(year) > 1: image_set = image_set * len(year) assert len(image_set) == len(year), "Number of sets and year mismatch" imdbs = [] for s, y in zip(image_set, year): imdbs.append(PascalVoc(s, y, devkit_path, shuffle, is_train=True)) if len(imdbs) > 1: return ConcatDB(imdbs, shuffle) else: return imdbs[0]
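A hypothetical call that concatenates the VOC2007 and VOC2012 trainval splits; the devkit path is an assumption about the local layout:

imdb = load_pascal('trainval,trainval', '2007,2012', './data/VOCdevkit', shuffle=True)
print(imdb.num_images)  # total images across the concatenated databases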
wrapper function for loading ms coco dataset Parameters: ---------- image_set : str train2014, val2014, valminusminival2014, minival2014 dirname: str root dir for coco shuffle: boolean initial shuffle
def load_coco(image_set, dirname, shuffle=False): """ wrapper function for loading ms coco dataset Parameters: ---------- image_set : str train2014, val2014, valminusminival2014, minival2014 dirname: str root dir for coco shuffle: boolean initial shuffle """ anno_files = ['instances_' + y.strip() + '.json' for y in image_set.split(',')] assert anno_files, "No image set specified" imdbs = [] for af in anno_files: af_path = os.path.join(dirname, 'annotations', af) imdbs.append(Coco(af_path, dirname, shuffle=shuffle)) if len(imdbs) > 1: return ConcatDB(imdbs, shuffle) else: return imdbs[0]
Resets the iterator to the beginning of the data.
def reset(self): """Resets the iterator to the beginning of the data.""" self.curr_idx = 0 #shuffle data in each bucket random.shuffle(self.idx) for i, buck in enumerate(self.sentences): self.indices[i], self.sentences[i], self.characters[i], self.label[i] = shuffle(self.indices[i], self.sentences[i], self.characters[i], self.label[i]) self.ndindex = [] self.ndsent = [] self.ndchar = [] self.ndlabel = [] #for each bucket of data for i, buck in enumerate(self.sentences): #append the lists with an array self.ndindex.append(ndarray.array(self.indices[i], dtype=self.dtype)) self.ndsent.append(ndarray.array(self.sentences[i], dtype=self.dtype)) self.ndchar.append(ndarray.array(self.characters[i], dtype=self.dtype)) self.ndlabel.append(ndarray.array(self.label[i], dtype=self.dtype))
Returns the next batch of data.
def next(self): """Returns the next batch of data.""" if self.curr_idx == len(self.idx): raise StopIteration #i = batches index, j = starting record i, j = self.idx[self.curr_idx] self.curr_idx += 1 indices = self.ndindex[i][j:j + self.batch_size] sentences = self.ndsent[i][j:j + self.batch_size] characters = self.ndchar[i][j:j + self.batch_size] label = self.ndlabel[i][j:j + self.batch_size] return DataBatch([sentences, characters], [label], pad=0, index = indices, bucket_key=self.buckets[i], provide_data=[DataDesc(name=self.data_names[0], shape=sentences.shape, layout=self.layout), DataDesc(name=self.data_names[1], shape=characters.shape, layout=self.layout)], provide_label=[DataDesc(name=self.label_name, shape=label.shape, layout=self.layout)])
Converts a reshape layer from mxnet to coreml. This doesn't currently handle the deprecated parameters for the reshape layer. Parameters ---------- network: net An mxnet network object. layer: node Node to convert. module: module A module for MXNet builder: NeuralNetworkBuilder A neural network builder object.
def convert_reshape(net, node, module, builder): """Converts a reshape layer from mxnet to coreml. This doesn't currently handle the deprecated parameters for the reshape layer. Parameters ---------- network: net An mxnet network object. layer: node Node to convert. module: module A module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ input_name, output_name = _get_input_output_name(net, node) name = node['name'] target_shape = node['shape'] if any(item <= 0 for item in target_shape): raise NotImplementedError('Special dimensional values less than or equal to 0 are not supported yet.' 'Feel free to file an issue here: https://github.com/dmlc/mxnet/issues.') if 'reverse' in node and node['reverse'] == 'True': raise NotImplementedError('"reverse" parameter is not supported by yet.' 'Feel free to file an issue here: https://github.com/dmlc/mxnet/issues.') mode = 0 # CHANNEL_FIRST builder.add_reshape(name, input_name, output_name, target_shape, mode)
Convert a transpose layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object.
def convert_transpose(net, node, module, builder): """Convert a transpose layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ input_name, output_name = _get_input_output_name(net, node) name = node['name'] param = _get_attrs(node) axes = literal_eval(param['axes']) builder.add_permute(name, axes, input_name, output_name)
Convert a flatten layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object.
def convert_flatten(net, node, module, builder): """Convert a flatten layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ input_name, output_name = _get_input_output_name(net, node) name = node['name'] mode = 0 # CHANNEL_FIRST builder.add_flatten(name, mode, input_name, output_name)
Convert a softmax layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object.
def convert_softmax(net, node, module, builder): """Convert a softmax layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ input_name, output_name = _get_input_output_name(net, node) name = node['name'] builder.add_softmax(name=name, input_name=input_name, output_name=output_name)
Convert an activation layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object.
def convert_activation(net, node, module, builder): """Convert an activation layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ input_name, output_name = _get_input_output_name(net, node) name = node['name'] mx_non_linearity = _get_attrs(node)['act_type'] #TODO add SCALED_TANH, SOFTPLUS, SOFTSIGN, SIGMOID_HARD, LEAKYRELU, PRELU, ELU, PARAMETRICSOFTPLUS, THRESHOLDEDRELU, LINEAR if mx_non_linearity == 'relu': non_linearity = 'RELU' elif mx_non_linearity == 'tanh': non_linearity = 'TANH' elif mx_non_linearity == 'sigmoid': non_linearity = 'SIGMOID' else: raise TypeError('Unknown activation type %s' % mx_non_linearity) builder.add_activation(name = name, non_linearity = non_linearity, input_name = input_name, output_name = output_name)
Convert a leakyrelu layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object.
def convert_leakyrelu(net, node, module, builder): """Convert a leakyrelu layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ input_name, output_name = _get_input_output_name(net, node) name = node['name'] inputs = node['inputs'] args, _ = module.get_params() mx_non_linearity = _get_attrs(node)['act_type'] if mx_non_linearity == 'elu': non_linearity = 'ELU' slope = _get_attrs(node)['slope'] if 'slope' in _get_attrs(node) else 0.25 params = slope elif mx_non_linearity == 'leaky': non_linearity = 'LEAKYRELU' slope = _get_attrs(node)['slope'] if 'slope' in _get_attrs(node) else 0.25 params = [slope] elif mx_non_linearity == 'prelu': non_linearity = 'PRELU' params = args[_get_node_name(net, inputs[1][0])].asnumpy() else: raise TypeError('Unknown activation type %s' % mx_non_linearity) builder.add_activation(name = name, non_linearity = non_linearity, input_name = input_name, output_name = output_name, params = params)
Convert an elementwise add layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object.
def convert_elementwise_add(net, node, module, builder): """Convert an elementwise add layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ input_names, output_name = _get_input_output_name(net, node, [0, 1]) name = node['name'] builder.add_elementwise(name, input_names, output_name, 'ADD')
Convert a convolution layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object.
def convert_convolution(net, node, module, builder): """Convert a convolution layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ input_name, output_name = _get_input_output_name(net, node) name = node['name'] param = _get_attrs(node) inputs = node['inputs'] args, _ = module.get_params() if 'no_bias' in param.keys(): has_bias = not literal_eval(param['no_bias']) else: has_bias = True if 'pad' in param.keys() and literal_eval(param['pad']) != (0, 0): pad = literal_eval(param['pad']) builder.add_padding( name=name+"_pad", left=pad[1], right=pad[1], top=pad[0], bottom=pad[0], value=0, input_name=input_name, output_name=name+"_pad_output") input_name = name+"_pad_output" border_mode = "valid" n_filters = int(param['num_filter']) n_groups = int(param['num_group']) if 'num_group' in param else 1 W = args[_get_node_name(net, inputs[1][0])].asnumpy() if has_bias: Wb = args[_get_node_name(net, inputs[2][0])].asnumpy() else: Wb = None channels = W.shape[1] stride_height = 1 stride_width = 1 if 'stride' in param.keys(): stride_height, stride_width = literal_eval(param['stride']) kernel_height, kernel_width = literal_eval(param['kernel']) W = W.transpose((2, 3, 1, 0)) builder.add_convolution( name=name, kernel_channels=channels, output_channels=n_filters, height=kernel_height, width=kernel_width, stride_height=stride_height, stride_width=stride_width, border_mode=border_mode, groups=n_groups, W=W, b=Wb, has_bias=has_bias, is_deconv=False, output_shape=None, input_name=input_name, output_name=output_name)
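The transpose near the end reorders the weight tensor for the CoreML builder; a small numpy sketch with illustrative sizes (the CoreML-side layout is stated here as an assumption, not taken from the source):

import numpy as np

# MXNet stores convolution weights as (num_filter, channels, kH, kW);
# transpose((2, 3, 1, 0)) turns that into (kH, kW, channels, num_filter),
# which is the layout the builder's W argument is assumed to expect.
W_mx = np.zeros((64, 3, 7, 7))
W_coreml = W_mx.transpose((2, 3, 1, 0))
print(W_coreml.shape)  # (7, 7, 3, 64)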
Convert a pooling layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object.
def convert_pooling(net, node, module, builder): """Convert a pooling layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ input_name, output_name = _get_input_output_name(net, node) name = node['name'] param = _get_attrs(node) layer_type_mx = param['pool_type'] if layer_type_mx == 'max': layer_type = 'MAX' elif layer_type_mx == 'avg': layer_type = 'AVERAGE' else: raise TypeError("Pooling type %s not supported" % layer_type_mx) # Add padding if there is any if 'pad' in param.keys() and literal_eval(param['pad']) != (0, 0): pad = literal_eval(param['pad']) builder.add_padding( name=name+"_pad", left=pad[1], right=pad[1], top=pad[0], bottom=pad[0], value=0, input_name=input_name, output_name=name+"_pad_output") input_name = name+"_pad_output" stride_height = 1 stride_width = 1 if 'stride' in param.keys(): stride_height, stride_width = literal_eval(param['stride']) kernel_width, kernel_height = literal_eval(param['kernel']) type_map = {'valid': 'VALID', 'full': 'INCLUDE_LAST_PIXEL'} padding_type = param['pooling_convention'] if 'pooling_convention' in param else 'valid' if padding_type not in type_map: raise KeyError("%s type is not supported in this converter. It is a Github issue.") padding_type = type_map[padding_type] if 'global_pool' in param.keys(): is_global = literal_eval(param['global_pool']) else: is_global = False # For reasons why we are not using the standard builder but having our own implementation, # see the function documentation. _add_pooling.add_pooling_with_padding_types( builder=builder, name=name, height=kernel_height, width=kernel_width, stride_height=stride_height, stride_width=stride_width, layer_type=layer_type, padding_type=padding_type, exclude_pad_area=False, is_global=is_global, input_name=input_name, output_name=output_name )
Convert a batchnorm layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object.
def convert_batchnorm(net, node, module, builder): """Convert a batchnorm layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ input_name, output_name = _get_input_output_name(net, node) name = node['name'] inputs = node['inputs'] eps = 1e-3 # Default value of eps for MXNet. use_global_stats = False # Default value of use_global_stats for MXNet. fix_gamma = True # Default value of fix_gamma for MXNet. attrs = _get_attrs(node) if 'eps' in attrs: eps = literal_eval(attrs['eps']) if 'fix_gamma' in attrs: fix_gamma = literal_eval(attrs['fix_gamma']) args, aux = module.get_params() gamma = args[_get_node_name(net, inputs[1][0])].asnumpy() beta = args[_get_node_name(net, inputs[2][0])].asnumpy() mean = aux[_get_node_name(net, inputs[3][0])].asnumpy() variance = aux[_get_node_name(net, inputs[4][0])].asnumpy() nb_channels = gamma.shape[0] if fix_gamma: gamma.fill(1.) builder.add_batchnorm( name=name, channels=nb_channels, gamma=gamma, beta=beta, mean=mean, variance=variance, input_name=input_name, output_name=output_name, epsilon=eps)
Convert concat layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object.
def convert_concat(net, node, module, builder): """Convert concat layer from mxnet to coreml. Parameters ---------- network: net A mxnet network object. layer: node Node to convert. module: module An module for MXNet builder: NeuralNetworkBuilder A neural network builder object. """ # Get input and output names input_names, output_name = _get_input_output_name(net, node, 'all') name = node['name'] mode = 'CONCAT' builder.add_elementwise(name = name, input_names = input_names, output_name = output_name, mode = mode)
convert from mxnet's opts to dmlc's opts
def dmlc_opts(opts): """convert from mxnet's opts to dmlc's opts """ args = ['--num-workers', str(opts.num_workers), '--num-servers', str(opts.num_servers), '--cluster', opts.launcher, '--host-file', opts.hostfile, '--sync-dst-dir', opts.sync_dst_dir] # convert to dictionary dopts = vars(opts) for key in ['env_server', 'env_worker', 'env']: for v in dopts[key]: args.append('--' + key.replace("_","-")) args.append(v) args += opts.command try: from dmlc_tracker import opts except ImportError: print("Can't load dmlc_tracker package. Perhaps you need to run") print(" git submodule update --init --recursive") raise dmlc_opts = opts.get_opts(args) return dmlc_opts
Unfuses the fused RNN into a stack of RNN cells.
def _unfuse(self): """Unfuses the fused RNN in to a stack of rnn cells.""" assert not self._projection_size, "_unfuse does not support projection layer yet!" assert not self._lstm_state_clip_min and not self._lstm_state_clip_max, \ "_unfuse does not support state clipping yet!" get_cell = {'rnn_relu': lambda **kwargs: rnn_cell.RNNCell(self._hidden_size, activation='relu', **kwargs), 'rnn_tanh': lambda **kwargs: rnn_cell.RNNCell(self._hidden_size, activation='tanh', **kwargs), 'lstm': lambda **kwargs: rnn_cell.LSTMCell(self._hidden_size, **kwargs), 'gru': lambda **kwargs: rnn_cell.GRUCell(self._hidden_size, **kwargs)}[self._mode] stack = rnn_cell.HybridSequentialRNNCell(prefix=self.prefix, params=self.params) with stack.name_scope(): ni = self._input_size for i in range(self._num_layers): kwargs = {'input_size': ni, 'i2h_weight_initializer': self._i2h_weight_initializer, 'h2h_weight_initializer': self._h2h_weight_initializer, 'i2h_bias_initializer': self._i2h_bias_initializer, 'h2h_bias_initializer': self._h2h_bias_initializer} if self._dir == 2: stack.add(rnn_cell.BidirectionalCell( get_cell(prefix='l%d_'%i, **kwargs), get_cell(prefix='r%d_'%i, **kwargs))) else: stack.add(get_cell(prefix='l%d_'%i, **kwargs)) if self._dropout > 0 and i != self._num_layers - 1: stack.add(rnn_cell.DropoutCell(self._dropout)) ni = self._hidden_size * self._dir return stack
Initial state for this cell. Parameters ---------- batch_size: int Only required for `NDArray` API. Size of the batch ('N' in layout). Dimension of the input. func : callable, default `ndarray.zeros` Function for creating initial state. For Symbol API, func can be `symbol.zeros`, `symbol.uniform`, `symbol.var` etc. Use `symbol.var` if you want to directly feed input as states. For NDArray API, func can be `ndarray.zeros`, `ndarray.ones`, etc. **kwargs : Additional keyword arguments passed to func. For example `mean`, `std`, `dtype`, etc. Returns ------- states : nested list of Symbol Starting states for the first RNN step.
def begin_state(self, batch_size=0, func=ndarray.zeros, **kwargs): """Initial state for this cell. Parameters ---------- batch_size: int Only required for `NDArray` API. Size of the batch ('N' in layout). Dimension of the input. func : callable, default `ndarray.zeros` Function for creating initial state. For Symbol API, func can be `symbol.zeros`, `symbol.uniform`, `symbol.var` etc. Use `symbol.var` if you want to directly feed input as states. For NDArray API, func can be `ndarray.zeros`, `ndarray.ones`, etc. **kwargs : Additional keyword arguments passed to func. For example `mean`, `std`, `dtype`, etc. Returns ------- states : nested list of Symbol Starting states for the first RNN step. """ states = [] for i, info in enumerate(self.state_info(batch_size)): if info is not None: info.update(kwargs) else: info = kwargs states.append(func(name='%sh0_%d'%(self.prefix, i), **info)) return states
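A minimal sketch with a Gluon LSTM layer (sizes are illustrative):

import mxnet as mx

layer = mx.gluon.rnn.LSTM(hidden_size=20, num_layers=2)
layer.initialize()
inputs = mx.nd.random.uniform(shape=(5, 3, 10))  # (T, N, C) for the default 'TNC' layout
states = layer.begin_state(batch_size=3)         # zero-filled [h0, c0]
outputs, new_states = layer(inputs, states)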
Forward using the CUDNN or CPU kernel.
def _forward_kernel(self, F, inputs, states, **kwargs): """ forward using CUDNN or CPU kenrel""" if self._layout == 'NTC': inputs = F.swapaxes(inputs, dim1=0, dim2=1) if self._projection_size is None: params = (kwargs['{}{}_{}_{}'.format(d, l, g, t)].reshape(-1) for t in ['weight', 'bias'] for l in range(self._num_layers) for d in ['l', 'r'][:self._dir] for g in ['i2h', 'h2h']) else: params = (kwargs['{}{}_{}_{}'.format(d, l, g, t)].reshape(-1) for t in ['weight', 'bias'] for l in range(self._num_layers) for d in ['l', 'r'][:self._dir] for g in ['i2h', 'h2h', 'h2r'] if g != 'h2r' or t != 'bias') params = F._internal._rnn_param_concat(*params, dim=0) rnn = F.RNN(inputs, params, *states, state_size=self._hidden_size, projection_size=self._projection_size, num_layers=self._num_layers, bidirectional=self._dir == 2, p=self._dropout, state_outputs=True, mode=self._mode, lstm_state_clip_min=self._lstm_state_clip_min, lstm_state_clip_max=self._lstm_state_clip_max, lstm_state_clip_nan=self._lstm_state_clip_nan) if self._mode == 'lstm': outputs, states = rnn[0], [rnn[1], rnn[2]] else: outputs, states = rnn[0], [rnn[1]] if self._layout == 'NTC': outputs = F.swapaxes(outputs, dim1=0, dim2=1) return outputs, states
Wait for network service to appear @param server: host to connect to (str) @param port: port (int) @param timeout: in seconds, if None or 0 wait forever @return: True or False; if timeout is None, may return only True or throw an unhandled network exception
def wait_ssh_open(server, port, keep_waiting=None, timeout=None): """ Wait for network service to appear @param server: host to connect to (str) @param port: port (int) @param timeout: in seconds, if None or 0 wait forever @return: True of False, if timeout is None may return only True or throw unhandled network exception """ import socket import errno import time log = logging.getLogger('wait_ssh_open') sleep_s = 1 if timeout: from time import time as now # time module is needed to calc timeout shared between two exceptions end = now() + timeout while True: log.debug("Sleeping for %s second(s)", sleep_s) time.sleep(sleep_s) s = socket.socket() try: if keep_waiting and not keep_waiting(): log.debug("keep_waiting() is set and evaluates to False") return False if timeout: next_timeout = end - now() if next_timeout < 0: log.debug("connect time out") return False else: log.debug("connect timeout %d s", next_timeout) s.settimeout(next_timeout) log.debug("connect %s:%d", server, port) s.connect((server, port)) ret = s.recv(1024).decode() if ret and ret.startswith('SSH'): s.close() log.info("wait_ssh_open: port %s:%s is open and ssh is ready", server, port) return True else: log.debug("Didn't get the SSH banner") s.close() except ConnectionError as err: log.debug("ConnectionError %s", err) if sleep_s == 0: sleep_s = 1 else: sleep_s *= 2 except socket.gaierror as err: log.debug("gaierror %s",err) return False except socket.timeout as err: # this exception occurs only if timeout is set if timeout: return False except TimeoutError as err: # catch timeout exception from underlying network library # this one is different from socket.timeout raise
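For example (the address is a placeholder from the TEST-NET range):

import logging

logging.basicConfig(level=logging.DEBUG)
# Poll for an SSH banner for up to five minutes; an optional keep_waiting()
# callback can abort the wait early by returning False.
if wait_ssh_open('192.0.2.10', 22, keep_waiting=None, timeout=300):
    print('ssh is ready')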
Wait for network service to appear @param server: host to connect to (str) @param port: port (int) @param timeout: in seconds, if None or 0 wait forever @return: True or False; if timeout is None, may return only True or throw an unhandled network exception
def wait_port_open(server, port, timeout=None): """ Wait for network service to appear @param server: host to connect to (str) @param port: port (int) @param timeout: in seconds, if None or 0 wait forever @return: True of False, if timeout is None may return only True or throw unhandled network exception """ import socket import errno import time sleep_s = 0 if timeout: from time import time as now # time module is needed to calc timeout shared between two exceptions end = now() + timeout while True: logging.debug("Sleeping for %s second(s)", sleep_s) time.sleep(sleep_s) s = socket.socket() try: if timeout: next_timeout = end - now() if next_timeout < 0: return False else: s.settimeout(next_timeout) logging.info("connect %s %d", server, port) s.connect((server, port)) except ConnectionError as err: logging.debug("ConnectionError %s", err) if sleep_s == 0: sleep_s = 1 except socket.gaierror as err: logging.debug("gaierror %s",err) return False except socket.timeout as err: # this exception occurs only if timeout is set if timeout: return False except TimeoutError as err: # catch timeout exception from underlying network library # this one is different from socket.timeout raise else: s.close() logging.info("wait_port_open: port %s:%s is open", server, port) return True
Print a layer-by-layer summary of a symbol. Parameters ---------- symbol: Symbol Symbol to be visualized. shape: dict A dict of shapes, str->shape (tuple), given input shapes. line_length: int Total length of printed lines positions: list Relative or absolute positions of log elements in each line. Returns ------- None Notes ----- If ``mxnet`` is imported, the visualization module can be used in its short-form. For example, if we ``import mxnet`` as follows:: import mxnet this method in the visualization module can be used in its short-form as:: mxnet.viz.print_summary(...)
def print_summary(symbol, shape=None, line_length=120, positions=[.44, .64, .74, 1.]): """Convert symbol for detail information. Parameters ---------- symbol: Symbol Symbol to be visualized. shape: dict A dict of shapes, str->shape (tuple), given input shapes. line_length: int Rotal length of printed lines positions: list Relative or absolute positions of log elements in each line. Returns ------ None Notes ----- If ``mxnet`` is imported, the visualization module can be used in its short-form. For example, if we ``import mxnet`` as follows:: import mxnet this method in visualization module can be used in its short-form as:: mxnet.viz.print_summary(...) """ if not isinstance(symbol, Symbol): raise TypeError("symbol must be Symbol") show_shape = False if shape is not None: show_shape = True interals = symbol.get_internals() _, out_shapes, _ = interals.infer_shape(**shape) if out_shapes is None: raise ValueError("Input shape is incomplete") shape_dict = dict(zip(interals.list_outputs(), out_shapes)) conf = json.loads(symbol.tojson()) nodes = conf["nodes"] heads = set(conf["heads"][0]) if positions[-1] <= 1: positions = [int(line_length * p) for p in positions] # header names for the different log elements to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Previous Layer'] def print_row(fields, positions): """Print format row. Parameters ---------- fields: list Information field. positions: list Field length ratio. Returns ------ None """ line = '' for i, field in enumerate(fields): line += str(field) line = line[:positions[i]] line += ' ' * (positions[i] - len(line)) print(line) print('_' * line_length) print_row(to_display, positions) print('=' * line_length) def print_layer_summary(node, out_shape): """print layer information Parameters ---------- node: dict Node information. out_shape: dict Node shape information. Returns ------ Node total parameters. 
""" op = node["op"] pre_node = [] pre_filter = 0 if op != "null": inputs = node["inputs"] for item in inputs: input_node = nodes[item[0]] input_name = input_node["name"] if input_node["op"] != "null" or item[0] in heads: # add precede pre_node.append(input_name) if show_shape: if input_node["op"] != "null": key = input_name + "_output" else: key = input_name if key in shape_dict: shape = shape_dict[key][1:] pre_filter = pre_filter + int(shape[0]) cur_param = 0 if op == 'Convolution': if "no_bias" in node["attrs"] and node["attrs"]["no_bias"] == 'True': num_group = int(node['attrs'].get('num_group', '1')) cur_param = pre_filter * int(node["attrs"]["num_filter"]) \ // num_group for k in _str2tuple(node["attrs"]["kernel"]): cur_param *= int(k) else: num_group = int(node['attrs'].get('num_group', '1')) cur_param = pre_filter * int(node["attrs"]["num_filter"]) \ // num_group for k in _str2tuple(node["attrs"]["kernel"]): cur_param *= int(k) cur_param += int(node["attrs"]["num_filter"]) elif op == 'FullyConnected': if "no_bias" in node["attrs"] and node["attrs"]["no_bias"] == 'True': cur_param = pre_filter * int(node["attrs"]["num_hidden"]) else: cur_param = (pre_filter+1) * int(node["attrs"]["num_hidden"]) elif op == 'BatchNorm': key = node["name"] + "_output" if show_shape: num_filter = shape_dict[key][1] cur_param = int(num_filter) * 2 elif op == 'Embedding': cur_param = int(node["attrs"]['input_dim']) * int(node["attrs"]['output_dim']) if not pre_node: first_connection = '' else: first_connection = pre_node[0] fields = [node['name'] + '(' + op + ')', "x".join([str(x) for x in out_shape]), cur_param, first_connection] print_row(fields, positions) if len(pre_node) > 1: for i in range(1, len(pre_node)): fields = ['', '', '', pre_node[i]] print_row(fields, positions) return cur_param total_params = 0 for i, node in enumerate(nodes): out_shape = [] op = node["op"] if op == "null" and i > 0: continue if op != "null" or i in heads: if show_shape: if op != "null": key = node["name"] + "_output" else: key = node["name"] if key in shape_dict: out_shape = shape_dict[key][1:] total_params += print_layer_summary(nodes[i], out_shape) if i == len(nodes) - 1: print('=' * line_length) else: print('_' * line_length) print("Total params: {params}".format(params=total_params)) print('_' * line_length)
Creates a visualization (Graphviz digraph object) of the given computation graph. Graphviz must be installed for this function to work. Parameters ---------- title: str, optional Title of the generated visualization. symbol: Symbol A symbol from the computation graph. The generated digraph will visualize the part of the computation graph required to compute `symbol`. shape: dict, optional Specifies the shape of the input tensors. If specified, the visualization will include the shape of the tensors between the nodes. `shape` is a dictionary mapping input symbol names (str) to the corresponding tensor shape (tuple). dtype: dict, optional Specifies the type of the input tensors. If specified, the visualization will include the type of the tensors between the nodes. `dtype` is a dictionary mapping input symbol names (str) to the corresponding tensor type (e.g. `numpy.float32`). node_attrs: dict, optional Specifies the attributes for nodes in the generated visualization. `node_attrs` is a dictionary of Graphviz attribute names and values. For example:: node_attrs={"shape":"oval","fixedsize":"false"} will use oval shape for nodes and allow variable sized nodes in the visualization. hide_weights: bool, optional If True (default), then inputs with names of form *_weight* (corresponding to weight tensors) or *_bias* (corresponding to bias vectors) will be hidden for a cleaner visualization. Returns ------- dot: Digraph A Graphviz digraph object visualizing the computation graph to compute `symbol`. Example ------- >>> net = mx.sym.Variable('data') >>> net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=128) >>> net = mx.sym.Activation(data=net, name='relu1', act_type="relu") >>> net = mx.sym.FullyConnected(data=net, name='fc2', num_hidden=10) >>> net = mx.sym.SoftmaxOutput(data=net, name='out') >>> digraph = mx.viz.plot_network(net, shape={'data':(100,200)}, ... node_attrs={"fixedsize":"false"}) >>> digraph.view() Notes ----- If ``mxnet`` is imported, the visualization module can be used in its short-form. For example, if we ``import mxnet`` as follows:: import mxnet this method in visualization module can be used in its short-form as:: mxnet.viz.plot_network(...)
def plot_network(symbol, title="plot", save_format='pdf', shape=None, dtype=None, node_attrs={}, hide_weights=True): """Creates a visualization (Graphviz digraph object) of the given computation graph. Graphviz must be installed for this function to work. Parameters ---------- title: str, optional Title of the generated visualization. symbol: Symbol A symbol from the computation graph. The generated digraph will visualize the part of the computation graph required to compute `symbol`. shape: dict, optional Specifies the shape of the input tensors. If specified, the visualization will include the shape of the tensors between the nodes. `shape` is a dictionary mapping input symbol names (str) to the corresponding tensor shape (tuple). dtype: dict, optional Specifies the type of the input tensors. If specified, the visualization will include the type of the tensors between the nodes. `dtype` is a dictionary mapping input symbol names (str) to the corresponding tensor type (e.g. `numpy.float32`). node_attrs: dict, optional Specifies the attributes for nodes in the generated visualization. `node_attrs` is a dictionary of Graphviz attribute names and values. For example:: node_attrs={"shape":"oval","fixedsize":"false"} will use oval shape for nodes and allow variable sized nodes in the visualization. hide_weights: bool, optional If True (default), then inputs with names of form *_weight* (corresponding to weight tensors) or *_bias* (corresponding to bias vectors) will be hidden for a cleaner visualization. Returns ------- dot: Digraph A Graphviz digraph object visualizing the computation graph to compute `symbol`. Example ------- >>> net = mx.sym.Variable('data') >>> net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=128) >>> net = mx.sym.Activation(data=net, name='relu1', act_type="relu") >>> net = mx.sym.FullyConnected(data=net, name='fc2', num_hidden=10) >>> net = mx.sym.SoftmaxOutput(data=net, name='out') >>> digraph = mx.viz.plot_network(net, shape={'data':(100,200)}, ... node_attrs={"fixedsize":"false"}) >>> digraph.view() Notes ----- If ``mxnet`` is imported, the visualization module can be used in its short-form. For example, if we ``import mxnet`` as follows:: import mxnet this method in visualization module can be used in its short-form as:: mxnet.viz.plot_network(...) """ # todo add shape support try: from graphviz import Digraph except: raise ImportError("Draw network requires graphviz library") if not isinstance(symbol, Symbol): raise TypeError("symbol must be a Symbol") internals = symbol.get_internals() draw_shape = shape is not None if draw_shape: _, out_shapes, _ = internals.infer_shape(**shape) if out_shapes is None: raise ValueError("Input shape is incomplete") shape_dict = dict(zip(internals.list_outputs(), out_shapes)) draw_type = dtype is not None if draw_type: _, out_types, _ = internals.infer_type(**dtype) if out_types is None: raise ValueError("Input type is incomplete") type_dict = dict(zip(internals.list_outputs(), out_types)) conf = json.loads(symbol.tojson()) nodes = conf["nodes"] # check if multiple nodes have the same name if len(nodes) != len(set([node["name"] for node in nodes])): seen_nodes = set() # find all repeated names repeated = set(node['name'] for node in nodes if node['name'] in seen_nodes or seen_nodes.add(node['name'])) warning_message = "There are multiple variables with the same name in your graph, " \ "this may result in cyclic graph. 
Repeated names: " + ','.join(repeated) warnings.warn(warning_message, RuntimeWarning) # default attributes of node node_attr = {"shape": "box", "fixedsize": "true", "width": "1.3", "height": "0.8034", "style": "filled"} # merge the dict provided by user and the default one node_attr.update(node_attrs) dot = Digraph(name=title, format=save_format) # color map cm = ("#8dd3c7", "#fb8072", "#ffffb3", "#bebada", "#80b1d3", "#fdb462", "#b3de69", "#fccde5") def looks_like_weight(name): """Internal helper to figure out if node should be hidden with `hide_weights`. """ weight_like = ('_weight', '_bias', '_beta', '_gamma', '_moving_var', '_moving_mean', '_running_var', '_running_mean') return name.endswith(weight_like) # make nodes hidden_nodes = set() for node in nodes: op = node["op"] name = node["name"] # input data attr = copy.deepcopy(node_attr) label = name if op == "null": if looks_like_weight(node["name"]): if hide_weights: hidden_nodes.add(node["name"]) # else we don't render a node, but # don't add it to the hidden_nodes set # so it gets rendered as an empty oval continue attr["shape"] = "oval" # inputs get their own shape label = node["name"] attr["fillcolor"] = cm[0] elif op == "Convolution": label = "Convolution\n{kernel}/{stride}, {filter}".format( kernel="x".join(_str2tuple(node["attrs"]["kernel"])), stride="x".join(_str2tuple(node["attrs"]["stride"])) if "stride" in node["attrs"] else "1", filter=node["attrs"]["num_filter"] ) attr["fillcolor"] = cm[1] elif op == "FullyConnected": label = "FullyConnected\n{hidden}".format(hidden=node["attrs"]["num_hidden"]) attr["fillcolor"] = cm[1] elif op == "BatchNorm": attr["fillcolor"] = cm[3] elif op == 'Activation': act_type = node["attrs"]["act_type"] label = 'Activation\n{activation}'.format(activation=act_type) attr["fillcolor"] = cm[2] elif op == 'LeakyReLU': attrs = node.get("attrs") act_type = attrs.get("act_type", "Leaky") if attrs else "Leaky" label = 'LeakyReLU\n{activation}'.format(activation=act_type) attr["fillcolor"] = cm[2] elif op == "Pooling": label = "Pooling\n{pooltype}, {kernel}/{stride}".format(pooltype=node["attrs"]["pool_type"], kernel="x".join(_str2tuple(node["attrs"]["kernel"])) if "kernel" in node["attrs"] else "[]", stride="x".join(_str2tuple(node["attrs"]["stride"])) if "stride" in node["attrs"] else "1") attr["fillcolor"] = cm[4] elif op in ("Concat", "Flatten", "Reshape"): attr["fillcolor"] = cm[5] elif op == "Softmax": attr["fillcolor"] = cm[6] else: attr["fillcolor"] = cm[7] if op == "Custom": label = node["attrs"]["op_type"] dot.node(name=name, label=label, **attr) # add edges for node in nodes: # pylint: disable=too-many-nested-blocks op = node["op"] name = node["name"] if op == "null": continue else: inputs = node["inputs"] for item in inputs: input_node = nodes[item[0]] input_name = input_node["name"] if input_name not in hidden_nodes: attr = {"dir": "back", 'arrowtail':'open', 'label': ''} # add shapes if draw_shape: if input_node["op"] != "null": key = input_name + "_output" if "attrs" in input_node: params = input_node["attrs"] if "num_outputs" in params: key += str(int(params["num_outputs"]) - 1) shape = shape_dict[key][1:] label = "x".join([str(x) for x in shape]) attr["label"] = label else: key = input_name shape = shape_dict[key][1:] label = "x".join([str(x) for x in shape]) attr["label"] = label if draw_type: if input_node["op"] != "null": key = input_name + "_output" if "attrs" in input_node: params = input_node["attrs"] if "num_outputs" in params: key += str(int(params["num_outputs"]) - 1) dtype = 
type_dict[key] attr["label"] += '(' + dtype.__name__ + ')' else: key = input_name dtype = type_dict[key] attr["label"] += '(' + dtype.__name__ + ')' dot.edge(tail_name=name, head_name=input_name, **attr) return dot
Measure the accuracy of ResNet Parameters ---------- data_iterator: Iter examples of dataset network: ResNet Returns ---------- float the evaluation accuracy of the network
def evaluate_accuracy(data_iterator, network): """ Measure the accuracy of ResNet Parameters ---------- data_iterator: Iter examples of dataset network: ResNet Returns ---------- float the evaluation accuracy of the network """ acc = mx.metric.Accuracy() # Iterate through data and label for i, (data, label) in enumerate(data_iterator): # Get the data and label into the GPU data = data.as_in_context(ctx[0]) label = label.as_in_context(ctx[0]) # Get network's output which is a probability distribution # Apply argmax on the probability distribution to get network's classification. output = network(data) predictions = nd.argmax(output, axis=1) # Give network's prediction and the correct label to update the metric acc.update(preds=predictions, labels=label) # Return the accuracy return acc.get()[1]
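A minimal sketch of how evaluate_accuracy might be driven, assuming ctx is the list of devices the function sees and a gluon DataLoader yields (data, label) batches; the dataset and model choices below are illustrative:

import mxnet as mx
from mxnet import gluon
from mxnet.gluon.data.vision import datasets, transforms

ctx = [mx.gpu(0)] if mx.context.num_gpus() > 0 else [mx.cpu()]
net = gluon.model_zoo.vision.resnet18_v1(classes=10)
net.initialize(mx.init.Xavier(), ctx=ctx[0])

val_data = gluon.data.DataLoader(
    datasets.CIFAR10(train=False).transform_first(transforms.ToTensor()),
    batch_size=64, shuffle=False)

print('validation accuracy: %.4f' % evaluate_accuracy(val_data, net))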
Training with multiple GPUs Parameters ---------- batch_list: List list containing the data and the label of one batch context: List a list of all GPUs to be used for training network: ResNet gluon_trainer: Trainer the gluon Trainer used to update the network parameters
def train_batch(batch_list, context, network, gluon_trainer): """ Training with multiple GPUs Parameters ---------- batch_list: List list containing the data and the label of one batch context: List a list of all GPUs to be used for training network: ResNet gluon_trainer: Trainer the gluon Trainer used to update the network parameters """ # Split and load data into multiple GPUs data = batch_list[0] data = gluon.utils.split_and_load(data, context) # Split and load label into multiple GPUs label = batch_list[1] label = gluon.utils.split_and_load(label, context) # Run the forward and backward pass forward_backward(network, data, label) # Update the parameters this_batch_size = batch_list[0].shape[0] gluon_trainer.step(this_batch_size)
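train_batch relies on a forward_backward helper that is not shown in this snippet. A minimal sketch of what such a helper typically looks like in a multi-GPU gluon training loop; the loss choice is an assumption, not the authors' exact code:

from mxnet import autograd, gluon

softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()

def forward_backward(network, data, label):
    # Record the forward pass on every device slice, then backpropagate
    # each per-device loss; gradients accumulate on the corresponding GPU.
    with autograd.record():
        losses = [softmax_cross_entropy(network(X), Y)
                  for X, Y in zip(data, label)]
    for loss in losses:
        loss.backward()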
Take an executor's underlying symbol graph and return its generated optimized version. Parameters ---------- executor : An executor for which you want to see an optimized symbol. Getting an optimized symbol is useful to compare and verify the work TensorRT has done against the legacy behaviour. Returns ------- symbol : nnvm::Symbol The optimized NNVM symbol.
def get_optimized_symbol(executor): """ Take an executor's underlying symbol graph and return its generated optimized version. Parameters ---------- executor : An executor for which you want to see an optimized symbol. Getting an optimized symbol is useful to compare and verify the work TensorRT has done against the legacy behaviour. Returns ------- symbol : nnvm::Symbol The optimized NNVM symbol. """ handle = SymbolHandle() try: check_call(_LIB.MXExecutorGetOptimizedSymbol(executor.handle, ctypes.byref(handle))) result = sym.Symbol(handle=handle) return result except MXNetError: logging.error('Error while trying to fetch TRT optimized symbol for graph. Please ensure ' 'build was compiled with MXNET_USE_TENSORRT enabled.') raise
Bind current symbol to get an optimized trt executor. Parameters ---------- symbol : Symbol The symbol you wish to bind, and optimize with TensorRT. ctx : Context The device context the generated executor to run on. all_params : Dict of str->ndarray A dictionary of mappings from parameter names to parameter NDArrays. type_dict : Dict of str->numpy.dtype Input type dictionary, name->dtype stype_dict : Dict of str->str Input storage type dictionary, name->storage_type group2ctx : Dict of string to mx.Context The dict mapping the `ctx_group` attribute to the context assignment. kwargs : Dict of str->shape Input shape dictionary, name->shape Returns ------- executor : mxnet.Executor An optimized TensorRT executor.
def tensorrt_bind(symbol, ctx, all_params, type_dict=None, stype_dict=None, group2ctx=None, **kwargs): """Bind current symbol to get an optimized trt executor. Parameters ---------- symbol : Symbol The symbol you wish to bind, and optimize with TensorRT. ctx : Context The device context the generated executor to run on. all_params : Dict of str->ndarray A dictionary of mappings from parameter names to parameter NDArrays. type_dict : Dict of str->numpy.dtype Input type dictionary, name->dtype stype_dict : Dict of str->str Input storage type dictionary, name->storage_type group2ctx : Dict of string to mx.Context The dict mapping the `ctx_group` attribute to the context assignment. kwargs : Dict of str->shape Input shape dictionary, name->shape Returns ------- executor : mxnet.Executor An optimized TensorRT executor. """ kwargs['shared_buffer'] = all_params return symbol.simple_bind(ctx, type_dict=type_dict, stype_dict=stype_dict, group2ctx=group2ctx, **kwargs)
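A hedged usage sketch for tensorrt_bind, assuming a symbolic checkpoint named 'resnet-18' exists on disk and the build has TensorRT support; the file name and shapes are placeholders:

import mxnet as mx

sym, arg_params, aux_params = mx.model.load_checkpoint('resnet-18', 0)
device = mx.gpu(0)

# Merge weights and auxiliary states into one dict on the target device.
all_params = {k: v.as_in_context(device) for k, v in arg_params.items()}
all_params.update({k: v.as_in_context(device) for k, v in aux_params.items()})

executor = tensorrt_bind(sym, ctx=device, all_params=all_params,
                         data=(1, 3, 224, 224), softmax_label=(1,),
                         grad_req='null')
out = executor.forward(is_train=False,
                       data=mx.nd.zeros((1, 3, 224, 224), ctx=device))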
:param frame: a (w, h, channels) numpy array (image) :return: DataBatch of shape (1, channels, data_shape, data_shape)
def create_batch(self, frame): """ :param frame: a (w, h, channels) numpy array (image) :return: DataBatch of shape (1, channels, data_shape, data_shape) """ frame_resize = mx.nd.array(cv2.resize(frame, (self.data_shape[0], self.data_shape[1]))) #frame_resize = mx.img.imresize(frame, self.data_shape[0], self.data_shape[1], cv2.INTER_LINEAR) # Change dimensions from (w,h,channels) to (channels, w, h) frame_t = mx.nd.transpose(frame_resize, axes=(2,0,1)) frame_norm = frame_t - self.mean_pixels_nd # Add dimension for batch, results in (1,channels,w,h) batch_frame = [mx.nd.expand_dims(frame_norm, axis=0)] batch_shape = [DataDesc('data', batch_frame[0].shape)] batch = DataBatch(data=batch_frame, provide_data=batch_shape) return batch
detect all images in iterator Parameters: ---------- det_iter : DetIter iterator for all testing images show_timer : Boolean whether to print out detection exec time Returns: ---------- list of detection results
def detect_iter(self, det_iter, show_timer=False): """ detect all images in iterator Parameters: ---------- det_iter : DetIter iterator for all testing images show_timer : Boolean whether to print out detection exec time Returns: ---------- list of detection results """ num_images = det_iter._size if not isinstance(det_iter, mx.io.PrefetchingIter): det_iter = mx.io.PrefetchingIter(det_iter) start = timer() detections = self.mod.predict(det_iter).asnumpy() time_elapsed = timer() - start if show_timer: logging.info("Detection time for {} images: {:.4f} sec".format( num_images, time_elapsed)) result = Detector.filter_positive_detections(detections) return result
Return detections for batch :param batch: :return:
def detect_batch(self, batch): """ Return detections for batch :param batch: :return: """ self.mod.forward(batch, is_train=False) detections = self.mod.get_outputs()[0] positive_detections = Detector.filter_positive_detections(detections) return positive_detections
wrapper for detecting multiple images Parameters: ---------- im_list : list of str image path or list of image paths root_dir : str directory of input images, optional if image path already has full directory information extension : str image extension, eg. ".jpg", optional Returns: ---------- list of detection results in format [det0, det1...], det is in format np.array([id, score, xmin, ymin, xmax, ymax]...)
def im_detect(self, im_list, root_dir=None, extension=None, show_timer=False): """ wrapper for detecting multiple images Parameters: ---------- im_list : list of str image path or list of image paths root_dir : str directory of input images, optional if image path already has full directory information extension : str image extension, eg. ".jpg", optional Returns: ---------- list of detection results in format [det0, det1...], det is in format np.array([id, score, xmin, ymin, xmax, ymax]...) """ test_db = TestDB(im_list, root_dir=root_dir, extension=extension) test_iter = DetIter(test_db, 1, self.data_shape, self.mean_pixels, is_train=False) return self.detect_iter(test_iter, show_timer)
visualize detections in one image Parameters: ---------- img : numpy.array image, in bgr format dets : numpy.array ssd detections, numpy.array([[id, score, x1, y1, x2, y2]...]) each row is one object classes : tuple or list of str class names thresh : float score threshold
def visualize_detection(self, img, dets, classes=[], thresh=0.6): """ visualize detections in one image Parameters: ---------- img : numpy.array image, in bgr format dets : numpy.array ssd detections, numpy.array([[id, score, x1, y1, x2, y2]...]) each row is one object classes : tuple or list of str class names thresh : float score threshold """ import matplotlib.pyplot as plt import random plt.imshow(img) height = img.shape[0] width = img.shape[1] colors = dict() for det in dets: (klass, score, x0, y0, x1, y1) = det if score < thresh: continue cls_id = int(klass) if cls_id not in colors: colors[cls_id] = (random.random(), random.random(), random.random()) xmin = int(x0 * width) ymin = int(y0 * height) xmax = int(x1 * width) ymax = int(y1 * height) rect = plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, fill=False, edgecolor=colors[cls_id], linewidth=3.5) plt.gca().add_patch(rect) class_name = str(cls_id) if classes and len(classes) > cls_id: class_name = classes[cls_id] plt.gca().text(xmin, ymin - 2, '{:s} {:.3f}'.format(class_name, score), bbox=dict(facecolor=colors[cls_id], alpha=0.5), fontsize=12, color='white') plt.show()
First column (class id) is -1 for negative detections :param detections: :return:
def filter_positive_detections(detections): """ First column (class id) is -1 for negative detections :param detections: :return: """ class_idx = 0 assert(isinstance(detections, mx.nd.NDArray) or isinstance(detections, np.ndarray)) detections_per_image = [] # for each image for i in range(detections.shape[0]): result = [] det = detections[i, :, :] for obj in det: if obj[class_idx] >= 0: result.append(obj) detections_per_image.append(result) logging.info("%d positive detections", len(result)) return detections_per_image
wrapper for im_detect and visualize_detection Parameters: ---------- im_list : list of str or str image path or list of image paths root_dir : str or None directory of input images, optional if image path already has full directory information extension : str or None image extension, eg. ".jpg", optional Returns: ----------
def detect_and_visualize(self, im_list, root_dir=None, extension=None, classes=[], thresh=0.6, show_timer=False): """ wrapper for im_detect and visualize_detection Parameters: ---------- im_list : list of str or str image path or list of image paths root_dir : str or None directory of input images, optional if image path already has full directory information extension : str or None image extension, eg. ".jpg", optional Returns: ---------- """ dets = self.im_detect(im_list, root_dir, extension, show_timer=show_timer) if not isinstance(im_list, list): im_list = [im_list] assert len(dets) == len(im_list) for k, det in enumerate(dets): img = cv2.imread(im_list[k]) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) self.visualize_detection(img, det, classes, thresh)
Runs the caffe upgrade tool on the prototxt to create a prototxt in the latest format. This enables us to work just with the latest structures, instead of supporting all the variants :param caffe_root: path to the caffe root folder, where the upgrade tool is located :param deploy_proto: name of the original prototxt file :return: name of the new processed prototxt file
def process_network_proto(caffe_root, deploy_proto): """ Runs the caffe upgrade tool on the prototxt to create a prototxt in the latest format. This enables us to work just with the latest structures, instead of supporting all the variants :param caffe_root: path to the caffe root folder, where the upgrade tool is located :param deploy_proto: name of the original prototxt file :return: name of the new processed prototxt file """ processed_deploy_proto = deploy_proto + ".processed" from shutil import copyfile copyfile(deploy_proto, processed_deploy_proto) # run upgrade tool on new file name (same output file) import os upgrade_tool_command_line = caffe_root + '/build/tools/upgrade_net_proto_text.bin ' \ + processed_deploy_proto + ' ' + processed_deploy_proto os.system(upgrade_tool_command_line) return processed_deploy_proto
Reads from the caffe prototxt the network structure :param processed_deploy_prototxt: name of the prototxt to load, preferably the prototxt should be processed before using a call to process_network_proto() :return: network_def, layer_name_to_record, top_to_layers network_def: caffe network structure, gives access to *all* the network information layer_name_to_record: *ordered* dictionary which maps between layer name and a structure which describes in a simple form the layer parameters top_to_layers: dictionary which maps a blob name to an ordered list of layers which output it; when a top is used several times, as in in-place layers, the list will contain all the layers in order of appearance
def read_network_dag(processed_deploy_prototxt): """ Reads from the caffe prototxt the network structure :param processed_deploy_prototxt: name of the prototxt to load, preferably the prototxt should be processed before using a call to process_network_proto() :return: network_def, layer_name_to_record, top_to_layers network_def: caffe network structure, gives access to *all* the network information layer_name_to_record: *ordered* dictionary which maps between layer name and a structure which describes in a simple form the layer parameters top_to_layers: dictionary which maps a blob name to an ordered list of layers which output it; when a top is used several times, as in in-place layers, the list will contain all the layers in order of appearance """ from caffe.proto import caffe_pb2 from google.protobuf import text_format # pylint: disable=relative-import from collections import OrderedDict # load prototxt file network_def = caffe_pb2.NetParameter() with open(processed_deploy_prototxt, 'r') as proto_file: text_format.Merge(str(proto_file.read()), network_def) # map layer name to layer record layer_name_to_record = OrderedDict() for layer_def in network_def.layer: if (len(layer_def.include) == 0) or \ (caffe_pb2.TEST in [item.phase for item in layer_def.include]): layer_name_to_record[layer_def.name] = LayerRecord(layer_def) top_to_layers = dict() for layer in network_def.layer: # no specific phase, or TEST phase is specifically asked for if (len(layer.include) == 0) or (caffe_pb2.TEST in [item.phase for item in layer.include]): for top in layer.top: if top not in top_to_layers: top_to_layers[top] = list() top_to_layers[top].append(layer.name) # find parents and children of all layers for child_layer_name in layer_name_to_record.keys(): # pylint: disable=too-many-nested-blocks child_layer_def = layer_name_to_record[child_layer_name] for bottom in child_layer_def.bottoms: if bottom in top_to_layers: for parent_layer_name in top_to_layers[bottom]: if parent_layer_name in layer_name_to_record: parent_layer_def = layer_name_to_record[parent_layer_name] if parent_layer_def not in child_layer_def.parents: child_layer_def.parents.append(parent_layer_def) if child_layer_def not in parent_layer_def.children: parent_layer_def.children.append(child_layer_def) # update filter, stride, pad for maxout "structures" for layer_name in layer_name_to_record.keys(): layer_def = layer_name_to_record[layer_name] if layer_def.type == 'Eltwise' and \ len(layer_def.parents) == 1 and \ layer_def.parents[0].type == 'Slice' and \ len(layer_def.parents[0].parents) == 1 and \ layer_def.parents[0].parents[0].type in ['Convolution', 'InnerProduct']: layer_def.filter = layer_def.parents[0].parents[0].filter layer_def.stride = layer_def.parents[0].parents[0].stride layer_def.pad = layer_def.parents[0].parents[0].pad return network_def, layer_name_to_record, top_to_layers
Reads caffe formatted mean file :param caffe_mean_file: path to caffe mean file, presumably with 'binaryproto' suffix :return: mean image, converted from BGR to RGB format
def read_caffe_mean(caffe_mean_file): """ Reads caffe formatted mean file :param caffe_mean_file: path to caffe mean file, presumably with 'binaryproto' suffix :return: mean image, converted from BGR to RGB format """ import caffe_parser import numpy as np mean_blob = caffe_parser.caffe_pb2.BlobProto() with open(caffe_mean_file, 'rb') as f: mean_blob.ParseFromString(f.read()) img_mean_np = np.array(mean_blob.data) img_mean_np = img_mean_np.reshape(mean_blob.channels, mean_blob.height, mean_blob.width) # swap channels from Caffe BGR to RGB img_mean_np[[0, 2], :, :] = img_mean_np[[2, 0], :, :] return img_mean_np
Helper function for margin-based loss. Return a distance matrix given a matrix.
def get_distance(F, x): """Helper function for margin-based loss. Return a distance matrix given a matrix.""" n = x.shape[0] square = F.sum(x ** 2.0, axis=1, keepdims=True) distance_square = square + square.transpose() - (2.0 * F.dot(x, x.transpose())) # Adding identity to make sqrt work. return F.sqrt(distance_square + F.array(np.identity(n)))
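A quick numerical check of get_distance, using the imperative mx.nd namespace as F; the toy data is purely illustrative:

import mxnet as mx

x = mx.nd.array([[0., 0.], [3., 4.], [6., 8.]])
d = get_distance(mx.nd, x)
# Off-diagonal entries are the usual Euclidean distances (5, 10 and 5 here);
# the diagonal is 1 rather than 0 because of the identity added before sqrt.
print(d.asnumpy())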
cross entropy loss with a mask
def cross_entropy_loss(inputs, labels, rescale_loss=1): """ cross entropy loss with a mask """ criterion = mx.gluon.loss.SoftmaxCrossEntropyLoss(weight=rescale_loss) loss = criterion(inputs, labels) mask = S.var('mask') loss = loss * S.reshape(mask, shape=(-1,)) return S.make_loss(loss.mean())
word embedding + LSTM Projected
def rnn(bptt, vocab_size, num_embed, nhid, num_layers, dropout, num_proj, batch_size): """ word embedding + LSTM Projected """ state_names = [] data = S.var('data') weight = S.var("encoder_weight", stype='row_sparse') embed = S.sparse.Embedding(data=data, weight=weight, input_dim=vocab_size, output_dim=num_embed, name='embed', sparse_grad=True) states = [] outputs = S.Dropout(embed, p=dropout) for i in range(num_layers): prefix = 'lstmp%d_' % i init_h = S.var(prefix + 'init_h', shape=(batch_size, num_proj), init=mx.init.Zero()) init_c = S.var(prefix + 'init_c', shape=(batch_size, nhid), init=mx.init.Zero()) state_names += [prefix + 'init_h', prefix + 'init_c'] lstmp = mx.gluon.contrib.rnn.LSTMPCell(nhid, num_proj, prefix=prefix) outputs, next_states = lstmp.unroll(bptt, outputs, begin_state=[init_h, init_c], \ layout='NTC', merge_outputs=True) outputs = S.Dropout(outputs, p=dropout) states += [S.stop_gradient(s) for s in next_states] outputs = S.reshape(outputs, shape=(-1, num_proj)) trainable_lstm_args = [] for arg in outputs.list_arguments(): if 'lstmp' in arg and 'init' not in arg: trainable_lstm_args.append(arg) return outputs, states, trainable_lstm_args, state_names
Sampled softmax via importance sampling. This under-estimates the full softmax and is only used for training.
def sampled_softmax(num_classes, num_samples, in_dim, inputs, weight, bias, sampled_values, remove_accidental_hits=True): """ Sampled softmax via importance sampling. This under-estimates the full softmax and is only used for training. """ # inputs = (n, in_dim) sample, prob_sample, prob_target = sampled_values # (num_samples, ) sample = S.var('sample', shape=(num_samples,), dtype='float32') # (n, ) label = S.var('label') label = S.reshape(label, shape=(-1,), name="label_reshape") # (num_samples+n, ) sample_label = S.concat(sample, label, dim=0) # lookup weights and biases # (num_samples+n, dim) sample_target_w = S.sparse.Embedding(data=sample_label, weight=weight, input_dim=num_classes, output_dim=in_dim, sparse_grad=True) # (num_samples+n, 1) sample_target_b = S.sparse.Embedding(data=sample_label, weight=bias, input_dim=num_classes, output_dim=1, sparse_grad=True) # (num_samples, dim) sample_w = S.slice(sample_target_w, begin=(0, 0), end=(num_samples, None)) target_w = S.slice(sample_target_w, begin=(num_samples, 0), end=(None, None)) sample_b = S.slice(sample_target_b, begin=(0, 0), end=(num_samples, None)) target_b = S.slice(sample_target_b, begin=(num_samples, 0), end=(None, None)) # target # (n, 1) true_pred = S.sum(target_w * inputs, axis=1, keepdims=True) + target_b # samples # (n, num_samples) sample_b = S.reshape(sample_b, (-1,)) sample_pred = S.FullyConnected(inputs, weight=sample_w, bias=sample_b, num_hidden=num_samples) # remove accidental hits if remove_accidental_hits: label_v = S.reshape(label, (-1, 1)) sample_v = S.reshape(sample, (1, -1)) neg = S.broadcast_equal(label_v, sample_v) * -1e37 sample_pred = sample_pred + neg prob_sample = S.reshape(prob_sample, shape=(1, num_samples)) p_target = true_pred - S.log(prob_target) p_sample = S.broadcast_sub(sample_pred, S.log(prob_sample)) # return logits and new_labels # (n, 1+num_samples) logits = S.concat(p_target, p_sample, dim=1) new_targets = S.zeros_like(label) return logits, new_targets
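For reference, the correction applied above is the standard importance-sampling adjustment for sampled softmax. In the notation of the code (a paraphrase, not taken from the original source), with raw logit s(.) and expected count Q(.) under the log-uniform proposal:

    s'(y)   = s(y)   - log Q(y)        # true class y
    s'(c_i) = s(c_i) - log Q(c_i)      # each sampled class c_i

Accidental hits (c_i == y) are pushed towards -1e37 so they are effectively masked, and the cross-entropy target for the concatenated logits [s'(y), s'(c_1), ..., s'(c_k)] is always class 0, which is why new_targets is all zeros.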
Split labels into `num_splits` and generate candidates based on log-uniform distribution.
def generate_samples(label, num_splits, sampler): """ Split labels into `num_splits` and generate candidates based on log-uniform distribution. """ def listify(x): return x if isinstance(x, list) else [x] label_splits = listify(label.split(num_splits, axis=0)) prob_samples = [] prob_targets = [] samples = [] for label_split in label_splits: label_split_2d = label_split.reshape((-1,1)) sampled_value = sampler.draw(label_split_2d) sampled_classes, exp_cnt_true, exp_cnt_sampled = sampled_value samples.append(sampled_classes.astype(np.float32)) prob_targets.append(exp_cnt_true.astype(np.float32).reshape((-1,1))) prob_samples.append(exp_cnt_sampled.astype(np.float32)) return samples, prob_samples, prob_targets
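generate_samples expects a sampler object whose draw(true_classes) method returns (sampled_classes, expected_count_true, expected_count_sampled). A minimal sketch of such a sampler built on mx.nd.contrib.rand_zipfian; the class name and constructor arguments are illustrative assumptions:

import mxnet as mx

class LogUniformSampler(object):
    """Draws candidate classes from a log-uniform (Zipfian) distribution."""
    def __init__(self, range_max, num_sampled):
        self.range_max = range_max      # size of the class/vocabulary range
        self.num_sampled = num_sampled  # number of candidates drawn per call

    def draw(self, true_classes):
        # rand_zipfian returns (sampled_classes, exp_count_true, exp_count_sampled)
        return mx.nd.contrib.rand_zipfian(true_classes, self.num_sampled, self.range_max)

# e.g. sampler = LogUniformSampler(range_max=vocab_size, num_sampled=8192)
# samples, prob_samples, prob_targets = generate_samples(labels, num_splits=1, sampler=sampler)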
Load & generate training examples from multivariate time series data :return: data iters & variables required to define network architecture
def build_iters(data_dir, max_records, q, horizon, splits, batch_size): """ Load & generate training examples from multivariate time series data :return: data iters & variables required to define network architecture """ # Read in data as numpy array df = pd.read_csv(os.path.join(data_dir, "electricity.txt"), sep=",", header=None) feature_df = df.iloc[:, :].astype(float) x = feature_df.as_matrix() x = x[:max_records] if max_records else x # Construct training examples based on horizon and window x_ts = np.zeros((x.shape[0] - q, q, x.shape[1])) y_ts = np.zeros((x.shape[0] - q, x.shape[1])) for n in range(x.shape[0]): if n + 1 < q: continue elif n + 1 + horizon > x.shape[0]: continue else: y_n = x[n + horizon, :] x_n = x[n + 1 - q:n + 1, :] x_ts[n-q] = x_n y_ts[n-q] = y_n # Split into training and testing data training_examples = int(x_ts.shape[0] * splits[0]) valid_examples = int(x_ts.shape[0] * splits[1]) x_train, y_train = x_ts[:training_examples], \ y_ts[:training_examples] x_valid, y_valid = x_ts[training_examples:training_examples + valid_examples], \ y_ts[training_examples:training_examples + valid_examples] x_test, y_test = x_ts[training_examples + valid_examples:], \ y_ts[training_examples + valid_examples:] #build iterators to feed batches to network train_iter = mx.io.NDArrayIter(data=x_train, label=y_train, batch_size=batch_size) val_iter = mx.io.NDArrayIter(data=x_valid, label=y_valid, batch_size=batch_size) test_iter = mx.io.NDArrayIter(data=x_test, label=y_test, batch_size=batch_size) return train_iter, val_iter, test_iter
Returns a pre-defined model by name Parameters ---------- name : str Name of the model. pretrained : bool Whether to load the pretrained weights for model. classes : int Number of classes for the output layer. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '$MXNET_HOME/models' Location for keeping the model parameters. Returns ------- HybridBlock The model.
def get_model(name, **kwargs): """Returns a pre-defined model by name Parameters ---------- name : str Name of the model. pretrained : bool Whether to load the pretrained weights for model. classes : int Number of classes for the output layer. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default '$MXNET_HOME/models' Location for keeping the model parameters. Returns ------- HybridBlock The model. """ models = {'resnet18_v1': resnet18_v1, 'resnet34_v1': resnet34_v1, 'resnet50_v1': resnet50_v1, 'resnet101_v1': resnet101_v1, 'resnet152_v1': resnet152_v1, 'resnet18_v2': resnet18_v2, 'resnet34_v2': resnet34_v2, 'resnet50_v2': resnet50_v2, 'resnet101_v2': resnet101_v2, 'resnet152_v2': resnet152_v2, 'vgg11': vgg11, 'vgg13': vgg13, 'vgg16': vgg16, 'vgg19': vgg19, 'vgg11_bn': vgg11_bn, 'vgg13_bn': vgg13_bn, 'vgg16_bn': vgg16_bn, 'vgg19_bn': vgg19_bn, 'alexnet': alexnet, 'densenet121': densenet121, 'densenet161': densenet161, 'densenet169': densenet169, 'densenet201': densenet201, 'squeezenet1.0': squeezenet1_0, 'squeezenet1.1': squeezenet1_1, 'inceptionv3': inception_v3, 'mobilenet1.0': mobilenet1_0, 'mobilenet0.75': mobilenet0_75, 'mobilenet0.5': mobilenet0_5, 'mobilenet0.25': mobilenet0_25, 'mobilenetv2_1.0': mobilenet_v2_1_0, 'mobilenetv2_0.75': mobilenet_v2_0_75, 'mobilenetv2_0.5': mobilenet_v2_0_5, 'mobilenetv2_0.25': mobilenet_v2_0_25 } name = name.lower() if name not in models: raise ValueError( 'Model %s is not supported. Available options are\n\t%s' % ( name, '\n\t'.join(sorted(models.keys())))) return models[name](**kwargs)
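A usage sketch, assuming the pretrained weights can be fetched from the gluon model zoo:

import mxnet as mx

net = get_model('resnet18_v1', pretrained=True, ctx=mx.cpu())
x = mx.nd.random.uniform(shape=(1, 3, 224, 224))
print(net(x).shape)  # (1, 1000) class scores for one image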
Return a new handle with specified storage type, shape, dtype and context. Empty handle is only used to hold results Returns ------- handle A new empty ndarray handle
def _new_alloc_handle(stype, shape, ctx, delay_alloc, dtype, aux_types, aux_shapes=None): """Return a new handle with specified storage type, shape, dtype and context. Empty handle is only used to hold results Returns ------- handle A new empty ndarray handle """ hdl = NDArrayHandle() for aux_t in aux_types: if np.dtype(aux_t) != np.dtype("int64"): raise NotImplementedError("only int64 is supported for aux types") aux_type_ids = [int(_DTYPE_NP_TO_MX[np.dtype(aux_t).type]) for aux_t in aux_types] aux_shapes = [(0,) for aux_t in aux_types] if aux_shapes is None else aux_shapes aux_shape_lens = [len(aux_shape) for aux_shape in aux_shapes] aux_shapes = py_sum(aux_shapes, ()) num_aux = mx_uint(len(aux_types)) check_call(_LIB.MXNDArrayCreateSparseEx( ctypes.c_int(int(_STORAGE_TYPE_STR_TO_ID[stype])), c_array_buf(mx_uint, native_array('I', shape)), mx_uint(len(shape)), ctypes.c_int(ctx.device_typeid), ctypes.c_int(ctx.device_id), ctypes.c_int(int(delay_alloc)), ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])), num_aux, c_array_buf(ctypes.c_int, native_array('i', aux_type_ids)), c_array_buf(mx_uint, native_array('I', aux_shape_lens)), c_array_buf(mx_uint, native_array('I', aux_shapes)), ctypes.byref(hdl))) return hdl
Prepare `source_array` so that it can be used to construct NDArray. `source_array` is converted to a `np.ndarray` if it's neither an `NDArray` \ nor an `np.ndarray`.
def _prepare_src_array(source_array, dtype): """Prepare `source_array` so that it can be used to construct NDArray. `source_array` is converted to a `np.ndarray` if it's neither an `NDArray` \ nor an `np.ndarray`. """ if not isinstance(source_array, NDArray) and not isinstance(source_array, np.ndarray): try: source_array = np.array(source_array, dtype=dtype) except: raise TypeError('values must be array like object') return source_array
Prepare the value of dtype if `dtype` is None. If `src_array` is an NDArray, numpy.ndarray or scipy.sparse.csr.csr_matrix, return src_array.dtype. float32 is returned otherwise.
def _prepare_default_dtype(src_array, dtype): """Prepare the value of dtype if `dtype` is None. If `src_array` is an NDArray, numpy.ndarray or scipy.sparse.csr.csr_matrix, return src_array.dtype. float32 is returned otherwise.""" if dtype is None: if isinstance(src_array, (NDArray, np.ndarray)): dtype = src_array.dtype elif spsp and isinstance(src_array, spsp.csr.csr_matrix): dtype = src_array.dtype else: dtype = mx_real_t return dtype
check s1 == s2 if both are not None
def _check_shape(s1, s2): """check s1 == s2 if both are not None""" if s1 and s2 and s1 != s2: raise ValueError("Shape mismatch detected. " + str(s1) + " v.s. " + str(s2))
Creates a `CSRNDArray`, an 2D array with compressed sparse row (CSR) format. The CSRNDArray can be instantiated in several ways: - csr_matrix(D): to construct a CSRNDArray with a dense 2D array ``D`` - **D** (*array_like*) - An object exposing the array interface, an object whose \ `__array__` method returns an array, or any (nested) sequence. - **ctx** (*Context, optional*) - Device context \ (default is the current default context). - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \ The default dtype is ``D.dtype`` if ``D`` is an NDArray or numpy.ndarray, \ float32 otherwise. - csr_matrix(S) to construct a CSRNDArray with a sparse 2D array ``S`` - **S** (*CSRNDArray or scipy.sparse.csr.csr_matrix*) - A sparse matrix. - **ctx** (*Context, optional*) - Device context \ (default is the current default context). - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \ The default dtype is ``S.dtype``. - csr_matrix((M, N)) to construct an empty CSRNDArray with shape ``(M, N)`` - **M** (*int*) - Number of rows in the matrix - **N** (*int*) - Number of columns in the matrix - **ctx** (*Context, optional*) - Device context \ (default is the current default context). - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \ The default dtype is float32. - csr_matrix((data, indices, indptr)) to construct a CSRNDArray based on the definition of compressed sparse row format \ using three separate arrays, \ where the column indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]`` \ and their corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``. \ The column indices for a given row are expected to be **sorted in ascending order.** \ Duplicate column entries for the same row are not allowed. - **data** (*array_like*) - An object exposing the array interface, which \ holds all the non-zero entries of the matrix in row-major order. - **indices** (*array_like*) - An object exposing the array interface, which \ stores the column index for each non-zero element in ``data``. - **indptr** (*array_like*) - An object exposing the array interface, which \ stores the offset into ``data`` of the first non-zero element number of each \ row of the matrix. - **shape** (*tuple of int, optional*) - The shape of the array. The default \ shape is inferred from the indices and indptr arrays. - **ctx** (*Context, optional*) - Device context \ (default is the current default context). - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \ The default dtype is ``data.dtype`` if ``data`` is an NDArray or numpy.ndarray, \ float32 otherwise. - csr_matrix((data, (row, col))) to construct a CSRNDArray based on the COOrdinate format \ using three seperate arrays, \ where ``row[i]`` is the row index of the element, \ ``col[i]`` is the column index of the element \ and ``data[i]`` is the data corresponding to the element. All the missing \ elements in the input are taken to be zeroes. - **data** (*array_like*) - An object exposing the array interface, which \ holds all the non-zero entries of the matrix in COO format. - **row** (*array_like*) - An object exposing the array interface, which \ stores the row index for each non zero element in ``data``. - **col** (*array_like*) - An object exposing the array interface, which \ stores the col index for each non zero element in ``data``. - **shape** (*tuple of int, optional*) - The shape of the array. 
The default \ shape is inferred from the ``row`` and ``col`` arrays. - **ctx** (*Context, optional*) - Device context \ (default is the current default context). - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \ The default dtype is float32. Parameters ---------- arg1: tuple of int, tuple of array_like, array_like, CSRNDArray, scipy.sparse.csr_matrix, \ scipy.sparse.coo_matrix, tuple of int or tuple of array_like The argument to help instantiate the csr matrix. See above for further details. shape : tuple of int, optional The shape of the csr matrix. ctx: Context, optional Device context (default is the current default context). dtype: str or numpy.dtype, optional The data type of the output array. Returns ------- CSRNDArray A `CSRNDArray` with the `csr` storage representation. Example ------- >>> a = mx.nd.sparse.csr_matrix(([1, 2, 3], [1, 0, 2], [0, 1, 2, 2, 3]), shape=(4, 3)) >>> a.asnumpy() array([[ 0., 1., 0.], [ 2., 0., 0.], [ 0., 0., 0.], [ 0., 0., 3.]], dtype=float32) See Also -------- CSRNDArray : MXNet NDArray in compressed sparse row format.
def csr_matrix(arg1, shape=None, ctx=None, dtype=None): """Creates a `CSRNDArray`, an 2D array with compressed sparse row (CSR) format. The CSRNDArray can be instantiated in several ways: - csr_matrix(D): to construct a CSRNDArray with a dense 2D array ``D`` - **D** (*array_like*) - An object exposing the array interface, an object whose \ `__array__` method returns an array, or any (nested) sequence. - **ctx** (*Context, optional*) - Device context \ (default is the current default context). - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \ The default dtype is ``D.dtype`` if ``D`` is an NDArray or numpy.ndarray, \ float32 otherwise. - csr_matrix(S) to construct a CSRNDArray with a sparse 2D array ``S`` - **S** (*CSRNDArray or scipy.sparse.csr.csr_matrix*) - A sparse matrix. - **ctx** (*Context, optional*) - Device context \ (default is the current default context). - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \ The default dtype is ``S.dtype``. - csr_matrix((M, N)) to construct an empty CSRNDArray with shape ``(M, N)`` - **M** (*int*) - Number of rows in the matrix - **N** (*int*) - Number of columns in the matrix - **ctx** (*Context, optional*) - Device context \ (default is the current default context). - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \ The default dtype is float32. - csr_matrix((data, indices, indptr)) to construct a CSRNDArray based on the definition of compressed sparse row format \ using three separate arrays, \ where the column indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]`` \ and their corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``. \ The column indices for a given row are expected to be **sorted in ascending order.** \ Duplicate column entries for the same row are not allowed. - **data** (*array_like*) - An object exposing the array interface, which \ holds all the non-zero entries of the matrix in row-major order. - **indices** (*array_like*) - An object exposing the array interface, which \ stores the column index for each non-zero element in ``data``. - **indptr** (*array_like*) - An object exposing the array interface, which \ stores the offset into ``data`` of the first non-zero element number of each \ row of the matrix. - **shape** (*tuple of int, optional*) - The shape of the array. The default \ shape is inferred from the indices and indptr arrays. - **ctx** (*Context, optional*) - Device context \ (default is the current default context). - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \ The default dtype is ``data.dtype`` if ``data`` is an NDArray or numpy.ndarray, \ float32 otherwise. - csr_matrix((data, (row, col))) to construct a CSRNDArray based on the COOrdinate format \ using three seperate arrays, \ where ``row[i]`` is the row index of the element, \ ``col[i]`` is the column index of the element \ and ``data[i]`` is the data corresponding to the element. All the missing \ elements in the input are taken to be zeroes. - **data** (*array_like*) - An object exposing the array interface, which \ holds all the non-zero entries of the matrix in COO format. - **row** (*array_like*) - An object exposing the array interface, which \ stores the row index for each non zero element in ``data``. - **col** (*array_like*) - An object exposing the array interface, which \ stores the col index for each non zero element in ``data``. 
- **shape** (*tuple of int, optional*) - The shape of the array. The default \ shape is inferred from the ``row`` and ``col`` arrays. - **ctx** (*Context, optional*) - Device context \ (default is the current default context). - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \ The default dtype is float32. Parameters ---------- arg1: tuple of int, tuple of array_like, array_like, CSRNDArray, scipy.sparse.csr_matrix, \ scipy.sparse.coo_matrix, tuple of int or tuple of array_like The argument to help instantiate the csr matrix. See above for further details. shape : tuple of int, optional The shape of the csr matrix. ctx: Context, optional Device context (default is the current default context). dtype: str or numpy.dtype, optional The data type of the output array. Returns ------- CSRNDArray A `CSRNDArray` with the `csr` storage representation. Example ------- >>> a = mx.nd.sparse.csr_matrix(([1, 2, 3], [1, 0, 2], [0, 1, 2, 2, 3]), shape=(4, 3)) >>> a.asnumpy() array([[ 0., 1., 0.], [ 2., 0., 0.], [ 0., 0., 0.], [ 0., 0., 3.]], dtype=float32) See Also -------- CSRNDArray : MXNet NDArray in compressed sparse row format. """ # construct a csr matrix from (M, N) or (data, indices, indptr) if isinstance(arg1, tuple): arg_len = len(arg1) if arg_len == 2: # construct a sparse csr matrix from # scipy coo matrix if input format is coo if isinstance(arg1[1], tuple) and len(arg1[1]) == 2: data, (row, col) = arg1 if isinstance(data, NDArray): data = data.asnumpy() if isinstance(row, NDArray): row = row.asnumpy() if isinstance(col, NDArray): col = col.asnumpy() coo = spsp.coo_matrix((data, (row, col)), shape=shape) _check_shape(coo.shape, shape) csr = coo.tocsr() return array(csr, ctx=ctx, dtype=dtype) else: # empty matrix with shape _check_shape(arg1, shape) return empty('csr', arg1, ctx=ctx, dtype=dtype) elif arg_len == 3: # data, indices, indptr return _csr_matrix_from_definition(arg1[0], arg1[1], arg1[2], shape=shape, ctx=ctx, dtype=dtype) else: raise ValueError("Unexpected length of input tuple: " + str(arg_len)) else: # construct a csr matrix from a sparse / dense one if isinstance(arg1, CSRNDArray) or (spsp and isinstance(arg1, spsp.csr.csr_matrix)): # construct a csr matrix from scipy or CSRNDArray _check_shape(arg1.shape, shape) return array(arg1, ctx=ctx, dtype=dtype) elif isinstance(arg1, RowSparseNDArray): raise ValueError("Unexpected input type: RowSparseNDArray") else: # construct a csr matrix from a dense one # prepare default ctx and dtype since mx.nd.array doesn't use default values # based on source_array dtype = _prepare_default_dtype(arg1, dtype) # create dns array with provided dtype. ctx is not passed since copy across # ctx requires dtype to be the same dns = _array(arg1, dtype=dtype) if ctx is not None and dns.context != ctx: dns = dns.as_in_context(ctx) _check_shape(dns.shape, shape) return dns.tostype('csr')
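The docstring example above uses the (data, indices, indptr) form; the same matrix can also be built from COO-style (data, (row, col)) input, which requires scipy since the conversion goes through scipy.sparse:

import mxnet as mx

a = mx.nd.sparse.csr_matrix(([1, 2, 3], ([0, 1, 3], [1, 0, 2])), shape=(4, 3))
print(a.asnumpy())
# [[ 0.  1.  0.]
#  [ 2.  0.  0.]
#  [ 0.  0.  0.]
#  [ 0.  0.  3.]]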
Create a `CSRNDArray` based on data, indices and indptr
def _csr_matrix_from_definition(data, indices, indptr, shape=None, ctx=None, dtype=None, indices_type=None, indptr_type=None): """Create a `CSRNDArray` based on data, indices and indptr""" # pylint: disable= no-member, protected-access storage_type = 'csr' # context ctx = current_context() if ctx is None else ctx # types dtype = _prepare_default_dtype(data, dtype) indptr_type = _STORAGE_AUX_TYPES[storage_type][0] if indptr_type is None else indptr_type indices_type = _STORAGE_AUX_TYPES[storage_type][1] if indices_type is None else indices_type # prepare src array and types data = _prepare_src_array(data, dtype) indptr = _prepare_src_array(indptr, indptr_type) indices = _prepare_src_array(indices, indices_type) # TODO(junwu): Convert data, indptr, and indices to mxnet NDArrays # if they are not for now. In the future, we should provide a c-api # to accept np.ndarray types to copy from to result.data and aux_data if not isinstance(data, NDArray): data = _array(data, ctx, dtype) if not isinstance(indptr, NDArray): indptr = _array(indptr, ctx, indptr_type) if not isinstance(indices, NDArray): indices = _array(indices, ctx, indices_type) if shape is None: if indices.shape[0] == 0: raise ValueError('invalid shape') shape = (len(indptr) - 1, op.max(indices).asscalar() + 1) # verify shapes aux_shapes = [indptr.shape, indices.shape] if data.ndim != 1 or indptr.ndim != 1 or indices.ndim != 1 or \ indptr.shape[0] == 0 or len(shape) != 2: raise ValueError('invalid shape') result = CSRNDArray(_new_alloc_handle(storage_type, shape, ctx, False, dtype, [indptr_type, indices_type], aux_shapes)) check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, data.handle, ctypes.c_int(-1))) check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indptr.handle, ctypes.c_int(0))) check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indices.handle, ctypes.c_int(1))) return result
Creates a `RowSparseNDArray`, a multidimensional row sparse array with a set of \ tensor slices at given indices. The RowSparseNDArray can be instantiated in several ways: - row_sparse_array(D): to construct a RowSparseNDArray with a dense ndarray ``D`` - **D** (*array_like*) - An object exposing the array interface, an object whose \ `__array__` method returns an array, or any (nested) sequence. - **ctx** (*Context, optional*) - Device context \ (default is the current default context). - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \ The default dtype is ``D.dtype`` if ``D`` is an NDArray or numpy.ndarray, \ float32 otherwise. - row_sparse_array(S) to construct a RowSparseNDArray with a sparse ndarray ``S`` - **S** (*RowSparseNDArray*) - A sparse ndarray. - **ctx** (*Context, optional*) - Device context \ (default is the current default context). - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \ The default dtype is ``S.dtype``. - row_sparse_array((D0, D1 .. Dn)) to construct an empty RowSparseNDArray with shape ``(D0, D1, ... Dn)`` - **D0, D1 .. Dn** (*int*) - The shape of the ndarray - **ctx** (*Context, optional*) - Device context \ (default is the current default context). - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \ The default dtype is float32. - row_sparse_array((data, indices)) to construct a RowSparseNDArray based on the definition of row sparse format \ using two separate arrays, \ where the `indices` stores the indices of the row slices with non-zeros, while the values are stored in `data`. The corresponding NDArray ``dense`` represented by RowSparseNDArray ``rsp`` has \ ``dense[rsp.indices[i], :, :, :, ...] = rsp.data[i, :, :, :, ...]`` The row indices for are expected to be **sorted in ascending order.** \ - **data** (*array_like*) - An object exposing the array interface, which \ holds all the non-zero row slices of the array. - **indices** (*array_like*) - An object exposing the array interface, which \ stores the row index for each row slice with non-zero elements. - **shape** (*tuple of int, optional*) - The shape of the array. The default \ shape is inferred from the indices and indptr arrays. - **ctx** (*Context, optional*) - Device context \ (default is the current default context). - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \ The default dtype is float32. Parameters ---------- arg1 : NDArray, numpy.ndarray, RowSparseNDArray, tuple of int or tuple of array_like The argument to help instantiate the row sparse ndarray. See above for further details. shape : tuple of int, optional The shape of the row sparse ndarray. (Default value = None) ctx : Context, optional Device context (default is the current default context). dtype : str or numpy.dtype, optional The data type of the output array. (Default value = None) Returns ------- RowSparseNDArray An `RowSparseNDArray` with the `row_sparse` storage representation. Examples -------- >>> a = mx.nd.sparse.row_sparse_array(([[1, 2], [3, 4]], [1, 4]), shape=(6, 2)) >>> a.asnumpy() array([[ 0., 0.], [ 1., 2.], [ 0., 0.], [ 0., 0.], [ 3., 4.], [ 0., 0.]], dtype=float32) See Also -------- RowSparseNDArray : MXNet NDArray in row sparse format.
def row_sparse_array(arg1, shape=None, ctx=None, dtype=None): """Creates a `RowSparseNDArray`, a multidimensional row sparse array with a set of \ tensor slices at given indices. The RowSparseNDArray can be instantiated in several ways: - row_sparse_array(D): to construct a RowSparseNDArray with a dense ndarray ``D`` - **D** (*array_like*) - An object exposing the array interface, an object whose \ `__array__` method returns an array, or any (nested) sequence. - **ctx** (*Context, optional*) - Device context \ (default is the current default context). - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \ The default dtype is ``D.dtype`` if ``D`` is an NDArray or numpy.ndarray, \ float32 otherwise. - row_sparse_array(S) to construct a RowSparseNDArray with a sparse ndarray ``S`` - **S** (*RowSparseNDArray*) - A sparse ndarray. - **ctx** (*Context, optional*) - Device context \ (default is the current default context). - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \ The default dtype is ``S.dtype``. - row_sparse_array((D0, D1 .. Dn)) to construct an empty RowSparseNDArray with shape ``(D0, D1, ... Dn)`` - **D0, D1 .. Dn** (*int*) - The shape of the ndarray - **ctx** (*Context, optional*) - Device context \ (default is the current default context). - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \ The default dtype is float32. - row_sparse_array((data, indices)) to construct a RowSparseNDArray based on the definition of row sparse format \ using two separate arrays, \ where the `indices` stores the indices of the row slices with non-zeros, while the values are stored in `data`. The corresponding NDArray ``dense`` represented by RowSparseNDArray ``rsp`` has \ ``dense[rsp.indices[i], :, :, :, ...] = rsp.data[i, :, :, :, ...]`` The row indices for are expected to be **sorted in ascending order.** \ - **data** (*array_like*) - An object exposing the array interface, which \ holds all the non-zero row slices of the array. - **indices** (*array_like*) - An object exposing the array interface, which \ stores the row index for each row slice with non-zero elements. - **shape** (*tuple of int, optional*) - The shape of the array. The default \ shape is inferred from the indices and indptr arrays. - **ctx** (*Context, optional*) - Device context \ (default is the current default context). - **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \ The default dtype is float32. Parameters ---------- arg1 : NDArray, numpy.ndarray, RowSparseNDArray, tuple of int or tuple of array_like The argument to help instantiate the row sparse ndarray. See above for further details. shape : tuple of int, optional The shape of the row sparse ndarray. (Default value = None) ctx : Context, optional Device context (default is the current default context). dtype : str or numpy.dtype, optional The data type of the output array. (Default value = None) Returns ------- RowSparseNDArray An `RowSparseNDArray` with the `row_sparse` storage representation. Examples -------- >>> a = mx.nd.sparse.row_sparse_array(([[1, 2], [3, 4]], [1, 4]), shape=(6, 2)) >>> a.asnumpy() array([[ 0., 0.], [ 1., 2.], [ 0., 0.], [ 0., 0.], [ 3., 4.], [ 0., 0.]], dtype=float32) See Also -------- RowSparseNDArray : MXNet NDArray in row sparse format. """ # construct a row sparse array from (D0, D1 ..) 
or (data, indices) if isinstance(arg1, tuple): arg_len = len(arg1) if arg_len < 2: raise ValueError("Unexpected length of input tuple: " + str(arg_len)) elif arg_len > 2: # empty ndarray with shape _check_shape(arg1, shape) return empty('row_sparse', arg1, ctx=ctx, dtype=dtype) else: # len(arg1) = 2, is either shape or (data, indices) if isinstance(arg1[0], integer_types) and isinstance(arg1[1], integer_types): # empty ndarray with shape _check_shape(arg1, shape) return empty('row_sparse', arg1, ctx=ctx, dtype=dtype) else: # data, indices, indptr return _row_sparse_ndarray_from_definition(arg1[0], arg1[1], shape=shape, ctx=ctx, dtype=dtype) else: # construct a row sparse ndarray from a dense / sparse array if isinstance(arg1, RowSparseNDArray): # construct a row sparse ndarray from RowSparseNDArray _check_shape(arg1.shape, shape) return array(arg1, ctx=ctx, dtype=dtype) elif isinstance(arg1, CSRNDArray): raise ValueError("Unexpected input type: CSRNDArray") else: # construct a csr matrix from a dense one # prepare default dtype since mx.nd.array doesn't use default values # based on source_array dtype = _prepare_default_dtype(arg1, dtype) # create dns array with provided dtype. ctx is not passed since copy across # ctx requires dtype to be the same dns = _array(arg1, dtype=dtype) if ctx is not None and dns.context != ctx: dns = dns.as_in_context(ctx) _check_shape(dns.shape, shape) return dns.tostype('row_sparse')
Create a `RowSparseNDArray` based on data and indices
def _row_sparse_ndarray_from_definition(data, indices, shape=None, ctx=None, dtype=None, indices_type=None): """Create a `RowSparseNDArray` based on data and indices""" storage_type = 'row_sparse' # context ctx = current_context() if ctx is None else ctx # types dtype = _prepare_default_dtype(data, dtype) indices_type = _STORAGE_AUX_TYPES[storage_type][0] if indices_type is None else indices_type # prepare src array and types data = _prepare_src_array(data, dtype) indices = _prepare_src_array(indices, indices_type) # TODO(junwu): Convert data, indptr, and indices to mxnet NDArrays # if they are not for now. In the future, we should provide a c-api # to accept np.ndarray types to copy from to result.data and aux_data if not isinstance(data, NDArray): data = _array(data, ctx, dtype) if not isinstance(indices, NDArray): indices = _array(indices, ctx, indices_type) if shape is None: num_indices = indices.shape[0] if num_indices == 0: raise ValueError('invalid shape') dim0 = indices[num_indices - 1].asscalar() + 1 shape = (dim0, ) + data.shape[1:] # verify shapes if data.ndim != len(shape) or indices.ndim != 1 or np.prod(shape[1:]) == 0: raise ValueError("invalid shape") result = RowSparseNDArray(_new_alloc_handle(storage_type, shape, ctx, False, dtype, [indices_type], [indices.shape])) check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, data.handle, ctypes.c_int(-1))) check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indices.handle, ctypes.c_int(0))) return result
Returns element-wise sum of the input arrays with broadcasting. Equivalent to ``lhs + rhs``, ``mx.nd.broadcast_add(lhs, rhs)`` and ``mx.nd.broadcast_plus(lhs, rhs)`` when shapes of lhs and rhs do not match. If lhs.shape == rhs.shape, this is equivalent to ``mx.nd.elemwise_add(lhs, rhs)`` .. note:: If the corresponding dimensions of two arrays have the same size or one of them has size 1, then the arrays are broadcastable to a common shape. Parameters ---------- lhs : scalar or mxnet.ndarray.sparse.array First array to be added. rhs : scalar or mxnet.ndarray.sparse.array Second array to be added. If ``lhs.shape != rhs.shape``, they must be broadcastable to a common shape. Returns ------- NDArray The element-wise sum of the input arrays. Examples -------- >>> a = mx.nd.ones((2,3)).tostype('csr') >>> b = mx.nd.ones((2,3)).tostype('csr') >>> a.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> b.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> (a+b).asnumpy() array([[ 2., 2., 2.], [ 2., 2., 2.]], dtype=float32) >>> c = mx.nd.ones((2,3)).tostype('row_sparse') >>> d = mx.nd.ones((2,3)).tostype('row_sparse') >>> c.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> d.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> (c+d).asnumpy() array([[ 2., 2., 2.], [ 2., 2., 2.]], dtype=float32)
def add(lhs, rhs): """Returns element-wise sum of the input arrays with broadcasting. Equivalent to ``lhs + rhs``, ``mx.nd.broadcast_add(lhs, rhs)`` and ``mx.nd.broadcast_plus(lhs, rhs)`` when shapes of lhs and rhs do not match. If lhs.shape == rhs.shape, this is equivalent to ``mx.nd.elemwise_add(lhs, rhs)`` .. note:: If the corresponding dimensions of two arrays have the same size or one of them has size 1, then the arrays are broadcastable to a common shape. Parameters ---------- lhs : scalar or mxnet.ndarray.sparse.array First array to be added. rhs : scalar or mxnet.ndarray.sparse.array Second array to be added. If ``lhs.shape != rhs.shape``, they must be broadcastable to a common shape. Returns ------- NDArray The element-wise sum of the input arrays. Examples -------- >>> a = mx.nd.ones((2,3)).tostype('csr') >>> b = mx.nd.ones((2,3)).tostype('csr') >>> a.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> b.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> (a+b).asnumpy() array([[ 2., 2., 2.], [ 2., 2., 2.]], dtype=float32) >>> c = mx.nd.ones((2,3)).tostype('row_sparse') >>> d = mx.nd.ones((2,3)).tostype('row_sparse') >>> c.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> d.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> (c+d).asnumpy() array([[ 2., 2., 2.], [ 2., 2., 2.]], dtype=float32) """ # pylint: disable= no-member, protected-access if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape: return _ufunc_helper( lhs, rhs, op.elemwise_add, operator.add, _internal._plus_scalar, None) return _ufunc_helper( lhs, rhs, op.broadcast_add, operator.add, _internal._plus_scalar, None)
Returns element-wise difference of the input arrays with broadcasting. Equivalent to ``lhs - rhs``, ``mx.nd.broadcast_sub(lhs, rhs)`` and ``mx.nd.broadcast_minus(lhs, rhs)`` when shapes of lhs and rhs do not match. If lhs.shape == rhs.shape, this is equivalent to ``mx.nd.elemwise_sub(lhs, rhs)`` .. note:: If the corresponding dimensions of two arrays have the same size or one of them has size 1, then the arrays are broadcastable to a common shape. Parameters ---------- lhs : scalar or mxnet.ndarray.sparse.array First array to be subtracted. rhs : scalar or mxnet.ndarray.sparse.array Second array to be subtracted. If ``lhs.shape != rhs.shape``, they must be broadcastable to a common shape. Returns ------- NDArray The element-wise difference of the input arrays. Examples -------- >>> a = mx.nd.ones((2,3)).tostype('csr') >>> b = mx.nd.ones((2,3)).tostype('csr') >>> a.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> b.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> (a-b).asnumpy() array([[ 0., 0., 0.], [ 0., 0., 0.]], dtype=float32) >>> c = mx.nd.ones((2,3)).tostype('row_sparse') >>> d = mx.nd.ones((2,3)).tostype('row_sparse') >>> c.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> d.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> (c-d).asnumpy() array([[ 0., 0., 0.], [ 0., 0., 0.]], dtype=float32)
def subtract(lhs, rhs): """Returns element-wise difference of the input arrays with broadcasting. Equivalent to ``lhs - rhs``, ``mx.nd.broadcast_sub(lhs, rhs)`` and ``mx.nd.broadcast_minus(lhs, rhs)`` when shapes of lhs and rhs do not match. If lhs.shape == rhs.shape, this is equivalent to ``mx.nd.elemwise_sub(lhs, rhs)`` .. note:: If the corresponding dimensions of two arrays have the same size or one of them has size 1, then the arrays are broadcastable to a common shape. Parameters ---------- lhs : scalar or mxnet.ndarray.sparse.array First array to be subtracted. rhs : scalar or mxnet.ndarray.sparse.array Second array to be subtracted. If ``lhs.shape != rhs.shape``, they must be broadcastable to a common shape. Returns ------- NDArray The element-wise difference of the input arrays. Examples -------- >>> a = mx.nd.ones((2,3)).tostype('csr') >>> b = mx.nd.ones((2,3)).tostype('csr') >>> a.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> b.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> (a-b).asnumpy() array([[ 0., 0., 0.], [ 0., 0., 0.]], dtype=float32) >>> c = mx.nd.ones((2,3)).tostype('row_sparse') >>> d = mx.nd.ones((2,3)).tostype('row_sparse') >>> c.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> d.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> (c-d).asnumpy() array([[ 0., 0., 0.], [ 0., 0., 0.]], dtype=float32) """ # pylint: disable= no-member, protected-access if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape: return _ufunc_helper( lhs, rhs, op.elemwise_sub, operator.sub, _internal._minus_scalar, None) return _ufunc_helper( lhs, rhs, op.broadcast_sub, operator.sub, _internal._minus_scalar, None)
Returns element-wise product of the input arrays with broadcasting. Equivalent to ``lhs * rhs`` and ``mx.nd.broadcast_mul(lhs, rhs)`` when shapes of lhs and rhs do not match. If lhs.shape == rhs.shape, this is equivalent to ``mx.nd.elemwise_mul(lhs, rhs)`` .. note:: If the corresponding dimensions of two arrays have the same size or one of them has size 1, then the arrays are broadcastable to a common shape. Parameters ---------- lhs : scalar or mxnet.ndarray.sparse.array First array to be multiplied. rhs : scalar or mxnet.ndarray.sparse.array Second array to be multiplied. If ``lhs.shape != rhs.shape``, they must be broadcastable to a common shape. Returns ------- NDArray The element-wise multiplication of the input arrays. Examples -------- >>> x = mx.nd.ones((2,3)).tostype('csr') >>> y = mx.nd.arange(2).reshape((2,1)) >>> z = mx.nd.arange(3) >>> x.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> y.asnumpy() array([[ 0.], [ 1.]], dtype=float32) >>> z.asnumpy() array([ 0., 1., 2.], dtype=float32) >>> (x*2).asnumpy() array([[ 2., 2., 2.], [ 2., 2., 2.]], dtype=float32) >>> (x*y).asnumpy() array([[ 0., 0., 0.], [ 1., 1., 1.]], dtype=float32) >>> mx.nd.sparse.multiply(x, y).asnumpy() array([[ 0., 0., 0.], [ 1., 1., 1.]], dtype=float32) >>> (x*z).asnumpy() array([[ 0., 1., 2.], [ 0., 1., 2.]], dtype=float32) >>> mx.nd.sparse.multiply(x, z).asnumpy() array([[ 0., 1., 2.], [ 0., 1., 2.]], dtype=float32) >>> z = z.reshape((1, 3)) >>> z.asnumpy() array([[ 0., 1., 2.]], dtype=float32) >>> (x*z).asnumpy() array([[ 0., 1., 2.], [ 0., 1., 2.]], dtype=float32) >>> mx.nd.sparse.multiply(x, z).asnumpy() array([[ 0., 1., 2.], [ 0., 1., 2.]], dtype=float32)
def multiply(lhs, rhs): """Returns element-wise product of the input arrays with broadcasting. Equivalent to ``lhs * rhs`` and ``mx.nd.broadcast_mul(lhs, rhs)`` when shapes of lhs and rhs do not match. If lhs.shape == rhs.shape, this is equivalent to ``mx.nd.elemwise_mul(lhs, rhs)`` .. note:: If the corresponding dimensions of two arrays have the same size or one of them has size 1, then the arrays are broadcastable to a common shape. Parameters ---------- lhs : scalar or mxnet.ndarray.sparse.array First array to be multiplied. rhs : scalar or mxnet.ndarray.sparse.array Second array to be multiplied. If ``lhs.shape != rhs.shape``, they must be broadcastable to a common shape. Returns ------- NDArray The element-wise multiplication of the input arrays. Examples -------- >>> x = mx.nd.ones((2,3)).tostype('csr') >>> y = mx.nd.arange(2).reshape((2,1)) >>> z = mx.nd.arange(3) >>> x.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> y.asnumpy() array([[ 0.], [ 1.]], dtype=float32) >>> z.asnumpy() array([ 0., 1., 2.], dtype=float32) >>> (x*2).asnumpy() array([[ 2., 2., 2.], [ 2., 2., 2.]], dtype=float32) >>> (x*y).asnumpy() array([[ 0., 0., 0.], [ 1., 1., 1.]], dtype=float32) >>> mx.nd.sparse.multiply(x, y).asnumpy() array([[ 0., 0., 0.], [ 1., 1., 1.]], dtype=float32) >>> (x*z).asnumpy() array([[ 0., 1., 2.], [ 0., 1., 2.]], dtype=float32) >>> mx.nd.sparse.multiply(x, z).asnumpy() array([[ 0., 1., 2.], [ 0., 1., 2.]], dtype=float32) >>> z = z.reshape((1, 3)) >>> z.asnumpy() array([[ 0., 1., 2.]], dtype=float32) >>> (x*z).asnumpy() array([[ 0., 1., 2.], [ 0., 1., 2.]], dtype=float32) >>> mx.nd.sparse.multiply(x, z).asnumpy() array([[ 0., 1., 2.], [ 0., 1., 2.]], dtype=float32) """ # pylint: disable= no-member, protected-access if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape: return _ufunc_helper( lhs, rhs, op.elemwise_mul, operator.mul, _internal._mul_scalar, None) return _ufunc_helper( lhs, rhs, op.broadcast_mul, operator.mul, _internal._mul_scalar, None)
Return a new array of given shape and type, filled with zeros. Parameters ---------- stype: string The storage type of the empty array, such as 'row_sparse', 'csr', etc shape : int or tuple of int The shape of the empty array ctx : Context, optional An optional device context (default is the current default context) dtype : str or numpy.dtype, optional An optional value type (default is `float32`) Returns ------- RowSparseNDArray or CSRNDArray A created array Examples -------- >>> mx.nd.sparse.zeros('csr', (1,2)) <CSRNDArray 1x2 @cpu(0)> >>> mx.nd.sparse.zeros('row_sparse', (1,2), ctx=mx.cpu(), dtype='float16').asnumpy() array([[ 0., 0.]], dtype=float16)
def zeros(stype, shape, ctx=None, dtype=None, **kwargs): """Return a new array of given shape and type, filled with zeros. Parameters ---------- stype: string The storage type of the empty array, such as 'row_sparse', 'csr', etc shape : int or tuple of int The shape of the empty array ctx : Context, optional An optional device context (default is the current default context) dtype : str or numpy.dtype, optional An optional value type (default is `float32`) Returns ------- RowSparseNDArray or CSRNDArray A created array Examples -------- >>> mx.nd.sparse.zeros('csr', (1,2)) <CSRNDArray 1x2 @cpu(0)> >>> mx.nd.sparse.zeros('row_sparse', (1,2), ctx=mx.cpu(), dtype='float16').asnumpy() array([[ 0., 0.]], dtype=float16) """ # pylint: disable= no-member, protected-access if stype == 'default': return _zeros_ndarray(shape, ctx=ctx, dtype=dtype, **kwargs) if ctx is None: ctx = current_context() dtype = mx_real_t if dtype is None else dtype if stype in ('row_sparse', 'csr'): aux_types = _STORAGE_AUX_TYPES[stype] else: raise ValueError("unknown storage type" + stype) out = _ndarray_cls(_new_alloc_handle(stype, shape, ctx, True, dtype, aux_types)) return _internal._zeros(shape=shape, ctx=ctx, dtype=dtype, out=out, **kwargs)
Returns element-wise division of the input arrays with broadcasting. Equivalent to ``lhs / rhs`` and ``mx.nd.broadcast_div(lhs, rhs)`` when shapes of lhs and rhs do not match. If lhs.shape == rhs.shape, this is equivalent to ``mx.nd.elemwise_div(lhs, rhs)`` .. note:: If the corresponding dimensions of two arrays have the same size or one of them has size 1, then the arrays are broadcastable to a common shape. Parameters ---------- lhs : scalar or mxnet.ndarray.sparse.array First array in division. rhs : scalar or mxnet.ndarray.sparse.array Second array in division. The arrays to be divided. If ``lhs.shape != rhs.shape``, they must be broadcastable to a common shape. Returns ------- NDArray The element-wise division of the input arrays. Examples -------- >>> x = (mx.nd.ones((2,3))*6).tostype('csr') >>> y = mx.nd.arange(2).reshape((2,1)) + 1 >>> z = mx.nd.arange(3) + 1 >>> x.asnumpy() array([[ 6., 6., 6.], [ 6., 6., 6.]], dtype=float32) >>> y.asnumpy() array([[ 1.], [ 2.]], dtype=float32) >>> z.asnumpy() array([ 1., 2., 3.], dtype=float32) >>> x/2 <NDArray 2x3 @cpu(0)> >>> (x/3).asnumpy() array([[ 2., 2., 2.], [ 2., 2., 2.]], dtype=float32) >>> (x/y).asnumpy() array([[ 6., 6., 6.], [ 3., 3., 3.]], dtype=float32) >>> mx.nd.sparse.divide(x,y).asnumpy() array([[ 6., 6., 6.], [ 3., 3., 3.]], dtype=float32) >>> (x/z).asnumpy() array([[ 6., 3., 2.], [ 6., 3., 2.]], dtype=float32) >>> mx.nd.sparse.divide(x,z).asnumpy() array([[ 6., 3., 2.], [ 6., 3., 2.]], dtype=float32) >>> z = z.reshape((1,3)) >>> z.asnumpy() array([[ 1., 2., 3.]], dtype=float32) >>> (x/z).asnumpy() array([[ 6., 3., 2.], [ 6., 3., 2.]], dtype=float32) >>> mx.nd.sparse.divide(x,z).asnumpy() array([[ 6., 3., 2.], [ 6., 3., 2.]], dtype=float32)
def divide(lhs, rhs): """Returns element-wise division of the input arrays with broadcasting. Equivalent to ``lhs / rhs`` and ``mx.nd.broadcast_div(lhs, rhs)`` when shapes of lhs and rhs do not match. If lhs.shape == rhs.shape, this is equivalent to ``mx.nd.elemwise_div(lhs, rhs)`` .. note:: If the corresponding dimensions of two arrays have the same size or one of them has size 1, then the arrays are broadcastable to a common shape. Parameters ---------- lhs : scalar or mxnet.ndarray.sparse.array First array in division. rhs : scalar or mxnet.ndarray.sparse.array Second array in division. The arrays to be divided. If ``lhs.shape != rhs.shape``, they must be broadcastable to a common shape. Returns ------- NDArray The element-wise division of the input arrays. Examples -------- >>> x = (mx.nd.ones((2,3))*6).tostype('csr') >>> y = mx.nd.arange(2).reshape((2,1)) + 1 >>> z = mx.nd.arange(3) + 1 >>> x.asnumpy() array([[ 6., 6., 6.], [ 6., 6., 6.]], dtype=float32) >>> y.asnumpy() array([[ 1.], [ 2.]], dtype=float32) >>> z.asnumpy() array([ 1., 2., 3.], dtype=float32) >>> x/2 <NDArray 2x3 @cpu(0)> >>> (x/3).asnumpy() array([[ 2., 2., 2.], [ 2., 2., 2.]], dtype=float32) >>> (x/y).asnumpy() array([[ 6., 6., 6.], [ 3., 3., 3.]], dtype=float32) >>> mx.nd.sparse.divide(x,y).asnumpy() array([[ 6., 6., 6.], [ 3., 3., 3.]], dtype=float32) >>> (x/z).asnumpy() array([[ 6., 3., 2.], [ 6., 3., 2.]], dtype=float32) >>> mx.nd.sparse.divide(x,z).asnumpy() array([[ 6., 3., 2.], [ 6., 3., 2.]], dtype=float32) >>> z = z.reshape((1,3)) >>> z.asnumpy() array([[ 1., 2., 3.]], dtype=float32) >>> (x/z).asnumpy() array([[ 6., 3., 2.], [ 6., 3., 2.]], dtype=float32) >>> mx.nd.sparse.divide(x,z).asnumpy() array([[ 6., 3., 2.], [ 6., 3., 2.]], dtype=float32) """ # pylint: disable= no-member, protected-access if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape: return _ufunc_helper( lhs, rhs, op.elemwise_div, operator.truediv, _internal._div_scalar, None) return _ufunc_helper( lhs, rhs, op.broadcast_div, operator.truediv, _internal._div_scalar, None)
Returns a new array of given shape and type, without initializing entries. Parameters ---------- stype: string The storage type of the empty array, such as 'row_sparse', 'csr', etc shape : int or tuple of int The shape of the empty array. ctx : Context, optional An optional device context (default is the current default context). dtype : str or numpy.dtype, optional An optional value type (default is `float32`). Returns ------- CSRNDArray or RowSparseNDArray A created array.
def empty(stype, shape, ctx=None, dtype=None): """Returns a new array of given shape and type, without initializing entries. Parameters ---------- stype: string The storage type of the empty array, such as 'row_sparse', 'csr', etc shape : int or tuple of int The shape of the empty array. ctx : Context, optional An optional device context (default is the current default context). dtype : str or numpy.dtype, optional An optional value type (default is `float32`). Returns ------- CSRNDArray or RowSparseNDArray A created array. """ if isinstance(shape, int): shape = (shape, ) if ctx is None: ctx = current_context() if dtype is None: dtype = mx_real_t assert(stype is not None) if stype in ('csr', 'row_sparse'): return zeros(stype, shape, ctx=ctx, dtype=dtype) else: raise Exception("unknown stype : " + str(stype))
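A brief usage sketch of `empty`: for the sparse storage types it simply returns a zero-initialized array of the requested shape and dtype.

import mxnet as mx

a = mx.nd.sparse.empty('csr', (2, 3))                          # same result as zeros('csr', (2, 3))
b = mx.nd.sparse.empty('row_sparse', (5, 3), dtype='float16')
print(a)                     # <CSRNDArray 2x3 @cpu(0)>
print(b.shape, b.dtype)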
Creates a sparse array from any object exposing the array interface. Parameters ---------- source_array : RowSparseNDArray, CSRNDArray or scipy.sparse.csr.csr_matrix The source sparse array ctx : Context, optional The default context is ``source_array.context`` if ``source_array`` is an NDArray. \ The current default context otherwise. dtype : str or numpy.dtype, optional The data type of the output array. The default dtype is ``source_array.dtype`` if `source_array` is an `NDArray`, `numpy.ndarray` or `scipy.sparse.csr.csr_matrix`, \ `float32` otherwise. Returns ------- RowSparseNDArray or CSRNDArray An array with the same contents as the `source_array`. Examples -------- >>> import scipy.sparse as spsp >>> csr = spsp.csr_matrix((2, 100)) >>> mx.nd.sparse.array(csr) <CSRNDArray 2x100 @cpu(0)> >>> mx.nd.sparse.array(mx.nd.sparse.zeros('csr', (3, 2))) <CSRNDArray 3x2 @cpu(0)> >>> mx.nd.sparse.array(mx.nd.sparse.zeros('row_sparse', (3, 2))) <RowSparseNDArray 3x2 @cpu(0)>
def array(source_array, ctx=None, dtype=None): """Creates a sparse array from any object exposing the array interface. Parameters ---------- source_array : RowSparseNDArray, CSRNDArray or scipy.sparse.csr.csr_matrix The source sparse array ctx : Context, optional The default context is ``source_array.context`` if ``source_array`` is an NDArray. \ The current default context otherwise. dtype : str or numpy.dtype, optional The data type of the output array. The default dtype is ``source_array.dtype`` if `source_array` is an `NDArray`, `numpy.ndarray` or `scipy.sparse.csr.csr_matrix`, \ `float32` otherwise. Returns ------- RowSparseNDArray or CSRNDArray An array with the same contents as the `source_array`. Examples -------- >>> import scipy.sparse as spsp >>> csr = spsp.csr_matrix((2, 100)) >>> mx.nd.sparse.array(csr) <CSRNDArray 2x100 @cpu(0)> >>> mx.nd.sparse.array(mx.nd.sparse.zeros('csr', (3, 2))) <CSRNDArray 3x2 @cpu(0)> >>> mx.nd.sparse.array(mx.nd.sparse.zeros('row_sparse', (3, 2))) <RowSparseNDArray 3x2 @cpu(0)> """ ctx = current_context() if ctx is None else ctx if isinstance(source_array, NDArray): assert(source_array.stype != 'default'), \ "Please use `tostype` to create RowSparseNDArray or CSRNDArray from an NDArray" # prepare dtype and ctx based on source_array, if not provided dtype = _prepare_default_dtype(source_array, dtype) # if both dtype and ctx are different from source_array, we cannot copy directly if source_array.dtype != dtype and source_array.context != ctx: arr = empty(source_array.stype, source_array.shape, dtype=dtype) arr[:] = source_array arr = arr.as_in_context(ctx) else: arr = empty(source_array.stype, source_array.shape, dtype=dtype, ctx=ctx) arr[:] = source_array return arr elif spsp and isinstance(source_array, spsp.csr.csr_matrix): # TODO(haibin) implement `_sync_copy_from` with scipy csr object to reduce a copy # preprocess scipy csr to canonical form csr = source_array.sorted_indices() csr.sum_duplicates() dtype = _prepare_default_dtype(source_array, dtype) return csr_matrix((csr.data, csr.indices, csr.indptr), shape=csr.shape, \ dtype=dtype, ctx=ctx) elif isinstance(source_array, (np.ndarray, np.generic)): raise ValueError("Please use mx.nd.array to create an NDArray with source_array of type ", type(source_array)) else: raise ValueError("Unexpected source_array type: ", type(source_array))
Data-type of the array's ith aux data. Returns ------- numpy.dtype This BaseSparseNDArray's aux data type.
def _aux_type(self, i): """Data-type of the array's ith aux data. Returns ------- numpy.dtype This BaseSparseNDArray's aux data type. """ aux_type = ctypes.c_int() check_call(_LIB.MXNDArrayGetAuxType(self.handle, i, ctypes.byref(aux_type))) return _DTYPE_MX_TO_NP[aux_type.value]
The data types of the aux data for the BaseSparseNDArray.
def _aux_types(self): """The data types of the aux data for the BaseSparseNDArray. """ aux_types = [] num_aux = self._num_aux for i in range(num_aux): aux_types.append(self._aux_type(i)) return aux_types
Return a copy of the array after casting to a specified type. Parameters ---------- dtype : numpy.dtype or str The type of the returned array. copy : bool Default `True`. By default, astype always returns a newly allocated ndarray on the same context. If this is set to `False`, and the dtype requested is the same as the ndarray's dtype, the ndarray is returned instead of a copy. Examples -------- >>> x = mx.nd.sparse.zeros('row_sparse', (2,3), dtype='float32') >>> y = x.astype('int32') >>> y.dtype <type 'numpy.int32'>
def astype(self, dtype, copy=True): """Return a copy of the array after casting to a specified type. Parameters ---------- dtype : numpy.dtype or str The type of the returned array. copy : bool Default `True`. By default, astype always returns a newly allocated ndarray on the same context. If this is set to `False`, and the dtype requested is the same as the ndarray's dtype, the ndarray is returned instead of a copy. Examples -------- >>> x = mx.nd.sparse.zeros('row_sparse', (2,3), dtype='float32') >>> y = x.astype('int32') >>> y.dtype <type 'numpy.int32'> """ if not copy and np.dtype(dtype) == self.dtype: return self res = zeros(shape=self.shape, ctx=self.context, dtype=dtype, stype=self.stype) self.copyto(res) return res
Check whether the NDArray format is valid. Parameters ---------- full_check : bool, optional If `True`, rigorous check, O(N) operations. Otherwise basic check, O(1) operations (default True).
def check_format(self, full_check=True): """Check whether the NDArray format is valid. Parameters ---------- full_check : bool, optional If `True`, rigorous check, O(N) operations. Otherwise basic check, O(1) operations (default True). """ check_call(_LIB.MXNDArraySyncCheckFormat(self.handle, ctypes.c_bool(full_check)))
A deep copy NDArray of the data array associated with the BaseSparseNDArray. This function blocks. Do not use it in performance critical code.
def _data(self): """A deep copy NDArray of the data array associated with the BaseSparseNDArray. This function blocks. Do not use it in performance critical code. """ self.wait_to_read() hdl = NDArrayHandle() check_call(_LIB.MXNDArrayGetDataNDArray(self.handle, ctypes.byref(hdl))) return NDArray(hdl)
Get a deep copy NDArray of the i-th aux data array associated with the BaseSparseNDArray. This function blocks. Do not use it in performance critical code.
def _aux_data(self, i): """ Get a deep copy NDArray of the i-th aux data array associated with the BaseSparseNDArray. This function blocks. Do not use it in performance critical code. """ self.wait_to_read() hdl = NDArrayHandle() check_call(_LIB.MXNDArrayGetAuxNDArray(self.handle, i, ctypes.byref(hdl))) return NDArray(hdl)
Returns a ``scipy.sparse.csr.csr_matrix`` object with value copied from this array Examples -------- >>> x = mx.nd.sparse.zeros('csr', (2,3)) >>> y = x.asscipy() >>> type(y) <type 'scipy.sparse.csr.csr_matrix'> >>> y <2x3 sparse matrix of type '<type 'numpy.float32'>' with 0 stored elements in Compressed Sparse Row format>
def asscipy(self): """Returns a ``scipy.sparse.csr.csr_matrix`` object with value copied from this array Examples -------- >>> x = mx.nd.sparse.zeros('csr', (2,3)) >>> y = x.asscipy() >>> type(y) <type 'scipy.sparse.csr.csr_matrix'> >>> y <2x3 sparse matrix of type '<type 'numpy.float32'>' with 0 stored elements in Compressed Sparse Row format> """ data = self.data.asnumpy() indices = self.indices.asnumpy() indptr = self.indptr.asnumpy() if not spsp: raise ImportError("scipy is not available. \ Please check if the scipy python bindings are installed.") return spsp.csr_matrix((data, indices, indptr), shape=self.shape, dtype=self.dtype)
Return a copy of the array with chosen storage type. Returns ------- NDArray or RowSparseNDArray A copy of the array with the chosen storage stype
def tostype(self, stype): """Return a copy of the array with chosen storage type. Returns ------- NDArray or RowSparseNDArray A copy of the array with the chosen storage stype """ # pylint: disable= no-member, protected-access if stype == 'csr': raise ValueError("cast_storage from row_sparse to csr is not supported") return op.cast_storage(self, stype=stype)
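A short sketch of the supported conversion path (row_sparse back to dense); converting to 'csr' raises, as noted above.

import mxnet as mx

rsp = mx.nd.sparse.zeros('row_sparse', (2, 3))
dense = rsp.tostype('default')       # cast_storage to a dense NDArray
print(type(dense))                   # <class 'mxnet.ndarray.ndarray.NDArray'>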
Copies the value of this array to another array. If ``other`` is a ``NDArray`` or ``RowSparseNDArray`` object, then ``other.shape`` and ``self.shape`` should be the same. This function copies the value from ``self`` to ``other``. If ``other`` is a context, a new ``RowSparseNDArray`` will be first created on the target context, and the value of ``self`` is copied. Parameters ---------- other : NDArray or RowSparseNDArray or Context The destination array or context. Returns ------- NDArray or RowSparseNDArray The copied array. If ``other`` is an ``NDArray`` or ``RowSparseNDArray``, then the return value and ``other`` will point to the same ``NDArray`` or ``RowSparseNDArray``.
def copyto(self, other): """Copies the value of this array to another array. If ``other`` is a ``NDArray`` or ``RowSparseNDArray`` object, then ``other.shape`` and ``self.shape`` should be the same. This function copies the value from ``self`` to ``other``. If ``other`` is a context, a new ``RowSparseNDArray`` will be first created on the target context, and the value of ``self`` is copied. Parameters ---------- other : NDArray or RowSparseNDArray or Context The destination array or context. Returns ------- NDArray or RowSparseNDArray The copied array. If ``other`` is an ``NDArray`` or ``RowSparseNDArray``, then the return value and ``other`` will point to the same ``NDArray`` or ``RowSparseNDArray``. """ if isinstance(other, Context): return super(RowSparseNDArray, self).copyto(other) elif isinstance(other, NDArray): stype = other.stype if stype in ('default', 'row_sparse'): return super(RowSparseNDArray, self).copyto(other) else: raise TypeError('copyto does not support destination NDArray stype ' + str(stype)) else: raise TypeError('copyto does not support type ' + str(type(other)))
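A minimal sketch of the two call patterns described above: copying to a Context allocates a new array on that device, while copying into an existing dense or row_sparse array of the same shape fills it in place.

import mxnet as mx

src = mx.nd.sparse.zeros('row_sparse', (2, 3))
on_cpu = src.copyto(mx.cpu())        # new RowSparseNDArray on the target context
dst = mx.nd.zeros((2, 3))            # 'default' stype destination of the same shape
src.copyto(dst)                      # value of src copied into dst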
Exports the MXNet model file, passed as a parameter, into ONNX model. Accepts both symbol,parameter objects as well as json and params filepaths as input. Operator support and coverage - https://cwiki.apache.org/confluence/display/MXNET/MXNet-ONNX+Integration Parameters ---------- sym : str or symbol object Path to the json file or Symbol object params : str or symbol object Path to the params file or params dictionary. (Including both arg_params and aux_params) input_shape : List of tuple Input shape of the model e.g [(1,3,224,224)] input_type : data type Input data type e.g. np.float32 onnx_file_path : str Path where to save the generated onnx file verbose : Boolean If true will print logs of the model conversion Returns ------- onnx_file_path : str Onnx file path Notes ----- This method is available when you ``import mxnet.contrib.onnx``
def export_model(sym, params, input_shape, input_type=np.float32, onnx_file_path='model.onnx', verbose=False): """Exports the MXNet model file, passed as a parameter, into ONNX model. Accepts both symbol,parameter objects as well as json and params filepaths as input. Operator support and coverage - https://cwiki.apache.org/confluence/display/MXNET/MXNet-ONNX+Integration Parameters ---------- sym : str or symbol object Path to the json file or Symbol object params : str or symbol object Path to the params file or params dictionary. (Including both arg_params and aux_params) input_shape : List of tuple Input shape of the model e.g [(1,3,224,224)] input_type : data type Input data type e.g. np.float32 onnx_file_path : str Path where to save the generated onnx file verbose : Boolean If true will print logs of the model conversion Returns ------- onnx_file_path : str Onnx file path Notes ----- This method is available when you ``import mxnet.contrib.onnx`` """ try: from onnx import helper, mapping except ImportError: raise ImportError("Onnx and protobuf need to be installed. " + "Instructions to install - https://github.com/onnx/onnx") converter = MXNetGraph() data_format = np.dtype(input_type) # if input parameters are strings(file paths), load files and create symbol parameter objects if isinstance(sym, string_types) and isinstance(params, string_types): logging.info("Converting json and weight file to sym and params") sym_obj, params_obj = load_module(sym, params) onnx_graph = converter.create_onnx_graph_proto(sym_obj, params_obj, input_shape, mapping.NP_TYPE_TO_TENSOR_TYPE[data_format], verbose=verbose) elif isinstance(sym, symbol.Symbol) and isinstance(params, dict): onnx_graph = converter.create_onnx_graph_proto(sym, params, input_shape, mapping.NP_TYPE_TO_TENSOR_TYPE[data_format], verbose=verbose) else: raise ValueError("Input sym and params should either be files or objects") # Create the model (ModelProto) onnx_model = helper.make_model(onnx_graph) # Save model on disk with open(onnx_file_path, "wb") as file_handle: serialized = onnx_model.SerializeToString() file_handle.write(serialized) logging.info("Input shape of the model %s ", input_shape) logging.info("Exported ONNX file %s saved to disk", onnx_file_path) return onnx_file_path
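A hedged usage sketch of the file-path variant; the checkpoint and output file names below are placeholders, not files shipped with the library.

import numpy as np
from mxnet.contrib import onnx as onnx_mxnet

# 'resnet-symbol.json' / 'resnet-0000.params' stand in for an existing checkpoint
converted_path = onnx_mxnet.export_model('resnet-symbol.json', 'resnet-0000.params',
                                         [(1, 3, 224, 224)], np.float32,
                                         onnx_file_path='resnet.onnx', verbose=True)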
Benchmarking both storage and dot
def bench_dot(lhs_row_dim, lhs_col_dim, rhs_col_dim, density, rhs_density, dot_func, trans_lhs, lhs_stype, rhs_stype, only_storage, distribution="uniform"): """ Benchmarking both storage and dot """ lhs_nd = rand_ndarray((lhs_row_dim, lhs_col_dim), lhs_stype, density, distribution=distribution) if not only_storage: rhs_nd = rand_ndarray((lhs_col_dim, rhs_col_dim), rhs_stype, density=rhs_density, distribution=distribution) out = dot_func(lhs_nd, rhs_nd, trans_lhs) mx.nd.waitall()
Convert caffe mean Parameters ---------- binaryproto_fname : str Filename of the mean output : str, optional Save the mean into mxnet's format Returns ------- NDArray Mean in ndarray
def convert_mean(binaryproto_fname, output=None): """Convert caffe mean Parameters ---------- binaryproto_fname : str Filename of the mean output : str, optional Save the mean into mxnet's format Returns ------- NDArray Mean in ndarray """ mean_blob = caffe_parser.caffe_pb2.BlobProto() with open(binaryproto_fname, 'rb') as f: mean_blob.ParseFromString(f.read()) img_mean_np = np.array(mean_blob.data) img_mean_np = img_mean_np.reshape( mean_blob.channels, mean_blob.height, mean_blob.width ) # swap channels from Caffe BGR to RGB img_mean_np[[0, 2], :, :] = img_mean_np[[2, 0], :, :] nd = mx.nd.array(img_mean_np) if output is not None: mx.nd.save(output, {"mean_image": nd}) return nd
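A short usage sketch; the mean file name is a placeholder and the output shape depends on the dataset the mean was computed from.

# 'mean.binaryproto' stands in for an actual Caffe mean file
mean_nd = convert_mean('mean.binaryproto', output='mean.nd')
print(mean_nd.shape)   # (channels, height, width), e.g. (3, 256, 256)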
r"""Densenet-BC model from the `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper. Parameters ---------- num_layers : int Number of layers for the variant of densenet. Options are 121, 161, 169, 201. pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default $MXNET_HOME/models Location for keeping the model parameters.
def get_densenet(num_layers, pretrained=False, ctx=cpu(), root=os.path.join(base.data_dir(), 'models'), **kwargs): r"""Densenet-BC model from the `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper. Parameters ---------- num_layers : int Number of layers for the variant of densenet. Options are 121, 161, 169, 201. pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default $MXNET_HOME/models Location for keeping the model parameters. """ num_init_features, growth_rate, block_config = densenet_spec[num_layers] net = DenseNet(num_init_features, growth_rate, block_config, **kwargs) if pretrained: from ..model_store import get_model_file net.load_parameters(get_model_file('densenet%d'%(num_layers), root=root), ctx=ctx) return net
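As a usage sketch, DenseNet-121 can be built and run on a dummy batch as below; the input shape is an assumption and the randomly initialized weights make the output values meaningless.

import mxnet as mx

net = get_densenet(121, pretrained=False)
net.initialize()
out = net(mx.nd.random.uniform(shape=(1, 3, 224, 224)))
print(out.shape)   # (1, 1000) with the default number of classes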
Loads the MXNet model file and returns MXNet symbol and params (weights). Parameters ---------- sym_filepath : str Path to the symbol (json) file params_filepath : str Path to the params file Returns ------- sym : MXNet symbol Model symbol object params : params object Model weights including both arg and aux params.
def load_module(sym_filepath, params_filepath): """Loads the MXNet model file and returns MXNet symbol and params (weights). Parameters ---------- sym_filepath : str Path to the symbol (json) file params_filepath : str Path to the params file Returns ------- sym : MXNet symbol Model symbol object params : params object Model weights including both arg and aux params. """ if not (os.path.isfile(sym_filepath) and os.path.isfile(params_filepath)): raise ValueError("Symbol and params files provided are invalid") else: try: # reads symbol.json file from given path and # retrieves model prefix and number of epochs model_name = sym_filepath.rsplit('.', 1)[0].rsplit('-', 1)[0] params_file_list = params_filepath.rsplit('.', 1)[0].rsplit('-', 1) # Setting num_epochs to 0 if not present in filename num_epochs = 0 if len(params_file_list) == 1 else int(params_file_list[1]) except IndexError: logging.info("Model and params name should be in format: " "prefix-symbol.json, prefix-epoch.params") raise sym, arg_params, aux_params = mx.model.load_checkpoint(model_name, num_epochs) # Merging arg and aux parameters params = {} params.update(arg_params) params.update(aux_params) return sym, params
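A brief sketch of the expected checkpoint naming convention; the file names below are placeholders.

# checkpoint files are expected to follow the prefix-symbol.json / prefix-0000.params
# convention; the names below are placeholders
sym, params = load_module('resnet-symbol.json', 'resnet-0000.params')
print(type(sym), len(params))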
Helper function to import module
def import_module(module_name): """Helper function to import module""" import sys, os import importlib sys.path.append(os.path.dirname(__file__)) return importlib.import_module(module_name)
Build network symbol for training SSD Parameters ---------- network : str base network symbol name num_classes : int number of object classes not including background from_layers : list of str feature extraction layers, use '' for add extra layers For example: from_layers = ['relu4_3', 'fc7', '', '', '', ''] which means extract feature from relu4_3 and fc7, adding 4 extra layers on top of fc7 num_filters : list of int number of filters for extra layers, you can use -1 for extracted features, however, if normalization and scale is applied, the number of filter for that layer must be provided. For example: num_filters = [512, -1, 512, 256, 256, 256] strides : list of int strides for the 3x3 convolution appended, -1 can be used for extracted feature layers pads : list of int paddings for the 3x3 convolution, -1 can be used for extracted layers sizes : list or list of list [min_size, max_size] for all layers or [[], [], []...] for specific layers ratios : list or list of list [ratio1, ratio2...] for all layers or [[], [], ...] for specific layers normalizations : int or list of int use normalizations value for all layers or [...] for specific layers, -1 indicate no normalizations and scales steps : list specify steps for each MultiBoxPrior layer, leave empty, it will calculate according to layer dimensions min_filter : int minimum number of filters used in 1x1 convolution nms_thresh : float non-maximum suppression threshold force_suppress : boolean whether suppress different class objects nms_topk : int apply NMS to top K detections Returns ------- mx.Symbol
def get_symbol_train(network, num_classes, from_layers, num_filters, strides, pads, sizes, ratios, normalizations=-1, steps=[], min_filter=128, nms_thresh=0.5, force_suppress=False, nms_topk=400, **kwargs): """Build network symbol for training SSD Parameters ---------- network : str base network symbol name num_classes : int number of object classes not including background from_layers : list of str feature extraction layers, use '' for add extra layers For example: from_layers = ['relu4_3', 'fc7', '', '', '', ''] which means extract feature from relu4_3 and fc7, adding 4 extra layers on top of fc7 num_filters : list of int number of filters for extra layers, you can use -1 for extracted features, however, if normalization and scale is applied, the number of filter for that layer must be provided. For example: num_filters = [512, -1, 512, 256, 256, 256] strides : list of int strides for the 3x3 convolution appended, -1 can be used for extracted feature layers pads : list of int paddings for the 3x3 convolution, -1 can be used for extracted layers sizes : list or list of list [min_size, max_size] for all layers or [[], [], []...] for specific layers ratios : list or list of list [ratio1, ratio2...] for all layers or [[], [], ...] for specific layers normalizations : int or list of int use normalizations value for all layers or [...] for specific layers, -1 indicate no normalizations and scales steps : list specify steps for each MultiBoxPrior layer, leave empty, it will calculate according to layer dimensions min_filter : int minimum number of filters used in 1x1 convolution nms_thresh : float non-maximum suppression threshold force_suppress : boolean whether suppress different class objects nms_topk : int apply NMS to top K detections Returns ------- mx.Symbol """ label = mx.sym.Variable('label') body = import_module(network).get_symbol(num_classes, **kwargs) layers = multi_layer_feature(body, from_layers, num_filters, strides, pads, min_filter=min_filter) loc_preds, cls_preds, anchor_boxes = multibox_layer(layers, \ num_classes, sizes=sizes, ratios=ratios, normalization=normalizations, \ num_channels=num_filters, clip=False, interm_layer=0, steps=steps) tmp = mx.symbol.contrib.MultiBoxTarget( *[anchor_boxes, label, cls_preds], overlap_threshold=.5, \ ignore_label=-1, negative_mining_ratio=3, minimum_negative_samples=0, \ negative_mining_thresh=.5, variances=(0.1, 0.1, 0.2, 0.2), name="multibox_target") loc_target = tmp[0] loc_target_mask = tmp[1] cls_target = tmp[2] cls_prob = mx.symbol.SoftmaxOutput(data=cls_preds, label=cls_target, \ ignore_label=-1, use_ignore=True, grad_scale=1., multi_output=True, \ normalization='valid', name="cls_prob") loc_loss_ = mx.symbol.smooth_l1(name="loc_loss_", \ data=loc_target_mask * (loc_preds - loc_target), scalar=1.0) loc_loss = mx.symbol.MakeLoss(loc_loss_, grad_scale=1., \ normalization='valid', name="loc_loss") # monitoring training status cls_label = mx.symbol.MakeLoss(data=cls_target, grad_scale=0, name="cls_label") det = mx.symbol.contrib.MultiBoxDetection(*[cls_prob, loc_preds, anchor_boxes], \ name="detection", nms_threshold=nms_thresh, force_suppress=force_suppress, variances=(0.1, 0.1, 0.2, 0.2), nms_topk=nms_topk) det = mx.symbol.MakeLoss(data=det, grad_scale=0, name="det_out") # group output out = mx.symbol.Group([cls_prob, loc_loss, cls_label, det]) return out
Build network for testing SSD Parameters ---------- network : str base network symbol name num_classes : int number of object classes not including background from_layers : list of str feature extraction layers, use '' for add extra layers For example: from_layers = ['relu4_3', 'fc7', '', '', '', ''] which means extract feature from relu4_3 and fc7, adding 4 extra layers on top of fc7 num_filters : list of int number of filters for extra layers, you can use -1 for extracted features, however, if normalization and scale is applied, the number of filter for that layer must be provided. For example: num_filters = [512, -1, 512, 256, 256, 256] strides : list of int strides for the 3x3 convolution appended, -1 can be used for extracted feature layers pads : list of int paddings for the 3x3 convolution, -1 can be used for extracted layers sizes : list or list of list [min_size, max_size] for all layers or [[], [], []...] for specific layers ratios : list or list of list [ratio1, ratio2...] for all layers or [[], [], ...] for specific layers normalizations : int or list of int use normalizations value for all layers or [...] for specific layers, -1 indicate no normalizations and scales steps : list specify steps for each MultiBoxPrior layer, leave empty, it will calculate according to layer dimensions min_filter : int minimum number of filters used in 1x1 convolution nms_thresh : float non-maximum suppression threshold force_suppress : boolean whether suppress different class objects nms_topk : int apply NMS to top K detections Returns ------- mx.Symbol
def get_symbol(network, num_classes, from_layers, num_filters, sizes, ratios, strides, pads, normalizations=-1, steps=[], min_filter=128, nms_thresh=0.5, force_suppress=False, nms_topk=400, **kwargs): """Build network for testing SSD Parameters ---------- network : str base network symbol name num_classes : int number of object classes not including background from_layers : list of str feature extraction layers, use '' for add extra layers For example: from_layers = ['relu4_3', 'fc7', '', '', '', ''] which means extract feature from relu4_3 and fc7, adding 4 extra layers on top of fc7 num_filters : list of int number of filters for extra layers, you can use -1 for extracted features, however, if normalization and scale is applied, the number of filter for that layer must be provided. For example: num_filters = [512, -1, 512, 256, 256, 256] strides : list of int strides for the 3x3 convolution appended, -1 can be used for extracted feature layers pads : list of int paddings for the 3x3 convolution, -1 can be used for extracted layers sizes : list or list of list [min_size, max_size] for all layers or [[], [], []...] for specific layers ratios : list or list of list [ratio1, ratio2...] for all layers or [[], [], ...] for specific layers normalizations : int or list of int use normalizations value for all layers or [...] for specific layers, -1 indicate no normalizations and scales steps : list specify steps for each MultiBoxPrior layer, leave empty, it will calculate according to layer dimensions min_filter : int minimum number of filters used in 1x1 convolution nms_thresh : float non-maximum suppression threshold force_suppress : boolean whether suppress different class objects nms_topk : int apply NMS to top K detections Returns ------- mx.Symbol """ body = import_module(network).get_symbol(num_classes, **kwargs) layers = multi_layer_feature(body, from_layers, num_filters, strides, pads, min_filter=min_filter) loc_preds, cls_preds, anchor_boxes = multibox_layer(layers, \ num_classes, sizes=sizes, ratios=ratios, normalization=normalizations, \ num_channels=num_filters, clip=False, interm_layer=0, steps=steps) cls_prob = mx.symbol.softmax(data=cls_preds, axis=1, name='cls_prob') out = mx.symbol.contrib.MultiBoxDetection(*[cls_prob, loc_preds, anchor_boxes], \ name="detection", nms_threshold=nms_thresh, force_suppress=force_suppress, variances=(0.1, 0.1, 0.2, 0.2), nms_topk=nms_topk) return out
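A hedged sketch of calling `get_symbol`; the base network name and the per-layer anchor settings mirror a typical SSD-300/VGG16 configuration and are assumptions for illustration, not values required by this function.

# 'vgg16_reduced' is assumed to be an importable base-network module; the layer
# names, sizes and ratios below are illustrative defaults for a 6-scale SSD head
ssd_test = get_symbol(network='vgg16_reduced', num_classes=20,
                      from_layers=['relu4_3', 'fc7', '', '', '', ''],
                      num_filters=[512, -1, 512, 256, 256, 256],
                      sizes=[[.1, .141], [.2, .272], [.37, .447],
                             [.54, .619], [.71, .79], [.88, .961]],
                      ratios=[[1, 2, .5]] + [[1, 2, .5, 3, 1. / 3]] * 5,
                      strides=[-1, -1, 2, 2, 2, 2],
                      pads=[-1, -1, 1, 1, 1, 1],
                      normalizations=[20, -1, -1, -1, -1, -1],
                      nms_thresh=0.45)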
This is an internal helper function that can be used for either of these but not both at the same time: 1. Record the output and gradient of output of an intermediate convolutional layer. 2. Record the gradients of the image. Parameters ---------- image : NDArray Image to visualize. This is an NDArray with the preprocessed image. class_id : int Category ID this image belongs to. If not provided, network's prediction will be used. conv_layer_name: str Name of the convolutional layer whose output and output's gradients need to be captured. image_grad: bool Whether to capture gradients of the image.
def _get_grad(net, image, class_id=None, conv_layer_name=None, image_grad=False): """This is an internal helper function that can be used for either of these but not both at the same time: 1. Record the output and gradient of output of an intermediate convolutional layer. 2. Record the gradients of the image. Parameters ---------- image : NDArray Image to visualize. This is an NDArray with the preprocessed image. class_id : int Category ID this image belongs to. If not provided, network's prediction will be used. conv_layer_name: str Name of the convolutional layer whose output and output's gradients need to be captured. image_grad: bool Whether to capture gradients of the image.""" if image_grad: image.attach_grad() Conv2D.capture_layer_name = None Activation.set_guided_backprop(True) else: # Tell convviz.Conv2D which layer's output and gradient needs to be recorded Conv2D.capture_layer_name = conv_layer_name Activation.set_guided_backprop(False) # Run the network with autograd.record(train_mode=False): out = net(image) # If user didn't provide a class id, we'll use the class that the network predicted if class_id is None: model_output = out.asnumpy() class_id = np.argmax(model_output) # Create a one-hot target with class_id and backprop with the created target one_hot_target = mx.nd.one_hot(mx.nd.array([class_id]), 1000) out.backward(one_hot_target, train_mode=False) if image_grad: return image.grad[0].asnumpy() else: # Return the recorded convolution output and gradient conv_out = Conv2D.conv_output return conv_out[0].asnumpy(), conv_out.grad[0].asnumpy()
Get the output and gradients of output of a convolutional layer. Parameters: ---------- net: Block Network to use for visualization. image: NDArray Preprocessed image to use for visualization. class_id: int Category ID this image belongs to. If not provided, network's prediction will be used. conv_layer_name: str Name of the convolutional layer whose output and output's gradients need to be captured.
def get_conv_out_grad(net, image, class_id=None, conv_layer_name=None): """Get the output and gradients of output of a convolutional layer. Parameters: ---------- net: Block Network to use for visualization. image: NDArray Preprocessed image to use for visualization. class_id: int Category ID this image belongs to. If not provided, network's prediction will be used. conv_layer_name: str Name of the convolutional layer whose output and output's gradients need to be captured.""" return _get_grad(net, image, class_id, conv_layer_name, image_grad=False)
Get the gradients of the image. Parameters: ---------- net: Block Network to use for visualization. image: NDArray Preprocessed image to use for visualization. class_id: int Category ID this image belongs to. If not provided, network's prediction will be used.
def get_image_grad(net, image, class_id=None): """Get the gradients of the image. Parameters: ---------- net: Block Network to use for visualization. image: NDArray Preprocessed image to use for visualization. class_id: int Category ID this image belongs to. If not provided, network's prediction will be used.""" return _get_grad(net, image, class_id, image_grad=True)
Convert gradients of image obtained using `get_image_grad` into image. This shows parts of the image that is most strongly activating the output neurons.
def grad_to_image(gradient): """Convert gradients of image obtained using `get_image_grad` into image. This shows parts of the image that is most strongly activating the output neurons.""" gradient = gradient - gradient.min() gradient /= gradient.max() gradient = np.uint8(gradient * 255).transpose(1, 2, 0) gradient = gradient[..., ::-1] return gradient
Compute CAM. Refer section 3 of https://arxiv.org/abs/1610.02391 for details
def get_cam(imggrad, conv_out): """Compute CAM. Refer section 3 of https://arxiv.org/abs/1610.02391 for details""" weights = np.mean(imggrad, axis=(1, 2)) cam = np.ones(conv_out.shape[1:], dtype=np.float32) for i, w in enumerate(weights): cam += w * conv_out[i, :, :] cam = cv2.resize(cam, (imggrad.shape[1], imggrad.shape[2])) cam = np.maximum(cam, 0) cam = (cam - np.min(cam)) / (np.max(cam) - np.min(cam)) cam = np.uint8(cam * 255) return cam
Draw a heatmap on top of the original image using intensities from activation_map
def get_img_heatmap(orig_img, activation_map): """Draw a heatmap on top of the original image using intensities from activation_map""" heatmap = cv2.applyColorMap(activation_map, cv2.COLORMAP_COOL) heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB) img_heatmap = np.float32(heatmap) + np.float32(orig_img) img_heatmap = img_heatmap / np.max(img_heatmap) img_heatmap *= 255 return img_heatmap.astype(int)
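To show how the visualization helpers above fit together, a hedged end-to-end sketch follows; `net`, `preprocessed_img`, `orig_img` and the layer name are placeholders that depend on the model being visualized, and the network must use the Conv2D/Activation wrappers these helpers expect.

import cv2

# 1. conv layer output and its gradient for the predicted (or given) class
conv_out, conv_out_grad = get_conv_out_grad(net, preprocessed_img,
                                            conv_layer_name='last_conv_layer')  # placeholder name
# 2. class activation map at the conv layer's spatial resolution
cam = get_cam(conv_out_grad, conv_out)
# 3. resize the map to the original image and overlay it as a heatmap
cam = cv2.resize(cam, (orig_img.shape[1], orig_img.shape[0]))
heatmap_img = get_img_heatmap(orig_img, cam)
# 4. guided-backprop style saliency from the image gradients
saliency = grad_to_image(get_image_grad(net, preprocessed_img))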