Please provide a description of the function:

def train(symbol_data, train_iterator, valid_iterator, data_column_names, target_names):
    devs = mx.cpu()  # default setting
    if args.gpus is not None:
        # one context per requested GPU id
        devs = [mx.gpu(int(i)) for i in args.gpus.split(',')]
    module = mx.mod.Module(symbol_data, data_names=data_column_names,
                           label_names=target_names, context=devs)
    module.fit(train_data=train_iterator,
               eval_data=valid_iterator,
               eval_metric='acc',
               kvstore=args.kv_store,
               optimizer=args.optimizer,
               optimizer_params={'learning_rate': args.lr},
               initializer=mx.initializer.Uniform(0.1),
               num_epoch=args.num_epochs,
               batch_end_callback=mx.callback.Speedometer(args.batch_size, args.disp_batches),
               epoch_end_callback=save_model())
[ "Train cnn model\n\n Parameters\n ----------\n symbol_data: symbol\n train_iterator: DataIter\n Train DataIter\n valid_iterator: DataIter\n Valid DataIter\n data_column_names: list of str\n Defaults to ('data') for a typical model used in image classification\n target_names: list of str\n Defaults to ('softmax_label') for a typical model used in image classification\n " ]
Please provide a description of the function:

def convert_mat_to_images(args):
    '''convert the caltech101 mat file to images

    Examples
    --------
    python convert_data.py --dataset /home/ubuntu/datasets/caltech101/data/caltech101_silhouettes_28.mat --save_path /home/ubuntu/datasets/caltech101/data/ --invert --height 32 --width 32
    '''
    dataset = scipy.io.loadmat("{}/{}".format(args.save_path, args.dataset))

    # image pixel data
    X = dataset['X']

    # image class labels (not used in this project)
    Y = dataset['Y']

    total_image = X.shape[0]

    h = args.height
    w = args.width

    for i in range(total_image):
        img = X[i]
        img = np.reshape(img, (28, 28))
        if args.invert:
            img = (1 - img) * 255
        else:
            img = img * 255
        img = Image.fromarray(img, 'L')
        img = img.rotate(-90)
        img = img.resize([h, w], Image.BILINEAR)
        img.save(args.save_path + '/img' + str(i) + '.png')
[]
Please provide a description of the function:

def build(args) -> None:
    venv_exe = shutil.which('virtualenv')
    pyexe = shutil.which(args.pyexe)
    if not venv_exe:
        logging.warn("virtualenv wasn't found in path, it's recommended to install virtualenv to manage python environments")
    if not pyexe:
        logging.warn("Python executable %s not found in path", args.pyexe)
    if args.cmake_options:
        cmake = CMake(args.cmake_options)
    else:
        cmake = CMake()
    cmake()
    create_virtualenv(venv_exe, pyexe, args.venv)
[ "Build using CMake" ]
Please provide a description of the function:

def create_network(batch_size, update_freq):
    import logging
    head = '%(asctime)-15s %(message)s'
    logging.basicConfig(level=logging.INFO, format=head)

    train_data = np.random.randint(1, 5, [1000, 2])
    weights = np.array([1.0, 2.0])
    train_label = train_data.dot(weights)

    di = mx.io.NDArrayIter(train_data, train_label, batch_size=batch_size,
                           shuffle=True, label_name='lin_reg_label')
    X = mx.sym.Variable('data')
    Y = mx.symbol.Variable('lin_reg_label')
    fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1)
    lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro")

    mod = SVRGModule(
        symbol=lro,
        data_names=['data'],
        label_names=['lin_reg_label'],
        update_freq=update_freq,
        logger=logging
    )

    return di, mod
[ "Create a linear regression network for performing SVRG optimization.\n Parameters\n ----------\n batch_size: int\n Size of data split\n update_freq: int\n Update Frequency for calculating full gradients\n\n Returns\n ----------\n di: mx.io.NDArrayIter\n Data iterator\n update_freq: SVRGModule\n An instance of SVRGModule for performing SVRG optimization\n " ]
Please provide a description of the function:

def get_squeezenet(version, pretrained=False, ctx=cpu(),
                   root=os.path.join(base.data_dir(), 'models'), **kwargs):
    net = SqueezeNet(version, **kwargs)
    if pretrained:
        from ..model_store import get_model_file
        net.load_parameters(get_model_file('squeezenet%s' % version, root=root), ctx=ctx)
    return net
[ "SqueezeNet model from the `\"SqueezeNet: AlexNet-level accuracy with 50x fewer parameters\n and <0.5MB model size\" <https://arxiv.org/abs/1602.07360>`_ paper.\n SqueezeNet 1.1 model from the `official SqueezeNet repo\n <https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.\n SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters\n than SqueezeNet 1.0, without sacrificing accuracy.\n\n Parameters\n ----------\n version : str\n Version of squeezenet. Options are '1.0', '1.1'.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default $MXNET_HOME/models\n Location for keeping the model parameters.\n " ]
Please provide a description of the function:

def parse_helper(attrs, attrs_name, alt_value=None):
    tuple_re = re.compile(r'\([0-9L|,| ]+\)')
    if not attrs:
        return alt_value
    attrs_str = None if attrs.get(attrs_name) is None else str(attrs.get(attrs_name))
    if attrs_str is None:
        return alt_value
    attrs_match = tuple_re.search(attrs_str)
    if attrs_match is not None:
        if attrs_match.span() == (0, len(attrs_str)):
            dims = eval(attrs_str)
            return dims
        else:
            raise AttributeError("Malformed %s dimensions: %s" % (attrs_name, str(attrs_str)))
    return alt_value
[ "Helper function to parse operator attributes in required format." ]
Please provide a description of the function:

def transform_padding(pad_width):
    num_pad_values = len(pad_width)
    onnx_pad_width = [0] * num_pad_values

    start_index = 0
    # num_pad_values will always be multiple of 2
    end_index = int(num_pad_values / 2)
    for idx in range(0, num_pad_values):
        if idx % 2 == 0:
            onnx_pad_width[start_index] = pad_width[idx]
            start_index += 1
        else:
            onnx_pad_width[end_index] = pad_width[idx]
            end_index += 1

    return onnx_pad_width
[ "Helper function to convert padding format for pad operator.\n " ]
Please provide a description of the function:

def convert_string_to_list(string_val):
    result_list = []

    list_string = string_val.split(',')
    for val in list_string:
        val = str(val.strip())
        val = val.replace("(", "").replace(")", "")
        val = val.replace("L", "").replace("[", "").replace("]", "")
        if val not in ("", "None"):
            result_list.append(int(val))

    return result_list
[ "Helper function to convert string to list.\n Used to convert shape attribute string to list format.\n " ]
Please provide a description of the function:

def get_inputs(node, kwargs):
    name = node["name"]
    proc_nodes = kwargs["proc_nodes"]
    index_lookup = kwargs["index_lookup"]
    inputs = node["inputs"]
    attrs = node.get("attrs", {})

    input_nodes = []
    for ip in inputs:
        input_node_id = index_lookup[ip[0]]
        input_nodes.append(proc_nodes[input_node_id].name)

    return name, input_nodes, attrs
[ "Helper function to get inputs" ]
Please provide a description of the function:

def create_basic_op_node(op_name, node, kwargs):
    name, input_nodes, _ = get_inputs(node, kwargs)

    node = onnx.helper.make_node(
        op_name,
        input_nodes,
        [name],
        name=name
    )
    return [node]
[ "Helper function to create a basic operator\n node that doesn't contain op specific attrs" ]
Please provide a description of the function:

def convert_weights_and_inputs(node, **kwargs):
    name, _, _ = get_inputs(node, kwargs)

    if kwargs["is_input"] is False:
        weights = kwargs["weights"]
        initializer = kwargs["initializer"]
        np_arr = weights[name]
        data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np_arr.dtype]
        dims = np.shape(np_arr)

        tensor_node = onnx.helper.make_tensor_value_info(name, data_type, dims)

        initializer.append(
            onnx.helper.make_tensor(
                name=name,
                data_type=data_type,
                dims=dims,
                vals=np_arr.flatten().tolist(),
                raw=False,
            )
        )
        return [tensor_node]
    else:
        tval_node = onnx.helper.make_tensor_value_info(name, kwargs["in_type"], kwargs["in_shape"])
        return [tval_node]
[ "Helper function to convert weights and inputs.\n " ]
Please provide a description of the function:

def convert_convolution(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    kernel_dims = list(parse_helper(attrs, "kernel"))
    stride_dims = list(parse_helper(attrs, "stride", [1, 1]))
    pad_dims = list(parse_helper(attrs, "pad", [0, 0]))
    num_group = int(attrs.get("num_group", 1))
    dilations = list(parse_helper(attrs, "dilate", [1, 1]))

    pad_dims = pad_dims + pad_dims

    conv_node = onnx.helper.make_node(
        "Conv",
        inputs=input_nodes,
        outputs=[name],
        kernel_shape=kernel_dims,
        strides=stride_dims,
        dilations=dilations,
        pads=pad_dims,
        group=num_group,
        name=name
    )

    return [conv_node]
[ "Map MXNet's convolution operator attributes to onnx's Conv operator\n and return the created node.\n " ]
Please provide a description of the function:

def convert_deconvolution(node, **kwargs):
    name, inputs, attrs = get_inputs(node, kwargs)

    kernel_dims = list(parse_helper(attrs, "kernel"))
    stride_dims = list(parse_helper(attrs, "stride", [1, 1]))
    pad_dims = list(parse_helper(attrs, "pad", [0, 0]))
    num_group = int(attrs.get("num_group", 1))
    dilations = list(parse_helper(attrs, "dilate", [1, 1]))
    adj_dims = list(parse_helper(attrs, "adj", [0, 0]))

    pad_dims = pad_dims + pad_dims

    deconv_node = onnx.helper.make_node(
        "ConvTranspose",
        inputs=inputs,
        outputs=[name],
        kernel_shape=kernel_dims,
        strides=stride_dims,
        dilations=dilations,
        output_padding=adj_dims,
        pads=pad_dims,
        group=num_group,
        name=name
    )

    return [deconv_node]
[ "Map MXNet's deconvolution operator attributes to onnx's ConvTranspose operator\n and return the created node.\n " ]
Please provide a description of the function:

def convert_crop(node, **kwargs):
    name, inputs, attrs = get_inputs(node, kwargs)
    num_inputs = len(inputs)

    y, x = list(parse_helper(attrs, "offset", [0, 0]))
    h, w = list(parse_helper(attrs, "h_w", [0, 0]))
    if num_inputs > 1:
        h, w = kwargs["out_shape"][-2:]
    border = [x, y, x + w, y + h]

    crop_node = onnx.helper.make_node(
        "Crop",
        inputs=[inputs[0]],
        outputs=[name],
        border=border,
        scale=[1, 1],
        name=name
    )

    logging.warning(
        "Using an experimental ONNX operator: Crop. "
        "Its definition can change.")

    return [crop_node]
[ "Map MXNet's crop operator attributes to onnx's Crop operator\n and return the created node.\n " ]
Please provide a description of the function:

def convert_fully_connected(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)
    initializer = kwargs["initializer"]

    no_bias = get_boolean_attribute_value(attrs, "no_bias")

    fcnode = []

    op_name = "flatten_" + str(kwargs["idx"])
    flatten_node = onnx.helper.make_node(
        'Flatten',
        inputs=[input_nodes[0]],
        outputs=[op_name],
        name=op_name
    )

    input_nodes[0] = op_name
    fcnode.append(flatten_node)

    if no_bias:
        data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')]
        bias_name = "bias" + str(kwargs["idx"])
        tensor_node = onnx.helper.make_tensor_value_info(bias_name, data_type, (1,))
        initializer.append(
            onnx.helper.make_tensor(
                name=bias_name,
                data_type=data_type,
                dims=(1,),
                vals=[0],
                raw=False,
            )
        )
        input_nodes.append(bias_name)
        fcnode.append(tensor_node)

    node = onnx.helper.make_node(
        "Gemm",
        input_nodes,  # input (A, B, C) - C can be in place
        [name],  # output
        alpha=1.0,
        beta=1.0,
        transA=False,
        transB=True,
        name=name
    )

    fcnode.append(node)

    return fcnode
[ "Map MXNet's FullyConnected operator attributes to onnx's Gemm operator\n and return the created node.\n " ]
Please provide a description of the function:

def convert_batchnorm(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    momentum = float(attrs.get("momentum", 0.9))
    eps = float(attrs.get("eps", 0.001))

    bn_node = onnx.helper.make_node(
        "BatchNormalization",
        input_nodes,
        [name],
        name=name,
        epsilon=eps,
        momentum=momentum,
        # MXNet computes mean and variance per feature for batchnorm.
        # Default for onnx is across all spatial features. So disabling the parameter.
        spatial=0
    )
    return [bn_node]
[ "Map MXNet's BatchNorm operator attributes to onnx's BatchNormalization operator\n and return the created node.\n " ]
Please provide a description of the function:

def convert_activation(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    act_type = attrs["act_type"]

    # Creating a dictionary here: most names follow the titlecase pattern
    # mxnet_name.title(), but 'softrelu' maps to ONNX 'Softplus'.
    act_types = {
        "tanh": "Tanh",
        "relu": "Relu",
        "sigmoid": "Sigmoid",
        "softrelu": "Softplus",
        "softsign": "Softsign"
    }

    act_name = act_types.get(act_type)
    if act_name:
        node = onnx.helper.make_node(
            act_name,
            input_nodes,
            [name],
            name=name
        )
    else:
        raise AttributeError(
            "Activation %s not implemented or recognized in the converter" % act_type
        )

    return [node]
[ "Map MXNet's Activation operator attributes to onnx's Tanh/Relu operator\n and return the created node.\n " ]
Please provide a description of the function:

def convert_pad(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    mxnet_pad_width = convert_string_to_list(attrs.get("pad_width"))
    onnx_pad_width = transform_padding(mxnet_pad_width)

    pad_mode = attrs.get("mode")

    if pad_mode == "constant":
        pad_value = float(attrs.get("constant_value")) \
            if "constant_value" in attrs else 0.0
        node = onnx.helper.make_node(
            'Pad',
            inputs=input_nodes,
            outputs=[name],
            mode='constant',
            value=pad_value,
            pads=onnx_pad_width,
            name=name
        )
    else:
        node = onnx.helper.make_node(
            'Pad',
            inputs=input_nodes,
            outputs=[name],
            mode=pad_mode,
            pads=onnx_pad_width,
            name=name
        )

    return [node]
[ "Map MXNet's pad operator attributes to onnx's Pad operator\n and return the created node.\n " ]
Please provide a description of the function:

def create_helper_trans_node(op_name, input_node, node_name):
    node_name = op_name + "_" + node_name
    trans_node = onnx.helper.make_node(
        'Transpose',
        inputs=[input_node],
        outputs=[node_name],
        name=node_name
    )
    return trans_node
[ "create extra transpose node for dot operator" ]
Please provide a description of the function:

def convert_dot(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)
    input_node_a = input_nodes[0]
    input_node_b = input_nodes[1]

    trans_a_node = None
    trans_b_node = None

    trans_a = get_boolean_attribute_value(attrs, "transpose_a")
    trans_b = get_boolean_attribute_value(attrs, "transpose_b")

    op_name = "transpose" + str(kwargs["idx"])

    if trans_a:
        trans_a_node = create_helper_trans_node(op_name, input_nodes[0], 'a')
        input_node_a = op_name + "_a"
    if trans_b:
        trans_b_node = create_helper_trans_node(op_name, input_nodes[1], 'b')
        input_node_b = op_name + "_b"

    matmul_node = onnx.helper.make_node(
        'MatMul',
        inputs=[input_node_a, input_node_b],
        outputs=[name],
        name=name
    )

    if not trans_a and not trans_b:
        return [matmul_node]
    elif trans_a and not trans_b:
        return [trans_a_node, matmul_node]
    elif trans_b and not trans_a:
        return [trans_b_node, matmul_node]
    else:
        return [trans_a_node, trans_b_node, matmul_node]
[ "Map MXNet's dot operator attributes to onnx's\n MatMul and Transpose operators based on the values set for\n transpose_a, transpose_b attributes." ]
Please provide a description of the function:

def convert_linalg_gemm2(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    # Getting the attributes and assigning default values.
    alpha = float(attrs.get("alpha", 1.0))
    trans_a = get_boolean_attribute_value(attrs, "transpose_a")
    trans_b = get_boolean_attribute_value(attrs, "transpose_b")

    op_name = "transpose" + str(kwargs["idx"])

    if alpha == 1.0 and trans_a == 0 and trans_b == 0:
        matmul_node = onnx.helper.make_node(
            'MatMul',
            inputs=input_nodes,
            outputs=[name],
            name=name
        )
        return [matmul_node]
    elif trans_a == 1 and trans_b == 0:
        node_name = op_name + "_a"
        trans_a_node = onnx.helper.make_node(
            'Transpose',
            inputs=[input_nodes[0]],
            outputs=[op_name + "_a"],
            name=node_name
        )
        matmul_node = onnx.helper.make_node(
            'MatMul',
            inputs=[node_name, input_nodes[1]],
            outputs=[name],
            name=name
        )
        return [trans_a_node, matmul_node]
    elif trans_a == 0 and trans_b == 1:
        node_name = op_name + "_b"
        trans_b_node = onnx.helper.make_node(
            'Transpose',
            inputs=[input_nodes[1]],
            outputs=[op_name + "_b"],
            name=node_name
        )
        matmul_node = onnx.helper.make_node(
            'MatMul',
            inputs=[input_nodes[0], node_name],
            outputs=[name],
            name=name
        )
        return [trans_b_node, matmul_node]
    else:
        node_name_a = op_name + "_a"
        trans_a_node = onnx.helper.make_node(
            'Transpose',
            inputs=[input_nodes[0]],
            outputs=[op_name + "_a"],
            name=node_name_a
        )
        node_name_b = op_name + "_b"
        trans_b_node = onnx.helper.make_node(
            'Transpose',
            inputs=[input_nodes[1]],
            outputs=[op_name + "_b"],
            name=node_name_b
        )
        # feed the transposed outputs (not the raw inputs) into MatMul
        matmul_node = onnx.helper.make_node(
            'MatMul',
            inputs=[node_name_a, node_name_b],
            outputs=[name],
            name=name
        )
        return [trans_a_node, trans_b_node, matmul_node]
[ "Map MXNet's _linalg_gemm2 operator attributes to onnx's\n MatMul and Transpose operators based on the values set for\n transpose_a, transpose_b attributes.\n Return multiple nodes created.\n " ]
Please provide a description of the function:

def convert_pooling(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    kernel = eval(attrs["kernel"])
    pool_type = attrs["pool_type"] if attrs.get("pool_type") else "max"
    stride = eval(attrs["stride"]) if attrs.get("stride") else (1, 1)
    global_pool = get_boolean_attribute_value(attrs, "global_pool")
    p_value = attrs.get('p_value', 'None')

    pooling_convention = attrs.get('pooling_convention', 'valid')

    if pooling_convention == 'full':
        pooling_warning = "Pooling: ONNX currently doesn't support pooling_convention. " \
                          "This might lead to shape or accuracy issues. " \
                          "https://github.com/onnx/onnx/issues/549"
        logging.warning(pooling_warning)

    pad_dims = list(parse_helper(attrs, "pad", [0, 0]))
    pad_dims = pad_dims + pad_dims
    pool_types = {"max": "MaxPool", "avg": "AveragePool", "lp": "LpPool"}
    global_pool_types = {"max": "GlobalMaxPool", "avg": "GlobalAveragePool",
                         "lp": "GlobalLpPool"}

    if pool_type == 'lp' and p_value == 'None':
        raise AttributeError('ONNX requires a p value for LpPool and GlobalLpPool')

    if global_pool:
        if pool_type == 'lp':
            node = onnx.helper.make_node(
                global_pool_types[pool_type],
                input_nodes,  # input
                [name],
                p=int(p_value),
                name=name
            )
        else:
            node = onnx.helper.make_node(
                global_pool_types[pool_type],
                input_nodes,  # input
                [name],
                name=name
            )
    else:
        if pool_type == 'lp':
            node = onnx.helper.make_node(
                pool_types[pool_type],
                input_nodes,  # input
                [name],
                p=int(p_value),
                kernel_shape=kernel,
                pads=pad_dims,
                strides=stride,
                name=name
            )
        else:
            node = onnx.helper.make_node(
                pool_types[pool_type],
                input_nodes,  # input
                [name],
                kernel_shape=kernel,
                pads=pad_dims,
                strides=stride,
                name=name
            )

    return [node]
[ "Map MXNet's Pooling operator attributes to onnx's\n MaxPool/AveragePool/GlobalMaxPool/GlobalAveragePool operators\n based on the input node's attributes and return the created node.\n " ]
Please provide a description of the function:

def convert_instancenorm(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    eps = float(attrs.get("eps", 0.001))

    node = onnx.helper.make_node(
        'InstanceNormalization',
        inputs=input_nodes,
        outputs=[name],
        name=name,
        epsilon=eps)

    return [node]
[ "Map MXNet's InstanceNorm operator attributes to onnx's InstanceNormalization operator\n based on the input node's attributes and return the created node.\n " ]
Please provide a description of the function:

def convert_leakyrelu(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    act_type = attrs.get("act_type", "leaky")
    alpha = float(attrs.get("slope", 0.25))

    act_name = {"elu": "Elu", "leaky": "LeakyRelu", "prelu": "PRelu",
                "selu": "Selu"}

    if act_type == "prelu" or act_type == "selu":
        node = onnx.helper.make_node(
            act_name[act_type],
            inputs=input_nodes,
            outputs=[name],
            name=name)
    else:
        node = onnx.helper.make_node(
            act_name[act_type],
            inputs=input_nodes,
            outputs=[name],
            name=name,
            alpha=alpha)

    return [node]
[ "Map MXNet's LeakyReLU operator attributes to onnx's Elu/LeakyRelu/PRelu operators\n based on the input node's attributes and return the created node.\n " ]
Please provide a description of the function:

def convert_softmax(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    axis = int(attrs.get("axis", -1))

    softmax_node = onnx.helper.make_node(
        "Softmax",
        input_nodes,
        [name],
        axis=axis,
        name=name
    )

    return [softmax_node]
[ "Map MXNet's softmax operator attributes to onnx's Softmax operator\n and return the created node.\n " ]
Please provide a description of the function:

def convert_softmax_output(node, **kwargs):
    name = node["name"]

    input1_idx = kwargs["index_lookup"][node["inputs"][0][0]]
    input1 = kwargs["proc_nodes"][input1_idx]

    softmax_node = onnx.helper.make_node(
        "Softmax",
        [input1.name],
        [name],
        axis=1,
        name=name
    )

    return [softmax_node]
[ "Map MXNet's SoftmaxOutput operator attributes to onnx's Softmax operator\n and return the created node.\n " ]
Please provide a description of the function:

def convert_logistic_regression_output(node, **kwargs):
    name = node["name"]
    input1_idx = kwargs["index_lookup"][node["inputs"][0][0]]
    input1 = kwargs["proc_nodes"][input1_idx]
    sigmoid_node = onnx.helper.make_node(
        "Sigmoid",
        [input1.name],
        [name],
        name=name
    )
    return [sigmoid_node]
[ "Map MXNet's SoftmaxOutput operator attributes to onnx's Softmax operator\n and return the created node.\n " ]
Please provide a description of the function:

def convert_concat(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    axis = int(attrs.get("dim", 1))
    concat_node = onnx.helper.make_node(
        "Concat",
        input_nodes,
        [name],
        axis=axis,
        name=name
    )
    return [concat_node]
[ "Map MXNet's Concat operator attributes to onnx's Concat operator\n and return the created node.\n " ]
Please provide a description of the function:

def convert_transpose(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    axes = attrs.get("axes", ())
    if axes:
        axes = tuple(map(int, re.findall(r'\d+', axes)))

        transpose_node = onnx.helper.make_node(
            "Transpose",
            input_nodes,
            [name],
            perm=axes,
            name=name
        )
    else:
        transpose_node = onnx.helper.make_node(
            "Transpose",
            input_nodes,
            [name],
            name=name
        )

    return [transpose_node]
[ "Map MXNet's transpose operator attributes to onnx's Transpose operator\n and return the created node.\n " ]
Please provide a description of the function:

def convert_lrn(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    alpha = float(attrs.get("alpha", 0.0001))
    beta = float(attrs.get("beta", 0.75))
    bias = float(attrs.get("knorm", 1.0))
    size = int(attrs.get("nsize"))

    lrn_node = onnx.helper.make_node(
        "LRN",
        inputs=input_nodes,
        outputs=[name],
        name=name,
        alpha=alpha,
        beta=beta,
        bias=bias,
        size=size
    )

    return [lrn_node]
[ "Map MXNet's LRN operator attributes to onnx's LRN operator\n and return the created node.\n " ]
Please provide a description of the function:

def convert_l2normalization(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    mode = attrs.get("mode", "instance")

    if mode != "channel":
        raise AttributeError("L2Normalization: ONNX currently supports channel mode only")

    l2norm_node = onnx.helper.make_node(
        "LpNormalization",
        input_nodes,
        [name],
        axis=1,  # channel only
        name=name
    )
    return [l2norm_node]
[ "Map MXNet's L2Normalization operator attributes to onnx's LpNormalization operator\n and return the created node.\n " ]
Please provide a description of the function:

def convert_dropout(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    probability = float(attrs.get("p", 0.5))

    dropout_node = onnx.helper.make_node(
        "Dropout",
        input_nodes,
        [name],
        ratio=probability,
        name=name
    )
    return [dropout_node]
[ "Map MXNet's Dropout operator attributes to onnx's Dropout operator\n and return the created node.\n " ]
Please provide a description of the function:

def convert_clip(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    # plain float() rather than the removed np.float alias
    a_min = float(attrs.get('a_min', -np.inf))
    a_max = float(attrs.get('a_max', np.inf))

    clip_node = onnx.helper.make_node(
        "Clip",
        input_nodes,
        [name],
        name=name,
        min=a_min,
        max=a_max
    )
    return [clip_node]
[ "Map MXNet's Clip operator attributes to onnx's Clip operator\n and return the created node.\n " ]
Please provide a description of the function:

def scalar_op_helper(node, op_name, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)
    from onnx import numpy_helper
    input_type = kwargs["in_type"]
    scalar_value = np.array([attrs.get("scalar", 1)],
                            dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[input_type])

    initializer = kwargs["initializer"]
    flag = True
    # If the input value is in initializer, just multiply with scalar input
    # and create a new initializer
    for i in initializer:
        if i.name == input_nodes[0]:
            if op_name == 'Mul':
                new_initializer = numpy_helper.to_array(i) * scalar_value[0]
            elif op_name == 'Sub':
                if name.startswith("_rminusscalar"):
                    new_initializer = scalar_value[0] - numpy_helper.to_array(i)
                else:
                    new_initializer = numpy_helper.to_array(i) - scalar_value[0]
            elif op_name == 'Add':
                new_initializer = numpy_helper.to_array(i) + scalar_value[0]
            elif op_name == 'Div':
                if name.startswith("_rdivscalar"):
                    new_initializer = scalar_value[0] / numpy_helper.to_array(i)
                else:
                    new_initializer = numpy_helper.to_array(i) / scalar_value[0]
            elif op_name == 'Pow':
                new_initializer = numpy_helper.to_array(i) ** scalar_value[0]
            flag = False
            break

    # else create a new tensor of the scalar value, add it in initializer
    if flag is True:
        dims = np.shape(scalar_value)

        scalar_op_name = "scalar_op" + str(kwargs["idx"])
        tensor_node = onnx.helper.make_tensor_value_info(scalar_op_name, input_type, dims)

        initializer.append(
            onnx.helper.make_tensor(
                name=scalar_op_name,
                data_type=input_type,
                dims=dims,
                vals=scalar_value,
                raw=False,
            )
        )

        mul_node = onnx.helper.make_node(
            op_name,
            [input_nodes[0], scalar_op_name],
            [name],
            name=name
        )

        return [tensor_node, mul_node]
    else:
        data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[new_initializer.dtype]
        dims = np.shape(new_initializer)

        new_a_node = input_nodes[0] + str(kwargs["idx"])
        tensor_node = onnx.helper.make_tensor_value_info(new_a_node, data_type, dims)

        initializer.append(
            onnx.helper.make_tensor(
                name=new_a_node,
                data_type=data_type,
                dims=dims,
                vals=new_initializer,
                raw=False,
            )
        )
        return [tensor_node]
[ "Helper function for scalar arithmetic operations" ]
Please provide a description of the function:

def convert_argmax(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    axis = int(attrs.get("axis"))
    keepdims = get_boolean_attribute_value(attrs, "keepdims")

    node = onnx.helper.make_node(
        'ArgMax',
        inputs=input_nodes,
        axis=axis,
        keepdims=keepdims,
        outputs=[name],
        name=name
    )
    return [node]
[ "Map MXNet's argmax operator attributes to onnx's ArgMax operator\n and return the created node.\n " ]
Please provide a description of the function:

def convert_reshape(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    output_shape_list = convert_string_to_list(attrs["shape"])

    initializer = kwargs["initializer"]
    output_shape_np = np.array(output_shape_list, dtype='int64')
    data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype]
    dims = np.shape(output_shape_np)

    output_shape_name = "reshape_attr_tensor" + str(kwargs["idx"])
    tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)

    initializer.append(
        onnx.helper.make_tensor(
            name=output_shape_name,
            data_type=data_type,
            dims=dims,
            vals=output_shape_list,
            raw=False,
        )
    )

    input_nodes.append(output_shape_name)

    not_supported_shape = [-2, -3, -4]

    for val in output_shape_list:
        if val in not_supported_shape:
            raise AttributeError("Reshape: Shape value not supported in ONNX", val)

    reshape_node = onnx.helper.make_node(
        "Reshape",
        input_nodes,
        [name],
        name=name
    )

    return [tensor_node, reshape_node]
[ "Map MXNet's Reshape operator attributes to onnx's Reshape operator.\n Converts output shape attribute to output shape tensor\n and return multiple created nodes.\n " ]
Please provide a description of the function:

def convert_cast(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    dtype = attrs["dtype"]

    # dtype can be mapped only with types from TensorProto;
    # float32 is mapped to float and float64 to double in onnx,
    # following the tensorproto mapping https://github.com/onnx/onnx/blob/master/onnx/mapping.py
    if dtype == 'float32':
        dtype = 'float'
    elif dtype == 'float64':
        dtype = 'double'

    node = onnx.helper.make_node(
        "Cast",
        input_nodes,
        [name],
        to=getattr(onnx.TensorProto, dtype.upper()),
        name=name,
    )
    return [node]
[ "Map MXNet's Cast operator attributes to onnx's Cast operator\n and return the created node.\n " ]
Please provide a description of the function:

def convert_slice_axis(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    axes = int(attrs.get("axis"))
    starts = int(attrs.get("begin"))
    # validate before converting; int(None) would raise a TypeError
    ends = attrs.get("end", None)
    if ends is None or ends == 'None':
        raise ValueError("Slice: ONNX doesn't support 'None' in 'end' attribute")
    ends = int(ends)

    node = onnx.helper.make_node(
        "Slice",
        input_nodes,
        [name],
        axes=[axes],
        starts=[starts],
        ends=[ends],
        name=name,
    )
    return [node]
[ "Map MXNet's slice_axis operator attributes to onnx's Slice operator\n and return the created node.\n " ]
Please provide a description of the function:

def convert_slice_channel(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    num_outputs = int(attrs.get("num_outputs"))
    axis = int(attrs.get("axis", 1))
    squeeze_axis = int(attrs.get("squeeze_axis", 0))

    if squeeze_axis == 1 and num_outputs == 1:
        node = onnx.helper.make_node(
            "Squeeze",
            input_nodes,
            [name],
            axes=[axis],
            name=name,
        )
        return [node]
    elif squeeze_axis == 0 and num_outputs > 1:
        in_shape = kwargs.get('in_shape')[0]
        split = in_shape[axis] // num_outputs
        node = onnx.helper.make_node(
            "Split",
            input_nodes,
            [name + '_output' + str(i) for i in range(num_outputs)],
            axis=axis,
            split=[split for _ in range(num_outputs)],
            name=name,
        )
        return [node]
    else:
        raise NotImplementedError("SliceChannel operator with num_outputs>1 and "
                                  "squeeze_axis true is not implemented.")
[ "Map MXNet's SliceChannel operator attributes to onnx's Squeeze or Split\n operator based on squeeze_axis attribute\n and return the created node.\n " ]
Please provide a description of the function:

def convert_expand_dims(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    axis = int(attrs.get("axis"))

    node = onnx.helper.make_node(
        "Unsqueeze",
        input_nodes,
        [name],
        axes=[axis],
        name=name,
    )
    return [node]
[ "Map MXNet's expand_dims operator attributes to onnx's Unsqueeze operator\n and return the created node.\n " ]
Please provide a description of the function:

def convert_squeeze(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    axis = attrs.get("axis", None)
    if not axis:
        raise AttributeError("Squeeze: Missing axis attribute: ONNX currently requires axis to "
                             "be specified for squeeze operator")

    axis = convert_string_to_list(axis)

    node = onnx.helper.make_node(
        "Squeeze",
        input_nodes,
        [name],
        axes=axis,
        name=name,
    )
    return [node]
[ "Map MXNet's squeeze operator attributes to onnx's squeeze operator\n and return the created node.\n " ]
Please provide a description of the function:

def convert_depthtospace(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    blksize = int(attrs.get("block_size", 0))

    node = onnx.helper.make_node(
        "DepthToSpace",
        input_nodes,
        [name],
        blocksize=blksize,
        name=name,
    )
    return [node]
[ "Map MXNet's depth_to_space operator attributes to onnx's\n DepthToSpace operator and return the created node.\n " ]
Please provide a description of the function:

def convert_square(node, **kwargs):
    name, input_nodes, _ = get_inputs(node, kwargs)

    initializer = kwargs["initializer"]
    data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('int64')]

    power2_name = "square_tensor" + str(kwargs["idx"])
    tensor_node = onnx.helper.make_tensor_value_info(power2_name, data_type, (1,))
    initializer.append(
        onnx.helper.make_tensor(
            name=power2_name,
            data_type=data_type,
            dims=(1,),
            vals=[2],
            raw=False,
        )
    )

    input_nodes.append(power2_name)

    node = onnx.helper.make_node(
        "Pow",
        input_nodes,
        [name],
        name=name
    )
    return [tensor_node, node]
[ "Map MXNet's square operator attributes to onnx's Pow operator\n and return the created node.\n " ]
Please provide a description of the function:

def convert_sum(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    mx_axis = attrs.get("axis", None)
    axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None

    keepdims = get_boolean_attribute_value(attrs, "keepdims")

    if axes:
        node = onnx.helper.make_node(
            'ReduceSum',
            inputs=input_nodes,
            outputs=[name],
            axes=axes,
            keepdims=keepdims,
            name=name
        )
    else:
        node = onnx.helper.make_node(
            'ReduceSum',
            inputs=input_nodes,
            outputs=[name],
            keepdims=keepdims,
            name=name
        )
    return [node]
[ "Map MXNet's sum operator attributes to onnx's ReduceSum operator\n and return the created node.\n " ]
Please provide a description of the function:

def convert_hardsigmoid(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    # Converting to float32
    alpha = float(attrs.get("alpha", 0.2))
    beta = float(attrs.get("beta", 0.5))

    node = onnx.helper.make_node(
        'HardSigmoid',
        input_nodes,
        [name],
        alpha=alpha,
        beta=beta,
        name=name
    )
    return [node]
[ "Map MXNet's hard_sigmoid operator attributes to onnx's HardSigmoid operator\n and return the created node.\n " ]
Please provide a description of the function:

def convert_logsoftmax(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    # Converting to int
    axis = int(attrs.get("axis", -1))
    temp = attrs.get("temperature", 'None')

    if temp != 'None':
        raise AttributeError("LogSoftMax: ONNX supports only temperature=None")

    node = onnx.helper.make_node(
        'LogSoftmax',
        input_nodes,
        [name],
        axis=axis,
        name=name
    )
    return [node]
[ "Map MXNet's log_softmax operator attributes to onnx's LogSoftMax operator\n and return the created node.\n " ]
Please provide a description of the function:

def convert_norm(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    mx_axis = attrs.get("axis", None)
    axes = convert_string_to_list(str(mx_axis)) if mx_axis else None

    keepdims = get_boolean_attribute_value(attrs, "keepdims")
    ord = int(attrs.get("ord", 2))

    onnx_op_name = "ReduceL1" if ord == 1 else "ReduceL2"

    if axes:
        reduce_node = onnx.helper.make_node(
            onnx_op_name,
            input_nodes,
            [name],
            axes=axes,
            keepdims=keepdims,
            name=name
        )
        return [reduce_node]
    else:
        reduce_node = onnx.helper.make_node(
            onnx_op_name,
            input_nodes,
            [name],
            keepdims=keepdims,
            name=name
        )
        return [reduce_node]
[ "Map MXNet's norm operator attributes to onnx's ReduceL1 and ReduceL2 operators\n and return the created node.\n " ]
Please provide a description of the function:

def convert_multinomial(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)
    dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get("dtype", 'int32'))]
    sample_size = convert_string_to_list(attrs.get("shape", '1'))
    if len(sample_size) < 2:
        sample_size = sample_size[-1]
    else:
        raise AttributeError("ONNX currently supports integer sample_size only")
    node = onnx.helper.make_node(
        "Multinomial",
        input_nodes,
        [name],
        dtype=dtype,
        sample_size=sample_size,
        name=name,
    )
    return [node]
[ "Map MXNet's multinomial operator attributes to onnx's\n Multinomial operator and return the created node.\n " ]
Please provide a description of the function:

def convert_random_uniform(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    # Converting to float32
    low = float(attrs.get("low", 0))
    high = float(attrs.get("high", 1.0))
    shape = convert_string_to_list(attrs.get('shape', '[]'))
    dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get('dtype', 'float32'))]

    node = onnx.helper.make_node(
        'RandomUniform',
        input_nodes,
        [name],
        low=low,
        high=high,
        dtype=dtype,
        shape=shape,
        name=name
    )
    return [node]
[ "Map MXNet's random_uniform operator attributes to onnx's RandomUniform\n operator and return the created node.\n " ]
Please provide a description of the function:

def convert_random_normal(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    # Converting to float32
    mean = float(attrs.get("loc", 0))
    scale = float(attrs.get("scale", 1.0))
    shape = convert_string_to_list(attrs.get('shape', '[]'))
    dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get('dtype', 'float32'))]

    node = onnx.helper.make_node(
        'RandomNormal',
        input_nodes,
        [name],
        mean=mean,
        scale=scale,
        dtype=dtype,
        shape=shape,
        name=name
    )
    return [node]
[ "Map MXNet's random_normal operator attributes to onnx's RandomNormal\n operator and return the created node.\n " ]
Please provide a description of the function:

def convert_roipooling(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    pooled_shape = convert_string_to_list(attrs.get('pooled_size'))
    scale = float(attrs.get("spatial_scale"))

    node = onnx.helper.make_node(
        'MaxRoiPool',
        input_nodes,
        [name],
        pooled_shape=pooled_shape,
        spatial_scale=scale,
        name=name
    )
    return [node]
[ "Map MXNet's ROIPooling operator attributes to onnx's MaxRoiPool\n operator and return the created node.\n " ]
Please provide a description of the function:

def convert_tile(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    reps_list = convert_string_to_list(attrs["reps"])

    initializer = kwargs["initializer"]
    reps_shape_np = np.array(reps_list, dtype='int64')
    data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[reps_shape_np.dtype]
    dims = np.shape(reps_shape_np)

    output_shape_name = "reps_attr_tensor" + str(kwargs["idx"])
    tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)

    initializer.append(
        onnx.helper.make_tensor(
            name=output_shape_name,
            data_type=data_type,
            dims=dims,
            vals=reps_list,
            raw=False,
        )
    )

    input_nodes.append(output_shape_name)

    tile_node = onnx.helper.make_node(
        "Tile",
        input_nodes,
        [name],
        name=name
    )

    return [tensor_node, tile_node]
[ "Map MXNet's Tile operator attributes to onnx's Tile\n operator and return the created node.\n " ]
Please provide a description of the function:

def convert_broadcast_to(node, **kwargs):
    name, input_nodes, attrs = get_inputs(node, kwargs)

    shape_list = convert_string_to_list(attrs["shape"])

    initializer = kwargs["initializer"]
    output_shape_np = np.array(shape_list, dtype='int64')
    data_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[output_shape_np.dtype]
    dims = np.shape(output_shape_np)

    output_shape_name = "expand_attr_tensor" + str(kwargs["idx"])
    tensor_node = onnx.helper.make_tensor_value_info(output_shape_name, data_type, dims)

    initializer.append(
        onnx.helper.make_tensor(
            name=output_shape_name,
            data_type=data_type,
            dims=dims,
            vals=shape_list,
            raw=False,
        )
    )

    input_nodes.append(output_shape_name)

    expand_node = onnx.helper.make_node(
        "Expand",
        input_nodes,
        [name],
        name=name
    )

    return [tensor_node, expand_node]
[ "Map MXNet's broadcast_to operator attributes to onnx's Expand\n operator and return the created node.\n " ]
Please provide a description of the function:

def exe(self):
    return self._buckets[self.curr_bucket_key]['exe'][tuple(self.data_shapes.items())]
[ "Get the current executor\n\n Returns\n -------\n exe : mxnet.executor.Executor\n " ]
Please provide a description of the function:

def compute_internal(self, sym_name, bucket_kwargs=None, **arg_dict):
    data_shapes = {k: v.shape for k, v in arg_dict.items()}
    self.switch_bucket(bucket_kwargs=bucket_kwargs,
                       data_shapes=data_shapes)
    internal_sym = self.sym.get_internals()[sym_name]
    data_inputs = {k: mx.nd.empty(v, ctx=self.ctx)
                   for k, v in self.data_shapes.items()
                   if k in internal_sym.list_arguments()}
    params = {k: v for k, v in self.params.items()
              if k in internal_sym.list_arguments()}
    aux_states = {k: v for k, v in self.aux_states.items()
                  if k in internal_sym.list_auxiliary_states()}
    exe = internal_sym.bind(ctx=self.ctx,
                            args=dict(params, **data_inputs),
                            args_grad=None,
                            grad_req='null',
                            aux_states=aux_states,
                            shared_exec=self.exe)
    for k, v in arg_dict.items():
        exe.arg_dict[k][:] = v
    exe.forward(is_train=False)
    assert 1 == len(exe.outputs)
    for output in exe.outputs:
        output.wait_to_read()
    return exe.outputs[0]
[ "\n View the internal symbols using the forward function.\n\n :param sym_name:\n :param bucket_kwargs:\n :param input_dict:\n :return:\n " ]
Please provide a description of the function:

def init_from_fcnxs(ctx, fcnxs_symbol, fcnxs_args_from, fcnxs_auxs_from):
    fcnxs_args = fcnxs_args_from.copy()
    fcnxs_auxs = fcnxs_auxs_from.copy()
    for k, v in fcnxs_args.items():
        if v.context != ctx:
            fcnxs_args[k] = mx.nd.zeros(v.shape, ctx)
            v.copyto(fcnxs_args[k])
    for k, v in fcnxs_auxs.items():
        if v.context != ctx:
            fcnxs_auxs[k] = mx.nd.zeros(v.shape, ctx)
            v.copyto(fcnxs_auxs[k])
    data_shape = (1, 3, 500, 500)
    arg_names = fcnxs_symbol.list_arguments()
    arg_shapes, _, _ = fcnxs_symbol.infer_shape(data=data_shape)
    rest_params = {}
    deconv_params = {}
    # this is fcn8s init from fcn16s
    if 'score_pool3_weight' in arg_names:
        rest_params = dict([(x[0], mx.nd.zeros(x[1], ctx)) for x in zip(arg_names, arg_shapes)
                            if x[0] in ['score_pool3_bias', 'score_pool3_weight']])
        deconv_params = dict([(x[0], x[1]) for x in zip(arg_names, arg_shapes)
                              if x[0] in ["bigscore_weight", 'score4_weight']])
    # this is fcn16s init from fcn32s
    elif 'score_pool4_weight' in arg_names:
        rest_params = dict([(x[0], mx.nd.zeros(x[1], ctx)) for x in zip(arg_names, arg_shapes)
                            if x[0] in ['score_pool4_weight', 'score_pool4_bias']])
        deconv_params = dict([(x[0], x[1]) for x in zip(arg_names, arg_shapes)
                              if x[0] in ["bigscore_weight", 'score2_weight']])
    # this is fcn32s init
    else:
        logging.error("you are init the fcn32s model, so you should use init_from_vgg16()")
        sys.exit()
    fcnxs_args.update(rest_params)
    for k, v in deconv_params.items():
        filt = upsample_filt(v[3])
        initw = np.zeros(v)
        initw[range(v[0]), range(v[1]), :, :] = filt  # be careful: this is slice assignment
        fcnxs_args[k] = mx.nd.array(initw, ctx)
    return fcnxs_args, fcnxs_auxs
[ " use zero initialization for better convergence, because it tends to oputut 0,\n and the label 0 stands for background, which may occupy most size of one image.\n " ]
Please provide a description of the function:

def residual_unit(data, num_filter, stride, dim_match, name, bottle_neck=True, num_group=32,
                  bn_mom=0.9, workspace=256, memonger=False):
    if bottle_neck:
        # the same as https://github.com/facebook/fb.resnet.torch#notes, a bit difference with origin paper
        conv1 = mx.sym.Convolution(data=data, num_filter=int(num_filter*0.5), kernel=(1, 1), stride=(1, 1),
                                   pad=(0, 0), no_bias=True, workspace=workspace, name=name + '_conv1')
        bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
        act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
        conv2 = mx.sym.Convolution(data=act1, num_filter=int(num_filter*0.5), num_group=num_group, kernel=(3, 3),
                                   stride=stride, pad=(1, 1), no_bias=True, workspace=workspace, name=name + '_conv2')
        bn2 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
        act2 = mx.sym.Activation(data=bn2, act_type='relu', name=name + '_relu2')
        conv3 = mx.sym.Convolution(data=act2, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0),
                                   no_bias=True, workspace=workspace, name=name + '_conv3')
        bn3 = mx.sym.BatchNorm(data=conv3, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')

        if dim_match:
            shortcut = data
        else:
            shortcut_conv = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(1, 1), stride=stride,
                                               no_bias=True, workspace=workspace, name=name + '_sc')
            shortcut = mx.sym.BatchNorm(data=shortcut_conv, fix_gamma=False, eps=2e-5, momentum=bn_mom,
                                        name=name + '_sc_bn')
        if memonger:
            shortcut._set_attr(mirror_stage='True')
        eltwise = bn3 + shortcut
        return mx.sym.Activation(data=eltwise, act_type='relu', name=name + '_relu')
    else:
        conv1 = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(3, 3), stride=stride, pad=(1, 1),
                                   no_bias=True, workspace=workspace, name=name + '_conv1')
        bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn1')
        act1 = mx.sym.Activation(data=bn1, act_type='relu', name=name + '_relu1')
        conv2 = mx.sym.Convolution(data=act1, num_filter=num_filter, kernel=(3, 3), stride=(1, 1), pad=(1, 1),
                                   no_bias=True, workspace=workspace, name=name + '_conv2')
        bn2 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2')

        if dim_match:
            shortcut = data
        else:
            shortcut_conv = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=(1, 1), stride=stride,
                                               no_bias=True, workspace=workspace, name=name + '_sc')
            shortcut = mx.sym.BatchNorm(data=shortcut_conv, fix_gamma=False, eps=2e-5, momentum=bn_mom,
                                        name=name + '_sc_bn')
        if memonger:
            shortcut._set_attr(mirror_stage='True')
        eltwise = bn2 + shortcut
        return mx.sym.Activation(data=eltwise, act_type='relu', name=name + '_relu')
[ "Return ResNet Unit symbol for building ResNet\n Parameters\n ----------\n data : str\n Input data\n num_filter : int\n Number of output channels\n bnf : int\n Bottle neck channels factor with regard to num_filter\n stride : tuple\n Stride used in convolution\n dim_match : Boolean\n True means channel number between input and output is the same, otherwise means differ\n name : str\n Base name of the operators\n workspace : int\n Workspace used in convolution operator\n " ]
Please provide a description of the function:

def resnext(units, num_stages, filter_list, num_classes, num_group, image_shape,
            bottle_neck=True, bn_mom=0.9, workspace=256, dtype='float32', memonger=False):
    num_unit = len(units)
    assert num_unit == num_stages
    data = mx.sym.Variable(name='data')
    if dtype == 'float32':
        data = mx.sym.identity(data=data, name='id')
    else:
        if dtype == 'float16':
            data = mx.sym.Cast(data=data, dtype=np.float16)
    data = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=2e-5, momentum=bn_mom, name='bn_data')
    (nchannel, height, width) = image_shape
    if height <= 32:            # such as cifar10
        body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(3, 3), stride=(1, 1), pad=(1, 1),
                                  no_bias=True, name="conv0", workspace=workspace)
    else:                       # often expected to be 224 such as imagenet
        body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(7, 7), stride=(2, 2), pad=(3, 3),
                                  no_bias=True, name="conv0", workspace=workspace)
        body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='bn0')
        body = mx.sym.Activation(data=body, act_type='relu', name='relu0')
        body = mx.sym.Pooling(data=body, kernel=(3, 3), stride=(2, 2), pad=(1, 1), pool_type='max')

    for i in range(num_stages):
        body = residual_unit(body, filter_list[i+1], (1 if i == 0 else 2, 1 if i == 0 else 2), False,
                             name='stage%d_unit%d' % (i + 1, 1), bottle_neck=bottle_neck,
                             num_group=num_group, bn_mom=bn_mom, workspace=workspace, memonger=memonger)
        for j in range(units[i]-1):
            body = residual_unit(body, filter_list[i+1], (1, 1), True,
                                 name='stage%d_unit%d' % (i + 1, j + 2), bottle_neck=bottle_neck,
                                 num_group=num_group, bn_mom=bn_mom, workspace=workspace, memonger=memonger)
    pool1 = mx.sym.Pooling(data=body, global_pool=True, kernel=(7, 7), pool_type='avg', name='pool1')
    flat = mx.sym.Flatten(data=pool1)
    fc1 = mx.sym.FullyConnected(data=flat, num_hidden=num_classes, name='fc1')
    if dtype == 'float16':
        fc1 = mx.sym.Cast(data=fc1, dtype=np.float32)
    return mx.sym.SoftmaxOutput(data=fc1, name='softmax')
[ "Return ResNeXt symbol of\n Parameters\n ----------\n units : list\n Number of units in each stage\n num_stages : int\n Number of stage\n filter_list : list\n Channel size of each stage\n num_classes : int\n Ouput size of symbol\n num_groupes: int\n Number of conv groups\n dataset : str\n Dataset type, only cifar10 and imagenet supports\n workspace : int\n Workspace used in convolution operator\n dtype : str\n Precision (float32 or float16)\n " ]
Please provide a description of the function:

def get_symbol(num_classes, num_layers, image_shape, num_group=32, conv_workspace=256, dtype='float32', **kwargs):
    image_shape = [int(l) for l in image_shape.split(',')]
    (nchannel, height, width) = image_shape
    if height <= 32:
        num_stages = 3
        if (num_layers-2) % 9 == 0 and num_layers >= 164:
            per_unit = [(num_layers-2)//9]
            filter_list = [16, 64, 128, 256]
            bottle_neck = True
        elif (num_layers-2) % 6 == 0 and num_layers < 164:
            per_unit = [(num_layers-2)//6]
            filter_list = [16, 16, 32, 64]
            bottle_neck = False
        else:
            raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))
        units = per_unit * num_stages
    else:
        if num_layers >= 50:
            filter_list = [64, 256, 512, 1024, 2048]
            bottle_neck = True
        else:
            filter_list = [64, 64, 128, 256, 512]
            bottle_neck = False
        num_stages = 4
        if num_layers == 18:
            units = [2, 2, 2, 2]
        elif num_layers == 34:
            units = [3, 4, 6, 3]
        elif num_layers == 50:
            units = [3, 4, 6, 3]
        elif num_layers == 101:
            units = [3, 4, 23, 3]
        elif num_layers == 152:
            units = [3, 8, 36, 3]
        elif num_layers == 200:
            units = [3, 24, 36, 3]
        elif num_layers == 269:
            units = [3, 30, 48, 8]
        else:
            raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers))

    return resnext(units=units,
                   num_stages=num_stages,
                   filter_list=filter_list,
                   num_classes=num_classes,
                   num_group=num_group,
                   image_shape=image_shape,
                   bottle_neck=bottle_neck,
                   workspace=conv_workspace,
                   dtype=dtype)
[ "\n Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py\n Original author Wei Wu\n " ]
Please provide a description of the function:

def var(name, attr=None, shape=None, lr_mult=None, wd_mult=None, dtype=None,
        init=None, stype=None, **kwargs):
    if not isinstance(name, string_types):
        raise TypeError('Expect a string for variable `name`')
    handle = SymbolHandle()
    check_call(_LIB.MXSymbolCreateVariable(c_str(name), ctypes.byref(handle)))
    ret = Symbol(handle)
    if not hasattr(AttrScope._current, "value"):
        AttrScope._current.value = AttrScope()
    attr = AttrScope._current.value.get(attr)
    attr = {} if attr is None else attr
    if shape is not None:
        attr['__shape__'] = str(shape)
    if lr_mult is not None:
        attr['__lr_mult__'] = str(lr_mult)
    if wd_mult is not None:
        attr['__wd_mult__'] = str(wd_mult)
    if dtype is not None:
        attr['__dtype__'] = str(_DTYPE_NP_TO_MX[_numpy.dtype(dtype).type])
    if init is not None:
        if not isinstance(init, string_types):
            init = init.dumps()
        attr['__init__'] = init
    if stype is not None:
        attr['__storage_type__'] = str(_STORAGE_TYPE_STR_TO_ID[stype])
    for k, v in kwargs.items():
        if k.startswith('__') and k.endswith('__'):
            attr[k] = str(v)
        else:
            raise ValueError('Attribute name=%s is not supported.'
                             ' Additional attributes must start and end with double underscores,'
                             ' e.g, __yourattr__' % k)
    ret._set_attr(**attr)
    return ret
[ "Creates a symbolic variable with specified name.\n\n Example\n -------\n >>> data = mx.sym.Variable('data', attr={'a': 'b'})\n >>> data\n <Symbol data>\n >>> csr_data = mx.sym.Variable('csr_data', stype='csr')\n >>> csr_data\n <Symbol csr_data>\n >>> row_sparse_weight = mx.sym.Variable('weight', stype='row_sparse')\n >>> row_sparse_weight\n <Symbol weight>\n\n Parameters\n ----------\n name : str\n Variable name.\n attr : Dict of strings\n Additional attributes to set on the variable. Format {string : string}.\n shape : tuple\n The shape of a variable. If specified, this will be used during the shape inference.\n If one has specified a different shape for this variable using\n a keyword argument when calling shape inference, this shape information will be ignored.\n lr_mult : float\n The learning rate multiplier for input variable.\n wd_mult : float\n Weight decay multiplier for input variable.\n dtype : str or numpy.dtype\n The dtype for input variable. If not specified, this value will be inferred.\n init : initializer (mxnet.init.*)\n Initializer for this variable to (optionally) override the default initializer.\n stype : str\n The storage type of the variable, such as 'row_sparse', 'csr', 'default', etc\n kwargs : Additional attribute variables\n Additional attributes must start and end with double underscores.\n\n Returns\n -------\n variable : Symbol\n A symbol corresponding to an input to the computation graph.\n " ]
Please provide a description of the function:

def Group(symbols):
    if not symbols or any(not isinstance(sym, Symbol) for sym in symbols):
        raise TypeError('Expected a list of symbols as input')
    handle = SymbolHandle()
    check_call(_LIB.MXSymbolCreateGroup(
        mx_uint(len(symbols)),
        c_handle_array(symbols), ctypes.byref(handle)))
    return Symbol(handle)
[ "Creates a symbol that contains a collection of other symbols, grouped together.\n\n Example\n -------\n >>> a = mx.sym.Variable('a')\n >>> b = mx.sym.Variable('b')\n >>> mx.sym.Group([a,b])\n <Symbol Grouped>\n\n Parameters\n ----------\n symbols : list\n List of symbols to be grouped.\n\n Returns\n -------\n sym : Symbol\n A group symbol.\n " ]
Please provide a description of the function:

def load(fname):
    if not isinstance(fname, string_types):
        raise TypeError('fname need to be string')
    handle = SymbolHandle()
    check_call(_LIB.MXSymbolCreateFromFile(c_str(fname), ctypes.byref(handle)))
    return Symbol(handle)
[ "Loads symbol from a JSON file.\n\n You can also use pickle to do the job if you only work on python.\n The advantage of load/save is the file is language agnostic.\n This means the file saved using save can be loaded by other language binding of mxnet.\n You also get the benefit being able to directly load/save from cloud storage(S3, HDFS).\n\n Parameters\n ----------\n fname : str\n The name of the file, examples:\n\n - `s3://my-bucket/path/my-s3-symbol`\n - `hdfs://my-bucket/path/my-hdfs-symbol`\n - `/path-to/my-local-symbol`\n\n Returns\n -------\n sym : Symbol\n The loaded symbol.\n\n See Also\n --------\n Symbol.save : Used to save symbol into file.\n " ]
Please provide a description of the function:

def load_json(json_str):
    if not isinstance(json_str, string_types):
        raise TypeError('fname required to be string')
    handle = SymbolHandle()
    check_call(_LIB.MXSymbolCreateFromJSON(c_str(json_str), ctypes.byref(handle)))
    return Symbol(handle)
[ "Loads symbol from json string.\n\n Parameters\n ----------\n json_str : str\n A JSON string.\n\n Returns\n -------\n sym : Symbol\n The loaded symbol.\n\n See Also\n --------\n Symbol.tojson : Used to save symbol into json string.\n " ]
Please provide a description of the function:

def pow(base, exp):
    if isinstance(base, Symbol) and isinstance(exp, Symbol):
        return _internal._Power(base, exp)
    if isinstance(base, Symbol) and isinstance(exp, Number):
        return _internal._PowerScalar(base, scalar=exp)
    if isinstance(base, Number) and isinstance(exp, Symbol):
        return _internal._RPowerScalar(exp, scalar=base)
    if isinstance(base, Number) and isinstance(exp, Number):
        return base**exp
    else:
        raise TypeError('types (%s, %s) not supported' % (str(type(base)), str(type(exp))))
[ "Returns element-wise result of base element raised to powers from exp element.\n\n Both inputs can be Symbol or scalar number.\n Broadcasting is not supported. Use `broadcast_pow` instead.\n\n `sym.pow` is being deprecated, please use `sym.power` instead.\n\n Parameters\n ---------\n base : Symbol or scalar\n The base symbol\n exp : Symbol or scalar\n The exponent symbol\n\n Returns\n -------\n Symbol or scalar\n The bases in x raised to the exponents in y.\n\n Examples\n --------\n >>> mx.sym.pow(2, 3)\n 8\n >>> x = mx.sym.Variable('x')\n >>> y = mx.sym.Variable('y')\n >>> z = mx.sym.pow(x, 2)\n >>> z.eval(x=mx.nd.array([1,2]))[0].asnumpy()\n array([ 1., 4.], dtype=float32)\n >>> z = mx.sym.pow(3, y)\n >>> z.eval(y=mx.nd.array([2,3]))[0].asnumpy()\n array([ 9., 27.], dtype=float32)\n >>> z = mx.sym.pow(x, y)\n >>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([2,3]))[0].asnumpy()\n array([ 9., 64.], dtype=float32)\n " ]
Please provide a description of the function:

def maximum(left, right):
    if isinstance(left, Symbol) and isinstance(right, Symbol):
        return _internal._Maximum(left, right)
    if isinstance(left, Symbol) and isinstance(right, Number):
        return _internal._MaximumScalar(left, scalar=right)
    if isinstance(left, Number) and isinstance(right, Symbol):
        return _internal._MaximumScalar(right, scalar=left)
    if isinstance(left, Number) and isinstance(right, Number):
        return left if left > right else right
    else:
        raise TypeError('types (%s, %s) not supported' % (str(type(left)), str(type(right))))
[ "Returns element-wise maximum of the input elements.\n\n Both inputs can be Symbol or scalar number. Broadcasting is not supported.\n\n Parameters\n ---------\n left : Symbol or scalar\n First symbol to be compared.\n right : Symbol or scalar\n Second symbol to be compared.\n\n Returns\n -------\n Symbol or scalar\n The element-wise maximum of the input symbols.\n\n Examples\n --------\n >>> mx.sym.maximum(2, 3.5)\n 3.5\n >>> x = mx.sym.Variable('x')\n >>> y = mx.sym.Variable('y')\n >>> z = mx.sym.maximum(x, 4)\n >>> z.eval(x=mx.nd.array([3,5,2,10]))[0].asnumpy()\n array([ 4., 5., 4., 10.], dtype=float32)\n >>> z = mx.sym.maximum(x, y)\n >>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([10,2]))[0].asnumpy()\n array([ 10., 4.], dtype=float32)\n " ]
Please provide a description of the function:

def minimum(left, right):
    if isinstance(left, Symbol) and isinstance(right, Symbol):
        return _internal._Minimum(left, right)
    if isinstance(left, Symbol) and isinstance(right, Number):
        return _internal._MinimumScalar(left, scalar=right)
    if isinstance(left, Number) and isinstance(right, Symbol):
        return _internal._MinimumScalar(right, scalar=left)
    if isinstance(left, Number) and isinstance(right, Number):
        return left if left < right else right
    else:
        raise TypeError('types (%s, %s) not supported' % (str(type(left)), str(type(right))))
[ "Returns element-wise minimum of the input elements.\n\n Both inputs can be Symbol or scalar number. Broadcasting is not supported.\n\n Parameters\n ---------\n left : Symbol or scalar\n First symbol to be compared.\n right : Symbol or scalar\n Second symbol to be compared.\n\n Returns\n -------\n Symbol or scalar\n The element-wise minimum of the input symbols.\n\n Examples\n --------\n >>> mx.sym.minimum(2, 3.5)\n 2\n >>> x = mx.sym.Variable('x')\n >>> y = mx.sym.Variable('y')\n >>> z = mx.sym.minimum(x, 4)\n >>> z.eval(x=mx.nd.array([3,5,2,10]))[0].asnumpy()\n array([ 3., 4., 2., 4.], dtype=float32)\n >>> z = mx.sym.minimum(x, y)\n >>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([10,2]))[0].asnumpy()\n array([ 3., 2.], dtype=float32)\n " ]
Please provide a description of the function:def hypot(left, right): if isinstance(left, Symbol) and isinstance(right, Symbol): return _internal._Hypot(left, right) if isinstance(left, Symbol) and isinstance(right, Number): return _internal._HypotScalar(left, scalar=right) if isinstance(left, Number) and isinstance(right, Symbol): return _internal._HypotScalar(right, scalar=left) if isinstance(left, Number) and isinstance(right, Number): return _numpy.hypot(left, right) else: raise TypeError('types (%s, %s) not supported' % (str(type(left)), str(type(right))))
[ "Given the \"legs\" of a right triangle, returns its hypotenuse.\n\n Equivalent to :math:`\\\\sqrt(left^2 + right^2)`, element-wise.\n Both inputs can be Symbol or scalar number. Broadcasting is not supported.\n\n Parameters\n ---------\n left : Symbol or scalar\n First leg of the triangle(s).\n right : Symbol or scalar\n Second leg of the triangle(s).\n\n Returns\n -------\n Symbol or scalar\n The hypotenuse of the triangle(s)\n\n Examples\n --------\n >>> mx.sym.hypot(3, 4)\n 5.0\n >>> x = mx.sym.Variable('x')\n >>> y = mx.sym.Variable('y')\n >>> z = mx.sym.hypot(x, 4)\n >>> z.eval(x=mx.nd.array([3,5,2]))[0].asnumpy()\n array([ 5., 6.40312433, 4.47213602], dtype=float32)\n >>> z = mx.sym.hypot(x, y)\n >>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([10,2]))[0].asnumpy()\n array([ 10.44030666, 4.47213602], dtype=float32)\n " ]
Please provide a description of the function:def eye(N, M=0, k=0, dtype=None, **kwargs): if dtype is None: dtype = _numpy.float32 return _internal._eye(N, M, k, dtype=dtype, **kwargs)
[ "Returns a new symbol of 2-D shpae, filled with ones on the diagonal and zeros elsewhere.\n\n Parameters\n ----------\n N: int\n Number of rows in the output.\n M: int, optional\n Number of columns in the output. If 0, defaults to N.\n k: int, optional\n Index of the diagonal: 0 (the default) refers to the main diagonal,\n a positive value refers to an upper diagonal,\n and a negative value to a lower diagonal.\n dtype : str or numpy.dtype, optional\n The value type of the inner value, default to ``np.float32``.\n\n Returns\n -------\n out : Symbol\n The created Symbol.\n " ]
Please provide a description of the function:def zeros(shape, dtype=None, **kwargs): if dtype is None: dtype = _numpy.float32 return _internal._zeros(shape=shape, dtype=dtype, **kwargs)
[ "Returns a new symbol of given shape and type, filled with zeros.\n\n Parameters\n ----------\n shape : int or sequence of ints\n Shape of the new array.\n dtype : str or numpy.dtype, optional\n The value type of the inner value, default to ``np.float32``.\n\n Returns\n -------\n out : Symbol\n The created Symbol.\n " ]
Please provide a description of the function:def ones(shape, dtype=None, **kwargs): if dtype is None: dtype = _numpy.float32 return _internal._ones(shape=shape, dtype=dtype, **kwargs)
[ "Returns a new symbol of given shape and type, filled with ones.\n\n Parameters\n ----------\n shape : int or sequence of ints\n Shape of the new array.\n dtype : str or numpy.dtype, optional\n The value type of the inner value, default to ``np.float32``.\n\n Returns\n -------\n out : Symbol\n The created Symbol\n " ]
Please provide a description of the function:def full(shape, val, dtype=None, **kwargs): if dtype is None: dtype = _numpy.float32 return _internal._full(shape=shape, dtype=dtype, value=float(val), **kwargs)
[ "Returns a new array of given shape and type, filled with the given value `val`.\n\n Parameters\n ----------\n shape : int or sequence of ints\n Shape of the new array.\n val : scalar\n Fill value.\n dtype : str or numpy.dtype, optional\n The value type of the inner value, default to ``np.float32``.\n\n Returns\n -------\n out : Symbol\n The created Symbol\n " ]
Please provide a description of the function:def arange(start, stop=None, step=1.0, repeat=1, infer_range=False, name=None, dtype=None): if dtype is None: dtype = _numpy.float32 return _internal._arange(start=start, stop=stop, step=step, repeat=repeat, infer_range=infer_range, name=name, dtype=dtype)
[ "Returns evenly spaced values within a given interval.\n\n Values are generated within the half-open interval [`start`, `stop`). In other\n words, the interval includes `start` but excludes `stop`. The function is\n similar to the built-in Python function `range` and to `numpy.arange`,\n but returns a `Symbol`.\n\n Parameters\n ----------\n start : number, optional\n Start of interval. The interval includes this value. The default start value is 0.\n stop : number\n End of interval. The interval does not include this value.\n step : number, optional\n Spacing between values.\n repeat : int, optional\n \"The repeating time of all elements.\n E.g repeat=3, the element a will be repeated three times --> a, a, a.\n infer_range : boolean, optional\n When set to True, infer the stop position from the start, step,\n repeat, and output tensor size.\n dtype : str or numpy.dtype, optional\n The value type of the inner value, default to ``np.float32``.\n\n Returns\n -------\n out : Symbol\n The created Symbol\n " ]
Please provide a description of the function:def histogram(a, bins=10, range=None, **kwargs): if isinstance(bins, Symbol): return _internal._histogram(data=a, bins=bins, **kwargs) elif isinstance(bins, integer_types): if range is None: raise ValueError("null range is not supported in symbol mode") return _internal._histogram(data=a, bin_cnt=bins, range=range, **kwargs) raise ValueError("bins argument should be either an integer or a Symbol")
[ "Compute the histogram of the input data.\n\n Parameters\n ----------\n a : NDArray\n Input data. The histogram is computed over the flattened array.\n bins : int or sequence of scalars\n If bins is an int, it defines the number of equal-width bins in the\n given range (10, by default). If bins is a sequence, it defines the bin edges,\n including the rightmost edge, allowing for non-uniform bin widths.\n range : (float, float), required if bins is an integer\n The lower and upper range of the bins. If not provided, range is simply (a.min(), a.max()).\n Values outside the range are ignored. The first element of the range must be less than or\n equal to the second. range affects the automatic bin computation as well, the range will\n be equally divided by the number of bins.\n\n Returns\n -------\n out : Symbol\n The created Symbol\n " ]
Please provide a description of the function:def split_v2(ary, indices_or_sections, axis=0, squeeze_axis=False): indices = [] sections = 0 if isinstance(indices_or_sections, int): sections = indices_or_sections elif isinstance(indices_or_sections, tuple): indices = [0] + list(indices_or_sections) else: raise ValueError('indices_or_sections must be either an int or a tuple of ints') return _internal._split_v2(ary, indices, axis, squeeze_axis, sections)
[ "Split an array into multiple sub-arrays.\n\n Parameters\n ----------\n ary : NDArray\n Array to be divided into sub-arrays.\n indices_or_sections : int or tuple of ints\n If `indices_or_sections` is an integer, N, the array will be divided\n into N equal arrays along `axis`. If such a split is not possible,\n an error is raised.\n If `indices_or_sections` is a 1-D array of sorted integers, the entries\n indicate where along `axis` the array is split. For example,\n ``[2, 3]`` would, for ``axis=0``, result in\n - ary[:2]\n - ary[2:3]\n - ary[3:]\n If an index exceeds the dimension of the array along `axis`,\n an empty sub-array is returned correspondingly.\n axis : int, optional\n The axis along which to split, default is 0.\n squeeze_axis: boolean, optional\n Whether to squeeze the axis of sub-arrays or not, only useful when size\n of the sub-arrays are 1 on the `axis`. Default is False.\n\n Returns\n -------\n out : Symbol\n The created Symbol\n " ]
Please provide a description of the function:def name(self): ret = ctypes.c_char_p() success = ctypes.c_int() check_call(_LIB.MXSymbolGetName( self.handle, ctypes.byref(ret), ctypes.byref(success))) if success.value != 0: return py_str(ret.value) else: return None
[ "Gets name string from the symbol, this function only works for non-grouped symbol.\n\n Returns\n -------\n value : str\n The name of this symbol, returns ``None`` for grouped symbol.\n " ]
Please provide a description of the function:def attr(self, key): ret = ctypes.c_char_p() success = ctypes.c_int() check_call(_LIB.MXSymbolGetAttr( self.handle, c_str(key), ctypes.byref(ret), ctypes.byref(success))) if success.value != 0: return py_str(ret.value) else: return None
[ "Returns the attribute string for corresponding input key from the symbol.\n\n This function only works for non-grouped symbols.\n\n Example\n -------\n >>> data = mx.sym.Variable('data', attr={'mood': 'angry'})\n >>> data.attr('mood')\n 'angry'\n\n Parameters\n ----------\n key : str\n The key corresponding to the desired attribute.\n\n Returns\n -------\n value : str\n The desired attribute value, returns ``None`` if the attribute does not exist.\n " ]
Please provide a description of the function:def list_attr(self, recursive=False): if recursive: raise DeprecationWarning("Symbol.list_attr with recursive=True has been deprecated. " "Please use attr_dict instead.") size = mx_uint() pairs = ctypes.POINTER(ctypes.c_char_p)() f_handle = _LIB.MXSymbolListAttrShallow check_call(f_handle(self.handle, ctypes.byref(size), ctypes.byref(pairs))) return {py_str(pairs[i * 2]): py_str(pairs[i * 2 + 1]) for i in range(size.value)}
[ "Gets all attributes from the symbol.\n\n Example\n -------\n >>> data = mx.sym.Variable('data', attr={'mood': 'angry'})\n >>> data.list_attr()\n {'mood': 'angry'}\n\n Returns\n -------\n ret : Dict of str to str\n A dictionary mapping attribute keys to values.\n " ]
Please provide a description of the function:def attr_dict(self): size = mx_uint() pairs = ctypes.POINTER(ctypes.c_char_p)() f_handle = _LIB.MXSymbolListAttr check_call(f_handle(self.handle, ctypes.byref(size), ctypes.byref(pairs))) ret = {} for i in range(size.value): name, key = py_str(pairs[i * 2]).split('$') val = py_str(pairs[i * 2 + 1]) if name not in ret: ret[name] = {} ret[name][key] = val return ret
[ "Recursively gets all attributes from the symbol and its children.\n\n Example\n -------\n >>> a = mx.sym.Variable('a', attr={'a1':'a2'})\n >>> b = mx.sym.Variable('b', attr={'b1':'b2'})\n >>> c = a+b\n >>> c.attr_dict()\n {'a': {'a1': 'a2'}, 'b': {'b1': 'b2'}}\n\n Returns\n -------\n ret : Dict of str to dict\n There is a key in the returned dict for every child with non-empty attribute set.\n For each symbol, the name of the symbol is its key in the dict\n and the correspond value is that symbol's attribute list (itself a dictionary).\n " ]
Please provide a description of the function:def _set_attr(self, **kwargs): for key, value in kwargs.items(): if not isinstance(value, string_types): raise ValueError("Set Attr only accepts string values") check_call(_LIB.MXSymbolSetAttr( self.handle, c_str(key), c_str(str(value))))
[ "Sets an attribute of the symbol.\n\n For example. A._set_attr(foo=\"bar\") adds the mapping ``\"{foo: bar}\"``\n to the symbol's attribute dictionary.\n\n Parameters\n ----------\n **kwargs\n The attributes to set\n " ]
Please provide a description of the function:def get_internals(self): handle = SymbolHandle() check_call(_LIB.MXSymbolGetInternals( self.handle, ctypes.byref(handle))) return Symbol(handle=handle)
[ "Gets a new grouped symbol `sgroup`. The output of `sgroup` is a list of\n outputs of all of the internal nodes.\n\n Consider the following code:\n\n Example\n -------\n >>> a = mx.sym.var('a')\n >>> b = mx.sym.var('b')\n >>> c = a + b\n >>> d = c.get_internals()\n >>> d\n <Symbol Grouped>\n >>> d.list_outputs()\n ['a', 'b', '_plus4_output']\n\n Returns\n -------\n sgroup : Symbol\n A symbol group containing all internal and leaf nodes of the computation graph\n used to compute the symbol.\n " ]
Please provide a description of the function:def get_children(self): handle = SymbolHandle() check_call(_LIB.MXSymbolGetChildren( self.handle, ctypes.byref(handle))) ret = Symbol(handle=handle) if len(ret.list_outputs()) == 0: return None return ret
[ "Gets a new grouped symbol whose output contains\n inputs to output nodes of the original symbol.\n\n Example\n -------\n >>> x = mx.sym.Variable('x')\n >>> y = mx.sym.Variable('y')\n >>> z = mx.sym.Variable('z')\n >>> a = y+z\n >>> b = x+a\n >>> b.get_children()\n <Symbol Grouped>\n >>> b.get_children().list_outputs()\n ['x', '_plus10_output']\n >>> b.get_children().get_children().list_outputs()\n ['y', 'z']\n\n Returns\n -------\n sgroup : Symbol or None\n The children of the head node. If the symbol has no\n inputs then ``None`` will be returned.\n " ]
Please provide a description of the function:def list_arguments(self): size = ctypes.c_uint() sarr = ctypes.POINTER(ctypes.c_char_p)() check_call(_LIB.MXSymbolListArguments( self.handle, ctypes.byref(size), ctypes.byref(sarr))) return [py_str(sarr[i]) for i in range(size.value)]
[ "Lists all the arguments in the symbol.\n\n Example\n -------\n >>> a = mx.sym.var('a')\n >>> b = mx.sym.var('b')\n >>> c = a + b\n >>> c.list_arguments\n ['a', 'b']\n\n Returns\n -------\n args : list of string\n List containing the names of all the arguments required to compute the symbol.\n " ]
Please provide a description of the function:def list_outputs(self): size = ctypes.c_uint() sarr = ctypes.POINTER(ctypes.c_char_p)() check_call(_LIB.MXSymbolListOutputs( self.handle, ctypes.byref(size), ctypes.byref(sarr))) return [py_str(sarr[i]) for i in range(size.value)]
[ "Lists all the outputs in the symbol.\n\n Example\n -------\n >>> a = mx.sym.var('a')\n >>> b = mx.sym.var('b')\n >>> c = a + b\n >>> c.list_outputs()\n ['_plus12_output']\n\n Returns\n -------\n list of str\n List of all the outputs.\n For most symbols, this list contains only the name of this symbol.\n For symbol groups, this is a list with the names of all symbols\n in the group.\n " ]
Please provide a description of the function:def list_auxiliary_states(self): size = ctypes.c_uint() sarr = ctypes.POINTER(ctypes.c_char_p)() check_call(_LIB.MXSymbolListAuxiliaryStates( self.handle, ctypes.byref(size), ctypes.byref(sarr))) return [py_str(sarr[i]) for i in range(size.value)]
[ "Lists all the auxiliary states in the symbol.\n\n Example\n -------\n >>> a = mx.sym.var('a')\n >>> b = mx.sym.var('b')\n >>> c = a + b\n >>> c.list_auxiliary_states()\n []\n\n Example of auxiliary states in `BatchNorm`.\n\n >>> data = mx.symbol.Variable('data')\n >>> weight = mx.sym.Variable(name='fc1_weight')\n >>> fc1 = mx.symbol.FullyConnected(data = data, weight=weight, name='fc1', num_hidden=128)\n >>> fc2 = mx.symbol.BatchNorm(fc1, name='batchnorm0')\n >>> fc2.list_auxiliary_states()\n ['batchnorm0_moving_mean', 'batchnorm0_moving_var']\n\n Returns\n -------\n aux_states : list of str\n List of the auxiliary states in input symbol.\n\n Notes\n -----\n Auxiliary states are special states of symbols that do not correspond to an argument,\n and are not updated by gradient descent. Common examples of auxiliary states\n include the `moving_mean` and `moving_variance` in `BatchNorm`.\n Most operators do not have auxiliary states.\n " ]
Please provide a description of the function:def list_inputs(self): size = ctypes.c_uint() sarr = ctypes.POINTER(ctypes.c_char_p)() check_call(_LIB.NNSymbolListInputNames( self.handle, 0, ctypes.byref(size), ctypes.byref(sarr))) return [py_str(sarr[i]) for i in range(size.value)]
[ "Lists all arguments and auxiliary states of this Symbol.\n\n Returns\n -------\n inputs : list of str\n List of all inputs.\n\n Examples\n --------\n >>> bn = mx.sym.BatchNorm(name='bn')\n >>> bn.list_arguments()\n ['bn_data', 'bn_gamma', 'bn_beta']\n >>> bn.list_auxiliary_states()\n ['bn_moving_mean', 'bn_moving_var']\n >>> bn.list_inputs()\n ['bn_data', 'bn_gamma', 'bn_beta', 'bn_moving_mean', 'bn_moving_var']\n " ]
Please provide a description of the function:def infer_type(self, *args, **kwargs): try: res = self._infer_type_impl(False, *args, **kwargs) if res[1] is None: arg_types, _, _ = self._infer_type_impl(True, *args, **kwargs) arg_names = self.list_arguments() unknowns = [] for name, dtype in zip(arg_names, arg_types): if not dtype: if len(unknowns) >= 10: unknowns.append('...') break unknowns.append('%s: %s' % (name, str(dtype))) warnings.warn( "Cannot decide type for the following arguments. " + "Consider providing them as input:\n\t" + "\n\t".join(unknowns), stacklevel=2) return res except MXNetError: print("infer_type error. Arguments:") for i, arg in enumerate(args): print(" #%d: %s" % (i, arg)) for k, v in kwargs.items(): print(" %s: %s" % (k, v)) raise
[ "Infers the type of all arguments and all outputs, given the known types\n for some arguments.\n\n This function takes the known types of some arguments in either positional way\n or keyword argument way as input. It returns a tuple of `None` values\n if there is not enough information to deduce the missing types.\n\n Inconsistencies in the known types will cause an error to be raised.\n\n Example\n -------\n >>> a = mx.sym.var('a')\n >>> b = mx.sym.var('b')\n >>> c = a + b\n >>> arg_types, out_types, aux_types = c.infer_type(a='float32')\n >>> arg_types\n [<type 'numpy.float32'>, <type 'numpy.float32'>]\n >>> out_types\n [<type 'numpy.float32'>]\n >>> aux_types\n []\n\n Parameters\n ----------\n *args :\n Type of known arguments in a positional way.\n Unknown type can be marked as None.\n\n **kwargs :\n Keyword arguments of known types.\n\n Returns\n -------\n arg_types : list of numpy.dtype or None\n List of argument types.\n The order is same as the order of list_arguments().\n out_types : list of numpy.dtype or None\n List of output types.\n The order is same as the order of list_outputs().\n aux_types : list of numpy.dtype or None\n List of auxiliary state types.\n The order is same as the order of list_auxiliary_states().\n " ]
Please provide a description of the function:def _infer_type_impl(self, partial, *args, **kwargs): # pylint: disable=too-many-locals if len(args) != 0 and len(kwargs) != 0: raise ValueError('Can only specify known argument \ types either by positional or kwargs way.') sdata = [] if len(args) != 0: keys = c_array(ctypes.c_char_p, []) for s in args: if s is not None: s = _numpy.dtype(s).type if s not in _DTYPE_NP_TO_MX: raise TypeError('Argument need to be one of ' + str(_DTYPE_NP_TO_MX)) sdata.append(_DTYPE_NP_TO_MX[s]) else: sdata.append(-1) else: str_keys = [] for k, v in kwargs.items(): v = _numpy.dtype(v).type if v in _DTYPE_NP_TO_MX: str_keys.append(k) sdata.append(_DTYPE_NP_TO_MX[v]) keys = c_str_array(str_keys) arg_type_size = mx_uint() arg_type_data = ctypes.POINTER(ctypes.c_int)() out_type_size = mx_uint() out_type_data = ctypes.POINTER(ctypes.c_int)() aux_type_size = mx_uint() aux_type_data = ctypes.POINTER(ctypes.c_int)() complete = ctypes.c_int() if partial: infer_func = _LIB.MXSymbolInferTypePartial else: infer_func = _LIB.MXSymbolInferType check_call(infer_func( self.handle, mx_uint(len(sdata)), keys, c_array_buf(ctypes.c_int, array('i', sdata)), ctypes.byref(arg_type_size), ctypes.byref(arg_type_data), ctypes.byref(out_type_size), ctypes.byref(out_type_data), ctypes.byref(aux_type_size), ctypes.byref(aux_type_data), ctypes.byref(complete))) if complete.value != 0: arg_types = [ _DTYPE_MX_TO_NP[arg_type_data[i]] for i in range(arg_type_size.value)] out_types = [ _DTYPE_MX_TO_NP[out_type_data[i]] for i in range(out_type_size.value)] aux_types = [ _DTYPE_MX_TO_NP[aux_type_data[i]] for i in range(aux_type_size.value)] return (arg_types, out_types, aux_types) else: return (None, None, None)
[ "The actual implementation for calling type inference API." ]
Please provide a description of the function:def infer_shape(self, *args, **kwargs): try: res = self._infer_shape_impl(False, *args, **kwargs) if res[1] is None: arg_shapes, _, _ = self._infer_shape_impl(True, *args, **kwargs) arg_names = self.list_arguments() unknowns = [] for name, shape in zip(arg_names, arg_shapes): if is_np_compat(): shape_is_none = not shape or -1 in shape else: shape_is_none = not shape or 0 in shape if shape_is_none: if len(unknowns) >= 10: unknowns.append('...') break unknowns.append('%s: %s' % (name, str(shape))) warnings.warn( "Cannot decide shape for the following arguments " + "(0s in shape means unknown dimensions). " + "Consider providing them as input:\n\t" + "\n\t".join(unknowns), stacklevel=2) return res except MXNetError: print("infer_shape error. Arguments:") for i, arg in enumerate(args): print(" #%d: %s" % (i, arg)) for k, v in kwargs.items(): print(" %s: %s" % (k, v)) raise
[ "Infers the shapes of all arguments and all outputs given the known shapes of\n some arguments.\n\n This function takes the known shapes of some arguments in either positional way\n or keyword argument way as input. It returns a tuple of `None` values\n if there is not enough information to deduce the missing shapes.\n\n Example\n -------\n >>> a = mx.sym.var('a')\n >>> b = mx.sym.var('b')\n >>> c = a + b\n >>> arg_shapes, out_shapes, aux_shapes = c.infer_shape(a=(3,3))\n >>> arg_shapes\n [(3L, 3L), (3L, 3L)]\n >>> out_shapes\n [(3L, 3L)]\n >>> aux_shapes\n []\n >>> c.infer_shape(a=(0,3)) # 0s in shape means unknown dimensions. So, returns None.\n (None, None, None)\n\n Inconsistencies in the known shapes will cause an error to be raised.\n See the following example:\n\n >>> data = mx.sym.Variable('data')\n >>> out = mx.sym.FullyConnected(data=data, name='fc1', num_hidden=1000)\n >>> out = mx.sym.Activation(data=out, act_type='relu')\n >>> out = mx.sym.FullyConnected(data=out, name='fc2', num_hidden=10)\n >>> weight_shape= (1, 100)\n >>> data_shape = (100, 100)\n >>> out.infer_shape(data=data_shape, fc1_weight=weight_shape)\n Error in operator fc1: Shape inconsistent, Provided=(1,100), inferred shape=(1000,100)\n\n Parameters\n ----------\n *args :\n Shape of arguments in a positional way.\n Unknown shape can be marked as None.\n\n **kwargs :\n Keyword arguments of the known shapes.\n\n Returns\n -------\n arg_shapes : list of tuple or None\n List of argument shapes.\n The order is same as the order of list_arguments().\n out_shapes : list of tuple or None\n List of output shapes.\n The order is same as the order of list_outputs().\n aux_shapes : list of tuple or None\n List of auxiliary state shapes.\n The order is same as the order of list_auxiliary_states().\n " ]
Please provide a description of the function:def _infer_shape_impl(self, partial, *args, **kwargs): # pylint: disable=too-many-locals if len(args) != 0 and len(kwargs) != 0: raise ValueError('Can only specify known argument \ shapes either by positional or kwargs way.') sdata = [] indptr = [0] if len(args) != 0: keys = c_array(ctypes.c_char_p, []) for i, s in enumerate(args): if s is not None: if not isinstance(s, tuple): raise TypeError("Arguments need to be shapes (tuple), " "but argument %d is %s." % (i, type(s))) sdata.extend(s) indptr.append(len(sdata)) else: str_keys = [] for k, v in kwargs.items(): if not isinstance(v, tuple): raise TypeError("Arguments need to be shapes (tuple), " "but '%s' is %s." % (k, type(v))) str_keys.append(k) sdata.extend(v) indptr.append(len(sdata)) keys = c_str_array(str_keys) arg_shape_size = mx_uint() arg_shape_ndim = ctypes.POINTER(mx_int)() arg_shape_data = ctypes.POINTER(ctypes.POINTER(mx_int))() out_shape_size = mx_uint() out_shape_ndim = ctypes.POINTER(mx_int)() out_shape_data = ctypes.POINTER(ctypes.POINTER(mx_int))() aux_shape_size = mx_uint() aux_shape_ndim = ctypes.POINTER(mx_int)() aux_shape_data = ctypes.POINTER(ctypes.POINTER(mx_int))() complete = ctypes.c_int() if partial: infer_func = _LIB.MXSymbolInferShapePartialEx else: infer_func = _LIB.MXSymbolInferShapeEx check_call(infer_func( self.handle, mx_uint(len(indptr) - 1), keys, c_array_buf(mx_uint, array('I', indptr)), c_array_buf(mx_int, array('i', sdata)), ctypes.byref(arg_shape_size), ctypes.byref(arg_shape_ndim), ctypes.byref(arg_shape_data), ctypes.byref(out_shape_size), ctypes.byref(out_shape_ndim), ctypes.byref(out_shape_data), ctypes.byref(aux_shape_size), ctypes.byref(aux_shape_ndim), ctypes.byref(aux_shape_data), ctypes.byref(complete))) if complete.value != 0: arg_shapes = [tuple(arg_shape_data[i][:arg_shape_ndim[i]]) if arg_shape_ndim[i] >= 0 else None for i in range(arg_shape_size.value)] out_shapes = [tuple(out_shape_data[i][:out_shape_ndim[i]]) if out_shape_ndim[i] >= 0 else None for i in range(out_shape_size.value)] aux_shapes = [tuple(aux_shape_data[i][:aux_shape_ndim[i]]) if aux_shape_ndim[i] >= 0 else None for i in range(aux_shape_size.value)] return (arg_shapes, out_shapes, aux_shapes) else: return (None, None, None)
[ "The actual implementation for calling shape inference API." ]
Please provide a description of the function:def save(self, fname): if not isinstance(fname, string_types): raise TypeError('fname needs to be a string') check_call(_LIB.MXSymbolSaveToFile(self.handle, c_str(fname)))
[ "Saves symbol to a file.\n\n You can also use pickle to do the job if you only work on python.\n The advantage of `load`/`save` functions is that the file contents are language agnostic.\n This means the model saved by one language binding can be loaded by a different\n language binding of `MXNet`.\n You also get the benefit of being able to directly load/save from cloud storage(S3, HDFS).\n\n Parameters\n ----------\n fname : str\n The name of the file.\n\n - \"s3://my-bucket/path/my-s3-symbol\"\n - \"hdfs://my-bucket/path/my-hdfs-symbol\"\n - \"/path-to/my-local-symbol\"\n\n See Also\n --------\n symbol.load : Used to load symbol from file.\n " ]
Please provide a description of the function:def tojson(self): json_str = ctypes.c_char_p() check_call(_LIB.MXSymbolSaveToJSON(self.handle, ctypes.byref(json_str))) return py_str(json_str.value)
[ "Saves symbol to a JSON string.\n\n See Also\n --------\n symbol.load_json : Used to load symbol from JSON string.\n " ]
Please provide a description of the function:def _get_ndarray_inputs(arg_key, args, arg_names, allow_missing): # setup args arg_handles = [] arg_arrays = [] if isinstance(args, list): if len(args) != len(arg_names): raise ValueError('Length of %s does not match the number of arguments' % arg_key) for narr in args: if narr is None and allow_missing: arg_handles.append(None) elif not isinstance(narr, NDArray): raise TypeError('Only accept list of NDArrays or dict of str to NDArray') else: arg_handles.append(narr.handle) arg_arrays = args elif isinstance(args, dict): for name in arg_names: if name in args: narr = args[name] if not isinstance(narr, NDArray): raise TypeError('Only accept list of NDArrays or dict of str to NDArray') arg_handles.append(narr.handle) arg_arrays.append(narr) else: if allow_missing: arg_handles.append(None) arg_arrays.append(None) else: raise ValueError('key `%s` is missing in `%s`' % (name, arg_key)) else: raise TypeError('Only accept list of NDArrays or dict of str to NDArray') return c_array(NDArrayHandle, arg_handles), arg_arrays
[ "Helper function to get NDArray lists handles from various inputs.\n\n Parameters\n ----------\n arg_key : str\n The name of argument, used for error message.\n\n args : list of NDArray or dict of str to NDArray\n Input arguments to the symbols.\n If type is list of NDArray, the position is in the same order of arg_names.\n If type is dict of str to NDArray, then it maps the name of arguments\n to the corresponding NDArray,\n\n args_names : list of string\n List of argument names.\n\n allow_missing : boolean\n Whether missing argument is allowed.\n When allowed, the missing handle will be set to None(null)\n\n Returns\n -------\n handles : list of NDArrayHandle\n The positional list of NDArrayHandles generated from input.\n " ]
Please provide a description of the function:def simple_bind(self, ctx, grad_req='write', type_dict=None, stype_dict=None, group2ctx=None, shared_arg_names=None, shared_exec=None, shared_buffer=None, **kwargs): # data types num_provided_arg_types = 0 provided_arg_type_names = ctypes.POINTER(ctypes.c_char_p)() # provided type argument names provided_arg_type_data = ctypes.POINTER(mx_uint)() # provided types if type_dict is not None: provided_arg_type_names = [] provided_arg_type_data = [] for k, v in type_dict.items(): v = _numpy.dtype(v).type if v in _DTYPE_NP_TO_MX: provided_arg_type_names.append(k) provided_arg_type_data.append(_DTYPE_NP_TO_MX[v]) num_provided_arg_types = mx_uint(len(provided_arg_type_names)) provided_arg_type_names = c_str_array(provided_arg_type_names) provided_arg_type_data = c_array_buf(ctypes.c_int, array('i', provided_arg_type_data)) # storage types num_provided_arg_stypes = 0 # provided storage type argument names provided_arg_stype_names = ctypes.POINTER(ctypes.c_char_p)() provided_arg_stype_data = ctypes.POINTER(mx_uint)() # provided storage types if stype_dict is not None: provided_arg_stype_names = [] provided_arg_stype_data = [] for k, v in stype_dict.items(): if v in _STORAGE_TYPE_STR_TO_ID: provided_arg_stype_names.append(k) provided_arg_stype_data.append(_STORAGE_TYPE_STR_TO_ID[v]) num_provided_arg_stypes = mx_uint(len(provided_arg_stype_names)) provided_arg_stype_names = c_str_array(provided_arg_stype_names) provided_arg_stype_data = c_array_buf(ctypes.c_int, array('i', provided_arg_stype_data)) provided_arg_shape_data = [] # shape data # argument shape index in sdata, # e.g. [sdata[indptr[0]], sdata[indptr[1]]) is the shape of the first arg provided_arg_shape_idx = [0] provided_arg_shape_names = [] # provided argument names for k, v in kwargs.items(): # if k not in listed_arguments and k not in listed_aux_states: # raise ValueError('arg name %s is not valid', k) if isinstance(v, tuple): provided_arg_shape_names.append(k) provided_arg_shape_data.extend(v) provided_arg_shape_idx.append(len(provided_arg_shape_data)) provided_req_type_list_len = 0 provided_grad_req_types = ctypes.POINTER(ctypes.c_char_p)() provided_grad_req_names = ctypes.POINTER(ctypes.c_char_p)() if grad_req is not None: if isinstance(grad_req, string_types): # use provided_req_type_list_len = 0 to indicate this situation provided_req_type_list_len = 0 provided_grad_req_types = [grad_req] elif isinstance(grad_req, list): if len(grad_req) == 0: raise RuntimeError('grad_req in simple_bind cannot be an empty list') provided_grad_req_types = grad_req provided_req_type_list_len = len(provided_grad_req_types) elif isinstance(grad_req, dict): if len(grad_req) == 0: raise RuntimeError('grad_req in simple_bind cannot be an empty dict') provided_grad_req_names = [] provided_grad_req_types = [] for k, v in grad_req.items(): provided_grad_req_names.append(k) provided_grad_req_types.append(v) provided_grad_req_names = c_str_array(provided_grad_req_names) provided_req_type_list_len = len(provided_grad_req_types) provided_grad_req_types = c_str_array(provided_grad_req_types) num_ctx_map_keys = mx_uint(0) ctx_map_keys = ctypes.POINTER(ctypes.c_char_p)() ctx_map_dev_types = ctypes.POINTER(ctypes.c_int)() ctx_map_dev_ids = ctypes.POINTER(ctypes.c_int)() if group2ctx is not None: ctx_map_keys = [] ctx_map_dev_types = [] ctx_map_dev_ids = [] for key, val in group2ctx.items(): ctx_map_keys.append(key) ctx_map_dev_types.append(val.device_typeid) ctx_map_dev_ids.append(val.device_id) num_ctx_map_keys = mx_uint(len(ctx_map_keys)) ctx_map_keys = c_str_array(ctx_map_keys) ctx_map_dev_types = c_array(ctypes.c_int, array('i', ctx_map_dev_types)) ctx_map_dev_ids = c_array(ctypes.c_int, array('i', ctx_map_dev_ids)) # prepare param names shared_arg_name_list = [] if shared_arg_names is not None: if not isinstance(shared_arg_names, list): raise ValueError('shared_arg_names in simple_bind must be a list or None') shared_arg_name_list = shared_arg_names # prepare shared_buffer if shared_buffer is None: shared_buffer_len = ctypes.c_int(-1) shared_buffer_names = ctypes.POINTER(ctypes.c_char_p)() shared_buffer_handles = ctypes.POINTER(NDArrayHandle)() else: if not isinstance(shared_buffer, dict): raise ValueError('shared_buffer in simple_bind must be dict or None') buffer_names = shared_buffer.keys() buffer_arrays = shared_buffer.values() for v in buffer_arrays: assert(v.stype == 'default'), \ "shared_buffer is expected to only contain NDArrays with default storage" shared_buffer_names = c_str_array(buffer_names) shared_buffer_len = ctypes.c_int(len(buffer_arrays)) shared_buffer_handles = c_handle_array(buffer_arrays) updated_shared_buffer_names = ctypes.POINTER(ctypes.c_char_p)() updated_shared_buffer_handles = ctypes.POINTER(NDArrayHandle)() # prepare shared_exec_handle shared_exec_handle = shared_exec.handle if shared_exec is not None else ExecutorHandle() # prepare current executor handle exe_handle = ExecutorHandle() # prepare current executor's in_args, arg_grads, and aux_states num_in_args = ctypes.c_uint() in_arg_handles = ctypes.POINTER(NDArrayHandle)() arg_grad_handles = ctypes.POINTER(NDArrayHandle)() num_aux_states = ctypes.c_uint() aux_state_handles = ctypes.POINTER(NDArrayHandle)() try: check_call(_LIB.MXExecutorSimpleBindEx(self.handle, ctypes.c_int(ctx.device_typeid), ctypes.c_int(ctx.device_id), num_ctx_map_keys, ctx_map_keys, ctx_map_dev_types, ctx_map_dev_ids, mx_uint(provided_req_type_list_len), provided_grad_req_names, provided_grad_req_types, mx_uint(len(provided_arg_shape_names)), c_str_array(provided_arg_shape_names), c_array_buf(mx_int, array('i', provided_arg_shape_data)), c_array_buf(mx_uint, array('I', provided_arg_shape_idx)), num_provided_arg_types, provided_arg_type_names, provided_arg_type_data, num_provided_arg_stypes, provided_arg_stype_names, provided_arg_stype_data, mx_uint(len(shared_arg_name_list)), c_str_array(shared_arg_name_list), ctypes.byref(shared_buffer_len), shared_buffer_names, shared_buffer_handles, ctypes.byref(updated_shared_buffer_names), ctypes.byref(updated_shared_buffer_handles), ctypes.byref(num_in_args), ctypes.byref(in_arg_handles), ctypes.byref(arg_grad_handles), ctypes.byref(num_aux_states), ctypes.byref(aux_state_handles), shared_exec_handle, ctypes.byref(exe_handle))) except MXNetError as e: error_msg = "simple_bind error. Arguments:\n" for k, v in kwargs.items(): error_msg += "%s: %s\n" % (k, v) error_msg += "%s" % e raise RuntimeError(error_msg) # update shared_buffer if shared_buffer is not None: for i in range(shared_buffer_len.value): k = py_str(updated_shared_buffer_names[i]) v = NDArray(NDArrayHandle(updated_shared_buffer_handles[i])) shared_buffer[k] = v # create in_args, arg_grads, and aux_states for the current executor arg_arrays = [_ndarray_cls(NDArrayHandle(in_arg_handles[i])) for i in range(num_in_args.value)] grad_arrays = [_ndarray_cls(NDArrayHandle(arg_grad_handles[i])) if arg_grad_handles[i] is not None else None for i in range(num_in_args.value)] aux_arrays = [_ndarray_cls(NDArrayHandle(aux_state_handles[i])) for i in range(num_aux_states.value)] executor = Executor(exe_handle, self, ctx, grad_req, group2ctx) executor.arg_arrays = arg_arrays executor.grad_arrays = grad_arrays executor.aux_arrays = aux_arrays return executor
[ "Bind current symbol to get an executor, allocate all the arguments needed.\n Allows specifying data types.\n\n This function simplifies the binding procedure. You need to specify only input data shapes.\n Before binding the executor, the function allocates arguments and auxiliary states\n that were not explicitly specified. Allows specifying data types.\n\n Example\n -------\n >>> x = mx.sym.Variable('x')\n >>> y = mx.sym.FullyConnected(x, num_hidden=4)\n >>> exe = y.simple_bind(mx.cpu(), x=(5,4), grad_req='null')\n >>> exe.forward()\n [<NDArray 5x4 @cpu(0)>]\n >>> exe.outputs[0].asnumpy()\n array([[ 0., 0., 0., 0.],\n [ 0., 0., 0., 0.],\n [ 0., 0., 0., 0.],\n [ 0., 0., 0., 0.],\n [ 0., 0., 0., 0.]], dtype=float32)\n >>> exe.arg_arrays\n [<NDArray 5x4 @cpu(0)>, <NDArray 4x4 @cpu(0)>, <NDArray 4 @cpu(0)>]\n >>> exe.grad_arrays\n [<NDArray 5x4 @cpu(0)>, <NDArray 4x4 @cpu(0)>, <NDArray 4 @cpu(0)>]\n\n Parameters\n ----------\n ctx : Context\n The device context the generated executor to run on.\n\n grad_req: string\n {'write', 'add', 'null'}, or list of str or dict of str to str, optional\n To specify how we should update the gradient to the `args_grad`.\n\n - 'write' means every time gradient is written to specified `args_grad` NDArray.\n - 'add' means every time gradient is added to the specified NDArray.\n - 'null' means no action is taken, the gradient may not be calculated.\n\n type_dict : Dict of str->numpy.dtype\n Input type dictionary, name->dtype\n\n stype_dict : Dict of str->str\n Input storage type dictionary, name->storage_type\n\n group2ctx : Dict of string to mx.Context\n The dict mapping the `ctx_group` attribute to the context assignment.\n\n shared_arg_names : List of string\n The argument names whose `NDArray` of shared_exec can be reused for initializing\n the current executor.\n\n shared_exec : Executor\n The executor whose arg_arrays, arg_arrays, grad_arrays, and aux_arrays can be\n reused for initializing the current executor.\n\n shared_buffer : Dict of string to `NDArray`\n The dict mapping argument names to the `NDArray` that can be reused for initializing\n the current executor. This buffer will be checked for reuse if one argument name\n of the current executor is not found in `shared_arg_names`. The `NDArray` s are\n expected have default storage type.\n\n kwargs : Dict of str->shape\n Input shape dictionary, name->shape\n\n Returns\n -------\n executor : mxnet.Executor\n The generated executor\n " ]
Please provide a description of the function:def bind(self, ctx, args, args_grad=None, grad_req='write', aux_states=None, group2ctx=None, shared_exec=None): # pylint: disable=too-many-locals, too-many-branches if not isinstance(ctx, Context): raise TypeError("Context type error") listed_arguments = self.list_arguments() args_handle, args = self._get_ndarray_inputs('args', args, listed_arguments, False) # setup args gradient if args_grad is None: args_grad_handle = c_array(NDArrayHandle, [None] * len(args)) else: args_grad_handle, args_grad = self._get_ndarray_inputs( 'args_grad', args_grad, listed_arguments, True) if aux_states is None: aux_states = [] aux_args_handle, aux_states = self._get_ndarray_inputs( 'aux_states', aux_states, self.list_auxiliary_states(), False) # setup requirements if isinstance(grad_req, string_types): if grad_req not in _GRAD_REQ_MAP: raise ValueError('grad_req must be in %s' % str(_GRAD_REQ_MAP)) reqs_array = c_array_buf(mx_uint, array('I', [_GRAD_REQ_MAP[grad_req]] * len(listed_arguments))) elif isinstance(grad_req, list): reqs_array = c_array_buf(mx_uint, array('I', [_GRAD_REQ_MAP[item] for item in grad_req])) elif isinstance(grad_req, dict): req_array = [] for name in listed_arguments: if name in grad_req: req_array.append(_GRAD_REQ_MAP[grad_req[name]]) else: req_array.append(0) reqs_array = c_array_buf(mx_uint, array('I', req_array)) ctx_map_keys = [] ctx_map_dev_types = [] ctx_map_dev_ids = [] if group2ctx: for key, val in group2ctx.items(): ctx_map_keys.append(key) ctx_map_dev_types.append(val.device_typeid) ctx_map_dev_ids.append(val.device_id) handle = ExecutorHandle() shared_handle = shared_exec.handle if shared_exec is not None else ExecutorHandle() check_call(_LIB.MXExecutorBindEX(self.handle, ctypes.c_int(ctx.device_typeid), ctypes.c_int(ctx.device_id), mx_uint(len(ctx_map_keys)), c_str_array(ctx_map_keys), c_array_buf(ctypes.c_int, array('i', ctx_map_dev_types)), c_array_buf(ctypes.c_int, array('i', ctx_map_dev_ids)), mx_uint(len(args)), args_handle, args_grad_handle, reqs_array, mx_uint(len(aux_states)), aux_args_handle, shared_handle, ctypes.byref(handle))) executor = Executor(handle, self, ctx, grad_req, group2ctx) executor.arg_arrays = args executor.grad_arrays = args_grad executor.aux_arrays = aux_states return executor
[ "Binds the current symbol to an executor and returns it.\n\n We first declare the computation and then bind to the data to run.\n This function returns an executor which provides method `forward()` method for evaluation\n and a `outputs()` method to get all the results.\n\n Example\n -------\n >>> a = mx.sym.Variable('a')\n >>> b = mx.sym.Variable('b')\n >>> c = a + b\n <Symbol _plus1>\n >>> ex = c.bind(ctx=mx.cpu(), args={'a' : mx.nd.ones([2,3]), 'b' : mx.nd.ones([2,3])})\n >>> ex.forward()\n [<NDArray 2x3 @cpu(0)>]\n >>> ex.outputs[0].asnumpy()\n [[ 2. 2. 2.]\n [ 2. 2. 2.]]\n\n Parameters\n ----------\n ctx : Context\n The device context the generated executor to run on.\n\n args : list of NDArray or dict of str to NDArray\n Input arguments to the symbol.\n\n - If the input type is a list of `NDArray`, the order should be same as the order\n of `list_arguments()`.\n - If the input type is a dict of str to `NDArray`, then it maps the name of arguments\n to the corresponding `NDArray`.\n - In either case, all the arguments must be provided.\n\n args_grad : list of NDArray or dict of str to `NDArray`, optional\n When specified, `args_grad` provides NDArrays to hold\n the result of gradient value in backward.\n\n - If the input type is a list of `NDArray`, the order should be same as the order\n of `list_arguments()`.\n - If the input type is a dict of str to `NDArray`, then it maps the name of arguments\n to the corresponding NDArray.\n - When the type is a dict of str to `NDArray`, one only need to provide the dict\n for required argument gradient.\n Only the specified argument gradient will be calculated.\n\n grad_req : {'write', 'add', 'null'}, or list of str or dict of str to str, optional\n To specify how we should update the gradient to the `args_grad`.\n\n - 'write' means everytime gradient is write to specified `args_grad` `NDArray`.\n - 'add' means everytime gradient is add to the specified NDArray.\n - 'null' means no action is taken, the gradient may not be calculated.\n\n aux_states : list of `NDArray`, or dict of str to `NDArray`, optional\n Input auxiliary states to the symbol, only needed when the output of\n `list_auxiliary_states()` is not empty.\n\n - If the input type is a list of `NDArray`, the order should be same as the order\n of `list_auxiliary_states()`.\n - If the input type is a dict of str to `NDArray`, then it maps the name of\n `auxiliary_states` to the corresponding `NDArray`,\n - In either case, all the auxiliary states need to be provided.\n\n group2ctx : Dict of string to mx.Context\n The dict mapping the `ctx_group` attribute to the context assignment.\n\n shared_exec : mx.executor.Executor\n Executor to share memory with. This is intended for runtime reshaping, variable length\n sequences, etc. The returned executor shares state with `shared_exec`, and should not be\n used in parallel with it.\n\n Returns\n -------\n executor : Executor\n The generated executor\n\n Notes\n -----\n Auxiliary states are the special states of symbols that do not correspond\n to an argument, and do not have gradient but are still useful\n for the specific operations. Common examples of auxiliary states include\n the `moving_mean` and `moving_variance` states in `BatchNorm`.\n Most operators do not have auxiliary states and in those cases,\n this parameter can be safely ignored.\n\n One can give up gradient by using a dict in `args_grad` and only specify\n gradient they interested in.\n " ]
Please provide a description of the function:def gradient(self, wrt): handle = SymbolHandle() c_wrt = c_str_array(wrt) check_call(_LIB.MXSymbolGrad(self.handle, mx_uint(len(wrt)), c_wrt, ctypes.byref(handle))) return Symbol(handle)
[ "Gets the autodiff of current symbol.\n\n This function can only be used if current symbol is a loss function.\n\n .. note:: This function is currently not implemented.\n\n Parameters\n ----------\n wrt : Array of String\n keyword arguments of the symbol that the gradients are taken.\n\n Returns\n -------\n grad : Symbol\n A gradient Symbol with returns to be the corresponding gradients.\n " ]
Please provide a description of the function:def eval(self, ctx=None, **kwargs): if ctx is None: ctx = current_context() return self.bind(ctx, kwargs).forward()
[ "Evaluates a symbol given arguments.\n\n The `eval` method combines a call to `bind` (which returns an executor)\n with a call to `forward` (executor method).\n For the common use case, where you might repeatedly evaluate with same arguments,\n eval is slow.\n In that case, you should call `bind` once and then repeatedly call forward.\n This function allows simpler syntax for less cumbersome introspection.\n\n Example\n -------\n >>> a = mx.sym.Variable('a')\n >>> b = mx.sym.Variable('b')\n >>> c = a + b\n >>> ex = c.eval(ctx = mx.cpu(), a = mx.nd.ones([2,3]), b = mx.nd.ones([2,3]))\n >>> ex\n [<NDArray 2x3 @cpu(0)>]\n >>> ex[0].asnumpy()\n array([[ 2., 2., 2.],\n [ 2., 2., 2.]], dtype=float32)\n\n Parameters\n ----------\n ctx : Context\n The device context the generated executor to run on.\n\n kwargs : Keyword arguments of type `NDArray`\n Input arguments to the symbol. All the arguments must be provided.\n\n Returns\n ----------\n result : a list of NDArrays corresponding to the values taken by each symbol when\n evaluated on given args. When called on a single symbol (not a group),\n the result will be a list with one element.\n " ]
Please provide a description of the function:def get_backend_symbol(self, backend): out = SymbolHandle() check_call(_LIB.MXGenBackendSubgraph(self.handle, c_str(backend), ctypes.byref(out))) return Symbol(out)
[ "Return symbol for target backend.\n\n Parameters\n ----------\n backend : str\n The backend names.\n\n Returns\n -------\n out : Symbol\n The created Symbol for target backend.\n " ]
Please provide a description of the function:def hybrid_forward(self, F, x): f = self._factor # (N, C*f, W) x = F.reshape(x, (0, -4, -1, f, 0)) # (N, C, f, W) x = F.transpose(x, (0, 1, 3, 2)) # (N, C, W, f) x = F.reshape(x, (0, 0, -3)) # (N, C, W*f) return x
[ "Perform pixel-shuffling on the input." ]
Please provide a description of the function:def hybrid_forward(self, F, x): f1, f2 = self._factors # (N, f1*f2*C, H, W) x = F.reshape(x, (0, -4, -1, f1 * f2, 0, 0)) # (N, C, f1*f2, H, W) x = F.reshape(x, (0, 0, -4, f1, f2, 0, 0)) # (N, C, f1, f2, H, W) x = F.transpose(x, (0, 1, 4, 2, 5, 3)) # (N, C, H, f1, W, f2) x = F.reshape(x, (0, 0, -3, -3)) # (N, C, H*f1, W*f2) return x
[ "Perform pixel-shuffling on the input." ]
Please provide a description of the function:def hybrid_forward(self, F, x): # `transpose` doesn't support 8D, need other implementation f1, f2, f3 = self._factors # (N, C*f1*f2*f3, D, H, W) x = F.reshape(x, (0, -4, -1, f1 * f2 * f3, 0, 0, 0)) # (N, C, f1*f2*f3, D, H, W) x = F.swapaxes(x, 2, 3) # (N, C, D, f1*f2*f3, H, W) x = F.reshape(x, (0, 0, 0, -4, f1, f2*f3, 0, 0)) # (N, C, D, f1, f2*f3, H, W) x = F.reshape(x, (0, 0, -3, 0, 0, 0)) # (N, C, D*f1, f2*f3, H, W) x = F.swapaxes(x, 3, 4) # (N, C, D*f1, H, f2*f3, W) x = F.reshape(x, (0, 0, 0, 0, -4, f2, f3, 0)) # (N, C, D*f1, H, f2, f3, W) x = F.reshape(x, (0, 0, 0, -3, 0, 0)) # (N, C, D*f1, H*f2, f3, W) x = F.swapaxes(x, 4, 5) # (N, C, D*f1, H*f2, W, f3) x = F.reshape(x, (0, 0, 0, 0, -3)) # (N, C, D*f1, H*f2, W*f3) return x
[ "Perform pixel-shuffling on the input." ]