Columns: Code (string lengths 103 to 85.9k), Summary (sequence lengths 0 to 94)
Please provide a description of the function:def infer_storage_type(self, in_stype): for i, stype in enumerate(in_stype): assert stype == _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT], \ "Default infer_storage_type implementation doesnt allow non default stypes: " \ "found non default stype '%s' for in_stype[%d]. Please implement " \ "infer_storage_type and infer_storage_type_backward interface " \ "in your custom operator if you have non-default input/output stypes" % (stype, i) return in_stype, \ [_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT]]*len(self.list_outputs()), \ [_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT]]*len(self.list_auxiliary_states())
[ "infer_storage_type interface. Used to infer storage type of\n inputs and outputs in the forward pass. When this interface is not implemented,\n all stypes will be inferred as default.\n\n Parameters\n ----------\n in_stype : list of stypes, valid stypes are default, row_sparse and\n csr\n\n Returns\n -------\n in_stype : list\n list of argument stypes.\n out_stype : list\n list of output types calculated from in_stype,\n in the same order as declared in list_outputs.\n aux_type : Optional, list\n list of aux types calculated from in_stype,\n in the same order as declared in list_auxiliary_states.\n " ]
Please provide a description of the function:def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype): for i, stype in enumerate(ograd_stype): assert stype == _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT], \ "Default infer_storage_type_backward implementation doesnt allow non default stypes: " \ "found non default stype '%s' for ograd_stype[%d]. Please implement " \ "infer_storage_type and infer_storage_type_backward interface " \ "in your custom operator if you have non-default output gradient stypes" % (stype, i) for i, stype in enumerate(igrad_stype): if stype == _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_UNDEFINED]: stype = _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT] assert stype == _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT], \ "Default infer_storage_type_backward implementation doesnt allow non default stypes: " \ "found non default stype '%s' for igrad_stype[%d]. Please implement " \ "infer_storage_type and infer_storage_type_backward interface " \ "in your custom operator if you have non-default input gradient stypes" % (stype, i) stype_lists = [ograd_stype, in_stype, out_stype, igrad_stype, aux_stype] for stype_list in stype_lists: stype_list[:] = len(stype_list) * [_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT]] return stype_lists[0], stype_lists[1], stype_lists[2], stype_lists[3], stype_lists[4]
[ "infer_storage_type_backward interface. Used to infer storage\n type of inputs and outputs in the backward pass.\n\n Will raise an error if undefined storage type is returned.\n Returned lists have to be the same size as the input lists to infer_storage_type_backward,\n otherwise an exception will be thrown. When this interface is not implemented,\n all stypes will be inferred as default.\n\n Parameters\n ----------\n ograd_stype : list\n list of output gradient storage types\n in_stype : list\n list of input storage types\n out_stype : list\n list of output storage types\n igrad_stype : list\n list of input gradient storage types\n aux_stype : list\n list of auxiliary storage types\n\n Returns\n -------\n ograd_stype : list\n list of inferred output gradient storage types\n in_stype : list\n list of inferred input storage types\n out_stype : list\n list of inferred output storage types\n igrad_stype : list\n list of inferred input gradient storage types\n aux_stype : list\n list of inferred storage types for auxiliary states\n " ]
Please provide a description of the function:def declare_backward_dependency(self, out_grad, in_data, out_data): deps = [] if self.need_top_grad_: deps.extend(out_grad) deps.extend(in_data) deps.extend(out_data) return deps
[ "Declare dependencies of this operator for backward pass.\n\n Parameters\n ----------\n out_grad : list of int\n ids of out_grad blobs.\n in_data : list of int\n ids of in_data blobs.\n out_data: list of int\n ids of out_data blobs.\n\n Returns\n -------\n deps : list of int\n ids of the needed blobs.\n " ]
Please provide a description of the function:def inc(self): self.lock.acquire() cur = self.counter self.counter += 1 self.lock.release() return cur
[ "Get index for new entry." ]
Please provide a description of the function:def close(self): if not self.is_open: return super(IndexCreator, self).close() self.fidx.close()
[ "Closes the record and index files." ]
Please provide a description of the function:def tell(self): pos = ctypes.c_size_t() check_call(_LIB.MXRecordIOReaderTell(self.handle, ctypes.byref(pos))) return pos.value
[ "Returns the current position of read head.\n " ]
Please provide a description of the function:def create_index(self): self.reset() counter = 0 pre_time = time.time() while True: if counter % 1000 == 0: cur_time = time.time() print('time:', cur_time - pre_time, ' count:', counter) pos = self.tell() cont = self.read() if cont is None: break key = self.key_type(counter) self.fidx.write('%s\t%d\n'%(str(key), pos)) counter = counter + 1
[ "Creates the index file from open record file\n " ]
Please provide a description of the function:def _run_cmd(cmds): if not isinstance(cmds, str): cmds = "".join(cmds) print("Execute \"%s\"" % cmds) try: subprocess.check_call(cmds, shell=True) except subprocess.CalledProcessError as err: print(err) raise err
[ "Run commands, raise exception if failed" ]
Please provide a description of the function:def generate_doxygen(app): _run_cmd("cd %s/.. && make doxygen" % app.builder.srcdir) _run_cmd("cp -rf doxygen/html %s/doxygen" % app.builder.outdir)
[ "Run the doxygen make commands" ]
Please provide a description of the function:def build_mxnet(app): if not os.path.exists(os.path.join(app.builder.srcdir, '..', 'config.mk')): _run_cmd("cd %s/.. && cp make/config.mk config.mk && make -j$(nproc) USE_MKLDNN=0 USE_CPP_PACKAGE=1 " % app.builder.srcdir) else: _run_cmd("cd %s/.. && make -j$(nproc) USE_MKLDNN=0 USE_CPP_PACKAGE=1 " % app.builder.srcdir)
[ "Build mxnet .so lib" ]
Please provide a description of the function:def build_r_docs(app): r_root = app.builder.srcdir + '/../R-package' pdf_path = app.builder.srcdir + '/api/r/mxnet-r-reference-manual.pdf' _run_cmd('cd ' + r_root + '; R -e "roxygen2::roxygenize()"; R CMD Rd2pdf . --no-preview -o ' + pdf_path) dest_path = app.builder.outdir + '/api/r/' _run_cmd('mkdir -p ' + dest_path + '; mv ' + pdf_path + ' ' + dest_path)
[ "build r pdf" ]
Please provide a description of the function:def build_scala(app): if any(v in _BUILD_VER for v in ['1.2.', '1.3.', '1.4.']): _run_cmd("cd %s/.. && make scalapkg" % app.builder.srcdir) _run_cmd("cd %s/.. && make scalainstall" % app.builder.srcdir) else: _run_cmd("cd %s/../scala-package && mvn -B install -DskipTests" % app.builder.srcdir)
[ "build scala for scala docs, java docs, and clojure docs to use" ]
Please provide a description of the function:def build_scala_docs(app): scala_path = app.builder.srcdir + '/../scala-package' scala_doc_sources = 'find . -type f -name "*.scala" | egrep \"\.\/core|\.\/infer\" | egrep -v \"\/javaapi\" | egrep -v \"Suite\"' scala_doc_classpath = ':'.join([ '`find native -name "*.jar" | grep "target/lib/" | tr "\\n" ":" `', '`find macros -name "*.jar" | tr "\\n" ":" `', '`find core -name "*.jar" | tr "\\n" ":" `', '`find infer -name "*.jar" | tr "\\n" ":" `' ]) # There are unresolvable errors on mxnet 1.2.x. We are ignoring those errors while aborting the ci on newer versions scala_ignore_errors = '; exit 0' if any(v in _BUILD_VER for v in ['1.2.', '1.3.']) else '' _run_cmd('cd {}; scaladoc `{}` -classpath {} -feature -deprecation {}' .format(scala_path, scala_doc_sources, scala_doc_classpath, scala_ignore_errors)) dest_path = app.builder.outdir + '/api/scala/docs' _run_cmd('rm -rf ' + dest_path) _run_cmd('mkdir -p ' + dest_path) # 'index' and 'package.html' do not exist in later versions of scala; delete these after upgrading scala>2.12.x scaladocs = ['index', 'index.html', 'org', 'lib', 'index.js', 'package.html'] for doc_file in scaladocs: _run_cmd('cd ' + scala_path + ' && mv -f ' + doc_file + ' ' + dest_path + '; exit 0')
[ "build scala doc and then move the outdir" ]
Please provide a description of the function:def build_java_docs(app): java_path = app.builder.srcdir + '/../scala-package' java_doc_sources = 'find . -type f -name "*.scala" | egrep \"\.\/core|\.\/infer\" | egrep \"\/javaapi\" | egrep -v \"Suite\"' java_doc_classpath = ':'.join([ '`find native -name "*.jar" | grep "target/lib/" | tr "\\n" ":" `', '`find macros -name "*.jar" | tr "\\n" ":" `', '`find core -name "*.jar" | tr "\\n" ":" `', '`find infer -name "*.jar" | tr "\\n" ":" `' ]) _run_cmd('cd {}; scaladoc `{}` -classpath {} -feature -deprecation' .format(java_path, java_doc_sources, java_doc_classpath)) dest_path = app.builder.outdir + '/api/java/docs' _run_cmd('rm -rf ' + dest_path) _run_cmd('mkdir -p ' + dest_path) javadocs = ['index', 'index.html', 'org', 'lib', 'index.js', 'package.html'] for doc_file in javadocs: _run_cmd('cd ' + java_path + ' && mv -f ' + doc_file + ' ' + dest_path + '; exit 0')
[ "build java docs and then move the outdir" ]
Please provide a description of the function:def build_clojure_docs(app): clojure_path = app.builder.srcdir + '/../contrib/clojure-package' _run_cmd('cd ' + clojure_path + '; lein codox') dest_path = app.builder.outdir + '/api/clojure/docs' _run_cmd('rm -rf ' + dest_path) _run_cmd('mkdir -p ' + dest_path) clojure_doc_path = app.builder.srcdir + '/../contrib/clojure-package/target/doc' _run_cmd('cd ' + clojure_doc_path + ' && cp -r * ' + dest_path + '; exit 0')
[ "build clojure doc and then move the outdir" ]
Please provide a description of the function:def _convert_md_table_to_rst(table): if len(table) < 3: return '' out = '```eval_rst\n.. list-table::\n :header-rows: 1\n\n' for i,l in enumerate(table): cols = l.split('|')[1:-1] if i == 0: ncol = len(cols) else: if len(cols) != ncol: return '' if i == 1: for c in cols: if len(c) is not 0 and '---' not in c: return '' else: for j,c in enumerate(cols): out += ' * - ' if j == 0 else ' - ' out += pypandoc.convert_text( c, 'rst', format='md').replace('\n', ' ').replace('\r', '') + '\n' out += '```\n' return out
[ "Convert a markdown table to rst format" ]
Please provide a description of the function:def convert_table(app, docname, source): num_tables = 0 for i,j in enumerate(source): table = [] output = '' in_table = False for l in j.split('\n'): r = l.strip() if r.startswith('|'): table.append(r) in_table = True else: if in_table is True: converted = _convert_md_table_to_rst(table) if converted is '': print("Failed to convert the markdown table") print(table) else: num_tables += 1 output += converted in_table = False table = [] output += l + '\n' source[i] = output if num_tables > 0: print('Converted %d tables in %s' % (num_tables, docname))
[ "Find tables in a markdown and then convert them into the rst format" ]
Please provide a description of the function:def _parse_code_lines(lines): in_code = False lang = None indent = None for l in lines: m = _CODE_MARK.match(l) if m is not None: if not in_code: if m.groups()[1].lower() in _LANGS: lang = m.groups()[1].lower() indent = len(m.groups()[0]) in_code = True yield (l, in_code, lang, indent) else: yield (l, in_code, lang, indent) lang = None indent = None in_code = False else: yield (l, in_code, lang, indent)
[ "A iterator that returns if a line is within a code block\n\n Returns\n -------\n iterator of (str, bool, str, int)\n - line: the line\n - in_code: if this line is in a code block\n - lang: the code block langunage\n - indent: the code indent\n " ]
Please provide a description of the function:def _get_blocks(lines): cur_block = [] pre_lang = None pre_in_code = None for (l, in_code, cur_lang, _) in _parse_code_lines(lines): if in_code != pre_in_code: if pre_in_code and len(cur_block) >= 2: cur_block = cur_block[1:-1] # remove ``` # remove empty lines at head while len(cur_block) > 0: if len(cur_block[0]) == 0: cur_block.pop(0) else: break # remove empty lines at tail while len(cur_block) > 0: if len(cur_block[-1]) == 0: cur_block.pop() else: break if len(cur_block): yield (pre_in_code, pre_lang, cur_block) cur_block = [] cur_block.append(l) pre_lang = cur_lang pre_in_code = in_code if len(cur_block): yield (pre_in_code, pre_lang, cur_block)
[ "split lines into code and non-code blocks\n\n Returns\n -------\n iterator of (bool, str, list of str)\n - if it is a code block\n - source language\n - lines of source\n " ]
Please provide a description of the function:def _get_python_block_output(src, global_dict, local_dict): src = '\n'.join([l for l in src.split('\n') if not l.startswith('%') and not 'plt.show()' in l]) ret_status = True err = '' with _string_io() as s: try: exec(src, global_dict, global_dict) except Exception as e: err = str(e) ret_status = False return (ret_status, s.getvalue()+err)
[ "Evaluate python source codes\n\n Returns\n (bool, str):\n - True if success\n - output\n " ]
Please provide a description of the function:def copy_artifacts(app): dest_path = app.builder.outdir + '/error' source_path = app.builder.srcdir + '/build_version_doc/artifacts' _run_cmd('cd ' + app.builder.srcdir) _run_cmd('rm -rf ' + dest_path) _run_cmd('mkdir -p ' + dest_path) _run_cmd('cp ' + source_path + '/404.html ' + dest_path) _run_cmd('cp ' + source_path + '/api.html ' + dest_path) dest_path = app.builder.outdir + '/_static' _run_cmd('rm -rf ' + dest_path) _run_cmd('mkdir -p ' + dest_path) _run_cmd('cp ' + app.builder.srcdir + '/_static/mxnet.css ' + dest_path)
[ "Copies artifacts needed for website presentation" ]
Please provide a description of the function:def download_caffe_model(model_name, meta_info, dst_dir='./model'): if not os.path.isdir(dst_dir): os.mkdir(dst_dir) model_name = os.path.join(dst_dir, model_name) assert 'prototxt' in meta_info, "missing prototxt url" proto_url, proto_sha1 = meta_info['prototxt'] prototxt = mx.gluon.utils.download(proto_url, model_name+'_deploy.prototxt', sha1_hash=proto_sha1) assert 'caffemodel' in meta_info, "mssing caffemodel url" caffemodel_url, caffemodel_sha1 = meta_info['caffemodel'] caffemodel = mx.gluon.utils.download(caffemodel_url, model_name+'.caffemodel', sha1_hash=caffemodel_sha1) assert 'mean' in meta_info, 'no mean info' mean = meta_info['mean'] if isinstance(mean[0], str): mean_url, mean_sha1 = mean mean = mx.gluon.utils.download(mean_url, model_name+'_mean.binaryproto', sha1_hash=mean_sha1) return (prototxt, caffemodel, mean)
[ "Download caffe model into disk by the given meta info " ]
Please provide a description of the function:def convert_caffe_model(model_name, meta_info, dst_dir='./model'): (prototxt, caffemodel, mean) = download_caffe_model(model_name, meta_info, dst_dir) model_name = os.path.join(dst_dir, model_name) convert_model(prototxt, caffemodel, model_name) if isinstance(mean, str): mx_mean = model_name + '-mean.nd' convert_mean(mean, mx_mean) mean = mx_mean return (model_name, mean)
[ "Download, convert and save a caffe model" ]
Please provide a description of the function:def multi_p_run(tot_num, _func, worker, params, n_process): from multiprocessing import Process, Queue out_q = Queue() procs = [] split_num = split_seq(list(range(0, tot_num)), n_process) print(tot_num, ">>", split_num) split_len = len(split_num) if n_process > split_len: n_process = split_len for i in range(n_process): _p = Process(target=_func, args=(worker, split_num[i][0], split_num[i][1], params, out_q)) _p.daemon = True procs.append(_p) _p.start() try: result = [] for i in range(n_process): result.append(out_q.get()) for i in procs: i.join() except KeyboardInterrupt: print('Killing all the children in the pool.') for i in procs: i.terminate() i.join() return -1 while not out_q.empty(): print(out_q.get(block=False)) return result
[ "\n Run _func with multi-process using params.\n " ]
Please provide a description of the function:def split_seq(sam_num, n_tile): import math print(sam_num) print(n_tile) start_num = sam_num[0::int(math.ceil(len(sam_num) / (n_tile)))] end_num = start_num[1::] end_num.append(len(sam_num)) return [[i, j] for i, j in zip(start_num, end_num)]
[ "\n Split the number(sam_num) into numbers by n_tile\n " ]
Please provide a description of the function:def put_worker(func, from_idx, to_idx, params, out_q): succ, fail = func(from_idx, to_idx, params) return out_q.put({'succ': succ, 'fail': fail})
[ "\n put worker\n " ]
Please provide a description of the function:def namedtuple_with_defaults(typename, field_names, default_values=()): T = collections.namedtuple(typename, field_names) T.__new__.__defaults__ = (None, ) * len(T._fields) if isinstance(default_values, collections.Mapping): prototype = T(**default_values) else: prototype = T(*default_values) T.__new__.__defaults__ = tuple(prototype) return T
[ " create a namedtuple with default values " ]
Please provide a description of the function:def merge_dict(a, b): c = a.copy() c.update(b) return c
[ " merge dict a, b, with b overriding keys in a " ]
Please provide a description of the function:def zip_namedtuple(nt_list): if not nt_list: return dict() if not isinstance(nt_list, list): nt_list = [nt_list] for nt in nt_list: assert type(nt) == type(nt_list[0]) ret = {k : [v] for k, v in nt_list[0]._asdict().items()} for nt in nt_list[1:]: for k, v in nt._asdict().items(): ret[k].append(v) return ret
[ " accept list of namedtuple, return a dict of zipped fields " ]
Please provide a description of the function:def config_as_dict(cfg): ret = cfg.__dict__.copy() # random cropping params del ret['rand_crop_samplers'] assert isinstance(cfg.rand_crop_samplers, list) ret = merge_dict(ret, zip_namedtuple(cfg.rand_crop_samplers)) num_crop_sampler = len(cfg.rand_crop_samplers) ret['num_crop_sampler'] = num_crop_sampler # must specify the # ret['rand_crop_prob'] = 1.0 / (num_crop_sampler + 1) * num_crop_sampler # random padding params del ret['rand_pad'] ret = merge_dict(ret, cfg.rand_pad._asdict()) # color jitter del ret['color_jitter'] ret = merge_dict(ret, cfg.color_jitter._asdict()) return ret
[ " convert raw configuration to unified dictionary " ]
Please provide a description of the function:def import_model(model_file): graph = GraphProto() try: import onnx except ImportError: raise ImportError("Onnx and protobuf need to be installed. " + "Instructions to install - https://github.com/onnx/onnx") # loads model file and returns ONNX protobuf object model_proto = onnx.load_model(model_file) sym, arg_params, aux_params = graph.from_onnx(model_proto.graph) return sym, arg_params, aux_params
[ "Imports the ONNX model file, passed as a parameter, into MXNet symbol and parameters.\n Operator support and coverage -\n https://cwiki.apache.org/confluence/display/MXNET/MXNet-ONNX+Integration\n\n Parameters\n ----------\n model_file : str\n ONNX model file name\n\n Returns\n -------\n sym : :class:`~mxnet.symbol.Symbol`\n MXNet symbol object\n\n arg_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`\n Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format\n\n aux_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`\n Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format\n\n Notes\n -----\n This method is available when you ``import mxnet.contrib.onnx``\n\n " ]
Please provide a description of the function:def get_model_metadata(model_file): graph = GraphProto() try: import onnx except ImportError: raise ImportError("Onnx and protobuf need to be installed. " + "Instructions to install - https://github.com/onnx/onnx") model_proto = onnx.load_model(model_file) metadata = graph.get_graph_metadata(model_proto.graph) return metadata
[ "\n Returns the name and shape information of input and output tensors of the given ONNX model file.\n\n Notes\n -----\n This method is available when you ``import mxnet.contrib.onnx``\n\n Parameters\n ----------\n model_file : str\n ONNX model file name\n\n Returns\n -------\n model_metadata : dict\n A dictionary object mapping various metadata to its corresponding value.\n The dictionary will have the following template::\n\n 'input_tensor_data' : list of tuples representing the shape of the input paramters\n 'output_tensor_data' : list of tuples representing the shape of the output of the model\n " ]
Please provide a description of the function:def conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \ stride=(1,1), act_type="relu", use_batchnorm=False): conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad, \ stride=stride, num_filter=num_filter, name="{}_conv".format(name)) if use_batchnorm: conv = mx.symbol.BatchNorm(data=conv, name="{}_bn".format(name)) relu = mx.symbol.Activation(data=conv, act_type=act_type, \ name="{}_{}".format(name, act_type)) return relu
[ "\n wrapper for a small Convolution group\n\n Parameters:\n ----------\n from_layer : mx.symbol\n continue on which layer\n name : str\n base name of the new layers\n num_filter : int\n how many filters to use in Convolution layer\n kernel : tuple (int, int)\n kernel size (h, w)\n pad : tuple (int, int)\n padding size (h, w)\n stride : tuple (int, int)\n stride size (h, w)\n act_type : str\n activation type, can be relu...\n use_batchnorm : bool\n whether to use batch normalization\n\n Returns:\n ----------\n (conv, relu) mx.Symbols\n " ]
Please provide a description of the function:def legacy_conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \ stride=(1,1), act_type="relu", use_batchnorm=False): assert not use_batchnorm, "batchnorm not yet supported" bias = mx.symbol.Variable(name="conv{}_bias".format(name), init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'}) conv = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=kernel, pad=pad, \ stride=stride, num_filter=num_filter, name="conv{}".format(name)) relu = mx.symbol.Activation(data=conv, act_type=act_type, \ name="{}{}".format(act_type, name)) if use_batchnorm: relu = mx.symbol.BatchNorm(data=relu, name="bn{}".format(name)) return conv, relu
[ "\n wrapper for a small Convolution group\n\n Parameters:\n ----------\n from_layer : mx.symbol\n continue on which layer\n name : str\n base name of the new layers\n num_filter : int\n how many filters to use in Convolution layer\n kernel : tuple (int, int)\n kernel size (h, w)\n pad : tuple (int, int)\n padding size (h, w)\n stride : tuple (int, int)\n stride size (h, w)\n act_type : str\n activation type, can be relu...\n use_batchnorm : bool\n whether to use batch normalization\n\n Returns:\n ----------\n (conv, relu) mx.Symbols\n " ]
Please provide a description of the function:def multi_layer_feature(body, from_layers, num_filters, strides, pads, min_filter=128): # arguments check assert len(from_layers) > 0 assert isinstance(from_layers[0], str) and len(from_layers[0].strip()) > 0 assert len(from_layers) == len(num_filters) == len(strides) == len(pads) internals = body.get_internals() layers = [] for k, params in enumerate(zip(from_layers, num_filters, strides, pads)): from_layer, num_filter, s, p = params if from_layer.strip(): # extract from base network layer = internals[from_layer.strip() + '_output'] layers.append(layer) else: # attach from last feature layer assert len(layers) > 0 assert num_filter > 0 layer = layers[-1] num_1x1 = max(min_filter, num_filter // 2) conv_1x1 = conv_act_layer(layer, 'multi_feat_%d_conv_1x1' % (k), num_1x1, kernel=(1, 1), pad=(0, 0), stride=(1, 1), act_type='relu') conv_3x3 = conv_act_layer(conv_1x1, 'multi_feat_%d_conv_3x3' % (k), num_filter, kernel=(3, 3), pad=(p, p), stride=(s, s), act_type='relu') layers.append(conv_3x3) return layers
[ "Wrapper function to extract features from base network, attaching extra\n layers and SSD specific layers\n\n Parameters\n ----------\n from_layers : list of str\n feature extraction layers, use '' for add extra layers\n For example:\n from_layers = ['relu4_3', 'fc7', '', '', '', '']\n which means extract feature from relu4_3 and fc7, adding 4 extra layers\n on top of fc7\n num_filters : list of int\n number of filters for extra layers, you can use -1 for extracted features,\n however, if normalization and scale is applied, the number of filter for\n that layer must be provided.\n For example:\n num_filters = [512, -1, 512, 256, 256, 256]\n strides : list of int\n strides for the 3x3 convolution appended, -1 can be used for extracted\n feature layers\n pads : list of int\n paddings for the 3x3 convolution, -1 can be used for extracted layers\n min_filter : int\n minimum number of filters used in 1x1 convolution\n\n Returns\n -------\n list of mx.Symbols\n\n " ]
Please provide a description of the function:def multibox_layer(from_layers, num_classes, sizes=[.2, .95], ratios=[1], normalization=-1, num_channels=[], clip=False, interm_layer=0, steps=[]): assert len(from_layers) > 0, "from_layers must not be empty list" assert num_classes > 0, \ "num_classes {} must be larger than 0".format(num_classes) assert len(ratios) > 0, "aspect ratios must not be empty list" if not isinstance(ratios[0], list): # provided only one ratio list, broadcast to all from_layers ratios = [ratios] * len(from_layers) assert len(ratios) == len(from_layers), \ "ratios and from_layers must have same length" assert len(sizes) > 0, "sizes must not be empty list" if len(sizes) == 2 and not isinstance(sizes[0], list): # provided size range, we need to compute the sizes for each layer assert sizes[0] > 0 and sizes[0] < 1 assert sizes[1] > 0 and sizes[1] < 1 and sizes[1] > sizes[0] tmp = np.linspace(sizes[0], sizes[1], num=(len(from_layers)-1)) # Ref for start_offset value: # https://arxiv.org/abs/1512.02325 start_offset = 0.1 min_sizes = [start_offset] + tmp.tolist() max_sizes = tmp.tolist() + [tmp[-1]+start_offset] sizes = zip(min_sizes, max_sizes) assert len(sizes) == len(from_layers), \ "sizes and from_layers must have same length" if not isinstance(normalization, list): normalization = [normalization] * len(from_layers) assert len(normalization) == len(from_layers) assert sum(x > 0 for x in normalization) <= len(num_channels), \ "must provide number of channels for each normalized layer" if steps: assert len(steps) == len(from_layers), "provide steps for all layers or leave empty" loc_pred_layers = [] cls_pred_layers = [] anchor_layers = [] num_classes += 1 # always use background as label 0 for k, from_layer in enumerate(from_layers): from_name = from_layer.name # normalize if normalization[k] > 0: from_layer = mx.symbol.L2Normalization(data=from_layer, \ mode="channel", name="{}_norm".format(from_name)) scale = mx.symbol.Variable(name="{}_scale".format(from_name), shape=(1, num_channels.pop(0), 1, 1), init=mx.init.Constant(normalization[k]), attr={'__wd_mult__': '0.1'}) from_layer = mx.symbol.broadcast_mul(lhs=scale, rhs=from_layer) if interm_layer > 0: from_layer = mx.symbol.Convolution(data=from_layer, kernel=(3,3), \ stride=(1,1), pad=(1,1), num_filter=interm_layer, \ name="{}_inter_conv".format(from_name)) from_layer = mx.symbol.Activation(data=from_layer, act_type="relu", \ name="{}_inter_relu".format(from_name)) # estimate number of anchors per location # here I follow the original version in caffe # TODO: better way to shape the anchors??
size = sizes[k] assert len(size) > 0, "must provide at least one size" size_str = "(" + ",".join([str(x) for x in size]) + ")" ratio = ratios[k] assert len(ratio) > 0, "must provide at least one ratio" ratio_str = "(" + ",".join([str(x) for x in ratio]) + ")" num_anchors = len(size) -1 + len(ratio) # create location prediction layer num_loc_pred = num_anchors * 4 bias = mx.symbol.Variable(name="{}_loc_pred_conv_bias".format(from_name), init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'}) loc_pred = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=(3,3), \ stride=(1,1), pad=(1,1), num_filter=num_loc_pred, \ name="{}_loc_pred_conv".format(from_name)) loc_pred = mx.symbol.transpose(loc_pred, axes=(0,2,3,1)) loc_pred = mx.symbol.Flatten(data=loc_pred) loc_pred_layers.append(loc_pred) # create class prediction layer num_cls_pred = num_anchors * num_classes bias = mx.symbol.Variable(name="{}_cls_pred_conv_bias".format(from_name), init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'}) cls_pred = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=(3,3), \ stride=(1,1), pad=(1,1), num_filter=num_cls_pred, \ name="{}_cls_pred_conv".format(from_name)) cls_pred = mx.symbol.transpose(cls_pred, axes=(0,2,3,1)) cls_pred = mx.symbol.Flatten(data=cls_pred) cls_pred_layers.append(cls_pred) # create anchor generation layer if steps: step = (steps[k], steps[k]) else: step = '(-1.0, -1.0)' anchors = mx.symbol.contrib.MultiBoxPrior(from_layer, sizes=size_str, ratios=ratio_str, clip=clip, name="{}_anchors".format(from_name), steps=step) anchors = mx.symbol.Flatten(data=anchors) anchor_layers.append(anchors) loc_preds = mx.symbol.Concat(*loc_pred_layers, num_args=len(loc_pred_layers), \ dim=1, name="multibox_loc_pred") cls_preds = mx.symbol.Concat(*cls_pred_layers, num_args=len(cls_pred_layers), \ dim=1) cls_preds = mx.symbol.Reshape(data=cls_preds, shape=(0, -1, num_classes)) cls_preds = mx.symbol.transpose(cls_preds, axes=(0, 2, 1), name="multibox_cls_pred") anchor_boxes = mx.symbol.Concat(*anchor_layers, \ num_args=len(anchor_layers), dim=1) anchor_boxes = mx.symbol.Reshape(data=anchor_boxes, shape=(0, -1, 4), name="multibox_anchors") return [loc_preds, cls_preds, anchor_boxes]
[ "\n the basic aggregation module for SSD detection. Takes in multiple layers,\n generate multiple object detection targets by customized layers\n\n Parameters:\n ----------\n from_layers : list of mx.symbol\n generate multibox detection from layers\n num_classes : int\n number of classes excluding background, will automatically handle\n background in this function\n sizes : list or list of list\n [min_size, max_size] for all layers or [[], [], []...] for specific layers\n ratios : list or list of list\n [ratio1, ratio2...] for all layers or [[], [], ...] for specific layers\n normalizations : int or list of int\n use normalizations value for all layers or [...] for specific layers,\n -1 indicate no normalizations and scales\n num_channels : list of int\n number of input layer channels, used when normalization is enabled, the\n length of list should equals to number of normalization layers\n clip : bool\n whether to clip out-of-image boxes\n interm_layer : int\n if > 0, will add a intermediate Convolution layer\n steps : list\n specify steps for each MultiBoxPrior layer, leave empty, it will calculate\n according to layer dimensions\n\n Returns:\n ----------\n list of outputs, as [loc_preds, cls_preds, anchor_boxes]\n loc_preds : localization regression prediction\n cls_preds : classification prediction\n anchor_boxes : generated anchor boxes\n " ]
Please provide a description of the function:def _apply_weighting(F, loss, weight=None, sample_weight=None): if sample_weight is not None: loss = F.broadcast_mul(loss, sample_weight) if weight is not None: assert isinstance(weight, numeric_types), "weight must be a number" loss = loss * weight return loss
[ "Apply weighting to loss.\n\n Parameters\n ----------\n loss : Symbol\n The loss to be weighted.\n weight : float or None\n Global scalar weight for loss.\n sample_weight : Symbol or None\n Per sample weighting. Must be broadcastable to\n the same shape as loss. For example, if loss has\n shape (64, 10) and you want to weight each sample\n in the batch separately, `sample_weight` should have\n shape (64, 1).\n\n Returns\n -------\n loss : Symbol\n Weighted loss\n " ]
Please provide a description of the function:def _reshape_like(F, x, y): return x.reshape(y.shape) if F is ndarray else F.reshape_like(x, y)
[ "Reshapes x to the same shape as y." ]
Please provide a description of the function:def get_tv_grad_executor(img, ctx, tv_weight): if tv_weight <= 0.0: return None nchannel = img.shape[1] simg = mx.sym.Variable("img") skernel = mx.sym.Variable("kernel") channels = mx.sym.SliceChannel(simg, num_outputs=nchannel) out = mx.sym.Concat(*[ mx.sym.Convolution(data=channels[i], weight=skernel, num_filter=1, kernel=(3, 3), pad=(1,1), no_bias=True, stride=(1,1)) for i in range(nchannel)]) kernel = mx.nd.array(np.array([[0, -1, 0], [-1, 4, -1], [0, -1, 0]]) .reshape((1, 1, 3, 3)), ctx) / 8.0 out = out * tv_weight return out.bind(ctx, args={"img": img, "kernel": kernel})
[ "create TV gradient executor with input binded on img\n " ]
Please provide a description of the function:def train_nstyle(args, callback=None): # input dev = mx.gpu(args.gpu) if args.gpu >= 0 else mx.cpu() content_np = PreprocessContentImage(args.content_image, args.max_long_edge) style_np = PreprocessStyleImage(args.style_image, shape=content_np.shape) size = content_np.shape[2:] # model Executor = namedtuple('Executor', ['executor', 'data', 'data_grad']) model_module = importlib.import_module('model_' + args.model) style, content = model_module.get_symbol() gram, gscale = style_gram_symbol(size, style) model_executor = model_module.get_executor(gram, content, size, dev) model_executor.data[:] = style_np model_executor.executor.forward() style_array = [] for i in range(len(model_executor.style)): style_array.append(model_executor.style[i].copyto(mx.cpu())) model_executor.data[:] = content_np model_executor.executor.forward() content_array = model_executor.content.copyto(mx.cpu()) # delete the executor del model_executor style_loss, content_loss = get_loss(gram, content) model_executor = model_module.get_executor( style_loss, content_loss, size, dev) grad_array = [] for i in range(len(style_array)): style_array[i].copyto(model_executor.arg_dict["target_gram_%d" % i]) grad_array.append(mx.nd.ones((1,), dev) * (float(args.style_weight) / gscale[i])) grad_array.append(mx.nd.ones((1,), dev) * (float(args.content_weight))) print([x.asscalar() for x in grad_array]) content_array.copyto(model_executor.arg_dict["target_content"]) # train # initialize img with random noise img = mx.nd.zeros(content_np.shape, ctx=dev) img[:] = mx.rnd.uniform(-0.1, 0.1, img.shape) lr = mx.lr_scheduler.FactorScheduler(step=args.lr_sched_delay, factor=args.lr_sched_factor) optimizer = mx.optimizer.NAG( learning_rate = args.lr, wd = 0.0001, momentum=0.95, lr_scheduler = lr) optim_state = optimizer.create_state(0, img) logging.info('start training arguments %s', args) old_img = img.copyto(dev) clip_norm = 1 * np.prod(img.shape) tv_grad_executor = get_tv_grad_executor(img, dev, args.tv_weight) for e in range(args.max_num_epochs): img.copyto(model_executor.data) model_executor.executor.forward() model_executor.executor.backward(grad_array) gnorm = mx.nd.norm(model_executor.data_grad).asscalar() if gnorm > clip_norm: model_executor.data_grad[:] *= clip_norm / gnorm if tv_grad_executor is not None: tv_grad_executor.forward() optimizer.update(0, img, model_executor.data_grad + tv_grad_executor.outputs[0], optim_state) else: optimizer.update(0, img, model_executor.data_grad, optim_state) new_img = img eps = (mx.nd.norm(old_img - new_img) / mx.nd.norm(new_img)).asscalar() old_img = new_img.copyto(dev) logging.info('epoch %d, relative change %f', e, eps) if eps < args.stop_eps: logging.info('eps < args.stop_eps, training finished') break if callback: cbdata = { 'eps': eps, 'epoch': e+1, } if (e+1) % args.save_epochs == 0: outfn = args.output_dir + 'e_'+str(e+1)+'.jpg' npimg = new_img.asnumpy() SaveImage(npimg, outfn, args.remove_noise) if callback: cbdata['filename'] = outfn cbdata['img'] = npimg if callback: callback(cbdata) final_fn = args.output_dir + '/final.jpg' SaveImage(new_img.asnumpy(), final_fn)
[ "Train a neural style network.\n Args are from argparse and control input, output, hyper-parameters.\n callback allows for display of training progress.\n " ]
Please provide a description of the function:def _get_batch(self): batch_data = mx.nd.zeros((self.batch_size, 3, self._data_shape[0], self._data_shape[1])) batch_label = [] for i in range(self.batch_size): if (self._current + i) >= self._size: if not self.is_train: continue # use padding from middle in each epoch idx = (self._current + i + self._size // 2) % self._size index = self._index[idx] else: index = self._index[self._current + i] # index = self.debug_index im_path = self._imdb.image_path_from_index(index) with open(im_path, 'rb') as fp: img_content = fp.read() img = mx.img.imdecode(img_content) gt = self._imdb.label_from_index(index).copy() if self.is_train else None data, label = self._data_augmentation(img, gt) batch_data[i] = data if self.is_train: batch_label.append(label) self._data = {'data': batch_data} if self.is_train: self._label = {'label': mx.nd.array(np.array(batch_label))} else: self._label = {'label': None}
[ "\n Load data/label from dataset\n " ]
Please provide a description of the function:def _data_augmentation(self, data, label): if self.is_train and self._rand_samplers: rand_crops = [] for rs in self._rand_samplers: rand_crops += rs.sample(label) num_rand_crops = len(rand_crops) # randomly pick up one as input data if num_rand_crops > 0: index = int(np.random.uniform(0, 1) * num_rand_crops) width = data.shape[1] height = data.shape[0] crop = rand_crops[index][0] xmin = int(crop[0] * width) ymin = int(crop[1] * height) xmax = int(crop[2] * width) ymax = int(crop[3] * height) if xmin >= 0 and ymin >= 0 and xmax <= width and ymax <= height: data = mx.img.fixed_crop(data, xmin, ymin, xmax-xmin, ymax-ymin) else: # padding mode new_width = xmax - xmin new_height = ymax - ymin offset_x = 0 - xmin offset_y = 0 - ymin data_bak = data data = mx.nd.full((new_height, new_width, 3), 128, dtype='uint8') data[offset_y:offset_y+height, offset_x:offset_x + width, :] = data_bak label = rand_crops[index][1] if self.is_train: interp_methods = [cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, \ cv2.INTER_NEAREST, cv2.INTER_LANCZOS4] else: interp_methods = [cv2.INTER_LINEAR] interp_method = interp_methods[int(np.random.uniform(0, 1) * len(interp_methods))] data = mx.img.imresize(data, self._data_shape[1], self._data_shape[0], interp_method) if self.is_train and self._rand_mirror: if np.random.uniform(0, 1) > 0.5: data = mx.nd.flip(data, axis=1) valid_mask = np.where(label[:, 0] > -1)[0] tmp = 1.0 - label[valid_mask, 1] label[valid_mask, 1] = 1.0 - label[valid_mask, 3] label[valid_mask, 3] = tmp data = mx.nd.transpose(data, (2,0,1)) data = data.astype('float32') data = data - self._mean_pixels return data, label
[ "\n perform data augmentations: crop, mirror, resize, sub mean, swap channels...\n " ]
Please provide a description of the function:def get_mnist(): np.random.seed(1234) # set seed for deterministic ordering mnist_data = mx.test_utils.get_mnist() X = np.concatenate([mnist_data['train_data'], mnist_data['test_data']]) Y = np.concatenate([mnist_data['train_label'], mnist_data['test_label']]) p = np.random.permutation(X.shape[0]) X = X[p].reshape((X.shape[0], -1)).astype(np.float32)*5 Y = Y[p] return X, Y
[ " Gets MNIST dataset " ]
Please provide a description of the function:def _split_input_slice(batch_size, work_load_list): total_work_load = sum(work_load_list) batch_num_list = [round(work_load * batch_size / total_work_load) for work_load in work_load_list] batch_num_sum = sum(batch_num_list) if batch_num_sum < batch_size: batch_num_list[-1] += batch_size - batch_num_sum slices = [] end = 0 for batch_num in batch_num_list: begin = int(min((end, batch_size))) end = int(min((begin + batch_num, batch_size))) if begin >= end: raise ValueError('Too many slices. Some splits are empty.') slices.append(slice(begin, end)) return slices
[ "Get input slice from the input shape.\n\n Parameters\n ----------\n batch_size : int\n The number of samples in a mini-batch.\n work_load_list : list of float or int, optional\n The list of work load for different devices,\n in the same order as `ctx`.\n\n Returns\n -------\n slices : list of slice\n The split slices to get a specific slice.\n\n Raises\n ------\n ValueError\n In case of too many splits, leading to some empty slices.\n " ]
Please provide a description of the function:def _check_arguments(symbol): arg_set = set() arg_names = symbol.list_arguments() for name in arg_names: if name in arg_set: raise ValueError(('Find duplicated argument name \"%s\", ' + 'please make the weight name non-duplicated(using name arguments), ' + 'arguments are %s') % (name, str(arg_names))) arg_set.add(name) aux_set = set() aux_names = symbol.list_auxiliary_states() for name in aux_names: if name in aux_set: raise ValueError( ('Find duplicated auxiliary param name \"%s\", ' + 'please make the weight name non-duplicated(using name arguments), ' + 'arguments are %s, auxiliary params are %s' ) % (name, str(arg_names), str(aux_names))) aux_set.add(name)
[ "Check the argument names of symbol.\n This function checks the duplication of arguments in Symbol.\n The check is done for feedforward net for now.\n\n Parameters\n ----------\n symbol : Symbol\n The network configuration.\n " ]
Please provide a description of the function:def _load_general(data, targets): for d_src, d_targets in zip(data, targets): if isinstance(d_targets, nd.NDArray): d_src.copyto(d_targets) else: assert d_targets[-1][0].stop == d_src.shape[0], \ "Batch size miss match. Expected %d, got %d"%( \ d_targets[-1][0].stop, d_src.shape[0]) for slice_idx, d_dst in d_targets: d_src[slice_idx].copyto(d_dst)
[ "Load a list of arrays into a list of arrays specified by slices." ]
Please provide a description of the function:def _bind_exec(sym, ctx, input_shapes, param_names, need_grad=False, base_exec=None, shared_data_arrays=None, input_types=None, logger=logging): arg_shape, _, aux_shape = sym.infer_shape(**input_shapes) assert(arg_shape is not None) if input_types is None: input_types = {k: mx_real_t for k in input_shapes.keys()} arg_types, _, aux_types = sym.infer_type(**input_types) assert(arg_types is not None) arg_arrays = [] grad_arrays = {} if need_grad is not False else None arg_names = sym.list_arguments() if need_grad is False: need_grad = set() elif need_grad is True: need_grad = set(arg_names) - set(input_shapes.keys()) elif isinstance(need_grad, set): pass else: raise AssertionError("need_grad must be boolean or set.") grad_req = {name:('write' if name in need_grad else 'null') for name in arg_names} # create or borrow arguments and gradients for i, name in enumerate(arg_names): if not name in param_names: # data or label if shared_data_arrays is not None and \ name in shared_data_arrays: arg_arr = shared_data_arrays[name] if np.prod(arg_arr.shape) >= np.prod(arg_shape[i]): # good, we can share this memory assert(arg_types[i] == arg_arr.dtype) arg_arr = arg_arr.reshape(arg_shape[i]) else: logger.warning(('bucketing: data "%s" has a shape %s' % (name, arg_shape[i])) + (', which is larger than already allocated ') + ('shape %s' % (arg_arr.shape,)) + ('. Need to re-allocate. Consider putting ') + ('default_bucket_key to be the bucket taking the largest ') + ('input for better memory sharing.')) arg_arr = nd.zeros(arg_shape[i], ctx, dtype=arg_types[i]) # replace existing shared array because the new one is bigger shared_data_arrays[name] = arg_arr else: arg_arr = nd.zeros(arg_shape[i], ctx, dtype=arg_types[i]) if shared_data_arrays is not None: shared_data_arrays[name] = arg_arr arg_arrays.append(arg_arr) else: # model parameter if base_exec is None: arg_arr = nd.zeros(arg_shape[i], ctx, dtype=arg_types[i]) if name in need_grad: grad_arr = nd.zeros(arg_shape[i], ctx, dtype=arg_types[i]) grad_arrays[name] = grad_arr else: arg_arr = base_exec.arg_dict[name] assert arg_arr.shape == arg_shape[i] assert arg_arr.dtype == arg_types[i] if name in need_grad: grad_arrays[name] = base_exec.grad_dict[name] arg_arrays.append(arg_arr) # create or borrow aux variables if base_exec is None: aux_arrays = [nd.zeros(s, ctx, dtype=t) for s, t in zip(aux_shape, aux_types)] else: for i, a in enumerate(base_exec.aux_arrays): assert aux_shape[i] == a.shape assert aux_types[i] == a.dtype aux_arrays = [a for a in base_exec.aux_arrays] executor = sym.bind(ctx=ctx, args=arg_arrays, args_grad=grad_arrays, aux_states=aux_arrays, grad_req=grad_req, shared_exec=base_exec) return executor
[ "bind executor for bucketing, potentially sharing data with an existing executor." ]
Please provide a description of the function:def load_data_batch(self, data_batch): _load_data(data_batch, self.data_arrays) _load_label(data_batch, self.label_arrays)
[ "Load data and labels into arrays." ]
Please provide a description of the function:def forward(self, is_train=False): for texec in self.train_execs: texec.forward(is_train=is_train)
[ "Perform a forward pass on each executor." ]
Please provide a description of the function:def update_metric(self, metric, labels, pre_sliced=False): for current_exec, (texec, islice) in enumerate(zip(self.train_execs, self.slices)): if not pre_sliced: labels_slice = [label[islice] for label in labels] else: labels_slice = labels[current_exec] metric.update(labels_slice, texec.outputs)
[ "Update evaluation metric with label and current outputs." ]
Please provide a description of the function:def install_monitor(self, monitor): if self.sym_gen is not None: raise NotImplementedError("Monitoring is not implemented for bucketing") for train_exec in self.execgrp.train_execs: monitor.install(train_exec)
[ "Install monitor on all executors." ]
Please provide a description of the function:def set_params(self, arg_params, aux_params): for texec in self.execgrp.train_execs: texec.copy_params_from(arg_params, aux_params)
[ "Set parameter and aux values.\n\n Parameters\n ----------\n arg_params : list of NDArray\n Source parameter arrays\n aux_params : list of NDArray\n Source aux arrays.\n " ]
Please provide a description of the function:def load_data_batch(self, data_batch): if self.sym_gen is not None: key = data_batch.bucket_key if key not in self.execgrp_bucket: # create new bucket entry symbol = self.sym_gen(key) execgrp = DataParallelExecutorGroup(symbol, self.arg_names, self.param_names, self.ctx, self.slices, data_batch, shared_group=self.execgrp) self.execgrp_bucket[key] = execgrp self.curr_execgrp = self.execgrp_bucket[key] else: self.curr_execgrp = self.execgrp self.curr_execgrp.load_data_batch(data_batch)
[ "Load data and labels into arrays." ]
Please provide a description of the function:def update_metric(self, metric, labels, pre_sliced=False): self.curr_execgrp.update_metric(metric, labels, pre_sliced)
[ "Update metric with the current executor." ]
Please provide a description of the function:def clear(self): self.states[:] = 0 self.actions[:] = 0 self.rewards[:] = 0 self.terminate_flags[:] = 0 self.top = 0 self.size = 0
[ "\n Clear all contents in the relay memory\n " ]
Please provide a description of the function:def get_header_guard_dmlc(filename): fileinfo = cpplint.FileInfo(filename) file_path_from_root = fileinfo.RepositoryName() inc_list = ['include', 'api', 'wrapper'] if file_path_from_root.find('src/') != -1 and _HELPER.project_name is not None: idx = file_path_from_root.find('src/') file_path_from_root = _HELPER.project_name + file_path_from_root[idx + 3:] else: for spath in inc_list: prefix = spath + os.sep if file_path_from_root.startswith(prefix): file_path_from_root = re.sub('^' + prefix, '', file_path_from_root) break return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
[ "Get Header Guard Convention for DMLC Projects.\n For headers in include, directly use the path\n For headers in src, use project name plus path\n Examples: with project-name = dmlc\n include/dmlc/timer.h -> DMLC_TIMTER_H_\n src/io/libsvm_parser.h -> DMLC_IO_LIBSVM_PARSER_H_\n " ]
Please provide a description of the function:def process(fname, allow_type): fname = str(fname) # HACK: ignore op.h which is automatically generated if fname.endswith('op.h'): return arr = fname.rsplit('.', 1) if fname.find('#') != -1 or arr[-1] not in allow_type: return if arr[-1] in CXX_SUFFIX: _HELPER.process_cpp(fname, arr[-1]) if arr[-1] in PYTHON_SUFFIX: _HELPER.process_python(fname)
[ "Process a file." ]
Please provide a description of the function:def main(): if len(sys.argv) < 3: print('Usage: <project-name> <filetype> <list-of-path to traverse>') print('\tfiletype can be python/cpp/all') exit(-1) _HELPER.project_name = sys.argv[1] file_type = sys.argv[2] allow_type = [] if file_type == 'python' or file_type == 'all': allow_type += [x for x in PYTHON_SUFFIX] if file_type == 'cpp' or file_type == 'all': allow_type += [x for x in CXX_SUFFIX] allow_type = set(allow_type) if os.name != 'nt': sys.stderr = codecs.StreamReaderWriter(sys.stderr, codecs.getreader('utf8'), codecs.getwriter('utf8'), 'replace') for path in sys.argv[3:]: if os.path.isfile(path): process(path, allow_type) else: for root, dirs, files in os.walk(path): for name in files: process(os.path.join(root, name), allow_type) nerr = _HELPER.print_summary(sys.stderr) sys.exit(nerr > 0)
[ "Main entry function." ]
Please provide a description of the function:def _print_summary_map(strm, result_map, ftype): if len(result_map) == 0: return 0 npass = len([x for k, x in result_map.iteritems() if len(x) == 0]) strm.write('=====%d/%d %s files passed check=====\n' % (npass, len(result_map), ftype)) for fname, emap in result_map.iteritems(): if len(emap) == 0: continue strm.write('%s: %d Errors of %d Categories map=%s\n' % ( fname, sum(emap.values()), len(emap), str(emap))) return len(result_map) - npass
[ "Print summary of certain result map." ]
Please provide a description of the function:def process_cpp(self, path, suffix): _cpplint_state.ResetErrorCounts() cpplint.ProcessFile(str(path), _cpplint_state.verbose_level) _cpplint_state.PrintErrorCounts() errors = _cpplint_state.errors_by_category.copy() if suffix == 'h': self.cpp_header_map[str(path)] = errors else: self.cpp_src_map[str(path)] = errors
[ "Process a cpp file." ]
Please provide a description of the function:def process_python(self, path): (pylint_stdout, pylint_stderr) = epylint.py_run( ' '.join([str(path)] + self.pylint_opts), return_std=True) emap = {} print(pylint_stderr.read()) for line in pylint_stdout: sys.stderr.write(line) key = line.split(':')[-1].split('(')[0].strip() if key not in self.pylint_cats: continue if key not in emap: emap[key] = 1 else: emap[key] += 1 sys.stderr.write('\n') self.python_map[str(path)] = emap
[ "Process a python file." ]
Please provide a description of the function:def print_summary(self, strm): nerr = 0 nerr += LintHelper._print_summary_map(strm, self.cpp_header_map, 'cpp-header') nerr += LintHelper._print_summary_map(strm, self.cpp_src_map, 'cpp-soruce') nerr += LintHelper._print_summary_map(strm, self.python_map, 'python') if nerr == 0: strm.write('All passed!\n') else: strm.write('%d files failed lint\n' % nerr) return nerr
[ "Print summary of lint." ]
Please provide a description of the function:def _init_kvstore_server_module(): is_worker = ctypes.c_int() check_call(_LIB.MXKVStoreIsWorkerNode(ctypes.byref(is_worker))) if is_worker.value == 0: kvstore = create('dist') server = KVStoreServer(kvstore) server.run() sys.exit()
[ "Start server/scheduler." ]
Please provide a description of the function:def _controller(self): def server_controller(cmd_id, cmd_body, _): if not self.init_logginig: # the reason put the codes here is because we cannot get # kvstore.rank earlier head = '%(asctime)-15s Server[' + str( self.kvstore.rank) + '] %(message)s' logging.basicConfig(level=logging.DEBUG, format=head) self.init_logginig = True if cmd_id == 0: try: optimizer = pickle.loads(cmd_body) except: raise self.kvstore.set_optimizer(optimizer) else: print("server %d, unknown command (%d, %s)" % ( self.kvstore.rank, cmd_id, cmd_body)) return server_controller
[ "Return the server controller.", "Server controler." ]
Please provide a description of the function:def run(self): _ctrl_proto = ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_char_p, ctypes.c_void_p) check_call(_LIB.MXKVStoreRunServer(self.handle, _ctrl_proto(self._controller()), None))
[ "Run the server, whose behavior is like.\n\n\n >>> while receive(x):\n ... if is_command x: controller(x)\n ... else if is_key_value x: updater(x)\n " ]
Please provide a description of the function:def _generate_ndarray_function_code(handle, name, func_name, signature_only=False): real_name = ctypes.c_char_p() desc = ctypes.c_char_p() num_args = mx_uint() arg_names = ctypes.POINTER(ctypes.c_char_p)() arg_types = ctypes.POINTER(ctypes.c_char_p)() arg_descs = ctypes.POINTER(ctypes.c_char_p)() key_var_num_args = ctypes.c_char_p() ret_type = ctypes.c_char_p() check_call(_LIB.MXSymbolGetAtomicSymbolInfo( handle, ctypes.byref(real_name), ctypes.byref(desc), ctypes.byref(num_args), ctypes.byref(arg_names), ctypes.byref(arg_types), ctypes.byref(arg_descs), ctypes.byref(key_var_num_args), ctypes.byref(ret_type))) narg = int(num_args.value) arg_names = [py_str(arg_names[i]) for i in range(narg)] arg_types = [py_str(arg_types[i]) for i in range(narg)] key_var_num_args = py_str(key_var_num_args.value) ret_type = py_str(ret_type.value) if ret_type.value is not None else '' doc_str = _build_doc(name, py_str(desc.value), arg_names, arg_types, [py_str(arg_descs[i]) for i in range(narg)], key_var_num_args, ret_type) dtype_name = None arr_name = None ndsignature = [] signature = [] ndarg_names = [] kwarg_names = [] for i in range(narg): name, atype = arg_names[i], arg_types[i] if name == 'dtype': dtype_name = name signature.append('%s=_Null'%name) elif atype.startswith('NDArray') or atype.startswith('Symbol'): assert not arr_name, \ "Op can only have one argument with variable " \ "size and it must be the last argument." if atype.endswith('[]'): ndsignature.append('*%s'%name) arr_name = name else: ndsignature.append('%s=None'%name) ndarg_names.append(name) else: signature.append('%s=_Null'%name) kwarg_names.append(name) signature.append('out=None') signature.append('name=None') signature.append('**kwargs') signature = ndsignature + signature code = [] if arr_name: code.append(%(func_name, arr_name)) if not signature_only: code.append(.format(arr_name)) if dtype_name is not None: code.append(%( dtype_name, dtype_name, dtype_name)) code.append() else: code.append(%(func_name, ', '.join(signature))) if not signature_only: code.append() # NDArray args for name in ndarg_names: # pylint: disable=redefined-argument-from-local code.append(.format(name=name)) # kwargs for name in kwarg_names: # pylint: disable=redefined-argument-from-local code.append(%(name, name, name)) # dtype if dtype_name is not None: code.append(%(dtype_name, dtype_name, dtype_name)) if not signature_only: code.append(%( handle.value)) else: code.append() doc_str_lines = _os.linesep+''.join([' '+s if s.strip() else s for s in 'r'.format(doc_str=doc_str) .splitlines(True)]) code.insert(1, doc_str_lines) return ''.join(code), doc_str
[ "Generate function for ndarray op by handle and function name.", "\ndef %s(*%s, **kwargs):", "\n ndargs = []\n for i in {}:\n assert isinstance(i, NDArrayBase), \\\\\n \"Positional arguments must have NDArray type, \" \\\\\n \"but got %s\"%str(i)\n ndargs.append(i)", "\n if '%s' in kwargs:\n kwargs['%s'] = _np.dtype(kwargs['%s']).name", "\n _ = kwargs.pop('name', None)\n out = kwargs.pop('out', None)\n keys = list(kwargs.keys())\n vals = list(kwargs.values())", "\ndef %s(%s):", "\n ndargs = []\n keys = list(kwargs.keys())\n vals = list(kwargs.values())", "\n if {name} is not None:\n assert isinstance({name}, NDArrayBase), \\\\\n \"Argument {name} must have NDArray type, but got %s\"%str({name})\n ndargs.append({name})", "\n if %s is not _Null:\n keys.append('%s')\n vals.append(%s)", "\n if %s is not _Null:\n keys.append('%s')\n vals.append(_np.dtype(%s).name)", "\n return _imperative_invoke(%d, ndargs, keys, vals, out)", "\n return (0,)", "{doc_str}" ]
Please provide a description of the function:def _make_ndarray_function(handle, name, func_name): code, doc_str = _generate_ndarray_function_code(handle, name, func_name) local = {} exec(code, None, local) # pylint: disable=exec-used ndarray_function = local[func_name] ndarray_function.__name__ = func_name ndarray_function.__doc__ = doc_str ndarray_function.__module__ = 'mxnet.ndarray' return ndarray_function
[ "Create a NDArray function from the FunctionHandle." ]
Please provide a description of the function:def count_tokens_from_str(source_str, token_delim=' ', seq_delim='\n', to_lower=False, counter_to_update=None): source_str = filter(None, re.split(token_delim + '|' + seq_delim, source_str)) if to_lower: source_str = [t.lower() for t in source_str] if counter_to_update is None: return collections.Counter(source_str) else: counter_to_update.update(source_str) return counter_to_update
[ "Counts tokens in the specified string.\n\n For token_delim=\\'<td>\\' and seq_delim=\\'<sd>\\', a specified string of two sequences of\n tokens may look like::\n\n <td>token1<td>token2<td>token3<td><sd><td>token4<td>token5<td><sd>\n\n <td> and <sd> are regular expressions. Make use of \\\\\\\\ to allow special characters as\n delimiters. The list of\n special characters can be found at https://docs.python.org/3/library/re.html.\n\n Parameters\n ----------\n source_str : str\n A source string of tokens.\n token_delim : str, default ' '\n A token delimiter.\n seq_delim : str, default '\\\\\\\\n'\n A sequence delimiter.\n to_lower : bool, default False\n Whether to convert the source source_str to the lower case.\n counter_to_update : collections.Counter or None, default None\n The collections.Counter instance to be updated with the token counts of `source_str`. If\n None, return a new collections.Counter instance counting tokens from `source_str`.\n\n\n Returns\n -------\n collections.Counter\n The `counter_to_update` collections.Counter instance after being updated with the token\n counts of `source_str`. If `counter_to_update` is None, return a new collections.Counter\n instance counting tokens from `source_str`.\n\n\n Examples\n --------\n >>> source_str = ' Life is great ! \\\\n life is good . \\\\n'\n >>> count_tokens_from_str(token_line, ' ', '\\\\n', True)\n Counter({'!': 1, '.': 1, 'good': 1, 'great': 1, 'is': 2, 'life': 2})\n\n\n >>> source_str = '*Life*is*great*!*\\\\n*life*is*good*.*\\\\n'\n >>> count_tokens_from_str(token_line, '\\\\*', '\\\\n', True)\n Counter({'is': 2, 'life': 2, '!': 1, 'great': 1, 'good': 1, '.': 1})\n " ]
Please provide a description of the function:def zeros(shape, ctx=None, dtype=None, stype=None, **kwargs): if stype is None or stype == 'default': return _zeros_ndarray(shape, ctx, dtype, **kwargs) else: return _zeros_sparse_ndarray(stype, shape, ctx, dtype, **kwargs)
[ "Return a new array of given shape and type, filled with zeros.\n\n Parameters\n ----------\n shape : int or tuple of int\n The shape of the empty array\n ctx : Context, optional\n An optional device context (default is the current default context)\n dtype : str or numpy.dtype, optional\n An optional value type (default is `float32`)\n stype: string, optional\n The storage type of the empty array, such as 'row_sparse', 'csr', etc.\n\n Returns\n -------\n NDArray, CSRNDArray or RowSparseNDArray\n A created array\n Examples\n --------\n >>> mx.nd.zeros((1,2), mx.cpu(), stype='csr')\n <CSRNDArray 1x2 @cpu(0)>\n >>> mx.nd.zeros((1,2), mx.cpu(), 'float16', stype='row_sparse').asnumpy()\n array([[ 0., 0.]], dtype=float16)\n " ]
Please provide a description of the function:def empty(shape, ctx=None, dtype=None, stype=None): if stype is None or stype == 'default': return _empty_ndarray(shape, ctx, dtype) else: return _empty_sparse_ndarray(stype, shape, ctx, dtype)
[ "Returns a new array of given shape and type, without initializing entries.\n\n Parameters\n ----------\n shape : int or tuple of int\n The shape of the empty array.\n ctx : Context, optional\n An optional device context (default is the current default context).\n dtype : str or numpy.dtype, optional\n An optional value type (default is `float32`).\n stype : str, optional\n An optional storage type (default is `default`).\n\n Returns\n -------\n NDArray, CSRNDArray or RowSparseNDArray\n A created array.\n\n Examples\n --------\n >>> mx.nd.empty(1)\n <NDArray 1 @cpu(0)>\n >>> mx.nd.empty((1,2), mx.gpu(0))\n <NDArray 1x2 @gpu(0)>\n >>> mx.nd.empty((1,2), mx.gpu(0), 'float16')\n <NDArray 1x2 @gpu(0)>\n >>> mx.nd.empty((1,2), stype='csr')\n <CSRNDArray 1x2 @cpu(0)>\n " ]
Please provide a description of the function:def array(source_array, ctx=None, dtype=None): if spsp is not None and isinstance(source_array, spsp.csr.csr_matrix): return _sparse_array(source_array, ctx=ctx, dtype=dtype) elif isinstance(source_array, NDArray) and source_array.stype != 'default': return _sparse_array(source_array, ctx=ctx, dtype=dtype) else: return _array(source_array, ctx=ctx, dtype=dtype)
[ "Creates an array from any object exposing the array interface.\n\n Parameters\n ----------\n source_array : array_like\n An object exposing the array interface, an object whose `__array__`\n method returns an array, or any (nested) sequence.\n ctx : Context, optional\n Device context (default is the current default context).\n dtype : str or numpy.dtype, optional\n The data type of the output array. The default dtype is ``source_array.dtype``\n if `source_array` is an `NDArray`, `float32` otherwise.\n\n Returns\n -------\n NDArray, RowSparseNDArray or CSRNDArray\n An array with the same contents as the `source_array`.\n\n Examples\n --------\n >>> import numpy as np\n >>> mx.nd.array([1, 2, 3])\n <NDArray 3 @cpu(0)>\n >>> mx.nd.array([[1, 2], [3, 4]])\n <NDArray 2x2 @cpu(0)>\n >>> mx.nd.array(np.zeros((3, 2)))\n <NDArray 3x2 @cpu(0)>\n >>> mx.nd.array(np.zeros((3, 2)), mx.gpu(0))\n <NDArray 3x2 @gpu(0)>\n >>> mx.nd.array(mx.nd.zeros((3, 2), stype='row_sparse'))\n <RowSparseNDArray 3x2 @cpu(0)>\n " ]
Please provide a description of the function:def load(fname): if not isinstance(fname, string_types): raise TypeError('fname required to be a string') out_size = mx_uint() out_name_size = mx_uint() handles = ctypes.POINTER(NDArrayHandle)() names = ctypes.POINTER(ctypes.c_char_p)() check_call(_LIB.MXNDArrayLoad(c_str(fname), ctypes.byref(out_size), ctypes.byref(handles), ctypes.byref(out_name_size), ctypes.byref(names))) if out_name_size.value == 0: return [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(out_size.value)] else: assert out_name_size.value == out_size.value return dict( (py_str(names[i]), _ndarray_cls(NDArrayHandle(handles[i]))) for i in range(out_size.value))
[ "Loads an array from file.\n\n See more details in ``save``.\n\n Parameters\n ----------\n fname : str\n The filename.\n\n Returns\n -------\n list of NDArray, RowSparseNDArray or CSRNDArray, or \\\n dict of str to NDArray, RowSparseNDArray or CSRNDArray\n Loaded data.\n " ]
Please provide a description of the function:def load_frombuffer(buf): if not isinstance(buf, string_types + tuple([bytes])): raise TypeError('buf required to be a string or bytes') out_size = mx_uint() out_name_size = mx_uint() handles = ctypes.POINTER(NDArrayHandle)() names = ctypes.POINTER(ctypes.c_char_p)() check_call(_LIB.MXNDArrayLoadFromBuffer(buf, mx_uint(len(buf)), ctypes.byref(out_size), ctypes.byref(handles), ctypes.byref(out_name_size), ctypes.byref(names))) if out_name_size.value == 0: return [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(out_size.value)] else: assert out_name_size.value == out_size.value return dict( (py_str(names[i]), _ndarray_cls(NDArrayHandle(handles[i]))) for i in range(out_size.value))
[ "Loads an array dictionary or list from a buffer\n\n See more details in ``save``.\n\n Parameters\n ----------\n buf : str\n Buffer containing contents of a file as a string or bytes.\n\n Returns\n -------\n list of NDArray, RowSparseNDArray or CSRNDArray, or \\\n dict of str to NDArray, RowSparseNDArray or CSRNDArray\n Loaded data.\n " ]
Please provide a description of the function:def save(fname, data): if isinstance(data, NDArray): data = [data] handles = c_array(NDArrayHandle, []) if isinstance(data, dict): str_keys = data.keys() nd_vals = data.values() if any(not isinstance(k, string_types) for k in str_keys) or \ any(not isinstance(v, NDArray) for v in nd_vals): raise TypeError('save only accept dict str->NDArray or list of NDArray') keys = c_str_array(str_keys) handles = c_handle_array(nd_vals) elif isinstance(data, list): if any(not isinstance(v, NDArray) for v in data): raise TypeError('save only accept dict str->NDArray or list of NDArray') keys = None handles = c_handle_array(data) else: raise ValueError("data needs to either be a NDArray, dict of str, NDArray pairs " "or a list of NDarrays.") check_call(_LIB.MXNDArraySave(c_str(fname), mx_uint(len(handles)), handles, keys))
[ "Saves a list of arrays or a dict of str->array to file.\n\n Examples of filenames:\n\n - ``/path/to/file``\n - ``s3://my-bucket/path/to/file`` (if compiled with AWS S3 supports)\n - ``hdfs://path/to/file`` (if compiled with HDFS supports)\n\n Parameters\n ----------\n fname : str\n The filename.\n data : NDArray, RowSparseNDArray or CSRNDArray, \\\n or list of NDArray, RowSparseNDArray or CSRNDArray, \\\n or dict of str to NDArray, RowSparseNDArray or CSRNDArray\n The data to save.\n\n Examples\n --------\n >>> x = mx.nd.zeros((2,3))\n >>> y = mx.nd.ones((1,4))\n >>> mx.nd.save('my_list', [x,y])\n >>> mx.nd.save('my_dict', {'x':x, 'y':y})\n >>> mx.nd.load('my_list')\n [<NDArray 2x3 @cpu(0)>, <NDArray 1x4 @cpu(0)>]\n >>> mx.nd.load('my_dict')\n {'y': <NDArray 1x4 @cpu(0)>, 'x': <NDArray 2x3 @cpu(0)>}\n " ]
Please provide a description of the function:def _common_prefix(names): if not names: return '' prefix = names[0] for name in names: i = 0 while i < len(prefix) and i < len(name) and prefix[i] == name[i]: i += 1 prefix = prefix[:i] return prefix
[ "Get the common prefix for all names" ]
Please provide a description of the function:def _infer_param_types(in_params, out_params, arg_params, aux_params, default_dtype=mx_real_t): arg_types = None aux_types = None # Get Input symbol details. This will be used to infer types of # other parameters. input_sym_names = [in_param.name for in_param in in_params] # Try to infer input types. If not successful, we will set default dtype. # If successful, we will try to infer other params in the graph. input_sym_arg_types = [] can_infer_input_type = True for in_param in in_params: input_sym_arg_type = in_param.infer_type()[0] if not input_sym_arg_type or len(input_sym_arg_type) < 1: can_infer_input_type = False break else: input_sym_arg_types.append(in_param.infer_type()[0][0]) # Try to infer types of other parameters. if can_infer_input_type: params = {k:v for k, v in zip(input_sym_names, input_sym_arg_types)} arg_types, _, aux_types = out_params.infer_type(**params) if arg_types is None or len(arg_types) != len(arg_params): arg_types = [] for _ in arg_params: arg_types.append(default_dtype) if aux_types is None or len(aux_types) != len(aux_params): aux_types = [] for _ in aux_params: aux_types.append(default_dtype) return (arg_types, aux_types)
[ "Utility function that helps in inferring DType of args and auxs params\n from given input param.\n\n Parameters\n ----------\n in_params: List of Symbol\n List of input symbol variables.\n out_params: Symbol\n Output symbol variable.\n arg_params: List of Str\n List of names of argument parametrs.\n aux_params: List of Str\n List of names of auxiliary parameters.\n default_dtype: numpy.dtype or str, default 'float32'\n Default data type for arg_params and aux_params, if unable to infer the type.\n\n Returns\n -------\n arg_types: List of numpy.dtype\n List of arg_params type. Order is same as arg_params.\n Defaults to 'float32', if unable to infer type.\n aux_types: List of numpy.dtype\n List of aux_params type. Order is same as aux_params.\n Defaults to 'float32', if unable to infer type.\n " ]
Please provide a description of the function:def create(prefix, params, hint): current = getattr(_BlockScope._current, "value", None) if current is None: if prefix is None: if not hasattr(_name.NameManager._current, "value"): _name.NameManager._current.value = _name.NameManager() prefix = _name.NameManager._current.value.get(None, hint) + '_' if params is None: params = ParameterDict(prefix) else: params = ParameterDict(params.prefix, params) return prefix, params if prefix is None: count = current._counter.get(hint, 0) prefix = '%s%d_'%(hint, count) current._counter[hint] = count + 1 if params is None: parent = current._block.params params = ParameterDict(parent.prefix+prefix, parent._shared) else: params = ParameterDict(params.prefix, params) return current._block.prefix+prefix, params
[ "Creates prefix and params for new `Block`." ]
Please provide a description of the function:def collect_params(self, select=None): # We need to check here because blocks inside containers are not supported. self._check_container_with_block() ret = ParameterDict(self._params.prefix) if not select: ret.update(self.params) else: pattern = re.compile(select) ret.update({name:value for name, value in self.params.items() if pattern.match(name)}) for cld in self._children.values(): ret.update(cld.collect_params(select=select)) return ret
[ "Returns a :py:class:`ParameterDict` containing this :py:class:`Block` and all of its\n children's Parameters(default), also can returns the select :py:class:`ParameterDict`\n which match some given regular expressions.\n\n For example, collect the specified parameters in ['conv1_weight', 'conv1_bias', 'fc_weight',\n 'fc_bias']::\n\n model.collect_params('conv1_weight|conv1_bias|fc_weight|fc_bias')\n\n or collect all parameters whose names end with 'weight' or 'bias', this can be done\n using regular expressions::\n\n model.collect_params('.*weight|.*bias')\n\n Parameters\n ----------\n select : str\n regular expressions\n\n Returns\n -------\n The selected :py:class:`ParameterDict`\n " ]
Please provide a description of the function:def save_params(self, filename): warnings.warn("save_params is deprecated. Please use save_parameters. " "Note that if you want load from SymbolBlock later, please " "use export instead. For details, see " "https://mxnet.incubator.apache.org/tutorials/gluon/save_lo" "ad_params.html") try: self.collect_params().save(filename, strip_prefix=self.prefix) except ValueError as e: raise ValueError('%s\nsave_params is deprecated. Using ' \ 'save_parameters may resolve this error.'%e.message)
[ "[Deprecated] Please use save_parameters. Note that if you want load\n from SymbolBlock later, please use export instead.\n\n Save parameters to file.\n\n filename : str\n Path to file.\n " ]
Please provide a description of the function:def load_parameters(self, filename, ctx=None, allow_missing=False, ignore_extra=False): loaded = ndarray.load(filename) params = self._collect_params_with_prefix() if not loaded and not params: return if not any('.' in i for i in loaded.keys()): # legacy loading del loaded self.collect_params().load( filename, ctx, allow_missing, ignore_extra, self.prefix) return if not allow_missing: for name in params.keys(): assert name in loaded, \ "Parameter '%s' is missing in file '%s', which contains parameters: %s. " \ "Set allow_missing=True to ignore missing parameters."%( name, filename, _brief_print_list(loaded.keys())) for name in loaded: if not ignore_extra and name not in params: raise ValueError( "Parameter '%s' loaded from file '%s' is not present in ParameterDict, " \ "which contains parameters %s. Set ignore_extra=True to ignore. "%( name, filename, _brief_print_list(self._params.keys()))) if name in params: params[name]._load_init(loaded[name], ctx)
[ "Load parameters from file previously saved by `save_parameters`.\n\n Parameters\n ----------\n filename : str\n Path to parameter file.\n ctx : Context or list of Context, default cpu()\n Context(s) to initialize loaded parameters on.\n allow_missing : bool, default False\n Whether to silently skip loading parameters not represents in the file.\n ignore_extra : bool, default False\n Whether to silently ignore parameters from the file that are not\n present in this Block.\n\n References\n ----------\n `Saving and Loading Gluon Models \\\n <https://mxnet.incubator.apache.org/tutorials/gluon/save_load_params.html>`_\n " ]
Please provide a description of the function:def load_params(self, filename, ctx=None, allow_missing=False, ignore_extra=False): warnings.warn("load_params is deprecated. Please use load_parameters.") self.load_parameters(filename, ctx, allow_missing, ignore_extra)
[ "[Deprecated] Please use load_parameters.\n\n Load parameters from file.\n\n filename : str\n Path to parameter file.\n ctx : Context or list of Context, default cpu()\n Context(s) to initialize loaded parameters on.\n allow_missing : bool, default False\n Whether to silently skip loading parameters not represents in the file.\n ignore_extra : bool, default False\n Whether to silently ignore parameters from the file that are not\n present in this Block.\n " ]
Please provide a description of the function:def register_child(self, block, name=None): if name is None: name = str(len(self._children)) self._children[name] = block
[ "Registers block as a child of self. :py:class:`Block` s assigned to self as\n attributes will be registered automatically." ]
Please provide a description of the function:def register_forward_pre_hook(self, hook): handle = HookHandle() handle.attach(self._forward_pre_hooks, hook) return handle
[ "Registers a forward pre-hook on the block.\n\n The hook function is called immediately before :func:`forward`.\n It should not modify the input or output.\n\n Parameters\n ----------\n hook : callable\n The forward hook function of form `hook(block, input) -> None`.\n\n Returns\n -------\n :class:`mxnet.gluon.utils.HookHandle`\n " ]
Please provide a description of the function:def register_forward_hook(self, hook): handle = HookHandle() handle.attach(self._forward_hooks, hook) return handle
[ "Registers a forward hook on the block.\n\n The hook function is called immediately after :func:`forward`.\n It should not modify the input or output.\n\n Parameters\n ----------\n hook : callable\n The forward hook function of form `hook(block, input, output) -> None`.\n\n Returns\n -------\n :class:`mxnet.gluon.utils.HookHandle`\n " ]
Please provide a description of the function:def apply(self, fn): for cld in self._children.values(): cld.apply(fn) fn(self) return self
[ "Applies ``fn`` recursively to every child block as well as self.\n\n Parameters\n ----------\n fn : callable\n Function to be applied to each submodule, of form `fn(block)`.\n\n Returns\n -------\n this block\n " ]
Please provide a description of the function:def initialize(self, init=initializer.Uniform(), ctx=None, verbose=False, force_reinit=False): self.collect_params().initialize(init, ctx, verbose, force_reinit)
[ "Initializes :py:class:`Parameter` s of this :py:class:`Block` and its children.\n Equivalent to ``block.collect_params().initialize(...)``\n\n Parameters\n ----------\n init : Initializer\n Global default Initializer to be used when :py:meth:`Parameter.init` is ``None``.\n Otherwise, :py:meth:`Parameter.init` takes precedence.\n ctx : Context or list of Context\n Keeps a copy of Parameters on one or many context(s).\n verbose : bool, default False\n Whether to verbosely print out details on initialization.\n force_reinit : bool, default False\n Whether to force re-initialization if parameter is already initialized.\n " ]
Please provide a description of the function:def hybridize(self, active=True, **kwargs): for cld in self._children.values(): cld.hybridize(active, **kwargs)
[ "Activates or deactivates :py:class:`HybridBlock` s recursively. Has no effect on\n non-hybrid children.\n\n Parameters\n ----------\n active : bool, default True\n Whether to turn hybrid on or off.\n static_alloc : bool, default False\n Statically allocate memory to improve speed. Memory usage may increase.\n static_shape : bool, default False\n Optimize for invariant input shapes between iterations. Must also\n set static_alloc to True. Change of input shapes is still allowed\n but slower.\n " ]
Please provide a description of the function:def cast(self, dtype): for child in self._children.values(): child.cast(dtype) for _, param in self.params.items(): param.cast(dtype)
[ "Cast this Block to use another data type.\n\n Parameters\n ----------\n dtype : str or numpy.dtype\n The new data type.\n " ]
Please provide a description of the function:def summary(self, *inputs): summary = OrderedDict() seen = set() hooks = [] def _get_shape_str(args): def flatten(args): if not isinstance(args, (list, tuple)): return [args], int(0) flat = [] fmts = [] for i in args: arg, fmt = flatten(i) flat.extend(arg) fmts.append(fmt) return flat, fmts def regroup(args, fmt): if isinstance(fmt, int): if fmt == 0: return args[0], args[1:] return args[:fmt], args[fmt:] ret = [] for i in fmt: res, args = regroup(args, i) ret.append(res) return ret, args flat_args, fmts = flatten(args) flat_arg_shapes = [x.shape if isinstance(x, ndarray.NDArray) else x for x in flat_args] shapes = regroup(flat_arg_shapes, fmts)[0] if isinstance(shapes, list): shape_str = str(shapes)[1:-1] else: shape_str = str(shapes) return shape_str.replace('L', '') def _register_summary_hook(block): assert not isinstance(block, HybridBlock) or not block._active, \ '"{}" must not be hybridized to print summary.'.format(block.name) def _summary_hook(block, _, outputs): class_name = block.__class__.__name__ block_idx = len(summary) - 1 m_key = '%s-%i' % (class_name, block_idx+1) summary[m_key] = OrderedDict() summary[m_key]['output_shape'] = _get_shape_str(outputs) params = 0 summary[m_key]['trainable'] = 0 summary[m_key]['shared'] = 0 for p in block.params.values(): params += p.data().size summary[m_key]['trainable'] += 0 if p.grad_req == 'null' else p.data().size if p in seen: summary[m_key]['shared'] += p.data().size else: seen.add(p) summary[m_key]['n_params'] = params from .nn.basic_layers import Sequential, HybridSequential if not isinstance(block, (Sequential, HybridSequential)): hooks.append(block.register_forward_hook(_summary_hook)) summary['Input'] = OrderedDict() summary['Input']['output_shape'] = _get_shape_str(inputs) summary['Input']['n_params'] = 0 summary['Input']['trainable'] = 0 summary['Input']['shared'] = 0 try: self.apply(_register_summary_hook) self(*inputs) line_format = '{:>20} {:>42} {:>15}' print('-'*80) print(line_format.format('Layer (type)', 'Output Shape', 'Param #')) print('='*80) total_params = 0 trainable_params = 0 shared_params = 0 for layer in summary: print(line_format.format(layer, str(summary[layer]['output_shape']), summary[layer]['n_params'])) total_params += summary[layer]['n_params'] trainable_params += summary[layer]['trainable'] shared_params += summary[layer]['shared'] print('='*80) print('Parameters in forward computation graph, duplicate included') print(' Total params: ' + str(total_params)) print(' Trainable params: ' + str(trainable_params)) print(' Non-trainable params: ' + str(total_params - trainable_params)) print('Shared params in forward computation graph: ' + str(shared_params)) print('Unique parameters in model: ' + str(total_params - shared_params)) print('-'*80) finally: for h in hooks: h.detach()
[ "Print the summary of the model's output and parameters.\n\n The network must have been initialized, and must not have been hybridized.\n\n Parameters\n ----------\n inputs : object\n Any input that the model supports. For any tensor in the input, only\n :class:`mxnet.ndarray.NDArray` is supported.\n " ]
Please provide a description of the function:def _infer_attrs(self, infer_fn, attr, *args): inputs, out = self._get_graph(*args) args, _ = _flatten(args, "input") with warnings.catch_warnings(record=True) as w: arg_attrs, _, aux_attrs = getattr(out, infer_fn)( **{i.name: getattr(j, attr) for i, j in zip(inputs, args)}) if arg_attrs is None: raise ValueError(w[0].message) sdict = {i: j for i, j in zip(out.list_arguments(), arg_attrs)} sdict.update({name : attr for name, attr in \ zip(out.list_auxiliary_states(), aux_attrs)}) for i in self.collect_params().values(): setattr(i, attr, sdict[i.name])
[ "Generic infer attributes." ]
Please provide a description of the function:def export(self, path, epoch=0): if not self._cached_graph: raise RuntimeError( "Please first call block.hybridize() and then run forward with " "this block at least once before calling export.") sym = self._cached_graph[1] sym.save('%s-symbol.json'%path) arg_names = set(sym.list_arguments()) aux_names = set(sym.list_auxiliary_states()) arg_dict = {} for name, param in self.collect_params().items(): if name in arg_names: arg_dict['arg:%s'%name] = param._reduce() else: assert name in aux_names arg_dict['aux:%s'%name] = param._reduce() ndarray.save('%s-%04d.params'%(path, epoch), arg_dict)
[ "Export HybridBlock to json format that can be loaded by\n `SymbolBlock.imports`, `mxnet.mod.Module` or the C++ interface.\n\n .. note:: When there are only one input, it will have name `data`. When there\n Are more than one inputs, they will be named as `data0`, `data1`, etc.\n\n Parameters\n ----------\n path : str\n Path to save model. Two files `path-symbol.json` and `path-xxxx.params`\n will be created, where xxxx is the 4 digits epoch number.\n epoch : int\n Epoch number of saved model.\n " ]
Please provide a description of the function:def forward(self, x, *args): if isinstance(x, NDArray): with x.context as ctx: if self._active: return self._call_cached_op(x, *args) try: params = {i: j.data(ctx) for i, j in self._reg_params.items()} except DeferredInitializationError: self._deferred_infer_shape(x, *args) for _, i in self.params.items(): i._finish_deferred_init() params = {i: j.data(ctx) for i, j in self._reg_params.items()} return self.hybrid_forward(ndarray, x, *args, **params) assert isinstance(x, Symbol), \ "HybridBlock requires the first argument to forward be either " \ "Symbol or NDArray, but got %s"%type(x) params = {i: j.var() for i, j in self._reg_params.items()} with self.name_scope(): return self.hybrid_forward(symbol, x, *args, **params)
[ "Defines the forward computation. Arguments can be either\n :py:class:`NDArray` or :py:class:`Symbol`." ]
Please provide a description of the function:def imports(symbol_file, input_names, param_file=None, ctx=None): sym = symbol.load(symbol_file) if isinstance(input_names, str): input_names = [input_names] inputs = [symbol.var(i) for i in input_names] ret = SymbolBlock(sym, inputs) if param_file is not None: ret.collect_params().load(param_file, ctx=ctx) return ret
[ "Import model previously saved by `HybridBlock.export` or\n `Module.save_checkpoint` as a SymbolBlock for use in Gluon.\n\n Parameters\n ----------\n symbol_file : str\n Path to symbol file.\n input_names : list of str\n List of input variable names\n param_file : str, optional\n Path to parameter file.\n ctx : Context, default None\n The context to initialize SymbolBlock on.\n\n Returns\n -------\n SymbolBlock\n SymbolBlock loaded from symbol and parameter files.\n\n Examples\n --------\n >>> net1 = gluon.model_zoo.vision.resnet18_v1(\n ... prefix='resnet', pretrained=True)\n >>> net1.hybridize()\n >>> x = mx.nd.random.normal(shape=(1, 3, 32, 32))\n >>> out1 = net1(x)\n >>> net1.export('net1', epoch=1)\n >>>\n >>> net2 = gluon.SymbolBlock.imports(\n ... 'net1-symbol.json', ['data'], 'net1-0001.params')\n >>> out2 = net2(x)\n " ]
Please provide a description of the function:def calc_expectation(grad_dict, num_batches): for key in grad_dict.keys(): grad_dict[str.format(key+"_expectation")] = mx.ndarray.sum(grad_dict[key], axis=0) / num_batches return grad_dict
[ "Calculates the expectation of the gradients per epoch for each parameter w.r.t number of batches\n\n Parameters\n ----------\n grad_dict: dict\n dictionary that maps parameter name to gradients in the mod executor group\n num_batches: int\n number of batches\n\n Returns\n ----------\n grad_dict: dict\n dictionary with new keys mapping to gradients expectations\n\n " ]
Please provide a description of the function:def calc_variance(grad_dict, num_batches, param_names): for i in range(len(param_names)): diff_sqr = mx.ndarray.square(mx.nd.subtract(grad_dict[param_names[i]], grad_dict[str.format(param_names[i]+"_expectation")])) grad_dict[str.format(param_names[i] + "_variance")] = mx.ndarray.sum(diff_sqr, axis=0) / num_batches
[ "Calculates the variance of the gradients per epoch for each parameter w.r.t number of batches\n\n Parameters\n ----------\n grad_dict: dict\n dictionary that maps parameter name to gradients in the mod executor group\n num_batches: int\n number of batches\n param_names: str\n parameter name in the module\n\n Returns\n ----------\n grad_dict: dict\n dictionary with new keys mapping to gradients variance\n\n " ]
Please provide a description of the function:def makedirs(d): if sys.version_info[0] < 3: from distutils.dir_util import mkpath mkpath(d) else: os.makedirs(d, exist_ok=True)
[ "Create directories recursively if they don't exist. os.makedirs(exist_ok=True) is not\n available in Python2" ]
Please provide a description of the function:def alexnet(pretrained=False, ctx=cpu(), root=os.path.join(base.data_dir(), 'models'), **kwargs): net = AlexNet(**kwargs) if pretrained: from ..model_store import get_model_file net.load_parameters(get_model_file('alexnet', root=root), ctx=ctx) return net
[ "AlexNet model from the `\"One weird trick...\" <https://arxiv.org/abs/1404.5997>`_ paper.\n\n Parameters\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default $MXNET_HOME/models\n Location for keeping the model parameters.\n " ]
Please provide a description of the function:def classifer_metrics(label, pred): prediction = np.argmax(pred, axis=1) label = label.astype(int) pred_is_entity = prediction != not_entity_index label_is_entity = label != not_entity_index corr_pred = (prediction == label) == (pred_is_entity == True) #how many entities are there? num_entities = np.sum(label_is_entity) entity_preds = np.sum(pred_is_entity) #how many times did we correctly predict an entity? correct_entitites = np.sum(corr_pred[pred_is_entity]) #precision: when we predict entity, how often are we right? precision = correct_entitites/entity_preds if entity_preds == 0: precision = np.nan #recall: of the things that were an entity, how many did we catch? recall = correct_entitites / num_entities if num_entities == 0: recall = np.nan f1 = 2 * precision * recall / (precision + recall) return precision, recall, f1
[ "\n computes f1, precision and recall on the entity class\n " ]
Please provide a description of the function:def data_iter(batch_size, num_embed, pre_trained_word2vec=False): print('Loading data...') if pre_trained_word2vec: word2vec = data_helpers.load_pretrained_word2vec('data/rt.vec') x, y = data_helpers.load_data_with_word2vec(word2vec) # reshape for convolution input x = np.reshape(x, (x.shape[0], 1, x.shape[1], x.shape[2])) embedded_size = x.shape[-1] sentences_size = x.shape[2] vocabulary_size = -1 else: x, y, vocab, vocab_inv = data_helpers.load_data() embedded_size = num_embed sentences_size = x.shape[1] vocabulary_size = len(vocab) # randomly shuffle data np.random.seed(10) shuffle_indices = np.random.permutation(np.arange(len(y))) x_shuffled = x[shuffle_indices] y_shuffled = y[shuffle_indices] # split train/valid set x_train, x_dev = x_shuffled[:-1000], x_shuffled[-1000:] y_train, y_dev = y_shuffled[:-1000], y_shuffled[-1000:] print('Train/Valid split: %d/%d' % (len(y_train), len(y_dev))) print('train shape:', x_train.shape) print('valid shape:', x_dev.shape) print('sentence max words', sentences_size) print('embedding size', embedded_size) print('vocab size', vocabulary_size) train_set = mx.io.NDArrayIter( x_train, y_train, batch_size, shuffle=True) valid = mx.io.NDArrayIter( x_dev, y_dev, batch_size) return train_set, valid, sentences_size, embedded_size, vocabulary_size
[ "Construct data iter\n\n Parameters\n ----------\n batch_size: int\n num_embed: int\n pre_trained_word2vec: boolean\n identify the pre-trained layers or not\n Returns\n ----------\n train_set: DataIter\n Train DataIter\n valid: DataIter\n Valid DataIter\n sentences_size: int\n array dimensions\n embedded_size: int\n array dimensions\n vocab_size: int\n array dimensions\n " ]
Please provide a description of the function:def sym_gen(batch_size, sentences_size, num_embed, vocabulary_size, num_label=2, filter_list=None, num_filter=100, dropout=0.0, pre_trained_word2vec=False): input_x = mx.sym.Variable('data') input_y = mx.sym.Variable('softmax_label') # embedding layer if not pre_trained_word2vec: embed_layer = mx.sym.Embedding(data=input_x, input_dim=vocabulary_size, output_dim=num_embed, name='vocab_embed') conv_input = mx.sym.Reshape(data=embed_layer, target_shape=(batch_size, 1, sentences_size, num_embed)) else: conv_input = input_x # create convolution + (max) pooling layer for each filter operation pooled_outputs = [] for i, filter_size in enumerate(filter_list): convi = mx.sym.Convolution(data=conv_input, kernel=(filter_size, num_embed), num_filter=num_filter) relui = mx.sym.Activation(data=convi, act_type='relu') pooli = mx.sym.Pooling(data=relui, pool_type='max', kernel=(sentences_size - filter_size + 1, 1), stride=(1, 1)) pooled_outputs.append(pooli) # combine all pooled outputs total_filters = num_filter * len(filter_list) concat = mx.sym.Concat(*pooled_outputs, dim=1) h_pool = mx.sym.Reshape(data=concat, target_shape=(batch_size, total_filters)) # dropout layer if dropout > 0.0: h_drop = mx.sym.Dropout(data=h_pool, p=dropout) else: h_drop = h_pool # fully connected cls_weight = mx.sym.Variable('cls_weight') cls_bias = mx.sym.Variable('cls_bias') fc = mx.sym.FullyConnected(data=h_drop, weight=cls_weight, bias=cls_bias, num_hidden=num_label) # softmax output sm = mx.sym.SoftmaxOutput(data=fc, label=input_y, name='softmax') return sm, ('data',), ('softmax_label',)
[ "Generate network symbol\n\n Parameters\n ----------\n batch_size: int\n sentences_size: int\n num_embed: int\n vocabulary_size: int\n num_label: int\n filter_list: list\n num_filter: int\n dropout: int\n pre_trained_word2vec: boolean\n identify the pre-trained layers or not\n Returns\n ----------\n sm: symbol\n data: list of str\n data names\n softmax_label: list of str\n label names\n " ]