Code (string, 103–85.9k chars) | Summary (sequence, 0–94 items)
---|---
Please provide a description of the function:def init_optimizer(self, kvstore='local', optimizer='sgd',
optimizer_params=(('learning_rate', 0.01),),
force_init=False):
assert self.binded and self.params_initialized
if self.optimizer_initialized and not force_init:
self.logger.warning('optimizer already initialized, ignoring.')
return
for module in self._modules:
module.init_optimizer(kvstore=kvstore, optimizer=optimizer,
optimizer_params=optimizer_params, force_init=force_init)
self.optimizer_initialized = True | [
"Installs and initializes optimizers.\n\n Parameters\n ----------\n kvstore : str or KVStore\n Default `'local'`.\n optimizer : str or Optimizer\n Default `'sgd'`\n optimizer_params : dict\n Default ``(('learning_rate', 0.01),)``. The default value is not a dictionary,\n just to avoid pylint warning of dangerous default values.\n force_init : bool\n Default ``False``, indicating whether we should force re-initializing the\n optimizer in the case an optimizer is already installed.\n "
] |
Please provide a description of the function:def forward(self, data_batch, is_train=None):
assert self.binded and self.params_initialized
# make a shallow copy, just to maintain necessary properties (if any) like
# bucket_key, pad, etc.
data_batch = copy.copy(data_batch)
for i_layer, module in enumerate(self._modules):
module.forward(data_batch, is_train=is_train)
if i_layer+1 == len(self._modules):
# the last layer, do not need to do the followings
break
data_batch.data = module.get_outputs()
if hasattr(data_batch, 'provide_data'):
# need to update this, in case the internal module is using bucketing
# or whatever
data_names = [x[0] for x in module.output_shapes]
assert len(data_names) == len(data_batch.data)
data_batch.provide_data = [(name, x.shape) for name, x in
zip(data_names, data_batch.data)] | [
"Forward computation.\n\n Parameters\n ----------\n data_batch : DataBatch\n is_train : bool\n Default is ``None``, in which case `is_train` is take as ``self.for_training``.\n "
] |
Please provide a description of the function:def backward(self, out_grads=None):
assert self.binded and self.params_initialized
for i_layer, module in reversed(list(zip(range(len(self._modules)), self._modules))):
module.backward(out_grads=out_grads)
if i_layer == 0:
break
out_grads = module.get_input_grads() | [
"Backward computation."
] |
Please provide a description of the function:def update(self):
assert self.binded and self.params_initialized and self.optimizer_initialized
for module in self._modules:
module.update() | [
"Updates parameters according to installed optimizer and the gradient computed\n in the previous forward-backward cycle.\n "
] |
Please provide a description of the function:def get_outputs(self, merge_multi_context=True):
assert self.binded and self.params_initialized
return self._modules[-1].get_outputs(merge_multi_context=merge_multi_context) | [
"Gets outputs from a previous forward computation.\n\n Parameters\n ----------\n merge_multi_context : bool\n Default is ``True``. In the case when data-parallelism is used, the outputs\n will be collected from multiple devices. A ``True`` value indicate that we\n should merge the collected results so that they look like from a single\n executor.\n\n Returns\n -------\n list of NDArray or list of list of NDArray\n If `merge_multi_context` is ``True``, it is like ``[out1,\n out2]``. Otherwise, it is like ``[[out1_dev1, out1_dev2], [out2_dev1,\n out2_dev2]]``. All the output elements are numpy arrays.\n "
] |
Please provide a description of the function:def get_input_grads(self, merge_multi_context=True):
assert self.binded and self.params_initialized and self.inputs_need_grad
return self._modules[0].get_input_grads(merge_multi_context=merge_multi_context) | [
"Gets the gradients with respect to the inputs of the module.\n\n Parameters\n ----------\n merge_multi_context : bool\n Default is ``True``. In the case when data-parallelism is used, the outputs\n will be collected from multiple devices. A ``True`` value indicate that we\n should merge the collected results so that they look like from a single\n executor.\n\n Returns\n -------\n list of NDArrays or list of list of NDArrays\n If `merge_multi_context` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it\n is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output\n elements are `NDArray`.\n "
] |
Please provide a description of the function:def update_metric(self, eval_metric, labels, pre_sliced=False):
assert self.binded and self.params_initialized
for meta, module in zip(self._metas, self._modules):
if SequentialModule.META_TAKE_LABELS in meta and \
meta[SequentialModule.META_TAKE_LABELS]:
module.update_metric(eval_metric, labels, pre_sliced) | [
"Evaluates and accumulates evaluation metric on outputs of the last forward computation.\n\n Parameters\n ----------\n eval_metric : EvalMetric\n labels : list of NDArray\n Typically ``data_batch.label``.\n "
] |
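The `forward`, `backward`, `update`, `update_metric` and `get_outputs` calls documented above follow the standard MXNet module training cycle. Below is a minimal, self-contained sketch on synthetic data; it uses the generic `mx.mod.Module` rather than `SequentialModule`, purely for illustration.

```python
import mxnet as mx
import numpy as np

# Tiny network: one fully-connected layer with a softmax output.
data = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data, num_hidden=2, name='fc')
net = mx.sym.SoftmaxOutput(net, name='softmax')

mod = mx.mod.Module(net)  # default data_names=('data',), label_names=('softmax_label',)
mod.bind(data_shapes=[('data', (4, 8))], label_shapes=[('softmax_label', (4,))])
mod.init_params()
mod.init_optimizer(kvstore='local', optimizer='sgd',
                   optimizer_params=(('learning_rate', 0.1),))

metric = mx.metric.Accuracy()
batch = mx.io.DataBatch(data=[mx.nd.random.uniform(shape=(4, 8))],
                        label=[mx.nd.array(np.random.randint(0, 2, size=(4,)))])

mod.forward(batch, is_train=True)       # forward computation
mod.backward()                          # gradients w.r.t. parameters
mod.update()                            # apply the installed optimizer
mod.update_metric(metric, batch.label)  # accumulate the evaluation metric
print(mod.get_outputs()[0].shape, metric.get())
```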
Please provide a description of the function:def install_monitor(self, mon):
assert self.binded
for module in self._modules:
module.install_monitor(mon) | [
"Installs monitor on all executors."
] |
Please provide a description of the function:def get_iterator(data_shape, use_caffe_data):
def get_iterator_impl_mnist(args, kv):
# download data
get_mnist_ubyte()
flat = len(data_shape) == 1
train = mx.io.MNISTIter(
image="data/train-images-idx3-ubyte",
label="data/train-labels-idx1-ubyte",
input_shape=data_shape,
batch_size=args.batch_size,
shuffle=True,
flat=flat,
num_parts=kv.num_workers,
part_index=kv.rank)
val = mx.io.MNISTIter(
image="data/t10k-images-idx3-ubyte",
label="data/t10k-labels-idx1-ubyte",
input_shape=data_shape,
batch_size=args.batch_size,
flat=flat,
num_parts=kv.num_workers,
part_index=kv.rank)
return (train, val)
def get_iterator_impl_caffe(args, kv):
flat = len(data_shape) == 1
train = mx.io.CaffeDataIter(
prototxt=
'layer { \
name: "mnist" \
type: "Data" \
top: "data" \
top: "label" \
include { \
phase: TRAIN \
} \
transform_param { \
scale: 0.00390625 \
} \
data_param { \
source: "mnist_train_lmdb" \
batch_size: 64 \
backend: LMDB \
} \
}',
flat=flat,
num_examples=60000
# float32 is the default, so left out here in order to illustrate
)
val = mx.io.CaffeDataIter(
prototxt=
'layer { \
name: "mnist" \
type: "Data" \
top: "data" \
top: "label" \
include { \
phase: TEST \
} \
transform_param { \
scale: 0.00390625 \
} \
data_param { \
source: "mnist_test_lmdb" \
batch_size: 100 \
backend: LMDB \
} \
}',
flat=flat,
num_examples=10000,
dtype="float32" # float32 is the default
)
return train, val
if use_caffe_data:
return get_iterator_impl_caffe
else:
return get_iterator_impl_mnist | [
"Generate the iterator of mnist dataset",
"return train and val iterators for mnist"
] |
Please provide a description of the function:def predict(prediction_dir='./Test'):
if not os.path.exists(prediction_dir):
warnings.warn("The directory on which predictions are to be made is not found!")
return
if len(os.listdir(prediction_dir)) == 0:
warnings.warn("The directory on which predictions are to be made is empty! Exiting...")
return
# Loading synsets
if not os.path.exists('./synset.txt'):
warnings.warn("The synset or labels for the dataset do not exist. Please run the training script first.")
return
with open("./synset.txt", "r") as f:
synset = [l.rstrip() for l in f]
net = get_net(len(synset))
print("Trying to load the model with the saved parameters...")
if not os.path.exists("./net.params"):
warnings.warn("The model does not have any saved parameters... Cannot proceed! Train the model first")
return
net.load_parameters("./net.params")
file_names = os.listdir(prediction_dir)
full_file_names = [os.path.join(prediction_dir, item) for item in file_names]
from transforms import MFCC
mfcc = MFCC()
print("\nStarting predictions for audio files in ", prediction_dir, " ....\n")
for filename in full_file_names:
# Argument kaiser_fast to res_type is faster than 'kaiser_best'. To reduce the load time, passing kaiser_fast.
X1, _ = librosa.load(filename, res_type='kaiser_fast')
transformed_test_data = mfcc(mx.nd.array(X1))
output = net(transformed_test_data.reshape((1, -1)))
prediction = nd.argmax(output, axis=1)
print(filename, " -> ", synset[int(prediction.asscalar())]) | [
"The function is used to run predictions on the audio files in the directory `pred_directory`.\n\n Parameters\n ----------\n net:\n The model that has been trained.\n prediction_dir: string, default ./Test\n The directory that contains the audio files on which predictions are to be made\n\n "
] |
Please provide a description of the function:def _proc_loop(proc_id, alive, queue, fn):
print("proc {} started".format(proc_id))
try:
while alive.value:
data = fn()
put_success = False
while alive.value and not put_success:
try:
queue.put(data, timeout=0.5)
put_success = True
except QFullExcept:
# print("Queue Full")
pass
except KeyboardInterrupt:
print("W: interrupt received, stopping process {} ...".format(proc_id))
print("Closing process {}".format(proc_id))
queue.close() | [
"Thread loop for generating data\n\n Parameters\n ----------\n proc_id: int\n Process id\n alive: multiprocessing.Value\n variable for signaling whether process should continue or not\n queue: multiprocessing.Queue\n queue for passing data back\n fn: function\n function object that returns a sample to be pushed into the queue\n "
] |
Please provide a description of the function:def _init_proc(self):
if not self.proc:
self.proc = [
mp.Process(target=self._proc_loop, args=(i, self.alive, self.queue, self.fn))
for i in range(self.num_proc)
]
self.alive.value = True
for p in self.proc:
p.start() | [
"Start processes if not already started"
] |
Please provide a description of the function:def reset(self):
self.alive.value = False
qsize = 0
try:
while True:
self.queue.get(timeout=0.1)
qsize += 1
except QEmptyExcept:
pass
print("Queue size on reset: {}".format(qsize))
for i, p in enumerate(self.proc):
p.join()
self.proc.clear() | [
"Resets the generator by stopping all processes"
] |
Please provide a description of the function:def with_metaclass(meta, *bases):
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(type):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
return type.__new__(metaclass, 'temporary_class', (), {}) | [
"Create a base class with a metaclass."
] |
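A small usage sketch of the helper above (the `Meta` class here is purely illustrative): it lets a class declare a metaclass in a form that works on both Python 2 and Python 3.

```python
# Assumes with_metaclass (defined above) is in scope.
class Meta(type):
    def __new__(mcs, name, bases, namespace):
        namespace['created_by_meta'] = True
        return super(Meta, mcs).__new__(mcs, name, bases, namespace)

class Base(with_metaclass(Meta, object)):
    pass

print(type(Base) is Meta)    # True: the real metaclass replaced the temporary one
print(Base.created_by_meta)  # True: attribute injected by Meta.__new__
```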
Please provide a description of the function:def _load_lib():
lib_path = libinfo.find_lib_path()
lib = ctypes.CDLL(lib_path[0], ctypes.RTLD_LOCAL)
# DMatrix functions
lib.MXGetLastError.restype = ctypes.c_char_p
return lib | [
"Load library by searching possible path."
] |
Please provide a description of the function:def c_array(ctype, values):
out = (ctype * len(values))()
out[:] = values
return out | [
"Create ctypes array from a Python array.\n\n Parameters\n ----------\n ctype : ctypes data type\n Data type of the array we want to convert to, such as mx_float.\n\n values : tuple or list\n Data content.\n\n Returns\n -------\n out : ctypes array\n Created ctypes array.\n\n Examples\n --------\n >>> x = mx.base.c_array(mx.base.mx_float, [1, 2, 3])\n >>> print len(x)\n 3\n >>> x[1]\n 2.0\n "
] |
Please provide a description of the function:def c_handle_array(objs):
arr = (ctypes.c_void_p * len(objs))()
arr[:] = [o.handle for o in objs]
return arr | [
"Create ctypes const void ** from a list of MXNet objects with handles.\n\n Parameters\n ----------\n objs : list of NDArray/Symbol.\n MXNet objects.\n\n Returns\n -------\n (ctypes.c_void_p * len(objs))\n A void ** pointer that can be passed to C API.\n "
] |
Please provide a description of the function:def ctypes2numpy_shared(cptr, shape):
if not isinstance(cptr, ctypes.POINTER(mx_float)):
raise RuntimeError('expected float pointer')
size = 1
for s in shape:
size *= s
dbuffer = (mx_float * size).from_address(ctypes.addressof(cptr.contents))
return _np.frombuffer(dbuffer, dtype=_np.float32).reshape(shape) | [
"Convert a ctypes pointer to a numpy array.\n\n The resulting NumPy array shares the memory with the pointer.\n\n Parameters\n ----------\n cptr : ctypes.POINTER(mx_float)\n pointer to the memory region\n\n shape : tuple\n Shape of target `NDArray`.\n\n Returns\n -------\n out : numpy_array\n A numpy array : numpy array.\n "
] |
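An illustrative round trip with the function above; `mx_float` is assumed to be `ctypes.c_float` (which is how `mxnet.base` defines it), and the output shows that the returned array really shares memory with the pointer.

```python
import ctypes

mx_float = ctypes.c_float          # stand-in for mxnet.base.mx_float
buf = (mx_float * 6)(*range(6))    # a small C float buffer
ptr = ctypes.cast(buf, ctypes.POINTER(mx_float))

arr = ctypes2numpy_shared(ptr, (2, 3))   # assumes the function above is in scope
buf[0] = 42.0
print(arr[0, 0])                         # 42.0 -- the view reflects the change
```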
Please provide a description of the function:def build_param_doc(arg_names, arg_types, arg_descs, remove_dup=True):
param_keys = set()
param_str = []
for key, type_info, desc in zip(arg_names, arg_types, arg_descs):
if key in param_keys and remove_dup:
continue
if key == 'num_args':
continue
param_keys.add(key)
ret = '%s : %s' % (key, type_info)
if len(desc) != 0:
ret += '\n ' + desc
param_str.append(ret)
doc_str = ('Parameters\n' +
'----------\n' +
'%s\n')
doc_str = doc_str % ('\n'.join(param_str))
return doc_str | [
"Build argument docs in python style.\n\n arg_names : list of str\n Argument names.\n\n arg_types : list of str\n Argument type information.\n\n arg_descs : list of str\n Argument description information.\n\n remove_dup : boolean, optional\n Whether remove duplication or not.\n\n Returns\n -------\n docstr : str\n Python docstring of parameter sections.\n "
] |
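A quick illustration of the output format, assuming `build_param_doc` above is in scope.

```python
doc = build_param_doc(
    arg_names=['data', 'num_hidden'],
    arg_types=['NDArray', 'int'],
    arg_descs=['Input data.', 'Number of hidden units.'])
print(doc)
# Prints a NumPy-style Parameters section (indentation may vary slightly):
# Parameters
# ----------
# data : NDArray
#     Input data.
# num_hidden : int
#     Number of hidden units.
```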
Please provide a description of the function:def add_fileline_to_docstring(module, incursive=True):
def _add_fileline(obj):
if obj.__doc__ is None or 'From:' in obj.__doc__:
return
fname = inspect.getsourcefile(obj)
if fname is None:
return
try:
line = inspect.getsourcelines(obj)[-1]
except IOError:
return
obj.__doc__ += '\n\nFrom:%s:%d' % (fname, line)
if isinstance(module, str):
module = sys.modules[module]
for _, obj in inspect.getmembers(module):
if inspect.isbuiltin(obj):
continue
if inspect.isfunction(obj):
_add_fileline(obj)
if inspect.ismethod(obj):
_add_fileline(obj.__func__)
if inspect.isclass(obj) and incursive:
add_fileline_to_docstring(obj, False) | [
"Append the definition position to each function contained in module.\n\n Examples\n --------\n # Put the following codes at the end of a file\n add_fileline_to_docstring(__name__)\n ",
"Add fileinto to a object.\n "
] |
Please provide a description of the function:def _init_op_module(root_namespace, module_name, make_op_func):
plist = ctypes.POINTER(ctypes.c_char_p)()
size = ctypes.c_uint()
check_call(_LIB.MXListAllOpNames(ctypes.byref(size),
ctypes.byref(plist)))
op_names = []
for i in range(size.value):
op_names.append(py_str(plist[i]))
module_op = sys.modules["%s.%s.op" % (root_namespace, module_name)]
module_internal = sys.modules["%s.%s._internal" % (root_namespace, module_name)]
# contrib module in the old format (deprecated)
# kept here for backward compatibility
# use mx.nd.contrib or mx.sym.contrib from now on
contrib_module_name_old = "%s.contrib.%s" % (root_namespace, module_name)
contrib_module_old = sys.modules[contrib_module_name_old]
submodule_dict = {}
for op_name_prefix in _OP_NAME_PREFIX_LIST:
submodule_dict[op_name_prefix] =\
sys.modules["%s.%s.%s" % (root_namespace, module_name, op_name_prefix[1:-1])]
for name in op_names:
hdl = OpHandle()
check_call(_LIB.NNGetOpHandle(c_str(name), ctypes.byref(hdl)))
op_name_prefix = _get_op_name_prefix(name)
module_name_local = module_name
if len(op_name_prefix) > 0:
if op_name_prefix != '_random_' or name.endswith('_like'):
func_name = name[len(op_name_prefix):]
cur_module = submodule_dict[op_name_prefix]
module_name_local = "%s.%s.%s" % (root_namespace, module_name, op_name_prefix[1:-1])
else:
func_name = name
cur_module = module_internal
elif name.startswith('_'):
func_name = name
cur_module = module_internal
else:
func_name = name
cur_module = module_op
function = make_op_func(hdl, name, func_name)
function.__module__ = module_name_local
setattr(cur_module, function.__name__, function)
cur_module.__all__.append(function.__name__)
if op_name_prefix == '_contrib_':
hdl = OpHandle()
check_call(_LIB.NNGetOpHandle(c_str(name), ctypes.byref(hdl)))
func_name = name[len(op_name_prefix):]
function = make_op_func(hdl, name, func_name)
function.__module__ = contrib_module_name_old
setattr(contrib_module_old, function.__name__, function)
contrib_module_old.__all__.append(function.__name__) | [
"\n Registers op functions created by `make_op_func` under\n `root_namespace.module_name.[submodule_name]`,\n where `submodule_name` is one of `_OP_SUBMODULE_NAME_LIST`.\n\n Parameters\n ----------\n root_namespace : str\n Top level module name, `mxnet` in the current cases.\n module_name : str\n Second level module name, `ndarray` and `symbol` in the current cases.\n make_op_func : function\n Function for creating op functions for `ndarray` and `symbol` modules.\n "
] |
Please provide a description of the function:def _generate_op_module_signature(root_namespace, module_name, op_code_gen_func):
def get_module_file(module_name):
path = os.path.dirname(__file__)
module_path = module_name.split('.')
module_path[-1] = 'gen_' + module_path[-1]
file_name = os.path.join(path, '..', *module_path) + '.py'
module_file = open(file_name, 'w')
dependencies = {'symbol': ['from ._internal import SymbolBase',
'from ..base import _Null'],
'ndarray': ['from ._internal import NDArrayBase',
'from ..base import _Null']}
module_file.write('# File content is auto-generated. Do not modify.' + os.linesep)
module_file.write('# pylint: skip-file' + os.linesep)
module_file.write(os.linesep.join(dependencies[module_name.split('.')[1]]))
return module_file
def write_all_str(module_file, module_all_list):
module_file.write(os.linesep)
module_file.write(os.linesep)
all_str = '__all__ = [' + ', '.join(["'%s'"%s for s in module_all_list]) + ']'
module_file.write(all_str)
plist = ctypes.POINTER(ctypes.c_char_p)()
size = ctypes.c_uint()
check_call(_LIB.MXListAllOpNames(ctypes.byref(size),
ctypes.byref(plist)))
op_names = []
for i in range(size.value):
op_names.append(py_str(plist[i]))
module_op_file = get_module_file("%s.%s.op" % (root_namespace, module_name))
module_op_all = []
module_internal_file = get_module_file("%s.%s._internal"%(root_namespace, module_name))
module_internal_all = []
submodule_dict = {}
for op_name_prefix in _OP_NAME_PREFIX_LIST:
submodule_dict[op_name_prefix] =\
(get_module_file("%s.%s.%s" % (root_namespace, module_name,
op_name_prefix[1:-1])), [])
for name in op_names:
hdl = OpHandle()
check_call(_LIB.NNGetOpHandle(c_str(name), ctypes.byref(hdl)))
op_name_prefix = _get_op_name_prefix(name)
if len(op_name_prefix) > 0:
func_name = name[len(op_name_prefix):]
cur_module_file, cur_module_all = submodule_dict[op_name_prefix]
elif name.startswith('_'):
func_name = name
cur_module_file = module_internal_file
cur_module_all = module_internal_all
else:
func_name = name
cur_module_file = module_op_file
cur_module_all = module_op_all
code, _ = op_code_gen_func(hdl, name, func_name, True)
cur_module_file.write(os.linesep)
cur_module_file.write(code)
cur_module_all.append(func_name)
for (submodule_f, submodule_all) in submodule_dict.values():
write_all_str(submodule_f, submodule_all)
submodule_f.close()
write_all_str(module_op_file, module_op_all)
module_op_file.close()
write_all_str(module_internal_file, module_internal_all)
module_internal_file.close() | [
"\n Generate op functions created by `op_code_gen_func` and write to the source file\n of `root_namespace.module_name.[submodule_name]`,\n where `submodule_name` is one of `_OP_SUBMODULE_NAME_LIST`.\n\n Parameters\n ----------\n root_namespace : str\n Top level module name, `mxnet` in the current cases.\n module_name : str\n Second level module name, `ndarray` and `symbol` in the current cases.\n op_code_gen_func : function\n Function for creating op functions for `ndarray` and `symbol` modules.\n ",
"Return the generated module file based on module name.",
"Write the proper __all__ based on available operators."
] |
Please provide a description of the function:def set_np_compat(active):
prev = ctypes.c_int()
check_call(_LIB.MXSetIsNumpyCompatible(ctypes.c_int(active), ctypes.byref(prev)))
return bool(prev.value) | [
"\n Turns on/off NumPy compatibility. NumPy-compatibility is turned off by default in backend.\n\n Parameters\n ----------\n active : bool\n Indicates whether to turn on/off NumPy compatibility.\n\n Returns\n -------\n A bool value indicating the previous state of NumPy compatibility.\n "
] |
Please provide a description of the function:def is_np_compat():
curr = ctypes.c_bool()
check_call(_LIB.MXIsNumpyCompatible(ctypes.byref(curr)))
return curr.value | [
"\n Checks whether the NumPy compatibility is currently turned on.\n NumPy-compatibility is turned off by default in backend.\n\n Returns\n -------\n A bool value indicating whether the NumPy compatibility is currently on.\n "
] |
Please provide a description of the function:def use_np_compat(func):
@wraps(func)
def _with_np_compat(*args, **kwargs):
with np_compat(active=True):
return func(*args, **kwargs)
return _with_np_compat | [
"Wraps a function with an activated NumPy-compatibility scope. This ensures\n that the execution of the function is guaranteed with NumPy compatible semantics,\n such as zero-dim and zero size tensors.\n\n Example::\n import mxnet as mx\n @mx.use_np_compat\n def scalar_one():\n return mx.nd.ones(())\n print(scalar_one())\n\n Parameters\n ----------\n func : a user-provided callable function to be scoped by the NumPy compatibility state.\n\n Returns\n -------\n Function\n A function for wrapping the user functions in the NumPy compatibility scope.\n "
] |
Please provide a description of the function:def rse(label, pred):
numerator = np.sqrt(np.mean(np.square(label - pred), axis = None))
denominator = np.std(label, axis = None)
return numerator / denominator | [
"computes the root relative squared error (condensed using standard deviation formula)"
] |
Please provide a description of the function:def rae(label, pred):
numerator = np.mean(np.abs(label - pred), axis=None)
denominator = np.mean(np.abs(label - np.mean(label, axis=None)), axis=None)
return numerator / denominator | [
"computes the relative absolute error (condensed using standard deviation formula)"
] |
Please provide a description of the function:def corr(label, pred):
numerator1 = label - np.mean(label, axis=0)
numerator2 = pred - np.mean(pred, axis = 0)
numerator = np.mean(numerator1 * numerator2, axis=0)
denominator = np.std(label, axis=0) * np.std(pred, axis=0)
return np.mean(numerator / denominator) | [
"computes the empirical correlation coefficient"
] |
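A toy check of the three metrics above on a small prediction matrix (rows are time steps, columns are series; assumes `rse`, `rae` and `corr` are in scope).

```python
import numpy as np

label = np.array([[1.0, 2.0], [2.0, 4.0], [3.0, 6.0]])
pred  = np.array([[1.1, 2.1], [1.9, 3.8], [3.2, 6.3]])

print(rse(label, pred))    # root relative squared error: lower is better
print(rae(label, pred))    # relative absolute error: lower is better
print(corr(label, pred))   # empirical correlation: closer to 1 is better
```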
Please provide a description of the function:def get_custom_metrics():
_rse = mx.metric.create(rse)
_rae = mx.metric.create(rae)
_corr = mx.metric.create(corr)
return mx.metric.create([_rae, _rse, _corr]) | [
"\n :return: mxnet metric object\n "
] |
Please provide a description of the function:def _get_input(proto):
layer = caffe_parser.get_layers(proto)
if len(proto.input_dim) > 0:
input_dim = proto.input_dim
elif len(proto.input_shape) > 0:
input_dim = proto.input_shape[0].dim
elif layer[0].type == "Input":
input_dim = layer[0].input_param.shape[0].dim
layer.pop(0)
else:
raise ValueError('Cannot find input size')
assert layer[0].type != "Input", 'only support single input'
# We assume the first bottom blob of first layer is the output from data layer
input_name = layer[0].bottom[0]
return input_name, input_dim, layer | [
"Get input size\n "
] |
Please provide a description of the function:def _convert_conv_param(param):
param_string = "num_filter=%d" % param.num_output
pad_w = 0
pad_h = 0
if isinstance(param.pad, int):
pad = param.pad
param_string += ", pad=(%d, %d)" % (pad, pad)
else:
if len(param.pad) > 0:
pad = param.pad[0]
param_string += ", pad=(%d, %d)" % (pad, pad)
else:
if isinstance(param.pad_w, int):
pad_w = param.pad_w
if isinstance(param.pad_h, int):
pad_h = param.pad_h
param_string += ", pad=(%d, %d)" % (pad_h, pad_w)
if isinstance(param.kernel_size, int):
kernel_size = param.kernel_size
param_string += ", kernel=(%d,%d)" % (kernel_size, kernel_size)
else:
if len(param.kernel_size) > 0:
kernel_size = param.kernel_size[0]
param_string += ", kernel=(%d,%d)" % (kernel_size, kernel_size)
else:
assert isinstance(param.kernel_w, int)
kernel_w = param.kernel_w
assert isinstance(param.kernel_h, int)
kernel_h = param.kernel_h
param_string += ", kernel=(%d,%d)" % (kernel_h, kernel_w)
stride = 1
if isinstance(param.stride, int):
stride = param.stride
else:
stride = 1 if len(param.stride) == 0 else param.stride[0]
param_string += ", stride=(%d,%d)" % (stride, stride)
dilate = 1
if hasattr(param, 'dilation'):
if isinstance(param.dilation, int):
dilate = param.dilation
else:
dilate = 1 if len(param.dilation) == 0 else param.dilation[0]
param_string += ", no_bias=%s" % (not param.bias_term)
# deal with dilation. Won't be in deconvolution
if dilate > 1:
param_string += ", dilate=(%d, %d)" % (dilate, dilate)
if isinstance(param.group, int):
if param.group != 1:
param_string += ", num_group=%d" % param.group
return param_string | [
"\n Convert convolution layer parameter from Caffe to MXNet\n "
] |
Please provide a description of the function:def _convert_pooling_param(param):
param_string = "pooling_convention='full', "
if param.global_pooling:
param_string += "global_pool=True, kernel=(1,1)"
else:
param_string += "pad=(%d,%d), kernel=(%d,%d), stride=(%d,%d)" % (
param.pad, param.pad, param.kernel_size, param.kernel_size,
param.stride, param.stride)
if param.pool == 0:
param_string += ", pool_type='max'"
elif param.pool == 1:
param_string += ", pool_type='avg'"
else:
raise ValueError("Unknown Pooling Method!")
return param_string | [
"Convert the pooling layer parameter\n "
] |
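For illustration, a stand-in object that mimics a Caffe PoolingParameter message; the field names come from the code above, and the values are hypothetical.

```python
from types import SimpleNamespace

# pool=0 corresponds to MAX pooling in the Caffe protobuf enum.
param = SimpleNamespace(global_pooling=False, pad=1, kernel_size=3, stride=2, pool=0)
print(_convert_pooling_param(param))   # assumes the function above is in scope
# pooling_convention='full', pad=(1,1), kernel=(3,3), stride=(2,2), pool_type='max'
```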
Please provide a description of the function:def _parse_proto(prototxt_fname):
proto = caffe_parser.read_prototxt(prototxt_fname)
# process data layer
input_name, input_dim, layers = _get_input(proto)
# only support single input, so always use `data` as the input data
mapping = {input_name: 'data'}
need_flatten = {input_name: False}
symbol_string = "import mxnet as mx\ndata = mx.symbol.Variable(name='data')\n"
flatten_count = 0
output_name = ""
prev_name = None
# convert reset layers one by one
for i, layer in enumerate(layers):
type_string = ''
param_string = ''
skip_layer = False
bottom_order = []
name = re.sub('[-/]', '_', layer.name)
if layer.type == 'Convolution' or layer.type == 4:
type_string = 'mx.symbol.Convolution'
param_string = _convert_conv_param(layer.convolution_param)
need_flatten[name] = True
if layer.type == 'Deconvolution' or layer.type == 39:
type_string = 'mx.symbol.Deconvolution'
param_string = _convert_conv_param(layer.convolution_param)
need_flatten[name] = True
if layer.type == 'Pooling' or layer.type == 17:
type_string = 'mx.symbol.Pooling'
param_string = _convert_pooling_param(layer.pooling_param)
need_flatten[name] = True
if layer.type == 'ReLU' or layer.type == 18:
type_string = 'mx.symbol.Activation'
param_string = "act_type='relu'"
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'TanH' or layer.type == 23:
type_string = 'mx.symbol.Activation'
param_string = "act_type='tanh'"
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'Sigmoid' or layer.type == 19:
type_string = 'mx.symbol.Activation'
param_string = "act_type='sigmoid'"
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'LRN' or layer.type == 15:
type_string = 'mx.symbol.LRN'
param = layer.lrn_param
param_string = "alpha=%f, beta=%f, knorm=%f, nsize=%d" % (
param.alpha, param.beta, param.k, param.local_size)
need_flatten[name] = True
if layer.type == 'InnerProduct' or layer.type == 14:
type_string = 'mx.symbol.FullyConnected'
param = layer.inner_product_param
param_string = "num_hidden=%d, no_bias=%s" % (
param.num_output, not param.bias_term)
need_flatten[name] = False
if layer.type == 'Dropout' or layer.type == 6:
type_string = 'mx.symbol.Dropout'
param = layer.dropout_param
param_string = "p=%f" % param.dropout_ratio
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'Softmax' or layer.type == 20:
if layer.softmax_param.axis == 2:
symbol_string += "%s = mx.symbol.transpose(%s, axes=(0,2,1))\n" %\
(mapping[layer.bottom[0]], mapping[layer.bottom[0]])
type_string = 'mx.symbol.SoftmaxActivation'
param_string = "mode='channel'"
need_flatten[name] = False
else:
type_string = 'mx.symbol.SoftmaxOutput'
if layer.type == 'Flatten' or layer.type == 8:
if 'softmax' in layer.bottom[0]:
prev_name = re.sub('[-/]', '_', layers[i-1].name)
skip_layer = True
else:
type_string = 'mx.symbol.Flatten'
need_flatten[name] = False
if layer.type == 'Split' or layer.type == 22:
type_string = 'split' # will process later
if layer.type == 'Concat' or layer.type == 3:
type_string = 'mx.symbol.Concat'
need_flatten[name] = True
if layer.type == 'Crop':
type_string = 'mx.symbol.Crop'
need_flatten[name] = True
param_string = 'center_crop=True'
if layer.type == 'BatchNorm':
type_string = 'mx.symbol.BatchNorm'
param = layer.batch_norm_param
# CuDNN requires eps to be greater than 1e-05
# We compensate for this change in convert_model
epsilon = param.eps
if (epsilon <= 1e-05):
epsilon = 1e-04
# if next layer is scale, don't fix gamma
fix_gamma = layers[i+1].type != 'Scale'
param_string = 'use_global_stats=%s, fix_gamma=%s, eps=%f' % (
param.use_global_stats, fix_gamma, epsilon)
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'Scale':
assert layers[i-1].type == 'BatchNorm'
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
skip_layer = True
prev_name = re.sub('[-/]', '_', layers[i-1].name)
if layer.type == 'PReLU':
type_string = 'mx.symbol.LeakyReLU'
param = layer.prelu_param
param_string = "act_type='prelu', slope=%f" % param.filler.value
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'Eltwise':
type_string = 'mx.symbol.broadcast_add'
param_string = ""
need_flatten[name] = False
if layer.type == 'Reshape':
type_string = 'mx.symbol.Reshape'
param = layer.reshape_param
param_string = 'shape=(' + ','.join([str(x) for x in list(param.shape.dim)]) + ')'
need_flatten[name] = True
if layer.type == 'AbsVal':
type_string = 'mx.symbol.abs'
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'Normalize':
bottom = re.sub('[-/]', '_', layer.bottom[0])
conv_layer = _find_layer(layers, bottom)
assert conv_layer is not None
param = layer.norm_param
assert not param.across_spatial and not param.channel_shared
assert param.scale_filler.type == 'constant'
if conv_layer.type == 'Convolution':
scale_name = "%s_scale" % name
symbol_string += "%s=mx.sym.Variable(name='%s', shape=(1, %d, 1, 1), init=mx.init.Constant(%f))\n" % \
(scale_name, scale_name, conv_layer.convolution_param.num_output,
param.scale_filler.value)
symbol_string += "%s=mx.symbol.L2Normalization(name='%s', data=%s, mode='channel')\n" %\
(name, name, mapping[layer.bottom[0]])
symbol_string += "%s=mx.symbol.broadcast_mul(lhs=%s, rhs=%s)\n" %\
(name, scale_name, name)
type_string = 'split'
need_flatten[name] = True
else:
raise ValueError('Unknown/Invalid normalize layer!')
if layer.type == 'Permute':
type_string = 'mx.symbol.transpose'
param_string = "axes=(%s)" % (','.join([str(x) for x in layer.permute_param.order]))
need_flatten[name] = True
from_name = ''
if layer.type == 'PriorBox':
param = layer.prior_box_param
if layer.bottom[0] == 'data':
bottom_order = [1]
else:
bottom_order = [0]
try:
import math
min_size = param.min_size[0] / input_dim[2]
max_size = math.sqrt(param.min_size[0] * param.max_size[0]) / input_dim[2]
sizes = '(%f, %f)' %(min_size, max_size)
except AttributeError:
min_size = param.min_size[0] / input_dim[2]
sizes = '(%f)' %(min_size)
ars = list(param.aspect_ratio)
ratios = [1.]
for ar in ars:
ratios.append(ar)
if param.flip:
ratios.append(1. / ar)
ratios_string = '(' + ','.join(str(x) for x in ratios) + ')'
clip = param.clip
if (param.step_h > 0 or param.step_w > 0):
step_h = param.step_h
step_w = param.step_w
elif param.step > 0:
step_h = param.step
step_w = param.step
else:
step_h = -1
step_w = -1
finput_dimh = float(input_dim[2])
finput_dimw = float(input_dim[3])
step = '(%f, %f)' % (step_h / finput_dimh, step_w / finput_dimw)
assert param.offset == 0.5, "currently only support offset = 0.5"
symbol_string += '%s = mx.contrib.symbol.MultiBoxPrior(%s, sizes=%s, ratios=%s, clip=%s, steps=%s, name="%s")\n' % \
(name, mapping[layer.bottom[0]], sizes, ratios_string, clip, step, name)
symbol_string += '%s = mx.symbol.Flatten(data=%s)\n' % (name, name)
type_string = 'split'
need_flatten[name] = False
if layer.type == 'DetectionOutput':
bottom_order = [1, 0, 2]
param = layer.detection_output_param
assert param.share_location == True
assert param.background_label_id == 0
nms_param = param.nms_param
type_string = 'mx.contrib.symbol.MultiBoxDetection'
param_string = "nms_threshold=%f, nms_topk=%d, clip=False" % \
(nms_param.nms_threshold, nms_param.top_k)
if skip_layer:
assert len(layer.bottom) == 1
symbol_string += "%s = %s\n" % (name, prev_name)
elif type_string == '':
raise ValueError('Unknown layer %s!' % layer.type)
elif type_string != 'split':
bottom = layer.bottom
if param_string != "":
param_string = ", " + param_string
if len(bottom) == 1:
# print(need_flatten)
if need_flatten[mapping[bottom[0]]] and type_string == 'mx.symbol.FullyConnected':
flatten_name = "flatten_%d" % flatten_count
symbol_string += "%s=mx.symbol.Flatten(name='%s', data=%s)\n" % (
flatten_name, flatten_name, mapping[bottom[0]])
flatten_count += 1
need_flatten[flatten_name] = False
bottom[0] = flatten_name
mapping[bottom[0]] = bottom[0]
symbol_string += "%s = %s(name='%s', data=%s %s)\n" % (
name, type_string, name, mapping[bottom[0]], param_string)
else:
if not bottom_order:
bottom_order = range(len(bottom))
symbol_string += "%s = %s(name='%s', *[%s] %s)\n" % \
(name, type_string, name, ','.join([mapping[bottom[x]] for x in bottom_order]), param_string)
if layer.type == 'Concat' and layer.concat_param.axis == 2:
symbol_string += "%s = mx.symbol.Reshape(data=%s, shape=(0, -1, 4), name='%s')\n" %\
(name, name, name)
for j in range(len(layer.top)):
mapping[layer.top[j]] = name
output_name = name
return symbol_string, output_name, input_dim | [
"Parse Caffe prototxt into symbol string\n "
] |
Please provide a description of the function:def convert_symbol(prototxt_fname):
sym, output_name, input_dim = _parse_proto(prototxt_fname)
exec(sym) # pylint: disable=exec-used
_locals = locals()
exec("ret = " + output_name, globals(), _locals) # pylint: disable=exec-used
ret = _locals['ret']
return ret, input_dim | [
"Convert caffe model definition into Symbol\n\n Parameters\n ----------\n prototxt_fname : str\n Filename of the prototxt file\n\n Returns\n -------\n Symbol\n Converted Symbol\n tuple\n Input shape\n "
] |
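Typical usage is a one-liner; the file name below is a placeholder for an actual Caffe network definition.

```python
# Assumes convert_symbol (above) is importable; 'deploy.prototxt' is a hypothetical path.
sym, input_dim = convert_symbol('deploy.prototxt')
print(sym.list_arguments())   # argument names of the converted MXNet Symbol
print(input_dim)              # e.g. [1, 3, 224, 224]
```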
Please provide a description of the function:def train_episode(agent, envs, preprocessors, t_max, render):
num_envs = len(envs)
# Buffers to hold trajectories, e.g. `env_xs[i]` will hold the observations
# for environment `i`.
env_xs, env_as = _2d_list(num_envs), _2d_list(num_envs)
env_rs, env_vs = _2d_list(num_envs), _2d_list(num_envs)
episode_rs = np.zeros(num_envs, dtype=np.float64)
for p in preprocessors:
p.reset()
observations = [p.preprocess(e.reset())
for p, e in zip(preprocessors, envs)]
done = np.array([False for _ in range(num_envs)])
all_done = False
t = 1
while not all_done:
if render:
envs[0].render()
# NOTE(reed): Reshape to set the data shape.
agent.model.reshape([('data', (num_envs, preprocessors[0].obs_size))])
step_xs = np.vstack([o.ravel() for o in observations])
# Get actions and values for all environments in a single forward pass.
step_xs_nd = mx.nd.array(step_xs, ctx=agent.ctx)
data_batch = mx.io.DataBatch(data=[step_xs_nd], label=None)
agent.model.forward(data_batch, is_train=False)
_, step_vs, _, step_ps = agent.model.get_outputs()
step_ps = step_ps.asnumpy()
step_vs = step_vs.asnumpy()
step_as = agent.act(step_ps)
# Step each environment whose episode has not completed.
for i, env in enumerate(envs):
if not done[i]:
obs, r, done[i], _ = env.step(step_as[i])
# Record the observation, action, value, and reward in the
# buffers.
env_xs[i].append(step_xs[i].ravel())
env_as[i].append(step_as[i])
env_vs[i].append(step_vs[i][0])
env_rs[i].append(r)
episode_rs[i] += r
# Add 0 as the state value when done.
if done[i]:
env_vs[i].append(0.0)
else:
observations[i] = preprocessors[i].preprocess(obs)
# Perform an update every `t_max` steps.
if t == t_max:
# If the episode has not finished, add current state's value. This
# will be used to 'bootstrap' the final return (see Algorithm S3
# in A3C paper).
step_xs = np.vstack([o.ravel() for o in observations])
step_xs_nd = mx.nd.array(step_xs, ctx=agent.ctx)
data_batch = mx.io.DataBatch(data=[step_xs_nd], label=None)
agent.model.forward(data_batch, is_train=False)
_, extra_vs, _, _ = agent.model.get_outputs()
extra_vs = extra_vs.asnumpy()
for i in range(num_envs):
if not done[i]:
env_vs[i].append(extra_vs[i][0])
# Perform update and clear buffers.
env_xs = np.vstack(list(chain.from_iterable(env_xs)))
agent.train_step(env_xs, env_as, env_rs, env_vs)
env_xs, env_as = _2d_list(num_envs), _2d_list(num_envs)
env_rs, env_vs = _2d_list(num_envs), _2d_list(num_envs)
t = 0
all_done = np.all(done)
t += 1
return episode_rs | [
"Complete an episode's worth of training for each environment."
] |
Please provide a description of the function:def parse_caffemodel(file_path):
f = open(file_path, 'rb')
contents = f.read()
net_param = caffe_pb2.NetParameter()
net_param.ParseFromString(contents)
layers = find_layers(net_param)
return layers | [
"\n parses the trained .caffemodel file\n\n filepath: /path/to/trained-model.caffemodel\n\n returns: layers\n "
] |
Please provide a description of the function:def featurize(self, audio_clip, overwrite=False, save_feature_as_csvfile=False):
return spectrogram_from_file(
audio_clip, step=self.step, window=self.window,
max_freq=self.max_freq, overwrite=overwrite,
save_feature_as_csvfile=save_feature_as_csvfile) | [
" For a given audio clip, calculate the log of its Fourier Transform\n Params:\n audio_clip(str): Path to the audio clip\n "
] |
Please provide a description of the function:def load_metadata_from_desc_file(self, desc_file, partition='train',
max_duration=16.0,):
logger = logUtil.getlogger()
logger.info('Reading description file: {} for partition: {}'
.format(desc_file, partition))
audio_paths, durations, texts = [], [], []
with open(desc_file) as json_line_file:
for line_num, json_line in enumerate(json_line_file):
try:
spec = json.loads(json_line)
if float(spec['duration']) > max_duration:
continue
audio_paths.append(spec['key'])
durations.append(float(spec['duration']))
texts.append(spec['text'])
except Exception as e:
# Change to (KeyError, ValueError) or
# (KeyError,json.decoder.JSONDecodeError), depending on
# json module version
logger.warn('Error reading line #{}: {}'
.format(line_num, json_line))
logger.warn(str(e))
if partition == 'train':
self.count = len(audio_paths)
self.train_audio_paths = audio_paths
self.train_durations = durations
self.train_texts = texts
elif partition == 'validation':
self.val_audio_paths = audio_paths
self.val_durations = durations
self.val_texts = texts
self.val_count = len(audio_paths)
elif partition == 'test':
self.test_audio_paths = audio_paths
self.test_durations = durations
self.test_texts = texts
else:
raise Exception("Invalid partition to load metadata. "
"Must be train/validation/test") | [
" Read metadata from the description file\n (possibly takes long, depending on the filesize)\n Params:\n desc_file (str): Path to a JSON-line file that contains labels and\n paths to the audio files\n partition (str): One of 'train', 'validation' or 'test'\n max_duration (float): In seconds, the maximum duration of\n utterances to train or test on\n "
] |
Please provide a description of the function:def prepare_minibatch(self, audio_paths, texts, overwrite=False,
is_bi_graphemes=False, seq_length=-1, save_feature_as_csvfile=False):
assert len(audio_paths) == len(texts),\
"Inputs and outputs to the network must be of the same number"
# Features is a list of (timesteps, feature_dim) arrays
# Calculate the features for each audio clip, as the log of the
# Fourier Transform of the audio
features = [self.featurize(a, overwrite=overwrite, save_feature_as_csvfile=save_feature_as_csvfile) for a in audio_paths]
input_lengths = [f.shape[0] for f in features]
feature_dim = features[0].shape[1]
mb_size = len(features)
# Pad all the inputs so that they are all the same length
if seq_length == -1:
x = np.zeros((mb_size, self.max_seq_length, feature_dim))
else:
x = np.zeros((mb_size, seq_length, feature_dim))
y = np.zeros((mb_size, self.max_label_length))
labelUtil = LabelUtil.getInstance()
label_lengths = []
for i in range(mb_size):
feat = features[i]
feat = self.normalize(feat) # Center using means and std
x[i, :feat.shape[0], :] = feat
if is_bi_graphemes:
label = generate_bi_graphemes_label(texts[i])
label = labelUtil.convert_bi_graphemes_to_num(label)
y[i, :len(label)] = label
else:
label = labelUtil.convert_word_to_num(texts[i])
y[i, :len(texts[i])] = label
label_lengths.append(len(label))
return {
'x': x, # (0-padded features of shape(mb_size,timesteps,feat_dim)
'y': y, # list(int) Flattened labels (integer sequences)
'texts': texts, # list(str) Original texts
'input_lengths': input_lengths, # list(int) Length of each input
'label_lengths': label_lengths, # list(int) Length of each label
} | [
" Featurize a minibatch of audio, zero pad them and return a dictionary\n Params:\n audio_paths (list(str)): List of paths to audio files\n texts (list(str)): List of texts corresponding to the audio files\n Returns:\n dict: See below for contents\n "
] |
Please provide a description of the function:def sample_normalize(self, k_samples=1000, overwrite=False):
log = logUtil.getlogger()
log.info("Calculating mean and std from samples")
# if k_samples is negative then it goes through total dataset
if k_samples < 0:
audio_paths = self.audio_paths
# using sample
else:
k_samples = min(k_samples, len(self.train_audio_paths))
samples = self.rng.sample(self.train_audio_paths, k_samples)
audio_paths = samples
manager = Manager()
return_dict = manager.dict()
jobs = []
for threadIndex in range(cpu_count()):
proc = Process(target=self.preprocess_sample_normalize, args=(threadIndex, audio_paths, overwrite, return_dict))
jobs.append(proc)
proc.start()
for proc in jobs:
proc.join()
feat = np.sum(np.vstack([item['feat'] for item in return_dict.values()]), axis=0)
count = sum([item['count'] for item in return_dict.values()])
feat_squared = np.sum(np.vstack([item['feat_squared'] for item in return_dict.values()]), axis=0)
self.feats_mean = feat / float(count)
self.feats_std = np.sqrt(feat_squared / float(count) - np.square(self.feats_mean))
np.savetxt(
generate_file_path(self.save_dir, self.model_name, 'feats_mean'), self.feats_mean)
np.savetxt(
generate_file_path(self.save_dir, self.model_name, 'feats_std'), self.feats_std)
log.info("End calculating mean and std from samples") | [
" Estimate the mean and std of the features from the training set\n Params:\n k_samples (int): Use this number of samples for estimation\n "
] |
Please provide a description of the function:def gru(num_hidden, indata, prev_state, param, seqidx, layeridx, dropout=0., is_batchnorm=False, gamma=None, beta=None, name=None):
if dropout > 0.:
indata = mx.sym.Dropout(data=indata, p=dropout)
i2h = mx.sym.FullyConnected(data=indata,
weight=param.gates_i2h_weight,
bias=param.gates_i2h_bias,
num_hidden=num_hidden * 2,
name="t%d_l%d_gates_i2h" % (seqidx, layeridx))
if is_batchnorm:
if name is not None:
i2h = batchnorm(net=i2h, gamma=gamma, beta=beta, name="%s_batchnorm" % name)
else:
i2h = batchnorm(net=i2h, gamma=gamma, beta=beta)
h2h = mx.sym.FullyConnected(data=prev_state.h,
weight=param.gates_h2h_weight,
bias=param.gates_h2h_bias,
num_hidden=num_hidden * 2,
name="t%d_l%d_gates_h2h" % (seqidx, layeridx))
gates = i2h + h2h
slice_gates = mx.sym.SliceChannel(gates, num_outputs=2,
name="t%d_l%d_slice" % (seqidx, layeridx))
update_gate = mx.sym.Activation(slice_gates[0], act_type="sigmoid")
reset_gate = mx.sym.Activation(slice_gates[1], act_type="sigmoid")
# The transform part of GRU is a little magic
htrans_i2h = mx.sym.FullyConnected(data=indata,
weight=param.trans_i2h_weight,
bias=param.trans_i2h_bias,
num_hidden=num_hidden,
name="t%d_l%d_trans_i2h" % (seqidx, layeridx))
h_after_reset = prev_state.h * reset_gate
htrans_h2h = mx.sym.FullyConnected(data=h_after_reset,
weight=param.trans_h2h_weight,
bias=param.trans_h2h_bias,
num_hidden=num_hidden,
name="t%d_l%d_trans_h2h" % (seqidx, layeridx))
h_trans = htrans_i2h + htrans_h2h
h_trans_active = mx.sym.Activation(h_trans, act_type="tanh")
next_h = prev_state.h + update_gate * (h_trans_active - prev_state.h)
return GRUState(h=next_h) | [
"\n GRU Cell symbol\n Reference:\n * Chung, Junyoung, et al. \"Empirical evaluation of gated recurrent neural\n networks on sequence modeling.\" arXiv preprint arXiv:1412.3555 (2014).\n "
] |
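The symbol graph above implements the usual GRU update. A NumPy sketch of the same arithmetic (random weights as stand-ins for the trained parameters, biases omitted for brevity) may make the "transform part" easier to follow.

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

num_hidden, x_dim = 4, 3
rng = np.random.default_rng(0)
W_gx, W_gh = rng.normal(size=(2 * num_hidden, x_dim)), rng.normal(size=(2 * num_hidden, num_hidden))
W_tx, W_th = rng.normal(size=(num_hidden, x_dim)), rng.normal(size=(num_hidden, num_hidden))

x, h_prev = rng.normal(size=x_dim), np.zeros(num_hidden)

gates = W_gx @ x + W_gh @ h_prev                      # i2h + h2h, width 2*num_hidden
update_gate = sigmoid(gates[:num_hidden])             # first slice
reset_gate = sigmoid(gates[num_hidden:])              # second slice
h_trans = np.tanh(W_tx @ x + W_th @ (h_prev * reset_gate))
h_next = h_prev + update_gate * (h_trans - h_prev)    # same interpolation as next_h above
print(h_next)
```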
Please provide a description of the function:def save_image(data, epoch, image_size, batch_size, output_dir, padding=2):
data = data.asnumpy().transpose((0, 2, 3, 1))
datanp = np.clip(
(data - np.min(data))*(255.0/(np.max(data) - np.min(data))), 0, 255).astype(np.uint8)
x_dim = min(8, batch_size)
y_dim = int(math.ceil(float(batch_size) / x_dim))
height, width = int(image_size + padding), int(image_size + padding)
grid = np.zeros((height * y_dim + 1 + padding // 2, width *
x_dim + 1 + padding // 2, 3), dtype=np.uint8)
k = 0
for y in range(y_dim):
for x in range(x_dim):
if k >= batch_size:
break
start_y = y * height + 1 + padding // 2
end_y = start_y + height - padding
start_x = x * width + 1 + padding // 2
end_x = start_x + width - padding
np.copyto(grid[start_y:end_y, start_x:end_x, :], datanp[k])
k += 1
imageio.imwrite(
'{}/fake_samples_epoch_{}.png'.format(output_dir, epoch), grid) | [
" save image "
] |
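A hedged usage sketch of the function above; the output directory is hypothetical and must already exist, and the call requires MXNet and imageio.

```python
import mxnet as mx

# 16 fake RGB images of size 64x64, laid out on an 8x2 grid and written to disk.
# Assumes save_image (above) is in scope.
fake = mx.nd.random.uniform(shape=(16, 3, 64, 64))
save_image(fake, epoch=0, image_size=64, batch_size=16, output_dir='./outputs')
```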
Please provide a description of the function:def list_image(root, recursive, exts):
i = 0
if recursive:
cat = {}
for path, dirs, files in os.walk(root, followlinks=True):
dirs.sort()
files.sort()
for fname in files:
fpath = os.path.join(path, fname)
suffix = os.path.splitext(fname)[1].lower()
if os.path.isfile(fpath) and (suffix in exts):
if path not in cat:
cat[path] = len(cat)
yield (i, os.path.relpath(fpath, root), cat[path])
i += 1
for k, v in sorted(cat.items(), key=lambda x: x[1]):
print(os.path.relpath(k, root), v)
else:
for fname in sorted(os.listdir(root)):
fpath = os.path.join(root, fname)
suffix = os.path.splitext(fname)[1].lower()
if os.path.isfile(fpath) and (suffix in exts):
yield (i, os.path.relpath(fpath, root), 0)
i += 1 | [
"Traverses the root of directory that contains images and\n generates image list iterator.\n Parameters\n ----------\n root: string\n recursive: bool\n exts: string\n Returns\n -------\n image iterator that contains all the image under the specified path\n "
] |
Please provide a description of the function:def write_list(path_out, image_list):
with open(path_out, 'w') as fout:
for i, item in enumerate(image_list):
line = '%d\t' % item[0]
for j in item[2:]:
line += '%f\t' % j
line += '%s\n' % item[1]
fout.write(line) | [
"Hepler function to write image list into the file.\n The format is as below,\n integer_image_index \\t float_label_index \\t path_to_image\n Note that the blank between number and tab is only used for readability.\n Parameters\n ----------\n path_out: string\n image_list: list\n "
] |
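A short example of the resulting .lst format, assuming `write_list` above is in scope; the file and image names are hypothetical.

```python
write_list('example.lst', [(0, 'cat/001.jpg', 1.0), (1, 'dog/002.jpg', 2.0)])
# example.lst then contains tab-separated lines of index, label(s), path:
# 0    1.000000    cat/001.jpg
# 1    2.000000    dog/002.jpg
```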
Please provide a description of the function:def make_list(args):
image_list = list_image(args.root, args.recursive, args.exts)
image_list = list(image_list)
if args.shuffle is True:
random.seed(100)
random.shuffle(image_list)
N = len(image_list)
chunk_size = (N + args.chunks - 1) // args.chunks
for i in range(args.chunks):
chunk = image_list[i * chunk_size:(i + 1) * chunk_size]
if args.chunks > 1:
str_chunk = '_%d' % i
else:
str_chunk = ''
sep = int(chunk_size * args.train_ratio)
sep_test = int(chunk_size * args.test_ratio)
if args.train_ratio == 1.0:
write_list(args.prefix + str_chunk + '.lst', chunk)
else:
if args.test_ratio:
write_list(args.prefix + str_chunk + '_test.lst', chunk[:sep_test])
if args.train_ratio + args.test_ratio < 1.0:
write_list(args.prefix + str_chunk + '_val.lst', chunk[sep_test + sep:])
write_list(args.prefix + str_chunk + '_train.lst', chunk[sep_test:sep_test + sep]) | [
"Generates .lst file.\n Parameters\n ----------\n args: object that contains all the arguments\n "
] |
Please provide a description of the function:def read_list(path_in):
with open(path_in) as fin:
while True:
line = fin.readline()
if not line:
break
line = [i.strip() for i in line.strip().split('\t')]
line_len = len(line)
# check the data format of .lst file
if line_len < 3:
print('lst should have at least three parts, but only has %s parts for %s' % (line_len, line))
continue
try:
item = [int(line[0])] + [line[-1]] + [float(i) for i in line[1:-1]]
except Exception as e:
print('Parsing lst met error for %s, detail: %s' % (line, e))
continue
yield item | [
"Reads the .lst file and generates corresponding iterator.\n Parameters\n ----------\n path_in: string\n Returns\n -------\n item iterator that contains information in .lst file\n "
] |
Please provide a description of the function:def image_encode(args, i, item, q_out):
fullpath = os.path.join(args.root, item[1])
if len(item) > 3 and args.pack_label:
header = mx.recordio.IRHeader(0, item[2:], item[0], 0)
else:
header = mx.recordio.IRHeader(0, item[2], item[0], 0)
if args.pass_through:
try:
with open(fullpath, 'rb') as fin:
img = fin.read()
s = mx.recordio.pack(header, img)
q_out.put((i, s, item))
except Exception as e:
traceback.print_exc()
print('pack_img error:', item[1], e)
q_out.put((i, None, item))
return
try:
img = cv2.imread(fullpath, args.color)
except:
traceback.print_exc()
print('imread error trying to load file: %s ' % fullpath)
q_out.put((i, None, item))
return
if img is None:
print('imread read blank (None) image for file: %s' % fullpath)
q_out.put((i, None, item))
return
if args.center_crop:
if img.shape[0] > img.shape[1]:
margin = (img.shape[0] - img.shape[1]) // 2
img = img[margin:margin + img.shape[1], :]
else:
margin = (img.shape[1] - img.shape[0]) // 2
img = img[:, margin:margin + img.shape[0]]
if args.resize:
if img.shape[0] > img.shape[1]:
newsize = (args.resize, img.shape[0] * args.resize // img.shape[1])
else:
newsize = (img.shape[1] * args.resize // img.shape[0], args.resize)
img = cv2.resize(img, newsize)
try:
s = mx.recordio.pack_img(header, img, quality=args.quality, img_fmt=args.encoding)
q_out.put((i, s, item))
except Exception as e:
traceback.print_exc()
print('pack_img error on file: %s' % fullpath, e)
q_out.put((i, None, item))
return | [
"Reads, preprocesses, packs the image and put it back in output queue.\n Parameters\n ----------\n args: object\n i: int\n item: list\n q_out: queue\n "
] |
Please provide a description of the function:def read_worker(args, q_in, q_out):
while True:
deq = q_in.get()
if deq is None:
break
i, item = deq
image_encode(args, i, item, q_out) | [
"Function that will be spawned to fetch the image\n from the input queue and put it back to output queue.\n Parameters\n ----------\n args: object\n q_in: queue\n q_out: queue\n "
] |
Please provide a description of the function:def write_worker(q_out, fname, working_dir):
pre_time = time.time()
count = 0
fname = os.path.basename(fname)
fname_rec = os.path.splitext(fname)[0] + '.rec'
fname_idx = os.path.splitext(fname)[0] + '.idx'
record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, fname_idx),
os.path.join(working_dir, fname_rec), 'w')
buf = {}
more = True
while more:
deq = q_out.get()
if deq is not None:
i, s, item = deq
buf[i] = (s, item)
else:
more = False
while count in buf:
s, item = buf[count]
del buf[count]
if s is not None:
record.write_idx(item[0], s)
if count % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', count)
pre_time = cur_time
count += 1 | [
"Function that will be spawned to fetch processed image\n from the output queue and write to the .rec file.\n Parameters\n ----------\n q_out: queue\n fname: string\n working_dir: string\n "
] |
Please provide a description of the function:def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Create an image list or \
make a record database by reading from an image list')
parser.add_argument('prefix', help='prefix of input/output lst and rec files.')
parser.add_argument('root', help='path to folder containing images.')
cgroup = parser.add_argument_group('Options for creating image lists')
cgroup.add_argument('--list', action='store_true',
help='If this is set im2rec will create image list(s) by traversing root folder\
and output to <prefix>.lst.\
Otherwise im2rec will read <prefix>.lst and create a database at <prefix>.rec')
cgroup.add_argument('--exts', nargs='+', default=['.jpeg', '.jpg', '.png'],
help='list of acceptable image extensions.')
cgroup.add_argument('--chunks', type=int, default=1, help='number of chunks.')
cgroup.add_argument('--train-ratio', type=float, default=1.0,
help='Ratio of images to use for training.')
cgroup.add_argument('--test-ratio', type=float, default=0,
help='Ratio of images to use for testing.')
cgroup.add_argument('--recursive', action='store_true',
help='If true recursively walk through subdirs and assign an unique label\
to images in each folder. Otherwise only include images in the root folder\
and give them label 0.')
cgroup.add_argument('--no-shuffle', dest='shuffle', action='store_false',
help='If this is passed, \
im2rec will not randomize the image order in <prefix>.lst')
rgroup = parser.add_argument_group('Options for creating database')
rgroup.add_argument('--pass-through', action='store_true',
help='whether to skip transformation and save image as is')
rgroup.add_argument('--resize', type=int, default=0,
help='resize the shorter edge of image to the newsize, original images will\
be packed by default.')
rgroup.add_argument('--center-crop', action='store_true',
help='specify whether to crop the center image to make it rectangular.')
rgroup.add_argument('--quality', type=int, default=95,
help='JPEG quality for encoding, 1-100; or PNG compression for encoding, 1-9')
rgroup.add_argument('--num-thread', type=int, default=1,
help='number of thread to use for encoding. order of images will be different\
from the input list if >1. the input list will be modified to match the\
resulting order.')
rgroup.add_argument('--color', type=int, default=1, choices=[-1, 0, 1],
help='specify the color mode of the loaded image.\
1: Loads a color image. Any transparency of image will be neglected. It is the default flag.\
0: Loads image in grayscale mode.\
-1:Loads image as such including alpha channel.')
rgroup.add_argument('--encoding', type=str, default='.jpg', choices=['.jpg', '.png'],
help='specify the encoding of the images.')
rgroup.add_argument('--pack-label', action='store_true',
help='Whether to also pack multi dimensional label in the record file')
args = parser.parse_args()
args.prefix = os.path.abspath(args.prefix)
args.root = os.path.abspath(args.root)
return args | [
"Defines all arguments.\n Returns\n -------\n args object that contains all the params\n "
] |
Please provide a description of the function:def transform(data, target_wd, target_ht, is_train, box):
if box is not None:
x, y, w, h = box
data = data[y:min(y+h, data.shape[0]), x:min(x+w, data.shape[1])]
# Resize to target_wd * target_ht.
data = mx.image.imresize(data, target_wd, target_ht)
# Normalize in the same way as the pre-trained model.
data = data.astype(np.float32) / 255.0
data = (data - mx.nd.array([0.485, 0.456, 0.406])) / mx.nd.array([0.229, 0.224, 0.225])
if is_train:
if random.random() < 0.5:
data = nd.flip(data, axis=1)
data, _ = mx.image.random_crop(data, (224, 224))
else:
data, _ = mx.image.center_crop(data, (224, 224))
# Transpose from (target_wd, target_ht, 3)
# to (3, target_wd, target_ht).
data = nd.transpose(data, (2, 0, 1))
# If image is greyscale, repeat 3 times to get RGB image.
if data.shape[0] == 1:
data = nd.tile(data, (3, 1, 1))
return data.reshape((1,) + data.shape) | [
"Crop and normnalize an image nd array."
] |
Please provide a description of the function:def cub200_iterator(data_path, batch_k, batch_size, data_shape):
return (CUB200Iter(data_path, batch_k, batch_size, data_shape, is_train=True),
CUB200Iter(data_path, batch_k, batch_size, data_shape, is_train=False)) | [
"Return training and testing iterator for the CUB200-2011 dataset."
] |
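A hedged usage sketch, assuming the CUB200Iter class referenced above is defined and data_path points at an extracted CUB200-2011 folder; the path and batch sizes are illustrative.
train_iter, val_iter = cub200_iterator(data_path='data/CUB_200_2011',
                                       batch_k=5, batch_size=70,
                                       data_shape=(3, 224, 224))
batch = train_iter.next()    # DataBatch holding batch_size images and the sampled labels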
Please provide a description of the function:def get_image(self, img, is_train):
img_arr = mx.image.imread(img)
img_arr = transform(img_arr, 256, 256, is_train, self.boxes[img])
return img_arr | [
"Load and transform an image."
] |
Please provide a description of the function:def sample_train_batch(self):
batch = []
labels = []
num_groups = self.batch_size // self.batch_k
# For CUB200, we use the first 100 classes for training.
sampled_classes = np.random.choice(100, num_groups, replace=False)
for i in range(num_groups):
img_fnames = np.random.choice(self.train_image_files[sampled_classes[i]],
self.batch_k, replace=False)
batch += [self.get_image(img_fname, is_train=True) for img_fname in img_fnames]
labels += [sampled_classes[i] for _ in range(self.batch_k)]
return nd.concatenate(batch, axis=0), labels | [
"Sample a training batch (data and label)."
] |
Please provide a description of the function:def next(self):
if self.is_train:
data, labels = self.sample_train_batch()
else:
if self.test_count * self.batch_size < len(self.test_image_files):
data, labels = self.get_test_batch()
self.test_count += 1
else:
self.test_count = 0
raise StopIteration
return mx.io.DataBatch(data=[data], label=[labels]) | [
"Return a batch."
] |
Please provide a description of the function:def load_mnist(training_num=50000):
data_path = os.path.join(os.path.dirname(os.path.realpath('__file__')), 'mnist.npz')
if not os.path.isfile(data_path):
from six.moves import urllib
origin = (
'https://github.com/sxjscience/mxnet/raw/master/example/bayesian-methods/mnist.npz'
)
print('Downloading data from %s to %s' % (origin, data_path))
ctx = ssl._create_unverified_context()
with urllib.request.urlopen(origin, context=ctx) as u, open(data_path, 'wb') as f:
f.write(u.read())
print('Done!')
dat = numpy.load(data_path)
X = (dat['X'][:training_num] / 126.0).astype('float32')
Y = dat['Y'][:training_num]
X_test = (dat['X_test'] / 126.0).astype('float32')
Y_test = dat['Y_test']
Y = Y.reshape((Y.shape[0],))
Y_test = Y_test.reshape((Y_test.shape[0],))
return X, Y, X_test, Y_test | [
"Load mnist dataset"
] |
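A small usage sketch, assuming load_mnist above is in scope; the first call downloads mnist.npz next to the script if it is not already present.
X, Y, X_test, Y_test = load_mnist(training_num=50000)
print(X.shape, Y.shape, X_test.shape)    # exact shapes depend on the arrays stored in mnist.npz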
Please provide a description of the function:def feature_list():
lib_features_c_array = ctypes.POINTER(Feature)()
lib_features_size = ctypes.c_size_t()
check_call(_LIB.MXLibInfoFeatures(ctypes.byref(lib_features_c_array), ctypes.byref(lib_features_size)))
features = [lib_features_c_array[i] for i in range(lib_features_size.value)]
return features | [
"\n Check the library for compile-time features. The list of features are maintained in libinfo.h and libinfo.cc\n\n Returns\n -------\n list\n List of :class:`.Feature` objects\n "
] |
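This query is exposed as mx.runtime.feature_list() in MXNet 1.5 and later; a quick way to inspect a build is to print each Feature's name and enabled flag.
import mxnet as mx

for feature in mx.runtime.feature_list():
    print('%-20s enabled=%s' % (feature.name, feature.enabled))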
Please provide a description of the function:def is_enabled(self, feature_name):
feature_name = feature_name.upper()
if feature_name not in self:
raise RuntimeError("Feature '{}' is unknown, known features are: {}".format(
feature_name, list(self.keys())))
return self[feature_name].enabled | [
"\n Check for a particular feature by name\n\n Parameters\n ----------\n feature_name: str\n The name of a valid feature as string for example 'CUDA'\n\n Returns\n -------\n Boolean\n True if it's enabled, False if it's disabled, RuntimeError if the feature is not known\n "
] |
Please provide a description of the function:def cache_path(self):
cache_path = os.path.join(os.path.dirname(__file__), '..', 'cache')
if not os.path.exists(cache_path):
os.mkdir(cache_path)
return cache_path | [
"\n make a directory to store all caches\n\n Returns:\n ---------\n cache path\n "
] |
Please provide a description of the function:def _load_image_set_index(self, shuffle):
image_set_index_file = os.path.join(self.data_path, 'ImageSets', 'Main', self.image_set + '.txt')
assert os.path.exists(image_set_index_file), 'Path does not exist: {}'.format(image_set_index_file)
with open(image_set_index_file) as f:
image_set_index = [x.strip() for x in f.readlines()]
if shuffle:
np.random.shuffle(image_set_index)
return image_set_index | [
"\n find out which indexes correspond to given image set (train or val)\n\n Parameters:\n ----------\n shuffle : boolean\n whether to shuffle the image list\n Returns:\n ----------\n entire list of images specified in the setting\n "
] |
Please provide a description of the function:def image_path_from_index(self, index):
assert self.image_set_index is not None, "Dataset not initialized"
name = self.image_set_index[index]
image_file = os.path.join(self.data_path, 'JPEGImages', name + self.extension)
assert os.path.exists(image_file), 'Path does not exist: {}'.format(image_file)
return image_file | [
"\n given image index, find out full path\n\n Parameters:\n ----------\n index: int\n index of a specific image\n Returns:\n ----------\n full path of this image\n "
] |
Please provide a description of the function:def _label_path_from_index(self, index):
label_file = os.path.join(self.data_path, 'Annotations', index + '.xml')
assert os.path.exists(label_file), 'Path does not exist: {}'.format(label_file)
return label_file | [
"\n given image index, find out annotation path\n\n Parameters:\n ----------\n index: int\n index of a specific image\n\n Returns:\n ----------\n full path of annotation file\n "
] |
Please provide a description of the function:def _load_image_labels(self):
temp = []
# load ground-truth from xml annotations
for idx in self.image_set_index:
label_file = self._label_path_from_index(idx)
tree = ET.parse(label_file)
root = tree.getroot()
size = root.find('size')
width = float(size.find('width').text)
height = float(size.find('height').text)
label = []
for obj in root.iter('object'):
difficult = int(obj.find('difficult').text)
# if not self.config['use_difficult'] and difficult == 1:
# continue
cls_name = obj.find('name').text
if cls_name not in self.classes:
continue
cls_id = self.classes.index(cls_name)
xml_box = obj.find('bndbox')
xmin = float(xml_box.find('xmin').text) / width
ymin = float(xml_box.find('ymin').text) / height
xmax = float(xml_box.find('xmax').text) / width
ymax = float(xml_box.find('ymax').text) / height
label.append([cls_id, xmin, ymin, xmax, ymax, difficult])
temp.append(np.array(label))
return temp | [
"\n preprocess all ground-truths\n\n Returns:\n ----------\n labels packed in [num_images x max_num_objects x 5] tensor\n "
] |
Please provide a description of the function:def evaluate_detections(self, detections):
# make all these folders for results
result_dir = os.path.join(self.devkit_path, 'results')
if not os.path.exists(result_dir):
os.mkdir(result_dir)
year_folder = os.path.join(self.devkit_path, 'results', 'VOC' + self.year)
if not os.path.exists(year_folder):
os.mkdir(year_folder)
res_file_folder = os.path.join(self.devkit_path, 'results', 'VOC' + self.year, 'Main')
if not os.path.exists(res_file_folder):
os.mkdir(res_file_folder)
self.write_pascal_results(detections)
self.do_python_eval() | [
"\n top level evaluations\n Parameters:\n ----------\n detections: list\n result list, each entry is a matrix of detections\n Returns:\n ----------\n None\n "
] |
Please provide a description of the function:def get_result_file_template(self):
res_file_folder = os.path.join(self.devkit_path, 'results', 'VOC' + self.year, 'Main')
comp_id = self.config['comp_id']
filename = comp_id + '_det_' + self.image_set + '_{:s}.txt'
path = os.path.join(res_file_folder, filename)
return path | [
"\n this is a template\n VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt\n\n Returns:\n ----------\n a string template\n "
] |
Please provide a description of the function:def write_pascal_results(self, all_boxes):
for cls_ind, cls in enumerate(self.classes):
print('Writing {} VOC results file'.format(cls))
filename = self.get_result_file_template().format(cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.image_set_index):
dets = all_boxes[im_ind]
if dets.shape[0] < 1:
continue
h, w = self._get_imsize(self.image_path_from_index(im_ind))
# the VOCdevkit expects 1-based indices
for k in range(dets.shape[0]):
if (int(dets[k, 0]) == cls_ind):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, 1],
int(dets[k, 2] * w) + 1, int(dets[k, 3] * h) + 1,
int(dets[k, 4] * w) + 1, int(dets[k, 5] * h) + 1)) | [
"\n write results files in pascal devkit path\n Parameters:\n ----------\n all_boxes: list\n boxes to be processed [bbox, confidence]\n Returns:\n ----------\n None\n "
] |
Please provide a description of the function:def do_python_eval(self):
annopath = os.path.join(self.data_path, 'Annotations', '{:s}.xml')
imageset_file = os.path.join(self.data_path, 'ImageSets', 'Main', self.image_set + '.txt')
cache_dir = os.path.join(self.cache_path, self.name)
aps = []
# The PASCAL VOC metric changed in 2010
use_07_metric = True if int(self.year) < 2010 else False
print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
for cls_ind, cls in enumerate(self.classes):
filename = self.get_result_file_template().format(cls)
rec, prec, ap = voc_eval(filename, annopath, imageset_file, cls, cache_dir,
ovthresh=0.5, use_07_metric=use_07_metric)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
print('Mean AP = {:.4f}'.format(np.mean(aps))) | [
"\n python evaluation wrapper\n\n Returns:\n ----------\n None\n "
] |
Please provide a description of the function:def _get_imsize(self, im_name):
img = cv2.imread(im_name)
return (img.shape[0], img.shape[1]) | [
"\n get image size info\n Returns:\n ----------\n tuple of (height, width)\n "
] |
Please provide a description of the function:def add_fit_args(parser):
train = parser.add_argument_group('Training', 'model training')
train.add_argument('--network', type=str,
help='the neural network to use')
train.add_argument('--num-layers', type=int,
help='number of layers in the neural network, \
required by some networks such as resnet')
train.add_argument('--gpus', type=str,
help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu')
train.add_argument('--kv-store', type=str, default='device',
help='key-value store type')
train.add_argument('--num-epochs', type=int, default=100,
help='max num of epochs')
train.add_argument('--lr', type=float, default=0.1,
help='initial learning rate')
train.add_argument('--lr-factor', type=float, default=0.1,
help='the ratio to reduce lr on each step')
train.add_argument('--lr-step-epochs', type=str,
help='the epochs to reduce the lr, e.g. 30,60')
train.add_argument('--initializer', type=str, default='default',
help='the initializer type')
train.add_argument('--optimizer', type=str, default='sgd',
help='the optimizer type')
train.add_argument('--mom', type=float, default=0.9,
help='momentum for sgd')
train.add_argument('--wd', type=float, default=0.0001,
help='weight decay for sgd')
train.add_argument('--batch-size', type=int, default=128,
help='the batch size')
train.add_argument('--disp-batches', type=int, default=20,
help='show progress for every n batches')
train.add_argument('--model-prefix', type=str,
help='model prefix')
train.add_argument('--save-period', type=int, default=1, help='params saving period')
parser.add_argument('--monitor', dest='monitor', type=int, default=0,
help='log network parameters every N iters if larger than 0')
train.add_argument('--load-epoch', type=int,
help='load the model on an epoch using the model-load-prefix')
train.add_argument('--top-k', type=int, default=0,
help='report the top-k accuracy. 0 means no report.')
train.add_argument('--loss', type=str, default='',
help='show the cross-entropy or nll loss. ce stands for cross-entropy, nll-loss stands for negative log-likelihood loss')
train.add_argument('--test-io', type=int, default=0,
help='1 means test reading speed without training')
train.add_argument('--dtype', type=str, default='float32',
help='precision: float32 or float16')
train.add_argument('--gc-type', type=str, default='none',
help='type of gradient compression to use, \
takes `2bit` or `none` for now')
train.add_argument('--gc-threshold', type=float, default=0.5,
help='threshold for 2bit gradient compression')
# additional parameters for large batch sgd
train.add_argument('--macrobatch-size', type=int, default=0,
help='distributed effective batch size')
train.add_argument('--warmup-epochs', type=int, default=5,
help='the epochs to ramp-up lr to scaled large-batch value')
train.add_argument('--warmup-strategy', type=str, default='linear',
help='the ramping-up strategy for large batch sgd')
train.add_argument('--profile-worker-suffix', type=str, default='',
help='profile worker actions into this file. During distributed training\
the saved filename will be rank1_ followed by this suffix')
train.add_argument('--profile-server-suffix', type=str, default='',
help='profile server actions into a file with name like rank1_ followed by this suffix \
during distributed training')
return train | [
"\n parser : argparse.ArgumentParser\n return a parser added with args required by fit\n "
] |
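A minimal sketch of how the example training scripts wire this helper into argparse; the command-line values passed to parse_args are illustrative.
import argparse

parser = argparse.ArgumentParser(description='train an image classification model')
add_fit_args(parser)
args = parser.parse_args(['--network', 'resnet', '--num-layers', '50', '--gpus', '0'])
print(args.network, args.num_layers, args.lr, args.batch_size)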
Please provide a description of the function:def fit(args, network, data_loader, **kwargs):
# kvstore
kv = mx.kvstore.create(args.kv_store)
if args.gc_type != 'none':
kv.set_gradient_compression({'type': args.gc_type,
'threshold': args.gc_threshold})
if args.profile_server_suffix:
mx.profiler.set_config(filename=args.profile_server_suffix, profile_all=True, profile_process='server')
mx.profiler.set_state(state='run', profile_process='server')
if args.profile_worker_suffix:
if kv.num_workers > 1:
filename = 'rank' + str(kv.rank) + '_' + args.profile_worker_suffix
else:
filename = args.profile_worker_suffix
mx.profiler.set_config(filename=filename, profile_all=True, profile_process='worker')
mx.profiler.set_state(state='run', profile_process='worker')
# logging
head = '%(asctime)-15s Node[' + str(kv.rank) + '] %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
logging.info('start with arguments %s', args)
epoch_size = get_epoch_size(args, kv)
# data iterators
(train, val) = data_loader(args, kv)
if 'dist' in args.kv_store and not 'async' in args.kv_store:
logging.info('Resizing training data to %d batches per machine', epoch_size)
# resize train iter to ensure each machine has same number of batches per epoch
# if not, dist_sync can hang at the end with one machine waiting for other machines
train = mx.io.ResizeIter(train, epoch_size)
if args.test_io:
tic = time.time()
for i, batch in enumerate(train):
if isinstance(batch, list):
for b in batch:
for j in b.data:
j.wait_to_read()
else:
for j in batch.data:
j.wait_to_read()
if (i + 1) % args.disp_batches == 0:
logging.info('Batch [%d]\tSpeed: %.2f samples/sec', i,
args.disp_batches * args.batch_size / (time.time() - tic))
tic = time.time()
return
# load model
if 'arg_params' in kwargs and 'aux_params' in kwargs:
arg_params = kwargs['arg_params']
aux_params = kwargs['aux_params']
else:
sym, arg_params, aux_params = _load_model(args, kv.rank)
if sym is not None:
assert sym.tojson() == network.tojson()
# save model
checkpoint = _save_model(args, kv.rank)
# devices for training
devs = mx.cpu() if args.gpus is None or args.gpus == "" else [
mx.gpu(int(i)) for i in args.gpus.split(',')]
# learning rate
lr, lr_scheduler = _get_lr_scheduler(args, kv)
# create model
model = mx.mod.Module(
context=devs,
symbol=network
)
optimizer_params = {
'learning_rate': lr,
'wd': args.wd,
'lr_scheduler': lr_scheduler,
'multi_precision': True}
# Only a limited number of optimizers have 'momentum' property
has_momentum = {'sgd', 'dcasgd', 'nag', 'signum', 'lbsgd'}
if args.optimizer in has_momentum:
optimizer_params['momentum'] = args.mom
monitor = mx.mon.Monitor(
args.monitor, pattern=".*") if args.monitor > 0 else None
# A limited number of optimizers have a warmup period
has_warmup = {'lbsgd', 'lbnag'}
if args.optimizer in has_warmup:
nworkers = kv.num_workers
if epoch_size < 1:
epoch_size = 1
macrobatch_size = args.macrobatch_size
if macrobatch_size < args.batch_size * nworkers:
macrobatch_size = args.batch_size * nworkers
#batch_scale = round(float(macrobatch_size) / args.batch_size / nworkers +0.4999)
batch_scale = math.ceil(
float(macrobatch_size) / args.batch_size / nworkers)
optimizer_params['updates_per_epoch'] = epoch_size
optimizer_params['begin_epoch'] = args.load_epoch if args.load_epoch else 0
optimizer_params['batch_scale'] = batch_scale
optimizer_params['warmup_strategy'] = args.warmup_strategy
optimizer_params['warmup_epochs'] = args.warmup_epochs
optimizer_params['num_epochs'] = args.num_epochs
if args.initializer == 'default':
if args.network == 'alexnet':
# AlexNet will not converge using Xavier
initializer = mx.init.Normal()
# VGG will not tend to converge using Xavier-Gaussian
elif args.network and 'vgg' in args.network:
initializer = mx.init.Xavier()
else:
initializer = mx.init.Xavier(
rnd_type='gaussian', factor_type="in", magnitude=2)
# initializer = mx.init.Xavier(factor_type="in", magnitude=2.34),
elif args.initializer == 'xavier':
initializer = mx.init.Xavier()
elif args.initializer == 'msra':
initializer = mx.init.MSRAPrelu()
elif args.initializer == 'orthogonal':
initializer = mx.init.Orthogonal()
elif args.initializer == 'normal':
initializer = mx.init.Normal()
elif args.initializer == 'uniform':
initializer = mx.init.Uniform()
elif args.initializer == 'one':
initializer = mx.init.One()
elif args.initializer == 'zero':
initializer = mx.init.Zero()
# evaluation metrics
eval_metrics = ['accuracy']
if args.top_k > 0:
eval_metrics.append(mx.metric.create(
'top_k_accuracy', top_k=args.top_k))
supported_loss = ['ce', 'nll_loss']
if len(args.loss) > 0:
# ce or nll loss is only applicable to softmax output
loss_type_list = args.loss.split(',')
if 'softmax_output' in network.list_outputs():
for loss_type in loss_type_list:
loss_type = loss_type.strip()
if loss_type == 'nll':
loss_type = 'nll_loss'
if loss_type not in supported_loss:
logging.warning(loss_type + ' is not a valid loss type, only cross-entropy or ' \
'negative log-likelihood loss is supported!')
else:
eval_metrics.append(mx.metric.create(loss_type))
else:
logging.warning("The output is not softmax_output, loss argument will be skipped!")
# callbacks that run after each batch
batch_end_callbacks = [mx.callback.Speedometer(
args.batch_size, args.disp_batches)]
if 'batch_end_callback' in kwargs:
cbs = kwargs['batch_end_callback']
batch_end_callbacks += cbs if isinstance(cbs, list) else [cbs]
# run
model.fit(train,
begin_epoch=args.load_epoch if args.load_epoch else 0,
num_epoch=args.num_epochs,
eval_data=val,
eval_metric=eval_metrics,
kvstore=kv,
optimizer=args.optimizer,
optimizer_params=optimizer_params,
initializer=initializer,
arg_params=arg_params,
aux_params=aux_params,
batch_end_callback=batch_end_callbacks,
epoch_end_callback=checkpoint,
allow_missing=True,
monitor=monitor)
# stop the profilers once training is done
if args.profile_server_suffix:
    mx.profiler.set_state(state='stop', profile_process='server')
if args.profile_worker_suffix:
    mx.profiler.set_state(state='stop', profile_process='worker')
"\n train a model\n args : argparse returns\n network : the symbol definition of the nerual network\n data_loader : function that returns the train and val data iterators\n "
] |
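Typical call pattern, sketched under the assumption that a network factory and a data-loader callable returning (train, val) iterators exist; build_symbol and get_data_iters below are hypothetical names, not part of this module.
import argparse

parser = argparse.ArgumentParser()
add_fit_args(parser)                 # defined above
args = parser.parse_args()
sym = build_symbol(args)             # hypothetical: returns an mx.sym.Symbol
fit(args, sym, get_data_iters)       # hypothetical loader: get_data_iters(args, kv) -> (train, val)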
Please provide a description of the function:def CreateMultiRandCropAugmenter(min_object_covered=0.1, aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0), min_eject_coverage=0.3,
max_attempts=50, skip_prob=0):
def align_parameters(params):
out_params = []
num = 1
for p in params:
if not isinstance(p, list):
p = [p]
out_params.append(p)
num = max(num, len(p))
# align for each param
for k, p in enumerate(out_params):
if len(p) != num:
assert len(p) == 1
out_params[k] = p * num
return out_params
aligned_params = align_parameters([min_object_covered, aspect_ratio_range, area_range,
min_eject_coverage, max_attempts])
augs = []
for moc, arr, ar, mec, ma in zip(*aligned_params):
augs.append(DetRandomCropAug(min_object_covered=moc, aspect_ratio_range=arr,
area_range=ar, min_eject_coverage=mec, max_attempts=ma))
return DetRandomSelectAug(augs, skip_prob=skip_prob) | [
"Helper function to create multiple random crop augmenters.\n\n Parameters\n ----------\n min_object_covered : float or list of float, default=0.1\n The cropped area of the image must contain at least this fraction of\n any bounding box supplied. The value of this parameter should be non-negative.\n In the case of 0, the cropped area does not need to overlap any of the\n bounding boxes supplied.\n min_eject_coverage : float or list of float, default=0.3\n The minimum coverage of cropped sample w.r.t its original size. With this\n constraint, objects that have marginal area after crop will be discarded.\n aspect_ratio_range : tuple of floats or list of tuple of floats, default=(0.75, 1.33)\n The cropped area of the image must have an aspect ratio = width / height\n within this range.\n area_range : tuple of floats or list of tuple of floats, default=(0.05, 1.0)\n The cropped area of the image must contain a fraction of the supplied\n image within in this range.\n max_attempts : int or list of int, default=50\n Number of attempts at generating a cropped/padded region of the image of the\n specified constraints. After max_attempts failures, return the original image.\n\n Examples\n --------\n >>> # An example of creating multiple random crop augmenters\n >>> min_object_covered = [0.1, 0.3, 0.5, 0.7, 0.9] # use 5 augmenters\n >>> aspect_ratio_range = (0.75, 1.33) # use same range for all augmenters\n >>> area_range = [(0.1, 1.0), (0.2, 1.0), (0.2, 1.0), (0.3, 0.9), (0.5, 1.0)]\n >>> min_eject_coverage = 0.3\n >>> max_attempts = 50\n >>> aug = mx.image.det.CreateMultiRandCropAugmenter(min_object_covered=min_object_covered,\n aspect_ratio_range=aspect_ratio_range, area_range=area_range,\n min_eject_coverage=min_eject_coverage, max_attempts=max_attempts,\n skip_prob=0)\n >>> aug.dumps() # show some details\n\n ",
"Align parameters as pairs"
] |
Please provide a description of the function:def CreateDetAugmenter(data_shape, resize=0, rand_crop=0, rand_pad=0, rand_gray=0,
rand_mirror=False, mean=None, std=None, brightness=0, contrast=0,
saturation=0, pca_noise=0, hue=0, inter_method=2, min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 3.0),
min_eject_coverage=0.3, max_attempts=50, pad_val=(127, 127, 127)):
auglist = []
if resize > 0:
auglist.append(DetBorrowAug(ResizeAug(resize, inter_method)))
if rand_crop > 0:
crop_augs = CreateMultiRandCropAugmenter(min_object_covered, aspect_ratio_range,
area_range, min_eject_coverage,
max_attempts, skip_prob=(1 - rand_crop))
auglist.append(crop_augs)
if rand_mirror > 0:
auglist.append(DetHorizontalFlipAug(0.5))
# apply random padding as late as possible to save computation
if rand_pad > 0:
pad_aug = DetRandomPadAug(aspect_ratio_range,
(1.0, area_range[1]), max_attempts, pad_val)
auglist.append(DetRandomSelectAug([pad_aug], 1 - rand_pad))
# force resize
auglist.append(DetBorrowAug(ForceResizeAug((data_shape[2], data_shape[1]), inter_method)))
auglist.append(DetBorrowAug(CastAug()))
if brightness or contrast or saturation:
auglist.append(DetBorrowAug(ColorJitterAug(brightness, contrast, saturation)))
if hue:
auglist.append(DetBorrowAug(HueJitterAug(hue)))
if pca_noise > 0:
eigval = np.array([55.46, 4.794, 1.148])
eigvec = np.array([[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]])
auglist.append(DetBorrowAug(LightingAug(pca_noise, eigval, eigvec)))
if rand_gray > 0:
auglist.append(DetBorrowAug(RandomGrayAug(rand_gray)))
if mean is True:
mean = np.array([123.68, 116.28, 103.53])
elif mean is not None:
assert isinstance(mean, np.ndarray) and mean.shape[0] in [1, 3]
if std is True:
std = np.array([58.395, 57.12, 57.375])
elif std is not None:
assert isinstance(std, np.ndarray) and std.shape[0] in [1, 3]
if mean is not None or std is not None:
auglist.append(DetBorrowAug(ColorNormalizeAug(mean, std)))
return auglist | [
"Create augmenters for detection.\n\n Parameters\n ----------\n data_shape : tuple of int\n Shape for output data\n resize : int\n Resize shorter edge if larger than 0 at the begining\n rand_crop : float\n [0, 1], probability to apply random cropping\n rand_pad : float\n [0, 1], probability to apply random padding\n rand_gray : float\n [0, 1], probability to convert to grayscale for all channels\n rand_mirror : bool\n Whether to apply horizontal flip to image with probability 0.5\n mean : np.ndarray or None\n Mean pixel values for [r, g, b]\n std : np.ndarray or None\n Standard deviations for [r, g, b]\n brightness : float\n Brightness jittering range (percent)\n contrast : float\n Contrast jittering range (percent)\n saturation : float\n Saturation jittering range (percent)\n hue : float\n Hue jittering range (percent)\n pca_noise : float\n Pca noise level (percent)\n inter_method : int, default=2(Area-based)\n Interpolation method for all resizing operations\n\n Possible values:\n 0: Nearest Neighbors Interpolation.\n 1: Bilinear interpolation.\n 2: Area-based (resampling using pixel area relation). It may be a\n preferred method for image decimation, as it gives moire-free\n results. But when the image is zoomed, it is similar to the Nearest\n Neighbors method. (used by default).\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n 9: Cubic for enlarge, area for shrink, bilinear for others\n 10: Random select from interpolation method metioned above.\n Note:\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n min_object_covered : float\n The cropped area of the image must contain at least this fraction of\n any bounding box supplied. The value of this parameter should be non-negative.\n In the case of 0, the cropped area does not need to overlap any of the\n bounding boxes supplied.\n min_eject_coverage : float\n The minimum coverage of cropped sample w.r.t its original size. With this\n constraint, objects that have marginal area after crop will be discarded.\n aspect_ratio_range : tuple of floats\n The cropped area of the image must have an aspect ratio = width / height\n within this range.\n area_range : tuple of floats\n The cropped area of the image must contain a fraction of the supplied\n image within in this range.\n max_attempts : int\n Number of attempts at generating a cropped/padded region of the image of the\n specified constraints. After max_attempts failures, return the original image.\n pad_val: float\n Pixel value to be filled when padding is enabled. pad_val will automatically\n be subtracted by mean and divided by std if applicable.\n\n Examples\n --------\n >>> # An example of creating multiple augmenters\n >>> augs = mx.image.CreateDetAugmenter(data_shape=(3, 300, 300), rand_crop=0.5,\n ... rand_pad=0.5, rand_mirror=True, mean=True, brightness=0.125, contrast=0.125,\n ... saturation=0.125, pca_noise=0.05, inter_method=10, min_object_covered=[0.3, 0.5, 0.9],\n ... area_range=(0.3, 3.0))\n >>> # dump the details\n >>> for aug in augs:\n ... aug.dumps()\n "
] |
Please provide a description of the function:def dumps(self):
return [self.__class__.__name__.lower(), [x.dumps() for x in self.aug_list]] | [
"Override default."
] |
Please provide a description of the function:def _calculate_areas(self, label):
heights = np.maximum(0, label[:, 3] - label[:, 1])
widths = np.maximum(0, label[:, 2] - label[:, 0])
return heights * widths | [
"Calculate areas for multiple labels"
] |
Please provide a description of the function:def _intersect(self, label, xmin, ymin, xmax, ymax):
left = np.maximum(label[:, 0], xmin)
right = np.minimum(label[:, 2], xmax)
top = np.maximum(label[:, 1], ymin)
bot = np.minimum(label[:, 3], ymax)
invalid = np.where(np.logical_or(left >= right, top >= bot))[0]
out = label.copy()
out[:, 0] = left
out[:, 1] = top
out[:, 2] = right
out[:, 3] = bot
out[invalid, :] = 0
return out | [
"Calculate intersect areas, normalized."
] |
Please provide a description of the function:def _check_satisfy_constraints(self, label, xmin, ymin, xmax, ymax, width, height):
if (xmax - xmin) * (ymax - ymin) < 2:
return False # only 1 pixel
x1 = float(xmin) / width
y1 = float(ymin) / height
x2 = float(xmax) / width
y2 = float(ymax) / height
object_areas = self._calculate_areas(label[:, 1:])
valid_objects = np.where(object_areas * width * height > 2)[0]
if valid_objects.size < 1:
return False
intersects = self._intersect(label[valid_objects, 1:], x1, y1, x2, y2)
coverages = self._calculate_areas(intersects) / object_areas[valid_objects]
coverages = coverages[np.where(coverages > 0)[0]]
return coverages.size > 0 and np.amin(coverages) > self.min_object_covered | [
"Check if constrains are satisfied"
] |
Please provide a description of the function:def _update_labels(self, label, crop_box, height, width):
xmin = float(crop_box[0]) / width
ymin = float(crop_box[1]) / height
w = float(crop_box[2]) / width
h = float(crop_box[3]) / height
out = label.copy()
out[:, (1, 3)] -= xmin
out[:, (2, 4)] -= ymin
out[:, (1, 3)] /= w
out[:, (2, 4)] /= h
out[:, 1:5] = np.maximum(0, out[:, 1:5])
out[:, 1:5] = np.minimum(1, out[:, 1:5])
coverage = self._calculate_areas(out[:, 1:]) * w * h / self._calculate_areas(label[:, 1:])
valid = np.logical_and(out[:, 3] > out[:, 1], out[:, 4] > out[:, 2])
valid = np.logical_and(valid, coverage > self.min_eject_coverage)
valid = np.where(valid)[0]
if valid.size < 1:
return None
out = out[valid, :]
return out | [
"Convert labels according to crop box"
] |
Please provide a description of the function:def _random_crop_proposal(self, label, height, width):
from math import sqrt
if not self.enabled or height <= 0 or width <= 0:
return ()
min_area = self.area_range[0] * height * width
max_area = self.area_range[1] * height * width
for _ in range(self.max_attempts):
ratio = random.uniform(*self.aspect_ratio_range)
if ratio <= 0:
continue
h = int(round(sqrt(min_area / ratio)))
max_h = int(round(sqrt(max_area / ratio)))
if round(max_h * ratio) > width:
# find smallest max_h satisfying round(max_h * ratio) <= width
max_h = int((width + 0.4999999) / ratio)
if max_h > height:
max_h = height
if h > max_h:
h = max_h
if h < max_h:
# generate random h in range [h, max_h]
h = random.randint(h, max_h)
w = int(round(h * ratio))
assert w <= width
# trying to fix rounding problems
area = w * h
if area < min_area:
h += 1
w = int(round(h * ratio))
area = w * h
if area > max_area:
h -= 1
w = int(round(h * ratio))
area = w * h
if not (min_area <= area <= max_area and 0 <= w <= width and 0 <= h <= height):
continue
y = random.randint(0, max(0, height - h))
x = random.randint(0, max(0, width - w))
if self._check_satisfy_constraints(label, x, y, x + w, y + h, width, height):
new_label = self._update_labels(label, (x, y, w, h), height, width)
if new_label is not None:
return (x, y, w, h, new_label)
return () | [
"Propose cropping areas"
] |
Please provide a description of the function:def _update_labels(self, label, pad_box, height, width):
out = label.copy()
out[:, (1, 3)] = (out[:, (1, 3)] * width + pad_box[0]) / pad_box[2]
out[:, (2, 4)] = (out[:, (2, 4)] * height + pad_box[1]) / pad_box[3]
return out | [
"Update label according to padding region"
] |
Please provide a description of the function:def _random_pad_proposal(self, label, height, width):
from math import sqrt
if not self.enabled or height <= 0 or width <= 0:
return ()
min_area = self.area_range[0] * height * width
max_area = self.area_range[1] * height * width
for _ in range(self.max_attempts):
ratio = random.uniform(*self.aspect_ratio_range)
if ratio <= 0:
continue
h = int(round(sqrt(min_area / ratio)))
max_h = int(round(sqrt(max_area / ratio)))
if round(h * ratio) < width:
h = int((width + 0.499999) / ratio)
if h < height:
h = height
if h > max_h:
h = max_h
if h < max_h:
h = random.randint(h, max_h)
w = int(round(h * ratio))
if (h - height) < 2 or (w - width) < 2:
continue # marginal padding is not helpful
y = random.randint(0, max(0, h - height))
x = random.randint(0, max(0, w - width))
new_label = self._update_labels(label, (x, y, w, h), height, width)
return (x, y, w, h, new_label)
return () | [
"Generate random padding region"
] |
Please provide a description of the function:def _check_valid_label(self, label):
if len(label.shape) != 2 or label.shape[1] < 5:
msg = "Label with shape (1+, 5+) required, %s received." % str(label)
raise RuntimeError(msg)
valid_label = np.where(np.logical_and(label[:, 0] >= 0, label[:, 3] > label[:, 1],
label[:, 4] > label[:, 2]))[0]
if valid_label.size < 1:
raise RuntimeError('Invalid label occurs.') | [
"Validate label and its shape."
] |
Please provide a description of the function:def _estimate_label_shape(self):
max_count = 0
self.reset()
try:
while True:
label, _ = self.next_sample()
label = self._parse_label(label)
max_count = max(max_count, label.shape[0])
except StopIteration:
pass
self.reset()
return (max_count, label.shape[1]) | [
"Helper function to estimate label shape"
] |
Please provide a description of the function:def _parse_label(self, label):
if isinstance(label, nd.NDArray):
label = label.asnumpy()
raw = label.ravel()
if raw.size < 7:
raise RuntimeError("Label shape is invalid: " + str(raw.shape))
header_width = int(raw[0])
obj_width = int(raw[1])
if (raw.size - header_width) % obj_width != 0:
msg = "Label shape %s inconsistent with annotation width %d." \
%(str(raw.shape), obj_width)
raise RuntimeError(msg)
out = np.reshape(raw[header_width:], (-1, obj_width))
# remove bad ground-truths
valid = np.where(np.logical_and(out[:, 3] > out[:, 1], out[:, 4] > out[:, 2]))[0]
if valid.size < 1:
raise RuntimeError('Encounter sample with no valid label.')
return out[valid, :] | [
"Helper function to parse object detection label.\n\n Format for raw label:\n n \\t k \\t ... \\t [id \\t xmin\\t ymin \\t xmax \\t ymax \\t ...] \\t [repeat]\n where n is the width of header, 2 or larger\n k is the width of each object annotation, can be arbitrary, at least 5\n "
] |
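A worked example of the raw label layout described in the docstring: header width 2, object width 5, two objects, where the second violates xmax > xmin and is dropped by the same validity check used above.
import numpy as np

raw = np.array([2, 5,
                0, 0.1, 0.1, 0.5, 0.6,     # valid box
                1, 0.4, 0.4, 0.3, 0.9])    # invalid: xmax (0.3) <= xmin (0.4)
header_width, obj_width = int(raw[0]), int(raw[1])
objs = raw[header_width:].reshape(-1, obj_width)
valid = np.where(np.logical_and(objs[:, 3] > objs[:, 1], objs[:, 4] > objs[:, 2]))[0]
print(objs[valid, :])                      # only the first object survives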
Please provide a description of the function:def reshape(self, data_shape=None, label_shape=None):
if data_shape is not None:
self.check_data_shape(data_shape)
self.provide_data = [(self.provide_data[0][0], (self.batch_size,) + data_shape)]
self.data_shape = data_shape
if label_shape is not None:
self.check_label_shape(label_shape)
self.provide_label = [(self.provide_label[0][0], (self.batch_size,) + label_shape)]
self.label_shape = label_shape | [
"Reshape iterator for data_shape or label_shape.\n\n Parameters\n ----------\n data_shape : tuple or None\n Reshape the data_shape to the new shape if not None\n label_shape : tuple or None\n Reshape label shape to new shape if not None\n "
] |
Please provide a description of the function:def _batchify(self, batch_data, batch_label, start=0):
i = start
batch_size = self.batch_size
try:
while i < batch_size:
label, s = self.next_sample()
data = self.imdecode(s)
try:
self.check_valid_image([data])
label = self._parse_label(label)
data, label = self.augmentation_transform(data, label)
self._check_valid_label(label)
except RuntimeError as e:
logging.debug('Invalid image, skipping: %s', str(e))
continue
for datum in [data]:
assert i < batch_size, 'Batch size must be multiples of augmenter output length'
batch_data[i] = self.postprocess_data(datum)
num_object = label.shape[0]
batch_label[i][0:num_object] = nd.array(label)
if num_object < batch_label[i].shape[0]:
batch_label[i][num_object:] = -1
i += 1
except StopIteration:
if not i:
raise StopIteration
return i | [
"Override the helper function for batchifying data"
] |
Please provide a description of the function:def next(self):
batch_size = self.batch_size
c, h, w = self.data_shape
# if last batch data is rolled over
if self._cache_data is not None:
# check both the data and label have values
assert self._cache_label is not None, "_cache_label didn't have values"
assert self._cache_idx is not None, "_cache_idx didn't have values"
batch_data = self._cache_data
batch_label = self._cache_label
i = self._cache_idx
else:
batch_data = nd.zeros((batch_size, c, h, w))
batch_label = nd.empty(self.provide_label[0][1])
batch_label[:] = -1
i = self._batchify(batch_data, batch_label)
# calculate the padding
pad = batch_size - i
# handle padding for the last batch
if pad != 0:
if self.last_batch_handle == 'discard':
raise StopIteration
# if the option is 'roll_over', throw StopIteration and cache the data
elif self.last_batch_handle == 'roll_over' and \
self._cache_data is None:
self._cache_data = batch_data
self._cache_label = batch_label
self._cache_idx = i
raise StopIteration
else:
_ = self._batchify(batch_data, batch_label, i)
if self.last_batch_handle == 'pad':
self._allow_read = False
else:
self._cache_data = None
self._cache_label = None
self._cache_idx = None
return io.DataBatch([batch_data], [batch_label], pad=pad) | [
"Override the function for returning next batch."
] |
Please provide a description of the function:def augmentation_transform(self, data, label): # pylint: disable=arguments-differ
for aug in self.auglist:
data, label = aug(data, label)
return (data, label) | [
"Override Transforms input data with specified augmentations."
] |
Please provide a description of the function:def check_label_shape(self, label_shape):
if not len(label_shape) == 2:
raise ValueError('label_shape should have length 2')
if label_shape[0] < self.label_shape[0]:
msg = 'Attempts to reduce label count from %d to %d, not allowed.' \
% (self.label_shape[0], label_shape[0])
raise ValueError(msg)
if label_shape[1] != self.provide_label[0][1][2]:
msg = 'label_shape object width inconsistent: %d vs %d.' \
% (self.provide_label[0][1][2], label_shape[1])
raise ValueError(msg) | [
"Checks if the new label shape is valid"
] |
Please provide a description of the function:def draw_next(self, color=None, thickness=2, mean=None, std=None, clip=True,
waitKey=None, window_name='draw_next', id2labels=None):
try:
import cv2
except ImportError as e:
warnings.warn('Unable to import cv2, skip drawing: %s' % str(e))
return
count = 0
try:
while True:
label, s = self.next_sample()
data = self.imdecode(s)
try:
self.check_valid_image([data])
label = self._parse_label(label)
except RuntimeError as e:
logging.debug('Invalid image, skipping: %s', str(e))
continue
count += 1
data, label = self.augmentation_transform(data, label)
image = data.asnumpy()
# revert color_normalize
if std is True:
std = np.array([58.395, 57.12, 57.375])
elif std is not None:
assert isinstance(std, np.ndarray) and std.shape[0] in [1, 3]
if std is not None:
image *= std
if mean is True:
mean = np.array([123.68, 116.28, 103.53])
elif mean is not None:
assert isinstance(mean, np.ndarray) and mean.shape[0] in [1, 3]
if mean is not None:
image += mean
# swap RGB
image[:, :, (0, 1, 2)] = image[:, :, (2, 1, 0)]
if clip:
image = np.maximum(0, np.minimum(255, image))
if color:
color = color[::-1]
image = image.astype(np.uint8)
height, width, _ = image.shape
for i in range(label.shape[0]):
x1 = int(label[i, 1] * width)
if x1 < 0:
continue
y1 = int(label[i, 2] * height)
x2 = int(label[i, 3] * width)
y2 = int(label[i, 4] * height)
bc = np.random.rand(3) * 255 if not color else color
cv2.rectangle(image, (x1, y1), (x2, y2), bc, thickness)
if id2labels is not None:
cls_id = int(label[i, 0])
if cls_id in id2labels:
cls_name = id2labels[cls_id]
text = "{:s}".format(cls_name)
font = cv2.FONT_HERSHEY_SIMPLEX
font_scale = 0.5
text_height = cv2.getTextSize(text, font, font_scale, 2)[0][1]
tc = (255, 255, 255)
tpos = (x1 + 5, y1 + text_height + 5)
cv2.putText(image, text, tpos, font, font_scale, tc, 2)
if waitKey is not None:
cv2.imshow(window_name, image)
cv2.waitKey(waitKey)
yield image
except StopIteration:
if not count:
return | [
"Display next image with bounding boxes drawn.\n\n Parameters\n ----------\n color : tuple\n Bounding box color in RGB, use None for random color\n thickness : int\n Bounding box border thickness\n mean : True or numpy.ndarray\n Compensate for the mean to have better visual effect\n std : True or numpy.ndarray\n Revert standard deviations\n clip : bool\n If true, clip to [0, 255] for better visual effect\n waitKey : None or int\n Hold the window for waitKey milliseconds if set, skip ploting if None\n window_name : str\n Plot window name if waitKey is set.\n id2labels : dict\n Mapping of labels id to labels name.\n\n Returns\n -------\n numpy.ndarray\n\n Examples\n --------\n >>> # use draw_next to get images with bounding boxes drawn\n >>> iterator = mx.image.ImageDetIter(1, (3, 600, 600), path_imgrec='train.rec')\n >>> for image in iterator.draw_next(waitKey=None):\n ... # display image\n >>> # or let draw_next display using cv2 module\n >>> for image in iterator.draw_next(waitKey=0, window_name='disp'):\n ... pass\n "
] |
Please provide a description of the function:def sync_label_shape(self, it, verbose=False):
assert isinstance(it, ImageDetIter), 'Synchronize with invalid iterator.'
train_label_shape = self.label_shape
val_label_shape = it.label_shape
assert train_label_shape[1] == val_label_shape[1], "object width mismatch."
max_count = max(train_label_shape[0], val_label_shape[0])
if max_count > train_label_shape[0]:
self.reshape(None, (max_count, train_label_shape[1]))
if max_count > val_label_shape[0]:
it.reshape(None, (max_count, val_label_shape[1]))
if verbose and max_count > min(train_label_shape[0], val_label_shape[0]):
logging.info('Resized label_shape to (%d, %d).', max_count, train_label_shape[1])
return it | [
"Synchronize label shape with the input iterator. This is useful when\n train/validation iterators have different label padding.\n\n Parameters\n ----------\n it : ImageDetIter\n The other iterator to synchronize\n verbose : bool\n Print verbose log if true\n\n Returns\n -------\n ImageDetIter\n The synchronized other iterator, the internal label shape is updated as well.\n\n Examples\n --------\n >>> train_iter = mx.image.ImageDetIter(32, (3, 300, 300), path_imgrec='train.rec')\n >>> val_iter = mx.image.ImageDetIter(32, (3, 300, 300), path.imgrec='val.rec')\n >>> train_iter.label_shape\n (30, 6)\n >>> val_iter.label_shape\n (25, 6)\n >>> val_iter = train_iter.sync_label_shape(val_iter, verbose=False)\n >>> train_iter.label_shape\n (30, 6)\n >>> val_iter.label_shape\n (30, 6)\n "
] |
Please provide a description of the function:def _generate_base_anchors(base_size, scales, ratios):
base_anchor = np.array([1, 1, base_size, base_size]) - 1
ratio_anchors = AnchorGenerator._ratio_enum(base_anchor, ratios)
anchors = np.vstack([AnchorGenerator._scale_enum(ratio_anchors[i, :], scales)
for i in range(ratio_anchors.shape[0])])
return anchors | [
"\n Generate anchor (reference) windows by enumerating aspect ratios X\n scales wrt a reference (0, 0, 15, 15) window.\n "
] |
Please provide a description of the function:def _whctrs(anchor):
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr | [
"\n Return width, height, x center, and y center for an anchor (window).\n "
] |
Please provide a description of the function:def _mkanchors(ws, hs, x_ctr, y_ctr):
ws = ws[:, np.newaxis]
hs = hs[:, np.newaxis]
anchors = np.hstack((x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1)))
return anchors | [
"\n Given a vector of widths (ws) and heights (hs) around a center\n (x_ctr, y_ctr), output a set of anchors (windows).\n "
] |
Please provide a description of the function:def _ratio_enum(anchor, ratios):
w, h, x_ctr, y_ctr = AnchorGenerator._whctrs(anchor)
size = w * h
size_ratios = size / ratios
ws = np.round(np.sqrt(size_ratios))
hs = np.round(ws * ratios)
anchors = AnchorGenerator._mkanchors(ws, hs, x_ctr, y_ctr)
return anchors | [
"\n Enumerate a set of anchors for each aspect ratio wrt an anchor.\n "
] |
Please provide a description of the function:def _scale_enum(anchor, scales):
w, h, x_ctr, y_ctr = AnchorGenerator._whctrs(anchor)
ws = w * scales
hs = h * scales
anchors = AnchorGenerator._mkanchors(ws, hs, x_ctr, y_ctr)
return anchors | [
"\n Enumerate a set of anchors for each scale wrt an anchor.\n "
] |
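Taken together, these static helpers reproduce the classic Faster R-CNN base-anchor generation; with base_size=16, three ratios and three scales they yield 9 anchors (a sketch assuming the AnchorGenerator class above is importable).
import numpy as np

anchors = AnchorGenerator._generate_base_anchors(base_size=16,
                                                 scales=np.array([8, 16, 32]),
                                                 ratios=np.array([0.5, 1, 2]))
print(anchors.shape)    # (9, 4); each row is (xmin, ymin, xmax, ymax) centred on (7.5, 7.5)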
Please provide a description of the function:def prepare_data(args):
rnn_type = args.config.get("arch", "rnn_type")
num_rnn_layer = args.config.getint("arch", "num_rnn_layer")
num_hidden_rnn_list = json.loads(args.config.get("arch", "num_hidden_rnn_list"))
batch_size = args.config.getint("common", "batch_size")
if rnn_type == 'lstm':
init_c = [('l%d_init_c' % l, (batch_size, num_hidden_rnn_list[l]))
for l in range(num_rnn_layer)]
init_h = [('l%d_init_h' % l, (batch_size, num_hidden_rnn_list[l]))
for l in range(num_rnn_layer)]
elif rnn_type == 'bilstm':
forward_init_c = [('forward_l%d_init_c' % l, (batch_size, num_hidden_rnn_list[l]))
for l in range(num_rnn_layer)]
backward_init_c = [('backward_l%d_init_c' % l, (batch_size, num_hidden_rnn_list[l]))
for l in range(num_rnn_layer)]
init_c = forward_init_c + backward_init_c
forward_init_h = [('forward_l%d_init_h' % l, (batch_size, num_hidden_rnn_list[l]))
for l in range(num_rnn_layer)]
backward_init_h = [('backward_l%d_init_h' % l, (batch_size, num_hidden_rnn_list[l]))
for l in range(num_rnn_layer)]
init_h = forward_init_h + backward_init_h
elif rnn_type == 'gru':
init_h = [('l%d_init_h' % l, (batch_size, num_hidden_rnn_list[l]))
for l in range(num_rnn_layer)]
elif rnn_type == 'bigru':
forward_init_h = [('forward_l%d_init_h' % l, (batch_size, num_hidden_rnn_list[l]))
for l in range(num_rnn_layer)]
backward_init_h = [('backward_l%d_init_h' % l, (batch_size, num_hidden_rnn_list[l]))
for l in range(num_rnn_layer)]
init_h = forward_init_h + backward_init_h
else:
raise Exception('rnn_type should be one of: lstm, bilstm, gru, bigru')
if rnn_type == 'lstm' or rnn_type == 'bilstm':
init_states = init_c + init_h
elif rnn_type == 'gru' or rnn_type == 'bigru':
init_states = init_h
return init_states | [
"\n set atual shape of data\n "
] |
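For intuition, this is what the returned init_states looks like for rnn_type='lstm' with two layers of width 512 and batch_size 32; the values are illustrative, whereas the real function reads them from args.config.
batch_size, num_hidden_rnn_list, num_rnn_layer = 32, [512, 512], 2
init_c = [('l%d_init_c' % l, (batch_size, num_hidden_rnn_list[l])) for l in range(num_rnn_layer)]
init_h = [('l%d_init_h' % l, (batch_size, num_hidden_rnn_list[l])) for l in range(num_rnn_layer)]
init_states = init_c + init_h
print(init_states)    # [('l0_init_c', (32, 512)), ('l1_init_c', (32, 512)), ('l0_init_h', (32, 512)), ('l1_init_h', (32, 512))]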
Please provide a description of the function:def arch(args, seq_len=None):
if isinstance(args, argparse.Namespace):
mode = args.config.get("common", "mode")
is_bucketing = args.config.getboolean("arch", "is_bucketing")
if mode == "train" or is_bucketing:
channel_num = args.config.getint("arch", "channel_num")
conv_layer1_filter_dim = \
tuple(json.loads(args.config.get("arch", "conv_layer1_filter_dim")))
conv_layer1_stride = tuple(json.loads(args.config.get("arch", "conv_layer1_stride")))
conv_layer2_filter_dim = \
tuple(json.loads(args.config.get("arch", "conv_layer2_filter_dim")))
conv_layer2_stride = tuple(json.loads(args.config.get("arch", "conv_layer2_stride")))
rnn_type = args.config.get("arch", "rnn_type")
num_rnn_layer = args.config.getint("arch", "num_rnn_layer")
num_hidden_rnn_list = json.loads(args.config.get("arch", "num_hidden_rnn_list"))
is_batchnorm = args.config.getboolean("arch", "is_batchnorm")
if seq_len is None:
seq_len = args.config.getint('arch', 'max_t_count')
num_label = args.config.getint('arch', 'max_label_length')
num_rear_fc_layers = args.config.getint("arch", "num_rear_fc_layers")
num_hidden_rear_fc_list = json.loads(args.config.get("arch", "num_hidden_rear_fc_list"))
act_type_rear_fc_list = json.loads(args.config.get("arch", "act_type_rear_fc_list"))
# model symbol generation
# input preparation
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
net = mx.sym.Reshape(data=data, shape=(-4, -1, 1, 0, 0))
net = conv(net=net,
channels=channel_num,
filter_dimension=conv_layer1_filter_dim,
stride=conv_layer1_stride,
no_bias=is_batchnorm,
name='conv1')
if is_batchnorm:
# batch norm normalizes axis 1
net = batchnorm(net, name="conv1_batchnorm")
net = conv(net=net,
channels=channel_num,
filter_dimension=conv_layer2_filter_dim,
stride=conv_layer2_stride,
no_bias=is_batchnorm,
name='conv2')
if is_batchnorm:
# batch norm normalizes axis 1
net = batchnorm(net, name="conv2_batchnorm")
net = mx.sym.transpose(data=net, axes=(0, 2, 1, 3))
net = mx.sym.Reshape(data=net, shape=(0, 0, -3))
seq_len_after_conv_layer1 = int(
math.floor((seq_len - conv_layer1_filter_dim[0]) / conv_layer1_stride[0])) + 1
seq_len_after_conv_layer2 = int(
math.floor((seq_len_after_conv_layer1 - conv_layer2_filter_dim[0])
/ conv_layer2_stride[0])) + 1
net = slice_symbol_to_seq_symobls(net=net, seq_len=seq_len_after_conv_layer2, axis=1)
if rnn_type == "bilstm":
net = bi_lstm_unroll(net=net,
seq_len=seq_len_after_conv_layer2,
num_hidden_lstm_list=num_hidden_rnn_list,
num_lstm_layer=num_rnn_layer,
dropout=0.,
is_batchnorm=is_batchnorm,
is_bucketing=is_bucketing)
elif rnn_type == "gru":
net = gru_unroll(net=net,
seq_len=seq_len_after_conv_layer2,
num_hidden_gru_list=num_hidden_rnn_list,
num_gru_layer=num_rnn_layer,
dropout=0.,
is_batchnorm=is_batchnorm,
is_bucketing=is_bucketing)
elif rnn_type == "bigru":
net = bi_gru_unroll(net=net,
seq_len=seq_len_after_conv_layer2,
num_hidden_gru_list=num_hidden_rnn_list,
num_gru_layer=num_rnn_layer,
dropout=0.,
is_batchnorm=is_batchnorm,
is_bucketing=is_bucketing)
else:
raise Exception('rnn_type should be one of the following: bilstm, gru, bigru')
# rear fc layers
net = sequence_fc(net=net, seq_len=seq_len_after_conv_layer2,
num_layer=num_rear_fc_layers, prefix="rear",
num_hidden_list=num_hidden_rear_fc_list,
act_type_list=act_type_rear_fc_list,
is_batchnorm=is_batchnorm)
# warpctc layer
net = warpctc_layer(net=net,
seq_len=seq_len_after_conv_layer2,
label=label,
num_label=num_label,
character_classes_count=
(args.config.getint('arch', 'n_classes') + 1))
args.config.set('arch', 'max_t_count', str(seq_len_after_conv_layer2))
return net
elif mode == 'load' or mode == 'predict':
conv_layer1_filter_dim = \
tuple(json.loads(args.config.get("arch", "conv_layer1_filter_dim")))
conv_layer1_stride = tuple(json.loads(args.config.get("arch", "conv_layer1_stride")))
conv_layer2_filter_dim = \
tuple(json.loads(args.config.get("arch", "conv_layer2_filter_dim")))
conv_layer2_stride = tuple(json.loads(args.config.get("arch", "conv_layer2_stride")))
if seq_len is None:
seq_len = args.config.getint('arch', 'max_t_count')
seq_len_after_conv_layer1 = int(
math.floor((seq_len - conv_layer1_filter_dim[0]) / conv_layer1_stride[0])) + 1
seq_len_after_conv_layer2 = int(
math.floor((seq_len_after_conv_layer1 - conv_layer2_filter_dim[0])
/ conv_layer2_stride[0])) + 1
args.config.set('arch', 'max_t_count', str(seq_len_after_conv_layer2))
else:
raise Exception('mode must be one of the following: train, predict, load')
"\n define deep speech 2 network\n "
] |
Please provide a description of the function:def main():
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--image_path', type=str, default='./data/datasets/')
parser.add_argument('--align_path', type=str, default='./data/align/')
parser.add_argument('--dr_rate', type=float, default=0.5)
parser.add_argument('--num_gpus', type=int, default=1)
parser.add_argument('--num_workers', type=int, default=0)
parser.add_argument('--model_path', type=str, default=None)
config = parser.parse_args()
trainer = Train(config)
trainer.build_model(dr_rate=config.dr_rate, path=config.model_path)
trainer.load_dataloader()
trainer.run(epochs=config.epochs) | [
"\n Description : run lipnet training code using argument info\n "
] |
Please provide a description of the function:def vis_detection(im_orig, detections, class_names, thresh=0.7):
import matplotlib.pyplot as plt
import random
plt.imshow(im_orig)
colors = [(random.random(), random.random(), random.random()) for _ in class_names]
for [cls, conf, x1, y1, x2, y2] in detections:
cls = int(cls)
if cls > 0 and conf > thresh:
rect = plt.Rectangle((x1, y1), x2 - x1, y2 - y1,
fill=False, edgecolor=colors[cls], linewidth=3.5)
plt.gca().add_patch(rect)
plt.gca().text(x1, y1 - 2, '{:s} {:.3f}'.format(class_names[cls], conf),
bbox=dict(facecolor=colors[cls], alpha=0.5), fontsize=12, color='white')
plt.show() | [
"visualize [cls, conf, x1, y1, x2, y2]"
] |
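A self-contained usage sketch with a synthetic image and one detection row in the [cls, conf, x1, y1, x2, y2] pixel-coordinate format noted above; class index 0 is skipped as background by the function.
import numpy as np

im = np.zeros((300, 300, 3), dtype=np.uint8)
detections = [[1, 0.92, 50, 60, 200, 220]]     # one box above the 0.7 threshold
class_names = ['__background__', 'person']     # illustrative class list
vis_detection(im, detections, class_names, thresh=0.7)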
Please provide a description of the function:def check_error(model, path, shapes, output = 'softmax_output', verbose = True):
coreml_model = _coremltools.models.MLModel(path)
input_data = {}
input_data_copy = {}
for ip in shapes:
input_data[ip] = _np.random.rand(*shapes[ip]).astype('f')
input_data_copy[ip] = _np.copy(input_data[ip])
dataIter = _mxnet.io.NDArrayIter(input_data_copy)
mx_out = model.predict(dataIter).flatten()
e_out_dict = coreml_model.predict(_mxnet_remove_batch(input_data))
e_out = e_out_dict[output].flatten()
error = _np.linalg.norm(e_out - mx_out)
if verbose:
print("First few predictions from CoreML : %s" % e_out[0:10])
print("First few predictions from MXNet : %s" % e_out[0:10])
print("L2 Error on random data %s" % error)
return error | [
"\n Check the difference between predictions from MXNet and CoreML.\n "
] |
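A hedged usage sketch: model is any MXNet model exposing predict() (for example a FeedForward model loaded from a checkpoint), and the .mlmodel file is assumed to have been produced by the converter; the prefix, epoch, and shapes are illustrative.
import mxnet as mx

model = mx.model.FeedForward.load('squeezenet_v1.1', 0)    # hypothetical prefix/epoch
err = check_error(model, 'squeezenet_v1.1.mlmodel',
                  shapes={'data': (1, 3, 224, 224)},
                  output='softmax_output')
print('L2 error on random data: %s' % err)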