INSTRUCTION | RESPONSE
---|---
Get outputs of the previous forward computation.
If begin or end is specified, return [begin, end)-th outputs,
otherwise return all outputs.
Parameters
----------
merge_multi_context : bool
Default is `True`. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A `True` value indicates that we
should merge the collected results so that they look like they came from a single
executor.
begin : int
starting index of returned outputs in all outputs
end : int or None
ending index (excluded) of returned outputs.
Returns
-------
If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise, it
is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
elements are `NDArray`.
|
def get_outputs(self, merge_multi_context=True, begin=0, end=None):
"""Get outputs of the previous forward computation.
If begin or end is specified, return [begin, end)-th outputs,
otherwise return all outputs.
Parameters
----------
merge_multi_context : bool
Default is `True`. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A `True` value indicates that we
should merge the collected results so that they look like they came from a single
executor.
begin : int
starting index of returned outputs in all outputs
end : int or None
ending index (excluded) of returned outputs.
Returns
-------
If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise, it
is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
elements are `NDArray`.
"""
if end is None:
end = self.num_outputs
outputs = [[exec_.outputs[i] for exec_ in self.execs]
for i in range(begin, end)]
if merge_multi_context:
outputs = _merge_multi_context(outputs, self.output_layouts)
return outputs
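A minimal usage sketch follows; `exec_group` (a data-parallel executor group bound on two devices, with `forward` already run on a batch) is an assumption for illustration and is not defined above.
# Hypothetical: exec_group is a bound DataParallelExecutorGroup on two devices,
# and forward() has already been run on a data batch.
merged = exec_group.get_outputs()                        # [out1, out2], one NDArray per output
per_device = exec_group.get_outputs(merge_multi_context=False)
# per_device looks like [[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]
first_only = exec_group.get_outputs(begin=0, end=1)      # only the first output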
|
Set value for states. Only one of states & value can be specified.
Parameters
----------
states : list of list of NDArrays
source states arrays formatted like [[state1_dev1, state1_dev2],
[state2_dev1, state2_dev2]].
value : number
a single scalar value for all state arrays.
|
def set_states(self, states=None, value=None):
"""Set value for states. Only one of states & value can be specified.
Parameters
----------
states : list of list of NDArrays
source states arrays formatted like [[state1_dev1, state1_dev2],
[state2_dev1, state2_dev2]].
value : number
a single scalar value for all state arrays.
"""
if states is not None:
assert value is None, "Only one of states & value can be specified."
_load_general(states, self.state_arrays, (0,)*len(states))
else:
assert value is not None, "At least one of states & value must be specified."
assert states is None, "Only one of states & value can be specified."
for d_dst in self.state_arrays:
for dst in d_dst:
dst[:] = value
|
Get the gradients with respect to the inputs of the module.
Parameters
----------
merge_multi_context : bool
Defaults to ``True``. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A `True` value indicates that we
should merge the collected results so that they look like they came from a single
executor.
Returns
-------
If `merge_multi_context` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it
is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output
elements are `NDArray`.
|
def get_input_grads(self, merge_multi_context=True):
"""Get the gradients with respect to the inputs of the module.
Parameters
----------
merge_multi_context : bool
Defaults to ``True``. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A `True` value indicates that we
should merge the collected results so that they look like they came from a single
executor.
Returns
-------
If `merge_multi_context` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it
is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output
elements are `NDArray`.
"""
assert self.inputs_need_grad
if merge_multi_context:
return _merge_multi_context(self.input_grad_arrays, self.data_layouts)
return self.input_grad_arrays
|
Run backward on all devices. A backward should be called after
a call to the forward function. Backward cannot be called unless
``self.for_training`` is ``True``.
Parameters
----------
out_grads : NDArray or list of NDArray, optional
Gradient on the outputs to be propagated back.
This parameter is only needed when bind is called
on outputs that are not a loss function.
|
def backward(self, out_grads=None):
"""Run backward on all devices. A backward should be called after
a call to the forward function. Backward cannot be called unless
``self.for_training`` is ``True``.
Parameters
----------
out_grads : NDArray or list of NDArray, optional
Gradient on the outputs to be propagated back.
This parameter is only needed when bind is called
on outputs that are not a loss function.
"""
assert self.for_training, 're-bind with for_training=True to run backward'
if out_grads is None:
out_grads = []
for i, (exec_, islice) in enumerate(zip(self.execs, self.slices)):
out_grads_slice = []
for grad, axis in zip(out_grads, self.output_layouts):
if axis >= 0:
# pylint: disable=no-member
og_my_slice = nd.slice_axis(grad, axis=axis, begin=islice.start,
end=islice.stop)
out_grads_slice.append(og_my_slice.as_in_context(self.contexts[i]))
# pylint: enable=no-member
else:
out_grads_slice.append(grad.copyto(self.contexts[i]))
exec_.backward(out_grads=out_grads_slice)
|
Accumulate the performance according to `eval_metric` on all devices
by comparing outputs from [begin, end) to labels. By default use all
outputs.
Parameters
----------
eval_metric : EvalMetric
The metric used for evaluation.
labels : list of NDArray
Typically comes from `label` of a `DataBatch`.
pre_sliced : bool
Whether labels are already sliced.
begin : int
Starting index of used outputs.
end : int or None
Ending index of used outputs.
|
def update_metric(self, eval_metric, labels, pre_sliced):
"""Accumulate the performance according to `eval_metric` on all devices
by comparing outputs from [begin, end) to labels. By default use all
outputs.
Parameters
----------
eval_metric : EvalMetric
The metric used for evaluation.
labels : list of NDArray
Typically comes from `label` of a `DataBatch`.
pre_sliced : bool
Whether labels are already sliced.
begin : int
Starting index of used outputs.
end : int or None
Ending index of used outputs.
"""
for current_exec, (texec, islice) in enumerate(zip(self.execs, self.slices)):
if not pre_sliced:
labels_slice = []
for label, axis in zip(labels, self.label_layouts):
if axis == 0:
# slicing NDArray along axis 0 can avoid copying
labels_slice.append(label[islice])
elif axis > 0:
# pylint: disable=no-member
label_my_slice = nd.slice_axis(label, axis=axis, begin=islice.start,
end=islice.stop).as_in_context(label.context)
# pylint: enable=no-member
labels_slice.append(label_my_slice)
else:
labels_slice.append(label)
else:
labels_slice = labels[current_exec]
labels_ = OrderedDict(zip(self.label_names, labels_slice))
preds = OrderedDict(zip(self.output_names, texec.outputs))
eval_metric.update_dict(labels_, preds)
|
Get the sliced shapes for the i-th executor.
Parameters
----------
shapes : list of (str, tuple)
The original (name, shape) pairs.
i : int
Which executor we are dealing with.
|
def _sliced_shape(self, shapes, i, major_axis):
"""Get the sliced shapes for the i-th executor.
Parameters
----------
shapes : list of (str, tuple)
The original (name, shape) pairs.
i : int
Which executor we are dealing with.
"""
sliced_shapes = []
for desc, axis in zip(shapes, major_axis):
shape = list(desc.shape)
if axis >= 0:
shape[axis] = self.slices[i].stop - self.slices[i].start
sliced_shapes.append(DataDesc(desc.name, tuple(shape), desc.dtype, desc.layout))
return sliced_shapes
|
Internal utility function to bind the i-th executor.
This function utilizes simple_bind python interface.
|
def _bind_ith_exec(self, i, data_shapes, label_shapes, shared_group):
"""Internal utility function to bind the i-th executor.
This function utilizes simple_bind python interface.
"""
shared_exec = None if shared_group is None else shared_group.execs[i]
context = self.contexts[i]
shared_data_arrays = self.shared_data_arrays[i]
input_shapes = dict(data_shapes)
if label_shapes is not None:
input_shapes.update(dict(label_shapes))
input_types = {x.name: x.dtype for x in data_shapes}
if label_shapes is not None:
input_types.update({x.name: x.dtype for x in label_shapes})
group2ctx = self.group2ctxs[i]
executor = self.symbol.simple_bind(ctx=context, grad_req=self.grad_req,
type_dict=input_types, shared_arg_names=self.param_names,
shared_exec=shared_exec, group2ctx=group2ctx,
shared_buffer=shared_data_arrays, **input_shapes)
self._total_exec_bytes += int(executor.debug_str().split('\n')[-3].split()[1])
return executor
|
Parse the number of classes and class names, if applicable.
|
def parse_class_names(args):
""" parse # classes and class_names if applicable """
num_class = args.num_class
if len(args.class_names) > 0:
if os.path.isfile(args.class_names):
# try to open it to read class names
with open(args.class_names, 'r') as f:
class_names = [l.strip() for l in f.readlines()]
else:
class_names = [c.strip() for c in args.class_names.split(',')]
assert len(class_names) == num_class, str(len(class_names))
for name in class_names:
assert len(name) > 0
else:
class_names = None
return class_names
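A small self-contained usage sketch; the argument values are made up for illustration.
import argparse
# Hypothetical arguments: three classes given inline as a comma-separated string.
args = argparse.Namespace(num_class=3, class_names='cat,dog,bird')
print(parse_class_names(args))  # ['cat', 'dog', 'bird']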
|
Return True if ``data`` has instance of ``dtype``.
This function is called after _init_data.
``data`` is a list of (str, NDArray)
|
def _has_instance(data, dtype):
"""Return True if ``data`` has instance of ``dtype``.
This function is called after _init_data.
``data`` is a list of (str, NDArray)"""
for item in data:
_, arr = item
if isinstance(arr, dtype):
return True
return False
|
Shuffle the data.
|
def _getdata_by_idx(data, idx):
"""Shuffle the data."""
shuffle_data = []
for k, v in data:
if h5py and isinstance(v, h5py.Dataset):
shuffle_data.append((k, v))
elif isinstance(v, CSRNDArray):
shuffle_data.append((k, sparse_array(v.asscipy()[idx], v.context)))
else:
shuffle_data.append((k, array(v.asnumpy()[idx], v.context)))
return shuffle_data
|
r"""MobileNet model from the
`"MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications"
<https://arxiv.org/abs/1704.04861>`_ paper.
Parameters
----------
multiplier : float
The width multiplier for controlling the model size. Only multipliers that are no
less than 0.25 are supported. The actual number of channels is equal to the original
channel size multiplied by this multiplier.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
|
def get_mobilenet(multiplier, pretrained=False, ctx=cpu(),
root=os.path.join(base.data_dir(), 'models'), **kwargs):
r"""MobileNet model from the
`"MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications"
<https://arxiv.org/abs/1704.04861>`_ paper.
Parameters
----------
multiplier : float
The width multiplier for controlling the model size. Only multipliers that are no
less than 0.25 are supported. The actual number of channels is equal to the original
channel size multiplied by this multiplier.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
"""
net = MobileNet(multiplier, **kwargs)
if pretrained:
from ..model_store import get_model_file
version_suffix = '{0:.2f}'.format(multiplier)
if version_suffix in ('1.00', '0.50'):
version_suffix = version_suffix[:-1]
net.load_parameters(
get_model_file('mobilenet%s' % version_suffix, root=root), ctx=ctx)
return net
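A usage sketch, assuming the function is exposed as in mxnet.gluon.model_zoo.vision; with pretrained=False the parameters still need to be initialized before a forward pass.
import mxnet as mx
from mxnet.gluon.model_zoo import vision

net = vision.get_mobilenet(multiplier=0.25, pretrained=False)
net.initialize()
out = net(mx.nd.random.uniform(shape=(1, 3, 224, 224)))  # dummy 224x224 RGB batch
print(out.shape)  # (1, 1000)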
|
Get the canonical name for a symbol.
This is the default implementation.
If the user specifies a name,
the user-specified name will be used.
When the user does not specify a name, we automatically generate a
name based on the hint string.
Parameters
----------
name : str or None
The name specified by the user.
hint : str
A hint string, which can be used to generate name.
Returns
-------
full_name : str
A canonical name for the symbol.
|
def get(self, name, hint):
"""Get the canonical name for a symbol.
This is the default implementation.
If the user specifies a name,
the user-specified name will be used.
When the user does not specify a name, we automatically generate a
name based on the hint string.
Parameters
----------
name : str or None
The name specified by the user.
hint : str
A hint string, which can be used to generate name.
Returns
-------
full_name : str
A canonical name for the symbol.
"""
if name:
return name
if hint not in self._counter:
self._counter[hint] = 0
name = '%s%d' % (hint, self._counter[hint])
self._counter[hint] += 1
return name
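A behavioral sketch of the method above; `NameManager` stands for whatever class owns it, with `self._counter` assumed to start as an empty dict.
nm = NameManager()                 # hypothetical instance
print(nm.get(None, 'conv'))        # 'conv0' -- auto-generated from the hint
print(nm.get(None, 'conv'))        # 'conv1' -- the counter advances per hint
print(nm.get('my_conv', 'conv'))   # 'my_conv' -- a user-specified name wins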
|
Draw samples from a log-uniform distribution and return the sampled candidates,
along with the expected counts for the true classes and the sampled classes.
|
def draw(self, true_classes):
"""Draw samples from log uniform distribution and returns sampled candidates,
expected count for true classes and sampled classes."""
range_max = self.range_max
num_sampled = self.num_sampled
ctx = true_classes.context
log_range = math.log(range_max + 1)
num_tries = 0
true_classes = true_classes.reshape((-1,))
sampled_classes, num_tries = self.sampler.sample_unique(num_sampled)
true_cls = true_classes.as_in_context(ctx).astype('float64')
prob_true = ((true_cls + 2.0) / (true_cls + 1.0)).log() / log_range
count_true = self._prob_helper(num_tries, num_sampled, prob_true)
sampled_classes = ndarray.array(sampled_classes, ctx=ctx, dtype='int64')
sampled_cls_fp64 = sampled_classes.astype('float64')
prob_sampled = ((sampled_cls_fp64 + 2.0) / (sampled_cls_fp64 + 1.0)).log() / log_range
count_sampled = self._prob_helper(num_tries, num_sampled, prob_sampled)
return [sampled_classes, count_true, count_sampled]
|
Inception score function.
The images are divided into `splits` parts, the inception score is computed on each part
separately, and the mean and std of these per-part scores are returned.
:param images: Images (num x c x w x h) for which to compute the inception score.
:param splits: number of parts to split the images into
:return: mean and std of the inception scores
|
def get_inception_score(images, splits=10):
"""
Inception score function.
The images are divided into `splits` parts, the inception score is computed on each part
separately, and the mean and std of these per-part scores are returned.
:param images: Images (num x c x w x h) for which to compute the inception score.
:param splits: number of parts to split the images into
:return: mean and std of the inception scores
"""
assert (images.shape[1] == 3)
# load inception model
if inception_model is None:
_init_inception()
# resize images to adapt inception model(inceptionV3)
if images.shape[2] != 299:
images = resize(images, 299, 299)
preds = []
bs = 4
n_batches = int(math.ceil(float(images.shape[0])/float(bs)))
# to get the predictions/picture of inception model
for i in range(n_batches):
sys.stdout.write(".")
sys.stdout.flush()
inps = images[(i * bs):min((i + 1) * bs, len(images))]
# inps size. bs x 3 x 299 x 299
pred = nd.softmax(inception_model(inps))
# pred size. bs x 1000
preds.append(pred.asnumpy())
# list to array
preds = np.concatenate(preds, 0)
scores = []
# to calculate the inception_score each split.
for i in range(splits):
# extract per split image pred
part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
kl = np.mean(np.sum(kl, 1))
scores.append(np.exp(kl))
return np.mean(scores), np.std(scores)
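A usage sketch; it relies on the module's lazily initialized inception model and its resize helper, and the random input below only illustrates the call and return values, not a meaningful score.
# Hypothetical input: 64 random RGB images of size 64x64, values in [0, 1].
images = nd.random.uniform(shape=(64, 3, 64, 64))
mean_score, std_score = get_inception_score(images, splits=4)
print(mean_score, std_score)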
|
Same as mx.model.load_checkpoint, but does not load the symbol network and converts the parameters to the given context.
|
def load_param(params, ctx=None):
"""same as mx.model.load_checkpoint, but do not load symnet and will convert context"""
if ctx is None:
ctx = mx.cpu()
save_dict = mx.nd.load(params)
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v.as_in_context(ctx)
if tp == 'aux':
aux_params[name] = v.as_in_context(ctx)
return arg_params, aux_params
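A usage sketch; the checkpoint file name is hypothetical and must point at an existing .params file saved by MXNet.
# Hypothetical file; pass ctx=mx.gpu(0) instead to place the weights on a GPU.
arg_params, aux_params = load_param('model-0010.params', ctx=mx.cpu())
print(sorted(arg_params.keys())[:5])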
|
Deprecated. Please use cell.unroll instead
|
def rnn_unroll(cell, length, inputs=None, begin_state=None, input_prefix='', layout='NTC'):
"""Deprecated. Please use cell.unroll instead"""
warnings.warn('rnn_unroll is deprecated. Please call cell.unroll directly.')
return cell.unroll(length=length, inputs=inputs, begin_state=begin_state,
input_prefix=input_prefix, layout=layout)
|
Save checkpoint for model using RNN cells.
Unpacks weight before saving.
Parameters
----------
cells : mxnet.rnn.RNNCell or list of RNNCells
The RNN cells used by this symbol.
prefix : str
Prefix of model name.
epoch : int
The epoch number of the model.
symbol : Symbol
The input symbol
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
|
def save_rnn_checkpoint(cells, prefix, epoch, symbol, arg_params, aux_params):
"""Save checkpoint for model using RNN cells.
Unpacks weight before saving.
Parameters
----------
cells : mxnet.rnn.RNNCell or list of RNNCells
The RNN cells used by this symbol.
prefix : str
Prefix of model name.
epoch : int
The epoch number of the model.
symbol : Symbol
The input symbol
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if isinstance(cells, BaseRNNCell):
cells = [cells]
for cell in cells:
arg_params = cell.unpack_weights(arg_params)
save_checkpoint(prefix, epoch, symbol, arg_params, aux_params)
|
Load model checkpoint from file.
Pack weights after loading.
Parameters
----------
cells : mxnet.rnn.RNNCell or list of RNNCells
The RNN cells used by this symbol.
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- symbol will be loaded from ``prefix-symbol.json``.
- parameters will be loaded from ``prefix-epoch.params``.
|
def load_rnn_checkpoint(cells, prefix, epoch):
"""Load model checkpoint from file.
Pack weights after loading.
Parameters
----------
cells : mxnet.rnn.RNNCell or list of RNNCells
The RNN cells used by this symbol.
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- symbol will be loaded from ``prefix-symbol.json``.
- parameters will be loaded from ``prefix-epoch.params``.
"""
sym, arg, aux = load_checkpoint(prefix, epoch)
if isinstance(cells, BaseRNNCell):
cells = [cells]
for cell in cells:
arg = cell.pack_weights(arg)
return sym, arg, aux
|
Activates or deactivates `HybridBlock`s recursively. Has no effect on
non-hybrid children.
Parameters
----------
active : bool, default True
Whether to turn hybrid on or off.
**kwargs : string
Additional flags for hybridized operator.
|
def hybridize(self, active=True, **kwargs):
"""Activates or deactivates `HybridBlock` s recursively. Has no effect on
non-hybrid children.
Parameters
----------
active : bool, default True
Whether to turn hybrid on or off.
**kwargs : string
Additional flags for hybridized operator.
"""
if self._children and all(isinstance(c, HybridBlock) for c in self._children.values()):
warnings.warn(
"All children of this Sequential layer '%s' are HybridBlocks. Consider "
"using HybridSequential for the best performance."%self.prefix, stacklevel=2)
super(Sequential, self).hybridize(active, **kwargs)
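A small runnable sketch of when the warning above fires: a plain Sequential whose children are all HybridBlocks (Dense layers), which could instead be a HybridSequential.
import mxnet as mx
from mxnet.gluon import nn

net = nn.Sequential()
net.add(nn.Dense(16, activation='relu'), nn.Dense(2))
net.initialize()
net.hybridize()   # warns: consider HybridSequential for the best performance
out = net(mx.nd.ones((4, 8)))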
|
Reads the image specified by path into a numpy.ndarray.
|
def read_img(path):
""" Reads image specified by path into numpy.ndarray"""
img = cv2.resize(cv2.imread(path, 0), (80, 30)).astype(np.float32) / 255
img = np.expand_dims(img.transpose(1, 0), 0)
return img
|
Returns a tuple of names and zero arrays for LSTM init states
|
def lstm_init_states(batch_size):
""" Returns a tuple of names and zero arrays for LSTM init states"""
hp = Hyperparams()
init_shapes = lstm.init_states(batch_size=batch_size, num_lstm_layer=hp.num_lstm_layer, num_hidden=hp.num_hidden)
init_names = [s[0] for s in init_shapes]
init_arrays = [mx.nd.zeros(x[1]) for x in init_shapes]
return init_names, init_arrays
|
Loads the model from the checkpoint specified by prefix and epoch, binds it
to an executor, sets its parameters, and returns an mx.mod.Module.
|
def load_module(prefix, epoch, data_names, data_shapes):
"""Loads the model from checkpoint specified by prefix and epoch, binds it
to an executor, and sets its parameters and returns a mx.mod.Module
"""
sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
# We don't need CTC loss for prediction, just a simple softmax will suffice.
# We get the output of the layer just before the loss layer ('pred_fc') and add softmax on top
pred_fc = sym.get_internals()['pred_fc_output']
sym = mx.sym.softmax(data=pred_fc)
mod = mx.mod.Module(symbol=sym, context=mx.cpu(), data_names=data_names, label_names=None)
mod.bind(for_training=False, data_shapes=data_shapes)
mod.set_params(arg_params, aux_params, allow_missing=False)
return mod
|
Program entry point
|
def main():
"""Program entry point"""
parser = argparse.ArgumentParser()
parser.add_argument("path", help="Path to the CAPTCHA image file")
parser.add_argument("--prefix", help="Checkpoint prefix [Default 'ocr']", default='ocr')
parser.add_argument("--epoch", help="Checkpoint epoch [Default 100]", type=int, default=100)
args = parser.parse_args()
init_state_names, init_state_arrays = lstm_init_states(batch_size=1)
img = read_img(args.path)
sample = SimpleBatch(
data_names=['data'] + init_state_names,
data=[mx.nd.array(img)] + init_state_arrays)
mod = load_module(args.prefix, args.epoch, sample.data_names, sample.provide_data)
mod.forward(sample)
prob = mod.get_outputs()[0].asnumpy()
prediction = CtcMetrics.ctc_label(np.argmax(prob, axis=-1).tolist())
# Predictions are 1 to 10 for digits 0 to 9 respectively (prediction 0 means no-digit)
prediction = [p - 1 for p in prediction]
print("Digits:", prediction)
|
Flip boxes horizontally. If done incorrectly this yields invalid values in bbox_transform
(no overlap); note that indices 0 and 2 are swapped, so the originals must be saved before
assignment.
:param bbox: [n][x1, y1, x2, y2]
:param width: image width (cv2 shape is (height, width, channel))
:param flip_x: if True, flip x1 and x2
:return: flipped boxes
|
def bbox_flip(bbox, width, flip_x=False):
"""
Flip boxes horizontally. If done incorrectly this yields invalid values in bbox_transform
(no overlap); note that indices 0 and 2 are swapped, so the originals must be saved before
assignment.
:param bbox: [n][x1, y1, x2, y2]
:param width: image width (cv2 shape is (height, width, channel))
:param flip_x: if True, flip x1 and x2
:return: flipped boxes
"""
if flip_x:
xmax = width - bbox[:, 0]
xmin = width - bbox[:, 2]
bbox[:, 0] = xmin
bbox[:, 2] = xmax
return bbox
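A small numpy check of the horizontal flip; the box and width are made up.
import numpy as np

boxes = np.array([[10, 5, 30, 25]], dtype=np.float64)   # [x1, y1, x2, y2]
print(bbox_flip(boxes.copy(), width=100, flip_x=True))
# [[70.  5. 90. 25.]] -- x coordinates mirrored about the image width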
|
determine overlaps between boxes and query_boxes
:param boxes: n * 4 bounding boxes
:param query_boxes: k * 4 bounding boxes
:return: overlaps: n * k overlaps
|
def bbox_overlaps(boxes, query_boxes):
"""
determine overlaps between boxes and query_boxes
:param boxes: n * 4 bounding boxes
:param query_boxes: k * 4 bounding boxes
:return: overlaps: n * k overlaps
"""
n_ = boxes.shape[0]
k_ = query_boxes.shape[0]
overlaps = np.zeros((n_, k_), dtype=np.float64)  # np.float was removed in recent NumPy
for k in range(k_):
query_box_area = (query_boxes[k, 2] - query_boxes[k, 0] + 1) * (query_boxes[k, 3] - query_boxes[k, 1] + 1)
for n in range(n_):
iw = min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + 1
if iw > 0:
ih = min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + 1
if ih > 0:
box_area = (boxes[n, 2] - boxes[n, 0] + 1) * (boxes[n, 3] - boxes[n, 1] + 1)
all_area = float(box_area + query_box_area - iw * ih)
overlaps[n, k] = iw * ih / all_area
return overlaps
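A quick numpy sanity check with toy boxes.
import numpy as np

boxes = np.array([[0, 0, 9, 9]], dtype=np.float64)                  # one 10x10 box
query = np.array([[0, 0, 9, 9], [5, 5, 14, 14]], dtype=np.float64)  # identical + shifted box
print(bbox_overlaps(boxes, query))
# [[1.         0.14285714]] -- IoU of 1 with itself, 25/175 with the shifted box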
|
Clip boxes to image boundaries.
:param boxes: [N, 4* num_classes]
:param im_shape: tuple of 2
:return: [N, 4* num_classes]
|
def clip_boxes(boxes, im_shape):
"""
Clip boxes to image boundaries.
:param boxes: [N, 4* num_classes]
:param im_shape: tuple of 2
:return: [N, 4* num_classes]
"""
# x1 >= 0
boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0)
# y2 < im_shape[0]
boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0)
return boxes
|
compute bounding box regression targets from ex_rois to gt_rois
:param ex_rois: [N, 4]
:param gt_rois: [N, 4]
:param box_stds: per-coordinate standard deviations used to normalize the targets
:return: [N, 4]
|
def bbox_transform(ex_rois, gt_rois, box_stds):
"""
compute bounding box regression targets from ex_rois to gt_rois
:param ex_rois: [N, 4]
:param gt_rois: [N, 4]
:param box_stds: per-coordinate standard deviations used to normalize the targets
:return: [N, 4]
"""
assert ex_rois.shape[0] == gt_rois.shape[0], 'inconsistent rois number'
ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
ex_ctr_x = ex_rois[:, 0] + 0.5 * (ex_widths - 1.0)
ex_ctr_y = ex_rois[:, 1] + 0.5 * (ex_heights - 1.0)
gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
gt_ctr_x = gt_rois[:, 0] + 0.5 * (gt_widths - 1.0)
gt_ctr_y = gt_rois[:, 1] + 0.5 * (gt_heights - 1.0)
targets_dx = (gt_ctr_x - ex_ctr_x) / (ex_widths + 1e-14) / box_stds[0]
targets_dy = (gt_ctr_y - ex_ctr_y) / (ex_heights + 1e-14) / box_stds[1]
targets_dw = np.log(gt_widths / ex_widths) / box_stds[2]
targets_dh = np.log(gt_heights / ex_heights) / box_stds[3]
targets = np.vstack((targets_dx, targets_dy, targets_dw, targets_dh)).transpose()
return targets
|
Transform the set of class-agnostic boxes into class-specific boxes
by applying the predicted offsets (box_deltas)
:param boxes: !important [N 4]
:param box_deltas: [N, 4 * num_classes]
:return: [N 4 * num_classes]
|
def bbox_pred(boxes, box_deltas, box_stds):
"""
Transform the set of class-agnostic boxes into class-specific boxes
by applying the predicted offsets (box_deltas)
:param boxes: !important [N 4]
:param box_deltas: [N, 4 * num_classes]
:return: [N 4 * num_classes]
"""
if boxes.shape[0] == 0:
return np.zeros((0, box_deltas.shape[1]))
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * (widths - 1.0)
ctr_y = boxes[:, 1] + 0.5 * (heights - 1.0)
dx = box_deltas[:, 0::4] * box_stds[0]
dy = box_deltas[:, 1::4] * box_stds[1]
dw = box_deltas[:, 2::4] * box_stds[2]
dh = box_deltas[:, 3::4] * box_stds[3]
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(box_deltas.shape)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * (pred_w - 1.0)
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * (pred_h - 1.0)
# x2
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * (pred_w - 1.0)
# y2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * (pred_h - 1.0)
return pred_boxes
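A round-trip sanity check: encoding with bbox_transform and decoding with bbox_pred (using the same box_stds) should recover the ground-truth boxes; all values are made up.
import numpy as np

box_stds = (0.1, 0.1, 0.2, 0.2)
anchors = np.array([[10., 10., 50., 60.]])      # [x1, y1, x2, y2]
gt = np.array([[12., 14., 48., 70.]])
deltas = bbox_transform(anchors, gt, box_stds)  # (1, 4) normalized offsets
decoded = bbox_pred(anchors, deltas, box_stds)  # (1, 4) decoded boxes
print(np.allclose(decoded, gt))                 # True (up to floating point)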
|
Greedily select boxes with high confidence whose overlap with the current maximum is <= thresh;
boxes with overlap >= thresh are ruled out.
:param dets: [[x1, y1, x2, y2, score]]
:param thresh: retain overlap < thresh
:return: indexes to keep
|
def nms(dets, thresh):
"""
Greedily select boxes with high confidence whose overlap with the current maximum is <= thresh;
boxes with overlap >= thresh are ruled out.
:param dets: [[x1, y1, x2, y2, score]]
:param thresh: retain overlap < thresh
:return: indexes to keep
"""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
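A toy example: two heavily overlapping boxes and one disjoint box; with thresh=0.5 the lower-scoring overlapping box is suppressed.
import numpy as np

dets = np.array([[10, 10, 50, 50, 0.9],        # kept (highest score)
                 [12, 12, 52, 52, 0.8],        # suppressed: IoU with the first > 0.5
                 [100, 100, 140, 140, 0.7]])   # kept: no overlap with the first
print(nms(dets, thresh=0.5))  # [0, 2]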
|
rois (nrois, 4), scores (nrois, nclasses), bbox_deltas (nrois, 4 * nclasses), im_info (3)
|
def im_detect(rois, scores, bbox_deltas, im_info,
bbox_stds, nms_thresh, conf_thresh):
"""rois (nroi, 4), scores (nrois, nclasses), bbox_deltas (nrois, 4 * nclasses), im_info (3)"""
rois = rois.asnumpy()
scores = scores.asnumpy()
bbox_deltas = bbox_deltas.asnumpy()
im_info = im_info.asnumpy()
height, width, scale = im_info
# post processing
pred_boxes = bbox_pred(rois, bbox_deltas, bbox_stds)
pred_boxes = clip_boxes(pred_boxes, (height, width))
# we used scaled image & roi to train, so it is necessary to transform them back
pred_boxes = pred_boxes / scale
# convert to per class detection results
det = []
for j in range(1, scores.shape[-1]):
indexes = np.where(scores[:, j] > conf_thresh)[0]
cls_scores = scores[indexes, j, np.newaxis]
cls_boxes = pred_boxes[indexes, j * 4:(j + 1) * 4]
cls_dets = np.hstack((cls_boxes, cls_scores))
keep = nms(cls_dets, thresh=nms_thresh)
cls_id = np.ones_like(cls_scores) * j
det.append(np.hstack((cls_id, cls_scores, cls_boxes))[keep, :])
# assemble all classes
det = np.concatenate(det, axis=0)
return det
|
Convert a Python string to a C string.
|
def c_str(string):
""""Convert a python string to C string."""
if not isinstance(string, str):
string = string.decode('ascii')
return ctypes.c_char_p(string.encode('utf-8'))
|
Find mxnet library.
|
def _find_lib_path():
"""Find mxnet library."""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
amalgamation_lib_path = os.path.join(curr_path, '../../lib/libmxnet_predict.so')
if os.path.exists(amalgamation_lib_path) and os.path.isfile(amalgamation_lib_path):
lib_path = [amalgamation_lib_path]
return lib_path
else:
logging.info('Cannot find libmxnet_predict.so. Will search for MXNet library using libinfo.py then.')
try:
from mxnet.libinfo import find_lib_path
lib_path = find_lib_path()
return lib_path
except ImportError:
libinfo_path = os.path.join(curr_path, '../../python/mxnet/libinfo.py')
if os.path.exists(libinfo_path) and os.path.isfile(libinfo_path):
libinfo = {'__file__': libinfo_path}
exec(compile(open(libinfo_path, "rb").read(), libinfo_path, 'exec'), libinfo, libinfo)
lib_path = libinfo['find_lib_path']()
return lib_path
else:
raise RuntimeError('Cannot find libinfo.py at %s.' % libinfo_path)
|
Load the library by searching possible paths.
|
def _load_lib():
"""Load libary by searching possible path."""
lib_path = _find_lib_path()
lib = ctypes.cdll.LoadLibrary(lib_path[0])
# error reporting: MXGetLastError returns a C string
lib.MXGetLastError.restype = ctypes.c_char_p
return lib
|
Load ndarray file and return as list of numpy array.
Parameters
----------
nd_bytes : str or bytes
The internal ndarray bytes
Returns
-------
out : dict of str to numpy array or list of numpy array
The output list or dict, depending on whether the saved type is list or dict.
|
def load_ndarray_file(nd_bytes):
"""Load ndarray file and return as list of numpy array.
Parameters
----------
nd_bytes : str or bytes
The internal ndarray bytes
Returns
-------
out : dict of str to numpy array or list of numpy array
The output list or dict, depending on whether the saved type is list or dict.
"""
handle = NDListHandle()
olen = mx_uint()
nd_bytes = bytearray(nd_bytes)
ptr = (ctypes.c_char * len(nd_bytes)).from_buffer(nd_bytes)
_check_call(_LIB.MXNDListCreate(
ptr, len(nd_bytes),
ctypes.byref(handle), ctypes.byref(olen)))
keys = []
arrs = []
for i in range(olen.value):
key = ctypes.c_char_p()
cptr = mx_float_p()
pdata = ctypes.POINTER(mx_uint)()
ndim = mx_uint()
_check_call(_LIB.MXNDListGet(
handle, mx_uint(i), ctypes.byref(key),
ctypes.byref(cptr), ctypes.byref(pdata), ctypes.byref(ndim)))
shape = tuple(pdata[:ndim.value])
dbuffer = (mx_float * np.prod(shape)).from_address(ctypes.addressof(cptr.contents))
ret = np.frombuffer(dbuffer, dtype=np.float32).reshape(shape)
ret = np.array(ret, dtype=np.float32)
keys.append(py_str(key.value))
arrs.append(ret)
_check_call(_LIB.MXNDListFree(handle))
if len(keys) == 0 or len(keys[0]) == 0:
return arrs
else:
return {keys[i] : arrs[i] for i in range(len(keys))}
|
Perform forward to get the output.
Parameters
----------
**kwargs
Keyword arguments of input variable name to data.
Examples
--------
>>> predictor.forward(data=mydata)
>>> out = predictor.get_output(0)
|
def forward(self, **kwargs):
"""Perform forward to get the output.
Parameters
----------
**kwargs
Keyword arguments of input variable name to data.
Examples
--------
>>> predictor.forward(data=mydata)
>>> out = predictor.get_output(0)
"""
for k, v in kwargs.items():
if not isinstance(v, np.ndarray):
raise ValueError("Expect numpy ndarray as input")
v = np.asarray(v, dtype=np.float32, order='C')
_check_call(_LIB.MXPredSetInput(
self.handle, c_str(k),
v.ctypes.data_as(mx_float_p),
mx_uint(v.size)))
_check_call(_LIB.MXPredForward(self.handle))
|
Change the input shape of the predictor.
Parameters
----------
input_shapes : dict of str to tuple
The new shape of input data.
Examples
--------
>>> predictor.reshape({'data':data_shape_tuple})
|
def reshape(self, input_shapes):
"""Change the input shape of the predictor.
Parameters
----------
input_shapes : dict of str to tuple
The new shape of input data.
Examples
--------
>>> predictor.reshape({'data':data_shape_tuple})
"""
indptr = [0]
sdata = []
keys = []
for k, v in input_shapes.items():
if not isinstance(v, tuple):
raise ValueError("Expect input_shapes to be dict str->tuple")
keys.append(c_str(k))
sdata.extend(v)
indptr.append(len(sdata))
new_handle = PredictorHandle()
_check_call(_LIB.MXPredReshape(
mx_uint(len(indptr) - 1),
c_array(ctypes.c_char_p, keys),
c_array(mx_uint, indptr),
c_array(mx_uint, sdata),
self.handle,
ctypes.byref(new_handle)))
_check_call(_LIB.MXPredFree(self.handle))
self.handle = new_handle
|
Get the index-th output.
Parameters
----------
index : int
The index of output.
Returns
-------
out : numpy array.
The output array.
|
def get_output(self, index):
"""Get the index-th output.
Parameters
----------
index : int
The index of output.
Returns
-------
out : numpy array.
The output array.
"""
pdata = ctypes.POINTER(mx_uint)()
ndim = mx_uint()
_check_call(_LIB.MXPredGetOutputShape(
self.handle, index,
ctypes.byref(pdata),
ctypes.byref(ndim)))
shape = tuple(pdata[:ndim.value])
data = np.empty(shape, dtype=np.float32)
_check_call(_LIB.MXPredGetOutput(
self.handle, mx_uint(index),
data.ctypes.data_as(mx_float_p),
mx_uint(data.size)))
return data
|
Begin an episode of a game instance. We can play the game for a maximum of
`max_episode_step` and after that, we are forced to restart
|
def begin_episode(self, max_episode_step=DEFAULT_MAX_EPISODE_STEP):
"""
Begin an episode of a game instance. We can play the game for a maximum of
`max_episode_step` and after that, we are forced to restart
"""
if self.episode_step > self.max_episode_step or self.ale.game_over():
self.start()
else:
for i in range(self.screen_buffer_length):
self.ale.act(0)
self.ale.getScreenGrayscale(self.screen_buffer[i % self.screen_buffer_length, :, :])
self.max_episode_step = max_episode_step
self.start_lives = self.ale.lives()
self.episode_reward = 0
self.episode_step = 0
|
Reset before re-using the cell for another graph.
|
def reset(self):
"""Reset before re-using the cell for another graph."""
self._init_counter = -1
self._counter = -1
for cell in self._children.values():
cell.reset()
|
Initial state for this cell.
Parameters
----------
func : callable, default symbol.zeros
Function for creating initial state.
For Symbol API, func can be `symbol.zeros`, `symbol.uniform`,
`symbol.var etc`. Use `symbol.var` if you want to directly
feed input as states.
For NDArray API, func can be `ndarray.zeros`, `ndarray.ones`, etc.
batch_size: int, default 0
Only required for NDArray API. Size of the batch ('N' in layout)
dimension of input.
**kwargs :
Additional keyword arguments passed to func. For example
`mean`, `std`, `dtype`, etc.
Returns
-------
states : nested list of Symbol
Starting states for the first RNN step.
|
def begin_state(self, batch_size=0, func=ndarray.zeros, **kwargs):
"""Initial state for this cell.
Parameters
----------
func : callable, default symbol.zeros
Function for creating initial state.
For Symbol API, func can be `symbol.zeros`, `symbol.uniform`,
`symbol.var etc`. Use `symbol.var` if you want to directly
feed input as states.
For NDArray API, func can be `ndarray.zeros`, `ndarray.ones`, etc.
batch_size: int, default 0
Only required for NDArray API. Size of the batch ('N' in layout)
dimension of input.
**kwargs :
Additional keyword arguments passed to func. For example
`mean`, `std`, `dtype`, etc.
Returns
-------
states : nested list of Symbol
Starting states for the first RNN step.
"""
assert not self._modified, \
"After applying modifier cells (e.g. ZoneoutCell) the base " \
"cell cannot be called directly. Call the modifier cell instead."
states = []
for info in self.state_info(batch_size):
self._init_counter += 1
if info is not None:
info.update(kwargs)
else:
info = kwargs
state = func(name='%sbegin_state_%d'%(self._prefix, self._init_counter),
**info)
states.append(state)
return states
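A Gluon usage sketch: zero initial states for an LSTM cell fed to a single step; the hidden size, batch size, and input width are arbitrary.
import mxnet as mx
from mxnet.gluon import rnn

cell = rnn.LSTMCell(hidden_size=20)
cell.initialize()
states = cell.begin_state(batch_size=4)             # [h, c], each of shape (4, 20)
out, new_states = cell(mx.nd.ones((4, 10)), states)
print(out.shape)                                    # (4, 20)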
|
Unrolls an RNN cell across time steps.
Parameters
----------
length : int
Number of steps to unroll.
inputs : Symbol, list of Symbol, or None
If `inputs` is a single Symbol (usually the output
of Embedding symbol), it should have shape
(batch_size, length, ...) if `layout` is 'NTC',
or (length, batch_size, ...) if `layout` is 'TNC'.
If `inputs` is a list of symbols (usually output of
previous unroll), they should all have shape
(batch_size, ...).
begin_state : nested list of Symbol, optional
Input states created by `begin_state()`
or output state of another cell.
Created from `begin_state()` if `None`.
layout : str, optional
`layout` of input symbol. Only used if inputs
is a single Symbol.
merge_outputs : bool, optional
If `False`, returns outputs as a list of Symbols.
If `True`, concatenates output across time steps
and returns a single symbol with shape
(batch_size, length, ...) if layout is 'NTC',
or (length, batch_size, ...) if layout is 'TNC'.
If `None`, output whatever is faster.
valid_length : Symbol, NDArray or None
`valid_length` specifies the length of the sequences in the batch without padding.
This option is especially useful for building sequence-to-sequence models where
the input and output sequences would potentially be padded.
If `valid_length` is None, all sequences are assumed to have the same length.
If `valid_length` is a Symbol or NDArray, it should have shape (batch_size,).
The ith element will be the length of the ith sequence in the batch.
The last valid state will be returned and the padded outputs will be masked with 0.
Note that `valid_length` must be smaller than or equal to `length`.
Returns
-------
outputs : list of Symbol or Symbol
Symbol (if `merge_outputs` is True) or list of Symbols
(if `merge_outputs` is False) corresponding to the output from
the RNN from this unrolling.
states : list of Symbol
The new state of this RNN after this unrolling.
The type of this symbol is same as the output of `begin_state()`.
|
def unroll(self, length, inputs, begin_state=None, layout='NTC', merge_outputs=None,
valid_length=None):
"""Unrolls an RNN cell across time steps.
Parameters
----------
length : int
Number of steps to unroll.
inputs : Symbol, list of Symbol, or None
If `inputs` is a single Symbol (usually the output
of Embedding symbol), it should have shape
(batch_size, length, ...) if `layout` is 'NTC',
or (length, batch_size, ...) if `layout` is 'TNC'.
If `inputs` is a list of symbols (usually output of
previous unroll), they should all have shape
(batch_size, ...).
begin_state : nested list of Symbol, optional
Input states created by `begin_state()`
or output state of another cell.
Created from `begin_state()` if `None`.
layout : str, optional
`layout` of input symbol. Only used if inputs
is a single Symbol.
merge_outputs : bool, optional
If `False`, returns outputs as a list of Symbols.
If `True`, concatenates output across time steps
and returns a single symbol with shape
(batch_size, length, ...) if layout is 'NTC',
or (length, batch_size, ...) if layout is 'TNC'.
If `None`, output whatever is faster.
valid_length : Symbol, NDArray or None
`valid_length` specifies the length of the sequences in the batch without padding.
This option is especially useful for building sequence-to-sequence models where
the input and output sequences would potentially be padded.
If `valid_length` is None, all sequences are assumed to have the same length.
If `valid_length` is a Symbol or NDArray, it should have shape (batch_size,).
The ith element will be the length of the ith sequence in the batch.
The last valid state will be returned and the padded outputs will be masked with 0.
Note that `valid_length` must be smaller than or equal to `length`.
Returns
-------
outputs : list of Symbol or Symbol
Symbol (if `merge_outputs` is True) or list of Symbols
(if `merge_outputs` is False) corresponding to the output from
the RNN from this unrolling.
states : list of Symbol
The new state of this RNN after this unrolling.
The type of this symbol is same as the output of `begin_state()`.
"""
# pylint: disable=too-many-locals
self.reset()
inputs, axis, F, batch_size = _format_sequence(length, inputs, layout, False)
begin_state = _get_begin_state(self, F, begin_state, inputs, batch_size)
states = begin_state
outputs = []
all_states = []
for i in range(length):
output, states = self(inputs[i], states)
outputs.append(output)
if valid_length is not None:
all_states.append(states)
if valid_length is not None:
states = [F.SequenceLast(F.stack(*ele_list, axis=0),
sequence_length=valid_length,
use_sequence_length=True,
axis=0)
for ele_list in zip(*all_states)]
outputs = _mask_sequence_variable_length(F, outputs, length, valid_length, axis, True)
outputs, _, _, _ = _format_sequence(length, outputs, layout, merge_outputs)
return outputs, states
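A Gluon usage sketch of unroll: a GRU cell over 6 time steps with 'NTC' input and merged outputs; the shapes are arbitrary.
import mxnet as mx
from mxnet.gluon import rnn

cell = rnn.GRUCell(hidden_size=32)
cell.initialize()
inputs = mx.nd.random.uniform(shape=(4, 6, 10))   # (batch, time, feature) for layout 'NTC'
outputs, states = cell.unroll(6, inputs, layout='NTC', merge_outputs=True)
print(outputs.shape)    # (4, 6, 32)
print(states[0].shape)  # (4, 32)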
|
Get the activation function. Convert it if it is a string.
|
def _get_activation(self, F, inputs, activation, **kwargs):
"""Get activation function. Convert if is string"""
func = {'tanh': F.tanh,
'relu': F.relu,
'sigmoid': F.sigmoid,
'softsign': F.softsign}.get(activation)
if func:
return func(inputs, **kwargs)
elif isinstance(activation, string_types):
return F.Activation(inputs, act_type=activation, **kwargs)
elif isinstance(activation, LeakyReLU):
return F.LeakyReLU(inputs, act_type='leaky', slope=activation._alpha, **kwargs)
return activation(inputs, **kwargs)
|
Unrolls the recurrent cell for one time step.
Parameters
----------
inputs : sym.Variable
Input symbol, 2D, of shape (batch_size * num_units).
states : list of sym.Variable
RNN state from previous step or the output of begin_state().
Returns
-------
output : Symbol
Symbol corresponding to the output from the RNN when unrolling
for a single time step.
states : list of Symbol
The new state of this RNN after this unrolling.
The type of this symbol is same as the output of `begin_state()`.
This can be used as an input state to the next time step
of this RNN.
See Also
--------
begin_state: This function can provide the states for the first time step.
unroll: This function unrolls an RNN for a given number of (>=1) time steps.
|
def forward(self, inputs, states):
"""Unrolls the recurrent cell for one time step.
Parameters
----------
inputs : sym.Variable
Input symbol, 2D, of shape (batch_size * num_units).
states : list of sym.Variable
RNN state from previous step or the output of begin_state().
Returns
-------
output : Symbol
Symbol corresponding to the output from the RNN when unrolling
for a single time step.
states : list of Symbol
The new state of this RNN after this unrolling.
The type of this symbol is same as the output of `begin_state()`.
This can be used as an input state to the next time step
of this RNN.
See Also
--------
begin_state: This function can provide the states for the first time step.
unroll: This function unrolls an RNN for a given number of (>=1) time steps.
"""
# pylint: disable= arguments-differ
self._counter += 1
return super(RecurrentCell, self).forward(inputs, states)
|
Check that all input names are in symbol's arguments.
|
def _check_input_names(symbol, names, typename, throw):
"""Check that all input names are in symbol's arguments."""
args = symbol.list_arguments()
for name in names:
if name in args:
continue
candidates = [arg for arg in args if
not arg.endswith('_weight') and
not arg.endswith('_bias') and
not arg.endswith('_gamma') and
not arg.endswith('_beta')]
msg = "\033[91mYou created Module with Module(..., %s_names=%s) but " \
"input with name '%s' is not found in symbol.list_arguments(). " \
"Did you mean one of:\n\t%s\033[0m"%(
typename, str(names), name, '\n\t'.join(candidates))
if throw:
raise ValueError(msg)
else:
warnings.warn(msg)
|
Check that input names match the input data descriptors.
|
def _check_names_match(data_names, data_shapes, name, throw):
"""Check that input names matches input data descriptors."""
actual = [x[0] for x in data_shapes]
if sorted(data_names) != sorted(actual):
msg = "Data provided by %s_shapes don't match names specified by %s_names (%s vs. %s)"%(
name, name, str(data_shapes), str(data_names))
if throw:
raise ValueError(msg)
else:
warnings.warn(msg)
|
parse data_attrs into DataDesc format and check that names match
|
def _parse_data_desc(data_names, label_names, data_shapes, label_shapes):
"""parse data_attrs into DataDesc format and check that names match"""
data_shapes = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in data_shapes]
_check_names_match(data_names, data_shapes, 'data', True)
if label_shapes is not None:
label_shapes = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in label_shapes]
_check_names_match(label_names, label_shapes, 'label', False)
else:
_check_names_match(label_names, [], 'label', False)
return data_shapes, label_shapes
|
A convenient function that calls both ``forward`` and ``backward``.
|
def forward_backward(self, data_batch):
"""A convenient function that calls both ``forward`` and ``backward``."""
self.forward(data_batch, is_train=True)
self.backward()
|
Runs prediction on ``eval_data`` and evaluates the performance according to
the given ``eval_metric``.
Check out the `Module Tutorial <http://mxnet.io/tutorials/basic/module.html>`_ to see
an end-to-end use case.
Parameters
----------
eval_data : DataIter
Evaluation data to run prediction on.
eval_metric : EvalMetric or list of EvalMetrics
Evaluation metric to use.
num_batch : int
Number of batches to run. Defaults to ``None``, indicating run until the `DataIter`
finishes.
batch_end_callback : function
Could also be a list of functions.
reset : bool
Defaults to ``True``. Indicates whether we should reset `eval_data` before starting
evaluating.
epoch : int
Defaults to 0. For compatibility, this will be passed to callbacks (if any).
During training, this will correspond to the training epoch number.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
Examples
--------
>>> # An example of using score for prediction.
>>> # Evaluate accuracy on val_dataiter
>>> metric = mx.metric.Accuracy()
>>> mod.score(val_dataiter, metric)
>>> mod.score(val_dataiter, ['mse', 'acc'])
|
def score(self, eval_data, eval_metric, num_batch=None, batch_end_callback=None,
score_end_callback=None,
reset=True, epoch=0, sparse_row_id_fn=None):
"""Runs prediction on ``eval_data`` and evaluates the performance according to
the given ``eval_metric``.
Check out the `Module Tutorial <http://mxnet.io/tutorials/basic/module.html>`_ to see
an end-to-end use case.
Parameters
----------
eval_data : DataIter
Evaluation data to run prediction on.
eval_metric : EvalMetric or list of EvalMetrics
Evaluation metric to use.
num_batch : int
Number of batches to run. Defaults to ``None``, indicating run until the `DataIter`
finishes.
batch_end_callback : function
Could also be a list of functions.
reset : bool
Defaults to ``True``. Indicates whether we should reset `eval_data` before starting
evaluating.
epoch : int
Defaults to 0. For compatibility, this will be passed to callbacks (if any).
During training, this will correspond to the training epoch number.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
Examples
--------
>>> # An example of using score for prediction.
>>> # Evaluate accuracy on val_dataiter
>>> metric = mx.metric.Accuracy()
>>> mod.score(val_dataiter, metric)
>>> mod.score(val_dataiter, ['mse', 'acc'])
"""
assert self.binded and self.params_initialized
if reset:
eval_data.reset()
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
eval_metric.reset()
actual_num_batch = 0
for nbatch, eval_batch in enumerate(eval_data):
if num_batch is not None and nbatch == num_batch:
break
self.prepare(eval_batch, sparse_row_id_fn=sparse_row_id_fn)
self.forward(eval_batch, is_train=False)
if isinstance(eval_batch, list):
self.update_metric(eval_metric, [eb.label for eb in eval_batch], pre_sliced=True)
else:
self.update_metric(eval_metric, eval_batch.label)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
for callback in _as_list(batch_end_callback):
callback(batch_end_params)
actual_num_batch += 1
if score_end_callback:
params = BatchEndParam(epoch=epoch,
nbatch=actual_num_batch,
eval_metric=eval_metric,
locals=locals())
for callback in _as_list(score_end_callback):
callback(params)
return eval_metric.get_name_value()
|
Iterates over predictions.
Examples
--------
>>> for pred, i_batch, batch in module.iter_predict(eval_data):
... # pred is a list of outputs from the module
... # i_batch is an integer
... # batch is the data batch from the data iterator
Parameters
----------
eval_data : DataIter
Evaluation data to run prediction on.
num_batch : int
Default is ``None``, indicating running all the batches in the data iterator.
reset : bool
Default is ``True``, indicating whether we should reset the data iter before start
doing prediction.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
|
def iter_predict(self, eval_data, num_batch=None, reset=True, sparse_row_id_fn=None):
"""Iterates over predictions.
Examples
--------
>>> for pred, i_batch, batch in module.iter_predict(eval_data):
... # pred is a list of outputs from the module
... # i_batch is an integer
... # batch is the data batch from the data iterator
Parameters
----------
eval_data : DataIter
Evaluation data to run prediction on.
num_batch : int
Default is ``None``, indicating running all the batches in the data iterator.
reset : bool
Default is ``True``, indicating whether we should reset the data iter before start
doing prediction.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
"""
assert self.binded and self.params_initialized
if reset:
eval_data.reset()
for nbatch, eval_batch in enumerate(eval_data):
if num_batch is not None and nbatch == num_batch:
break
self.prepare(eval_batch, sparse_row_id_fn=sparse_row_id_fn)
self.forward(eval_batch, is_train=False)
pad = eval_batch.pad
outputs = [out[0:out.shape[0]-pad] for out in self.get_outputs()]
yield (outputs, nbatch, eval_batch)
|
Runs prediction and collects the outputs.
When `merge_batches` is ``True`` (by default), the return value will be a list
``[out1, out2, out3]``, where each element is formed by concatenating the outputs for
all the mini-batches. When `always_output_list` is ``False`` (as by default),
then in the case of a single output, `out1` is returned instead of ``[out1]``.
When `merge_batches` is ``False``, the return value will be a nested list like
``[[out1_batch1, out2_batch1], [out1_batch2], ...]``. This mode is useful because
in some cases (e.g. bucketing), the module does not necessarily produce the same
number of outputs.
The objects in the results have type `NDArray`. If you need to work with a numpy array,
just call ``.asnumpy()`` on each `NDArray`.
Parameters
----------
eval_data : DataIter or NDArray or numpy array
Evaluation data to run prediction on.
num_batch : int
Defaults to ``None``, indicates running all the batches in the data iterator.
merge_batches : bool
Defaults to ``True``, see above for return values.
reset : bool
Defaults to ``True``, indicates whether we should reset the data iter before
doing prediction.
always_output_list : bool
Defaults to ``False``, see above for return values.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
Returns
-------
list of NDArray or list of list of NDArray
Prediction results.
Examples
--------
>>> # An example of using `predict` for prediction.
>>> # Predict on the first 10 batches of val_dataiter
>>> mod.predict(eval_data=val_dataiter, num_batch=10)
|
def predict(self, eval_data, num_batch=None, merge_batches=True, reset=True,
always_output_list=False, sparse_row_id_fn=None):
"""Runs prediction and collects the outputs.
When `merge_batches` is ``True`` (by default), the return value will be a list
``[out1, out2, out3]``, where each element is formed by concatenating the outputs for
all the mini-batches. When `always_output_list` is ``False`` (as by default),
then in the case of a single output, `out1` is returned instead of ``[out1]``.
When `merge_batches` is ``False``, the return value will be a nested list like
``[[out1_batch1, out2_batch1], [out1_batch2], ...]``. This mode is useful because
in some cases (e.g. bucketing), the module does not necessarily produce the same
number of outputs.
The objects in the results have type `NDArray`. If you need to work with a numpy array,
just call ``.asnumpy()`` on each `NDArray`.
Parameters
----------
eval_data : DataIter or NDArray or numpy array
Evaluation data to run prediction on.
num_batch : int
Defaults to ``None``, indicates running all the batches in the data iterator.
merge_batches : bool
Defaults to ``True``, see above for return values.
reset : bool
Defaults to ``True``, indicates whether we should reset the data iter before
doing prediction.
always_output_list : bool
Defaults to ``False``, see above for return values.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
Returns
-------
list of NDArray or list of list of NDArray
Prediction results.
Examples
--------
>>> # An example of using `predict` for prediction.
>>> # Predict on the first 10 batches of val_dataiter
>>> mod.predict(eval_data=val_dataiter, num_batch=10)
"""
assert self.binded and self.params_initialized
if isinstance(eval_data, (ndarray.NDArray, np.ndarray)):
if isinstance(eval_data, np.ndarray):
eval_data = ndarray.array(eval_data)
self.forward(DataBatch([eval_data]))
return self.get_outputs()[0]
if not isinstance(eval_data, DataIter):
raise ValueError('eval_data must be of type NDArray or DataIter')
if reset:
eval_data.reset()
output_list = []
for nbatch, eval_batch in enumerate(eval_data):
if num_batch is not None and nbatch == num_batch:
break
self.prepare(eval_batch, sparse_row_id_fn=sparse_row_id_fn)
self.forward(eval_batch, is_train=False)
pad = eval_batch.pad
outputs = [out[0:out.shape[0]-pad].copy() for out in self.get_outputs()]
output_list.append(outputs)
if len(output_list) == 0:
return output_list
if merge_batches:
num_outputs = len(output_list[0])
for out in output_list:
assert len(out) == num_outputs, \
'Cannot merge batches, as num of outputs is not the same ' + \
'in mini-batches. Maybe bucketing is used?'
output_list2 = [ndarray.concatenate([out[i] for out in output_list])
for i in range(num_outputs)]
if num_outputs == 1 and not always_output_list:
return output_list2[0]
return output_list2
return output_list
|
Assigns parameter and aux state values.
Parameters
----------
arg_params : dict
Dictionary of name to value (`NDArray`) mapping.
aux_params : dict
Dictionary of name to value (`NDArray`) mapping.
allow_missing : bool
If ``True``, params could contain missing values, and the initializer will be
called to fill those missing params.
force_init : bool
If ``True``, will force re-initialize even if already initialized.
allow_extra : boolean, optional
Whether to allow extra parameters that are not needed by the symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that are not needed by the executor.
Examples
--------
>>> # An example of setting module parameters.
>>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, n_epoch_load)
>>> mod.set_params(arg_params=arg_params, aux_params=aux_params)
|
def set_params(self, arg_params, aux_params, allow_missing=False, force_init=True,
allow_extra=False):
"""Assigns parameter and aux state values.
Parameters
----------
arg_params : dict
Dictionary of name to value (`NDArray`) mapping.
aux_params : dict
Dictionary of name to value (`NDArray`) mapping.
allow_missing : bool
If ``True``, params could contain missing values, and the initializer will be
called to fill those missing params.
force_init : bool
If ``True``, will force re-initialize even if already initialized.
allow_extra : boolean, optional
Whether to allow extra parameters that are not needed by the symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that are not needed by the executor.
Examples
--------
>>> # An example of setting module parameters.
>>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, n_epoch_load)
>>> mod.set_params(arg_params=arg_params, aux_params=aux_params)
"""
self.init_params(initializer=None, arg_params=arg_params, aux_params=aux_params,
allow_missing=allow_missing, force_init=force_init,
allow_extra=allow_extra)
|
Saves model parameters to file.
Parameters
----------
fname : str
Path to output param file.
Examples
--------
>>> # An example of saving module parameters.
>>> mod.save_params('myfile')
|
def save_params(self, fname):
"""Saves model parameters to file.
Parameters
----------
fname : str
Path to output param file.
Examples
--------
>>> # An example of saving module parameters.
>>> mod.save_params('myfile')
"""
arg_params, aux_params = self.get_params()
save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
ndarray.save(fname, save_dict)
|
Loads model parameters from file.
Parameters
----------
fname : str
Path to input param file.
Examples
--------
>>> # An example of loading module parameters.
>>> mod.load_params('myfile')
|
def load_params(self, fname):
"""Loads model parameters from file.
Parameters
----------
fname : str
Path to input param file.
Examples
--------
>>> # An example of loading module parameters.
>>> mod.load_params('myfile')
"""
save_dict = ndarray.load(fname)
arg_params = {}
aux_params = {}
for k, value in save_dict.items():
arg_type, name = k.split(':', 1)
if arg_type == 'arg':
arg_params[name] = value
elif arg_type == 'aux':
aux_params[name] = value
else:
raise ValueError("Invalid param file " + fname)
self.set_params(arg_params, aux_params)
|
Binds the symbols to construct executors. This is necessary before one
can perform computation with the module.
Parameters
----------
data_shapes : list of (str, tuple) or DataDesc objects
Typically is ``data_iter.provide_data``. Can also be a list of
(data name, data shape).
label_shapes : list of (str, tuple) or DataDesc objects
Typically is ``data_iter.provide_label``. Can also be a list of
(label name, label shape).
for_training : bool
Default is ``True``. Whether the executors should be bound for training.
inputs_need_grad : bool
Default is ``False``. Whether the gradients to the input data need to be computed.
Typically this is not needed. But this might be needed when implementing composition
of modules.
force_rebind : bool
Default is ``False``. This function does nothing if the executors are already
bound. But with this ``True``, the executors will be forced to rebind.
shared_module : Module
Default is ``None``. This is used in bucketing. When not ``None``, the shared module
essentially corresponds to a different bucket -- a module with different symbol
but with the same sets of parameters (e.g. unrolled RNNs with different lengths).
grad_req : str, list of str, dict of str to str
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(defaults to 'write').
Can be specified globally (str) or for each argument (list, dict).
Examples
--------
>>> # An example of binding symbols.
>>> mod.bind(data_shapes=[('data', (1, 10, 10))])
>>> # Assume train_iter is already created.
>>> mod.bind(data_shapes=train_iter.provide_data, label_shapes=train_iter.provide_label)
|
def bind(self, data_shapes, label_shapes=None, for_training=True,
inputs_need_grad=False, force_rebind=False, shared_module=None,
grad_req='write'):
"""Binds the symbols to construct executors. This is necessary before one
can perform computation with the module.
Parameters
----------
data_shapes : list of (str, tuple) or DataDesc objects
Typically is ``data_iter.provide_data``. Can also be a list of
(data name, data shape).
label_shapes : list of (str, tuple) or DataDesc objects
Typically is ``data_iter.provide_label``. Can also be a list of
(label name, label shape).
for_training : bool
Default is ``True``. Whether the executors should be bound for training.
inputs_need_grad : bool
Default is ``False``. Whether the gradients to the input data need to be computed.
Typically this is not needed. But this might be needed when implementing composition
of modules.
force_rebind : bool
Default is ``False``. This function does nothing if the executors are already
bound. But with this ``True``, the executors will be forced to rebind.
shared_module : Module
Default is ``None``. This is used in bucketing. When not ``None``, the shared module
essentially corresponds to a different bucket -- a module with different symbol
but with the same sets of parameters (e.g. unrolled RNNs with different lengths).
grad_req : str, list of str, dict of str to str
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(defaults to 'write').
Can be specified globally (str) or for each argument (list, dict).
Examples
--------
>>> # An example of binding symbols.
>>> mod.bind(data_shapes=[('data', (1, 10, 10))])
>>> # Assume train_iter is already created.
>>> mod.bind(data_shapes=train_iter.provide_data, label_shapes=train_iter.provide_label)
"""
raise NotImplementedError()
|
Find MXNet dynamic library files.
Returns
-------
lib_path : list(string)
List of all found paths to the libraries.
|
def find_lib_path():
"""Find MXNet dynamic library files.
Returns
-------
lib_path : list(string)
List of all found paths to the libraries.
"""
lib_from_env = os.environ.get('MXNET_LIBRARY_PATH')
if lib_from_env:
if os.path.isfile(lib_from_env):
if not os.path.isabs(lib_from_env):
logging.warning("MXNET_LIBRARY_PATH should be an absolute path, instead of: %s",
lib_from_env)
else:
if os.name == 'nt':
os.environ['PATH'] = os.environ['PATH'] + ';' + os.path.dirname(lib_from_env)
return [lib_from_env]
else:
logging.warning("MXNET_LIBRARY_PATH '%s' doesn't exist", lib_from_env)
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
api_path = os.path.join(curr_path, '../../lib/')
cmake_build_path = os.path.join(curr_path, '../../build/')
dll_path = [curr_path, api_path, cmake_build_path]
if os.name == 'nt':
dll_path.append(os.path.join(curr_path, '../../build'))
vs_configuration = 'Release'
if platform.architecture()[0] == '64bit':
dll_path.append(os.path.join(curr_path, '../../build', vs_configuration))
dll_path.append(os.path.join(curr_path, '../../windows/x64', vs_configuration))
else:
dll_path.append(os.path.join(curr_path, '../../build', vs_configuration))
dll_path.append(os.path.join(curr_path, '../../windows', vs_configuration))
elif os.name == "posix" and os.environ.get('LD_LIBRARY_PATH', None):
dll_path[0:0] = [p.strip() for p in os.environ['LD_LIBRARY_PATH'].split(":")]
if os.name == 'nt':
os.environ['PATH'] = os.path.dirname(__file__) + ';' + os.environ['PATH']
dll_path = [os.path.join(p, 'libmxnet.dll') for p in dll_path]
elif platform.system() == 'Darwin':
dll_path = [os.path.join(p, 'libmxnet.dylib') for p in dll_path] + \
[os.path.join(p, 'libmxnet.so') for p in dll_path]
else:
dll_path.append('../../../')
dll_path = [os.path.join(p, 'libmxnet.so') for p in dll_path]
lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)]
if len(lib_path) == 0:
raise RuntimeError('Cannot find the MXNet library.\n' +
'List of candidates:\n' + str('\n'.join(dll_path)))
if os.name == 'nt':
os.environ['PATH'] = os.environ['PATH'] + ';' + os.path.dirname(lib_path[0])
return lib_path
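A minimal usage sketch, assuming the function is exposed as ``mxnet.libinfo.find_lib_path`` as in typical installs; it simply lists whichever shared libraries were discovered.
import mxnet.libinfo as libinfo  # assumed module path

for path in libinfo.find_lib_path():
    print(path)   # e.g. .../lib/libmxnet.so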
|
Find MXNet included header files.
Returns
-------
incl_path : string
Path to the header files.
|
def find_include_path():
"""Find MXNet included header files.
Returns
-------
incl_path : string
Path to the header files.
"""
incl_from_env = os.environ.get('MXNET_INCLUDE_PATH')
if incl_from_env:
if os.path.isdir(incl_from_env):
if not os.path.isabs(incl_from_env):
logging.warning("MXNET_INCLUDE_PATH should be an absolute path, instead of: %s",
incl_from_env)
else:
return incl_from_env
else:
logging.warning("MXNET_INCLUDE_PATH '%s' doesn't exist", incl_from_env)
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
# include path in pip package
pip_incl_path = os.path.join(curr_path, 'include/')
if os.path.isdir(pip_incl_path):
return pip_incl_path
else:
# include path if build from source
src_incl_path = os.path.join(curr_path, '../../include/')
if os.path.isdir(src_incl_path):
return src_incl_path
else:
raise RuntimeError('Cannot find the MXNet include path in either ' + pip_incl_path +
' or ' + src_incl_path + '\n')
|
Generate a greyscale captcha image representing a number string
Parameters
----------
captcha_str: str
string of characters for the captcha image
Returns
-------
numpy.ndarray
Generated greyscale image in np.ndarray float type with values normalized to [0, 1]
|
def image(self, captcha_str):
"""Generate a greyscale captcha image representing number string
Parameters
----------
captcha_str: str
string of characters for the captcha image
Returns
-------
numpy.ndarray
Generated greyscale image in np.ndarray float type with values normalized to [0, 1]
"""
img = self.captcha.generate(captcha_str)
img = np.fromstring(img.getvalue(), dtype='uint8')
img = cv2.imdecode(img, cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img, (self.h, self.w))
img = img.transpose(1, 0)
img = np.multiply(img, 1 / 255.0)
return img
|
Generates a character string of digits. The number of digits is
between num_digit_min and num_digit_max
Returns
-------
str
|
def get_rand(num_digit_min, num_digit_max):
"""Generates a character string of digits. Number of digits are
between self.num_digit_min and self.num_digit_max
Returns
-------
str
"""
buf = ""
max_len = random.randint(num_digit_min, num_digit_max)
for i in range(max_len):
buf += str(random.randint(0, 9))
return buf
|
Generate a random captcha image sample
Returns
-------
(numpy.ndarray, str)
Tuple of image (numpy ndarray) and character string of digits used to generate the image
|
def _gen_sample(self):
"""Generate a random captcha image sample
Returns
-------
(numpy.ndarray, str)
Tuple of image (numpy ndarray) and character string of digits used to generate the image
"""
num_str = self.get_rand(self.num_digit_min, self.num_digit_max)
return self.captcha.image(num_str), num_str
|
Registers a new optimizer.
Once an optimizer is registered, we can create an instance of this
optimizer with `create_optimizer` later.
Examples
--------
>>> @mx.optimizer.Optimizer.register
... class MyOptimizer(mx.optimizer.Optimizer):
... pass
>>> optim = mx.optimizer.Optimizer.create_optimizer('MyOptimizer')
>>> print(type(optim))
<class '__main__.MyOptimizer'>
|
def register(klass):
"""Registers a new optimizer.
Once an optimizer is registered, we can create an instance of this
optimizer with `create_optimizer` later.
Examples
--------
>>> @mx.optimizer.Optimizer.register
... class MyOptimizer(mx.optimizer.Optimizer):
... pass
>>> optim = mx.optimizer.Optimizer.create_optimizer('MyOptimizer')
>>> print(type(optim))
<class '__main__.MyOptimizer'>
"""
assert(isinstance(klass, type))
name = klass.__name__.lower()
if name in Optimizer.opt_registry:
warnings.warn('WARNING: New optimizer %s.%s is overriding '
'existing optimizer %s.%s' %
(klass.__module__, klass.__name__,
Optimizer.opt_registry[name].__module__,
Optimizer.opt_registry[name].__name__))
Optimizer.opt_registry[name] = klass
return klass
|
Instantiates an optimizer with a given name and kwargs.
.. note:: We can use the alias `create` for ``Optimizer.create_optimizer``.
Parameters
----------
name: str
Name of the optimizer. Should be the name
of a subclass of Optimizer. Case insensitive.
kwargs: dict
Parameters for the optimizer.
Returns
-------
Optimizer
An instantiated optimizer.
Examples
--------
>>> sgd = mx.optimizer.Optimizer.create_optimizer('sgd')
>>> type(sgd)
<class 'mxnet.optimizer.SGD'>
>>> adam = mx.optimizer.create('adam', learning_rate=.1)
>>> type(adam)
<class 'mxnet.optimizer.Adam'>
|
def create_optimizer(name, **kwargs):
"""Instantiates an optimizer with a given name and kwargs.
.. note:: We can use the alias `create` for ``Optimizer.create_optimizer``.
Parameters
----------
name: str
Name of the optimizer. Should be the name
of a subclass of Optimizer. Case insensitive.
kwargs: dict
Parameters for the optimizer.
Returns
-------
Optimizer
An instantiated optimizer.
Examples
--------
>>> sgd = mx.optimizer.Optimizer.create_optimizer('sgd')
>>> type(sgd)
<class 'mxnet.optimizer.SGD'>
>>> adam = mx.optimizer.create('adam', learning_rate=.1)
>>> type(adam)
<class 'mxnet.optimizer.Adam'>
"""
if name.lower() in Optimizer.opt_registry:
return Optimizer.opt_registry[name.lower()](**kwargs)
else:
raise ValueError('Cannot find optimizer %s' % name)
|
Creates auxiliary state for a given weight, including an FP32 high-precision
copy if the original weight is FP16.
This method is provided to perform automatic mixed precision training
for optimizers that do not support it themselves.
Parameters
----------
index : int
A unique index to identify the weight.
weight : NDArray
The weight.
Returns
-------
state : any obj
The state associated with the weight.
|
def create_state_multi_precision(self, index, weight):
"""Creates auxiliary state for a given weight, including FP32 high
precision copy if original weight is FP16.
This method is provided to perform automatic mixed precision training
for optimizers that do not support it themselves.
Parameters
----------
index : int
A unique index to identify the weight.
weight : NDArray
The weight.
Returns
-------
state : any obj
The state associated with the weight.
"""
weight_master_copy = None
if self.multi_precision and weight.dtype == numpy.float16:
weight_master_copy = weight.astype(numpy.float32)
return (weight_master_copy,) + (self.create_state(index, weight_master_copy),)
if weight.dtype == numpy.float16 and not self.multi_precision:
warnings.warn("Accumulating with float16 in optimizer can lead to "
"poor accuracy or slow convergence. "
"Consider using multi_precision=True option of the "
"optimizer")
return self.create_state(index, weight)
|
Updates the given parameter using the corresponding gradient and state.
Mixed precision version.
Parameters
----------
index : int
The unique index of the parameter into the individual learning
rates and weight decays. Learning rates and weight decay
may be set via `set_lr_mult()` and `set_wd_mult()`, respectively.
weight : NDArray
The parameter to be updated.
grad : NDArray
The gradient of the objective with respect to this parameter.
state : any obj
The state returned by `create_state()`.
|
def update_multi_precision(self, index, weight, grad, state):
"""Updates the given parameter using the corresponding gradient and state.
Mixed precision version.
Parameters
----------
index : int
The unique index of the parameter into the individual learning
rates and weight decays. Learning rates and weight decay
may be set via `set_lr_mult()` and `set_wd_mult()`, respectively.
weight : NDArray
The parameter to be updated.
grad : NDArray
The gradient of the objective with respect to this parameter.
state : any obj
The state returned by `create_state()`.
"""
if self.multi_precision and weight.dtype == numpy.float16:
# Wrapper for mixed precision
weight_master_copy = state[0]
original_state = state[1]
grad32 = grad.astype(numpy.float32)
self.update(index, weight_master_copy, grad32, original_state)
cast(weight_master_copy, dtype=weight.dtype, out=weight)
else:
self.update(index, weight, grad, state)
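A minimal sketch of how the two multi-precision wrappers are exercised together; it assumes a registered optimizer such as 'sgd' created with ``multi_precision=True`` and an FP16 weight, so the returned state carries an FP32 master copy.
import mxnet as mx
import numpy as np

opt = mx.optimizer.create('sgd', learning_rate=0.1, multi_precision=True)
weight = mx.nd.ones((4,), dtype=np.float16)
grad = mx.nd.ones((4,), dtype=np.float16)
# state is (fp32 master copy, inner optimizer state); the update runs in FP32
# and the result is cast back into the FP16 weight.
state = opt.create_state_multi_precision(0, weight)
opt.update_multi_precision(0, weight, grad, state)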
|
Sets an individual learning rate multiplier for each parameter.
If you specify a learning rate multiplier for a parameter, then
the learning rate for the parameter will be set as the product of
the global learning rate `self.lr` and its multiplier.
.. note:: The default learning rate multiplier of a `Variable`
can be set with `lr_mult` argument in the constructor.
Parameters
----------
args_lr_mult : dict of str/int to float
For each of its key-value entries, the learning rate multiplier for the
parameter specified in the key will be set as the given value.
You can specify the parameter with either its name or its index.
If you use the name, you should pass `sym` in the constructor,
and the name you specified in the key of `args_lr_mult` should match
the name of the parameter in `sym`. If you use the index, it should
correspond to the index of the parameter used in the `update` method.
Specifying a parameter by its index is only supported for backward
compatibility, and we recommend to use the name instead.
|
def set_lr_mult(self, args_lr_mult):
"""Sets an individual learning rate multiplier for each parameter.
If you specify a learning rate multiplier for a parameter, then
the learning rate for the parameter will be set as the product of
the global learning rate `self.lr` and its multiplier.
.. note:: The default learning rate multiplier of a `Variable`
can be set with `lr_mult` argument in the constructor.
Parameters
----------
args_lr_mult : dict of str/int to float
For each of its key-value entries, the learning rate multiplier for the
parameter specified in the key will be set as the given value.
You can specify the parameter with either its name or its index.
If you use the name, you should pass `sym` in the constructor,
and the name you specified in the key of `args_lr_mult` should match
the name of the parameter in `sym`. If you use the index, it should
correspond to the index of the parameter used in the `update` method.
Specifying a parameter by its index is only supported for backward
compatibility, and we recommend to use the name instead.
"""
self.lr_mult = {}
if self.sym_info:
attr, arg_names = self.sym_info
for name in arg_names:
if name in attr and '__lr_mult__' in attr[name]:
self.lr_mult[name] = float(attr[name]['__lr_mult__'])
self.lr_mult.update(args_lr_mult)
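A hedged usage sketch: the keys below ('fc1_weight', 'fc2_weight') are hypothetical and must match parameter names known to the optimizer (via ``sym`` or ``param_idx2name``); a multiplier of 0 effectively freezes a parameter.
import mxnet as mx

opt = mx.optimizer.create('sgd', learning_rate=0.1)
opt.set_lr_mult({'fc1_weight': 10.0,   # train this layer 10x faster
                 'fc2_weight': 0.0})   # freeze this layer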
|
Sets an individual weight decay multiplier for each parameter.
By default, if `param_idx2name` was provided in the
constructor, the weight decay multiplier is set as 0 for all
parameters whose names don't end with ``_weight`` or
``_gamma``.
.. note:: The default weight decay multiplier for a `Variable`
can be set with its `wd_mult` argument in the constructor.
Parameters
----------
args_wd_mult : dict of string/int to float
For each of its key-value entries, the weight decay multiplier for the
parameter specified in the key will be set as the given value.
You can specify the parameter with either its name or its index.
If you use the name, you should pass `sym` in the constructor,
and the name you specified in the key of `args_wd_mult` should match
the name of the parameter in `sym`. If you use the index, it should
correspond to the index of the parameter used in the `update` method.
Specifying a parameter by its index is only supported for backward
compatibility, and we recommend to use the name instead.
|
def set_wd_mult(self, args_wd_mult):
"""Sets an individual weight decay multiplier for each parameter.
By default, if `param_idx2name` was provided in the
constructor, the weight decay multiplier is set as 0 for all
parameters whose names don't end with ``_weight`` or
``_gamma``.
.. note:: The default weight decay multiplier for a `Variable`
can be set with its `wd_mult` argument in the constructor.
Parameters
----------
args_wd_mult : dict of string/int to float
For each of its key-value entries, the weight decay multiplier for the
parameter specified in the key will be set as the given value.
You can specify the parameter with either its name or its index.
If you use the name, you should pass `sym` in the constructor,
and the name you specified in the key of `args_wd_mult` should match
the name of the parameter in `sym`. If you use the index, it should
correspond to the index of the parameter used in the `update` method.
Specifying a parameter by its index is only supported for backward
compatibility, and we recommend to use the name instead.
"""
self.wd_mult = {}
for n in self.idx2name.values():
if not (n.endswith('_weight') or n.endswith('_gamma')):
self.wd_mult[n] = 0.0
if self.sym_info:
attr, arg_names = self.sym_info
for name in arg_names:
if name in attr and '__wd_mult__' in attr[name]:
self.wd_mult[name] = float(attr[name]['__wd_mult__'])
self.wd_mult.update(args_wd_mult)
|
Sets the number of the currently handled device.
Parameters
----------
device_id : int
The number of current device.
|
def _set_current_context(self, device_id):
"""Sets the number of the currently handled device.
Parameters
----------
device_id : int
The number of current device.
"""
if device_id not in self._all_index_update_counts:
self._all_index_update_counts[device_id] = {}
self._index_update_count = self._all_index_update_counts[device_id]
|
Updates num_update.
Parameters
----------
index : int or list of int
The index to be updated.
|
def _update_count(self, index):
"""Updates num_update.
Parameters
----------
index : int or list of int
The index to be updated.
"""
if not isinstance(index, (list, tuple)):
index = [index]
for idx in index:
if idx not in self._index_update_count:
self._index_update_count[idx] = self.begin_num_update
self._index_update_count[idx] += 1
self.num_update = max(self._index_update_count[idx], self.num_update)
|
Gets the learning rates given the indices of the weights.
Parameters
----------
indices : list of int
Indices corresponding to weights.
Returns
-------
lrs : list of float
Learning rates for those indices.
|
def _get_lrs(self, indices):
"""Gets the learning rates given the indices of the weights.
Parameters
----------
indices : list of int
Indices corresponding to weights.
Returns
-------
lrs : list of float
Learning rates for those indices.
"""
if self.lr_scheduler is not None:
lr = self.lr_scheduler(self.num_update)
else:
lr = self.lr
lrs = [lr for _ in indices]
for i, index in enumerate(indices):
if index in self.param_dict:
lrs[i] *= self.param_dict[index].lr_mult
elif index in self.lr_mult:
lrs[i] *= self.lr_mult[index]
elif index in self.idx2name:
lrs[i] *= self.lr_mult.get(self.idx2name[index], 1.0)
return lrs
|
Gets weight decays for indices.
Returns 0 for non-weights if the names of weights were provided to `__init__`.
Parameters
----------
indices : list of int
Indices of weights.
Returns
-------
wds : list of float
Weight decays for those indices.
|
def _get_wds(self, indices):
"""Gets weight decays for indices.
Returns 0 for non-weights if the names of weights were provided to `__init__`.
Parameters
----------
indices : list of int
Indices of weights.
Returns
-------
wds : list of float
Weight decays for those indices.
"""
wds = [self.wd for _ in indices]
for i, index in enumerate(indices):
if index in self.param_dict:
wds[i] *= self.param_dict[index].wd_mult
elif index in self.wd_mult:
wds[i] *= self.wd_mult[index]
elif index in self.idx2name:
wds[i] *= self.wd_mult.get(self.idx2name[index], 1.0)
return wds
|
sync state context.
|
def sync_state_context(self, state, context):
"""sync state context."""
if isinstance(state, NDArray):
return state.as_in_context(context)
elif isinstance(state, (tuple, list)):
synced_state = (self.sync_state_context(i, context) for i in state)
if isinstance(state, tuple):
return tuple(synced_state)
else:
return list(synced_state)
else:
return state
|
Sets updater states.
|
def set_states(self, states):
"""Sets updater states."""
states = pickle.loads(states)
if isinstance(states, tuple) and len(states) == 2:
self.states, self.optimizer = states
else:
self.states = states
self.states_synced = dict.fromkeys(self.states.keys(), False)
|
Gets updater states.
Parameters
----------
dump_optimizer : bool, default False
Whether to also save the optimizer itself. This would also save optimizer
information such as learning rate and weight decay schedules.
|
def get_states(self, dump_optimizer=False):
"""Gets updater states.
Parameters
----------
dump_optimizer : bool, default False
Whether to also save the optimizer itself. This would also save optimizer
information such as learning rate and weight decay schedules.
"""
return pickle.dumps((self.states, self.optimizer) if dump_optimizer else self.states)
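A minimal round-trip sketch, assuming an ``Updater`` obtained via ``mx.optimizer.get_updater``; the pickled blob returned by ``get_states`` can be written to disk and later fed back to ``set_states``.
import mxnet as mx

updater = mx.optimizer.get_updater(mx.optimizer.create('adam'))
blob = updater.get_states(dump_optimizer=False)   # pickled updater states
updater.set_states(blob)                          # restore them later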
|
Preprocess: Convert a video into the mouth images
|
def preprocess(from_idx, to_idx, _params):
"""
Preprocess: Convert a video into the mouth images
"""
source_exts = '*.mpg'
src_path = _params['src_path']
tgt_path = _params['tgt_path']
face_predictor_path = './shape_predictor_68_face_landmarks.dat'
succ = set()
fail = set()
for idx in range(from_idx, to_idx):
s_id = 's' + str(idx) + '/'
source_path = src_path + '/' + s_id
target_path = tgt_path + '/' + s_id
fail_cnt = 0
for filepath in find_files(source_path, source_exts):
print("Processing: {}".format(filepath))
filepath_wo_ext = os.path.splitext(filepath)[0].split('/')[-2:]
target_dir = os.path.join(tgt_path, '/'.join(filepath_wo_ext))
if os.path.exists(target_dir):
continue
try:
video = Video(vtype='face', \
face_predictor_path=face_predictor_path).from_video(filepath)
mkdir_p(target_dir)
i = 0
if video.mouth[0] is None:
continue
for frame in video.mouth:
io.imsave(os.path.join(target_dir, "mouth_{0:03d}.png".format(i)), frame)
i += 1
except ValueError as error:
print(error)
fail_cnt += 1
if fail_cnt == 0:
succ.add(idx)
else:
fail.add(idx)
return (succ, fail)
|
Read from frames
|
def from_frames(self, path):
"""
Read from frames
"""
frames_path = sorted([os.path.join(path, x) for x in os.listdir(path)])
frames = [ndimage.imread(frame_path) for frame_path in frames_path]
self.handle_type(frames)
return self
|
Read from videos
|
def from_video(self, path):
"""
Read from videos
"""
frames = self.get_video_frames(path)
self.handle_type(frames)
return self
|
Config video types
|
def handle_type(self, frames):
"""
Config video types
"""
if self.vtype == 'mouth':
self.process_frames_mouth(frames)
elif self.vtype == 'face':
self.process_frames_face(frames)
else:
raise Exception('Video type not found')
|
Preprocess from frames using face detector
|
def process_frames_face(self, frames):
"""
Preprocess from frames using face detector
"""
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(self.face_predictor_path)
mouth_frames = self.get_frames_mouth(detector, predictor, frames)
self.face = np.array(frames)
self.mouth = np.array(mouth_frames)
if mouth_frames[0] is not None:
self.set_data(mouth_frames)
|
Preprocess from frames using mouth detector
|
def process_frames_mouth(self, frames):
"""
Preprocess from frames using mouth detector
"""
self.face = np.array(frames)
self.mouth = np.array(frames)
self.set_data(frames)
|
Get frames using mouth crop
|
def get_frames_mouth(self, detector, predictor, frames):
"""
Get frames using mouth crop
"""
mouth_width = 100
mouth_height = 50
horizontal_pad = 0.19
normalize_ratio = None
mouth_frames = []
for frame in frames:
dets = detector(frame, 1)
shape = None
for det in dets:
shape = predictor(frame, det)
i = -1
if shape is None: # Detector doesn't detect face, just return None
return [None]
mouth_points = []
for part in shape.parts():
i += 1
if i < 48: # Only take mouth region
continue
mouth_points.append((part.x, part.y))
np_mouth_points = np.array(mouth_points)
mouth_centroid = np.mean(np_mouth_points[:, -2:], axis=0)
if normalize_ratio is None:
mouth_left = np.min(np_mouth_points[:, :-1]) * (1.0 - horizontal_pad)
mouth_right = np.max(np_mouth_points[:, :-1]) * (1.0 + horizontal_pad)
normalize_ratio = mouth_width / float(mouth_right - mouth_left)
new_img_shape = (int(frame.shape[0] * normalize_ratio),
int(frame.shape[1] * normalize_ratio))
resized_img = imresize(frame, new_img_shape)
mouth_centroid_norm = mouth_centroid * normalize_ratio
mouth_l = int(mouth_centroid_norm[0] - mouth_width / 2)
mouth_r = int(mouth_centroid_norm[0] + mouth_width / 2)
mouth_t = int(mouth_centroid_norm[1] - mouth_height / 2)
mouth_b = int(mouth_centroid_norm[1] + mouth_height / 2)
mouth_crop_image = resized_img[mouth_t:mouth_b, mouth_l:mouth_r]
mouth_frames.append(mouth_crop_image)
return mouth_frames
|
Get video frames
|
def get_video_frames(self, path):
"""
Get video frames
"""
videogen = skvideo.io.vreader(path)
frames = np.array([frame for frame in videogen])
return frames
|
Prepare the input of the model
|
def set_data(self, frames):
"""
Prepare the input of the model
"""
data_frames = []
for frame in frames:
#frame H x W x C
frame = frame.swapaxes(0, 1) # swap width and height to form format W x H x C
if len(frame.shape) < 3:
frame = np.array([frame]).swapaxes(0, 2).swapaxes(0, 1) # Add grayscale channel
data_frames.append(frame)
frames_n = len(data_frames)
data_frames = np.array(data_frames) # T x W x H x C
data_frames = np.rollaxis(data_frames, 3) # C x T x W x H
data_frames = data_frames.swapaxes(2, 3) # C x T x H x W = NCDHW
self.data = data_frames
self.length = frames_n
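A shape-only sketch of the axis manipulation above, using hypothetical sizes (75 RGB frames of 50x100 pixels); it reproduces the H x W x C to C x T x H x W conversion without any video I/O.
import numpy as np

frames = [np.zeros((50, 100, 3), dtype=np.uint8) for _ in range(75)]  # H x W x C
swapped = [f.swapaxes(0, 1) for f in frames]                          # W x H x C
arr = np.array(swapped)                                               # T x W x H x C
arr = np.rollaxis(arr, 3)                                             # C x T x W x H
arr = arr.swapaxes(2, 3)                                              # C x T x H x W
print(arr.shape)                                                      # (3, 75, 50, 100)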
|
Resets the iterator to the beginning of the data.
|
def reset(self):
"""Resets the iterator to the beginning of the data."""
self.curr_idx = 0
random.shuffle(self.idx)
for buck in self.data:
np.random.shuffle(buck)
|
Returns the next batch of data.
|
def next(self):
"""Returns the next batch of data."""
if self.curr_idx == len(self.idx):
raise StopIteration
i, j = self.idx[self.curr_idx]
self.curr_idx += 1
audio_paths = []
texts = []
for duration, audio_path, text in self.data[i][j:j+self.batch_size]:
audio_paths.append(audio_path)
texts.append(text)
if self.is_first_epoch:
data_set = self.datagen.prepare_minibatch(audio_paths, texts, overwrite=True,
is_bi_graphemes=self.is_bi_graphemes,
seq_length=self.buckets[i],
save_feature_as_csvfile=self.save_feature_as_csvfile)
else:
data_set = self.datagen.prepare_minibatch(audio_paths, texts, overwrite=False,
is_bi_graphemes=self.is_bi_graphemes,
seq_length=self.buckets[i],
save_feature_as_csvfile=self.save_feature_as_csvfile)
data_all = [mx.nd.array(data_set['x'])] + self.init_state_arrays
label_all = [mx.nd.array(data_set['y'])]
self.label = label_all
provide_data = [('data', (self.batch_size, self.buckets[i], self.width * self.height))] + self.init_states
return mx.io.DataBatch(data_all, label_all, pad=0,
bucket_key=self.buckets[i],
provide_data=provide_data,
provide_label=self.provide_label)
|
Subtract the ImageNet mean pixel-wise and reorder an RGB batch to BGR.
|
def subtract_imagenet_mean_preprocess_batch(batch):
"""Subtract ImageNet mean pixel-wise from a BGR image."""
batch = F.swapaxes(batch,0, 1)
(r, g, b) = F.split(batch, num_outputs=3, axis=0)
r = r - 123.680
g = g - 116.779
b = b - 103.939
batch = F.concat(b, g, r, dim=0)
batch = F.swapaxes(batch,0, 1)
return batch
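A small sketch of calling the helper, on the assumption that the module-level ``F`` it uses is bound to ``mxnet.ndarray``; a (N, 3, H, W) batch of raw pixel values comes back mean-subtracted with channels reordered.
import mxnet as mx

batch = mx.nd.random.uniform(0, 255, shape=(2, 3, 224, 224))
out = subtract_imagenet_mean_preprocess_batch(batch)
print(out.shape)   # (2, 3, 224, 224)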
|
Not necessary in practice
|
def imagenet_clamp_batch(batch, low, high):
""" Not necessary in practice """
F.clip(batch[:,0,:,:],low-123.680, high-123.680)
F.clip(batch[:,1,:,:],low-116.779, high-116.779)
F.clip(batch[:,2,:,:],low-103.939, high-103.939)
|
Create a linear regression network for performing SVRG optimization.
:return: an instance of mx.io.NDArrayIter
:return: an instance of mx.mod.svrgmodule for performing SVRG optimization
|
def create_network(batch_size, update_freq):
"""Create a linear regression network for performing SVRG optimization.
:return: an instance of mx.io.NDArrayIter
:return: an instance of mx.mod.svrgmodule for performing SVRG optimization
"""
head = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.INFO, format=head)
data = np.random.randint(1, 5, [1000, 2])
#Test_Train data split
n_train = int(data.shape[0] * 0.8)
weights = np.array([1.0, 2.0])
label = data.dot(weights)
di = mx.io.NDArrayIter(data[:n_train, :], label[:n_train], batch_size=batch_size, shuffle=True, label_name='lin_reg_label')
val_iter = mx.io.NDArrayIter(data[n_train:, :], label[n_train:], batch_size=batch_size)
X = mx.sym.Variable('data')
Y = mx.symbol.Variable('lin_reg_label')
fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1)
lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro")
mod = SVRGModule(
symbol=lro,
data_names=['data'],
label_names=['lin_reg_label'], update_freq=update_freq, logger=logging)
return di, val_iter, mod
|
Function to evaluate accuracy of any data iterator passed to it as an argument
|
def evaluate_accuracy(data_iterator, net):
"""Function to evaluate accuracy of any data iterator passed to it as an argument"""
acc = mx.metric.Accuracy()
for data, label in data_iterator:
output = net(data)
predictions = nd.argmax(output, axis=1)
predictions = predictions.reshape((-1, 1))
acc.update(preds=predictions, labels=label)
return acc.get()[1]
|
Function responsible for running the training of the model.
|
def train(train_dir=None, train_csv=None, epochs=30, batch_size=32):
"""Function responsible for running the training the model."""
if not train_dir or not os.path.exists(train_dir) or not train_csv:
warnings.warn("No train directory could be found ")
return
# Make a dataset from the local folder containing Audio data
print("\nMaking an Audio Dataset...\n")
tick = time.time()
aud_dataset = AudioFolderDataset(train_dir, train_csv=train_csv, file_format='.wav', skip_header=True)
tock = time.time()
print("Loading the dataset took ", (tock-tick), " seconds.")
print("\n=======================================\n")
print("Number of output classes = ", len(aud_dataset.synsets))
print("\nThe labels are : \n")
print(aud_dataset.synsets)
# Get the model to train
net = model.get_net(len(aud_dataset.synsets))
print("\nNeural Network = \n")
print(net)
print("\nModel - Neural Network Generated!\n")
print("=======================================\n")
#Define the loss - Softmax CE Loss
softmax_loss = gluon.loss.SoftmaxCELoss(from_logits=False, sparse_label=True)
print("Loss function initialized!\n")
print("=======================================\n")
#Define the trainer with the optimizer
trainer = gluon.Trainer(net.collect_params(), 'adadelta')
print("Optimizer - Trainer function initialized!\n")
print("=======================================\n")
print("Loading the dataset to the Gluon's OOTB Dataloader...")
#Getting the data loader out of the AudioDataset and passing the transform
from transforms import MFCC
aud_transform = MFCC()
tick = time.time()
audio_train_loader = gluon.data.DataLoader(aud_dataset.transform_first(aud_transform), batch_size=32, shuffle=True)
tock = time.time()
print("Time taken to load data and apply transform here is ", (tock-tick), " seconds.")
print("=======================================\n")
print("Starting the training....\n")
# Training loop
tick = time.time()
batch_size = batch_size
num_examples = len(aud_dataset)
for epoch in range(epochs):
cumulative_loss = 0
for data, label in audio_train_loader:
with autograd.record():
output = net(data)
loss = softmax_loss(output, label)
loss.backward()
trainer.step(batch_size)
cumulative_loss += mx.nd.sum(loss).asscalar()
if epoch%5 == 0:
train_accuracy = evaluate_accuracy(audio_train_loader, net)
print("Epoch {}. Loss: {} Train accuracy : {} ".format(epoch, cumulative_loss/num_examples, train_accuracy))
print("\n------------------------------\n")
train_accuracy = evaluate_accuracy(audio_train_loader, net)
tock = time.time()
print("\nFinal training accuracy: ", train_accuracy)
print("Training the sound classification for ", epochs, " epochs, MLP model took ", (tock-tick), " seconds")
print("====================== END ======================\n")
print("Trying to save the model parameters here...")
net.save_parameters("./net.params")
print("Saved the model parameters in current directory.")
|
Set size limit on bulk execution.
Bulk execution bundles many operators to run together.
This can improve performance when running a lot of small
operators sequentially.
Parameters
----------
size : int
Maximum number of operators that can be bundled in a bulk.
Returns
-------
int
Previous bulk size.
|
def set_bulk_size(size):
"""Set size limit on bulk execution.
Bulk execution bundles many operators to run together.
This can improve performance when running a lot of small
operators sequentially.
Parameters
----------
size : int
Maximum number of operators that can be bundled in a bulk.
Returns
-------
int
Previous bulk size.
"""
prev = ctypes.c_int()
check_call(_LIB.MXEngineSetBulkSize(
ctypes.c_int(size), ctypes.byref(prev)))
return prev.value
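A hedged usage sketch, assuming the function is exposed as ``mx.engine.set_bulk_size``: raise the limit around a stretch of many small imperative operators, then restore the previous value.
import mxnet as mx

prev = mx.engine.set_bulk_size(32)   # bundle up to 32 small ops per bulk
# ... run many small imperative operators ...
mx.engine.set_bulk_size(prev)        # restore the previous limit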
|
calculate LM score of child beam by taking score from parent beam and bigram probability of last two chars
|
def applyLM(parentBeam, childBeam, classes, lm):
"""
calculate LM score of child beam by taking score from parent beam and bigram probability of last two chars
"""
if lm and not childBeam.lmApplied:
c1 = classes[parentBeam.labeling[-1] if parentBeam.labeling else classes.index(' ')] # first char
c2 = classes[childBeam.labeling[-1]] # second char
lmFactor = 0.01 # influence of language model
bigramProb = lm.getCharBigram(c1, c2) ** lmFactor # probability of seeing first and second char next to each other
childBeam.prText = parentBeam.prText * bigramProb # probability of char sequence
childBeam.lmApplied = True
|
add beam if it does not yet exist
|
def addBeam(beamState, labeling):
"""
add beam if it does not yet exist
"""
if labeling not in beamState.entries:
beamState.entries[labeling] = BeamEntry()
|
beam search as described in the papers of Hwang et al. and Graves et al.
|
def ctcBeamSearch(mat, classes, lm, k, beamWidth):
"""
beam search as described in the papers of Hwang et al. and Graves et al.
"""
blankIdx = len(classes)
maxT, maxC = mat.shape
# initialise beam state
last = BeamState()
labeling = ()
last.entries[labeling] = BeamEntry()
last.entries[labeling].prBlank = 1
last.entries[labeling].prTotal = 1
# go over all time-steps
for t in range(maxT):
curr = BeamState()
# get beam-labelings of best beams
bestLabelings = last.sort()[0:beamWidth]
# go over best beams
for labeling in bestLabelings:
# probability of paths ending with a non-blank
prNonBlank = 0
# in case of non-empty beam
if labeling:
# probability of paths with repeated last char at the end
try:
prNonBlank = last.entries[labeling].prNonBlank * mat[t, labeling[-1]]
except FloatingPointError:
prNonBlank = 0
# probability of paths ending with a blank
prBlank = (last.entries[labeling].prTotal) * mat[t, blankIdx]
# add beam at current time-step if needed
addBeam(curr, labeling)
# fill in data
curr.entries[labeling].labeling = labeling
curr.entries[labeling].prNonBlank += prNonBlank
curr.entries[labeling].prBlank += prBlank
curr.entries[labeling].prTotal += prBlank + prNonBlank
curr.entries[labeling].prText = last.entries[labeling].prText # beam-labeling not changed, therefore LM score unchanged from the previous time-step
curr.entries[labeling].lmApplied = True # LM already applied at previous time-step for this beam-labeling
# extend current beam-labeling
for c in range(maxC - 1):
# add new char to current beam-labeling
newLabeling = labeling + (c,)
# if new labeling contains duplicate char at the end, only consider paths ending with a blank
if labeling and labeling[-1] == c:
prNonBlank = mat[t, c] * last.entries[labeling].prBlank
else:
prNonBlank = mat[t, c] * last.entries[labeling].prTotal
# add beam at current time-step if needed
addBeam(curr, newLabeling)
# fill in data
curr.entries[newLabeling].labeling = newLabeling
curr.entries[newLabeling].prNonBlank += prNonBlank
curr.entries[newLabeling].prTotal += prNonBlank
# apply LM
applyLM(curr.entries[labeling], curr.entries[newLabeling], classes, lm)
# set new beam state
last = curr
# normalise LM scores according to beam-labeling-length
last.norm()
# sort by probability
bestLabelings = last.sort()[:k] # get most probable labeling
output = []
for bestLabeling in bestLabelings:
# map labels to chars
res = ''
for l in bestLabeling:
res += classes[l]
output.append(res)
return output
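A self-contained toy sketch of calling ``ctcBeamSearch``: a three-step output matrix over two characters plus the blank in the last column, no language model, and only the best labeling returned (the BeamState/BeamEntry classes from the same module are assumed to be in scope).
import numpy as np

classes = 'ab'
mat = np.array([[0.6, 0.3, 0.1],    # rows: time-steps; columns: 'a', 'b', blank
                [0.1, 0.7, 0.2],
                [0.2, 0.1, 0.7]])
print(ctcBeamSearch(mat, classes, lm=None, k=1, beamWidth=3))   # e.g. ['ab']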
|
return beam-labelings, sorted by probability
|
def sort(self):
"""
return beam-labelings, sorted by probability
"""
beams = [v for (_, v) in self.entries.items()]
sortedBeams = sorted(beams, reverse=True, key=lambda x: x.prTotal*x.prText)
return [x.labeling for x in sortedBeams]
|
length-normalise LM score
|
def norm(self):
"""
length-normalise LM score
"""
for (k, _) in self.entries.items():
labelingLen = len(self.entries[k].labeling)
self.entries[k].prText = self.entries[k].prText ** (1.0 / (labelingLen if labelingLen else 1.0))
|
The localisation network in lenet-stn; it improves accuracy by more than about 1%
when num-epoch >= 15
|
def get_loc(data, attr={'lr_mult':'0.01'}):
"""
The localisation network in lenet-stn; it improves accuracy by more than about 1%
when num-epoch >= 15
"""
loc = mx.symbol.Convolution(data=data, num_filter=30, kernel=(5, 5), stride=(2,2))
loc = mx.symbol.Activation(data = loc, act_type='relu')
loc = mx.symbol.Pooling(data=loc, kernel=(2, 2), stride=(2, 2), pool_type='max')
loc = mx.symbol.Convolution(data=loc, num_filter=60, kernel=(3, 3), stride=(1,1), pad=(1, 1))
loc = mx.symbol.Activation(data = loc, act_type='relu')
loc = mx.symbol.Pooling(data=loc, global_pool=True, kernel=(2, 2), pool_type='avg')
loc = mx.symbol.Flatten(data=loc)
loc = mx.symbol.FullyConnected(data=loc, num_hidden=6, name="stn_loc", attr=attr)
return loc
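A hedged sketch of how this localisation head is typically consumed: its six affine parameters feed ``mx.sym.SpatialTransformer``, which warps the input before the classification branch (the target shape here is illustrative).
import mxnet as mx

data = mx.sym.Variable('data')
stn = mx.sym.SpatialTransformer(data=data, loc=get_loc(data),
                                target_shape=(28, 28),
                                transform_type='affine',
                                sampler_type='bilinear')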
|
Wrapper for initializing a detector
Parameters:
----------
net : str
test network name
prefix : str
load model prefix
epoch : int
load model epoch
data_shape : int
resize image shape
mean_pixels : tuple (float, float, float)
mean pixel values (R, G, B)
ctx : mx.ctx
running context, mx.cpu() or mx.gpu(?)
num_class : int
number of classes
nms_thresh : float
non-maximum suppression threshold
force_nms : bool
force suppress different categories
|
def get_detector(net, prefix, epoch, data_shape, mean_pixels, ctx, num_class,
nms_thresh=0.5, force_nms=True, nms_topk=400):
"""
Wrapper for initializing a detector
Parameters:
----------
net : str
test network name
prefix : str
load model prefix
epoch : int
load model epoch
data_shape : int
resize image shape
mean_pixels : tuple (float, float, float)
mean pixel values (R, G, B)
ctx : mx.ctx
running context, mx.cpu() or mx.gpu(?)
num_class : int
number of classes
nms_thresh : float
non-maximum suppression threshold
force_nms : bool
force suppress different categories
"""
if net is not None:
if isinstance(data_shape, tuple):
data_shape = data_shape[0]
net = get_symbol(net, data_shape, num_classes=num_class, nms_thresh=nms_thresh,
force_nms=force_nms, nms_topk=nms_topk)
detector = Detector(net, prefix, epoch, data_shape, mean_pixels, ctx=ctx)
return detector
|
parse # classes and class_names if applicable
|
def parse_class_names(class_names):
""" parse # classes and class_names if applicable """
if len(class_names) > 0:
if os.path.isfile(class_names):
# try to open it to read class names
with open(class_names, 'r') as f:
class_names = [l.strip() for l in f.readlines()]
else:
class_names = [c.strip() for c in class_names.split(',')]
for name in class_names:
assert len(name) > 0
else:
raise RuntimeError("No valid class_name provided...")
return class_names
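A hedged usage sketch: either a comma-separated string or a path to a text file with one class name per line is accepted.
print(parse_class_names('aeroplane, bicycle, bird'))   # -> ['aeroplane', 'bicycle', 'bird']
# parse_class_names('names.txt') would instead read one class name per line.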
|
Parse a data-shape string into a tuple of two ints
|
def parse_data_shape(data_shape_str):
"""Parse string to tuple or int"""
ds = data_shape_str.strip().split(',')
if len(ds) == 1:
data_shape = (int(ds[0]), int(ds[0]))
elif len(ds) == 2:
data_shape = (int(ds[0]), int(ds[1]))
else:
raise ValueError("Unexpected data_shape: %s" % data_shape_str)
return data_shape
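A quick usage sketch covering both accepted forms:
print(parse_data_shape('300'))        # -> (300, 300)
print(parse_data_shape('512, 300'))   # -> (512, 300)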
|
A LeNet-style net that takes the difference of consecutive frames as input.
|
def get_lenet():
""" A lenet style net, takes difference of each frame as input.
"""
source = mx.sym.Variable("data")
source = (source - 128) * (1.0/128)
frames = mx.sym.SliceChannel(source, num_outputs=30)
diffs = [frames[i+1] - frames[i] for i in range(29)]
source = mx.sym.Concat(*diffs)
net = mx.sym.Convolution(source, kernel=(5, 5), num_filter=40)
net = mx.sym.BatchNorm(net, fix_gamma=True)
net = mx.sym.Activation(net, act_type="relu")
net = mx.sym.Pooling(net, pool_type="max", kernel=(2,2), stride=(2,2))
net = mx.sym.Convolution(net, kernel=(3, 3), num_filter=40)
net = mx.sym.BatchNorm(net, fix_gamma=True)
net = mx.sym.Activation(net, act_type="relu")
net = mx.sym.Pooling(net, pool_type="max", kernel=(2,2), stride=(2,2))
# first fullc
flatten = mx.symbol.Flatten(net)
flatten = mx.symbol.Dropout(flatten)
fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=600)
# Name the final layer as softmax so it auto matches the naming of data iterator
# Otherwise we can also change the provide_data in the data iter
return mx.symbol.LogisticRegressionOutput(data=fc1, name='softmax')
|
Custom evaluation metric: CRPS (Continuous Ranked Probability Score).
|
def CRPS(label, pred):
""" Custom evaluation metric on CRPS.
"""
for i in range(pred.shape[0]):
for j in range(pred.shape[1] - 1):
if pred[i, j] > pred[i, j + 1]:
pred[i, j + 1] = pred[i, j]
return np.sum(np.square(label - pred)) / label.size
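A tiny numeric sketch of the metric above: the prediction is first made monotonically non-decreasing along each row, then compared to the label by mean squared difference.
import numpy as np

label = np.array([[0.0, 1.0, 1.0]])
pred = np.array([[0.2, 0.1, 0.9]])    # 0.1 is raised to 0.2 by the monotonic fix
print(CRPS(label, pred))              # (0.2**2 + 0.8**2 + 0.1**2) / 3 ≈ 0.23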
|