INSTRUCTION | RESPONSE
---|---
Encode the label into the CDF target.
|
def encode_label(label_data):
"""Run encoding to encode the label into the CDF target.
"""
systole = label_data[:, 1]
diastole = label_data[:, 2]
systole_encode = np.array([
(x < np.arange(600)) for x in systole
], dtype=np.uint8)
diastole_encode = np.array([
(x < np.arange(600)) for x in diastole
], dtype=np.uint8)
return systole_encode, diastole_encode
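A minimal check of the encoding (the label matrix below is hypothetical; column 0 is assumed to be a case id, with systole and diastole volumes in columns 1 and 2). Each volume x becomes a 600-step CDF whose entry i is 1 exactly when x < i:
import numpy as np

label_data = np.array([[1, 80.0, 30.0],
                       [2, 120.0, 45.0]])
systole_encode, diastole_encode = encode_label(label_data)
print(systole_encode.shape)      # (2, 600)
print(systole_encode[0, 79:82])  # [0 0 1]: the CDF steps from 0 to 1 just above x=80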
|
coco ann: [u'segmentation', u'area', u'iscrowd', u'image_id', u'bbox', u'category_id', u'id']
iscrowd:
crowd instances are handled by setting their overlaps with all categories to -1,
and they are later excluded from training
bbox:
[x1, y1, w, h]
:param index: coco image id
:return: roidb entry
|
def _load_annotation(self, _coco, coco_ind_to_class_ind, index):
"""
coco ann: [u'segmentation', u'area', u'iscrowd', u'image_id', u'bbox', u'category_id', u'id']
iscrowd:
crowd instances are handled by setting their overlaps with all categories to -1,
and they are later excluded from training
bbox:
[x1, y1, w, h]
:param index: coco image id
:return: roidb entry
"""
im_ann = _coco.loadImgs(index)[0]
filename = self._image_file_tmpl.format(im_ann['file_name'])
width = im_ann['width']
height = im_ann['height']
annIds = _coco.getAnnIds(imgIds=index, iscrowd=None)
objs = _coco.loadAnns(annIds)
# sanitize bboxes
valid_objs = []
for obj in objs:
x, y, w, h = obj['bbox']
x1 = np.max((0, x))
y1 = np.max((0, y))
x2 = np.min((width - 1, x1 + np.max((0, w - 1))))
y2 = np.min((height - 1, y1 + np.max((0, h - 1))))
if obj['area'] > 0 and x2 >= x1 and y2 >= y1:
obj['clean_bbox'] = [x1, y1, x2, y2]
valid_objs.append(obj)
objs = valid_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs,), dtype=np.int32)
for ix, obj in enumerate(objs):
cls = coco_ind_to_class_ind[obj['category_id']]
boxes[ix, :] = obj['clean_bbox']
gt_classes[ix] = cls
roi_rec = {'index': index,
'image': filename,
'height': height,
'width': width,
'boxes': boxes,
'gt_classes': gt_classes,
'flipped': False}
return roi_rec
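The sanitization step converts COCO's [x, y, w, h] boxes into clipped [x1, y1, x2, y2] corners. A small self-contained check of that arithmetic (the box and image size here are made up):
import numpy as np

width, height = 100, 100
x, y, w, h = 90.0, 95.0, 20.0, 20.0  # box hangs past the image border
x1 = np.max((0, x))
y1 = np.max((0, y))
x2 = np.min((width - 1, x1 + np.max((0, w - 1))))
y2 = np.min((height - 1, y1 + np.max((0, h - 1))))
print([x1, y1, x2, y2])  # [90.0, 95.0, 99.0, 99.0], clipped to the image bounds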
|
example results
[{"image_id": 42,
"category_id": 18,
"bbox": [258.15,41.29,348.26,243.78],
"score": 0.236}, ...]
|
def _write_coco_results(self, _coco, detections):
""" example results
[{"image_id": 42,
"category_id": 18,
"bbox": [258.15,41.29,348.26,243.78],
"score": 0.236}, ...]
"""
cats = [cat['name'] for cat in _coco.loadCats(_coco.getCatIds())]
class_to_coco_ind = dict(zip(cats, _coco.getCatIds()))
results = []
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
logger.info('collecting %s results (%d/%d)' % (cls, cls_ind, self.num_classes - 1))
coco_cat_id = class_to_coco_ind[cls]
results.extend(self._coco_results_one_category(detections[cls_ind], coco_cat_id))
logger.info('writing results json to %s' % self._result_file)
with open(self._result_file, 'w') as f:
json.dump(results, f, sort_keys=True, indent=4)
|
Draw random samples from an approximately log-uniform or Zipfian distribution.
This operation randomly samples *num_sampled* candidates from the range of integers [0, range_max).
The elements of sampled_candidates are drawn with replacement from the base distribution.
The base distribution for this operator is an approximately log-uniform or Zipfian distribution:
P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)
This sampler is useful when the true classes approximately follow such a distribution,
for example, when the classes represent words in a lexicon sorted in decreasing order of
frequency. If your classes are not ordered by decreasing frequency, do not use this op.
Additionally, it also returns the number of times each of the
true classes and the sampled classes is expected to occur.
Parameters
----------
true_classes : NDArray
A 1-D NDArray of the target classes.
num_sampled: int
The number of classes to randomly sample.
range_max: int
The number of possible classes.
ctx : Context
Device context of output. Default is current context.
Returns
-------
samples: NDArray
The sampled candidate classes in 1-D `int64` dtype.
expected_count_true: NDArray
The expected count for true classes in 1-D `float64` dtype.
expected_count_sample: NDArray
The expected count for sampled candidates in 1-D `float64` dtype.
Examples
--------
>>> true_cls = mx.nd.array([3])
>>> samples, exp_count_true, exp_count_sample = mx.nd.contrib.rand_zipfian(true_cls, 4, 5)
>>> samples
[1 3 3 3]
<NDArray 4 @cpu(0)>
>>> exp_count_true
[ 0.12453879]
<NDArray 1 @cpu(0)>
>>> exp_count_sample
[ 0.22629439 0.12453879 0.12453879 0.12453879]
<NDArray 4 @cpu(0)>
|
def rand_zipfian(true_classes, num_sampled, range_max, ctx=None):
"""Draw random samples from an approximately log-uniform or Zipfian distribution.
This operation randomly samples *num_sampled* candidates from the range of integers [0, range_max).
The elements of sampled_candidates are drawn with replacement from the base distribution.
The base distribution for this operator is an approximately log-uniform or Zipfian distribution:
P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)
This sampler is useful when the true classes approximately follow such a distribution,
for example, when the classes represent words in a lexicon sorted in decreasing order of
frequency. If your classes are not ordered by decreasing frequency, do not use this op.
Additionally, it also returns the number of times each of the
true classes and the sampled classes is expected to occur.
Parameters
----------
true_classes : NDArray
A 1-D NDArray of the target classes.
num_sampled: int
The number of classes to randomly sample.
range_max: int
The number of possible classes.
ctx : Context
Device context of output. Default is current context.
Returns
-------
samples: NDArray
The sampled candidate classes in 1-D `int64` dtype.
expected_count_true: NDArray
The expected count for true classes in 1-D `float64` dtype.
expected_count_sample: NDArray
The expected count for sampled candidates in 1-D `float64` dtype.
Examples
--------
>>> true_cls = mx.nd.array([3])
>>> samples, exp_count_true, exp_count_sample = mx.nd.contrib.rand_zipfian(true_cls, 4, 5)
>>> samples
[1 3 3 3]
<NDArray 4 @cpu(0)>
>>> exp_count_true
[ 0.12453879]
<NDArray 1 @cpu(0)>
>>> exp_count_sample
[ 0.22629439 0.12453879 0.12453879 0.12453879]
<NDArray 4 @cpu(0)>
"""
if ctx is None:
ctx = current_context()
log_range = math.log(range_max + 1)
rand = uniform(0, log_range, shape=(num_sampled,), dtype='float64', ctx=ctx)
# make sure sampled_classes are in the range of [0, range_max)
sampled_classes = (rand.exp() - 1).astype('int64') % range_max
true_cls = true_classes.as_in_context(ctx).astype('float64')
expected_count_true = ((true_cls + 2.0) / (true_cls + 1.0)).log() / log_range * num_sampled
# cast sampled classes to fp64 to avoid integer division
sampled_cls_fp64 = sampled_classes.astype('float64')
expected_prob_sampled = ((sampled_cls_fp64 + 2.0) / (sampled_cls_fp64 + 1.0)).log() / log_range
expected_count_sampled = expected_prob_sampled * num_sampled
return sampled_classes, expected_count_true, expected_count_sampled
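The sampling trick above draws u ~ Uniform(0, log(range_max + 1)) and maps it through exp(u) - 1, which induces the stated class probabilities. A NumPy-only sketch (independent of MXNet) that checks the empirical frequencies against P(class):
import numpy as np

range_max = 5
log_range = np.log(range_max + 1)
rng = np.random.default_rng(0)
u = rng.uniform(0, log_range, size=200000)
samples = (np.exp(u) - 1).astype(np.int64) % range_max
emp = np.bincount(samples, minlength=range_max) / samples.size
cls = np.arange(range_max)
expected = np.log((cls + 2.0) / (cls + 1.0)) / log_range
print(np.round(emp, 3))       # close to the line below
print(np.round(expected, 3))  # [0.387 0.226 0.161 0.125 0.102]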
|
Run a for loop with user-defined computation over NDArrays on dimension 0.
This operator simulates a for loop, where body defines the computation for one iteration
of the for loop. It runs the computation in body on each slice from the input
NDArrays.
body takes two arguments as input and outputs a tuple of two elements,
as illustrated below::
out, states = body(data1, states)
data1 can be either an NDArray or a list of NDArrays. If data is an NDArray,
data1 is an NDArray. Otherwise, data1 is a list of NDArrays and has the same
size as data. states is a list of NDArrays and has the same size as init_states.
Similarly, out can be either an NDArray or a list of NDArrays, which are concatenated
as the first output of foreach; states from the last execution of body
are the second output of foreach.
The computation done by this operator is equivalent to the pseudo code below
when the input data is NDArray::
states = init_states
outs = []
for i in range(data.shape[0]):
s = data[i]
out, states = body(s, states)
outs.append(out)
outs = stack(*outs)
Parameters
----------
body : a Python function.
Define computation in an iteration.
data: an NDArray or a list of NDArrays.
The input data.
init_states: an NDArray or nested lists of NDArrays.
The initial values of the loop states.
name: string.
The name of the operator.
Returns
-------
outputs: an NDArray or nested lists of NDArrays.
The output data concatenated from the output of all iterations.
states: an NDArray or nested lists of NDArrays.
The loop states in the last iteration.
Examples
--------
>>> step = lambda data, states: (data + states[0], [states[0] * 2])
>>> data = mx.nd.random.uniform(shape=(2, 10))
>>> states = [mx.nd.random.uniform(shape=(10))]
>>> outs, states = mx.nd.contrib.foreach(step, data, states)
|
def foreach(body, data, init_states):
"""Run a for loop with user-defined computation over NDArrays on dimension 0.
This operator simulates a for loop, where body defines the computation for one iteration
of the for loop. It runs the computation in body on each slice from the input
NDArrays.
body takes two arguments as input and outputs a tuple of two elements,
as illustrated below::
out, states = body(data1, states)
data1 can be either an NDArray or a list of NDArrays. If data is an NDArray,
data1 is an NDArray. Otherwise, data1 is a list of NDArrays and has the same
size as data. states is a list of NDArrays and has the same size as init_states.
Similarly, out can be either an NDArray or a list of NDArrays, which are concatenated
as the first output of foreach; states from the last execution of body
are the second output of foreach.
The computation done by this operator is equivalent to the pseudo code below
when the input data is NDArray::
states = init_states
outs = []
for i in range(data.shape[0]):
s = data[i]
out, states = body(s, states)
outs.append(out)
outs = stack(*outs)
Parameters
----------
body : a Python function.
Define computation in an iteration.
data: an NDArray or a list of NDArrays.
The input data.
init_states: an NDArray or nested lists of NDArrays.
The initial values of the loop states.
name: string.
The name of the operator.
Returns
-------
outputs: an NDArray or nested lists of NDArrays.
The output data concatenated from the output of all iterations.
states: an NDArray or nested lists of NDArrays.
The loop states in the last iteration.
Examples
--------
>>> step = lambda data, states: (data + states[0], [states[0] * 2])
>>> data = mx.nd.random.uniform(shape=(2, 10))
>>> states = [mx.nd.random.uniform(shape=(10))]
>>> outs, states = mx.nd.contrib.foreach(step, data, states)
"""
def check_input(inputs, in_type, msg):
is_NDArray_or_list = True
if isinstance(inputs, list):
for i in inputs:
if not isinstance(i, in_type):
is_NDArray_or_list = False
break
else:
is_NDArray_or_list = isinstance(inputs, in_type)
assert is_NDArray_or_list, msg
flatten, _ = _flatten(data, "foreach input")
check_input(flatten, ndarray.NDArray,
"data should be an NDArray or a nested list of NDArrays")
flatten, _ = _flatten(init_states, "foreach states")
check_input(flatten, ndarray.NDArray,
"init_states should be an NDArray or a nested list of NDArrays")
not_data_list = isinstance(data, ndarray.NDArray)
num_iters = data.shape[0] if not_data_list else data[0].shape[0]
states = init_states
outputs = []
for i in range(num_iters):
if not_data_list:
eles = data[i]
else:
eles = [d[i] for d in data]
outs, states = body(eles, states)
outs, out_fmt = _flatten(outs, "foreach output")
outputs.append(outs)
outputs = zip(*outputs)
tmp_outputs = []
for out in outputs:
tmp_outputs.append(ndarray.op.stack(*out))
outputs = tmp_outputs
outputs, _ = _regroup(outputs, out_fmt)
return (outputs, states)
|
Run a while loop with user-defined computation and loop condition.
This operator simulates a while loop which iteratively does customized computation
as long as the condition is satisfied.
`loop_vars` is a list of NDArrays that the computation uses.
`cond` is a user-defined function, used as the loop condition.
It consumes `loop_vars`, and produces a scalar MXNet NDArray,
indicating the termination of the loop.
The loop ends when `cond` returns false (zero).
The `cond` is variadic, and its signature should be
`cond(*loop_vars) => NDArray`.
`func` is a user-defined function, used as the loop body.
It also consumes `loop_vars`, and produces `step_output` and `new_loop_vars` at each step.
In each step, `step_output` should contain the same number of elements.
Through all steps, the i-th element of `step_output` should have the same shape and dtype.
Also, `new_loop_vars` should contain the same number of elements as `loop_vars`,
and the corresponding element should have the same shape and dtype.
The `func` is variadic, and its signature should be
`func(*loop_vars) =>
(NDArray or nested List[NDArray] step_output, NDArray or nested List[NDArray] new_loop_vars)`.
`max_iterations` is a scalar that defines the maximum number of iterations allowed.
This function returns two lists.
The first list has the length of `|step_output|`,
in which the i-th element is formed by stacking the i-th elements of
`step_output` from all steps along axis 0.
The second list has the length of `|loop_vars|`,
which represents final states of loop variables.
.. warning::
For now, the axis 0 of all NDArrays in the first list is `max_iterations`,
due to lack of dynamic shape inference.
.. warning::
When `cond` is never satisfied, we assume `step_output` is empty,
because it cannot be inferred. This is different from the symbolic version.
Parameters
----------
cond: a Python function.
The loop condition.
func: a Python function.
The loop body.
loop_vars: an NDArray or nested lists of NDArrays.
The initial values of the loop variables.
max_iterations: a python int.
Maximum number of iterations.
Returns
-------
outputs: an NDArray or nested lists of NDArrays
stacked output from each step
states: an NDArray or nested lists of NDArrays
final state
Examples
--------
>>> cond = lambda i, s: i <= 5
>>> func = lambda i, s: ([i + s], [i + 1, s + i])
>>> loop_vars = (mx.nd.array([0], dtype="int64"), mx.nd.array([1], dtype="int64"))
>>> outputs, states = mx.nd.contrib.while_loop(cond, func, loop_vars, max_iterations=10)
>>> outputs
[
[[ 1]
[ 2]
[ 4]
[ 7]
[11]
[16]
[...] # undefined value
[...]
[...]
[...]]
<NDArray 6x1 @cpu(0)>]
>>> states
[
[6]
<NDArray 1 @cpu(0)>,
[16]
<NDArray 1 @cpu(0)>]
|
def while_loop(cond, func, loop_vars, max_iterations=None):
"""Run a while loop with user-defined computation and loop condition.
This operator simulates a while loop which iteratively does customized computation
as long as the condition is satisfied.
`loop_vars` is a list of NDArrays that the computation uses.
`cond` is a user-defined function, used as the loop condition.
It consumes `loop_vars`, and produces a scalar MXNet NDArray,
indicating the termination of the loop.
The loop ends when `cond` returns false (zero).
The `cond` is variadic, and its signature should be
`cond(*loop_vars) => NDArray`.
`func` is a user-defined function, used as the loop body.
It also consumes `loop_vars`, and produces `step_output` and `new_loop_vars` at each step.
In each step, `step_output` should contain the same number of elements.
Through all steps, the i-th element of `step_output` should have the same shape and dtype.
Also, `new_loop_vars` should contain the same number of elements as `loop_vars`,
and the corresponding element should have the same shape and dtype.
The `func` is variadic, and its signature should be
`func(*loop_vars) =>
(NDArray or nested List[NDArray] step_output, NDArray or nested List[NDArray] new_loop_vars)`.
`max_iterations` is a scalar that defines the maximum number of iterations allowed.
This function returns two lists.
The first list has the length of `|step_output|`,
in which the i-th element is formed by stacking the i-th elements of
`step_output` from all steps along axis 0.
The second list has the length of `|loop_vars|`,
which represents final states of loop variables.
.. warning::
For now, the axis 0 of all NDArrays in the first list is `max_iterations`,
due to lack of dynamic shape inference.
.. warning::
When `cond` is never satisfied, we assume `step_output` is empty,
because it cannot be inferred. This is different from the symbolic version.
Parameters
----------
cond: a Python function.
The loop condition.
func: a Python function.
The loop body.
loop_vars: an NDArray or nested lists of NDArrays.
The initial values of the loop variables.
max_iterations: a python int.
Maximum number of iterations.
Returns
-------
outputs: an NDArray or nested lists of NDArrays
stacked output from each step
states: an NDArray or nested lists of NDArrays
final state
Examples
--------
>>> cond = lambda i, s: i <= 5
>>> func = lambda i, s: ([i + s], [i + 1, s + i])
>>> loop_vars = (mx.nd.array([0], dtype="int64"), mx.nd.array([1], dtype="int64"))
>>> outputs, states = mx.nd.contrib.while_loop(cond, func, loop_vars, max_iterations=10)
>>> outputs
[
[[ 1]
[ 2]
[ 4]
[ 7]
[11]
[16]
[...] # undefined value
[...]
[...]
[...]]
<NDArray 6x1 @cpu(0)>]
>>> states
[
[6]
<NDArray 1 @cpu(0)>,
[16]
<NDArray 1 @cpu(0)>]
"""
def _to_python_scalar(inputs, type_, name):
"""Converts "inputs", possibly typed mxnet NDArray, a numpy ndarray, other python types,
to the given type
"""
if isinstance(inputs, ndarray.NDArray):
inputs = inputs.asscalar()
try:
inputs = type_(inputs)
except Exception:
raise ValueError("Cannot convert %s to python %s" % (name, type_.__name__))
return inputs
def _func_wrapper(loop_vars):
"""This wrapper unifies
"func: loop_vars -> new_loop_vars"
and "func: loop_vars -> (step_output, new_loop_vars)"
into "func: loop_vars -> (None or tuple of step_outputs, tuple of new_loop_vars)
"""
step_output, new_loop_vars = func(*loop_vars)
if step_output is None:
step_output = []
if new_loop_vars is None:
new_loop_vars = []
if isinstance(step_output, tuple):
step_output = list(step_output)
if isinstance(new_loop_vars, tuple):
new_loop_vars = list(new_loop_vars)
new_loop_vars = _as_list(new_loop_vars)
if len(loop_vars) != len(new_loop_vars):
raise ValueError("The length of loop_vars should be consistent during the loop")
return step_output, new_loop_vars
if max_iterations is None:
raise ValueError("max_iterations should be specified")
max_iterations = _to_python_scalar(max_iterations, int, "max_iteration")
# The loop would likely still work if loop_vars were empty,
# but supporting that case is semantically unnecessary.
if len(loop_vars) == 0:
raise ValueError("loop_vars should contain at least one element")
steps = 0
outputs = []
# there might not be an iteration.
out_fmt = None
not_loop_var_list = isinstance(loop_vars, ndarray.NDArray)
loop_vars = _as_list(loop_vars)
while steps < max_iterations and \
_to_python_scalar(cond(*loop_vars), bool, "Return value of cond"): # loop condition
step_output, loop_vars = _func_wrapper(loop_vars)
step_output, out_fmt = _flatten(step_output, "while output")
outputs.append(step_output)
steps += 1
if len(outputs) != steps or len(step_output) != len(outputs[0]):
raise ValueError("Number of elements in step_output should be the same in each step")
stacked_outputs = []
for i_th, items in enumerate(zip(*outputs), 1):
# `mx.ndarray.pad` only support 4-D or 5-D inputs for now
# so we could not use it.
items = [x.expand_dims(0) for x in items]
if steps != max_iterations and items:
pad_shape = [max_iterations - steps] + list(items[0].shape[1: ])
pad = ndarray.empty(
shape=pad_shape,
ctx=items[0].context,
dtype=items[0].dtype,
)
items = list(items) + [pad]
try:
stacked_outputs.append(ndarray.op.concat(*items, dim=0))
except ValueError:
raise ValueError("\n".join(
["Shapes of %d-th elements in step_outputs are inconsistent, which are:" % i_th] +
[" Step %d, shape is %s" % (i, str(x.shape)) for i, x in enumerate(items)]
))
if out_fmt is not None:
stacked_outputs, _ = _regroup(stacked_outputs, out_fmt)
if not_loop_var_list:
loop_vars = loop_vars[0]
return stacked_outputs, loop_vars
|
Run an if-then-else using a user-defined condition and computation.
This operator simulates an if-like branch which chooses to do one of
the two customized computations according to the specified condition.
`pred` is a scalar MXNet NDArray,
indicating which branch of computation should be used.
`then_func` is a user-defined function, used as computation of the then branch.
It produces `outputs`, which is a list of NDArrays.
The signature of `then_func` should be
`then_func() => NDArray or nested List[NDArray]`.
`else_func` is a user-defined function, used as computation of the else branch.
It produces `outputs`, which is a list of NDArrays.
The signature of `else_func` should be
`else_func() => NDArray or nested List[NDArray]`.
The `outputs` produced by `then_func` and `else_func` should have the same number
of elements, all of which should be in the same shape, of the same dtype and stype.
This function returns the computation result as an NDArray or nested lists of NDArrays.
Parameters
----------
pred: a MXNet NDArray representing a scalar.
The branch condition.
then_func: a Python function.
The computation to be executed if `pred` is true.
else_func: a Python function.
The computation to be executed if `pred` is false.
Returns
-------
outputs: an NDArray or nested lists of NDArrays, representing the result of computation.
Examples
--------
>>> a, b = mx.nd.array([1]), mx.nd.array([2])
>>> pred = a * b < 5
>>> then_func = lambda: (a + 5) * (b + 5)
>>> else_func = lambda: (a - 5) * (b - 5)
>>> outputs = mx.nd.contrib.cond(pred, then_func, else_func)
>>> outputs[0]
[42.]
<NDArray 1 @cpu(0)>
|
def cond(pred, then_func, else_func):
"""Run an if-then-else using user-defined condition and computation
This operator simulates a if-like branch which chooses to do one of
the two customized computations according to the specified condition.
`pred` is a scalar MXNet NDArray,
indicating which branch of computation should be used.
`then_func` is a user-defined function, used as computation of the then branch.
It produces `outputs`, which is a list of NDArrays.
The signature of `then_func` should be
`then_func() => NDArray or nested List[NDArray]`.
`else_func` is a user-defined function, used as computation of the else branch.
It produces `outputs`, which is a list of NDArrays.
The signature of `else_func` should be
`else_func() => NDArray or nested List[NDArray]`.
The `outputs` produced by `then_func` and `else_func` should have the same number
of elements, all of which should be in the same shape, of the same dtype and stype.
This function returns the computation result as an NDArray or nested lists of NDArrays.
Parameters
----------
pred: a MXNet NDArray representing a scalar.
The branch condition.
then_func: a Python function.
The computation to be executed if `pred` is true.
else_func: a Python function.
The computation to be executed if `pred` is false.
Returns
-------
outputs: an NDArray or nested lists of NDArrays, representing the result of computation.
Examples
--------
>>> a, b = mx.nd.array([1]), mx.nd.array([2])
>>> pred = a * b < 5
>>> then_func = lambda: (a + 5) * (b + 5)
>>> else_func = lambda: (a - 5) * (b - 5)
>>> outputs = mx.nd.contrib.cond(pred, then_func, else_func)
>>> outputs[0]
[42.]
<NDArray 1 @cpu(0)>
"""
def _to_python_scalar(inputs, type_, name):
"""Converts "inputs", possibly typed mxnet NDArray, a numpy ndarray, other python types,
to the given type
"""
if hasattr(inputs, "asscalar"):
inputs = inputs.asscalar()
try:
inputs = type_(inputs)
except Exception:
raise ValueError("Cannot convert %s to python %s" % (name, type_.__name__))
return inputs
branch = _to_python_scalar(pred, bool, "pred")
if branch:
return then_func()
else:
return else_func()
|
Performs an element-wise check for whether each element of the NDArray is finite
(i.e. neither NaN nor positive or negative infinity).
Parameters
----------
input : NDArray
An N-D NDArray.
Returns
-------
output: NDArray
The output NDArray, with the same shape as the input, where 1 indicates the
element is finite (neither NaN nor positive or negative infinity) and 0
indicates it is not.
Examples
--------
>>> data = mx.nd.array([np.inf, -np.inf, np.NINF, -1])
>>> output = mx.nd.contrib.isfinite(data)
>>> output
[0. 0. 0. 1.]
<NDArray 4 @cpu(0)>
|
def isfinite(data):
"""Performs an element-wise check to determine if the NDArray contains an infinite element
or not.
Parameters
----------
input : NDArray
An N-D NDArray.
Returns
-------
output: NDArray
The output NDArray, with the same shape as the input, where 1 indicates the
element is finite (neither NaN nor positive or negative infinity) and 0
indicates it is not.
Examples
--------
>>> data = mx.nd.array([np.inf, -np.inf, np.NINF, -1])
>>> output = mx.nd.contrib.isfinite(data)
>>> output
[0. 0. 0. 1.]
<NDArray 4 @cpu(0)>
"""
is_data_not_nan = data == data
is_data_not_infinite = data.abs() != np.inf
return ndarray.logical_and(is_data_not_infinite, is_data_not_nan)
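The implementation relies on two element-wise identities: NaN is the only value not equal to itself, and infinities are the only values whose absolute value equals np.inf. The same trick in plain NumPy:
import numpy as np

data = np.array([np.inf, -np.inf, np.nan, -1.0])
not_nan = data == data             # False only for NaN
not_inf = np.abs(data) != np.inf   # False only for +/-inf
print((not_nan & not_inf).astype(np.float32))  # [0. 0. 0. 1.]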
|
LSTM Cell symbol
|
def vanilla_lstm(num_hidden, indata, prev_state, param, seqidx, layeridx, is_batchnorm=False, gamma=None, beta=None, name=None):
"""LSTM Cell symbol"""
i2h = mx.sym.FullyConnected(data=indata,
weight=param.i2h_weight,
bias=param.i2h_bias,
num_hidden=num_hidden * 4,
name="t%d_l%d_i2h" % (seqidx, layeridx))
if is_batchnorm:
if name is not None:
i2h = batchnorm(net=i2h, gamma=gamma, beta=beta, name="%s_batchnorm" % name)
else:
i2h = batchnorm(net=i2h, gamma=gamma, beta=beta)
h2h = mx.sym.FullyConnected(data=prev_state.h,
weight=param.h2h_weight,
bias=param.h2h_bias,
num_hidden=num_hidden * 4,
name="t%d_l%d_h2h" % (seqidx, layeridx))
gates = i2h + h2h
slice_gates = mx.sym.SliceChannel(gates, num_outputs=4,
name="t%d_l%d_slice" % (seqidx, layeridx))
in_gate = mx.sym.Activation(slice_gates[0], act_type="sigmoid")
in_transform = mx.sym.Activation(slice_gates[1], act_type="tanh")
forget_gate = mx.sym.Activation(slice_gates[2], act_type="sigmoid")
out_gate = mx.sym.Activation(slice_gates[3], act_type="sigmoid")
next_c = (forget_gate * prev_state.c) + (in_gate * in_transform)
next_h = out_gate * mx.sym.Activation(next_c, act_type="tanh")
return LSTMState(c=next_c, h=next_h)
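LSTMState and LSTMParam are not defined in this snippet; in the MXNet example code they are plain namedtuples, which a sketch could declare as below. Note the peephole variant further down additionally assumes fields such as c2i_bias, c2f_bias, c2o_bias, and ph2h_weight on its param tuple:
from collections import namedtuple

# Assumed container types, matching the fields accessed above.
LSTMState = namedtuple("LSTMState", ["c", "h"])
LSTMParam = namedtuple("LSTMParam", ["i2h_weight", "i2h_bias",
                                     "h2h_weight", "h2h_bias"])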
|
LSTM Cell symbol
|
def lstm(num_hidden, indata, prev_state, param, seqidx, layeridx, dropout=0., num_hidden_proj=0, is_batchnorm=False,
gamma=None, beta=None, name=None):
"""LSTM Cell symbol"""
# dropout input
if dropout > 0.:
indata = mx.sym.Dropout(data=indata, p=dropout)
i2h = mx.sym.FullyConnected(data=indata,
weight=param.i2h_weight,
bias=param.i2h_bias,
num_hidden=num_hidden * 4,
name="t%d_l%d_i2h" % (seqidx, layeridx))
if is_batchnorm:
if name is not None:
i2h = batchnorm(net=i2h, gamma=gamma, beta=beta, name="%s_batchnorm" % name)
else:
i2h = batchnorm(net=i2h, gamma=gamma, beta=beta)
h2h = mx.sym.FullyConnected(data=prev_state.h,
weight=param.h2h_weight,
# bias=param.h2h_bias,
no_bias=True,
num_hidden=num_hidden * 4,
name="t%d_l%d_h2h" % (seqidx, layeridx))
gates = i2h + h2h
slice_gates = mx.sym.SliceChannel(gates, num_outputs=4,
name="t%d_l%d_slice" % (seqidx, layeridx))
Wcidc = mx.sym.broadcast_mul(param.c2i_bias, prev_state.c) + slice_gates[0]
in_gate = mx.sym.Activation(Wcidc, act_type="sigmoid")
in_transform = mx.sym.Activation(slice_gates[1], act_type="tanh")
Wcfdc = mx.sym.broadcast_mul(param.c2f_bias, prev_state.c) + slice_gates[2]
forget_gate = mx.sym.Activation(Wcfdc, act_type="sigmoid")
next_c = (forget_gate * prev_state.c) + (in_gate * in_transform)
Wcoct = mx.sym.broadcast_mul(param.c2o_bias, next_c) + slice_gates[3]
out_gate = mx.sym.Activation(Wcoct, act_type="sigmoid")
next_h = out_gate * mx.sym.Activation(next_c, act_type="tanh")
if num_hidden_proj > 0:
proj_next_h = mx.sym.FullyConnected(data=next_h,
weight=param.ph2h_weight,
no_bias=True,
num_hidden=num_hidden_proj,
name="t%d_l%d_ph2h" % (seqidx, layeridx))
return LSTMState(c=next_c, h=proj_next_h)
else:
return LSTMState(c=next_c, h=next_h)
|
read, resize, transform image, return im_tensor, im_info, gt_boxes
roi_rec should have keys: ["image", "boxes", "gt_classes", "flipped"]
0 --- x (width, second dim of im)
|
y (height, first dim of im)
|
def get_image(roi_rec, short, max_size, mean, std):
"""
read, resize, transform image, return im_tensor, im_info, gt_boxes
roi_rec should have keys: ["image", "boxes", "gt_classes", "flipped"]
0 --- x (width, second dim of im)
|
y (height, first dim of im)
"""
im = imdecode(roi_rec['image'])
if roi_rec["flipped"]:
im = im[:, ::-1, :]
im, im_scale = resize(im, short, max_size)
height, width = im.shape[:2]
im_info = np.array([height, width, im_scale], dtype=np.float32)
im_tensor = transform(im, mean, std)
# gt boxes: (x1, y1, x2, y2, cls)
if roi_rec['gt_classes'].size > 0:
gt_inds = np.where(roi_rec['gt_classes'] != 0)[0]
gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
gt_boxes[:, 0:4] = roi_rec['boxes'][gt_inds, :]
gt_boxes[:, 4] = roi_rec['gt_classes'][gt_inds]
# scale gt_boxes
gt_boxes[:, 0:4] *= im_scale
else:
gt_boxes = np.empty((0, 5), dtype=np.float32)
return im_tensor, im_info, gt_boxes
|
Return BGR image read by opencv
|
def imdecode(image_path):
"""Return BGR image read by opencv"""
import os
assert os.path.exists(image_path), image_path + ' not found'
im = cv2.imread(image_path)
return im
|
resize the input image so its short side matches the target size, and return the scale
:param im: BGR image read by opencv
:param short: target length of the short side
:param max_size: maximum allowed length of the long side
:return: resized image (numpy.ndarray) and scale (float)
|
def resize(im, short, max_size):
"""
resize the input image so its short side matches the target size, and return the scale
:param im: BGR image read by opencv
:param short: target length of the short side
:param max_size: maximum allowed length of the long side
:return: resized image (numpy.ndarray) and scale (float)
"""
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(short) / float(im_size_min)
# prevent bigger axis from being more than max_size:
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
return im, im_scale
|
transform into an mxnet tensor:
subtract the per-channel pixel mean, divide by the std, and convert BGR HWC to RGB CHW
:param im: [height, width, channel] in BGR
:param mean: [RGB pixel mean]
:param std: [RGB pixel std]
:return: [channel, height, width]
|
def transform(im, mean, std):
"""
transform into an mxnet tensor:
subtract the per-channel pixel mean, divide by the std, and convert BGR HWC to RGB CHW
:param im: [height, width, channel] in BGR
:param mean: [RGB pixel mean]
:param std: [RGB pixel std]
:return: [channel, height, width]
"""
im_tensor = np.zeros((3, im.shape[0], im.shape[1]))
for i in range(3):
im_tensor[i, :, :] = (im[:, :, 2 - i] - mean[i]) / std[i]
return im_tensor
|
transform from mxnet im_tensor to ordinary RGB image
im_tensor is limited to one image
:param im_tensor: [channel, height, width]
:param mean: [RGB pixel mean]
:param std: [RGB pixel std]
:return: im [height, width, channel(RGB)]
|
def transform_inverse(im_tensor, mean, std):
"""
transform from mxnet im_tensor to ordinary RGB image
im_tensor is limited to one image
:param im_tensor: [channel, height, width]
:param mean: [RGB pixel mean]
:param std: [RGB pixel std]
:return: im [height, width, channel(RGB)]
"""
assert im_tensor.shape[0] == 3
im = im_tensor.transpose((1, 2, 0))
im = im * std + mean
im = im.astype(np.uint8)
return im
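transform and transform_inverse are meant to be inverses up to the uint8 cast. A quick NumPy round-trip check (the image and statistics below are made up; transform takes BGR input while transform_inverse returns RGB):
import numpy as np

im = np.random.randint(0, 256, size=(4, 4, 3)).astype(np.float32)  # fake BGR image
mean = [123.68, 116.779, 103.939]
std = [1.0, 1.0, 1.0]
tensor = transform(im, mean, std)                 # (3, H, W), RGB channel order
recovered = transform_inverse(tensor, mean, std)  # (H, W, 3), RGB
# Compare against the BGR input with channels reversed; allow off-by-one
# from the float round trip followed by the uint8 cast.
print(np.abs(recovered.astype(np.float32) - im[:, :, ::-1]).max() <= 1.0)  # True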
|
vertically stack tensors by adding a new axis
expand dims if only 1 tensor
:param tensor_list: list of tensors to be stacked vertically
:param pad: value to pad with
:return: tensor with max shape
|
def tensor_vstack(tensor_list, pad=0):
"""
vertically stack tensors by adding a new axis
expand dims if only 1 tensor
:param tensor_list: list of tensors to be stacked vertically
:param pad: value to pad with
:return: tensor with max shape
"""
if len(tensor_list) == 1:
return tensor_list[0][np.newaxis, :]
ndim = len(tensor_list[0].shape)
dimensions = [len(tensor_list)] # first dim is batch size
for dim in range(ndim):
dimensions.append(max([tensor.shape[dim] for tensor in tensor_list]))
dtype = tensor_list[0].dtype
if pad == 0:
all_tensor = np.zeros(tuple(dimensions), dtype=dtype)
elif pad == 1:
all_tensor = np.ones(tuple(dimensions), dtype=dtype)
else:
all_tensor = np.full(tuple(dimensions), pad, dtype=dtype)
if ndim == 1:
for ind, tensor in enumerate(tensor_list):
all_tensor[ind, :tensor.shape[0]] = tensor
elif ndim == 2:
for ind, tensor in enumerate(tensor_list):
all_tensor[ind, :tensor.shape[0], :tensor.shape[1]] = tensor
elif ndim == 3:
for ind, tensor in enumerate(tensor_list):
all_tensor[ind, :tensor.shape[0], :tensor.shape[1], :tensor.shape[2]] = tensor
else:
raise Exception('Sorry, unimplemented.')
return all_tensor
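A small usage sketch: two 2-D tensors with different shapes are stacked into a batch padded to the per-dimension maximum (the arrays here are arbitrary):
import numpy as np

a = np.array([[1, 2, 3]])
b = np.array([[4, 5], [6, 7]])
stacked = tensor_vstack([a, b], pad=-1)
print(stacked.shape)  # (2, 2, 3): batch of 2, padded to the max shape
print(stacked[1])     # [[ 4  5 -1]
                      #  [ 6  7 -1]]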
|
Get distance matrix given a matrix. Used in testing.
|
def get_distance_matrix(x):
"""Get distance matrix given a matrix. Used in testing."""
square = nd.sum(x ** 2.0, axis=1, keepdims=True)
distance_square = square + square.transpose() - (2.0 * nd.dot(x, x.transpose()))
return nd.sqrt(distance_square)
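The distance matrix uses the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b; a NumPy check of that identity (rounding can leave tiny negative values near the diagonal before the square root):
import numpy as np

x = np.random.default_rng(0).normal(size=(5, 3))
square = (x ** 2).sum(axis=1, keepdims=True)
d2 = square + square.T - 2.0 * (x @ x.T)
ref = ((x[:, None, :] - x[None, :, :]) ** 2).sum(axis=-1)
print(np.allclose(d2, ref))  # True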
|
Evaluate embeddings based on Recall@k.
|
def evaluate_emb(emb, labels):
"""Evaluate embeddings based on Recall@k."""
d_mat = get_distance_matrix(emb)
d_mat = d_mat.asnumpy()
labels = labels.asnumpy()
names = []
accs = []
for k in [1, 2, 4, 8, 16]:
names.append('Recall@%d' % k)
correct, cnt = 0.0, 0.0
for i in range(emb.shape[0]):
d_mat[i, i] = 1e10
nns = argpartition(d_mat[i], k)[:k]
if any(labels[i] == labels[nn] for nn in nns):
correct += 1
cnt += 1
accs.append(correct/cnt)
return names, accs
|
Get learning rate based on schedule.
|
def get_lr(lr, epoch, steps, factor):
"""Get learning rate based on schedule."""
for s in steps:
if epoch >= s:
lr *= factor
return lr
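For example, with steps=[20, 40] and factor=0.1, the schedule multiplies the base rate once per boundary already passed:
print(get_lr(0.1, 10, [20, 40], 0.1))  # 0.1    (no boundary passed)
print(get_lr(0.1, 25, [20, 40], 0.1))  # ~0.01  (one boundary passed)
print(get_lr(0.1, 45, [20, 40], 0.1))  # ~0.001 (both boundaries passed)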
|
Training function.
|
def train(epochs, ctx):
"""Training function."""
if isinstance(ctx, mx.Context):
ctx = [ctx]
net.initialize(mx.init.Xavier(magnitude=2), ctx=ctx)
opt_options = {'learning_rate': opt.lr, 'wd': opt.wd}
if opt.optimizer == 'sgd':
opt_options['momentum'] = 0.9
if opt.optimizer == 'adam':
opt_options['epsilon'] = 1e-7
trainer = gluon.Trainer(net.collect_params(), opt.optimizer,
opt_options,
kvstore=opt.kvstore)
if opt.lr_beta > 0.0:
# Jointly train class-specific beta.
# See "sampling matters in deep embedding learning" paper for details.
beta.initialize(mx.init.Constant(opt.beta), ctx=ctx)
trainer_beta = gluon.Trainer([beta], 'sgd',
{'learning_rate': opt.lr_beta, 'momentum': 0.9},
kvstore=opt.kvstore)
loss = MarginLoss(margin=opt.margin, nu=opt.nu)
best_val = 0.0
for epoch in range(epochs):
tic = time.time()
prev_loss, cumulative_loss = 0.0, 0.0
# Learning rate schedule.
trainer.set_learning_rate(get_lr(opt.lr, epoch, steps, opt.factor))
logging.info('Epoch %d learning rate=%f', epoch, trainer.learning_rate)
if opt.lr_beta > 0.0:
trainer_beta.set_learning_rate(get_lr(opt.lr_beta, epoch, steps, opt.factor))
logging.info('Epoch %d beta learning rate=%f', epoch, trainer_beta.learning_rate)
# Inner training loop.
for i in range(200):
batch = train_data.next()
data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
Ls = []
with ag.record():
for x, y in zip(data, label):
a_indices, anchors, positives, negatives, _ = net(x)
if opt.lr_beta > 0.0:
L = loss(anchors, positives, negatives, beta, y[a_indices])
else:
L = loss(anchors, positives, negatives, opt.beta, None)
# Store the loss and do backward after we have done forward
# on all GPUs for better speed on multiple GPUs.
Ls.append(L)
cumulative_loss += nd.mean(L).asscalar()
for L in Ls:
L.backward()
# Update.
trainer.step(batch.data[0].shape[0])
if opt.lr_beta > 0.0:
trainer_beta.step(batch.data[0].shape[0])
if (i+1) % opt.log_interval == 0:
logging.info('[Epoch %d, Iter %d] training loss=%f' % (
epoch, i+1, cumulative_loss - prev_loss))
prev_loss = cumulative_loss
logging.info('[Epoch %d] training loss=%f'%(epoch, cumulative_loss))
logging.info('[Epoch %d] time cost: %f'%(epoch, time.time()-tic))
names, val_accs = test(ctx)
for name, val_acc in zip(names, val_accs):
logging.info('[Epoch %d] validation: %s=%f'%(epoch, name, val_acc))
if val_accs[0] > best_val:
best_val = val_accs[0]
logging.info('Saving %s.' % opt.save_model_prefix)
net.save_parameters('%s.params' % opt.save_model_prefix)
return best_val
|
Returns symbol for LSTM model up to loss/softmax
|
def _lstm_unroll_base(num_lstm_layer, seq_len, num_hidden):
""" Returns symbol for LSTM model up to loss/softmax"""
param_cells = []
last_states = []
for i in range(num_lstm_layer):
param_cells.append(LSTMParam(i2h_weight=mx.sym.Variable("l%d_i2h_weight" % i),
i2h_bias=mx.sym.Variable("l%d_i2h_bias" % i),
h2h_weight=mx.sym.Variable("l%d_h2h_weight" % i),
h2h_bias=mx.sym.Variable("l%d_h2h_bias" % i)))
state = LSTMState(c=mx.sym.Variable("l%d_init_c" % i),
h=mx.sym.Variable("l%d_init_h" % i))
last_states.append(state)
assert len(last_states) == num_lstm_layer
# embedding layer
data = mx.sym.Variable('data')
wordvec = mx.sym.SliceChannel(data=data, num_outputs=seq_len, squeeze_axis=1)
hidden_all = []
for seqidx in range(seq_len):
hidden = wordvec[seqidx]
for i in range(num_lstm_layer):
next_state = _lstm(
num_hidden=num_hidden,
indata=hidden,
prev_state=last_states[i],
param=param_cells[i],
seqidx=seqidx,
layeridx=i)
hidden = next_state.h
last_states[i] = next_state
hidden_all.append(hidden)
hidden_concat = mx.sym.Concat(*hidden_all, dim=0)
pred_fc = mx.sym.FullyConnected(data=hidden_concat, num_hidden=11, name="pred_fc")
return pred_fc
|
Adds Symbol.WarpCTC on top of pred symbol and returns the resulting symbol
|
def _add_warp_ctc_loss(pred, seq_len, num_label, label):
""" Adds Symbol.contrib.ctc_loss on top of pred symbol and returns the resulting symbol """
label = mx.sym.Reshape(data=label, shape=(-1,))
label = mx.sym.Cast(data=label, dtype='int32')
return mx.sym.WarpCTC(data=pred, label=label, label_length=num_label, input_length=seq_len)
|
Adds Symbol.contrib.ctc_loss on top of pred symbol and returns the resulting symbol
|
def _add_mxnet_ctc_loss(pred, seq_len, label):
""" Adds Symbol.WapCTC on top of pred symbol and returns the resulting symbol """
pred_ctc = mx.sym.Reshape(data=pred, shape=(-4, seq_len, -1, 0))
loss = mx.sym.contrib.ctc_loss(data=pred_ctc, label=label)
ctc_loss = mx.sym.MakeLoss(loss)
softmax_class = mx.symbol.SoftmaxActivation(data=pred)
softmax_loss = mx.sym.MakeLoss(softmax_class)
softmax_loss = mx.sym.BlockGrad(softmax_loss)
return mx.sym.Group([softmax_loss, ctc_loss])
|
Adds CTC loss on top of pred symbol and returns the resulting symbol
|
def _add_ctc_loss(pred, seq_len, num_label, loss_type):
""" Adds CTC loss on top of pred symbol and returns the resulting symbol """
label = mx.sym.Variable('label')
if loss_type == 'warpctc':
print("Using WarpCTC Loss")
sm = _add_warp_ctc_loss(pred, seq_len, num_label, label)
else:
print("Using MXNet CTC Loss")
assert loss_type == 'ctc'
sm = _add_mxnet_ctc_loss(pred, seq_len, label)
return sm
|
Creates an unrolled LSTM symbol for inference if loss_type is not specified, and for training
if loss_type is specified. loss_type must be one of 'ctc' or 'warpctc'
Parameters
----------
num_lstm_layer: int
seq_len: int
num_hidden: int
num_label: int
loss_type: str
'ctc' or 'warpctc'
Returns
-------
mxnet.symbol.symbol.Symbol
|
def lstm_unroll(num_lstm_layer, seq_len, num_hidden, num_label, loss_type=None):
"""
Creates an unrolled LSTM symbol for inference if loss_type is not specified, and for training
if loss_type is specified. loss_type must be one of 'ctc' or 'warpctc'
Parameters
----------
num_lstm_layer: int
seq_len: int
num_hidden: int
num_label: int
loss_type: str
'ctc' or 'warpctc'
Returns
-------
mxnet.symbol.symbol.Symbol
"""
# Create the base (shared between training and inference) and add loss to the end
pred = _lstm_unroll_base(num_lstm_layer, seq_len, num_hidden)
if loss_type:
# Training mode, add loss
return _add_ctc_loss(pred, seq_len, num_label, loss_type)
else:
# Inference mode, add softmax
return mx.sym.softmax(data=pred, name='softmax')
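A hypothetical usage sketch (the layer count, sequence length, and label width below are illustrative, not values from the source):
train_sym = lstm_unroll(num_lstm_layer=2, seq_len=80, num_hidden=100,
                        num_label=4, loss_type='ctc')  # training graph with CTC loss
infer_sym = lstm_unroll(num_lstm_layer=2, seq_len=80, num_hidden=100,
                        num_label=4)                   # inference graph with softmax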
|
Returns name and shape of init states of LSTM network
Parameters
----------
batch_size: int
num_lstm_layer: int
num_hidden: int
Returns
-------
list of tuple of str and tuple of int and int
|
def init_states(batch_size, num_lstm_layer, num_hidden):
"""
Returns name and shape of init states of LSTM network
Parameters
----------
batch_size: int
num_lstm_layer: int
num_hidden: int
Returns
-------
list of tuple of str and tuple of int and int
"""
init_c = [('l%d_init_c' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
init_h = [('l%d_init_h' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
return init_c + init_h
|
ctypes implementation of imperative invoke wrapper
|
def _imperative_invoke(handle, ndargs, keys, vals, out):
"""ctypes implementation of imperative invoke wrapper"""
if out is not None:
original_output = out
if isinstance(out, NDArrayBase):
out = (out,)
num_output = ctypes.c_int(len(out))
output_vars = c_handle_array(out)
output_vars = ctypes.cast(output_vars, ctypes.POINTER(NDArrayHandle))
else:
original_output = None
output_vars = ctypes.POINTER(NDArrayHandle)()
num_output = ctypes.c_int(0)
# return output stypes to avoid the c_api call for checking
# a handle's stype in _ndarray_cls
out_stypes = ctypes.POINTER(ctypes.c_int)()
check_call(_LIB.MXImperativeInvokeEx(
ctypes.c_void_p(handle),
ctypes.c_int(len(ndargs)),
c_handle_array(ndargs),
ctypes.byref(num_output),
ctypes.byref(output_vars),
ctypes.c_int(len(keys)),
c_str_array(keys),
c_str_array([str(s) for s in vals]),
ctypes.byref(out_stypes)))
if original_output is not None:
return original_output
if num_output.value == 1:
return _ndarray_cls(ctypes.cast(output_vars[0], NDArrayHandle),
stype=out_stypes[0])
else:
return [_ndarray_cls(ctypes.cast(output_vars[i], NDArrayHandle),
stype=out_stypes[i])
for i in range(num_output.value)]
|
Set status to training/not training. When training, graph will be constructed
for gradient computation. Operators will also run with ctx.is_train=True. For example,
Dropout will drop inputs randomly when is_train=True while simply passing through
if is_train=False.
Parameters
----------
is_train: bool
Returns
-------
previous state before this set.
|
def set_is_training(is_train):
"""Set status to training/not training. When training, graph will be constructed
for gradient computation. Operators will also run with ctx.is_train=True. For example,
Dropout will drop inputs randomly when is_train=True while simply passing through
if is_train=False.
Parameters
----------
is_train: bool
Returns
-------
previous state before this set.
"""
prev = ctypes.c_int()
check_call(_LIB.MXAutogradSetIsTraining(
ctypes.c_int(is_train), ctypes.byref(prev)))
check_call(_LIB.MXAutogradSetIsRecording(
ctypes.c_int(is_train), ctypes.byref(prev)))
return bool(prev.value)
|
Compute the gradients of outputs w.r.t. variables.
Parameters
----------
outputs: list of NDArray
out_grads: list of NDArray or None
|
def backward(outputs, out_grads=None, retain_graph=False):
"""Compute the gradients of outputs w.r.t variables.
Parameters
----------
outputs: list of NDArray
out_grads: list of NDArray or None
"""
assert isinstance(outputs, (list, tuple)), \
"outputs must be a list or tuple of NDArrays"
if out_grads is None:
check_call(_LIB.MXAutogradBackward(
len(outputs),
c_handle_array(outputs),
ctypes.c_void_p(0),
ctypes.c_int(retain_graph)))
return
ograd_handles = []
for arr in out_grads:
if arr is not None:
ograd_handles.append(arr.handle)
else:
ograd_handles.append(NDArrayHandle(0))
assert len(ograd_handles) == len(outputs), \
"outputs and out_grads must have the same length"
check_call(_LIB.MXAutogradBackward(
len(outputs),
c_handle_array(outputs),
c_array(NDArrayHandle, ograd_handles),
ctypes.c_int(retain_graph)))
|
Return function that computes both gradient of arguments and loss value.
Parameters
----------
func: a python function
The forward (loss) function.
argnum: an int or a list of int
The index of argument to calculate gradient for.
Returns
-------
grad_and_loss_func: a python function
A function that would compute both the gradient of arguments and loss value.
|
def grad_and_loss(func, argnum=None):
"""Return function that computes both gradient of arguments and loss value.
Parameters
----------
func: a python function
The forward (loss) function.
argnum: an int or a list of int
The index of argument to calculate gradient for.
Returns
-------
grad_and_loss_func: a python function
A function that would compute both the gradient of arguments and loss value.
"""
@functools.wraps(func)
def wrapped(*args):
"""Wrapped function."""
variables = args
if argnum is not None:
argnum_ = argnum if isinstance(argnum, list) else [argnum]
variables = [args[i] for i in argnum_]
for x in variables:
assert isinstance(x, NDArray), "type of autograd input should be NDArray."
grads = [zeros_like(x) for x in variables]
mark_variables(variables, grads)
with train_section():
outputs = func(*args)
compute_gradient([outputs] if isinstance(outputs, NDArray) else outputs)
return grads, outputs
return wrapped
|
Return function that computes gradient of arguments.
Parameters
----------
func: a python function
The forward (loss) function.
argnum: an int or a list of int
The index of argument to calculate gradient for.
Returns
-------
grad_func: a python function
A function that would compute the gradient of arguments.
Examples
--------
>>> # autograd supports dynamic graph which is changed
>>> # every instance
>>> def func(x):
>>> r = random.randint(0, 1)
>>> if r % 2:
>>> return x**2
>>> else:
>>> return x/3
>>> # use `grad(func)` to get the gradient function
>>> for x in range(10):
>>> grad_func = grad(func)
>>> inputs = nd.array([[1, 2, 3], [4, 5, 6]])
>>> grad_vals = grad_func(inputs)
|
def grad(func, argnum=None):
"""Return function that computes gradient of arguments.
Parameters
----------
func: a python function
The forward (loss) function.
argnum: an int or a list of int
The index of argument to calculate gradient for.
Returns
-------
grad_func: a python function
A function that would compute the gradient of arguments.
Examples
--------
>>> # autograd supports dynamic graph which is changed
>>> # every instance
>>> def func(x):
>>> r = random.randint(0, 1)
>>> if r % 2:
>>> return x**2
>>> else:
>>> return x/3
>>> # use `grad(func)` to get the gradient function
>>> for x in range(10):
>>> grad_func = grad(func)
>>> inputs = nd.array([[1, 2, 3], [4, 5, 6]])
>>> grad_vals = grad_func(inputs)
"""
grad_with_loss_func = grad_and_loss(func, argnum)
@functools.wraps(grad_with_loss_func)
def wrapped(*args):
return grad_with_loss_func(*args)[0]
return wrapped
|
Splits an NDArray into `num_slice` slices along `batch_axis`.
Usually used for data parallelism, where each slice is sent
to one device (i.e. GPU).
Parameters
----------
data : NDArray
A batch of data.
num_slice : int
Number of desired slices.
batch_axis : int, default 0
The axis along which to slice.
even_split : bool, default True
Whether to force all slices to have the same number of elements.
If `True`, an error will be raised when `num_slice` does not evenly
divide `data.shape[batch_axis]`.
Returns
-------
list of NDArray
Return value is a list even if `num_slice` is 1.
|
def split_data(data, num_slice, batch_axis=0, even_split=True):
"""Splits an NDArray into `num_slice` slices along `batch_axis`.
Usually used for data parallelism, where each slice is sent
to one device (i.e. GPU).
Parameters
----------
data : NDArray
A batch of data.
num_slice : int
Number of desired slices.
batch_axis : int, default 0
The axis along which to slice.
even_split : bool, default True
Whether to force all slices to have the same number of elements.
If `True`, an error will be raised when `num_slice` does not evenly
divide `data.shape[batch_axis]`.
Returns
-------
list of NDArray
Return value is a list even if `num_slice` is 1.
"""
size = data.shape[batch_axis]
if even_split and size % num_slice != 0:
raise ValueError(
"data with shape %s cannot be evenly split into %d slices along axis %d. " \
"Use a batch size that's multiple of %d or set even_split=False to allow " \
"uneven partitioning of data."%(
str(data.shape), num_slice, batch_axis, num_slice))
step = size // num_slice
# If size < num_slice, make fewer slices
if not even_split and size < num_slice:
step = 1
num_slice = size
if batch_axis == 0:
slices = [data[i*step:(i+1)*step] if i < num_slice - 1 else data[i*step:size]
for i in range(num_slice)]
elif even_split:
slices = ndarray.split(data, num_outputs=num_slice, axis=batch_axis)
else:
slices = [ndarray.slice_axis(data, batch_axis, i*step, (i+1)*step)
if i < num_slice - 1 else
ndarray.slice_axis(data, batch_axis, i*step, size)
for i in range(num_slice)]
return slices
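A quick usage sketch with an even split, assuming mxnet is importable as mx (shapes are illustrative):
import mxnet as mx

data = mx.nd.arange(12).reshape((6, 2))
slices = split_data(data, num_slice=3)  # three slices along axis 0
print([s.shape for s in slices])        # [(2, 2), (2, 2), (2, 2)]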
|
Splits an NDArray into `len(ctx_list)` slices along `batch_axis` and loads
each slice to one context in `ctx_list`.
Parameters
----------
data : NDArray
A batch of data.
ctx_list : list of Context
A list of Contexts.
batch_axis : int, default 0
The axis along which to slice.
even_split : bool, default True
Whether to force all slices to have the same number of elements.
Returns
-------
list of NDArray
Each corresponds to a context in `ctx_list`.
|
def split_and_load(data, ctx_list, batch_axis=0, even_split=True):
"""Splits an NDArray into `len(ctx_list)` slices along `batch_axis` and loads
each slice to one context in `ctx_list`.
Parameters
----------
data : NDArray
A batch of data.
ctx_list : list of Context
A list of Contexts.
batch_axis : int, default 0
The axis along which to slice.
even_split : bool, default True
Whether to force all slices to have the same number of elements.
Returns
-------
list of NDArray
Each corresponds to a context in `ctx_list`.
"""
if not isinstance(data, ndarray.NDArray):
data = ndarray.array(data, ctx=ctx_list[0])
if len(ctx_list) == 1:
return [data.as_in_context(ctx_list[0])]
slices = split_data(data, len(ctx_list), batch_axis, even_split)
return [i.as_in_context(ctx) for i, ctx in zip(slices, ctx_list)]
|
Rescales NDArrays so that the sum of their 2-norm is smaller than `max_norm`.
Parameters
----------
arrays : list of NDArray
max_norm : float
check_isfinite : bool, default True
If True, check that the total_norm is finite (not nan or inf). This
requires a blocking .asscalar() call.
Returns
-------
NDArray or float
Total norm. Return type is NDArray of shape (1,) if check_isfinite is
False. Otherwise a float is returned.
|
def clip_global_norm(arrays, max_norm, check_isfinite=True):
"""Rescales NDArrays so that the sum of their 2-norm is smaller than `max_norm`.
Parameters
----------
arrays : list of NDArray
max_norm : float
check_isfinite : bool, default True
If True, check that the total_norm is finite (not nan or inf). This
requires a blocking .asscalar() call.
Returns
-------
NDArray or float
Total norm. Return type is NDArray of shape (1,) if check_isfinite is
False. Otherwise a float is returned.
"""
def _norm(array):
if array.stype == 'default':
x = array.reshape((-1,))
return ndarray.dot(x, x)
return array.norm().square()
assert len(arrays) > 0
ctx = arrays[0].context
total_norm = ndarray.add_n(*[_norm(arr).as_in_context(ctx) for arr in arrays])
total_norm = ndarray.sqrt(total_norm)
if check_isfinite:
if not np.isfinite(total_norm.asscalar()):
warnings.warn(
UserWarning('nan or inf is detected. '
'Clipping results will be undefined.'), stacklevel=2)
scale = max_norm / (total_norm + 1e-8)
scale = ndarray.min(ndarray.concat(scale, ndarray.ones(1, ctx=ctx), dim=0))
for arr in arrays:
arr *= scale.as_in_context(arr.context)
if check_isfinite:
return total_norm.asscalar()
else:
return total_norm
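The clipping math in isolation: the global norm is the 2-norm of all arrays concatenated, and every array is scaled by min(max_norm / total_norm, 1). A NumPy sketch with values chosen so the norm is exactly 13:
import numpy as np

arrays = [np.array([3.0, 4.0]), np.array([12.0])]
total_norm = np.sqrt(sum((a ** 2).sum() for a in arrays))  # sqrt(9 + 16 + 144) = 13
scale = min(6.5 / (total_norm + 1e-8), 1.0)                # max_norm = 6.5 -> scale ~ 0.5
clipped = [a * scale for a in arrays]
print(total_norm, [c.tolist() for c in clipped])  # 13.0 [[~1.5, ~2.0], [~6.0]]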
|
Indent string
|
def _indent(s_, numSpaces):
"""Indent string
"""
s = s_.split('\n')
if len(s) == 1:
return s_
first = s.pop(0)
s = [first] + [(numSpaces * ' ') + line for line in s]
s = '\n'.join(s)
return s
|
Check whether the sha1 hash of the file content matches the expected hash.
Parameters
----------
filename : str
Path to the file.
sha1_hash : str
Expected sha1 hash in hexadecimal digits.
Returns
-------
bool
Whether the file content matches the expected hash.
|
def check_sha1(filename, sha1_hash):
"""Check whether the sha1 hash of the file content matches the expected hash.
Parameters
----------
filename : str
Path to the file.
sha1_hash : str
Expected sha1 hash in hexadecimal digits.
Returns
-------
bool
Whether the file content matches the expected hash.
"""
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
data = f.read(1048576)
if not data:
break
sha1.update(data)
return sha1.hexdigest() == sha1_hash
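A self-contained usage check (writes a throwaway file named example.bin in the working directory):
import hashlib

with open('example.bin', 'wb') as f:
    f.write(b'hello')
expected = hashlib.sha1(b'hello').hexdigest()
print(check_sha1('example.bin', expected))  # True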
|
Download a given URL
Parameters
----------
url : str
URL to download
path : str, optional
Destination path to store downloaded file. By default stores to the
current directory with same name as in url.
overwrite : bool, optional
Whether to overwrite destination file if already exists.
sha1_hash : str, optional
Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
but doesn't match.
retries : integer, default 5
The number of times to attempt the download in case of failure or non-200 return codes
verify_ssl : bool, default True
Verify SSL certificates.
Returns
-------
str
The file path of the downloaded file.
|
def download(url, path=None, overwrite=False, sha1_hash=None, retries=5, verify_ssl=True):
"""Download an given URL
Parameters
----------
url : str
URL to download
path : str, optional
Destination path to store downloaded file. By default stores to the
current directory with same name as in url.
overwrite : bool, optional
Whether to overwrite destination file if already exists.
sha1_hash : str, optional
Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
but doesn't match.
retries : integer, default 5
The number of times to attempt the download in case of failure or non-200 return codes
verify_ssl : bool, default True
Verify SSL certificates.
Returns
-------
str
The file path of the downloaded file.
"""
if path is None:
fname = url.split('/')[-1]
# Empty filenames are invalid
assert fname, 'Can\'t construct file-name from this URL. ' \
'Please set the `path` option manually.'
else:
path = os.path.expanduser(path)
if os.path.isdir(path):
fname = os.path.join(path, url.split('/')[-1])
else:
fname = path
assert retries >= 0, "Number of retries should be at least 0, currently it's {}".format(
retries)
if not verify_ssl:
warnings.warn(
'Unverified HTTPS request is being made (verify_ssl=False). '
'Adding certificate verification is strongly advised.')
if overwrite or not os.path.exists(fname) or (sha1_hash and not check_sha1(fname, sha1_hash)):
dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
if not os.path.exists(dirname):
os.makedirs(dirname)
while retries + 1 > 0:
# Disable pylint too-broad-exception warning
# pylint: disable=W0703
try:
print('Downloading {} from {}...'.format(fname, url))
r = requests.get(url, stream=True, verify=verify_ssl)
if r.status_code != 200:
raise RuntimeError('Failed downloading url {}'.format(url))
# create uuid for temporary files
random_uuid = str(uuid.uuid4())
with open('{}.{}'.format(fname, random_uuid), 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
# If the target file already exists (created by another process) and
# matches the expected hash, delete the temporary file; otherwise move
# the temporary file into place atomically.
if not os.path.exists(fname) or (sha1_hash and not check_sha1(fname, sha1_hash)):
# atomic operation in the same file system
_replace_atomic('{}.{}'.format(fname, random_uuid), fname)
else:
try:
os.remove('{}.{}'.format(fname, random_uuid))
except OSError:
pass
finally:
warnings.warn(
'File {} exists in file system so the downloaded file is deleted'.format(fname))
if sha1_hash and not check_sha1(fname, sha1_hash):
raise UserWarning(
'File {} is downloaded but the content hash does not match.'
' The repo may be outdated or download may be incomplete. '
'If the "repo_url" is overridden, consider switching to '
'the default repo.'.format(fname))
break
except Exception as e:
retries -= 1
if retries <= 0:
raise e
else:
print('download failed due to {}, retrying, {} attempt{} left'
.format(repr(e), retries, 's' if retries > 1 else ''))
return fname
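A hypothetical call (the URL below is a placeholder, not a real endpoint from the source):
fname = download('https://example.com/data/train.rec', path='./data/', retries=3)
print(fname)  # ./data/train.rec once the download and optional hash check succeed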
|
Return the base URL for Gluon dataset and model repository.
|
def _get_repo_url():
"""Return the base URL for Gluon dataset and model repository."""
default_repo = 'https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/'
repo_url = os.environ.get('MXNET_GLUON_REPO', default_repo)
if repo_url[-1] != '/':
repo_url = repo_url+'/'
return repo_url
|
Return the URL for a hosted file in the Gluon repository.
Parameters
----------
namespace : str
    Namespace of the file.
filename : str
    Name of the file.
|
def _get_repo_file_url(namespace, filename):
"""Return the URL for hosted file in Gluon repository.
Parameters
----------
namespace : str
Namespace of the file.
filename : str
Name of the file
"""
return '{base_url}{namespace}/{filename}'.format(base_url=_get_repo_url(),
namespace=namespace,
filename=filename)
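For example, with the default repo URL in effect (the namespace and filename here are illustrative):
print(_get_repo_file_url('gluon/dataset', 'mnist.zip'))
# https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/mnist.zip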
|
Print at most `limit` elements of list.
|
def _brief_print_list(lst, limit=7):
"""Print at most `limit` elements of list."""
lst = list(lst)
if len(lst) > limit:
return _brief_print_list(lst[:limit//2], limit) + ', ..., ' + \
_brief_print_list(lst[-limit//2:], limit)
return ', '.join(["'%s'"%str(i) for i in lst])
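For example, with limit=4 the middle elements are elided:
print(_brief_print_list(range(10), limit=4))
# prints: '0', '1', ..., '8', '9'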
|
Create a symbol function by handle and function name.
|
def _make_symbol_function(handle, name, func_name):
"""Create a symbol function by handle and function name."""
code, doc_str = _generate_symbol_function_code(handle, name, func_name)
local = {}
exec(code, None, local) # pylint: disable=exec-used
symbol_function = local[func_name]
symbol_function.__name__ = func_name
symbol_function.__doc__ = doc_str
symbol_function.__module__ = 'mxnet.symbol'
return symbol_function
|
Generate row ids based on the current mini-batch
|
def batch_row_ids(data_batch):
""" Generate row ids based on the current mini-batch """
item = data_batch.data[0]
user = data_batch.data[1]
return {'user_weight': user.astype(np.int64),
'item_weight': item.astype(np.int64)}
|
Generate row ids for all rows
|
def all_row_ids(data_batch):
""" Generate row ids for all rows """
all_users = mx.nd.arange(0, MOVIELENS['max_user'], dtype='int64')
all_movies = mx.nd.arange(0, MOVIELENS['max_movie'], dtype='int64')
return {'user_weight': all_users, 'item_weight': all_movies}
|
Convert a Caffe model.
Parameters
----------
prototxt_fname : str
    Filename of the prototxt model definition
caffemodel_fname : str
    Filename of the binary caffe model
output_prefix : str, optional
    If given, save the converted MXNet model to output_prefix + '.json' and
    output_prefix + '.params'
Returns
-------
sym : Symbol
    Symbol converted from the prototxt
arg_params : list of NDArray
    Argument parameters
aux_params : list of NDArray
    Aux parameters
input_dim : tuple
    Input dimension
|
def convert_model(prototxt_fname, caffemodel_fname, output_prefix=None):
"""Convert caffe model
Parameters
----------
prototxt_fname : str
Filename of the prototxt model definition
caffemodel_fname : str
Filename of the binary caffe model
output_prefix : str, optinoal
If given, then save the converted MXNet into output_prefx+'.json' and
output_prefx+'.params'
Returns
-------
sym : Symbol
Symbol convereted from prototxt
arg_params : list of NDArray
Argument parameters
aux_params : list of NDArray
Aux parameters
input_dim : tuple
Input dimension
"""
sym, input_dim = convert_symbol(prototxt_fname)
arg_shapes, _, aux_shapes = sym.infer_shape(data=tuple(input_dim))
arg_names = sym.list_arguments()
aux_names = sym.list_auxiliary_states()
arg_shape_dic = dict(zip(arg_names, arg_shapes))
aux_shape_dic = dict(zip(aux_names, aux_shapes))
arg_params = {}
aux_params = {}
first_conv = True
layers, names = caffe_parser.read_caffemodel(prototxt_fname, caffemodel_fname)
layer_iter = caffe_parser.layer_iter(layers, names)
layers_proto = caffe_parser.get_layers(caffe_parser.read_prototxt(prototxt_fname))
for layer_name, layer_type, layer_blobs in layer_iter:
if layer_type == 'Convolution' or layer_type == 'InnerProduct' \
or layer_type == 4 or layer_type == 14 or layer_type == 'PReLU' \
or layer_type == 'Deconvolution' or layer_type == 39 or layer_type == 'Normalize':
if layer_type == 'PReLU':
assert (len(layer_blobs) == 1)
wmat = layer_blobs[0].data
weight_name = layer_name + '_gamma'
arg_params[weight_name] = mx.nd.zeros(wmat.shape)
arg_params[weight_name][:] = wmat
continue
if layer_type == 'Normalize':
assert (len(layer_blobs) == 1)
weight_name = layer_name + '_scale'
wmat = layer_blobs[0].data
arg_params[weight_name] = mx.nd.zeros((1, len(wmat), 1, 1))
arg_params[weight_name][:] = np.array(list(wmat)).reshape((1, len(wmat), 1, 1))
continue
wmat_dim = []
if getattr(layer_blobs[0].shape, 'dim', None) is not None:
if len(layer_blobs[0].shape.dim) > 0:
wmat_dim = layer_blobs[0].shape.dim
else:
wmat_dim = [layer_blobs[0].num, layer_blobs[0].channels,
layer_blobs[0].height, layer_blobs[0].width]
else:
wmat_dim = list(layer_blobs[0].shape)
wmat = np.array(layer_blobs[0].data).reshape(wmat_dim)
channels = wmat_dim[1]
if channels == 3 or channels == 4: # RGB or RGBA
if first_conv:
                    # Swap Caffe's BGR channel order to MXNet's RGB
wmat[:, [0, 2], :, :] = wmat[:, [2, 0], :, :]
assert(wmat.flags['C_CONTIGUOUS'] is True)
sys.stdout.write('converting layer {0}, wmat shape = {1}'.format(
layer_name, wmat.shape))
if len(layer_blobs) == 2:
bias = np.array(layer_blobs[1].data)
bias = bias.reshape((bias.shape[0], 1))
assert(bias.flags['C_CONTIGUOUS'] is True)
bias_name = layer_name + "_bias"
if bias_name not in arg_shape_dic:
print(bias_name + ' not found in arg_shape_dic.')
continue
bias = bias.reshape(arg_shape_dic[bias_name])
arg_params[bias_name] = mx.nd.zeros(bias.shape)
arg_params[bias_name][:] = bias
sys.stdout.write(', bias shape = {}'.format(bias.shape))
sys.stdout.write('\n')
sys.stdout.flush()
wmat = wmat.reshape((wmat.shape[0], -1))
weight_name = layer_name + "_weight"
if weight_name not in arg_shape_dic:
print(weight_name + ' not found in arg_shape_dic.')
continue
wmat = wmat.reshape(arg_shape_dic[weight_name])
arg_params[weight_name] = mx.nd.zeros(wmat.shape)
arg_params[weight_name][:] = wmat
if first_conv and (layer_type == 'Convolution' or layer_type == 4):
first_conv = False
elif layer_type == 'Scale':
if 'scale' in layer_name:
bn_name = layer_name.replace('scale', 'bn')
elif 'sc' in layer_name:
bn_name = layer_name.replace('sc', 'bn')
else:
assert False, 'Unknown name convention for bn/scale'
gamma = np.array(layer_blobs[0].data)
beta = np.array(layer_blobs[1].data)
# beta = np.expand_dims(beta, 1)
beta_name = '{}_beta'.format(bn_name)
gamma_name = '{}_gamma'.format(bn_name)
beta = beta.reshape(arg_shape_dic[beta_name])
gamma = gamma.reshape(arg_shape_dic[gamma_name])
arg_params[beta_name] = mx.nd.zeros(beta.shape)
arg_params[gamma_name] = mx.nd.zeros(gamma.shape)
arg_params[beta_name][:] = beta
arg_params[gamma_name][:] = gamma
assert gamma.flags['C_CONTIGUOUS'] is True
assert beta.flags['C_CONTIGUOUS'] is True
print('converting scale layer, beta shape = {}, gamma shape = {}'.format(
beta.shape, gamma.shape))
elif layer_type == 'BatchNorm':
bn_name = layer_name
mean = np.array(layer_blobs[0].data)
var = np.array(layer_blobs[1].data)
rescale_factor = layer_blobs[2].data[0]
if rescale_factor != 0:
rescale_factor = 1 / rescale_factor
mean_name = '{}_moving_mean'.format(bn_name)
var_name = '{}_moving_var'.format(bn_name)
mean = mean.reshape(aux_shape_dic[mean_name])
var = var.reshape(aux_shape_dic[var_name])
aux_params[mean_name] = mx.nd.zeros(mean.shape)
aux_params[var_name] = mx.nd.zeros(var.shape)
# Get the original epsilon
for idx, layer in enumerate(layers_proto):
if layer.name == bn_name:
bn_index = idx
eps_caffe = layers_proto[bn_index].batch_norm_param.eps
# Compensate for the epsilon shift performed in convert_symbol
eps_symbol = float(sym.attr_dict()[bn_name + '_moving_mean']['eps'])
eps_correction = eps_caffe - eps_symbol
# Fill parameters
aux_params[mean_name][:] = mean * rescale_factor
aux_params[var_name][:] = var * rescale_factor + eps_correction
assert var.flags['C_CONTIGUOUS'] is True
assert mean.flags['C_CONTIGUOUS'] is True
print('converting batchnorm layer, mean shape = {}, var shape = {}'.format(
mean.shape, var.shape))
fix_gamma = layers_proto[bn_index+1].type != 'Scale'
if fix_gamma:
gamma_name = '{}_gamma'.format(bn_name)
gamma = np.array(np.ones(arg_shape_dic[gamma_name]))
beta_name = '{}_beta'.format(bn_name)
beta = np.array(np.zeros(arg_shape_dic[beta_name]))
arg_params[beta_name] = mx.nd.zeros(beta.shape)
arg_params[gamma_name] = mx.nd.zeros(gamma.shape)
arg_params[beta_name][:] = beta
arg_params[gamma_name][:] = gamma
assert gamma.flags['C_CONTIGUOUS'] is True
assert beta.flags['C_CONTIGUOUS'] is True
else:
print('\tskipping layer {} of type {}'.format(layer_name, layer_type))
assert len(layer_blobs) == 0
if output_prefix is not None:
model = mx.mod.Module(symbol=sym, label_names=None)
model.bind(data_shapes=[('data', tuple(input_dim))])
model.init_params(arg_params=arg_params, aux_params=aux_params)
model.save_checkpoint(output_prefix, 0)
return sym, arg_params, aux_params, input_dim
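A usage sketch (the filenames are hypothetical placeholders for a real Caffe model):
sym, arg_params, aux_params, input_dim = convert_model(
    'deploy.prototxt', 'weights.caffemodel', output_prefix='converted')
print(input_dim)  # e.g. [1, 3, 224, 224]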
|
Parse Caffe prototxt into symbol string
|
def _parse_proto(prototxt_fname):
"""Parse Caffe prototxt into symbol string
"""
proto = caffe_parser.read_prototxt(prototxt_fname)
# process data layer
input_name, input_dim, layers = _get_input(proto)
# only support single input, so always use `data` as the input data
mapping = {input_name: 'data'}
need_flatten = {input_name: False}
symbol_string = "import mxnet as mx\ndata = mx.symbol.Variable(name='data')\n"
flatten_count = 0
output_name = ""
prev_name = None
_output_name = {}
    # convert the rest of the layers one by one
for i, layer in enumerate(layers):
type_string = ''
param_string = ''
skip_layer = False
name = re.sub('[-/]', '_', layer.name)
for k in range(len(layer.bottom)):
if layer.bottom[k] in _output_name:
_output_name[layer.bottom[k]]['count'] = _output_name[layer.bottom[k]]['count']+1
else:
_output_name[layer.bottom[k]] = {'count':0}
for k in range(len(layer.top)):
if layer.top[k] in _output_name:
_output_name[layer.top[k]]['count'] = _output_name[layer.top[k]]['count']+1
else:
_output_name[layer.top[k]] = {'count':0, 'name':name}
if layer.type == 'Convolution' or layer.type == 4:
type_string = 'mx.symbol.Convolution'
param_string = _convert_conv_param(layer.convolution_param)
need_flatten[name] = True
if layer.type == 'Deconvolution' or layer.type == 39:
type_string = 'mx.symbol.Deconvolution'
param_string = _convert_conv_param(layer.convolution_param)
need_flatten[name] = True
if layer.type == 'Pooling' or layer.type == 17:
type_string = 'mx.symbol.Pooling'
param_string = _convert_pooling_param(layer.pooling_param)
need_flatten[name] = True
if layer.type == 'ReLU' or layer.type == 18:
type_string = 'mx.symbol.Activation'
param_string = "act_type='relu'"
param = layer.relu_param
if hasattr(param, 'negative_slope'):
if param.negative_slope > 0:
type_string = 'mx.symbol.LeakyReLU'
param_string = "act_type='leaky', slope=%f" % param.negative_slope
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'TanH' or layer.type == 23:
type_string = 'mx.symbol.Activation'
param_string = "act_type='tanh'"
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'Sigmoid' or layer.type == 19:
type_string = 'mx.symbol.Activation'
param_string = "act_type='sigmoid'"
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'LRN' or layer.type == 15:
type_string = 'mx.symbol.LRN'
param = layer.lrn_param
param_string = "alpha=%f, beta=%f, knorm=%f, nsize=%d" % (
param.alpha, param.beta, param.k, param.local_size)
need_flatten[name] = True
if layer.type == 'InnerProduct' or layer.type == 14:
type_string = 'mx.symbol.FullyConnected'
param = layer.inner_product_param
param_string = "num_hidden=%d, no_bias=%s" % (
param.num_output, not param.bias_term)
need_flatten[name] = False
if layer.type == 'Dropout' or layer.type == 6:
type_string = 'mx.symbol.Dropout'
param = layer.dropout_param
param_string = "p=%f" % param.dropout_ratio
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'Softmax' or layer.type == 20:
type_string = 'mx.symbol.SoftmaxOutput'
if layer.type == 'Flatten' or layer.type == 8:
type_string = 'mx.symbol.Flatten'
need_flatten[name] = False
if layer.type == 'Split' or layer.type == 22:
type_string = 'split' # will process later
if layer.type == 'Concat' or layer.type == 3:
type_string = 'mx.symbol.Concat'
need_flatten[name] = True
if layer.type == 'Crop':
type_string = 'mx.symbol.Crop'
need_flatten[name] = True
param_string = 'center_crop=True'
if layer.type == 'BatchNorm':
type_string = 'mx.symbol.BatchNorm'
param = layer.batch_norm_param
# CuDNN requires eps to be greater than 1e-05
# We compensate for this change in convert_model
epsilon = param.eps
if (epsilon <= 1e-05):
epsilon = 1e-04
# if next layer is scale, don't fix gamma
fix_gamma = layers[i+1].type != 'Scale'
param_string = 'use_global_stats=%s, fix_gamma=%s, eps=%f' % (
param.use_global_stats, fix_gamma, epsilon)
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'Scale':
assert layers[i-1].type == 'BatchNorm'
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
skip_layer = True
prev_name = re.sub('[-/]', '_', layers[i-1].name)
if layer.type == 'PReLU':
type_string = 'mx.symbol.LeakyReLU'
param = layer.prelu_param
param_string = "act_type='prelu', slope=%f" % param.filler.value
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if layer.type == 'Eltwise':
type_string = 'mx.symbol.broadcast_add'
param = layer.eltwise_param
param_string = ""
need_flatten[name] = False
if layer.type == 'Reshape':
type_string = 'mx.symbol.Reshape'
need_flatten[name] = False
param = layer.reshape_param
param_string = "shape=(%s)" % (','.join(param.shape.dim),)
if layer.type == 'AbsVal':
type_string = 'mx.symbol.abs'
need_flatten[name] = need_flatten[mapping[layer.bottom[0]]]
if skip_layer:
assert len(layer.bottom) == 1
symbol_string += "%s = %s\n" % (name, prev_name)
elif type_string == '':
raise ValueError('Unknown layer %s!' % layer.type)
elif type_string != 'split':
bottom = layer.bottom
if param_string != "":
param_string = ", " + param_string
if len(bottom) == 1:
if need_flatten[mapping[bottom[0]]] and type_string == 'mx.symbol.FullyConnected':
flatten_name = "flatten_%d" % flatten_count
symbol_string += "%s=mx.symbol.Flatten(name='%s', data=%s)\n" % (
flatten_name, flatten_name, mapping[bottom[0]])
flatten_count += 1
need_flatten[flatten_name] = False
bottom[0] = flatten_name
mapping[bottom[0]] = bottom[0]
symbol_string += "%s = %s(name='%s', data=%s %s)\n" % (
name, type_string, name, mapping[bottom[0]], param_string)
else:
if layer.type == 'Eltwise' and param.operation == 1 and len(param.coeff) > 0:
symbol_string += "%s = " % name
symbol_string += " + ".join(["%s * %s" % (
mapping[bottom[i]], param.coeff[i]) for i in range(len(param.coeff))])
symbol_string += "\n"
else:
symbol_string += "%s = %s(name='%s', *[%s] %s)\n" % (
name, type_string, name, ','.join(
[mapping[x] for x in bottom]), param_string)
for j in range(len(layer.top)):
mapping[layer.top[j]] = name
output_name = name
output_name = []
for i in _output_name:
if 'name' in _output_name[i] and _output_name[i]['count'] == 0:
output_name.append(_output_name[i]['name'])
return symbol_string, output_name, input_dim
|
Convert caffe model definition into Symbol
Parameters
----------
prototxt_fname : str
Filename of the prototxt file
Returns
-------
Symbol
Converted Symbol
tuple
Input shape
|
def convert_symbol(prototxt_fname):
"""Convert caffe model definition into Symbol
Parameters
----------
prototxt_fname : str
Filename of the prototxt file
Returns
-------
Symbol
Converted Symbol
tuple
Input shape
"""
sym, output_name, input_dim = _parse_proto(prototxt_fname)
exec(sym) # pylint: disable=exec-used
_locals = locals()
ret = []
for i in output_name:
exec("ret = " + i, globals(), _locals) # pylint: disable=exec-used
ret.append(_locals['ret'])
ret = mx.sym.Group(ret)
return ret, input_dim
|
r"""VGG model from the `"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_ paper.
Parameters
----------
num_layers : int
    Number of layers for the variant of VGG. Options are 11, 13, 16, 19.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
|
def get_vgg(num_layers, pretrained=False, ctx=cpu(),
root=os.path.join(base.data_dir(), 'models'), **kwargs):
r"""VGG model from the `"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_ paper.
Parameters
----------
num_layers : int
        Number of layers for the variant of VGG. Options are 11, 13, 16, 19.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
"""
layers, filters = vgg_spec[num_layers]
net = VGG(layers, filters, **kwargs)
if pretrained:
from ..model_store import get_model_file
batch_norm_suffix = '_bn' if kwargs.get('batch_norm') else ''
net.load_parameters(get_model_file('vgg%d%s'%(num_layers, batch_norm_suffix),
root=root), ctx=ctx)
return net
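A quick sketch of constructing and running the network (no pretrained weights are downloaded here):
import mxnet as mx
from mxnet.gluon.model_zoo.vision import get_vgg
net = get_vgg(16, batch_norm=True)
net.initialize()
print(net(mx.nd.zeros((1, 3, 224, 224))).shape)  # (1, 1000)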
|
check function consistency with uniform random numbers
|
def check_with_uniform(uf, arg_shapes, dim=None, npuf=None, rmin=-10, type_list=[np.float32]):
"""check function consistency with uniform random numbers"""
if isinstance(arg_shapes, int):
assert dim
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
arg_shapes = [shape] * arg_shapes
for dtype in type_list:
ndarray_arg = []
numpy_arg = []
for s in arg_shapes:
npy = np.random.uniform(rmin, 10, s).astype(dtype)
narr = mx.nd.array(npy, dtype=dtype)
ndarray_arg.append(narr)
numpy_arg.append(npy)
out1 = uf(*ndarray_arg)
if npuf is None:
out2 = uf(*numpy_arg).astype(dtype)
else:
out2 = npuf(*numpy_arg).astype(dtype)
assert out1.shape == out2.shape
if isinstance(out1, mx.nd.NDArray):
out1 = out1.asnumpy()
if dtype == np.float16:
assert reldiff(out1, out2) < 2e-3
else:
assert reldiff(out1, out2) < 1e-6
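For example, a sketch checking that NDArray addition agrees with NumPy addition
(the same callable is applied to both argument types when npuf is None):
check_with_uniform(lambda x, y: x + y, arg_shapes=2, dim=2)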
|
Remove images without usable rois
|
def filter_roidb(self):
"""Remove images without usable rois"""
num_roidb = len(self._roidb)
self._roidb = [roi_rec for roi_rec in self._roidb if len(roi_rec['gt_classes'])]
num_after = len(self._roidb)
logger.info('filter roidb: {} -> {}'.format(num_roidb, num_after))
|
Only flip boxes coordinates, images will be flipped when loading into network
|
def append_flipped_images(self):
"""Only flip boxes coordinates, images will be flipped when loading into network"""
logger.info('%s append flipped images to roidb' % self._name)
roidb_flipped = []
for roi_rec in self._roidb:
boxes = roi_rec['boxes'].copy()
oldx1 = boxes[:, 0].copy()
oldx2 = boxes[:, 2].copy()
boxes[:, 0] = roi_rec['width'] - oldx2 - 1
boxes[:, 2] = roi_rec['width'] - oldx1 - 1
assert (boxes[:, 2] >= boxes[:, 0]).all()
roi_rec_flipped = roi_rec.copy()
roi_rec_flipped['boxes'] = boxes
roi_rec_flipped['flipped'] = True
roidb_flipped.append(roi_rec_flipped)
self._roidb.extend(roidb_flipped)
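Worked example of the flip above: with width = 100, a box with x1 = 10 and x2 = 20
maps to x1 = 100 - 20 - 1 = 79 and x2 = 100 - 10 - 1 = 89; y coordinates are unchanged.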
|
r"""Return location for the pretrained on local file system.
This function will download from online model zoo when model cannot be found or has mismatch.
The root directory will be created if it doesn't exist.
Parameters
----------
name : str
Name of the model.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
Returns
-------
file_path
Path to the requested pretrained model file.
|
def get_model_file(name, root=os.path.join(base.data_dir(), 'models')):
r"""Return location for the pretrained on local file system.
This function will download from online model zoo when model cannot be found or has mismatch.
The root directory will be created if it doesn't exist.
Parameters
----------
name : str
Name of the model.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
Returns
-------
file_path
Path to the requested pretrained model file.
"""
file_name = '{name}-{short_hash}'.format(name=name,
short_hash=short_hash(name))
root = os.path.expanduser(root)
file_path = os.path.join(root, file_name+'.params')
sha1_hash = _model_sha1[name]
if os.path.exists(file_path):
if check_sha1(file_path, sha1_hash):
return file_path
else:
logging.warning('Mismatch in the content of model file detected. Downloading again.')
else:
logging.info('Model file not found. Downloading to %s.', file_path)
util.makedirs(root)
zip_file_path = os.path.join(root, file_name+'.zip')
repo_url = os.environ.get('MXNET_GLUON_REPO', apache_repo_url)
if repo_url[-1] != '/':
repo_url = repo_url + '/'
download(_url_format.format(repo_url=repo_url, file_name=file_name),
path=zip_file_path,
overwrite=True)
with zipfile.ZipFile(zip_file_path) as zf:
zf.extractall(root)
os.remove(zip_file_path)
if check_sha1(file_path, sha1_hash):
return file_path
else:
raise ValueError('Downloaded file has different hash. Please try again.')
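A usage sketch (assuming 'resnet18_v1' is a key in _model_sha1):
path = get_model_file('resnet18_v1')
print(path)  # e.g. '/home/user/.mxnet/models/resnet18_v1-<hash>.params'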
|
r"""Purge all pretrained model files in local file store.
Parameters
----------
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
|
def purge(root=os.path.join(base.data_dir(), 'models')):
r"""Purge all pretrained model files in local file store.
Parameters
----------
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
root = os.path.expanduser(root)
files = os.listdir(root)
for f in files:
if f.endswith(".params"):
os.remove(os.path.join(root, f))
|
given an image index, find the full image path
Parameters:
----------
index: int
index of a specific image
Returns:
----------
full path of this image
|
def image_path_from_index(self, index):
"""
        given an image index, find the full image path
Parameters:
----------
index: int
index of a specific image
Returns:
----------
full path of this image
"""
assert self.image_set_index is not None, "Dataset not initialized"
name = self.image_set_index[index]
image_file = os.path.join(self.image_dir, 'images', name)
assert os.path.isfile(image_file), 'Path does not exist: {}'.format(image_file)
return image_file
|
initialize all entries from the given annotation JSON file
Parameters:
----------
anno_file: str
annotation json file
shuffle: bool
whether to shuffle image list
|
def _load_all(self, anno_file, shuffle):
"""
        initialize all entries from the given annotation JSON file
Parameters:
----------
anno_file: str
annotation json file
shuffle: bool
whether to shuffle image list
"""
image_set_index = []
labels = []
coco = COCO(anno_file)
img_ids = coco.getImgIds()
# deal with class names
cats = [cat['name'] for cat in coco.loadCats(coco.getCatIds())]
class_to_coco_ind = dict(zip(cats, coco.getCatIds()))
class_to_ind = dict(zip(self.classes, range(len(self.classes))))
coco_ind_to_class_ind = dict([(class_to_coco_ind[cls], class_to_ind[cls])
for cls in self.classes[0:]])
for img_id in img_ids:
# filename
image_info = coco.loadImgs(img_id)[0]
filename = image_info["file_name"]
subdir = filename.split('_')[1]
height = image_info["height"]
width = image_info["width"]
# label
anno_ids = coco.getAnnIds(imgIds=img_id)
annos = coco.loadAnns(anno_ids)
label = []
for anno in annos:
cat_id = coco_ind_to_class_ind[anno['category_id']]
bbox = anno["bbox"]
assert len(bbox) == 4
xmin = float(bbox[0]) / width
ymin = float(bbox[1]) / height
xmax = xmin + float(bbox[2]) / width
ymax = ymin + float(bbox[3]) / height
label.append([cat_id, xmin, ymin, xmax, ymax, 0])
if label:
labels.append(np.array(label))
image_set_index.append(os.path.join(subdir, filename))
if shuffle:
import random
indices = list(range(len(image_set_index)))
random.shuffle(indices)
image_set_index = [image_set_index[i] for i in indices]
labels = [labels[i] for i in indices]
# store the results
self.image_set_index = image_set_index
self.labels = labels
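Worked example of the normalization above: a COCO bbox [48, 24, 96, 72] in a 480x240 image
yields xmin = 48/480 = 0.1, ymin = 24/240 = 0.1, xmax = 0.1 + 96/480 = 0.3 and
ymax = 0.1 + 72/240 = 0.4.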
|
Initializes the parameters and auxiliary states.
|
def init_params(self, initializer=mx.init.Uniform(0.01), **kwargs):
"""Initializes the parameters and auxiliary states.
"""
self._module.init_params(initializer=initializer, **kwargs)
|
Forward computation. States from previous forward computation are carried
to the current iteration if `carry_state` is set to `True`.
|
def forward(self, data_batch, is_train=None, carry_state=True):
"""Forward computation. States from previous forward computation are carried
to the current iteration if `carry_state` is set to `True`.
"""
# propagate states from the previous iteration
if carry_state:
if isinstance(self._next_states, (int, float)):
self._module.set_states(value=self._next_states)
else:
self._module.set_states(states=self._next_states)
self._module.forward(data_batch, is_train=is_train)
outputs = self._module.get_outputs(merge_multi_context=False)
self._next_states = outputs[:-1]
|
Updates parameters according to the installed optimizer and the gradients computed
in the previous forward-backward batch. Gradients are clipped by their global norm
if `max_norm` is set.
Parameters
----------
max_norm: float, optional
    If set, rescale all gradients so that their global norm does not exceed this value.
|
def update(self, max_norm=None):
"""Updates parameters according to the installed optimizer and the gradients computed
in the previous forward-backward batch. Gradients are clipped by their global norm
if `max_norm` is set.
Parameters
----------
        max_norm: float, optional
            If set, rescale all gradients so that their global norm does not exceed this value.
"""
if max_norm is not None:
self._clip_by_global_norm(max_norm)
self._module.update()
|
Clips gradient norm.
The norm is computed over all gradients together, as if they were
concatenated into a single vector. Gradients are modified in-place.
The method was first introduced in
`[ICML2013] On the difficulty of training recurrent neural networks`
Parameters
----------
max_norm : float or int
The maximum clipping threshold of the gradient norm.
Returns
-------
norm_val : float
The computed norm of the gradients.
|
def _clip_by_global_norm(self, max_norm):
"""Clips gradient norm.
The norm is computed over all gradients together, as if they were
concatenated into a single vector. Gradients are modified in-place.
        The method was first introduced in
`[ICML2013] On the difficulty of training recurrent neural networks`
Parameters
----------
max_norm : float or int
The maximum clipping threshold of the gradient norm.
Returns
-------
norm_val : float
The computed norm of the gradients.
"""
assert self._module.binded and self._module.params_initialized \
and self._module.optimizer_initialized
grad_array = []
for grad in self._module._exec_group.grad_arrays:
grad_array += grad
return mx.gluon.utils.clip_global_norm(grad_array, max_norm)
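A sketch of the underlying primitive applied to standalone arrays (the values are illustrative):
import mxnet as mx
grads = [mx.nd.ones((2, 2)) * 3.0, mx.nd.ones((3,)) * 4.0]
total_norm = mx.gluon.utils.clip_global_norm(grads, max_norm=1.0)
print(total_norm)  # global norm before clipping; the arrays are rescaled in place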
|
Visualize a batch of images and save the figure to disk
:param title: title
:param X: images to be visualized
:param name: filename for the saved figure
:return:
|
def visual(title, X, name):
"""Image visualization and preservation
:param title: title
:param X: images to visualized
:param name: saved picture`s name
:return:
"""
assert len(X.shape) == 4
X = X.transpose((0, 2, 3, 1))
X = np.clip((X - np.min(X))*(255.0/(np.max(X) - np.min(X))), 0, 255).astype(np.uint8)
n = np.ceil(np.sqrt(X.shape[0]))
buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8)
for i, img in enumerate(X):
fill_buf(buff, i, img, X.shape[1:3])
buff = buff[:, :, ::-1]
plt.imshow(buff)
plt.title(title)
plt.savefig(name)
|
Resize, transpose, and normalize images for training
|
def transformer(data, label):
"""Get the translation of images"""
# resize to 64x64
data = mx.image.imresize(data, 64, 64)
# transpose from (64, 64, 3) to (3, 64, 64)
data = mx.nd.transpose(data, (2, 0, 1))
# normalize to [-1, 1]
data = data.astype(np.float32)/128 - 1
# if image is greyscale, repeat 3 times to get RGB image.
if data.shape[0] == 1:
data = mx.nd.tile(data, (3, 1, 1))
return data, label
|
Load the dataset and split it into train/valid data
:param dataset_name: string
Returns:
    train_data: DataLoader
        training data loader
    val_data: DataLoader
        validation data loader
|
def get_dataset(dataset_name):
"""Load the dataset and split it to train/valid data
:param dataset_name: string
Returns:
train_data: int array
training dataset
val_data: int array
valid dataset
"""
# mnist
if dataset == "mnist":
train_data = gluon.data.DataLoader(
gluon.data.vision.MNIST('./data', train=True, transform=transformer),
batch_size, shuffle=True, last_batch='discard')
val_data = gluon.data.DataLoader(
gluon.data.vision.MNIST('./data', train=False, transform=transformer),
batch_size, shuffle=False)
# cifar10
elif dataset == "cifar10":
train_data = gluon.data.DataLoader(
gluon.data.vision.CIFAR10('./data', train=True, transform=transformer),
batch_size, shuffle=True, last_batch='discard')
val_data = gluon.data.DataLoader(
gluon.data.vision.CIFAR10('./data', train=False, transform=transformer),
batch_size, shuffle=False)
return train_data, val_data
|
Build the DCGAN generator network
|
def get_netG():
"""Get net G"""
# build the generator
netG = nn.Sequential()
with netG.name_scope():
# input is Z, going into a convolution
netG.add(nn.Conv2DTranspose(ngf * 8, 4, 1, 0, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*8) x 4 x 4
netG.add(nn.Conv2DTranspose(ngf * 4, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*4) x 8 x 8
netG.add(nn.Conv2DTranspose(ngf * 2, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*2) x 16 x 16
netG.add(nn.Conv2DTranspose(ngf, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf) x 32 x 32
netG.add(nn.Conv2DTranspose(nc, 4, 2, 1, use_bias=False))
netG.add(nn.Activation('tanh'))
# state size. (nc) x 64 x 64
return netG
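A shape sanity check (a sketch; ngf and nc are module-level globals in this script, and the
noise dimension nz is assumed to be 100):
import mxnet as mx
netG = get_netG()
netG.initialize(mx.init.Normal(0.02))
fake = netG(mx.nd.random.normal(0, 1, shape=(4, 100, 1, 1)))
print(fake.shape)  # (4, nc, 64, 64)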
|
Build the DCGAN discriminator network
|
def get_netD():
"""Get the netD"""
# build the discriminator
netD = nn.Sequential()
with netD.name_scope():
# input is (nc) x 64 x 64
netD.add(nn.Conv2D(ndf, 4, 2, 1, use_bias=False))
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf) x 32 x 32
netD.add(nn.Conv2D(ndf * 2, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf*2) x 16 x 16
netD.add(nn.Conv2D(ndf * 4, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf*4) x 8 x 8
netD.add(nn.Conv2D(ndf * 8, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf*8) x 4 x 4
netD.add(nn.Conv2D(2, 4, 1, 0, use_bias=False))
# state size. 2 x 1 x 1
return netD
|
Create the loss function and the trainers for the generator and the discriminator
|
def get_configurations(netG, netD):
"""Get configurations for net"""
# loss
loss = gluon.loss.SoftmaxCrossEntropyLoss()
# initialize the generator and the discriminator
netG.initialize(mx.init.Normal(0.02), ctx=ctx)
netD.initialize(mx.init.Normal(0.02), ctx=ctx)
# trainer for the generator and the discriminator
trainerG = gluon.Trainer(netG.collect_params(), 'adam', {'learning_rate': opt.lr, 'beta1': opt.beta1})
trainerD = gluon.Trainer(netD.collect_params(), 'adam', {'learning_rate': opt.lr, 'beta1': opt.beta1})
return loss, trainerG, trainerD
|
Entry point to dcgan
|
def main():
"""Entry point to dcgan"""
print("|------- new changes!!!!!!!!!")
# to get the dataset and net configuration
train_data, val_data = get_dataset(dataset)
netG = get_netG()
netD = get_netD()
loss, trainerG, trainerD = get_configurations(netG, netD)
# set labels
real_label = mx.nd.ones((opt.batch_size,), ctx=ctx)
fake_label = mx.nd.zeros((opt.batch_size,), ctx=ctx)
metric = mx.metric.Accuracy()
print('Training... ')
stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
iter = 0
    # track metrics of the network
loss_d = []
loss_g = []
inception_score = []
for epoch in range(opt.nepoch):
tic = time.time()
btic = time.time()
for data, _ in train_data:
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
# train with real_t
data = data.as_in_context(ctx)
noise = mx.nd.random.normal(0, 1, shape=(opt.batch_size, nz, 1, 1), ctx=ctx)
with autograd.record():
output = netD(data)
# reshape output from (opt.batch_size, 2, 1, 1) to (opt.batch_size, 2)
output = output.reshape((opt.batch_size, 2))
errD_real = loss(output, real_label)
metric.update([real_label, ], [output, ])
with autograd.record():
fake = netG(noise)
output = netD(fake.detach())
output = output.reshape((opt.batch_size, 2))
errD_fake = loss(output, fake_label)
errD = errD_real + errD_fake
errD.backward()
metric.update([fake_label,], [output,])
trainerD.step(opt.batch_size)
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
with autograd.record():
output = netD(fake)
output = output.reshape((-1, 2))
errG = loss(output, real_label)
errG.backward()
trainerG.step(opt.batch_size)
name, acc = metric.get()
logging.info('discriminator loss = %f, generator loss = %f, binary training acc = %f at iter %d epoch %d'
, mx.nd.mean(errD).asscalar(), mx.nd.mean(errG).asscalar(), acc, iter, epoch)
if iter % niter == 0:
visual('gout', fake.asnumpy(), name=os.path.join(outf, 'fake_img_iter_%d.png' % iter))
visual('data', data.asnumpy(), name=os.path.join(outf, 'real_img_iter_%d.png' % iter))
# record the metric data
loss_d.append(errD)
loss_g.append(errG)
if opt.inception_score:
score, _ = get_inception_score(fake)
inception_score.append(score)
iter = iter + 1
btic = time.time()
name, acc = metric.get()
metric.reset()
logging.info('\nbinary training acc at epoch %d: %s=%f', epoch, name, acc)
logging.info('time: %f', time.time() - tic)
# save check_point
if check_point:
netG.save_parameters(os.path.join(outf, 'generator_epoch_%d.params' %epoch))
netD.save_parameters(os.path.join(outf, 'discriminator_epoch_%d.params' % epoch))
# save parameter
netG.save_parameters(os.path.join(outf, 'generator.params'))
netD.save_parameters(os.path.join(outf, 'discriminator.params'))
# visualization the inception_score as a picture
if opt.inception_score:
ins_save(inception_score)
|
Gets a customized logger.
.. note:: `getLogger` is deprecated. Use `get_logger` instead.
|
def getLogger(name=None, filename=None, filemode=None, level=WARNING):
"""Gets a customized logger.
.. note:: `getLogger` is deprecated. Use `get_logger` instead.
"""
warnings.warn("getLogger is deprecated, Use get_logger instead.",
DeprecationWarning, stacklevel=2)
return get_logger(name, filename, filemode, level)
|
Gets a customized logger.
Parameters
----------
name: str, optional
Name of the logger.
filename: str, optional
The filename to which the logger's output will be sent.
filemode: str, optional
The file mode to open the file (corresponding to `filename`),
default is 'a' if `filename` is not ``None``.
level: int, optional
The `logging` level for the logger.
See: https://docs.python.org/2/library/logging.html#logging-levels
Returns
-------
Logger
A customized `Logger` object.
Example
-------
## get_logger call with default parameters.
>>> from mxnet.log import get_logger
>>> logger = get_logger("Test")
>>> logger.warn("Hello World")
W0505 00:29:47 3525 <stdin>:<module>:1] Hello World
## get_logger call with WARNING level.
>>> import logging
>>> logger = get_logger("Test2", level=logging.WARNING)
>>> logger.warn("Hello World")
W0505 00:30:50 3525 <stdin>:<module>:1] Hello World
>>> logger.debug("Hello World") # This doesn't return anything as the level is logging.WARNING.
## get_logger call with DEBUG level.
>>> logger = get_logger("Test3", level=logging.DEBUG)
>>> logger.debug("Hello World") # Logs the debug output as the level is logging.DEBUG.
D0505 00:31:30 3525 <stdin>:<module>:1] Hello World
|
def get_logger(name=None, filename=None, filemode=None, level=WARNING):
"""Gets a customized logger.
Parameters
----------
name: str, optional
Name of the logger.
filename: str, optional
The filename to which the logger's output will be sent.
filemode: str, optional
The file mode to open the file (corresponding to `filename`),
default is 'a' if `filename` is not ``None``.
level: int, optional
The `logging` level for the logger.
See: https://docs.python.org/2/library/logging.html#logging-levels
Returns
-------
Logger
A customized `Logger` object.
Example
-------
## get_logger call with default parameters.
>>> from mxnet.log import get_logger
>>> logger = get_logger("Test")
>>> logger.warn("Hello World")
W0505 00:29:47 3525 <stdin>:<module>:1] Hello World
## get_logger call with WARNING level.
>>> import logging
>>> logger = get_logger("Test2", level=logging.WARNING)
>>> logger.warn("Hello World")
W0505 00:30:50 3525 <stdin>:<module>:1] Hello World
>>> logger.debug("Hello World") # This doesn't return anything as the level is logging.WARNING.
## get_logger call with DEBUG level.
>>> logger = get_logger("Test3", level=logging.DEBUG)
>>> logger.debug("Hello World") # Logs the debug output as the level is logging.DEBUG.
D0505 00:31:30 3525 <stdin>:<module>:1] Hello World
"""
logger = logging.getLogger(name)
if name is not None and not getattr(logger, '_init_done', None):
logger._init_done = True
if filename:
mode = filemode if filemode else 'a'
hdlr = logging.FileHandler(filename, mode)
else:
hdlr = logging.StreamHandler() # pylint: disable=redefined-variable-type
        # The `_Formatter` contains escape characters used for colors,
        # which are not suitable for a FileHandler.
        # TODO: maybe add a separate Formatter for FileHandler.
hdlr.setFormatter(_Formatter())
logger.addHandler(hdlr)
logger.setLevel(level)
return logger
|
data preparation
|
def transformer(data, label):
""" data preparation """
data = mx.image.imresize(data, IMAGE_SIZE, IMAGE_SIZE)
data = mx.nd.transpose(data, (2, 0, 1))
data = data.astype(np.float32) / 128.0 - 1
return data, label
|
helper function to get dataloader
|
def get_training_data(batch_size):
""" helper function to get dataloader"""
return gluon.data.DataLoader(
CIFAR10(train=True, transform=transformer),
batch_size=batch_size, shuffle=True, last_batch='discard')
|
r"""ResNet V1 model from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
ResNet V2 model from `"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
Parameters
----------
version : int
Version of ResNet. Options are 1, 2.
num_layers : int
Numbers of layers. Options are 18, 34, 50, 101, 152.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
|
def get_resnet(version, num_layers, pretrained=False, ctx=cpu(),
root=os.path.join(base.data_dir(), 'models'), **kwargs):
r"""ResNet V1 model from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
ResNet V2 model from `"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
Parameters
----------
version : int
Version of ResNet. Options are 1, 2.
num_layers : int
Numbers of layers. Options are 18, 34, 50, 101, 152.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
"""
assert num_layers in resnet_spec, \
"Invalid number of layers: %d. Options are %s"%(
num_layers, str(resnet_spec.keys()))
block_type, layers, channels = resnet_spec[num_layers]
assert version >= 1 and version <= 2, \
"Invalid resnet version: %d. Options are 1 and 2."%version
resnet_class = resnet_net_versions[version-1]
block_class = resnet_block_versions[version-1][block_type]
net = resnet_class(block_class, layers, channels, **kwargs)
if pretrained:
from ..model_store import get_model_file
net.load_parameters(get_model_file('resnet%d_v%d'%(num_layers, version),
root=root), ctx=ctx)
return net
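A quick sketch of constructing and running the network (no pretrained weights are downloaded here):
import mxnet as mx
from mxnet.gluon.model_zoo.vision import get_resnet
net = get_resnet(2, 18)
net.initialize()
print(net(mx.nd.zeros((1, 3, 224, 224))).shape)  # (1, 1000)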
|
Helper function for random generators.
|
def _random_helper(random, sampler, params, shape, dtype, kwargs):
"""Helper function for random generators."""
if isinstance(params[0], Symbol):
for i in params[1:]:
assert isinstance(i, Symbol), \
"Distribution parameters must all have the same type, but got " \
"both %s and %s."%(type(params[0]), type(i))
return sampler(*params, shape=shape, dtype=dtype, **kwargs)
elif isinstance(params[0], numeric_types):
for i in params[1:]:
assert isinstance(i, numeric_types), \
"Distribution parameters must all have the same type, but got " \
"both %s and %s."%(type(params[0]), type(i))
return random(*params, shape=shape, dtype=dtype, **kwargs)
raise ValueError("Distribution parameters must be either Symbol or numbers, "
"but got %s."%type(params[0]))
|
Draw random samples from a Poisson distribution.
Samples are distributed according to a Poisson distribution parametrized
by *lambda* (rate). Samples will always be returned as a floating point data type.
Parameters
----------
lam : float or Symbol, optional
    Expected rate of events (lambda); should be >= 0.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `lam` is
a scalar, output shape will be `(m, n)`. If `lam`
    is a Symbol with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each entry in `lam`.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
Returns
-------
Symbol
If input `shape` has dimensions, e.g., `(m, n)`, and `lam` is
a scalar, output shape will be `(m, n)`. If `lam`
    is a Symbol with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each entry in `lam`.
|
def poisson(lam=1, shape=_Null, dtype=_Null, **kwargs):
"""Draw random samples from a Poisson distribution.
Samples are distributed according to a Poisson distribution parametrized
by *lambda* (rate). Samples will always be returned as a floating point data type.
Parameters
----------
lam : float or Symbol, optional
        Expected rate of events (lambda); should be >= 0.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `lam` is
a scalar, output shape will be `(m, n)`. If `lam`
        is a Symbol with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each entry in `lam`.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
Returns
-------
Symbol
If input `shape` has dimensions, e.g., `(m, n)`, and `lam` is
a scalar, output shape will be `(m, n)`. If `lam`
        is a Symbol with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each entry in `lam`.
"""
return _random_helper(_internal._random_poisson, _internal._sample_poisson,
[lam], shape, dtype, kwargs)
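For example, a sketch that binds and evaluates the symbol on CPU:
import mxnet as mx
s = mx.sym.random.poisson(lam=4, shape=(2, 3))
print(s.eval()[0].shape)  # (2, 3)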
|
Draw random samples from a generalized negative binomial distribution.
Samples are distributed according to a generalized negative binomial
distribution parametrized by *mu* (mean) and *alpha* (dispersion).
*alpha* is defined as *1/k* where *k* is the failure limit of the
number of unsuccessful experiments (generalized to real numbers).
Samples will always be returned as a floating point data type.
Parameters
----------
mu : float or Symbol, optional
Mean of the negative binomial distribution.
alpha : float or Symbol, optional
Alpha (dispersion) parameter of the negative binomial distribution.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `mu` and
`alpha` are scalars, output shape will be `(m, n)`. If `mu` and `alpha`
are Symbols with shape, e.g., `(x, y)`, then output will have shape
    `(x, y, m, n)`, where `m*n` samples are drawn for each `(mu, alpha)` pair.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
Returns
-------
Symbol
If input `shape` has dimensions, e.g., `(m, n)`, and `mu` and
`alpha` are scalars, returned Symbol will resolve to shape `(m, n)`. If `mu`
and `alpha` are Symbols with shape, e.g., `(x, y)`, returned Symbol will resolve
    to shape `(x, y, m, n)`, where `m*n` samples are drawn for each `(mu, alpha)` pair.
|
def generalized_negative_binomial(mu=1, alpha=1, shape=_Null, dtype=_Null, **kwargs):
"""Draw random samples from a generalized negative binomial distribution.
Samples are distributed according to a generalized negative binomial
distribution parametrized by *mu* (mean) and *alpha* (dispersion).
*alpha* is defined as *1/k* where *k* is the failure limit of the
number of unsuccessful experiments (generalized to real numbers).
Samples will always be returned as a floating point data type.
Parameters
----------
mu : float or Symbol, optional
Mean of the negative binomial distribution.
alpha : float or Symbol, optional
Alpha (dispersion) parameter of the negative binomial distribution.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `mu` and
`alpha` are scalars, output shape will be `(m, n)`. If `mu` and `alpha`
are Symbols with shape, e.g., `(x, y)`, then output will have shape
        `(x, y, m, n)`, where `m*n` samples are drawn for each `(mu, alpha)` pair.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
Returns
-------
Symbol
If input `shape` has dimensions, e.g., `(m, n)`, and `mu` and
`alpha` are scalars, returned Symbol will resolve to shape `(m, n)`. If `mu`
and `alpha` are Symbols with shape, e.g., `(x, y)`, returned Symbol will resolve
        to shape `(x, y, m, n)`, where `m*n` samples are drawn for each `(mu, alpha)` pair.
"""
return _random_helper(_internal._random_generalized_negative_binomial,
_internal._sample_generalized_negative_binomial,
[mu, alpha], shape, dtype, kwargs)
|
Concurrent sampling from multiple multinomial distributions.
.. note:: The input distribution must be normalized, i.e. `data` must sum to
1 along its last dimension.
Parameters
----------
data : Symbol
An *n* dimensional array whose last dimension has length `k`, where
`k` is the number of possible outcomes of each multinomial distribution.
For example, data with shape `(m, n, k)` specifies `m*n` multinomial
distributions each with `k` possible outcomes.
shape : int or tuple of ints, optional
The number of samples to draw from each distribution. If shape is empty
one sample will be drawn from each distribution.
get_prob : bool, optional
If true, a second array containing log likelihood of the drawn
samples will also be returned.
This is usually used for reinforcement learning, where you can provide
reward as head gradient w.r.t. this array to estimate gradient.
dtype : str or numpy.dtype, optional
Data type of the sample output array. The default is int32.
Note that the data type of the log likelihood array is the same with that of `data`.
Returns
-------
Symbol
For input `data` with `n` dimensions and shape `(d1, d2, ..., dn-1, k)`, and input
    `shape` with shape `(s1, s2, ..., sx)`, returns a Symbol that resolves to shape
`(d1, d2, ... dn-1, s1, s2, ..., sx)`. The `s1, s2, ... sx` dimensions of the
returned Symbol's resolved value will consist of 0-indexed values sampled from each
respective multinomial distribution provided in the `k` dimension of `data`.
For the case `n`=1, and `x`=1 (one shape dimension), returned Symbol will resolve to
shape `(s1,)`.
If `get_prob` is set to True, this function returns a Symbol that will resolve to a list of
outputs: `[ndarray_output, log_likelihood_output]`, where `log_likelihood_output` will resolve
to the same shape as the sampled outputs in ndarray_output.
|
def multinomial(data, shape=_Null, get_prob=True, dtype='int32', **kwargs):
"""Concurrent sampling from multiple multinomial distributions.
.. note:: The input distribution must be normalized, i.e. `data` must sum to
1 along its last dimension.
Parameters
----------
data : Symbol
An *n* dimensional array whose last dimension has length `k`, where
`k` is the number of possible outcomes of each multinomial distribution.
For example, data with shape `(m, n, k)` specifies `m*n` multinomial
distributions each with `k` possible outcomes.
shape : int or tuple of ints, optional
The number of samples to draw from each distribution. If shape is empty
one sample will be drawn from each distribution.
get_prob : bool, optional
If true, a second array containing log likelihood of the drawn
samples will also be returned.
This is usually used for reinforcement learning, where you can provide
reward as head gradient w.r.t. this array to estimate gradient.
dtype : str or numpy.dtype, optional
Data type of the sample output array. The default is int32.
Note that the data type of the log likelihood array is the same with that of `data`.
Returns
-------
Symbol
For input `data` with `n` dimensions and shape `(d1, d2, ..., dn-1, k)`, and input
    `shape` with shape `(s1, s2, ..., sx)`, returns a Symbol that resolves to shape
`(d1, d2, ... dn-1, s1, s2, ..., sx)`. The `s1, s2, ... sx` dimensions of the
returned Symbol's resolved value will consist of 0-indexed values sampled from each
respective multinomial distribution provided in the `k` dimension of `data`.
For the case `n`=1, and `x`=1 (one shape dimension), returned Symbol will resolve to
shape `(s1,)`.
If `get_prob` is set to True, this function returns a Symbol that will resolve to a list of
outputs: `[ndarray_output, log_likelihood_output]`, where `log_likelihood_output` will resolve
to the same shape as the sampled outputs in ndarray_output.
"""
return _internal._sample_multinomial(data, shape, get_prob, dtype=dtype, **kwargs)
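For example, a sketch drawing one sample per row from two normalized distributions:
import mxnet as mx
data = mx.sym.Variable('data')
s = mx.sym.random.multinomial(data, get_prob=False)
probs = mx.nd.array([[0.1, 0.9], [0.7, 0.3]])
print(s.eval(data=probs)[0])  # e.g. [1 0]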
|
Single-shot multi-box detection with a VGG-16 ConvNet.
This is a modified version with the fc6/fc7 layers replaced by conv layers,
so the network is slightly smaller than the original VGG-16 network.
This is a training network with losses.
Parameters:
----------
num_classes: int
number of object classes not including background
nms_thresh : float
non-maximum suppression threshold
force_suppress : boolean
whether suppress different class objects
nms_topk : int
apply NMS to top K detections
Returns:
----------
mx.Symbol
|
def get_symbol_train(num_classes=20, nms_thresh=0.5, force_suppress=False,
nms_topk=400, **kwargs):
"""
    Single-shot multi-box detection with a VGG-16 ConvNet.
    This is a modified version with the fc6/fc7 layers replaced by conv layers,
    so the network is slightly smaller than the original VGG-16 network.
    This is a training network with losses.
Parameters:
----------
num_classes: int
number of object classes not including background
nms_thresh : float
non-maximum suppression threshold
force_suppress : boolean
whether suppress different class objects
nms_topk : int
apply NMS to top K detections
Returns:
----------
mx.Symbol
"""
data = mx.symbol.Variable(name="data")
label = mx.symbol.Variable(name="label")
# group 1
conv1_1 = mx.symbol.Convolution(
data=data, kernel=(3, 3), pad=(1, 1), num_filter=64, name="conv1_1")
relu1_1 = mx.symbol.Activation(data=conv1_1, act_type="relu", name="relu1_1")
conv1_2 = mx.symbol.Convolution(
data=relu1_1, kernel=(3, 3), pad=(1, 1), num_filter=64, name="conv1_2")
relu1_2 = mx.symbol.Activation(data=conv1_2, act_type="relu", name="relu1_2")
pool1 = mx.symbol.Pooling(
data=relu1_2, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool1")
# group 2
conv2_1 = mx.symbol.Convolution(
data=pool1, kernel=(3, 3), pad=(1, 1), num_filter=128, name="conv2_1")
relu2_1 = mx.symbol.Activation(data=conv2_1, act_type="relu", name="relu2_1")
conv2_2 = mx.symbol.Convolution(
data=relu2_1, kernel=(3, 3), pad=(1, 1), num_filter=128, name="conv2_2")
relu2_2 = mx.symbol.Activation(data=conv2_2, act_type="relu", name="relu2_2")
pool2 = mx.symbol.Pooling(
data=relu2_2, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool2")
# group 3
conv3_1 = mx.symbol.Convolution(
data=pool2, kernel=(3, 3), pad=(1, 1), num_filter=256, name="conv3_1")
relu3_1 = mx.symbol.Activation(data=conv3_1, act_type="relu", name="relu3_1")
conv3_2 = mx.symbol.Convolution(
data=relu3_1, kernel=(3, 3), pad=(1, 1), num_filter=256, name="conv3_2")
relu3_2 = mx.symbol.Activation(data=conv3_2, act_type="relu", name="relu3_2")
conv3_3 = mx.symbol.Convolution(
data=relu3_2, kernel=(3, 3), pad=(1, 1), num_filter=256, name="conv3_3")
relu3_3 = mx.symbol.Activation(data=conv3_3, act_type="relu", name="relu3_3")
pool3 = mx.symbol.Pooling(
data=relu3_3, pool_type="max", kernel=(2, 2), stride=(2, 2), \
pooling_convention="full", name="pool3")
# group 4
conv4_1 = mx.symbol.Convolution(
data=pool3, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv4_1")
relu4_1 = mx.symbol.Activation(data=conv4_1, act_type="relu", name="relu4_1")
conv4_2 = mx.symbol.Convolution(
data=relu4_1, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv4_2")
relu4_2 = mx.symbol.Activation(data=conv4_2, act_type="relu", name="relu4_2")
conv4_3 = mx.symbol.Convolution(
data=relu4_2, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv4_3")
relu4_3 = mx.symbol.Activation(data=conv4_3, act_type="relu", name="relu4_3")
pool4 = mx.symbol.Pooling(
data=relu4_3, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool4")
# group 5
conv5_1 = mx.symbol.Convolution(
data=pool4, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv5_1")
relu5_1 = mx.symbol.Activation(data=conv5_1, act_type="relu", name="relu5_1")
conv5_2 = mx.symbol.Convolution(
data=relu5_1, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv5_2")
relu5_2 = mx.symbol.Activation(data=conv5_2, act_type="relu", name="relu5_2")
conv5_3 = mx.symbol.Convolution(
data=relu5_2, kernel=(3, 3), pad=(1, 1), num_filter=512, name="conv5_3")
relu5_3 = mx.symbol.Activation(data=conv5_3, act_type="relu", name="relu5_3")
pool5 = mx.symbol.Pooling(
data=relu5_3, pool_type="max", kernel=(3, 3), stride=(1, 1),
pad=(1,1), name="pool5")
# group 6
conv6 = mx.symbol.Convolution(
data=pool5, kernel=(3, 3), pad=(6, 6), dilate=(6, 6),
num_filter=1024, name="conv6")
relu6 = mx.symbol.Activation(data=conv6, act_type="relu", name="relu6")
# drop6 = mx.symbol.Dropout(data=relu6, p=0.5, name="drop6")
# group 7
conv7 = mx.symbol.Convolution(
data=relu6, kernel=(1, 1), pad=(0, 0), num_filter=1024, name="conv7")
relu7 = mx.symbol.Activation(data=conv7, act_type="relu", name="relu7")
# drop7 = mx.symbol.Dropout(data=relu7, p=0.5, name="drop7")
### ssd extra layers ###
conv8_1, relu8_1 = legacy_conv_act_layer(relu7, "8_1", 256, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
conv8_2, relu8_2 = legacy_conv_act_layer(relu8_1, "8_2", 512, kernel=(3,3), pad=(1,1), \
stride=(2,2), act_type="relu", use_batchnorm=False)
conv9_1, relu9_1 = legacy_conv_act_layer(relu8_2, "9_1", 128, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
conv9_2, relu9_2 = legacy_conv_act_layer(relu9_1, "9_2", 256, kernel=(3,3), pad=(1,1), \
stride=(2,2), act_type="relu", use_batchnorm=False)
conv10_1, relu10_1 = legacy_conv_act_layer(relu9_2, "10_1", 128, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
conv10_2, relu10_2 = legacy_conv_act_layer(relu10_1, "10_2", 256, kernel=(3,3), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
conv11_1, relu11_1 = legacy_conv_act_layer(relu10_2, "11_1", 128, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
conv11_2, relu11_2 = legacy_conv_act_layer(relu11_1, "11_2", 256, kernel=(3,3), pad=(0,0), \
stride=(1,1), act_type="relu", use_batchnorm=False)
# specific parameters for VGG16 network
from_layers = [relu4_3, relu7, relu8_2, relu9_2, relu10_2, relu11_2]
sizes = [[.1, .141], [.2,.272], [.37, .447], [.54, .619], [.71, .79], [.88, .961]]
ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \
[1,2,.5], [1,2,.5]]
normalizations = [20, -1, -1, -1, -1, -1]
steps = [ x / 300.0 for x in [8, 16, 32, 64, 100, 300]]
num_channels = [512]
loc_preds, cls_preds, anchor_boxes = multibox_layer(from_layers, \
num_classes, sizes=sizes, ratios=ratios, normalization=normalizations, \
num_channels=num_channels, clip=False, interm_layer=0, steps=steps)
tmp = mx.symbol.contrib.MultiBoxTarget(
*[anchor_boxes, label, cls_preds], overlap_threshold=.5, \
ignore_label=-1, negative_mining_ratio=3, minimum_negative_samples=0, \
negative_mining_thresh=.5, variances=(0.1, 0.1, 0.2, 0.2),
name="multibox_target")
loc_target = tmp[0]
loc_target_mask = tmp[1]
cls_target = tmp[2]
cls_prob = mx.symbol.SoftmaxOutput(data=cls_preds, label=cls_target, \
ignore_label=-1, use_ignore=True, grad_scale=1., multi_output=True, \
normalization='valid', name="cls_prob")
loc_loss_ = mx.symbol.smooth_l1(name="loc_loss_", \
data=loc_target_mask * (loc_preds - loc_target), scalar=1.0)
loc_loss = mx.symbol.MakeLoss(loc_loss_, grad_scale=1., \
normalization='valid', name="loc_loss")
# monitoring training status
cls_label = mx.symbol.MakeLoss(data=cls_target, grad_scale=0, name="cls_label")
det = mx.symbol.contrib.MultiBoxDetection(*[cls_prob, loc_preds, anchor_boxes], \
name="detection", nms_threshold=nms_thresh, force_suppress=force_suppress,
variances=(0.1, 0.1, 0.2, 0.2), nms_topk=nms_topk)
det = mx.symbol.MakeLoss(data=det, grad_scale=0, name="det_out")
# group output
out = mx.symbol.Group([cls_prob, loc_loss, cls_label, det])
return out
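A minimal usage sketch (assuming get_symbol_train is in scope and takes num_classes as its first argument, as the detection wrapper below suggests); the grouped symbol exposes the four training-time outputs by name:
>>> net = get_symbol_train(20)
>>> net.list_outputs()
['cls_prob_output', 'loc_loss_output', 'cls_label_output', 'det_out_output']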
|
Single-shot multi-box detection with a VGG-16 ConvNet
This is a modified version, with the fc6/fc7 layers replaced by conv layers,
making the network slightly smaller than the original VGG-16 network
This is the detection network
Parameters:
----------
num_classes: int
number of object classes not including background
nms_thresh : float
threshold of overlap for non-maximum suppression
force_suppress : boolean
    whether to suppress overlapping objects of different classes
nms_topk : int
apply NMS to top K detections
Returns:
----------
mx.Symbol
|
def get_symbol(num_classes=20, nms_thresh=0.5, force_suppress=False,
nms_topk=400, **kwargs):
"""
    Single-shot multi-box detection with a VGG-16 ConvNet
    This is a modified version, with the fc6/fc7 layers replaced by conv layers,
    making the network slightly smaller than the original VGG-16 network
    This is the detection network
Parameters:
----------
num_classes: int
number of object classes not including background
nms_thresh : float
threshold of overlap for non-maximum suppression
    force_suppress : boolean
        whether to suppress overlapping objects of different classes
nms_topk : int
apply NMS to top K detections
Returns:
----------
mx.Symbol
"""
net = get_symbol_train(num_classes)
cls_preds = net.get_internals()["multibox_cls_pred_output"]
loc_preds = net.get_internals()["multibox_loc_pred_output"]
anchor_boxes = net.get_internals()["multibox_anchors_output"]
cls_prob = mx.symbol.softmax(data=cls_preds, axis=1, name='cls_prob')
out = mx.symbol.contrib.MultiBoxDetection(*[cls_prob, loc_preds, anchor_boxes], \
name="detection", nms_threshold=nms_thresh, force_suppress=force_suppress,
variances=(0.1, 0.1, 0.2, 0.2), nms_topk=nms_topk)
return out
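A hedged usage sketch: the detection symbol consumes only 'data', so it can be inspected without any label input:
>>> sym = get_symbol(num_classes=20, nms_thresh=0.45, nms_topk=400)
>>> sym.list_outputs()
['detection_output']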
|
Creates a model from previously saved checkpoint.
Parameters
----------
prefix : str
path prefix of saved model files. You should have
"prefix-symbol.json", "prefix-xxxx.params", and
optionally "prefix-xxxx.states", where xxxx is the
epoch number.
epoch : int
epoch to load.
load_optimizer_states : bool
whether to load optimizer states. Checkpoint needs
to have been made with save_optimizer_states=True.
data_names : list of str
    Default is `('data',)` for a typical model used in image classification.
label_names : list of str
    Default is `('softmax_label',)` for a typical model used in image
    classification.
logger : Logger
Default is `logging`.
context : Context or list of Context
Default is ``cpu()``.
work_load_list : list of number
Default ``None``, indicating uniform workload.
fixed_param_names: list of str
Default ``None``, indicating no network parameters are fixed.
|
def load(prefix, epoch, load_optimizer_states=False, **kwargs):
"""Creates a model from previously saved checkpoint.
Parameters
----------
prefix : str
path prefix of saved model files. You should have
"prefix-symbol.json", "prefix-xxxx.params", and
optionally "prefix-xxxx.states", where xxxx is the
epoch number.
epoch : int
epoch to load.
load_optimizer_states : bool
whether to load optimizer states. Checkpoint needs
to have been made with save_optimizer_states=True.
    data_names : list of str
        Default is `('data',)` for a typical model used in image classification.
    label_names : list of str
        Default is `('softmax_label',)` for a typical model used in image
        classification.
logger : Logger
Default is `logging`.
context : Context or list of Context
Default is ``cpu()``.
work_load_list : list of number
Default ``None``, indicating uniform workload.
fixed_param_names: list of str
Default ``None``, indicating no network parameters are fixed.
"""
sym, args, auxs = load_checkpoint(prefix, epoch)
mod = Module(symbol=sym, **kwargs)
mod._arg_params = args
mod._aux_params = auxs
mod.params_initialized = True
if load_optimizer_states:
mod._preload_opt_states = '%s-%04d.states'%(prefix, epoch)
return mod
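A minimal sketch, assuming a checkpoint was saved earlier under the hypothetical prefix 'ssd_vgg16' (so 'ssd_vgg16-symbol.json', 'ssd_vgg16-0010.params', and 'ssd_vgg16-0010.states' exist):
>>> mod = load('ssd_vgg16', 10, load_optimizer_states=True,
...            data_names=('data',), context=mx.cpu())
>>> mod.params_initialized
True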
|
Saves current progress to checkpoint.
Use `mx.callback.module_checkpoint` as `epoch_end_callback` to save during training.
Parameters
----------
prefix : str
The file prefix to checkpoint to.
epoch : int
The current epoch number.
save_optimizer_states : bool
Whether to save optimizer states to continue training.
|
def save_checkpoint(self, prefix, epoch, save_optimizer_states=False):
"""Saves current progress to checkpoint.
Use `mx.callback.module_checkpoint` as `epoch_end_callback` to save during training.
Parameters
----------
prefix : str
The file prefix to checkpoint to.
epoch : int
The current epoch number.
save_optimizer_states : bool
Whether to save optimizer states to continue training.
"""
self._symbol.save('%s-symbol.json'%prefix)
param_name = '%s-%04d.params' % (prefix, epoch)
self.save_params(param_name)
logging.info('Saved checkpoint to \"%s\"', param_name)
if save_optimizer_states:
state_name = '%s-%04d.states' % (prefix, epoch)
self.save_optimizer_states(state_name)
logging.info('Saved optimizer state to \"%s\"', state_name)
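For example (hypothetical prefix), checkpointing at epoch 10 with optimizer states writes three files:
>>> mod.save_checkpoint('ssd_vgg16', 10, save_optimizer_states=True)
>>> # writes ssd_vgg16-symbol.json, ssd_vgg16-0010.params, ssd_vgg16-0010.states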
|
Internal function to reset the bound state.
|
def _reset_bind(self):
"""Internal function to reset binded state."""
self.binded = False
self._exec_group = None
self._data_shapes = None
self._label_shapes = None
|
Gets current parameters.
Returns
-------
`(arg_params, aux_params)`
A pair of dictionaries each mapping parameter names to NDArray values.
|
def get_params(self):
"""Gets current parameters.
Returns
-------
`(arg_params, aux_params)`
A pair of dictionaries each mapping parameter names to NDArray values.
"""
assert self.binded and self.params_initialized
if self._params_dirty:
self._sync_params_from_devices()
return (self._arg_params, self._aux_params)
|
Initializes the parameters and auxiliary states.
Parameters
----------
initializer : Initializer
Called to initialize parameters if needed.
arg_params : dict
If not ``None``, should be a dictionary of existing arg_params. Initialization
will be copied from that.
aux_params : dict
If not ``None``, should be a dictionary of existing aux_params. Initialization
will be copied from that.
allow_missing : bool
If ``True``, params could contain missing values, and the initializer will be
called to fill those missing params.
force_init : bool
If ``True``, will force re-initialize even if already initialized.
allow_extra : boolean, optional
    Whether to allow extra parameters that are not needed by the symbol.
    If this is ``True``, no error will be thrown when arg_params or aux_params
    contain extra parameters not needed by the executor.
|
def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
allow_missing=False, force_init=False, allow_extra=False):
"""Initializes the parameters and auxiliary states.
Parameters
----------
initializer : Initializer
Called to initialize parameters if needed.
arg_params : dict
If not ``None``, should be a dictionary of existing arg_params. Initialization
will be copied from that.
aux_params : dict
If not ``None``, should be a dictionary of existing aux_params. Initialization
will be copied from that.
allow_missing : bool
If ``True``, params could contain missing values, and the initializer will be
called to fill those missing params.
force_init : bool
If ``True``, will force re-initialize even if already initialized.
allow_extra : boolean, optional
        Whether to allow extra parameters that are not needed by the symbol.
        If this is ``True``, no error will be thrown when arg_params or aux_params
        contain extra parameters not needed by the executor.
"""
if self.params_initialized and not force_init:
warnings.warn("Parameters already initialized and force_init=False. "
"init_params call ignored.", stacklevel=2)
return
assert self.binded, 'call bind before initializing the parameters'
def _impl(name, arr, cache):
"""Internal helper for parameter initialization"""
if cache is not None:
if name in cache:
cache_arr = cache[name]
# just in case the cached array is just the target itself
if cache_arr is not arr:
cache_arr.copyto(arr)
else:
if not allow_missing:
                    raise RuntimeError("%s is not present" % name)
if initializer is not None:
initializer(name, arr)
else:
initializer(name, arr)
attrs = self._symbol.attr_dict()
for name, arr in sorted(self._arg_params.items()):
desc = InitDesc(name, attrs.get(name, None))
_impl(desc, arr, arg_params)
for name, arr in sorted(self._aux_params.items()):
desc = InitDesc(name, attrs.get(name, None))
_impl(desc, arr, aux_params)
self.params_initialized = True
self._params_dirty = False
# copy the initialized parameters to devices
self._exec_group.set_params(self._arg_params, self._aux_params,
allow_extra=allow_extra)
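A minimal sketch of the usual bind-then-init sequence (the input shape is illustrative):
>>> mod.bind(data_shapes=[('data', (32, 3, 300, 300))])
>>> mod.init_params(initializer=mx.init.Xavier(magnitude=2.0))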
|
Assigns parameter and aux state values.
Parameters
----------
arg_params : dict
Dictionary of name to `NDArray`.
aux_params : dict
Dictionary of name to `NDArray`.
allow_missing : bool
If ``True``, params could contain missing values, and the initializer will be
called to fill those missing params.
force_init : bool
If ``True``, will force re-initialize even if already initialized.
allow_extra : boolean, optional
    Whether to allow extra parameters that are not needed by the symbol.
    If this is ``True``, no error will be thrown when arg_params or aux_params
    contain extra parameters not needed by the executor.
Examples
--------
>>> # An example of setting module parameters.
>>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, n_epoch_load)
>>> mod.set_params(arg_params=arg_params, aux_params=aux_params)
|
def set_params(self, arg_params, aux_params, allow_missing=False, force_init=True,
allow_extra=False):
"""Assigns parameter and aux state values.
Parameters
----------
arg_params : dict
Dictionary of name to `NDArray`.
aux_params : dict
Dictionary of name to `NDArray`.
allow_missing : bool
If ``True``, params could contain missing values, and the initializer will be
called to fill those missing params.
force_init : bool
If ``True``, will force re-initialize even if already initialized.
allow_extra : boolean, optional
        Whether to allow extra parameters that are not needed by the symbol.
        If this is ``True``, no error will be thrown when arg_params or aux_params
        contain extra parameters not needed by the executor.
Examples
--------
>>> # An example of setting module parameters.
>>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, n_epoch_load)
>>> mod.set_params(arg_params=arg_params, aux_params=aux_params)
"""
if not allow_missing:
self.init_params(initializer=None, arg_params=arg_params, aux_params=aux_params,
allow_missing=allow_missing, force_init=force_init,
allow_extra=allow_extra)
return
if self.params_initialized and not force_init:
warnings.warn("Parameters already initialized and force_init=False. "
"set_params call ignored.", stacklevel=2)
return
self._exec_group.set_params(arg_params, aux_params, allow_extra=allow_extra)
# because we didn't update self._arg_params, they are dirty now.
self._params_dirty = True
self.params_initialized = True
|
Binds the symbols to construct executors. This is necessary before one
can perform computation with the module.
Parameters
----------
data_shapes : list of (str, tuple)
Typically is ``data_iter.provide_data``.
label_shapes : list of (str, tuple)
Typically is ``data_iter.provide_label``.
for_training : bool
Default is ``True``. Whether the executors should be bound for training.
inputs_need_grad : bool
Default is ``False``. Whether the gradients to the input data need to be computed.
Typically this is not needed. But this might be needed when implementing composition
of modules.
force_rebind : bool
Default is ``False``. This function does nothing if the executors are already
bound. But with this ``True``, the executors will be forced to rebind.
shared_module : Module
Default is ``None``. This is used in bucketing. When not ``None``, the shared module
essentially corresponds to a different bucket -- a module with different symbol
but with the same sets of parameters (e.g. unrolled RNNs with different lengths).
|
def bind(self, data_shapes, label_shapes=None, for_training=True,
inputs_need_grad=False, force_rebind=False, shared_module=None,
grad_req='write'):
"""Binds the symbols to construct executors. This is necessary before one
can perform computation with the module.
Parameters
----------
data_shapes : list of (str, tuple)
Typically is ``data_iter.provide_data``.
label_shapes : list of (str, tuple)
Typically is ``data_iter.provide_label``.
for_training : bool
Default is ``True``. Whether the executors should be bound for training.
inputs_need_grad : bool
Default is ``False``. Whether the gradients to the input data need to be computed.
Typically this is not needed. But this might be needed when implementing composition
of modules.
force_rebind : bool
Default is ``False``. This function does nothing if the executors are already
bound. But with this ``True``, the executors will be forced to rebind.
shared_module : Module
Default is ``None``. This is used in bucketing. When not ``None``, the shared module
essentially corresponds to a different bucket -- a module with different symbol
but with the same sets of parameters (e.g. unrolled RNNs with different lengths).
"""
    # force rebinding is typically used when one wants to switch from
    # the training phase to the prediction phase.
if force_rebind:
self._reset_bind()
if self.binded:
self.logger.warning('Already bound, ignoring bind()')
return
self.for_training = for_training
self.inputs_need_grad = inputs_need_grad
self._grad_req = grad_req
if not for_training:
assert not inputs_need_grad
else:
pass
        # this does not hold, as some modules might not contain a loss function
# that consumes the labels
# assert label_shapes is not None
self._data_shapes, self._label_shapes = _parse_data_desc(
self.data_names, self.label_names, data_shapes, label_shapes)
if shared_module is not None:
assert isinstance(shared_module, Module) and \
shared_module.binded and shared_module.params_initialized
shared_group = shared_module._exec_group
assert len(shared_group.execs) >= len(self._context)
else:
shared_group = None
self._exec_group = DataParallelExecutorGroup(self._symbol, self._context,
self._work_load_list, self._data_shapes,
self._label_shapes, self._param_names,
for_training, inputs_need_grad,
shared_group, logger=self.logger,
fixed_param_names=self._fixed_param_names,
grad_req=grad_req, group2ctxs=self._group2ctxs,
state_names=self._state_names)
self._total_exec_bytes = self._exec_group._total_exec_bytes
if shared_module is not None:
self.params_initialized = True
self._arg_params = shared_module._arg_params
self._aux_params = shared_module._aux_params
elif self.params_initialized:
# if the parameters are already initialized, we are re-binding
# so automatically copy the already initialized params
self._exec_group.set_params(self._arg_params, self._aux_params)
else:
assert self._arg_params is None and self._aux_params is None
param_arrays = [
zeros(shape=x[0].shape, dtype=x[0].dtype, stype=x[0].stype)
for x in self._exec_group.param_arrays
]
self._arg_params = {name:arr for name, arr in zip(self._param_names, param_arrays)}
aux_arrays = [
zeros(x[0].shape, dtype=x[0].dtype)
for x in self._exec_group.aux_arrays
]
self._aux_params = {name:arr for name, arr in zip(self._aux_names, aux_arrays)}
if shared_module is not None and shared_module.optimizer_initialized:
self.borrow_optimizer(shared_module)
self.binded = True
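A hedged example of binding against a data iterator's descriptors; train_iter is an assumed mx.io.DataIter:
>>> mod.bind(data_shapes=train_iter.provide_data,
...          label_shapes=train_iter.provide_label,
...          for_training=True)
>>> mod.binded
True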
|
Reshapes the module for new input shapes.
Parameters
----------
data_shapes : list of (str, tuple)
Typically is ``data_iter.provide_data``.
label_shapes : list of (str, tuple)
Typically is ``data_iter.provide_label``.
|
def reshape(self, data_shapes, label_shapes=None):
"""Reshapes the module for new input shapes.
Parameters
----------
data_shapes : list of (str, tuple)
Typically is ``data_iter.provide_data``.
label_shapes : list of (str, tuple)
Typically is ``data_iter.provide_label``.
"""
assert self.binded
self._data_shapes, self._label_shapes = _parse_data_desc(
self.data_names, self.label_names, data_shapes, label_shapes)
self._exec_group.reshape(self._data_shapes, self._label_shapes)
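For example, switching a bound module to a new batch size without a full rebind (the shape is illustrative):
>>> mod.reshape(data_shapes=[('data', (64, 3, 300, 300))])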
|
Installs and initializes optimizers.
Parameters
----------
kvstore : str or KVStore
Default `'local'`.
optimizer : str or Optimizer
Default `'sgd'`
optimizer_params : dict
    Default `(('learning_rate', 0.01),)`. The default value is not a dictionary,
    just to avoid a pylint warning about dangerous default values.
force_init : bool
Default ``False``, indicating whether we should force re-initializing the
optimizer in the case an optimizer is already installed.
|
def init_optimizer(self, kvstore='local', optimizer='sgd',
optimizer_params=(('learning_rate', 0.01),), force_init=False):
"""Installs and initializes optimizers.
Parameters
----------
kvstore : str or KVStore
Default `'local'`.
optimizer : str or Optimizer
Default `'sgd'`
optimizer_params : dict
        Default `(('learning_rate', 0.01),)`. The default value is not a dictionary,
        just to avoid a pylint warning about dangerous default values.
force_init : bool
Default ``False``, indicating whether we should force re-initializing the
optimizer in the case an optimizer is already installed.
"""
assert self.binded and self.params_initialized
if self.optimizer_initialized and not force_init:
self.logger.warning('optimizer already initialized, ignoring...')
return
if self._params_dirty:
self._sync_params_from_devices()
(kvstore, update_on_kvstore) = \
_create_kvstore(kvstore, len(self._context), self._arg_params)
batch_size = self._exec_group.batch_size
if kvstore and 'dist' in kvstore.type and '_sync' in kvstore.type:
batch_size *= kvstore.num_workers
rescale_grad = 1.0/batch_size
idx2name = {}
if update_on_kvstore:
idx2name.update(enumerate(self._exec_group.param_names))
else:
for k in range(len(self._context)):
idx2name.update({i*len(self._context)+k: n
for i, n in enumerate(self._exec_group.param_names)})
if isinstance(optimizer, str):
optimizer_params = dict(optimizer_params)
if 'rescale_grad' not in optimizer_params:
optimizer_params['rescale_grad'] = rescale_grad
optimizer = opt.create(optimizer,
sym=self.symbol, param_idx2name=idx2name,
**optimizer_params)
else:
assert isinstance(optimizer, opt.Optimizer)
if optimizer.rescale_grad != rescale_grad:
#pylint: disable=no-member
warnings.warn(
"Optimizer created manually outside Module but rescale_grad " +
"is not normalized to 1.0/batch_size/num_workers (%s vs. %s). "%(
optimizer.rescale_grad, rescale_grad) +
"Is this intended?", stacklevel=2)
if not optimizer.idx2name:
optimizer.idx2name = idx2name.copy()
self._optimizer = optimizer
self._kvstore = kvstore
self._update_on_kvstore = update_on_kvstore
self._updater = None
if kvstore:
if self._compression_params:
kvstore.set_gradient_compression(self._compression_params)
if update_on_kvstore:
kvstore.set_optimizer(self._optimizer)
# copy initialized local parameters to kvstore
_initialize_kvstore(kvstore=kvstore,
param_arrays=self._exec_group.param_arrays,
arg_params=self._arg_params,
param_names=self._param_names,
update_on_kvstore=update_on_kvstore)
if not update_on_kvstore:
self._updater = opt.get_updater(optimizer)
self.optimizer_initialized = True
if self._preload_opt_states is not None:
self.load_optimizer_states(self._preload_opt_states)
self._preload_opt_states = None
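A minimal sketch with a plain SGD configuration; when the optimizer is given by name, rescale_grad defaults to 1.0/batch_size as computed above:
>>> mod.init_optimizer(kvstore='device', optimizer='sgd',
...                    optimizer_params=(('learning_rate', 0.004),
...                                      ('momentum', 0.9),
...                                      ('wd', 0.0005)))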
|
Borrows optimizer from a shared module. Used in bucketing, where exactly the same
optimizer (esp. kvstore) is used.
Parameters
----------
shared_module : Module
|
def borrow_optimizer(self, shared_module):
"""Borrows optimizer from a shared module. Used in bucketing, where exactly the same
optimizer (esp. kvstore) is used.
Parameters
----------
shared_module : Module
"""
assert shared_module.optimizer_initialized
self._optimizer = shared_module._optimizer
self._kvstore = shared_module._kvstore
self._update_on_kvstore = shared_module._update_on_kvstore
self._updater = shared_module._updater
self.optimizer_initialized = True
|
Forward computation. It supports data batches with different shapes, such as
different batch sizes or different image sizes.
If reshaping the data batch entails modifying the symbol or module, such as
changing the image layout ordering or switching from training to predicting,
the module must be rebound.
See Also
----------
:meth:`BaseModule.forward`.
Parameters
----------
data_batch : DataBatch
Could be anything with similar API implemented.
is_train : bool
Default is ``None``, which means ``is_train`` takes the value of ``self.for_training``.
|
def forward(self, data_batch, is_train=None):
"""Forward computation. It supports data batches with different shapes, such as
different batch sizes or different image sizes.
    If reshaping the data batch entails modifying the symbol or module, such as
    changing the image layout ordering or switching from training to predicting,
    the module must be rebound.
See Also
----------
:meth:`BaseModule.forward`.
Parameters
----------
data_batch : DataBatch
Could be anything with similar API implemented.
is_train : bool
Default is ``None``, which means ``is_train`` takes the value of ``self.for_training``.
"""
assert self.binded and self.params_initialized
curr_data_shapes = tuple(i.shape for i in self._data_shapes)
if isinstance(data_batch, list):
        assert len(data_batch) > 0, "Encountered empty data batch"
new_data_shapes = []
for i in range(len(data_batch[0].data)):
shape = data_batch[0].data[i].shape
for db in data_batch:
assert shape == db.data[i].shape, \
"All data batches in a list need to have the same shape"
new_batch_size = len(data_batch) * shape[0]
new_data_shapes.append((new_batch_size,) + shape[1:])
new_data_shapes = tuple(new_data_shapes)
else:
new_data_shapes = tuple(i.shape for i in data_batch.data)
if curr_data_shapes != new_data_shapes:
if hasattr(data_batch, "provide_data") and data_batch.provide_data:
new_dshape = data_batch.provide_data
else:
new_dshape = [DataDesc(i.name, shape, i.dtype, i.layout) \
for i, shape in zip(self._data_shapes, new_data_shapes)]
if hasattr(data_batch, "provide_label") and data_batch.provide_label:
new_lshape = data_batch.provide_label
elif hasattr(data_batch, "label") and data_batch.label:
new_lshape = [DataDesc(i.name, j.shape, i.dtype, i.layout) \
for i, j in zip(self._label_shapes, data_batch.label)]
else:
new_lshape = None
self.reshape(new_dshape, new_lshape)
self._exec_group.forward(data_batch, is_train)
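A hedged inference sketch, assuming the module is already bound and initialized with matching shapes:
>>> batch = mx.io.DataBatch(data=[mx.nd.zeros((1, 3, 300, 300))])
>>> mod.forward(batch, is_train=False)
>>> out = mod.get_outputs()[0]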
|
Backward computation.
See Also
----------
:meth:`BaseModule.backward`.
Parameters
----------
out_grads : NDArray or list of NDArray, optional
Gradient on the outputs to be propagated back.
This parameter is only needed when bind is called
on outputs that are not a loss function.
|
def backward(self, out_grads=None):
"""Backward computation.
See Also
----------
:meth:`BaseModule.backward`.
Parameters
----------
out_grads : NDArray or list of NDArray, optional
Gradient on the outputs to be propagated back.
This parameter is only needed when bind is called
on outputs that are not a loss function.
"""
assert self.binded and self.params_initialized
self._exec_group.backward(out_grads=out_grads)
|
Updates parameters according to the installed optimizer and the gradients computed
in the previous forward-backward batch.
When KVStore is used to update parameters for multi-device or multi-machine training,
a copy of the parameters is stored in KVStore. Note that for `row_sparse` parameters,
this function does update the copy of parameters in KVStore, but doesn't broadcast the
updated parameters to all devices / machines. Please call `prepare` to broadcast
`row_sparse` parameters with the next batch of data.
See Also
----------
:meth:`BaseModule.update`.
|
def update(self):
"""Updates parameters according to the installed optimizer and the gradients computed
in the previous forward-backward batch.
When KVStore is used to update parameters for multi-device or multi-machine training,
    a copy of the parameters is stored in KVStore. Note that for `row_sparse` parameters,
this function does update the copy of parameters in KVStore, but doesn't broadcast the
updated parameters to all devices / machines. Please call `prepare` to broadcast
`row_sparse` parameters with the next batch of data.
See Also
----------
:meth:`BaseModule.update`.
"""
assert self.binded and self.params_initialized and self.optimizer_initialized
self._params_dirty = True
if self._update_on_kvstore:
_update_params_on_kvstore(self._exec_group.param_arrays,
self._exec_group.grad_arrays,
self._kvstore, self._exec_group.param_names)
else:
_update_params(self._exec_group.param_arrays,
self._exec_group.grad_arrays,
updater=self._updater,
num_device=len(self._context),
kvstore=self._kvstore,
param_names=self._exec_group.param_names)
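Putting forward, backward, and update together gives the canonical training step; train_iter is an assumed data iterator:
>>> for batch in train_iter:
...     mod.forward(batch, is_train=True)
...     mod.backward()
...     mod.update()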
|
Gets outputs of the previous forward computation.
If ``merge_multi_context`` is ``True``, it is like ``[out1, out2]``. Otherwise, it
is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
elements are `NDArray`. When `merge_multi_context` is `False`, those `NDArray`
might live on different devices.
Parameters
----------
merge_multi_context : bool
Default is ``True``. In the case when data-parallelism is used, the outputs
    will be collected from multiple devices. A ``True`` value indicates that we
    should merge the collected results so that they look like they came from a single
    executor.
Returns
-------
list of NDArray or list of list of NDArray
Output.
|
def get_outputs(self, merge_multi_context=True):
"""Gets outputs of the previous forward computation.
If ``merge_multi_context`` is ``True``, it is like ``[out1, out2]``. Otherwise, it
is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
elements are `NDArray`. When `merge_multi_context` is `False`, those `NDArray`
might live on different devices.
Parameters
----------
merge_multi_context : bool
Default is ``True``. In the case when data-parallelism is used, the outputs
        will be collected from multiple devices. A ``True`` value indicates that we
        should merge the collected results so that they look like they came from a single
        executor.
Returns
-------
list of NDArray or list of list of NDArray
Output.
"""
assert self.binded and self.params_initialized
return self._exec_group.get_outputs(merge_multi_context=merge_multi_context)
|
Gets the gradients with respect to the inputs of the module.
If ``merge_multi_context`` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it
is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output
elements are `NDArray`.
Parameters
----------
merge_multi_context : bool
Default is ``True``. In the case when data-parallelism is used, the outputs
    will be collected from multiple devices. A ``True`` value indicates that we
    should merge the collected results so that they look like they came from a single
    executor.
Returns
-------
list of NDArray or list of list of NDArray
Input gradients
|
def get_input_grads(self, merge_multi_context=True):
"""Gets the gradients with respect to the inputs of the module.
If ``merge_multi_context`` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it
is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output
elements are `NDArray`.
Parameters
----------
merge_multi_context : bool
Default is ``True``. In the case when data-parallelism is used, the outputs
        will be collected from multiple devices. A ``True`` value indicates that we
        should merge the collected results so that they look like they came from a single
        executor.
Returns
-------
list of NDArray or list of list of NDArray
Input gradients
"""
assert self.binded and self.params_initialized and self.inputs_need_grad
return self._exec_group.get_input_grads(merge_multi_context=merge_multi_context)
|
Gets states from all devices.
If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise, it
is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
elements are `NDArray`.
Parameters
----------
merge_multi_context : bool
Default is ``True``. In the case when data-parallelism is used, the states
    will be collected from multiple devices. A ``True`` value indicates that we
    should merge the collected results so that they look like they came from a single
    executor.
Returns
-------
list of NDArray or list of list of NDArray
States
|
def get_states(self, merge_multi_context=True):
"""Gets states from all devices.
If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise, it
is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
elements are `NDArray`.
Parameters
----------
merge_multi_context : bool
Default is ``True``. In the case when data-parallelism is used, the states
        will be collected from multiple devices. A ``True`` value indicates that we
        should merge the collected results so that they look like they came from a single
        executor.
Returns
-------
list of NDArray or list of list of NDArray
States
"""
assert self.binded and self.params_initialized
return self._exec_group.get_states(merge_multi_context=merge_multi_context)
|
Evaluates and accumulates evaluation metric on outputs of the last forward computation.
See Also
----------
:meth:`BaseModule.update_metric`.
Parameters
----------
eval_metric : EvalMetric
Evaluation metric to use.
labels : list of NDArray if `pre_sliced` parameter is set to `False`,
list of lists of NDArray otherwise. Typically `data_batch.label`.
pre_sliced: bool
Whether the labels are already sliced per device (default: False).
|
def update_metric(self, eval_metric, labels, pre_sliced=False):
"""Evaluates and accumulates evaluation metric on outputs of the last forward computation.
See Also
----------
:meth:`BaseModule.update_metric`.
Parameters
----------
eval_metric : EvalMetric
Evaluation metric to use.
labels : list of NDArray if `pre_sliced` parameter is set to `False`,
list of lists of NDArray otherwise. Typically `data_batch.label`.
pre_sliced: bool
Whether the labels are already sliced per device (default: False).
"""
self._exec_group.update_metric(eval_metric, labels, pre_sliced)
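For example, accumulating a standard accuracy metric over one batch (assuming a classification-style module; the printed value is illustrative):
>>> metric = mx.metric.Accuracy()
>>> mod.forward(batch, is_train=False)
>>> mod.update_metric(metric, batch.label)
>>> metric.get()
('accuracy', 0.93)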
|
Synchronizes parameters from devices to CPU. This function should be called after
calling `update` that updates the parameters on the devices, before one can read the
latest parameters from ``self._arg_params`` and ``self._aux_params``.
For row_sparse parameters on devices, they are pulled from KVStore with all row ids.
|
def _sync_params_from_devices(self):
"""Synchronizes parameters from devices to CPU. This function should be called after
calling `update` that updates the parameters on the devices, before one can read the
latest parameters from ``self._arg_params`` and ``self._aux_params``.
    For row_sparse parameters on devices, they are pulled from KVStore with all row ids.
"""
self._exec_group.get_params(self._arg_params, self._aux_params)
if self._kvstore and self._update_on_kvstore:
for param_name, param_val in sorted(self._arg_params.items()):
if param_val.stype == 'row_sparse':
row_ids = nd.arange(0, param_val.shape[0], dtype='int64')
self._kvstore.row_sparse_pull(param_name, param_val, row_ids=row_ids)
self._params_dirty = False
|
Saves optimizer (updater) state to a file.
Parameters
----------
fname : str
Path to output states file.
|
def save_optimizer_states(self, fname):
"""Saves optimizer (updater) state to a file.
Parameters
----------
fname : str
Path to output states file.
"""
assert self.optimizer_initialized
if self._update_on_kvstore:
self._kvstore.save_optimizer_states(fname)
else:
with open(fname, 'wb') as fout:
fout.write(self._updater.get_states())
|
Loads optimizer (updater) state from a file.
Parameters
----------
fname : str
Path to input states file.
|
def load_optimizer_states(self, fname):
"""Loads optimizer (updater) state from a file.
Parameters
----------
fname : str
Path to input states file.
"""
assert self.optimizer_initialized
if self._update_on_kvstore:
self._kvstore.load_optimizer_states(fname)
else:
            with open(fname, 'rb') as fin:
                self._updater.set_states(fin.read())
|
Prepares the module for processing a data batch.
Usually involves switching bucket and reshaping.
For modules that contain `row_sparse` parameters in KVStore,
it prepares the `row_sparse` parameters based on the sparse_row_id_fn.
When KVStore is used to update parameters for multi-device or multi-machine training,
a copy of the parameters is stored in KVStore. Note that for `row_sparse` parameters,
the `update()` updates the copy of parameters in KVStore, but doesn't broadcast
the updated parameters to all devices / machines. The `prepare` function is used to
broadcast `row_sparse` parameters with the next batch of data.
Parameters
----------
data_batch : DataBatch
The current batch of data for forward computation.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
|
def prepare(self, data_batch, sparse_row_id_fn=None):
'''Prepares the module for processing a data batch.
Usually involves switching bucket and reshaping.
For modules that contain `row_sparse` parameters in KVStore,
it prepares the `row_sparse` parameters based on the sparse_row_id_fn.
When KVStore is used to update parameters for multi-device or multi-machine training,
    a copy of the parameters is stored in KVStore. Note that for `row_sparse` parameters,
the `update()` updates the copy of parameters in KVStore, but doesn't broadcast
the updated parameters to all devices / machines. The `prepare` function is used to
broadcast `row_sparse` parameters with the next batch of data.
Parameters
----------
data_batch : DataBatch
The current batch of data for forward computation.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
'''
assert self.binded
if sparse_row_id_fn is not None:
if not self._kvstore or not self._update_on_kvstore:
warnings.warn(UserWarning("Parameters are not updated in the KVStore. "
"No need to call sparse_row_id_fn."))
else:
row_ids = sparse_row_id_fn(data_batch)
assert(isinstance(row_ids, dict)), "Expected dict output from sparse_row_id_fn"
for param_name, row_id in row_ids.items():
param_idx = self._exec_group.param_names.index(param_name)
param_val = self._exec_group.param_arrays[param_idx]
assert(isinstance(param_val, (tuple, list)))
if param_val[0].stype != 'row_sparse':
warnings.warn(UserWarning("%s.stype is not 'row_sparse'. No need to "
"perform row_sparse_pull." % param_name))
else:
self._kvstore.row_sparse_pull(param_name, param_val, row_ids=row_id,
priority=-param_idx)
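A hedged sketch of a sparse_row_id_fn for a hypothetical row_sparse parameter named 'embed_weight'; it pulls only the rows indexed by the current batch:
>>> def sparse_row_id_fn(data_batch):
...     # assumption: the first data array holds integer row indices
...     return {'embed_weight': data_batch.data[0].astype('int64').reshape((-1,))}
>>> mod.prepare(batch, sparse_row_id_fn=sparse_row_id_fn)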
|
Helper function for random generators.
|
def _random_helper(random, sampler, params, shape, dtype, ctx, out, kwargs):
"""Helper function for random generators."""
if isinstance(params[0], NDArray):
for i in params[1:]:
assert isinstance(i, NDArray), \
"Distribution parameters must all have the same type, but got " \
"both %s and %s."%(type(params[0]), type(i))
return sampler(*params, shape=shape, dtype=dtype, out=out, **kwargs)
elif isinstance(params[0], numeric_types):
if ctx is None:
ctx = current_context()
if shape is _Null and out is None:
shape = 1
for i in params[1:]:
assert isinstance(i, numeric_types), \
"Distribution parameters must all have the same type, but got " \
"both %s and %s."%(type(params[0]), type(i))
return random(*params, shape=shape, dtype=dtype, ctx=ctx, out=out, **kwargs)
raise ValueError("Distribution parameters must be either NDArray or numbers, "
"but got %s."%type(params[0]))
|
Draw random samples from a uniform distribution.
Samples are uniformly distributed over the half-open interval *[low, high)*
(includes *low*, but excludes *high*).
Parameters
----------
low : float or NDArray, optional
Lower boundary of the output interval. All values generated will be
greater than or equal to low. The default value is 0.
high : float or NDArray, optional
Upper boundary of the output interval. All values generated will be
less than high. The default value is 1.0.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `low` and
`high` are scalars, output shape will be `(m, n)`. If `low` and `high`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each `[low, high)` pair.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`low.context` when `low` is an NDArray.
out : NDArray, optional
Store output to an existing NDArray.
Returns
-------
NDArray
    An NDArray of type `dtype`. If input `shape` is, e.g.,
    `(m, n)` and `low` and `high` are scalars, output shape will be `(m, n)`.
    If `low` and `high` are NDArrays with shape, e.g., `(x, y)`, then the
    returned NDArray will have shape `(x, y, m, n)`, where `m*n` uniformly distributed
samples are drawn for each `[low, high)` pair.
Examples
--------
>>> mx.nd.random.uniform(0, 1)
[ 0.54881352]
<NDArray 1 @cpu(0)>
>>> mx.nd.random.uniform(0, 1, ctx=mx.gpu(0))
[ 0.92514056]
<NDArray 1 @gpu(0)>
>>> mx.nd.random.uniform(-1, 1, shape=(2,))
[ 0.71589124 0.08976638]
<NDArray 2 @cpu(0)>
>>> low = mx.nd.array([1,2,3])
>>> high = mx.nd.array([2,3,4])
>>> mx.nd.random.uniform(low, high, shape=2)
[[ 1.78653979 1.93707538]
[ 2.01311183 2.37081361]
[ 3.30491424 3.69977832]]
<NDArray 3x2 @cpu(0)>
|
def uniform(low=0, high=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs):
"""Draw random samples from a uniform distribution.
Samples are uniformly distributed over the half-open interval *[low, high)*
(includes *low*, but excludes *high*).
Parameters
----------
low : float or NDArray, optional
Lower boundary of the output interval. All values generated will be
greater than or equal to low. The default value is 0.
high : float or NDArray, optional
Upper boundary of the output interval. All values generated will be
less than high. The default value is 1.0.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `low` and
`high` are scalars, output shape will be `(m, n)`. If `low` and `high`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each `[low, high)` pair.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`low.context` when `low` is an NDArray.
out : NDArray, optional
Store output to an existing NDArray.
Returns
-------
NDArray
        An NDArray of type `dtype`. If input `shape` is, e.g.,
        `(m, n)` and `low` and `high` are scalars, output shape will be `(m, n)`.
        If `low` and `high` are NDArrays with shape, e.g., `(x, y)`, then the
        returned NDArray will have shape `(x, y, m, n)`, where `m*n` uniformly distributed
samples are drawn for each `[low, high)` pair.
Examples
--------
>>> mx.nd.random.uniform(0, 1)
[ 0.54881352]
    <NDArray 1 @cpu(0)>
>>> mx.nd.random.uniform(0, 1, ctx=mx.gpu(0))
[ 0.92514056]
<NDArray 1 @gpu(0)>
>>> mx.nd.random.uniform(-1, 1, shape=(2,))
[ 0.71589124 0.08976638]
<NDArray 2 @cpu(0)>
>>> low = mx.nd.array([1,2,3])
>>> high = mx.nd.array([2,3,4])
>>> mx.nd.random.uniform(low, high, shape=2)
[[ 1.78653979 1.93707538]
[ 2.01311183 2.37081361]
[ 3.30491424 3.69977832]]
<NDArray 3x2 @cpu(0)>
"""
return _random_helper(_internal._random_uniform, _internal._sample_uniform,
[low, high], shape, dtype, ctx, out, kwargs)
|
Draw random samples from a normal (Gaussian) distribution.
Samples are distributed according to a normal distribution parametrized
by *loc* (mean) and *scale* (standard deviation).
Parameters
----------
loc : float or NDArray, optional
Mean (centre) of the distribution.
scale : float or NDArray, optional
Standard deviation (spread or width) of the distribution.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `loc` and
`scale` are scalars, output shape will be `(m, n)`. If `loc` and `scale`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
    `(x, y, m, n)`, where `m*n` samples are drawn for each `(loc, scale)` pair.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`loc.context` when `loc` is an NDArray.
out : NDArray, optional
Store output to an existing NDArray.
Returns
-------
NDArray
    An NDArray of type `dtype`. If input `shape` is, e.g., `(m, n)` and
`loc` and `scale` are scalars, output shape will be `(m, n)`. If `loc` and
`scale` are NDArrays with shape, e.g., `(x, y)`, then output will have shape
    `(x, y, m, n)`, where `m*n` samples are drawn for each `(loc, scale)` pair.
Examples
--------
>>> mx.nd.random.normal(0, 1)
[ 2.21220636]
<NDArray 1 @cpu(0)>
>>> mx.nd.random.normal(0, 1, ctx=mx.gpu(0))
[ 0.29253659]
<NDArray 1 @gpu(0)>
>>> mx.nd.random.normal(-1, 1, shape=(2,))
[-0.2259962 -0.51619542]
<NDArray 2 @cpu(0)>
>>> loc = mx.nd.array([1,2,3])
>>> scale = mx.nd.array([2,3,4])
>>> mx.nd.random.normal(loc, scale, shape=2)
[[ 0.55912292 3.19566321]
[ 1.91728961 2.47706747]
[ 2.79666662 5.44254589]]
<NDArray 3x2 @cpu(0)>
|
def normal(loc=0, scale=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs):
"""Draw random samples from a normal (Gaussian) distribution.
Samples are distributed according to a normal distribution parametrized
by *loc* (mean) and *scale* (standard deviation).
Parameters
----------
loc : float or NDArray, optional
Mean (centre) of the distribution.
scale : float or NDArray, optional
Standard deviation (spread or width) of the distribution.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `loc` and
`scale` are scalars, output shape will be `(m, n)`. If `loc` and `scale`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
        `(x, y, m, n)`, where `m*n` samples are drawn for each `(loc, scale)` pair.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`loc.context` when `loc` is an NDArray.
out : NDArray, optional
Store output to an existing NDArray.
Returns
-------
NDArray
        An NDArray of type `dtype`. If input `shape` is, e.g., `(m, n)` and
`loc` and `scale` are scalars, output shape will be `(m, n)`. If `loc` and
`scale` are NDArrays with shape, e.g., `(x, y)`, then output will have shape
        `(x, y, m, n)`, where `m*n` samples are drawn for each `(loc, scale)` pair.
Examples
--------
>>> mx.nd.random.normal(0, 1)
[ 2.21220636]
<NDArray 1 @cpu(0)>
>>> mx.nd.random.normal(0, 1, ctx=mx.gpu(0))
[ 0.29253659]
<NDArray 1 @gpu(0)>
>>> mx.nd.random.normal(-1, 1, shape=(2,))
[-0.2259962 -0.51619542]
<NDArray 2 @cpu(0)>
>>> loc = mx.nd.array([1,2,3])
>>> scale = mx.nd.array([2,3,4])
>>> mx.nd.random.normal(loc, scale, shape=2)
[[ 0.55912292 3.19566321]
[ 1.91728961 2.47706747]
[ 2.79666662 5.44254589]]
<NDArray 3x2 @cpu(0)>
"""
return _random_helper(_internal._random_normal, _internal._sample_normal,
[loc, scale], shape, dtype, ctx, out, kwargs)
|