Draw random samples from a normal (Gaussian) distribution.
Samples are distributed according to a normal distribution parametrized
by *loc* (mean) and *scale* (standard deviation).
Parameters
----------
loc : float or NDArray
Mean (centre) of the distribution.
scale : float or NDArray
Standard deviation (spread or width) of the distribution.
shape : int or tuple of ints
The number of samples to draw. If shape is, e.g., `(m, n)` and `loc` and
`scale` are scalars, output shape will be `(m, n)`. If `loc` and `scale`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each `[loc, scale)` pair.
dtype : {'float16', 'float32', 'float64'}
Data type of output samples. Default is 'float32'
ctx : Context
Device context of output. Default is current context. Overridden by
`loc.context` when `loc` is an NDArray.
out : NDArray
Store output to an existing NDArray.
Returns
-------
NDArray
If input `shape` has shape, e.g., `(m, n)` and `loc` and `scale` are scalars, output
shape will be `(m, n)`. If `loc` and `scale` are NDArrays with shape, e.g., `(x, y)`,
then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for
each `[loc, scale)` pair.
Examples
--------
>>> mx.nd.random.randn()
2.21220636
<NDArray 1 @cpu(0)>
>>> mx.nd.random.randn(2, 2)
[[-1.856082 -1.9768796 ]
[-0.20801921 0.2444218 ]]
<NDArray 2x2 @cpu(0)>
>>> mx.nd.random.randn(2, 3, loc=5, scale=1)
[[4.19962 4.8311777 5.936328 ]
[5.357444 5.7793283 3.9896927]]
<NDArray 2x3 @cpu(0)>
|
def randn(*shape, **kwargs):
"""Draw random samples from a normal (Gaussian) distribution.
Samples are distributed according to a normal distribution parametrized
by *loc* (mean) and *scale* (standard deviation).
Parameters
----------
loc : float or NDArray
Mean (centre) of the distribution.
scale : float or NDArray
Standard deviation (spread or width) of the distribution.
shape : int or tuple of ints
The number of samples to draw. If shape is, e.g., `(m, n)` and `loc` and
`scale` are scalars, output shape will be `(m, n)`. If `loc` and `scale`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each `[loc, scale)` pair.
dtype : {'float16', 'float32', 'float64'}
Data type of output samples. Default is 'float32'
ctx : Context
Device context of output. Default is current context. Overridden by
`loc.context` when `loc` is an NDArray.
out : NDArray
Store output to an existing NDArray.
Returns
-------
NDArray
If input `shape` has shape, e.g., `(m, n)` and `loc` and `scale` are scalars, output
shape will be `(m, n)`. If `loc` and `scale` are NDArrays with shape, e.g., `(x, y)`,
then output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for
each `[loc, scale)` pair.
Examples
--------
>>> mx.nd.random.randn()
2.21220636
<NDArray 1 @cpu(0)>
>>> mx.nd.random.randn(2, 2)
[[-1.856082 -1.9768796 ]
[-0.20801921 0.2444218 ]]
<NDArray 2x2 @cpu(0)>
>>> mx.nd.random.randn(2, 3, loc=5, scale=1)
[[4.19962 4.8311777 5.936328 ]
[5.357444 5.7793283 3.9896927]]
<NDArray 2x3 @cpu(0)>
"""
loc = kwargs.pop('loc', 0)
scale = kwargs.pop('scale', 1)
dtype = kwargs.pop('dtype', _Null)
ctx = kwargs.pop('ctx', None)
out = kwargs.pop('out', None)
    # Unlike `normal`, randn only supports scalar loc and scale.
    assert isinstance(loc, (int, float)), "loc must be a scalar"
    assert isinstance(scale, (int, float)), "scale must be a scalar"
return _random_helper(_internal._random_normal, _internal._sample_normal,
[loc, scale], shape, dtype, ctx, out, kwargs)
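
# A minimal illustration (not part of the original API docs) of the output-shape rule
# described above, shown with `normal`, which also accepts NDArray parameters; `randn`
# itself only takes scalar loc/scale. Assumes mxnet is importable as mx.
import mxnet as mx
loc = mx.nd.array([0.0, 10.0])
scale = mx.nd.array([1.0, 2.0])
samples = mx.nd.random.normal(loc, scale, shape=(3,))
print(samples.shape)   # -> (2, 3): three draws per (loc, scale) pair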
|
r"""Draw samples from an exponential distribution.
Its probability density function is
.. math:: f(x; \frac{1}{\beta}) = \frac{1}{\beta} \exp(-\frac{x}{\beta}),
for x > 0 and 0 elsewhere. \beta is the scale parameter, which is the
inverse of the rate parameter \lambda = 1/\beta.
Parameters
----------
scale : float or NDArray, optional
The scale parameter, \beta = 1/\lambda.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `scale` is
a scalar, output shape will be `(m, n)`. If `scale`
is an NDArray with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each entry in `scale`.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`scale.context` when `scale` is an NDArray.
out : NDArray, optional
Store output to an existing NDArray.
Returns
-------
NDArray
If input `shape` has shape, e.g., `(m, n)` and `scale` is a scalar, output shape will
be `(m, n)`. If `scale` is an NDArray with shape, e.g., `(x, y)`, then `output`
will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each entry in scale.
Examples
--------
>>> mx.nd.random.exponential(1)
[ 0.79587454]
<NDArray 1 @cpu(0)>
>>> mx.nd.random.exponential(1, shape=(2,))
[ 0.89856035 1.25593066]
<NDArray 2 @cpu(0)>
>>> scale = mx.nd.array([1,2,3])
>>> mx.nd.random.exponential(scale, shape=2)
[[ 0.41063145 0.42140478]
[ 2.59407091 10.12439728]
[ 2.42544937 1.14260709]]
<NDArray 3x2 @cpu(0)>
|
def exponential(scale=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs):
r"""Draw samples from an exponential distribution.
Its probability density function is
.. math:: f(x; \frac{1}{\beta}) = \frac{1}{\beta} \exp(-\frac{x}{\beta}),
for x > 0 and 0 elsewhere. \beta is the scale parameter, which is the
inverse of the rate parameter \lambda = 1/\beta.
Parameters
----------
scale : float or NDArray, optional
The scale parameter, \beta = 1/\lambda.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `scale` is
a scalar, output shape will be `(m, n)`. If `scale`
is an NDArray with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each entry in `scale`.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`scale.context` when `scale` is an NDArray.
out : NDArray, optional
Store output to an existing NDArray.
Returns
-------
NDArray
If input `shape` has shape, e.g., `(m, n)` and `scale` is a scalar, output shape will
be `(m, n)`. If `scale` is an NDArray with shape, e.g., `(x, y)`, then `output`
will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each entry in scale.
Examples
--------
>>> mx.nd.random.exponential(1)
[ 0.79587454]
<NDArray 1 @cpu(0)>
>>> mx.nd.random.exponential(1, shape=(2,))
[ 0.89856035 1.25593066]
<NDArray 2 @cpu(0)>
>>> scale = mx.nd.array([1,2,3])
>>> mx.nd.random.exponential(scale, shape=2)
[[ 0.41063145 0.42140478]
[ 2.59407091 10.12439728]
[ 2.42544937 1.14260709]]
<NDArray 3x2 @cpu(0)>
"""
return _random_helper(_internal._random_exponential, _internal._sample_exponential,
[1.0/scale], shape, dtype, ctx, out, kwargs)
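
# A quick sanity sketch (assuming mxnet is importable as mx): the underlying operator is
# parameterized by the rate lambda, which is why 1.0/scale is passed above, and the sample
# mean should come out close to `scale`.
import mxnet as mx
samples = mx.nd.random.exponential(scale=4.0, shape=(100000,))
print(samples.mean())   # expected to be close to 4.0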
|
Draw random samples from a gamma distribution.
Samples are distributed according to a gamma distribution parametrized
by *alpha* (shape) and *beta* (scale).
Parameters
----------
alpha : float or NDArray, optional
The shape of the gamma distribution. Should be greater than zero.
beta : float or NDArray, optional
The scale of the gamma distribution. Should be greater than zero.
Default is equal to 1.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `alpha` and
`beta` are scalars, output shape will be `(m, n)`. If `alpha` and `beta`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each `[alpha, beta)` pair.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`alpha.context` when `alpha` is an NDArray.
out : NDArray, optional
Store output to an existing NDArray.
Returns
-------
NDArray
If input `shape` has shape, e.g., `(m, n)` and `alpha` and `beta` are scalars, output
shape will be `(m, n)`. If `alpha` and `beta` are NDArrays with shape, e.g.,
`(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are
drawn for each `[alpha, beta)` pair.
Examples
--------
>>> mx.nd.random.gamma(1, 1)
[ 1.93308783]
<NDArray 1 @cpu(0)>
>>> mx.nd.random.gamma(1, 1, shape=(2,))
[ 0.48216391 2.09890771]
<NDArray 2 @cpu(0)>
>>> alpha = mx.nd.array([1,2,3])
>>> beta = mx.nd.array([2,3,4])
>>> mx.nd.random.gamma(alpha, beta, shape=2)
[[ 3.24343276 0.94137681]
[ 3.52734375 0.45568955]
[ 14.26264095 14.0170126 ]]
<NDArray 3x2 @cpu(0)>
|
def gamma(alpha=1, beta=1, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs):
"""Draw random samples from a gamma distribution.
Samples are distributed according to a gamma distribution parametrized
by *alpha* (shape) and *beta* (scale).
Parameters
----------
alpha : float or NDArray, optional
The shape of the gamma distribution. Should be greater than zero.
beta : float or NDArray, optional
The scale of the gamma distribution. Should be greater than zero.
Default is equal to 1.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `alpha` and
`beta` are scalars, output shape will be `(m, n)`. If `alpha` and `beta`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each `[alpha, beta)` pair.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`alpha.context` when `alpha` is an NDArray.
out : NDArray, optional
Store output to an existing NDArray.
Returns
-------
NDArray
If input `shape` has shape, e.g., `(m, n)` and `alpha` and `beta` are scalars, output
shape will be `(m, n)`. If `alpha` and `beta` are NDArrays with shape, e.g.,
`(x, y)`, then output will have shape `(x, y, m, n)`, where `m*n` samples are
drawn for each `[alpha, beta)` pair.
Examples
--------
>>> mx.nd.random.gamma(1, 1)
[ 1.93308783]
<NDArray 1 @cpu(0)>
>>> mx.nd.random.gamma(1, 1, shape=(2,))
[ 0.48216391 2.09890771]
<NDArray 2 @cpu(0)>
>>> alpha = mx.nd.array([1,2,3])
>>> beta = mx.nd.array([2,3,4])
>>> mx.nd.random.gamma(alpha, beta, shape=2)
[[ 3.24343276 0.94137681]
[ 3.52734375 0.45568955]
[ 14.26264095 14.0170126 ]]
<NDArray 3x2 @cpu(0)>
"""
return _random_helper(_internal._random_gamma, _internal._sample_gamma,
[alpha, beta], shape, dtype, ctx, out, kwargs)
|
Draw random samples from a negative binomial distribution.
Samples are distributed according to a negative binomial distribution
parametrized by *k* (limit of unsuccessful experiments) and *p* (failure
probability in each experiment). Samples will always be returned as a
floating point data type.
Parameters
----------
k : float or NDArray, optional
Limit of unsuccessful experiments, > 0.
p : float or NDArray, optional
Failure probability in each experiment, >= 0 and <=1.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `k` and
`p` are scalars, output shape will be `(m, n)`. If `k` and `p`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each `[k, p)` pair.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`k.context` when `k` is an NDArray.
out : NDArray, optional
Store output to an existing NDArray.
Returns
-------
NDArray
If input `shape` has shape, e.g., `(m, n)` and `k` and `p` are scalars, output shape
will be `(m, n)`. If `k` and `p` are NDArrays with shape, e.g., `(x, y)`, then
output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[k, p)` pair.
Examples
--------
>>> mx.nd.random.negative_binomial(10, 0.5)
[ 4.]
<NDArray 1 @cpu(0)>
>>> mx.nd.random.negative_binomial(10, 0.5, shape=(2,))
[ 3. 4.]
<NDArray 2 @cpu(0)>
>>> k = mx.nd.array([1,2,3])
>>> p = mx.nd.array([0.2,0.4,0.6])
>>> mx.nd.random.negative_binomial(k, p, shape=2)
[[ 3. 2.]
[ 4. 4.]
[ 0. 5.]]
<NDArray 3x2 @cpu(0)>
|
def negative_binomial(k=1, p=1, shape=_Null, dtype=_Null, ctx=None,
out=None, **kwargs):
"""Draw random samples from a negative binomial distribution.
Samples are distributed according to a negative binomial distribution
parametrized by *k* (limit of unsuccessful experiments) and *p* (failure
probability in each experiment). Samples will always be returned as a
floating point data type.
Parameters
----------
k : float or NDArray, optional
Limit of unsuccessful experiments, > 0.
p : float or NDArray, optional
Failure probability in each experiment, >= 0 and <=1.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `k` and
`p` are scalars, output shape will be `(m, n)`. If `k` and `p`
are NDArrays with shape, e.g., `(x, y)`, then output will have shape
`(x, y, m, n)`, where `m*n` samples are drawn for each `[k, p)` pair.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`k.context` when `k` is an NDArray.
out : NDArray, optional
Store output to an existing NDArray.
Returns
-------
NDArray
If input `shape` has shape, e.g., `(m, n)` and `k` and `p` are scalars, output shape
will be `(m, n)`. If `k` and `p` are NDArrays with shape, e.g., `(x, y)`, then
output will have shape `(x, y, m, n)`, where `m*n` samples are drawn for each `[k, p)` pair.
Examples
--------
>>> mx.nd.random.negative_binomial(10, 0.5)
[ 4.]
<NDArray 1 @cpu(0)>
>>> mx.nd.random.negative_binomial(10, 0.5, shape=(2,))
[ 3. 4.]
<NDArray 2 @cpu(0)>
>>> k = mx.nd.array([1,2,3])
>>> p = mx.nd.array([0.2,0.4,0.6])
>>> mx.nd.random.negative_binomial(k, p, shape=2)
[[ 3. 2.]
[ 4. 4.]
[ 0. 5.]]
<NDArray 3x2 @cpu(0)>
"""
return _random_helper(_internal._random_negative_binomial,
_internal._sample_negative_binomial,
[k, p], shape, dtype, ctx, out, kwargs)
|
Concurrent sampling from multiple multinomial distributions.
.. note:: The input distribution must be normalized, i.e. `data` must sum to
1 along its last dimension.
Parameters
----------
data : NDArray
An *n* dimensional array whose last dimension has length `k`, where
`k` is the number of possible outcomes of each multinomial distribution.
For example, data with shape `(m, n, k)` specifies `m*n` multinomial
distributions each with `k` possible outcomes.
shape : int or tuple of ints, optional
The number of samples to draw from each distribution. If shape is empty
one sample will be drawn from each distribution.
get_prob : bool, optional
If true, a second array containing log likelihood of the drawn
samples will also be returned.
This is usually used for reinforcement learning, where you can provide
reward as head gradient w.r.t. this array to estimate gradient.
out : NDArray, optional
Store output to an existing NDArray.
dtype : str or numpy.dtype, optional
Data type of the sample output array. The default is int32.
Note that the data type of the log likelihood array is the same with that of `data`.
Returns
-------
List, or NDArray
For input `data` with `n` dimensions and shape `(d1, d2, ..., dn-1, k)`, and input
`shape` with shape `(s1, s2, ..., sx)`, returns an NDArray with shape
`(d1, d2, ... dn-1, s1, s2, ..., sx)`. The `s1, s2, ... sx` dimensions of the
returned NDArray consist of 0-indexed values sampled from each respective multinomial
distribution provided in the `k` dimension of `data`.
For the case `n`=1, and `x`=1 (one shape dimension), returned NDArray has shape `(s1,)`.
If `get_prob` is set to True, this function returns a list of format:
`[ndarray_output, log_likelihood_output]`, where `log_likelihood_output` is an NDArray of the
same shape as the sampled outputs.
Examples
--------
>>> probs = mx.nd.array([0, 0.1, 0.2, 0.3, 0.4])
>>> mx.nd.random.multinomial(probs)
[3]
<NDArray 1 @cpu(0)>
>>> probs = mx.nd.array([[0, 0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1, 0]])
>>> mx.nd.random.multinomial(probs)
[3 1]
<NDArray 2 @cpu(0)>
>>> mx.nd.random.multinomial(probs, shape=2)
[[4 4]
[1 2]]
<NDArray 2x2 @cpu(0)>
>>> mx.nd.random.multinomial(probs, get_prob=True)
[3 2]
<NDArray 2 @cpu(0)>
[-1.20397282 -1.60943794]
<NDArray 2 @cpu(0)>
|
def multinomial(data, shape=_Null, get_prob=False, out=None, dtype='int32', **kwargs):
"""Concurrent sampling from multiple multinomial distributions.
.. note:: The input distribution must be normalized, i.e. `data` must sum to
1 along its last dimension.
Parameters
----------
data : NDArray
An *n* dimensional array whose last dimension has length `k`, where
`k` is the number of possible outcomes of each multinomial distribution.
For example, data with shape `(m, n, k)` specifies `m*n` multinomial
distributions each with `k` possible outcomes.
shape : int or tuple of ints, optional
The number of samples to draw from each distribution. If shape is empty
one sample will be drawn from each distribution.
get_prob : bool, optional
If true, a second array containing log likelihood of the drawn
samples will also be returned.
This is usually used for reinforcement learning, where you can provide
reward as head gradient w.r.t. this array to estimate gradient.
out : NDArray, optional
Store output to an existing NDArray.
dtype : str or numpy.dtype, optional
Data type of the sample output array. The default is int32.
Note that the data type of the log likelihood array is the same with that of `data`.
Returns
-------
List, or NDArray
For input `data` with `n` dimensions and shape `(d1, d2, ..., dn-1, k)`, and input
`shape` with shape `(s1, s2, ..., sx)`, returns an NDArray with shape
`(d1, d2, ... dn-1, s1, s2, ..., sx)`. The `s1, s2, ... sx` dimensions of the
returned NDArray consist of 0-indexed values sampled from each respective multinomial
distribution provided in the `k` dimension of `data`.
For the case `n`=1, and `x`=1 (one shape dimension), returned NDArray has shape `(s1,)`.
If `get_prob` is set to True, this function returns a list of format:
`[ndarray_output, log_likelihood_output]`, where `log_likelihood_output` is an NDArray of the
same shape as the sampled outputs.
Examples
--------
>>> probs = mx.nd.array([0, 0.1, 0.2, 0.3, 0.4])
>>> mx.nd.random.multinomial(probs)
[3]
<NDArray 1 @cpu(0)>
>>> probs = mx.nd.array([[0, 0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1, 0]])
>>> mx.nd.random.multinomial(probs)
[3 1]
<NDArray 2 @cpu(0)>
>>> mx.nd.random.multinomial(probs, shape=2)
[[4 4]
[1 2]]
<NDArray 2x2 @cpu(0)>
>>> mx.nd.random.multinomial(probs, get_prob=True)
[3 2]
<NDArray 2 @cpu(0)>
[-1.20397282 -1.60943794]
<NDArray 2 @cpu(0)>
"""
return _internal._sample_multinomial(data, shape, get_prob, out=out, dtype=dtype, **kwargs)
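
# A small illustrative sketch (assuming mxnet is importable as mx) of the `get_prob` output
# described above: exponentiating the returned log-likelihood recovers the probability that
# `data` assigned to the sampled outcome.
import mxnet as mx
probs = mx.nd.array([0.1, 0.2, 0.3, 0.4])
sample, log_lik = mx.nd.random.multinomial(probs, get_prob=True)
print(sample, log_lik.exp())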
|
Draw random samples from a discrete uniform distribution.
Samples are uniformly distributed over the half-open interval *[low, high)*
(includes *low*, but excludes *high*).
Parameters
----------
low : int, required
Lower boundary of the output interval. All values generated will be
greater than or equal to low.
high : int, required
Upper boundary of the output interval. All values generated will be
less than high.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `low` and
`high` are scalars, output shape will be `(m, n)`.
dtype : {'int32', 'int64'}, optional
Data type of output samples. Default is 'int32'
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`low.context` when `low` is an NDArray.
out : NDArray, optional
Store output to an existing NDArray.
Returns
-------
NDArray
An NDArray of type `dtype`. If input `shape` has shape, e.g.,
`(m, n)`, the returned NDArray's shape will be `(m, n)`. Contents
of the returned NDArray will be samples from the interval `[low, high)`.
Examples
--------
>>> mx.nd.random.randint(5, 100)
[ 90]
<NDArray 1 @cpu(0)>
>>> mx.nd.random.randint(-10, 2, ctx=mx.gpu(0))
[ -8]
<NDArray 1 @gpu(0)>
>>> mx.nd.random.randint(-10, 10, shape=(2,))
[ -5 4]
<NDArray 2 @cpu(0)>
|
def randint(low, high, shape=_Null, dtype=_Null, ctx=None, out=None, **kwargs):
"""Draw random samples from a discrete uniform distribution.
Samples are uniformly distributed over the half-open interval *[low, high)*
(includes *low*, but excludes *high*).
Parameters
----------
low : int, required
Lower boundary of the output interval. All values generated will be
greater than or equal to low.
high : int, required
Upper boundary of the output interval. All values generated will be
less than high.
shape : int or tuple of ints, optional
The number of samples to draw. If shape is, e.g., `(m, n)` and `low` and
`high` are scalars, output shape will be `(m, n)`.
dtype : {'int32', 'int64'}, optional
Data type of output samples. Default is 'int32'
ctx : Context, optional
Device context of output. Default is current context. Overridden by
`low.context` when `low` is an NDArray.
out : NDArray, optional
Store output to an existing NDArray.
Returns
-------
NDArray
An NDArray of type `dtype`. If input `shape` has shape, e.g.,
        `(m, n)`, the returned NDArray's shape will be `(m, n)`. Contents
of the returned NDArray will be samples from the interval `[low, high)`.
Examples
--------
>>> mx.nd.random.randint(5, 100)
[ 90]
    <NDArray 1 @cpu(0)>
>>> mx.nd.random.randint(-10, 2, ctx=mx.gpu(0))
[ -8]
<NDArray 1 @gpu(0)>
>>> mx.nd.random.randint(-10, 10, shape=(2,))
[ -5 4]
<NDArray 2 @cpu(0)>
"""
return _random_helper(_internal._random_randint, None,
[low, high], shape, dtype, ctx, out, kwargs)
|
Some tricks of feature engineering are adapted
from tensorflow's wide and deep tutorial.
|
def preprocess_uci_adult(data_name):
"""Some tricks of feature engineering are adapted
from tensorflow's wide and deep tutorial.
"""
csv_columns = [
"age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week", "native_country",
"income_bracket"
]
vocabulary_dict = {
"gender": [
"Female", "Male"
],
"education": [
"Bachelors", "HS-grad", "11th", "Masters", "9th",
"Some-college", "Assoc-acdm", "Assoc-voc", "7th-8th",
"Doctorate", "Prof-school", "5th-6th", "10th", "1st-4th",
"Preschool", "12th"
],
"marital_status": [
"Married-civ-spouse", "Divorced", "Married-spouse-absent",
"Never-married", "Separated", "Married-AF-spouse", "Widowed"
],
"relationship": [
"Husband", "Not-in-family", "Wife", "Own-child", "Unmarried",
"Other-relative"
],
"workclass": [
"Self-emp-not-inc", "Private", "State-gov", "Federal-gov",
"Local-gov", "?", "Self-emp-inc", "Without-pay", "Never-worked"
]
}
# wide columns
crossed_columns = [
["education", "occupation"],
["native_country", "occupation"],
["age_buckets", "education", "occupation"],
]
age_boundaries = [18, 25, 30, 35, 40, 45, 50, 55, 60, 65]
# deep columns
indicator_columns = ['workclass', 'education', 'gender', 'relationship']
embedding_columns = ['native_country', 'occupation']
continuous_columns = ['age', 'education_num', 'capital_gain', 'capital_loss', 'hours_per_week']
# income_bracket column is the label
labels = ["<", ">"]
hash_bucket_size = 1000
csr_ncols = len(crossed_columns) * hash_bucket_size
dns_ncols = len(continuous_columns) + len(embedding_columns)
for col in indicator_columns:
dns_ncols += len(vocabulary_dict[col])
label_list = []
csr_list = []
dns_list = []
with open(data_name) as f:
for row in DictReader(f, fieldnames=csv_columns):
label_list.append(labels.index(row['income_bracket'].strip()[0]))
for i, cols in enumerate(crossed_columns):
if cols[0] == "age_buckets":
age_bucket = np.digitize(float(row["age"]), age_boundaries)
s = '_'.join([row[col].strip() for col in cols[1:]])
s += '_' + str(age_bucket)
csr_list.append((i * hash_bucket_size + hash(s) % hash_bucket_size, 1.0))
else:
s = '_'.join([row[col].strip() for col in cols])
csr_list.append((i * hash_bucket_size + hash(s) % hash_bucket_size, 1.0))
dns_row = [0] * dns_ncols
dns_dim = 0
for col in embedding_columns:
dns_row[dns_dim] = hash(row[col].strip()) % hash_bucket_size
dns_dim += 1
for col in indicator_columns:
dns_row[dns_dim + vocabulary_dict[col].index(row[col].strip())] = 1.0
dns_dim += len(vocabulary_dict[col])
for col in continuous_columns:
dns_row[dns_dim] = float(row[col].strip())
dns_dim += 1
dns_list.append(dns_row)
data_list = [item[1] for item in csr_list]
indices_list = [item[0] for item in csr_list]
indptr_list = range(0, len(indices_list) + 1, len(crossed_columns))
# convert to ndarrays
csr = mx.nd.sparse.csr_matrix((data_list, indices_list, indptr_list),
shape=(len(label_list), hash_bucket_size * len(crossed_columns)))
dns = np.array(dns_list)
label = np.array(label_list)
return csr, dns, label
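
# Illustrative sketch of the crossed-column hashing used above, with a hypothetical
# education/occupation pair and the same hash_bucket_size of 1000: each crossed feature
# string is hashed into one of 1000 buckets, and each crossed column gets its own block of
# columns in the sparse "wide" row. Note that Python 3 salts str hashes per process unless
# PYTHONHASHSEED is fixed, so bucket assignments are only stable within a single run.
hash_bucket_size = 1000
s = '_'.join(["Bachelors", "Tech-support"])   # education x occupation (crossed column 0)
col_index = 0 * hash_bucket_size + hash(s) % hash_bucket_size
print(col_index)   # column of the 1.0 entry contributed by this crossed feature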
|
Initialize parameters in the KVStore.
Parameters with incomplete initialization are ignored.
|
def _init_params(self):
"""Initialize parameters in the KVStore.
Parameters with incomplete initialization are ignored.
"""
assert self._kv_initialized, "Cannot initialize parameters in KVStore " \
"when KVStore is not initialized."
params_to_init = []
if self._kvstore:
for param in self._params_to_init:
if param._deferred_init:
params_to_init.append(param)
else:
param_arrays = param._check_and_get(param._data, list)
idx = self._param2idx[param.name]
self._kvstore.init(idx, param_arrays[0])
if param._stype == 'default':
self._kvstore.pull(idx, param_arrays, priority=-idx)
self._params_to_init = params_to_init
|
Reset kvstore.
|
def _reset_kvstore(self):
"""Reset kvstore."""
if self._kvstore and 'dist' in self._kvstore.type:
raise RuntimeError("Cannot reset distributed KVStore.")
self._kv_initialized = False
self._kvstore = None
self._distributed = None
self._update_on_kvstore = None
self._params_to_init = [param for param in self._params]
|
Create kvstore.
|
def _init_kvstore(self):
"""Create kvstore."""
config = self._kvstore_params
# configure kvstore, update_on_kvstore and self._distributed on three cases:
if self._contains_sparse_weight:
# If weight is sparse, kvstore must be present and the weight must be updated on kvstore.
# The training loop is the following:
# - row_sparse_pull(sparse_weight)
# - forward()
# - backward()
# - push_and_update(grad)
# - pull(weight)
kvstore, update_on_kvstore = _create_sparse_kvstore(config['kvstore'])
self._distributed = 'dist' in kvstore.type
# raise err if user provides unsupported configs
if config['update_on_kvstore'] is False:
raise ValueError("Cannot set update_on_kvstore=False when sparse weights "
"are present.")
elif self._contains_sparse_grad:
# For single node training with dense weight and sparse grad,
# we prefer update_on_kvstore=False because this is usually faster.
# This means we push and pull sparse gradients, and we do not store weight in kvstore.
# The training loop is the following:
# - forward()
# - backward()
# - push(grad)
# - pull(grad)
# - update(grad, weight)
#
# For multi-node training with dense weight and sparse grad,
# only update_on_kvstore=True is supported, due to the fact that
# kv.row_sparse_pull(grad) is not implemented.
# Therefore, we push sparse gradients and pull dense weights.
# The training loop contains:
# - forward()
# - backward()
# - push_and_update(grad)
# - pull(weight)
arg_arrays = {param.name: param.data(self._contexts[0]) for param in self._params}
kvstore, _ = _create_kvstore(config['kvstore'], len(self._contexts), arg_arrays)
self._distributed = 'dist' in kvstore.type if kvstore else False
update_on_kvstore = self._distributed
# raise err if user provides unsupported configs
if config['update_on_kvstore'] is not None:
if config['update_on_kvstore'] is False and self._distributed:
raise ValueError("Cannot set update_on_kvstore=False on dist kvstore "
"when sparse gradients are present.")
update_on_kvstore = config['update_on_kvstore']
else:
# Training with dense weight and dense gradients.
# The only unsupported mode is async with update_on_kvstore=False
arg_arrays = {param.name: param.data(self._contexts[0]) for param in self._params}
kvstore, update_on_kvstore = _create_kvstore(config['kvstore'], len(self._contexts),
arg_arrays)
self._distributed = 'dist' in kvstore.type if kvstore else False
if self._distributed and 'async' in kvstore.type:
update_on_kvstore = True
# raise err if user provides unsupported configs
if config['update_on_kvstore'] is False:
raise ValueError("Please set update_on_kvstore=True "
"when training in async mode.")
if config['update_on_kvstore'] is not None:
update_on_kvstore = config['update_on_kvstore']
# set grad compression and optimizers
if kvstore:
if self._compression_params:
kvstore.set_gradient_compression(self._compression_params)
if update_on_kvstore:
# optimizer preferably needs to be set before init for multiprecision
kvstore.set_optimizer(self._optimizer)
self._kvstore = kvstore
self._update_on_kvstore = update_on_kvstore
else:
self._kvstore = None
self._update_on_kvstore = None
self._kv_initialized = True
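
# A hedged usage sketch (assuming a Gluon model and mxnet importable as mx): the kvstore and
# update_on_kvstore arguments passed to gluon.Trainer are what end up in the _kvstore_params
# consulted above for the dense-weight / dense-gradient case.
import mxnet as mx
from mxnet import gluon
net = gluon.nn.Dense(2)
net.initialize()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1},
                        kvstore='device',          # single-machine, multi-device aggregation
                        update_on_kvstore=False)   # keep the optimizer update in the trainer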
|
Internal method to invoke pull operations on KVStore. If `full_idx` is set to True,
`kv.pull` is preferred instead of `kv.row_sparse_pull`.
|
def _row_sparse_pull(self, parameter, out, row_id, full_idx=False):
"""Internal method to invoke pull operations on KVStore. If `full_idx` is set to True,
`kv.pull` is preferred instead of `kv.row_sparse_pull`.
"""
# initialize kv and params if not already
if not self._kv_initialized:
self._init_kvstore()
if self._params_to_init:
self._init_params()
idx = self._param2idx[parameter.name]
if full_idx and 'dist' not in self._kvstore.type:
assert row_id.size == out.shape[0]
self._kvstore.pull(idx, out=out, priority=-idx, ignore_sparse=False)
else:
self._kvstore.row_sparse_pull(idx, out=out, row_ids=row_id, priority=-idx)
|
Saves trainer states (e.g. optimizer, momentum) to a file.
Parameters
----------
fname : str
Path to output states file.
Note
----
`optimizer.param_dict`, which contains Parameter information (such as
`lr_mult` and `wd_mult`) will not be saved.
|
def save_states(self, fname):
"""Saves trainer states (e.g. optimizer, momentum) to a file.
Parameters
----------
fname : str
Path to output states file.
Note
----
`optimizer.param_dict`, which contains Parameter information (such as
`lr_mult` and `wd_mult`) will not be saved.
"""
assert self._optimizer is not None
if not self._kv_initialized:
self._init_kvstore()
if self._params_to_init:
self._init_params()
if self._update_on_kvstore:
assert not self._params_to_init, "Cannot save trainer states when some " \
"parameters are not yet initialized in kvstore."
self._kvstore.save_optimizer_states(fname, dump_optimizer=True)
else:
with open(fname, 'wb') as fout:
fout.write(self._updaters[0].get_states(dump_optimizer=True))
|
For each parameter, reduce the gradients from different contexts.
Should be called after `autograd.backward()`, outside of `record()` scope,
and before `trainer.update()`.
For normal parameter updates, `step()` should be used, which internally calls
`allreduce_grads()` and then `update()`. However, if you need to get the reduced
gradients to perform certain transformation, such as in gradient clipping, then
you may want to manually call `allreduce_grads()` and `update()` separately.
|
def allreduce_grads(self):
"""For each parameter, reduce the gradients from different contexts.
Should be called after `autograd.backward()`, outside of `record()` scope,
and before `trainer.update()`.
For normal parameter updates, `step()` should be used, which internally calls
`allreduce_grads()` and then `update()`. However, if you need to get the reduced
gradients to perform certain transformation, such as in gradient clipping, then
you may want to manually call `allreduce_grads()` and `update()` separately.
"""
if not self._kv_initialized:
self._init_kvstore()
if self._params_to_init:
self._init_params()
assert not (self._kvstore and self._update_on_kvstore), \
'allreduce_grads() when parameters are updated on kvstore ' \
'is not supported. Try setting `update_on_kvstore` ' \
'to False when creating trainer.'
self._allreduce_grads()
|
Makes one step of parameter update. Should be called after
`autograd.backward()` and outside of `record()` scope.
For normal parameter updates, `step()` should be used, which internally calls
`allreduce_grads()` and then `update()`. However, if you need to get the reduced
gradients to perform certain transformation, such as in gradient clipping, then
you may want to manually call `allreduce_grads()` and `update()` separately.
Parameters
----------
batch_size : int
Batch size of data processed. Gradient will be normalized by `1/batch_size`.
Set this to 1 if you normalized loss manually with `loss = mean(loss)`.
ignore_stale_grad : bool, optional, default=False
If true, ignores Parameters with stale gradient (gradient that has not
been updated by `backward` after last step) and skip update.
|
def step(self, batch_size, ignore_stale_grad=False):
"""Makes one step of parameter update. Should be called after
`autograd.backward()` and outside of `record()` scope.
For normal parameter updates, `step()` should be used, which internally calls
`allreduce_grads()` and then `update()`. However, if you need to get the reduced
gradients to perform certain transformation, such as in gradient clipping, then
you may want to manually call `allreduce_grads()` and `update()` separately.
Parameters
----------
batch_size : int
Batch size of data processed. Gradient will be normalized by `1/batch_size`.
Set this to 1 if you normalized loss manually with `loss = mean(loss)`.
ignore_stale_grad : bool, optional, default=False
If true, ignores Parameters with stale gradient (gradient that has not
been updated by `backward` after last step) and skip update.
"""
rescale_grad = self._scale / batch_size
self._check_and_rescale_grad(rescale_grad)
if not self._kv_initialized:
self._init_kvstore()
if self._params_to_init:
self._init_params()
self._allreduce_grads()
self._update(ignore_stale_grad)
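
# A hedged sketch of the manual flow described above (assuming `trainer`, `net`, `loss_fn`,
# `data` and `label` already exist): reduce the gradients first, transform them (here a
# simple clip), then apply the optimizer update.
from mxnet import autograd, nd
with autograd.record():
    loss = loss_fn(net(data), label)
loss.backward()
trainer.allreduce_grads()                        # reduce gradients across devices
for param in net.collect_params().values():
    if param.grad_req != 'null':
        for grad in param.list_grad():
            grad[:] = nd.clip(grad, -1.0, 1.0)   # example transformation on reduced gradients
trainer.update(batch_size=data.shape[0])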
|
Estimate the density of the sparse dataset by averaging 10 random subsamples, each keeping a line with probability 0.01.
|
def estimate_density(DATA_PATH, feature_size):
"""sample 10 times of a size of 1000 for estimating the density of the sparse dataset"""
if not os.path.exists(DATA_PATH):
raise Exception("Data is not there!")
density = []
P = 0.01
for _ in range(10):
num_non_zero = 0
num_sample = 0
with open(DATA_PATH) as f:
for line in f:
if (random.random() < P):
num_non_zero += len(line.split(" ")) - 1
num_sample += 1
density.append(num_non_zero * 1.0 / (feature_size * num_sample))
return sum(density) / len(density)
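
# Worked illustration of the per-line count above, using one hypothetical line in libsvm
# format ("label idx:val idx:val ..."): the number of non-zero features is the token count
# minus one for the label.
line = "1 3:1.0 10:0.5 42:2.0"
feature_size = 100
num_non_zero = len(line.split(" ")) - 1
print(num_non_zero * 1.0 / feature_size)   # density contribution of this sample: 0.03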
|
Loads trainer states (e.g. optimizer, momentum) from a file.
Parameters
----------
fname : str
Path to input states file.
Note
----
`optimizer.param_dict`, which contains Parameter information (such as
`lr_mult` and `wd_mult`) will not be loaded from the file, but rather set
based on current Trainer's parameters.
|
def load_states(self, fname):
"""Loads trainer states (e.g. optimizer, momentum) from a file.
Parameters
----------
fname : str
Path to input states file.
Note
----
`optimizer.param_dict`, which contains Parameter information (such as
`lr_mult` and `wd_mult`) will not be loaded from the file, but rather set
based on current Trainer's parameters.
"""
if not self._kv_initialized:
self._init_kvstore()
if self._params_to_init:
self._init_params()
if self._update_on_kvstore:
self._kvstore.load_optimizer_states(fname)
self._optimizer = self._kvstore._updater.optimizer
else:
with open(fname, 'rb') as f:
states = f.read()
for updater in self._updaters:
updater.set_states(states)
updater.optimizer = self._updaters[0].optimizer
self._optimizer = self._updaters[0].optimizer
param_dict = {i: param for i, param in enumerate(self._params)}
self._optimizer.param_dict = param_dict
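
# A hedged usage sketch (assuming `trainer` is an initialized gluon.Trainer): optimizer state
# can be checkpointed with save_states() and restored later with load_states(), typically
# alongside net.save_parameters()/net.load_parameters().
trainer.save_states('trainer.states')
# ... later, after re-creating an identical trainer ...
trainer.load_states('trainer.states')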
|
Execute the command line command.
|
def exec_cmd(cmd, role, taskid, pass_env):
"""Execute the command line command."""
if cmd[0].find('/') == -1 and os.path.exists(cmd[0]) and os.name != 'nt':
cmd[0] = './' + cmd[0]
cmd = ' '.join(cmd)
env = os.environ.copy()
for k, v in pass_env.items():
env[k] = str(v)
env['DMLC_TASK_ID'] = str(taskid)
env['DMLC_ROLE'] = role
env['DMLC_JOB_CLUSTER'] = 'local'
ntrial = 0
while True:
if os.name == 'nt':
env['DMLC_NUM_ATTEMPT'] = str(ntrial)
ret = subprocess.call(cmd, shell=True, env=env)
if ret != 0:
ntrial += 1
continue
else:
bash = cmd
ret = subprocess.call(bash, shell=True, executable='bash', env=env)
if ret == 0:
logging.debug('Thread %d exit with 0', taskid)
return
else:
if os.name == 'nt':
sys.exit(-1)
else:
raise RuntimeError('Get nonzero return code=%d' % ret)
|
Submit function of local jobs.
|
def submit(args):
    """Submit function of local jobs."""
    gpus = args.gpus.strip().split(',')
def mthread_submit(nworker, nserver, envs):
"""
customized submit script, that submit nslave jobs, each must contain args as parameter
note this can be a lambda function containing additional parameters in input
Parameters
----------
nworker: number of slave process to start up
nserver: number of server nodes to start up
envs: enviroment variables to be added to the starting programs
"""
procs = {}
for i, gpu in enumerate(gpus):
for j in range(args.num_threads):
                idx = i * args.num_threads + j
                procs[idx] = Thread(target=exec_cmd,
                                    args=(args.command + ['--gpus=%s' % gpu], 'worker', idx, envs))
                procs[idx].setDaemon(True)
                procs[idx].start()
for i in range(len(gpus)*args.num_threads, len(gpus)*args.num_threads + nserver):
procs[i] = Thread(target=exec_cmd, args=(args.command, 'server', i, envs))
procs[i].setDaemon(True)
procs[i].start()
# call submit, with nslave, the commands to run each job and submit function
tracker.submit(args.num_threads*len(gpus), args.num_servers, fun_submit=mthread_submit,
pscmd=(' '.join(args.command)))
|
Iterates through p, identifying non-zero and non-repeating values, and returns them in a list.
Parameters
----------
p: list of int
Returns
-------
list of int
|
def ctc_label(p):
"""Iterates through p, identifying non-zero and non-repeating values, and returns them in a list Parameters
----------
p: list of int
Returns
-------
list of int
"""
ret = []
p1 = [0] + p
for i, _ in enumerate(p):
c1 = p1[i]
c2 = p1[i+1]
if c2 in (0, c1):
continue
ret.append(c2)
return ret
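
# A small worked example of the CTC collapse above: blanks (0) are dropped and repeated
# labels are merged.
print(ctc_label([0, 1, 1, 0, 2, 2, 2, 0, 3]))   # -> [1, 2, 3]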
|
Truncates the list of integers at the first zero (the blank padding) and returns the prefix as a new list.
|
def _remove_blank(l):
""" Removes trailing zeros in the list of integers and returns a new list of integers"""
ret = []
for i, _ in enumerate(l):
if l[i] == 0:
break
ret.append(l[i])
return ret
|
Calculates the Longest Common Subsequence between p and l (both list of int) and returns its length
|
def _lcs(p, l):
    """ Calculates the Longest Common Subsequence between p and l (both list of int) and returns its length"""
    # Dynamic programming: dp[i, j] is the LCS length of p[:i] and l[:j]
    if len(p) == 0 or len(l) == 0:
        return 0
    dp = np.zeros((len(p) + 1, len(l) + 1), dtype=np.int32)
    for i in range(1, len(p) + 1):
        for j in range(1, len(l) + 1):
            if p[i - 1] == l[j - 1]:
                dp[i, j] = dp[i - 1, j - 1] + 1
            else:
                dp[i, j] = max(dp[i - 1, j], dp[i, j - 1])
    return int(dp[len(p), len(l)])
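
# Quick check of the dynamic program above: the longest common subsequence of [1, 2, 3, 4]
# and [2, 4, 5] is [2, 4], so the returned length is 2.
print(_lcs([1, 2, 3, 4], [2, 4, 5]))   # -> 2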
|
Simple accuracy measure: number of 100% accurate predictions divided by total number
|
def accuracy(self, label, pred):
""" Simple accuracy measure: number of 100% accurate predictions divided by total number """
hit = 0.
total = 0.
batch_size = label.shape[0]
for i in range(batch_size):
l = self._remove_blank(label[i])
p = []
for k in range(self.seq_len):
p.append(np.argmax(pred[k * batch_size + i]))
p = self.ctc_label(p)
if len(p) == len(l):
match = True
for k, _ in enumerate(p):
if p[k] != int(l[k]):
match = False
break
if match:
hit += 1.0
total += 1.0
assert total == batch_size
return hit / total
|
Longest Common Subsequence accuracy measure: calculate accuracy of each prediction as LCS/length
|
def accuracy_lcs(self, label, pred):
""" Longest Common Subsequence accuracy measure: calculate accuracy of each prediction as LCS/length"""
hit = 0.
total = 0.
batch_size = label.shape[0]
for i in range(batch_size):
l = self._remove_blank(label[i])
p = []
for k in range(self.seq_len):
p.append(np.argmax(pred[k * batch_size + i]))
p = self.ctc_label(p)
hit += self._lcs(p, l) * 1.0 / len(l)
total += 1.0
assert total == batch_size
return hit / total
|
Not particularly fast code to parse the text file and load into NDArrays.
Returns a single training data iterator wrapped in a PrefetchingIter.
|
def get_movielens_iter(filename, batch_size):
"""Not particularly fast code to parse the text file and load into NDArrays.
    Returns a single training data iterator wrapped in a PrefetchingIter.
"""
logging.info("Preparing data iterators for " + filename + " ... ")
user = []
item = []
score = []
with open(filename, 'r') as f:
num_samples = 0
for line in f:
tks = line.strip().split('::')
if len(tks) != 4:
continue
num_samples += 1
user.append((tks[0]))
item.append((tks[1]))
score.append((tks[2]))
# convert to ndarrays
user = mx.nd.array(user, dtype='int32')
item = mx.nd.array(item)
score = mx.nd.array(score)
# prepare data iters
data_train = {'user': user, 'item': item}
label_train = {'score': score}
iter_train = mx.io.NDArrayIter(data=data_train,label=label_train,
batch_size=batch_size, shuffle=True)
return mx.io.PrefetchingIter(iter_train)
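
# Example of the MovieLens rating line format parsed above ("user::item::rating::timestamp"),
# shown with a made-up record:
line = "1::1193::5::978300760"
print(line.strip().split('::'))   # -> ['1', '1193', '5', '978300760']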
|
Decode image from str buffer.
Wrapper for cv2.imdecode that uses mx.nd.NDArray
Parameters
----------
str_img : str
str buffer read from image file
flag : int
same as flag for cv2.imdecode
Returns
-------
img : NDArray
decoded image in (width, height, channels)
with BGR color channel order
|
def imdecode(str_img, flag=1):
"""Decode image from str buffer.
Wrapper for cv2.imdecode that uses mx.nd.NDArray
Parameters
----------
str_img : str
str buffer read from image file
flag : int
same as flag for cv2.imdecode
Returns
-------
img : NDArray
decoded image in (width, height, channels)
with BGR color channel order
"""
hdl = NDArrayHandle()
check_call(_LIB.MXCVImdecode(ctypes.c_char_p(str_img),
mx_uint(len(str_img)),
flag, ctypes.byref(hdl)))
return mx.nd.NDArray(hdl)
|
Resize image to the given size.
Wrapper for cv2.resize that uses mx.nd.NDArray
Parameters
----------
src : NDArray
image in (width, height, channels)
size : tuple
target size in (width, height)
interpolation : int
same as interpolation for cv2.resize
Returns
-------
img : NDArray
resized image
|
def resize(src, size, interpolation=cv2.INTER_LINEAR):
"""Decode image from str buffer.
Wrapper for cv2.imresize that uses mx.nd.NDArray
Parameters
----------
src : NDArray
image in (width, height, channels)
size : tuple
target size in (width, height)
interpolation : int
        same as interpolation for cv2.resize
Returns
-------
img : NDArray
resized image
"""
hdl = NDArrayHandle()
check_call(_LIB.MXCVResize(src.handle, mx_uint(size[0]), mx_uint(size[1]),
interpolation, ctypes.byref(hdl)))
return mx.nd.NDArray(hdl)
|
Pad image border
Wrapper for cv2.copyMakeBorder that uses mx.nd.NDArray
Parameters
----------
src : NDArray
Image in (width, height, channels).
Others are the same with cv2.copyMakeBorder
Returns
-------
img : NDArray
padded image
|
def copyMakeBorder(src, top, bot, left, right, border_type=cv2.BORDER_CONSTANT, value=0):
"""Pad image border
Wrapper for cv2.copyMakeBorder that uses mx.nd.NDArray
Parameters
----------
src : NDArray
Image in (width, height, channels).
Others are the same with cv2.copyMakeBorder
Returns
-------
img : NDArray
padded image
"""
hdl = NDArrayHandle()
check_call(_LIB.MXCVcopyMakeBorder(src.handle, ctypes.c_int(top), ctypes.c_int(bot),
ctypes.c_int(left), ctypes.c_int(right),
ctypes.c_int(border_type), ctypes.c_double(value),
ctypes.byref(hdl)))
return mx.nd.NDArray(hdl)
|
Crop src at fixed location, and (optionally) resize it to size
|
def fixed_crop(src, x0, y0, w, h, size=None, interpolation=cv2.INTER_CUBIC):
"""Crop src at fixed location, and (optionally) resize it to size"""
out = mx.nd.crop(src, begin=(y0, x0, 0), end=(y0+h, x0+w, int(src.shape[2])))
if size is not None and (w, h) != size:
out = resize(out, size, interpolation=interpolation)
return out
|
Randomly crop src with size. Upsample result if src is smaller than size
|
def random_crop(src, size):
"""Randomly crop src with size. Upsample result if src is smaller than size"""
h, w, _ = src.shape
new_w, new_h = scale_down((w, h), size)
x0 = random.randint(0, w - new_w)
y0 = random.randint(0, h - new_h)
out = fixed_crop(src, x0, y0, new_w, new_h, size)
return out, (x0, y0, new_w, new_h)
|
Randomly crop src with size. Randomize area and aspect ratio
|
def random_size_crop(src, size, min_area=0.25, ratio=(3.0/4.0, 4.0/3.0)):
"""Randomly crop src with size. Randomize area and aspect ratio"""
h, w, _ = src.shape
area = w*h
for _ in range(10):
new_area = random.uniform(min_area, 1.0) * area
new_ratio = random.uniform(*ratio)
        # preserve the sampled area: side lengths are sqrt(area*ratio) and sqrt(area/ratio)
        new_w = int((new_area * new_ratio) ** 0.5)
        new_h = int((new_area / new_ratio) ** 0.5)
if random.uniform(0., 1.) < 0.5:
new_w, new_h = new_h, new_w
if new_w > w or new_h > h:
continue
x0 = random.randint(0, w - new_w)
y0 = random.randint(0, h - new_h)
out = fixed_crop(src, x0, y0, new_w, new_h, size)
return out, (x0, y0, new_w, new_h)
return random_crop(src, size)
|
Move iterator position forward
|
def next(self):
"""Move iterator position forward"""
batch = mx.nd.zeros((self.batch_size, self.size[1], self.size[0], 3))
i = self.cur
for i in range(self.cur, min(len(self.list), self.cur+self.batch_size)):
        str_img = open(self.root+self.list[i]+'.jpg', 'rb').read()
img = imdecode(str_img, 1)
img, _ = random_crop(img, self.size)
batch[i - self.cur] = img
batch = mx.nd.transpose(batch, axes=(0, 3, 1, 2))
ret = mx.io.DataBatch(data=[batch],
label=[],
                          pad=self.batch_size - (i + 1 - self.cur),
index=None)
    self.cur = i + 1
return ret
|
Check to see if the two arrays are the same size.
|
def check_label_shapes(labels, preds, shape=0):
"""Check to see if the two arrays are the same size."""
if shape == 0:
label_shape, pred_shape = len(labels), len(preds)
else:
label_shape, pred_shape = labels.shape, preds.shape
if label_shape != pred_shape:
raise ValueError("Shape of labels {} does not match shape of "
"predictions {}".format(label_shape, pred_shape))
|
Imports the ONNX model files, passed as a parameter, into Gluon SymbolBlock object.
Parameters
----------
model_file : str
ONNX model file name
ctx : Context or list of Context
Loads the model into one or many context(s).
Returns
-------
sym_block : :class:`~mxnet.gluon.SymbolBlock`
A SymbolBlock object representing the given model file.
Notes
-----
This method is available when you ``import mxnet.contrib.onnx``
|
def import_to_gluon(model_file, ctx):
"""
Imports the ONNX model files, passed as a parameter, into Gluon SymbolBlock object.
Parameters
----------
model_file : str
ONNX model file name
ctx : Context or list of Context
Loads the model into one or many context(s).
Returns
-------
sym_block : :class:`~mxnet.gluon.SymbolBlock`
A SymbolBlock object representing the given model file.
Notes
-----
This method is available when you ``import mxnet.contrib.onnx``
"""
graph = GraphProto()
try:
import onnx
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. Instructions to"
+ " install - https://github.com/onnx/onnx#installation")
model_proto = onnx.load_model(model_file)
net = graph.graph_to_gluon(model_proto.graph, ctx)
return net
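
# A hedged usage sketch (assuming onnx is installed and a hypothetical file 'model.onnx'
# exists): the public entry point lives under mxnet.contrib.onnx.
import mxnet as mx
from mxnet.contrib import onnx as onnx_mxnet
net = onnx_mxnet.import_to_gluon('model.onnx', ctx=mx.cpu())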
|
Model initialization.
|
def get_model(model, ctx, opt):
"""Model initialization."""
kwargs = {'ctx': ctx, 'pretrained': opt.use_pretrained, 'classes': classes}
if model.startswith('resnet'):
kwargs['thumbnail'] = opt.use_thumbnail
elif model.startswith('vgg'):
kwargs['batch_norm'] = opt.batch_norm
net = models.get_model(model, **kwargs)
if opt.resume:
net.load_parameters(opt.resume)
elif not opt.use_pretrained:
if model in ['alexnet']:
net.initialize(mx.init.Normal())
else:
net.initialize(mx.init.Xavier(magnitude=2))
net.cast(opt.dtype)
return net
|
get dataset iterators
|
def get_data_iters(dataset, batch_size, opt):
"""get dataset iterators"""
if dataset == 'mnist':
train_data, val_data = get_mnist_iterator(batch_size, (1, 28, 28),
num_parts=kv.num_workers, part_index=kv.rank)
elif dataset == 'cifar10':
train_data, val_data = get_cifar10_iterator(batch_size, (3, 32, 32),
num_parts=kv.num_workers, part_index=kv.rank)
elif dataset == 'imagenet':
shape_dim = 299 if model_name == 'inceptionv3' else 224
if not opt.data_dir:
            raise ValueError('Dir containing raw images in train/val is required for imagenet. '
                             'Please specify "--data-dir"')
train_data, val_data = get_imagenet_iterator(opt.data_dir, batch_size,
opt.num_workers, shape_dim, opt.dtype)
elif dataset == 'caltech101':
train_data, val_data = get_caltech101_iterator(batch_size, opt.num_workers, opt.dtype)
elif dataset == 'dummy':
shape_dim = 299 if model_name == 'inceptionv3' else 224
train_data, val_data = dummy_iterator(batch_size, (3, shape_dim, shape_dim))
return train_data, val_data
|
Set the learning rate to the initial value decayed by ratio every N epochs.
|
def update_learning_rate(lr, trainer, epoch, ratio, steps):
"""Set the learning rate to the initial value decayed by ratio every N epochs."""
new_lr = lr * (ratio ** int(np.sum(np.array(steps) < epoch)))
trainer.set_learning_rate(new_lr)
return trainer
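
# Worked example of the schedule above: with lr=0.1, ratio=0.1 and steps=[30, 60], epoch 45
# has passed one boundary, so the new learning rate is 0.1 * 0.1**1 = 0.01.
import numpy as np
lr, ratio, steps, epoch = 0.1, 0.1, [30, 60], 45
print(lr * (ratio ** int(np.sum(np.array(steps) < epoch))))   # ~0.01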
|
Seeds the random number generators in MXNet.
This affects the behavior of modules in MXNet that uses random number generators,
like the dropout operator and `NDArray`'s random sampling operators.
Parameters
----------
seed_state : int
The random number seed.
ctx : Context
The device context of the generator. The default is "all" which means seeding random
number generators of all devices.
Notes
-----
Random number generators in MXNet are device specific.
`mx.random.seed(seed_state)` sets the state of each generator using `seed_state` and the
device id. Therefore, random numbers generated from different devices can be different
even if they are seeded using the same seed.
To produce identical random number sequences independent of the device id,
set optional `ctx` argument. This produces the same sequence of random numbers independent
of the device id, but the sequence can be different on different kind of devices as MXNet's
random number generators for CPU and GPU use different algorithms.
Example
-------
>>> print(mx.nd.random.normal(shape=(2,2)).asnumpy())
[[ 1.36481571 -0.62203991]
[-1.4962182 -0.08511394]]
>>> print(mx.nd.random.normal(shape=(2,2)).asnumpy())
[[ 1.09544981 -0.20014545]
[-0.20808885 0.2527658 ]]
# Same results on the same device with the same seed
>>> mx.random.seed(128)
>>> print(mx.nd.random.normal(shape=(2,2)).asnumpy())
[[ 0.47400656 -0.75213492]
[ 0.20251541 0.95352972]]
>>> mx.random.seed(128)
>>> print(mx.nd.random.normal(shape=(2,2)).asnumpy())
[[ 0.47400656 -0.75213492]
[ 0.20251541 0.95352972]]
# Different results on gpu(0) and gpu(1) with the same seed
>>> mx.random.seed(128)
>>> print(mx.nd.random.normal(shape=(2,2), ctx=mx.gpu(0)).asnumpy())
[[ 2.5020072 -1.6884501]
[-0.7931333 -1.4218881]]
>>> mx.random.seed(128)
>>> print(mx.nd.random.normal(shape=(2,2), ctx=mx.gpu(1)).asnumpy())
[[ 0.24336822 -1.664805 ]
[-1.0223296 1.253198 ]]
# Seeding with `ctx` argument produces identical results on gpu(0) and gpu(1)
>>> mx.random.seed(128, ctx=mx.gpu(0))
>>> print(mx.nd.random.normal(shape=(2,2), ctx=mx.gpu(0)).asnumpy())
[[ 2.5020072 -1.6884501]
[-0.7931333 -1.4218881]]
>>> mx.random.seed(128, ctx=mx.gpu(1))
>>> print(mx.nd.random.normal(shape=(2,2), ctx=mx.gpu(1)).asnumpy())
[[ 2.5020072 -1.6884501]
[-0.7931333 -1.4218881]]
|
def seed(seed_state, ctx="all"):
"""Seeds the random number generators in MXNet.
This affects the behavior of modules in MXNet that uses random number generators,
like the dropout operator and `NDArray`'s random sampling operators.
Parameters
----------
seed_state : int
The random number seed.
ctx : Context
The device context of the generator. The default is "all" which means seeding random
number generators of all devices.
Notes
-----
Random number generators in MXNet are device specific.
`mx.random.seed(seed_state)` sets the state of each generator using `seed_state` and the
device id. Therefore, random numbers generated from different devices can be different
even if they are seeded using the same seed.
To produce identical random number sequences independent of the device id,
set optional `ctx` argument. This produces the same sequence of random numbers independent
of the device id, but the sequence can be different on different kind of devices as MXNet's
random number generators for CPU and GPU use different algorithms.
Example
-------
>>> print(mx.nd.random.normal(shape=(2,2)).asnumpy())
[[ 1.36481571 -0.62203991]
[-1.4962182 -0.08511394]]
>>> print(mx.nd.random.normal(shape=(2,2)).asnumpy())
[[ 1.09544981 -0.20014545]
[-0.20808885 0.2527658 ]]
# Same results on the same device with the same seed
>>> mx.random.seed(128)
>>> print(mx.nd.random.normal(shape=(2,2)).asnumpy())
[[ 0.47400656 -0.75213492]
[ 0.20251541 0.95352972]]
>>> mx.random.seed(128)
>>> print(mx.nd.random.normal(shape=(2,2)).asnumpy())
[[ 0.47400656 -0.75213492]
[ 0.20251541 0.95352972]]
# Different results on gpu(0) and gpu(1) with the same seed
>>> mx.random.seed(128)
>>> print(mx.nd.random.normal(shape=(2,2), ctx=mx.gpu(0)).asnumpy())
[[ 2.5020072 -1.6884501]
[-0.7931333 -1.4218881]]
>>> mx.random.seed(128)
>>> print(mx.nd.random.normal(shape=(2,2), ctx=mx.gpu(1)).asnumpy())
[[ 0.24336822 -1.664805 ]
[-1.0223296 1.253198 ]]
# Seeding with `ctx` argument produces identical results on gpu(0) and gpu(1)
>>> mx.random.seed(128, ctx=mx.gpu(0))
>>> print(mx.nd.random.normal(shape=(2,2), ctx=mx.gpu(0)).asnumpy())
[[ 2.5020072 -1.6884501]
[-0.7931333 -1.4218881]]
>>> mx.random.seed(128, ctx=mx.gpu(1))
>>> print(mx.nd.random.normal(shape=(2,2), ctx=mx.gpu(1)).asnumpy())
[[ 2.5020072 -1.6884501]
[-0.7931333 -1.4218881]]
"""
if not isinstance(seed_state, integer_types):
raise ValueError('seed_state must be int')
seed_state = ctypes.c_int(int(seed_state))
if ctx == "all":
check_call(_LIB.MXRandomSeed(seed_state))
else:
ctx = Context(ctx)
check_call(_LIB.MXRandomSeedContext(seed_state, ctx.device_typeid, ctx.device_id))
|
Draw random samples from a uniform distribution.
|
def random_uniform(attrs, inputs, proto_obj):
"""Draw random samples from a uniform distribtuion."""
try:
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
"Instructions to install - https://github.com/onnx/onnx")
new_attrs = translation_utils._remove_attributes(attrs, ['seed'])
new_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attrs.get('dtype', 1))]
return 'random_uniform', new_attrs, inputs
|
Draw random samples from a Gaussian distribution.
|
def random_normal(attrs, inputs, proto_obj):
"""Draw random samples from a Gaussian distribution."""
try:
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
"Instructions to install - https://github.com/onnx/onnx")
new_attr = translation_utils._remove_attributes(attrs, ['seed'])
new_attr = translation_utils._fix_attribute_names(new_attr, {'mean': 'loc'})
new_attr['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attr.get('dtype', 1))]
return 'random_normal', new_attr, inputs
|
Adding two tensors
|
def add(attrs, inputs, proto_obj):
"""Adding two tensors"""
new_attr = {}
if 'broadcast' in attrs and attrs['broadcast'] == 1:
broadcast_axis = attrs['axis']
op_value = translation_utils._fix_broadcast('broadcast_add', inputs,
broadcast_axis, proto_obj)
return op_value, new_attr, inputs
return 'broadcast_add', new_attr, inputs
|
Mean of all the input tensors.
|
def mean(attrs, inputs, proto_obj):
"""Mean of all the input tensors."""
concat_input = [symbol.expand_dims(op_input, axis=0) for op_input in inputs]
concat_sym = symbol.concat(*concat_input, dim=0)
mean_sym = symbol.mean(concat_sym, axis=0)
return mean_sym, attrs, inputs
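
# Sketch of the reduction used above, shown with NDArrays for clarity (the converter itself
# operates on symbols): stacking the inputs along a new leading axis and averaging over it
# yields the elementwise mean of all input tensors. Assumes mxnet is importable as mx.
import mxnet as mx
a, b = mx.nd.array([1.0, 2.0]), mx.nd.array([3.0, 6.0])
stacked = mx.nd.concat(*[x.expand_dims(axis=0) for x in (a, b)], dim=0)
print(stacked.mean(axis=0))   # -> [2. 4.]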
|
Returns indices of the maximum values along an axis
|
def argmax(attrs, inputs, proto_obj):
"""Returns indices of the maximum values along an axis"""
axis = attrs.get('axis', 0)
keepdims = attrs.get('keepdims', 1)
argmax_op = symbol.argmax(inputs[0], axis=axis, keepdims=keepdims)
# onnx argmax operator always expects int64 as output type
cast_attrs = {'dtype': 'int64'}
return 'cast', cast_attrs, argmax_op
|
Returns indices of the minimum values along an axis.
|
def argmin(attrs, inputs, proto_obj):
"""Returns indices of the minimum values along an axis."""
axis = attrs.get('axis', 0)
keepdims = attrs.get('keepdims', 1)
argmin_op = symbol.argmin(inputs[0], axis=axis, keepdims=keepdims)
    # onnx argmin operator always expects int64 as output type
cast_attrs = {'dtype': 'int64'}
return 'cast', cast_attrs, argmin_op
|
Elementwise maximum of arrays.
MXNet maximum compares only two symbols at a time.
ONNX can send more than two to compare.
Breaking into multiple mxnet ops to compare two symbols at a time
|
def maximum(attrs, inputs, proto_obj):
"""
Elementwise maximum of arrays.
MXNet maximum compares only two symbols at a time.
ONNX can send more than two to compare.
Breaking into multiple mxnet ops to compare two symbols at a time
"""
if len(inputs) > 1:
mxnet_op = symbol.maximum(inputs[0], inputs[1])
for op_input in inputs[2:]:
mxnet_op = symbol.maximum(mxnet_op, op_input)
    else:
        # single input: element-wise maximum of the input with itself (identity)
        mxnet_op = symbol.maximum(inputs[0], inputs[0])
return mxnet_op, attrs, inputs
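For intuition, a minimal imperative sketch (NDArray API rather than symbols, assuming MXNet is installed) of the same pairwise chaining:
import mxnet as mx

arrays = [mx.nd.array([1, 5, 3]), mx.nd.array([4, 2, 6]), mx.nd.array([0, 7, 1])]
result = arrays[0]
for op_input in arrays[1:]:
    result = mx.nd.maximum(result, op_input)   # compare two operands at a time
print(result.asnumpy())                        # [4. 7. 6.]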
|
Elementwise minimum of arrays.
|
def minimum(attrs, inputs, proto_obj):
"""Elementwise minimum of arrays."""
# MXNet minimum compares only two symbols at a time.
# ONNX can send more than two to compare.
# Breaking into multiple mxnet ops to compare two symbols at a time
if len(inputs) > 1:
mxnet_op = symbol.minimum(inputs[0], inputs[1])
for op_input in inputs[2:]:
mxnet_op = symbol.minimum(mxnet_op, op_input)
    else:
        # single input: element-wise minimum of the input with itself (identity)
        mxnet_op = symbol.minimum(inputs[0], inputs[0])
return mxnet_op, attrs, inputs
|
Joins input arrays along a given axis.
|
def concat(attrs, inputs, proto_obj):
""" Joins input arrays along a given axis. """
new_attrs = translation_utils._fix_attribute_names(attrs, {'axis': 'dim'})
return 'concat', new_attrs, inputs
|
Add padding to input tensor
|
def pad(attrs, inputs, proto_obj):
""" Add padding to input tensor"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'pads' : 'pad_width',
'value' : 'constant_value'
})
new_attrs['pad_width'] = translation_utils._pad_sequence_fix(new_attrs.get('pad_width'))
return 'pad', new_attrs, inputs
|
Batch normalization.
|
def batch_norm(attrs, inputs, proto_obj):
"""Batch normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon': 'eps',
'is_test': 'fix_gamma'})
new_attrs = translation_utils._remove_attributes(new_attrs,
['spatial', 'consumed_inputs'])
    # Disable cuDNN BN only if the model's epsilon is less than the minimum cuDNN eps (1e-5)
cudnn_min_eps = 1e-5
cudnn_off = 0 if attrs.get('epsilon', cudnn_min_eps) >= cudnn_min_eps else 1
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'cudnn_off': cudnn_off})
# in test mode "fix_gamma" should be unset.
new_attrs['fix_gamma'] = not attrs.get('is_test', 1)
return 'BatchNorm', new_attrs, inputs
|
Instance Normalization.
|
def instance_norm(attrs, inputs, proto_obj):
"""Instance Normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon' : 'eps'})
new_attrs['eps'] = attrs.get('epsilon', 1e-5)
return 'InstanceNorm', new_attrs, inputs
|
Leaky Relu function
|
def leaky_relu(attrs, inputs, proto_obj):
"""Leaky Relu function"""
if 'alpha' in attrs:
new_attrs = translation_utils._fix_attribute_names(attrs, {'alpha' : 'slope'})
else:
new_attrs = translation_utils._add_extra_attributes(attrs, {'slope': 0.01})
return 'LeakyReLU', new_attrs, inputs
|
Elu function
|
def _elu(attrs, inputs, proto_obj):
"""Elu function"""
if 'alpha' in attrs:
new_attrs = translation_utils._fix_attribute_names(attrs, {'alpha' : 'slope'})
else:
new_attrs = translation_utils._add_extra_attributes(attrs, {'slope': 1.0})
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'act_type': 'elu'})
return 'LeakyReLU', new_attrs, inputs
|
PRelu function
|
def _prelu(attrs, inputs, proto_obj):
"""PRelu function"""
new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'prelu'})
return 'LeakyReLU', new_attrs, inputs
|
Selu function
|
def _selu(attrs, inputs, proto_obj):
"""Selu function"""
new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'selu'})
return 'LeakyReLU', new_attrs, inputs
|
Softmax function.
|
def softmax(attrs, inputs, proto_obj):
"""Softmax function."""
if 'axis' not in attrs:
attrs = translation_utils._add_extra_attributes(attrs, {'axis': 1})
return 'softmax', attrs, inputs
|
Applies the softplus activation function element-wise to the input.
|
def softplus(attrs, inputs, proto_obj):
"""Applies the sofplus activation function element-wise to the input."""
new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type' : 'softrelu'})
return 'Activation', new_attrs, inputs
|
Compute N-D convolution on (N+2)-D input.
|
def conv(attrs, inputs, proto_obj):
"""Compute N-D convolution on (N+2)-D input."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape' : 'kernel',
'strides' : 'stride',
'pads': 'pad',
'dilations': 'dilate',
'group': 'num_group'})
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'num_group' : 1})
new_attrs = translation_utils._fix_bias('Convolution', new_attrs, len(inputs))
new_attrs = translation_utils._fix_channels('Convolution', new_attrs, inputs, proto_obj)
kernel = new_attrs['kernel']
stride = new_attrs['stride'] if 'stride' in new_attrs else []
padding = new_attrs['pad'] if 'pad' in new_attrs else []
dilations = new_attrs['dilate'] if 'dilate' in new_attrs else []
num_filter = new_attrs['num_filter']
num_group = new_attrs['num_group']
no_bias = new_attrs['no_bias'] if 'no_bias' in new_attrs else 0
bias = None if no_bias is True else inputs[2]
    # Unlike ONNX, MXNet's convolution operator does not support asymmetric padding,
    # so we first apply the 'Pad' operator (which does) and then run the convolution.
pad_width = (0, 0, 0, 0) + translation_utils._pad_sequence_fix(padding, kernel_dim=len(kernel))
pad_op = symbol.pad(inputs[0], mode='constant', pad_width=pad_width)
conv_op = symbol.Convolution(pad_op, inputs[1], bias,
kernel=kernel, stride=stride, dilate=dilations,
num_filter=num_filter, num_group=num_group, no_bias=no_bias)
return conv_op, new_attrs, inputs
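To illustrate the padding trick in isolation, a small NDArray sketch with assumed NCHW shapes; pad_width lists (before, after) per axis and leaves the batch and channel axes unpadded:
import mxnet as mx

data = mx.nd.ones((1, 3, 5, 5))                           # NCHW input
padded = mx.nd.pad(data, mode='constant', constant_value=0,
                   pad_width=(0, 0, 0, 0, 1, 2, 1, 2))    # asymmetric H/W padding
print(padded.shape)                                       # (1, 3, 8, 8)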
|
Computes transposed convolution of the input tensor.
|
def deconv(attrs, inputs, proto_obj):
"""Computes transposed convolution of the input tensor."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape' : 'kernel',
'strides' : 'stride',
'pads': 'pad',
'dilations': 'dilate',
'group': 'num_group'})
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'num_group' : 1})
new_attrs = translation_utils._fix_bias('Deconvolution', new_attrs, len(inputs))
new_attrs = translation_utils._fix_channels('Deconvolution', new_attrs, inputs, proto_obj)
kernel = new_attrs['kernel']
stride = new_attrs['stride'] if 'stride' in new_attrs else []
padding = new_attrs['pad'] if 'pad' in new_attrs else []
dilations = new_attrs['dilate'] if 'dilate' in new_attrs else []
num_filter = new_attrs['num_filter']
num_group = new_attrs['num_group']
no_bias = new_attrs['no_bias'] if 'no_bias' in new_attrs else False
bias = None if no_bias is True else inputs[2]
    # Unlike ONNX, MXNet's deconvolution operator does not support asymmetric padding,
    # so we first apply the 'Pad' operator (which does) and then run the deconvolution.
pad_width = (0, 0, 0, 0) + translation_utils._pad_sequence_fix(padding, kernel_dim=len(kernel))
pad_op = symbol.pad(inputs[0], mode='constant', pad_width=pad_width)
deconv_op = symbol.Deconvolution(pad_op, inputs[1], bias,
kernel=kernel, stride=stride, dilate=dilations,
num_filter=num_filter, num_group=num_group, no_bias=no_bias)
return deconv_op, new_attrs, inputs
|
Applies a linear transformation: Y = XW^T + b.
|
def fully_connected(attrs, inputs, proto_obj):
"""Applies a linear transformation: Y=XWT+b."""
new_attrs = translation_utils._remove_attributes(attrs, ['axis'])
new_attrs = translation_utils._fix_bias('FullyConnected', new_attrs, len(inputs))
new_attrs = translation_utils._fix_channels('FullyConnected', new_attrs, inputs, proto_obj)
return 'FullyConnected', new_attrs, inputs
|
Performs max pooling on the input.
|
def global_maxpooling(attrs, inputs, proto_obj):
"""Performs max pooling on the input."""
new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
'kernel': (1, 1),
'pool_type': 'max'})
return 'Pooling', new_attrs, inputs
|
Performs avg pooling on the input.
|
def global_avgpooling(attrs, inputs, proto_obj):
"""Performs avg pooling on the input."""
new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
'kernel': (1, 1),
'pool_type': 'avg'})
return 'Pooling', new_attrs, inputs
|
Performs global lp pooling on the input.
|
def global_lppooling(attrs, inputs, proto_obj):
"""Performs global lp pooling on the input."""
p_value = attrs.get('p', 2)
new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
'kernel': (1, 1),
'pool_type': 'lp',
'p_value': p_value})
new_attrs = translation_utils._remove_attributes(new_attrs, ['p'])
return 'Pooling', new_attrs, inputs
|
Performs general matrix multiplication and accumulation
|
def linalg_gemm(attrs, inputs, proto_obj):
"""Performs general matrix multiplication and accumulation"""
trans_a = 0
trans_b = 0
alpha = 1
beta = 1
if 'transA' in attrs:
trans_a = attrs['transA']
if 'transB' in attrs:
trans_b = attrs['transB']
if 'alpha' in attrs:
alpha = attrs['alpha']
if 'beta' in attrs:
beta = attrs['beta']
flatten_a = symbol.flatten(inputs[0])
matmul_op = symbol.linalg_gemm2(A=flatten_a, B=inputs[1],
transpose_a=trans_a, transpose_b=trans_b,
alpha=alpha)
gemm_op = symbol.broadcast_add(matmul_op, beta*inputs[2])
new_attrs = translation_utils._fix_attribute_names(attrs, {'transA': 'transpose_a',
'transB': 'transpose_b'})
new_attrs = translation_utils._remove_attributes(new_attrs, ['broadcast'])
return gemm_op, new_attrs, inputs
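The ONNX Gemm being translated computes Y = alpha * op(A) * op(B) + beta * C, where op() optionally transposes; a small NumPy sketch of those semantics (no transposes, toy shapes):
import numpy as np

A = np.arange(6, dtype='float32').reshape(2, 3)
B = np.arange(12, dtype='float32').reshape(3, 4)
C = np.ones((2, 4), dtype='float32')
alpha, beta = 1.0, 1.0
Y = alpha * A.dot(B) + beta * C
print(Y.shape)   # (2, 4)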
|
Local Response Normalization.
|
def local_response_norm(attrs, inputs, proto_obj):
"""Local Response Normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'bias': 'knorm',
'size' : 'nsize'})
return 'LRN', new_attrs, inputs
|
Dropout Regularization.
|
def dropout(attrs, inputs, proto_obj):
"""Dropout Regularization."""
mode = 'training'
if 'is_test' in attrs and attrs['is_test'] == 0:
mode = 'always'
new_attrs = translation_utils._fix_attribute_names(attrs,
{'ratio': 'p'})
new_attrs = translation_utils._remove_attributes(new_attrs, ['is_test'])
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'mode': mode})
return 'Dropout', new_attrs, inputs
|
Reshape the given array by the shape attribute.
|
def reshape(attrs, inputs, proto_obj):
"""Reshape the given array by the shape attribute."""
if len(inputs) == 1:
return 'reshape', attrs, inputs[0]
reshape_shape = list(proto_obj._params[inputs[1].name].asnumpy())
reshape_shape = [int(i) for i in reshape_shape]
new_attrs = {'shape': reshape_shape}
return 'reshape', new_attrs, inputs[:1]
|
Cast input to a given dtype
|
def cast(attrs, inputs, proto_obj):
""" Cast input to a given dtype"""
try:
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
+ "Instructions to install - https://github.com/onnx/onnx")
new_attrs = translation_utils._fix_attribute_names(attrs, {'to' : 'dtype'})
new_attrs['dtype'] = TENSOR_TYPE_TO_NP_TYPE[int(new_attrs['dtype'])]
return 'cast', new_attrs, inputs
|
Splits an array along a particular axis into multiple sub-arrays.
|
def split(attrs, inputs, proto_obj):
"""Splits an array along a particular axis into multiple sub-arrays."""
split_list = attrs.get('split') if 'split' in attrs else []
new_attrs = translation_utils._fix_attribute_names(attrs,
{'split' : 'num_outputs'})
if 'axis' not in attrs:
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'axis': 0})
if not split_list:
num_outputs = len(proto_obj.model_metadata.get('output_tensor_data'))
else:
if len(set(split_list)) == 1:
num_outputs = len(split_list)
else:
raise NotImplementedError("Operator {} in MXNet does not support variable splits."
"Tracking the issue to support variable split here: "
"https://github.com/apache/incubator-mxnet/issues/11594"
.format('split'))
new_attrs['num_outputs'] = num_outputs
return 'split', new_attrs, inputs
|
Returns a slice of the input tensor along multiple axes.
|
def _slice(attrs, inputs, proto_obj):
"""Returns a slice of the input tensor along multiple axes."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'axes' : 'axis',
'ends' : 'end',
'starts' : 'begin'})
    # ONNX Slice can slice along multiple axes at once, while MXNet's slice_axis
    # handles a single axis, so we chain one slice_axis operator per axis.
begin = new_attrs.get('begin')
end = new_attrs.get('end')
axes = new_attrs.get('axis', tuple(range(len(begin))))
slice_op = symbol.slice_axis(inputs[0], axis=axes[0], begin=begin[0], end=end[0])
    if len(axes) > 1:
        # the first axis is already sliced above; apply slice_axis to the remaining axes
        for i, axis in enumerate(axes[1:], start=1):
            slice_op = symbol.slice_axis(slice_op, axis=axis, begin=begin[i], end=end[i])
return slice_op, new_attrs, inputs
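An imperative sketch of the same per-axis chaining with the NDArray API (toy shapes assumed):
import mxnet as mx

x = mx.nd.arange(24).reshape((2, 3, 4))
begin, end, axes = (0, 1), (2, 3), (0, 2)
out = x
for i, axis in enumerate(axes):
    out = mx.nd.slice_axis(out, axis=axis, begin=begin[i], end=end[i])
print(out.shape)   # (2, 3, 2)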
|
Transpose the input array.
|
def transpose(attrs, inputs, proto_obj):
"""Transpose the input array."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'perm' : 'axes'})
return 'transpose', new_attrs, inputs
|
Remove single-dimensional entries from the shape of a tensor.
|
def squeeze(attrs, inputs, proto_obj):
"""Remove single-dimensional entries from the shape of a tensor."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'axes' : 'axis'})
return 'squeeze', new_attrs, inputs
|
Inserts a new axis of size 1 into the array shape
|
def unsqueeze(attrs, inputs, cls):
"""Inserts a new axis of size 1 into the array shape"""
# MXNet can only add one axis at a time.
mxnet_op = inputs[0]
for axis in attrs["axes"]:
mxnet_op = symbol.expand_dims(mxnet_op, axis=axis)
return mxnet_op, attrs, inputs
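A minimal NDArray sketch of inserting axes one at a time, mirroring the loop above:
import mxnet as mx

x = mx.nd.zeros((3, 4))
for axis in (0, 3):                     # add new axes at positions 0 and 3
    x = mx.nd.expand_dims(x, axis=axis)
print(x.shape)                          # (1, 3, 4, 1)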
|
Flattens the input array into a 2-D array by collapsing the higher dimensions.
|
def flatten(attrs, inputs, proto_obj):
"""Flattens the input array into a 2-D array by collapsing the higher dimensions."""
    # MXNet's Flatten does not support an axis attribute; it always flattens as if axis=1
if 'axis' in attrs and attrs['axis'] != 1:
raise RuntimeError("Flatten operator only supports axis=1")
new_attrs = translation_utils._remove_attributes(attrs, ['axis'])
return 'Flatten', new_attrs, inputs
|
Clips (limits) the values in an array.
|
def clip(attrs, inputs, proto_obj):
"""Clips (limits) the values in an array."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'min' : 'a_min',
'max' : 'a_max'})
if 'a_max' not in new_attrs:
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'a_max' : np.inf})
if 'a_min' not in new_attrs:
new_attrs = translation_utils._add_extra_attributes(new_attrs, {'a_min' : -np.inf})
return 'clip', new_attrs, inputs
|
Returns element-wise result of base element raised to powers from exp element.
|
def power(attrs, inputs, proto_obj):
"""Returns element-wise result of base element raised to powers from exp element."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'exponent':'exp'})
if 'broadcast' in attrs:
new_attrs = translation_utils._remove_attributes(new_attrs, ['broadcast'])
if attrs['broadcast'] == 1:
return 'broadcast_power', new_attrs, inputs
else:
mxnet_op = symbol.pow(inputs[0], inputs[1])
return mxnet_op, new_attrs, inputs
mxnet_op = symbol.broadcast_power(inputs[0], inputs[1])
return mxnet_op, new_attrs, inputs
|
Reduce the array along a given axis by maximum value
|
def reduce_max(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by maximum value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'max', new_attrs, inputs
|
Reduce the array along a given axis by mean value
|
def reduce_mean(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by mean value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'mean', new_attrs, inputs
|
Reduce the array along a given axis by minimum value
|
def reduce_min(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by minimum value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'min', new_attrs, inputs
|
Reduce the array along a given axis by sum value
|
def reduce_sum(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by sum value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'sum', new_attrs, inputs
|
Reduce the array along a given axis by product value
|
def reduce_prod(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by product value"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'prod', new_attrs, inputs
|
Reduce the array along a given axis by log sum value
|
def reduce_log_sum(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by log sum value"""
keep_dims = True if 'keepdims' not in attrs else attrs.get('keepdims')
sum_op = symbol.sum(inputs[0], axis=attrs.get('axes'),
keepdims=keep_dims)
log_sym = symbol.log(sum_op)
return log_sym, attrs, inputs
|
Reduce the array along a given axis by log sum exp value
|
def reduce_log_sum_exp(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by log sum exp value"""
keep_dims = True if 'keepdims' not in attrs else attrs.get('keepdims')
exp_op = symbol.exp(inputs[0])
sum_op = symbol.sum(exp_op, axis=attrs.get('axes'),
keepdims=keep_dims)
log_sym = symbol.log(sum_op)
return log_sym, attrs, inputs
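A quick NumPy check of the exp -> sum -> log decomposition used above:
import numpy as np

x = np.array([[0.5, 1.0], [2.0, 3.0]])
lse = np.log(np.exp(x).sum(axis=1, keepdims=True))
print(lse.ravel())   # reduce-log-sum-exp of each row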
|
Reduce the array along a given axis by sum square value
|
def reduce_sum_square(attrs, inputs, proto_obj):
"""Reduce the array along a given axis by sum square value"""
square_op = symbol.square(inputs[0])
sum_op = symbol.sum(square_op, axis=attrs.get('axes'),
keepdims=attrs.get('keepdims'))
return sum_op, attrs, inputs
|
Reduce input tensor by l1 normalization.
|
def reduce_l1(attrs, inputs, proto_obj):
"""Reduce input tensor by l1 normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
new_attrs = translation_utils._add_extra_attributes(new_attrs,
{'ord' : 1})
return 'norm', new_attrs, inputs
|
Reduce input tensor by l2 normalization.
|
def reduce_l2(attrs, inputs, proto_obj):
"""Reduce input tensor by l2 normalization."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
return 'norm', new_attrs, inputs
|
Average pooling
|
def avg_pooling(attrs, inputs, proto_obj):
""" Average pooling"""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'kernel_shape': 'kernel',
'strides': 'stride',
'pads': 'pad',
})
new_attrs = translation_utils._add_extra_attributes(new_attrs,
{'pooling_convention': 'valid'
})
new_op = translation_utils._fix_pooling('avg', inputs, new_attrs)
return new_op, new_attrs, inputs
|
LP Pooling
|
def lp_pooling(attrs, inputs, proto_obj):
"""LP Pooling"""
p_value = attrs.get('p', 2)
new_attrs = translation_utils._fix_attribute_names(attrs,
{'kernel_shape': 'kernel',
'strides': 'stride',
'pads': 'pad'
})
new_attrs = translation_utils._remove_attributes(new_attrs, ['p'])
new_attrs = translation_utils._add_extra_attributes(new_attrs,
{'pooling_convention': 'valid',
'p_value': p_value
})
new_op = translation_utils._fix_pooling('lp', inputs, new_attrs)
return new_op, new_attrs, inputs
|
Max ROI Pooling.
|
def max_roi_pooling(attrs, inputs, proto_obj):
"""Max ROI Pooling."""
new_attrs = translation_utils._fix_attribute_names(attrs,
{'pooled_shape': 'pooled_size',
'spatial_scale': 'spatial_scale'
})
return 'ROIPooling', new_attrs, inputs
|
Rearranges data from depth into blocks of spatial data.
|
def depthtospace(attrs, inputs, proto_obj):
"""Rearranges data from depth into blocks of spatial data."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'blocksize':'block_size'})
return "depth_to_space", new_attrs, inputs
|
Rearranges blocks of spatial data into depth.
|
def spacetodepth(attrs, inputs, proto_obj):
"""Rearranges blocks of spatial data into depth."""
new_attrs = translation_utils._fix_attribute_names(attrs, {'blocksize':'block_size'})
return "space_to_depth", new_attrs, inputs
|
Returns batched one-hot vectors.
|
def hardmax(attrs, inputs, proto_obj):
"""Returns batched one-hot vectors."""
input_tensor_data = proto_obj.model_metadata.get('input_tensor_data')[0]
input_shape = input_tensor_data[1]
axis = int(attrs.get('axis', 1))
axis = axis if axis >= 0 else len(input_shape) + axis
if axis == len(input_shape) - 1:
amax = symbol.argmax(inputs[0], axis=-1)
one_hot = symbol.one_hot(amax, depth=input_shape[-1])
return one_hot, attrs, inputs
# since reshape doesn't take a tensor for shape,
    # computing with np.prod. This needs to be changed
# to use mx.sym.prod() when mx.sym.reshape() is fixed.
# (https://github.com/apache/incubator-mxnet/issues/10789)
new_shape = (int(np.prod(input_shape[:axis])),
int(np.prod(input_shape[axis:])))
reshape_op = symbol.reshape(inputs[0], new_shape)
amax = symbol.argmax(reshape_op, axis=-1)
one_hot = symbol.one_hot(amax, depth=new_shape[-1])
hardmax_op = symbol.reshape(one_hot, input_shape)
return hardmax_op, attrs, inputs
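The underlying idea, shown with NumPy for a 2-D input reduced along the last axis: one-hot encode the argmax.
import numpy as np

x = np.array([[1.0, 3.0, 2.0],
              [4.0, 0.5, 4.5]])
one_hot = np.eye(x.shape[-1])[x.argmax(axis=-1)]
print(one_hot)
# [[0. 1. 0.]
#  [0. 0. 1.]]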
|
ONNX does not have an eps attribute for this operator, so it cannot be mapped to
L2Normalization in MXNet; without eps it behaves like the norm operator. See the
discussion in this PR: https://github.com/onnx/onnx/pull/1330
|
def lpnormalization(attrs, inputs, proto_obj):
"""ONNX does not have eps attribute, so cannot map it to L2normalization in MXNet
without that, it works as norm operator discussion in PR:
https://github.com/onnx/onnx/pull/1330"""
new_attrs = translation_utils._fix_attribute_names(attrs, {'p': 'ord'})
axis = int(attrs.get("axis", -1))
new_attrs.update(axis=axis)
return 'norm', new_attrs, inputs
|
Download the video archives for speaker indices in [from_idx, to_idx).
|
def download_mp4(from_idx, to_idx, _params):
"""
    Download the video archives for speaker indices in [from_idx, to_idx).
"""
succ = set()
fail = set()
for idx in range(from_idx, to_idx):
name = 's' + str(idx)
save_folder = '{src_path}/{nm}'.format(src_path=_params['src_path'], nm=name)
if idx == 0 or os.path.isdir(save_folder):
continue
script = "http://spandh.dcs.shef.ac.uk/gridcorpus/{nm}/video/{nm}.mpg_vcd.zip".format( \
nm=name)
down_sc = 'cd {src_path} && curl {script} --output {nm}.mpg_vcd.zip && \
unzip {nm}.mpg_vcd.zip'.format(script=script,
nm=name,
src_path=_params['src_path'])
try:
print(down_sc)
os.system(down_sc)
succ.add(idx)
except OSError as error:
print(error)
fail.add(idx)
return (succ, fail)
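Usage sketch for the helper above; the path is hypothetical and calling it triggers real curl/unzip downloads:
if __name__ == '__main__':
    succ, fail = download_mp4(1, 4, {'src_path': '/data/grid/video'})
    print('downloaded:', sorted(succ), 'failed:', sorted(fail))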
|
Download the alignment archives for speaker indices in [from_idx, to_idx).
|
def download_align(from_idx, to_idx, _params):
"""
    Download the alignment archives for speaker indices in [from_idx, to_idx).
"""
succ = set()
fail = set()
for idx in range(from_idx, to_idx):
name = 's' + str(idx)
if idx == 0:
continue
script = "http://spandh.dcs.shef.ac.uk/gridcorpus/{nm}/align/{nm}.tar".format(nm=name)
down_sc = 'cd {align_path} && wget {script} && \
tar -xvf {nm}.tar'.format(script=script,
nm=name,
align_path=_params['align_path'])
try:
print(down_sc)
os.system(down_sc)
succ.add(idx)
except OSError as error:
print(error)
fail.add(idx)
return (succ, fail)
|
Run unit tests in the emulator and copy the results back to the host through the mounted
volume in /mxnet
|
def run_ut_py3_qemu():
"""Run unit tests in the emulator and copy the results back to the host through the mounted
volume in /mxnet"""
from vmcontrol import VM
with VM() as vm:
qemu_provision(vm.ssh_port)
logging.info("execute tests")
qemu_ssh(vm.ssh_port, "./runtime_functions.py", "run_ut_python3_qemu_internal")
qemu_rsync_to_host(vm.ssh_port, "*.xml", "mxnet")
logging.info("copied to host")
logging.info("tests finished, vm shutdown.")
vm.shutdown()
|
This runs inside the QEMU VM.
|
def run_ut_python3_qemu_internal():
"""this runs inside the vm"""
pkg = glob.glob('mxnet_dist/*.whl')[0]
logging.info("=== NOW Running inside QEMU ===")
logging.info("PIP Installing %s", pkg)
check_call(['sudo', 'pip3', 'install', pkg])
logging.info("PIP Installing mxnet/test_requirements.txt")
check_call(['sudo', 'pip3', 'install', '-r', 'mxnet/test_requirements.txt'])
logging.info("Running tests in mxnet/tests/python/unittest/")
    check_call(['nosetests', '--with-timer', '--with-xunit',
                '--xunit-file', 'nosetests_unittest.xml',
                '--verbose', 'mxnet/tests/python/unittest/test_engine.py'])
|
Return the subword-unit representation of a given word/token.
|
def _get_subword_units(token, gram):
"""Return subword-units presentation, given a word/token.
"""
if token == '</s>': # special token for padding purpose.
return [token]
t = '#' + token + '#'
return [t[i:i + gram] for i in range(0, len(t) - gram + 1)]
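Example output for a hypothetical token with gram=3, using the function defined above:
print(_get_subword_units('hello', 3))   # ['#he', 'hel', 'ell', 'llo', 'lo#']
print(_get_subword_units('</s>', 3))    # ['</s>'] -- the padding token passes through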
|
Train the model using Caffe operator in MXNet
|
def fit(args, network, data_loader, eval_metrics=None, batch_end_callback=None):
"""Train the model using Caffe operator in MXNet"""
# kvstore
kv = mx.kvstore.create(args.kv_store)
# logging
head = '%(asctime)-15s Node[' + str(kv.rank) + '] %(message)s'
if 'log_file' in args and args.log_file is not None:
log_file = args.log_file
log_dir = args.log_dir
log_file_full_name = os.path.join(log_dir, log_file)
if not os.path.exists(log_dir):
os.mkdir(log_dir)
logger = logging.getLogger()
handler = logging.FileHandler(log_file_full_name)
formatter = logging.Formatter(head)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
logger.info('start with arguments %s', args)
else:
logging.basicConfig(level=logging.DEBUG, format=head)
logging.info('start with arguments %s', args)
# load model
model_prefix = args.model_prefix
if model_prefix is not None:
model_prefix += "-%d" % (kv.rank)
model_args = {}
if args.load_epoch is not None:
assert model_prefix is not None
tmp = mx.model.FeedForward.load(model_prefix, args.load_epoch)
model_args = {'arg_params' : tmp.arg_params,
'aux_params' : tmp.aux_params,
'begin_epoch' : args.load_epoch}
# save model
save_model_prefix = args.save_model_prefix
if save_model_prefix is None:
save_model_prefix = model_prefix
checkpoint = None if save_model_prefix is None else mx.callback.do_checkpoint(save_model_prefix)
# data
(train, val) = data_loader(args, kv)
# train
devs = mx.cpu() if args.gpus is None else [
mx.gpu(int(i)) for i in args.gpus.split(',')]
epoch_size = args.num_examples / args.batch_size
if args.kv_store == 'dist_sync':
epoch_size /= kv.num_workers
model_args['epoch_size'] = epoch_size
if 'lr_factor' in args and args.lr_factor < 1:
model_args['lr_scheduler'] = mx.lr_scheduler.FactorScheduler(
step=max(int(epoch_size * args.lr_factor_epoch), 1),
factor=args.lr_factor)
if 'clip_gradient' in args and args.clip_gradient is not None:
model_args['clip_gradient'] = args.clip_gradient
# disable kvstore for single device
if 'local' in kv.type and (
            args.gpus is None or len(args.gpus.split(',')) == 1):
kv = None
mod = mx.mod.Module(network, context=devs)
if eval_metrics is None:
eval_metrics = ['accuracy']
# TopKAccuracy only allows top_k > 1
for top_k in [5, 10, 20]:
eval_metrics.append(mx.metric.create('top_k_accuracy', top_k=top_k))
if batch_end_callback is not None:
if not isinstance(batch_end_callback, list):
batch_end_callback = [batch_end_callback]
else:
batch_end_callback = []
batch_end_callback.append(mx.callback.Speedometer(args.batch_size, 50))
mod.fit(train_data=train, eval_metric=eval_metrics, eval_data=val, optimizer='sgd',
optimizer_params={'learning_rate':args.lr, 'momentum': 0.9, 'wd': 0.00001},
num_epoch=args.num_epochs, batch_end_callback=batch_end_callback,
initializer=mx.init.Xavier(factor_type="in", magnitude=2.34),
kvstore=kv, epoch_end_callback=checkpoint, **model_args)
|
Preprocess a 210x160x3 uint8 frame into a 6400 (80x80) (1 x input_size)
float vector.
|
def preprocess(self, img):
"""
Preprocess a 210x160x3 uint8 frame into a 6400 (80x80) (1 x input_size)
float vector.
"""
# Crop, down-sample, erase background and set foreground to 1.
# See https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5
img = img[35:195]
img = img[::2, ::2, 0]
img[img == 144] = 0
img[img == 109] = 0
img[img != 0] = 1
        curr = np.expand_dims(img.astype(np.float64).ravel(), axis=0)
# Subtract the last preprocessed image.
diff = (curr - self.prev if self.prev is not None
else np.zeros((1, curr.shape[1])))
self.prev = curr
return diff
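A quick shape check with a synthetic frame (NumPy only), confirming the 80x80 -> 6400 layout described in the docstring:
import numpy as np

dummy = np.random.randint(0, 255, size=(210, 160, 3)).astype(np.uint8)
cropped = dummy[35:195][::2, ::2, 0]
print(cropped.shape)            # (80, 80)
print(cropped.ravel().shape)    # (6400,)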
|
Returns a new empty handle.
Empty handle can be used to hold a result.
Returns
-------
handle
A new empty `NDArray` handle.
|
def _new_empty_handle():
"""Returns a new empty handle.
Empty handle can be used to hold a result.
Returns
-------
handle
A new empty `NDArray` handle.
"""
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayCreateNone(ctypes.byref(hdl)))
return hdl
|
Return a new handle with specified shape and context.
Empty handle is only used to hold results.
Returns
-------
handle
A new empty `NDArray` handle.
|
def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t):
"""Return a new handle with specified shape and context.
Empty handle is only used to hold results.
Returns
-------
handle
A new empty `NDArray` handle.
"""
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayCreateEx(
c_array_buf(mx_uint, native_array('I', shape)),
mx_uint(len(shape)),
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
ctypes.c_int(int(delay_alloc)),
ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
ctypes.byref(hdl)))
return hdl
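A usage sketch, assuming it runs inside mxnet/ndarray/ndarray.py where Context and NDArray are in scope (this is an internal helper; the surrounding imports are an assumption):
hdl = _new_alloc_handle((2, 3), Context('cpu', 0), delay_alloc=False, dtype='float32')
arr = NDArray(hdl)    # wrap the raw handle in a Python NDArray
print(arr.shape)      # (2, 3)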
|