Helper function to obtain the output shape of a model by running a forward pass with dummy inputs
|
def get_input_shape(sym, proto_obj):
"""Helper function to obtain the shape of an array"""
arg_params = proto_obj.arg_dict
aux_params = proto_obj.aux_dict
model_input_shape = [data[1] for data in proto_obj.model_metadata.get('input_tensor_data')]
data_names = [data[0] for data in proto_obj.model_metadata.get('input_tensor_data')]
# creating dummy inputs
inputs = []
for in_shape in model_input_shape:
inputs.append(nd.ones(shape=in_shape))
data_shapes = []
for idx, input_name in enumerate(data_names):
data_shapes.append((input_name, inputs[idx].shape))
ctx = context.cpu()
# create a module
mod = module.Module(symbol=sym, data_names=data_names, context=ctx, label_names=None)
mod.bind(for_training=False, data_shapes=data_shapes, label_shapes=None)
mod.set_params(arg_params=arg_params, aux_params=aux_params)
data_forward = []
for idx, input_name in enumerate(data_names):
val = inputs[idx]
data_forward.append(val)
mod.forward(io.DataBatch(data_forward))
result = mod.get_outputs()[0].asnumpy()
return result.shape
|
r"""Resize image with OpenCV.
.. note:: `imresize` uses OpenCV (not the CV2 Python library). MXNet must have been built
with USE_OPENCV=1 for `imresize` to work.
Parameters
----------
src : NDArray
source image
w : int, required
Width of resized image.
h : int, required
Height of resized image.
interp : int, optional, default=1
Interpolation method (default=cv2.INTER_LINEAR).
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method.
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Randomly select from the interpolation methods mentioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
More details can be found in the documentation of OpenCV, please refer to
http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
out : NDArray, optional
The output NDArray to hold the result.
Returns
-------
out : NDArray or list of NDArrays
The output of this function.
Example
-------
>>> with open("flower.jpeg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image)
>>> image
<NDArray 2321x3482x3 @cpu(0)>
>>> new_image = mx.img.resize(image, 240, 360)
>>> new_image
<NDArray 240x360x3 @cpu(0)>
|
def imresize(src, w, h, *args, **kwargs):
r"""Resize image with OpenCV.
.. note:: `imresize` uses OpenCV (not the CV2 Python library). MXNet must have been built
with USE_OPENCV=1 for `imresize` to work.
Parameters
----------
src : NDArray
source image
w : int, required
Width of resized image.
h : int, required
Height of resized image.
interp : int, optional, default=1
Interpolation method (default=cv2.INTER_LINEAR).
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method.
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Randomly select from the interpolation methods mentioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
More details can be found in the documentation of OpenCV, please refer to
http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
out : NDArray, optional
The output NDArray to hold the result.
Returns
-------
out : NDArray or list of NDArrays
The output of this function.
Example
-------
>>> with open("flower.jpeg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image)
>>> image
<NDArray 2321x3482x3 @cpu(0)>
>>> new_image = mx.img.resize(image, 240, 360)
>>> new_image
<NDArray 240x360x3 @cpu(0)>
"""
return _internal._cvimresize(src, w, h, *args, **kwargs)
|
Decode an image to an NDArray.
.. note:: `imdecode` uses OpenCV (not the CV2 Python library).
MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.
Parameters
----------
buf : str/bytes/bytearray or numpy.ndarray
Binary image data as string or numpy ndarray.
flag : int, optional, default=1
1 for three channel color output. 0 for grayscale output.
to_rgb : int, optional, default=1
1 for RGB formatted output (MXNet default). 0 for BGR formatted output (OpenCV default).
out : NDArray, optional
Output buffer. Use `None` for automatic allocation.
Returns
-------
NDArray
An `NDArray` containing the image.
Example
-------
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image)
>>> image
<NDArray 224x224x3 @cpu(0)>
Set `flag` parameter to 0 to get grayscale output
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image, flag=0)
>>> image
<NDArray 224x224x1 @cpu(0)>
Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image, to_rgb=0)
>>> image
<NDArray 224x224x3 @cpu(0)>
|
def imdecode(buf, *args, **kwargs):
"""Decode an image to an NDArray.
.. note:: `imdecode` uses OpenCV (not the CV2 Python library).
MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.
Parameters
----------
buf : str/bytes/bytearray or numpy.ndarray
Binary image data as string or numpy ndarray.
flag : int, optional, default=1
1 for three channel color output. 0 for grayscale output.
to_rgb : int, optional, default=1
1 for RGB formatted output (MXNet default). 0 for BGR formatted output (OpenCV default).
out : NDArray, optional
Output buffer. Use `None` for automatic allocation.
Returns
-------
NDArray
An `NDArray` containing the image.
Example
-------
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image)
>>> image
<NDArray 224x224x3 @cpu(0)>
Set `flag` parameter to 0 to get grayscale output
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image, flag=0)
>>> image
<NDArray 224x224x1 @cpu(0)>
Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image, to_rgb=0)
>>> image
<NDArray 224x224x3 @cpu(0)>
"""
if not isinstance(buf, nd.NDArray):
if sys.version_info[0] == 3 and not isinstance(buf, (bytes, bytearray, np.ndarray)):
raise ValueError('buf must be of type bytes, bytearray or numpy.ndarray, '
'if you would like to input type str, please convert to bytes')
buf = nd.array(np.frombuffer(buf, dtype=np.uint8), dtype=np.uint8)
return _internal._cvimdecode(buf, *args, **kwargs)
|
Scales down crop size if it's larger than image size.
If width/height of the crop is larger than the width/height of the image,
sets the width/height to the width/height of the image.
Parameters
----------
src_size : tuple of int
Size of the image in (width, height) format.
size : tuple of int
Size of the crop in (width, height) format.
Returns
-------
tuple of int
A tuple containing the scaled crop size in (width, height) format.
Example
--------
>>> src_size = (640,480)
>>> size = (720,120)
>>> new_size = mx.img.scale_down(src_size, size)
>>> new_size
(640, 106)
|
def scale_down(src_size, size):
"""Scales down crop size if it's larger than image size.
If width/height of the crop is larger than the width/height of the image,
sets the width/height to the width/height of the image.
Parameters
----------
src_size : tuple of int
Size of the image in (width, height) format.
size : tuple of int
Size of the crop in (width, height) format.
Returns
-------
tuple of int
A tuple containing the scaled crop size in (width, height) format.
Example
--------
>>> src_size = (640,480)
>>> size = (720,120)
>>> new_size = mx.img.scale_down(src_size, size)
>>> new_size
(640, 106)
"""
w, h = size
sw, sh = src_size
if sh < h:
w, h = float(w * sh) / h, sh
if sw < w:
w, h = sw, float(h * sw) / w
return int(w), int(h)
|
Pad image border with OpenCV.
Parameters
----------
src : NDArray
source image
top : int, required
Top margin.
bot : int, required
Bottom margin.
left : int, required
Left margin.
right : int, required
Right margin.
type : int, optional, default='0'
Filling type (default=cv2.BORDER_CONSTANT).
0 - cv2.BORDER_CONSTANT - Adds a constant colored border.
1 - cv2.BORDER_REFLECT - Border will be mirror reflection of the
border elements, like this : fedcba|abcdefgh|hgfedcb
2 - cv2.BORDER_REFLECT_101 or cv.BORDER_DEFAULT - Same as above,
but with a slight change, like this : gfedcb|abcdefgh|gfedcba
3 - cv2.BORDER_REPLICATE - Last element is replicated throughout,
like this: aaaaaa|abcdefgh|hhhhhhh
4 - cv2.BORDER_WRAP - it will look like this : cdefgh|abcdefgh|abcdefg
value : double, optional, default=0
(Deprecated! Use ``values`` instead.) Fill with single value.
values : tuple of <double>, optional, default=[]
Fill with value(RGB[A] or gray), up to 4 channels.
out : NDArray, optional
The output NDArray to hold the result.
Returns
-------
out : NDArray or list of NDArrays
The output of this function.
Example
--------
>>> with open("flower.jpeg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image)
>>> image
<NDArray 2321x3482x3 @cpu(0)>
>>> new_image = mx_border = mx.image.copyMakeBorder(mx_img, 1, 2, 3, 4, type=0)
>>> new_image
<NDArray 2324x3489x3 @cpu(0)>
|
def copyMakeBorder(src, top, bot, left, right, *args, **kwargs):
"""Pad image border with OpenCV.
Parameters
----------
src : NDArray
source image
top : int, required
Top margin.
bot : int, required
Bottom margin.
left : int, required
Left margin.
right : int, required
Right margin.
type : int, optional, default='0'
Filling type (default=cv2.BORDER_CONSTANT).
0 - cv2.BORDER_CONSTANT - Adds a constant colored border.
1 - cv2.BORDER_REFLECT - Border will be mirror reflection of the
border elements, like this : fedcba|abcdefgh|hgfedcb
2 - cv2.BORDER_REFLECT_101 or cv.BORDER_DEFAULT - Same as above,
but with a slight change, like this : gfedcb|abcdefgh|gfedcba
3 - cv2.BORDER_REPLICATE - Last element is replicated throughout,
like this: aaaaaa|abcdefgh|hhhhhhh
4 - cv2.BORDER_WRAP - it will look like this : cdefgh|abcdefgh|abcdefg
value : double, optional, default=0
(Deprecated! Use ``values`` instead.) Fill with single value.
values : tuple of <double>, optional, default=[]
Fill with value(RGB[A] or gray), up to 4 channels.
out : NDArray, optional
The output NDArray to hold the result.
Returns
-------
out : NDArray or list of NDArrays
The output of this function.
Example
--------
>>> with open("flower.jpeg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image)
>>> image
<NDArray 2321x3482x3 @cpu(0)>
>>> new_image = mx_border = mx.image.copyMakeBorder(mx_img, 1, 2, 3, 4, type=0)
>>> new_image
<NDArray 2324x3489x3 @cpu(0)>
"""
return _internal._cvcopyMakeBorder(src, top, bot, left, right, *args, **kwargs)
|
Get the interpolation method for resize functions.
The major purpose of this function is to wrap a random interp method selection
and an auto-estimation method.
Parameters
----------
interp : int
interpolation method for all resizing operations
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Randomly select from the interpolation methods mentioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
More details can be found in the documentation of OpenCV, please refer to
http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
sizes : tuple of int
(old_height, old_width, new_height, new_width); if not provided, auto (9)
falls back to returning Area (2).
Returns
-------
int
interp method from 0 to 4
|
def _get_interp_method(interp, sizes=()):
"""Get the interpolation method for resize functions.
The major purpose of this function is to wrap a random interp method selection
and an auto-estimation method.
Parameters
----------
interp : int
interpolation method for all resizing operations
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Randomly select from the interpolation methods mentioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
More details can be found in the documentation of OpenCV, please refer to
http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
sizes : tuple of int
(old_height, old_width, new_height, new_width); if not provided, auto (9)
falls back to returning Area (2).
Returns
-------
int
interp method from 0 to 4
"""
if interp == 9:
if sizes:
assert len(sizes) == 4
oh, ow, nh, nw = sizes
if nh > oh and nw > ow:
return 2
elif nh < oh and nw < ow:
return 3
else:
return 1
else:
return 2
if interp == 10:
return random.randint(0, 4)
if interp not in (0, 1, 2, 3, 4):
raise ValueError('Unknown interp method %d' % interp)
return interp
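A quick behavioral sketch, assuming the function above is in scope: auto (9) compares old and new sizes, and explicit methods pass through unchanged.
# enlarging both dimensions under auto mode
print(_get_interp_method(9, (100, 100, 200, 200)))  # 2
# shrinking both dimensions under auto mode
print(_get_interp_method(9, (200, 200, 100, 100)))  # 3
# mixed enlarge/shrink falls back to bilinear
print(_get_interp_method(9, (100, 200, 200, 100)))  # 1
# explicit methods (0-4) pass through unchanged
print(_get_interp_method(3))  # 3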
|
Resizes shorter edge to size.
.. note:: `resize_short` uses OpenCV (not the CV2 Python library).
MXNet must have been built with OpenCV for `resize_short` to work.
Resizes the original image by setting the shorter edge to size
and setting the longer edge accordingly.
Resizing function is called from OpenCV.
Parameters
----------
src : NDArray
The original image.
size : int
The length to be set for the shorter edge.
interp : int, optional, default=2
Interpolation method used for resizing the image.
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Randomly select from the interpolation methods mentioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
More details can be found in the documentation of OpenCV, please refer to
http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
Returns
-------
NDArray
An `NDArray` containing the resized image.
Example
-------
>>> with open("flower.jpeg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image)
>>> image
<NDArray 2321x3482x3 @cpu(0)>
>>> size = 640
>>> new_image = mx.img.resize_short(image, size)
>>> new_image
<NDArray 2321x3482x3 @cpu(0)>
|
def resize_short(src, size, interp=2):
"""Resizes shorter edge to size.
.. note:: `resize_short` uses OpenCV (not the CV2 Python library).
MXNet must have been built with OpenCV for `resize_short` to work.
Resizes the original image by setting the shorter edge to size
and setting the longer edge accordingly.
Resizing function is called from OpenCV.
Parameters
----------
src : NDArray
The original image.
size : int
The length to be set for the shorter edge.
interp : int, optional, default=2
Interpolation method used for resizing the image.
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Randomly select from the interpolation methods mentioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
More details can be found in the documentation of OpenCV, please refer to
http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
Returns
-------
NDArray
An `NDArray` containing the resized image.
Example
-------
>>> with open("flower.jpeg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image)
>>> image
<NDArray 2321x3482x3 @cpu(0)>
>>> size = 640
>>> new_image = mx.img.resize_short(image, size)
>>> new_image
<NDArray 2321x3482x3 @cpu(0)>
"""
h, w, _ = src.shape
if h > w:
new_h, new_w = size * h // w, size
else:
new_h, new_w = size, size * w // h
return imresize(src, new_w, new_h, interp=_get_interp_method(interp, (h, w, new_h, new_w)))
|
Crop src at fixed location, and (optionally) resize it to size.
Parameters
----------
src : NDArray
Input image
x0 : int
Left boundary of the cropping area
y0 : int
Top boundary of the cropping area
w : int
Width of the cropping area
h : int
Height of the cropping area
size : tuple of (w, h)
Optional, resize to new size after cropping
interp : int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
An `NDArray` containing the cropped image.
|
def fixed_crop(src, x0, y0, w, h, size=None, interp=2):
"""Crop src at fixed location, and (optionally) resize it to size.
Parameters
----------
src : NDArray
Input image
x0 : int
Left boundary of the cropping area
y0 : int
Top boundary of the cropping area
w : int
Width of the cropping area
h : int
Height of the cropping area
size : tuple of (w, h)
Optional, resize to new size after cropping
interp : int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
An `NDArray` containing the cropped image.
"""
out = nd.slice(src, begin=(y0, x0, 0), end=(y0 + h, x0 + w, int(src.shape[2])))
if size is not None and (w, h) != size:
sizes = (h, w, size[1], size[0])
out = imresize(out, *size, interp=_get_interp_method(interp, sizes))
return out
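A minimal usage sketch, assuming `mx` is imported, MXNet is built with OpenCV (for the resize step), and the image and coordinates are hypothetical:
import mxnet as mx
image = mx.nd.ones((480, 640, 3), dtype='uint8')  # hypothetical HWC image
patch = fixed_crop(image, x0=10, y0=20, w=100, h=50, size=(224, 224))
print(patch.shape)  # (224, 224, 3)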
|
Crops the image `src` to the given `size` by trimming on all four
sides and preserving the center of the image. Upsamples if `src` is smaller
than `size`.
.. note:: This requires MXNet to be compiled with USE_OPENCV.
Parameters
----------
src : NDArray
Binary source image data.
size : list or tuple of int
The desired output image size.
interp : int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
The cropped image.
Tuple
(x, y, width, height) where x, y are the positions of the crop in the
original image and width, height the dimensions of the crop.
Example
-------
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.image.imdecode(str_image)
>>> image
<NDArray 2321x3482x3 @cpu(0)>
>>> cropped_image, (x, y, width, height) = mx.image.center_crop(image, (1000, 500))
>>> cropped_image
<NDArray 500x1000x3 @cpu(0)>
>>> x, y, width, height
(1241, 910, 1000, 500)
|
def center_crop(src, size, interp=2):
"""Crops the image `src` to the given `size` by trimming on all four
sides and preserving the center of the image. Upsamples if `src` is smaller
than `size`.
.. note:: This requires MXNet to be compiled with USE_OPENCV.
Parameters
----------
src : NDArray
Binary source image data.
size : list or tuple of int
The desired output image size.
interp : int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
The cropped image.
Tuple
(x, y, width, height) where x, y are the positions of the crop in the
original image and width, height the dimensions of the crop.
Example
-------
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.image.imdecode(str_image)
>>> image
<NDArray 2321x3482x3 @cpu(0)>
>>> cropped_image, (x, y, width, height) = mx.image.center_crop(image, (1000, 500))
>>> cropped_image
<NDArray 500x1000x3 @cpu(0)>
>>> x, y, width, height
(1241, 910, 1000, 500)
"""
h, w, _ = src.shape
new_w, new_h = scale_down((w, h), size)
x0 = int((w - new_w) / 2)
y0 = int((h - new_h) / 2)
out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
return out, (x0, y0, new_w, new_h)
|
Normalize src with mean and std.
Parameters
----------
src : NDArray
Input image
mean : NDArray
RGB mean to be subtracted
std : NDArray
RGB standard deviation to be divided
Returns
-------
NDArray
An `NDArray` containing the normalized image.
|
def color_normalize(src, mean, std=None):
"""Normalize src with mean and std.
Parameters
----------
src : NDArray
Input image
mean : NDArray
RGB mean to be subtracted
std : NDArray
RGB standard deviation to be divided
Returns
-------
NDArray
An `NDArray` containing the normalized image.
"""
if mean is not None:
src -= mean
if std is not None:
src /= std
return src
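A short sketch, assuming `mx` is imported; the mean/std values are the common ImageNet statistics used elsewhere in this file, and note that `src` is modified in place:
import mxnet as mx
img = mx.nd.ones((2, 2, 3)) * 128.0  # hypothetical float image
mean = mx.nd.array([123.68, 116.28, 103.53])
std = mx.nd.array([58.395, 57.12, 57.375])
out = color_normalize(img, mean, std)
print(out.shape)  # (2, 2, 3)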
|
Randomly crop src with size. Randomize area and aspect ratio.
Parameters
----------
src : NDArray
Input image
size : tuple of (int, int)
Size of the crop formatted as (width, height).
area : float in (0, 1] or tuple of (float, float)
If tuple, minimum area and maximum area to be maintained after cropping
If float, minimum area to be maintained after cropping, maximum area is set to 1.0
ratio : tuple of (float, float)
Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)
interp: int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
An `NDArray` containing the cropped image.
Tuple
A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the
original image and (width, height) are the dimensions of the cropped image.
|
def random_size_crop(src, size, area, ratio, interp=2, **kwargs):
"""Randomly crop src with size. Randomize area and aspect ratio.
Parameters
----------
src : NDArray
Input image
size : tuple of (int, int)
Size of the crop formatted as (width, height).
area : float in (0, 1] or tuple of (float, float)
If tuple, minimum area and maximum area to be maintained after cropping
If float, minimum area to be maintained after cropping, maximum area is set to 1.0
ratio : tuple of (float, float)
Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)
interp: int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
An `NDArray` containing the cropped image.
Tuple
A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the
original image and (width, height) are the dimensions of the cropped image.
"""
h, w, _ = src.shape
src_area = h * w
if 'min_area' in kwargs:
warnings.warn('`min_area` is deprecated. Please use `area` instead.',
DeprecationWarning)
area = kwargs.pop('min_area')
assert not kwargs, "unexpected keyword arguments for `random_size_crop`."
if isinstance(area, numeric_types):
area = (area, 1.0)
for _ in range(10):
target_area = random.uniform(area[0], area[1]) * src_area
log_ratio = (np.log(ratio[0]), np.log(ratio[1]))
new_ratio = np.exp(random.uniform(*log_ratio))
new_w = int(round(np.sqrt(target_area * new_ratio)))
new_h = int(round(np.sqrt(target_area / new_ratio)))
if new_w <= w and new_h <= h:
x0 = random.randint(0, w - new_w)
y0 = random.randint(0, h - new_h)
out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
return out, (x0, y0, new_w, new_h)
# fall back to center_crop
return center_crop(src, size, interp)
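A usage sketch with the defaults that CreateAugmenter passes below (area 0.08-1.0, aspect ratio 3/4-4/3); the input image is hypothetical:
import mxnet as mx
image = mx.nd.ones((480, 640, 3), dtype='uint8')  # hypothetical HWC image
crop, rect = random_size_crop(image, size=(224, 224), area=(0.08, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0))
print(crop.shape)  # (224, 224, 3)
print(rect)  # (x0, y0, crop_width, crop_height) in the original image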
|
Creates an augmenter list.
Parameters
----------
data_shape : tuple of int
Shape for output data
resize : int
Resize the shorter edge to this value at the beginning, if larger than 0
rand_crop : bool
Whether to enable random cropping other than center crop
rand_resize : bool
Whether to enable random sized cropping; requires rand_crop to be enabled
rand_gray : float
Probability in [0, 1] of converting the image to grayscale across all channels;
the number of channels will not be reduced to 1
rand_mirror : bool
Whether to apply horizontal flip to image with probability 0.5
mean : np.ndarray or None
Mean pixel values for [r, g, b]
std : np.ndarray or None
Standard deviations for [r, g, b]
brightness : float
Brightness jittering range (percent)
contrast : float
Contrast jittering range (percent)
saturation : float
Saturation jittering range (percent)
hue : float
Hue jittering range (percent)
pca_noise : float
PCA noise level (percent)
inter_method : int, default=2(Area-based)
Interpolation method for all resizing operations
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Randomly select from the interpolation methods mentioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
Examples
--------
>>> # An example of creating multiple augmenters
>>> augs = mx.image.CreateAugmenter(data_shape=(3, 300, 300), rand_mirror=True,
... mean=True, brightness=0.125, contrast=0.125, rand_gray=0.05,
... saturation=0.125, pca_noise=0.05, inter_method=10)
>>> # dump the details
>>> for aug in augs:
... aug.dumps()
|
def CreateAugmenter(data_shape, resize=0, rand_crop=False, rand_resize=False, rand_mirror=False,
mean=None, std=None, brightness=0, contrast=0, saturation=0, hue=0,
pca_noise=0, rand_gray=0, inter_method=2):
"""Creates an augmenter list.
Parameters
----------
data_shape : tuple of int
Shape for output data
resize : int
Resize the shorter edge to this value at the beginning, if larger than 0
rand_crop : bool
Whether to enable random cropping other than center crop
rand_resize : bool
Whether to enable random sized cropping; requires rand_crop to be enabled
rand_gray : float
Probability in [0, 1] of converting the image to grayscale across all channels;
the number of channels will not be reduced to 1
rand_mirror : bool
Whether to apply horizontal flip to image with probability 0.5
mean : np.ndarray or None
Mean pixel values for [r, g, b]
std : np.ndarray or None
Standard deviations for [r, g, b]
brightness : float
Brightness jittering range (percent)
contrast : float
Contrast jittering range (percent)
saturation : float
Saturation jittering range (percent)
hue : float
Hue jittering range (percent)
pca_noise : float
PCA noise level (percent)
inter_method : int, default=2(Area-based)
Interpolation method for all resizing operations
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Randomly select from the interpolation methods mentioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
Examples
--------
>>> # An example of creating multiple augmenters
>>> augs = mx.image.CreateAugmenter(data_shape=(3, 300, 300), rand_mirror=True,
... mean=True, brightness=0.125, contrast=0.125, rand_gray=0.05,
... saturation=0.125, pca_noise=0.05, inter_method=10)
>>> # dump the details
>>> for aug in augs:
... aug.dumps()
"""
auglist = []
if resize > 0:
auglist.append(ResizeAug(resize, inter_method))
crop_size = (data_shape[2], data_shape[1])
if rand_resize:
assert rand_crop
auglist.append(RandomSizedCropAug(crop_size, 0.08, (3.0 / 4.0, 4.0 / 3.0), inter_method))
elif rand_crop:
auglist.append(RandomCropAug(crop_size, inter_method))
else:
auglist.append(CenterCropAug(crop_size, inter_method))
if rand_mirror:
auglist.append(HorizontalFlipAug(0.5))
auglist.append(CastAug())
if brightness or contrast or saturation:
auglist.append(ColorJitterAug(brightness, contrast, saturation))
if hue:
auglist.append(HueJitterAug(hue))
if pca_noise > 0:
eigval = np.array([55.46, 4.794, 1.148])
eigvec = np.array([[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]])
auglist.append(LightingAug(pca_noise, eigval, eigvec))
if rand_gray > 0:
auglist.append(RandomGrayAug(rand_gray))
if mean is True:
mean = nd.array([123.68, 116.28, 103.53])
elif mean is not None:
assert isinstance(mean, (np.ndarray, nd.NDArray)) and mean.shape[0] in [1, 3]
if std is True:
std = nd.array([58.395, 57.12, 57.375])
elif std is not None:
assert isinstance(std, (np.ndarray, nd.NDArray)) and std.shape[0] in [1, 3]
if mean is not None or std is not None:
auglist.append(ColorNormalizeAug(mean, std))
return auglist
|
Saves the Augmenter to string
Returns
-------
str
JSON formatted string that describes the Augmenter.
|
def dumps(self):
"""Saves the Augmenter to string
Returns
-------
str
JSON formatted string that describes the Augmenter.
"""
return json.dumps([self.__class__.__name__.lower(), self._kwargs])
|
Override the default to avoid duplicate dump.
|
def dumps(self):
"""Override the default to avoid duplicate dump."""
return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]
|
Resets the iterator to the beginning of the data.
|
def reset(self):
"""Resets the iterator to the beginning of the data."""
if self.seq is not None and self.shuffle:
random.shuffle(self.seq)
if self.last_batch_handle != 'roll_over' or \
self._cache_data is None:
if self.imgrec is not None:
self.imgrec.reset()
self.cur = 0
if self._allow_read is False:
self._allow_read = True
|
Resets the iterator and ignore roll over data
|
def hard_reset(self):
"""Resets the iterator and ignore roll over data"""
if self.seq is not None and self.shuffle:
random.shuffle(self.seq)
if self.imgrec is not None:
self.imgrec.reset()
self.cur = 0
self._allow_read = True
self._cache_data = None
self._cache_label = None
self._cache_idx = None
|
Helper function for reading in next sample.
|
def next_sample(self):
"""Helper function for reading in next sample."""
if self._allow_read is False:
raise StopIteration
if self.seq is not None:
if self.cur < self.num_image:
idx = self.seq[self.cur]
else:
if self.last_batch_handle != 'discard':
self.cur = 0
raise StopIteration
self.cur += 1
if self.imgrec is not None:
s = self.imgrec.read_idx(idx)
header, img = recordio.unpack(s)
if self.imglist is None:
return header.label, img
else:
return self.imglist[idx][0], img
else:
label, fname = self.imglist[idx]
return label, self.read_image(fname)
else:
s = self.imgrec.read()
if s is None:
if self.last_batch_handle != 'discard':
self.imgrec.reset()
raise StopIteration
header, img = recordio.unpack(s)
return header.label, img
|
Helper function for batchifying data
|
def _batchify(self, batch_data, batch_label, start=0):
"""Helper function for batchifying data"""
i = start
batch_size = self.batch_size
try:
while i < batch_size:
label, s = self.next_sample()
data = self.imdecode(s)
try:
self.check_valid_image(data)
except RuntimeError as e:
logging.debug('Invalid image, skipping: %s', str(e))
continue
data = self.augmentation_transform(data)
assert i < batch_size, 'Batch size must be multiples of augmenter output length'
batch_data[i] = self.postprocess_data(data)
batch_label[i] = label
i += 1
except StopIteration:
if not i:
raise StopIteration
return i
|
Decodes a string or byte string to an NDArray.
See mx.img.imdecode for more details.
|
def imdecode(self, s):
"""Decodes a string or byte string to an NDArray.
See mx.img.imdecode for more details."""
def locate():
"""Locate the image file/index if decode fails."""
if self.seq is not None:
idx = self.seq[(self.cur % self.num_image) - 1]
else:
idx = (self.cur % self.num_image) - 1
if self.imglist is not None:
_, fname = self.imglist[idx]
msg = "filename: {}".format(fname)
else:
msg = "index: {}".format(idx)
return "Broken image " + msg
try:
img = imdecode(s)
except Exception as e:
raise RuntimeError("{}, {}".format(locate(), e))
return img
|
Reads an input image `fname` and returns the raw bytes (not decoded).
Examples
--------
>>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.
|
def read_image(self, fname):
"""Reads an input image `fname` and returns the decoded raw bytes.
Examples
--------
>>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.
"""
with open(os.path.join(self.path_root, fname), 'rb') as fin:
img = fin.read()
return img
|
evaluate accuracy
|
def facc(label, pred):
""" evaluate accuracy """
pred = pred.ravel()
label = label.ravel()
return ((pred > 0.5) == label).mean()
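A worked example, thresholding predictions at 0.5:
import numpy as np
label = np.array([0, 1, 1, 0])
pred = np.array([0.2, 0.9, 0.4, 0.1])
print(facc(label, pred))  # 0.75: three of the four thresholded predictions match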
|
Convert a word (string) to a vector of integers.
|
def word_to_vector(word):
"""
Convert a word (string) to a vector of integers.
"""
vector = []
for char in list(word):
vector.append(char2int(char))
return vector
|
Convert a vector of integers back to a word (string).
|
def vector_to_word(vector):
"""
Convert a vector of integers back to a word (string).
"""
word = ""
for vec in vector:
word = word + int2char(vec)
return word
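A round-trip sketch, assuming `char2int` and `int2char` (not shown in this section) map characters to integer indices and back:
word = "cat"
vector = word_to_vector(word)  # a list of three integers under the assumed mapping
assert vector_to_word(vector) == word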
|
Convert integer vectors to character vectors for batch.
|
def char_conv(out):
"""
Convert integer vectors to character vectors for batch.
"""
out_conv = list()
for i in range(out.shape[0]):
tmp_str = ''
for j in range(out.shape[1]):
if int(out[i][j]) >= 0:
tmp_char = int2char(int(out[i][j]))
if int(out[i][j]) == 27:
tmp_char = ''
tmp_str = tmp_str + tmp_char
out_conv.append(tmp_str)
return out_conv
|
Add a pooling layer to the model.
This is our own implementation of add_pooling, since the current version (0.5.0) of
CoreML's builder does not support padding types other than 'valid'. This support will be
added in the next release of coremltools; when that happens, this can be removed.
Parameters
----------
builder: NeuralNetworkBuilder
A neural network builder object.
name: str
The name of this layer.
height: int
Height of pooling region.
width: int
Width of pooling region.
stride_height: int
Stride along the height direction.
stride_width: int
Stride along the width direction.
layer_type: str
Type of pooling performed. Can either be 'MAX', 'AVERAGE' or 'L2'.
padding_type: str
Option for the output blob shape. Can be either 'VALID' , 'SAME' or 'INCLUDE_LAST_PIXEL'. Kindly look at NeuralNetwork.proto for details.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
padding_top, padding_bottom, padding_left, padding_right: int
values of height (top, bottom) and width (left, right) padding to be used if padding type is "VALID" or "INCLUDE_LAST_PIXEL"
same_padding_asymmetry_mode : str.
Type of asymmetric padding to be used when padding_type = 'SAME'. Kindly look at NeuralNetwork.proto for details. Can be either 'BOTTOM_RIGHT_HEAVY' or 'TOP_LEFT_HEAVY'.
exclude_pad_area: boolean
Whether to exclude padded area in the pooling operation. Defaults to True.
- If True, the value of the padded area will be excluded.
- If False, the padded area will be included.
This flag is only used with average pooling.
is_global: boolean
Whether the pooling operation is global. Defaults to False.
- If True, the pooling operation is global -- the pooling region is of the same size of the input blob.
Parameters height, width, stride_height, stride_width will be ignored.
- If False, the pooling operation is not global.
See Also
--------
add_convolution, add_pooling, add_activation
|
def add_pooling_with_padding_types(builder, name, height, width, stride_height, stride_width,
layer_type, padding_type, input_name, output_name,
padding_top = 0, padding_bottom = 0, padding_left = 0, padding_right = 0,
same_padding_asymmetry_mode = 'BOTTOM_RIGHT_HEAVY',
exclude_pad_area = True, is_global = False):
"""
Add a pooling layer to the model.
This is our own implementation of add_pooling, since the current version (0.5.0) of
CoreML's builder does not support padding types other than 'valid'. This support will be
added in the next release of coremltools; when that happens, this can be removed.
Parameters
----------
builder: NeuralNetworkBuilder
A neural network builder object.
name: str
The name of this layer.
height: int
Height of pooling region.
width: int
Width of pooling region.
stride_height: int
Stride along the height direction.
stride_width: int
Stride along the width direction.
layer_type: str
Type of pooling performed. Can either be 'MAX', 'AVERAGE' or 'L2'.
padding_type: str
Option for the output blob shape. Can be either 'VALID' , 'SAME' or 'INCLUDE_LAST_PIXEL'. Kindly look at NeuralNetwork.proto for details.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
padding_top, padding_bottom, padding_left, padding_right: int
values of height (top, bottom) and width (left, right) padding to be used if padding type is "VALID" or "INCLUDE_LAST_PIXEL"
same_padding_asymmetry_mode : str.
Type of asymmetric padding to be used when padding_type = 'SAME'. Kindly look at NeuralNetwork.proto for details. Can be either 'BOTTOM_RIGHT_HEAVY' or 'TOP_LEFT_HEAVY'.
exclude_pad_area: boolean
Whether to exclude padded area in the pooling operation. Defaults to True.
- If True, the value of the padded area will be excluded.
- If False, the padded area will be included.
This flag is only used with average pooling.
is_global: boolean
Whether the pooling operation is global. Defaults to False.
- If True, the pooling operation is global -- the pooling region is of the same size of the input blob.
Parameters height, width, stride_height, stride_width will be ignored.
- If False, the pooling operation is not global.
See Also
--------
add_convolution, add_pooling, add_activation
"""
spec = builder.spec
nn_spec = builder.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.pooling
# Set the parameters
spec_layer_params.type = \
_NeuralNetwork_pb2.PoolingLayerParams.PoolingType.Value(layer_type)
if padding_type == 'VALID':
height_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add()
height_border.startEdgeSize = padding_top
height_border.endEdgeSize = padding_bottom
width_border = spec_layer_params.valid.paddingAmounts.borderAmounts.add()
width_border.startEdgeSize = padding_left
width_border.endEdgeSize = padding_right
elif padding_type == 'SAME':
if not (same_padding_asymmetry_mode == 'BOTTOM_RIGHT_HEAVY' or same_padding_asymmetry_mode == 'TOP_LEFT_HEAVY'):
raise ValueError("Invalid value %d of same_padding_asymmetry_mode parameter" % same_padding_asymmetry_mode)
spec_layer_params.same.asymmetryMode = _NeuralNetwork_pb2.SamePadding.SamePaddingMode.Value(same_padding_asymmetry_mode)
elif padding_type == 'INCLUDE_LAST_PIXEL':
if padding_top != padding_bottom or padding_left != padding_right:
raise ValueError("Only symmetric padding is supported with the INCLUDE_LAST_PIXEL padding type")
spec_layer_params.includeLastPixel.paddingAmounts.append(padding_top)
spec_layer_params.includeLastPixel.paddingAmounts.append(padding_left)
spec_layer_params.kernelSize.append(height)
spec_layer_params.kernelSize.append(width)
spec_layer_params.stride.append(stride_height)
spec_layer_params.stride.append(stride_width)
spec_layer_params.avgPoolExcludePadding = exclude_pad_area
spec_layer_params.globalPooling = is_global
|
Get the paths of all SAX-view series that contain a complete set of 30 frames
|
def get_frames(root_path):
"""Get path to all the frame in view SAX and contain complete frames"""
ret = []
for root, _, files in os.walk(root_path):
root = root.replace('\\', '/')
files = [s for s in files if ".dcm" in s]
if len(files) == 0 or not files[0].endswith(".dcm") or root.find("sax") == -1:
continue
prefix = files[0].rsplit('-', 1)[0]
fileset = set(files)
expected = ["%s-%04d.dcm" % (prefix, i + 1) for i in range(30)]
if all(x in fileset for x in expected):
ret.append([root + "/" + x for x in expected])
# sort for reproducibility
return sorted(ret, key=lambda x: x[0])
|
crop center and resize
|
def crop_resize(img, size):
"""crop center and resize"""
if img.shape[0] < img.shape[1]:
img = img.T
# we crop image from center
short_edge = min(img.shape[:2])
yy = int((img.shape[0] - short_edge) / 2)
xx = int((img.shape[1] - short_edge) / 2)
crop_img = img[yy : yy + short_edge, xx : xx + short_edge]
# resize to (size, size)
resized_img = transform.resize(crop_img, (size, size))
resized_img *= 255
return resized_img.astype("uint8")
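A usage sketch, assuming skimage's `transform` module is in scope (as `crop_resize` requires); the frame is hypothetical:
import numpy as np
from skimage import transform  # required by crop_resize
img = np.random.rand(120, 160)  # hypothetical grayscale frame
out = crop_resize(img, 64)
print(out.shape, out.dtype)  # (64, 64) uint8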
|
construct and return generator
|
def get_generator():
""" construct and return generator """
g_net = gluon.nn.Sequential()
with g_net.name_scope():
g_net.add(gluon.nn.Conv2DTranspose(
channels=512, kernel_size=4, strides=1, padding=0, use_bias=False))
g_net.add(gluon.nn.BatchNorm())
g_net.add(gluon.nn.LeakyReLU(0.2))
g_net.add(gluon.nn.Conv2DTranspose(
channels=256, kernel_size=4, strides=2, padding=1, use_bias=False))
g_net.add(gluon.nn.BatchNorm())
g_net.add(gluon.nn.LeakyReLU(0.2))
g_net.add(gluon.nn.Conv2DTranspose(
channels=128, kernel_size=4, strides=2, padding=1, use_bias=False))
g_net.add(gluon.nn.BatchNorm())
g_net.add(gluon.nn.LeakyReLU(0.2))
g_net.add(gluon.nn.Conv2DTranspose(
channels=64, kernel_size=4, strides=2, padding=1, use_bias=False))
g_net.add(gluon.nn.BatchNorm())
g_net.add(gluon.nn.LeakyReLU(0.2))
g_net.add(gluon.nn.Conv2DTranspose(channels=3, kernel_size=4, strides=2, padding=1, use_bias=False))
g_net.add(gluon.nn.Activation('tanh'))
return g_net
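A shape-check sketch; the batch size and latent length of 100 are assumptions, not fixed by the network itself:
import mxnet as mx
g_net = get_generator()
g_net.initialize(mx.init.Normal(0.02))
noise = mx.nd.random.normal(shape=(4, 100, 1, 1))  # hypothetical latent batch
print(g_net(noise).shape)  # (4, 3, 64, 64): 4x4 after the first block, then doubled four times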
|
construct and return descriptor
|
def get_descriptor(ctx):
""" construct and return descriptor """
d_net = gluon.nn.Sequential()
with d_net.name_scope():
d_net.add(SNConv2D(num_filter=64, kernel_size=4, strides=2, padding=1, in_channels=3, ctx=ctx))
d_net.add(gluon.nn.LeakyReLU(0.2))
d_net.add(SNConv2D(num_filter=128, kernel_size=4, strides=2, padding=1, in_channels=64, ctx=ctx))
d_net.add(gluon.nn.LeakyReLU(0.2))
d_net.add(SNConv2D(num_filter=256, kernel_size=4, strides=2, padding=1, in_channels=128, ctx=ctx))
d_net.add(gluon.nn.LeakyReLU(0.2))
d_net.add(SNConv2D(num_filter=512, kernel_size=4, strides=2, padding=1, in_channels=256, ctx=ctx))
d_net.add(gluon.nn.LeakyReLU(0.2))
d_net.add(SNConv2D(num_filter=1, kernel_size=4, strides=1, padding=0, in_channels=512, ctx=ctx))
return d_net
|
spectral normalization
|
def _spectral_norm(self):
""" spectral normalization """
w = self.params.get('weight').data(self.ctx)
w_mat = nd.reshape(w, [w.shape[0], -1])
_u = self.u.data(self.ctx)
_v = None
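# power iteration to approximate the largest singular value (spectral norm) of w_mat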
for _ in range(POWER_ITERATION):
_v = nd.L2Normalization(nd.dot(_u, w_mat))
_u = nd.L2Normalization(nd.dot(_v, w_mat.T))
sigma = nd.sum(nd.dot(_u, w_mat) * _v)
if sigma == 0.:
sigma = EPSILON
with autograd.pause():
self.u.set_data(_u)
return w / sigma
|
Compute the length of the output sequence after 1D convolution along
time. Note that this function is in line with the function used in
Convolution1D class from Keras.
Params:
input_length (int): Length of the input sequence.
filter_size (int): Width of the convolution kernel.
border_mode (str): Only support `same` or `valid`.
stride (int): Stride size used in 1D convolution.
dilation (int): Dilation rate of the convolution kernel.
|
def conv_output_length(input_length, filter_size, border_mode, stride,
dilation=1):
""" Compute the length of the output sequence after 1D convolution along
time. Note that this function is in line with the function used in
Convolution1D class from Keras.
Params:
input_length (int): Length of the input sequence.
filter_size (int): Width of the convolution kernel.
border_mode (str): Only support `same` or `valid`.
stride (int): Stride size used in 1D convolution.
dilation (int): Dilation rate of the convolution kernel.
"""
if input_length is None:
return None
assert border_mode in {'same', 'valid'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if border_mode == 'same':
output_length = input_length
elif border_mode == 'valid':
output_length = input_length - dilated_filter_size + 1
return (output_length + stride - 1) // stride
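A few worked values under the formula above (for 'valid', the length shrinks by the dilated filter size minus one before the ceiling division by stride):
print(conv_output_length(10, 3, 'valid', 2))  # (8 + 1) // 2 = 4
print(conv_output_length(10, 3, 'same', 2))  # (10 + 1) // 2 = 5
print(conv_output_length(10, 3, 'valid', 1, dilation=2))  # 10 - 5 + 1 = 6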
|
Compute the spectrogram for a real signal.
The parameters follow the naming convention of
matplotlib.mlab.specgram
Args:
samples (1D array): input audio signal
fft_length (int): number of elements in fft window
sample_rate (scalar): sample rate
hop_length (int): hop length (relative offset between neighboring
fft windows).
Returns:
x (2D array): spectrogram [frequency x time]
freq (1D array): frequency of each row in x
Note:
This is a truncating computation e.g. if fft_length=10,
hop_length=5 and the signal has 23 elements, then the
last 3 elements will be truncated.
|
def spectrogram(samples, fft_length=256, sample_rate=2, hop_length=128):
"""
Compute the spectrogram for a real signal.
The parameters follow the naming convention of
matplotlib.mlab.specgram
Args:
samples (1D array): input audio signal
fft_length (int): number of elements in fft window
sample_rate (scalar): sample rate
hop_length (int): hop length (relative offset between neighboring
fft windows).
Returns:
x (2D array): spectrogram [frequency x time]
freq (1D array): frequency of each row in x
Note:
This is a truncating computation e.g. if fft_length=10,
hop_length=5 and the signal has 23 elements, then the
last 3 elements will be truncated.
"""
assert not np.iscomplexobj(samples), "Must not pass in complex numbers"
window = np.hanning(fft_length)[:, None]
window_norm = np.sum(window ** 2)
# The scaling below follows the convention of
# matplotlib.mlab.specgram which is the same as
# matlabs specgram.
scale = window_norm * sample_rate
trunc = (len(samples) - fft_length) % hop_length
x = samples[:len(samples) - trunc]
# "stride trick" reshape to include overlap
nshape = (fft_length, (len(x) - fft_length) // hop_length + 1)
nstrides = (x.strides[0], x.strides[0] * hop_length)
x = as_strided(x, shape=nshape, strides=nstrides)
# window stride sanity check
assert np.all(x[:, 1] == samples[hop_length:(hop_length + fft_length)])
# broadcast window, compute fft over columns and square mod
# np.fft.rfft computes the one-dimensional DFT of a real-valued input via the FFT
x = np.fft.rfft(x * window, axis=0)
x = np.absolute(x) ** 2
# scale, 2.0 for everything except dc and fft_length/2
x[1:-1, :] *= (2.0 / scale)
x[(0, -1), :] /= scale
freqs = float(sample_rate) / fft_length * np.arange(x.shape[0])
return x, freqs
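A self-contained sketch on a synthetic tone; the sample rate and frequency are illustrative:
import numpy as np
sample_rate = 16000
t = np.arange(sample_rate) / sample_rate  # one second of audio
samples = np.sin(2 * np.pi * 440 * t)  # 440 Hz tone
x, freqs = spectrogram(samples, fft_length=256, sample_rate=sample_rate, hop_length=128)
print(x.shape)  # (129, 124): frequency bins x time frames
print(freqs[np.argmax(x.sum(axis=1))])  # peak bin near 440 Hz (437.5 with these settings)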
|
Calculate the log of linear spectrogram from FFT energy
Params:
filename (str): Path to the audio file
step (int): Step size in milliseconds between windows
window (int): FFT window size in milliseconds
max_freq (int): Only FFT bins corresponding to frequencies between
[0, max_freq] are returned
eps (float): Small value to ensure numerical stability (for ln(x))
|
def spectrogram_from_file(filename, step=10, window=20, max_freq=None,
eps=1e-14, overwrite=False, save_feature_as_csvfile=False):
""" Calculate the log of linear spectrogram from FFT energy
Params:
filename (str): Path to the audio file
step (int): Step size in milliseconds between windows
window (int): FFT window size in milliseconds
max_freq (int): Only FFT bins corresponding to frequencies between
[0, max_freq] are returned
eps (float): Small value to ensure numerical stability (for ln(x))
"""
csvfilename = filename.replace(".wav", ".csv")
if (os.path.isfile(csvfilename) is False) or overwrite:
with soundfile.SoundFile(filename) as sound_file:
audio = sound_file.read(dtype='float32')
sample_rate = sound_file.samplerate
if audio.ndim >= 2:
audio = np.mean(audio, 1)
if max_freq is None:
max_freq = sample_rate / 2
if max_freq > sample_rate / 2:
raise ValueError("max_freq must not be greater than half of "
" sample rate")
if step > window:
raise ValueError("step size must not be greater than window size")
hop_length = int(0.001 * step * sample_rate)
fft_length = int(0.001 * window * sample_rate)
pxx, freqs = spectrogram(
audio, fft_length=fft_length, sample_rate=sample_rate,
hop_length=hop_length)
ind = np.where(freqs <= max_freq)[0][-1] + 1
res = np.transpose(np.log(pxx[:ind, :] + eps))
if save_feature_as_csvfile:
np.savetxt(csvfilename, res)
return res
else:
return np.loadtxt(csvfilename)
|
generate random cropping boxes according to parameters
if satisfactory crops are generated, apply them to the ground-truth as well
Parameters:
----------
label : numpy.array (n x 5 matrix)
ground-truths
Returns:
----------
list of (crop_box, label) tuples, if failed, return empty list []
|
def sample(self, label):
"""
generate random cropping boxes according to parameters
if satisfactory crops are generated, apply them to the ground-truth as well
Parameters:
----------
label : numpy.array (n x 5 matrix)
ground-truths
Returns:
----------
list of (crop_box, label) tuples, if failed, return empty list []
"""
samples = []
count = 0
for trial in range(self.max_trials):
if count >= self.max_sample:
return samples
scale = np.random.uniform(self.min_scale, self.max_scale)
min_ratio = max(self.min_aspect_ratio, scale * scale)
max_ratio = min(self.max_aspect_ratio, 1. / scale / scale)
ratio = math.sqrt(np.random.uniform(min_ratio, max_ratio))
width = scale * ratio
height = scale / ratio
left = np.random.uniform(0., 1 - width)
top = np.random.uniform(0., 1 - height)
rand_box = (left, top, left + width, top + height)
valid_mask = np.where(label[:, 0] > -1)[0]
gt = label[valid_mask, :]
ious = self._check_satisfy(rand_box, gt)
if ious is not None:
# transform gt labels after crop, discard bad ones
l, t, r, b = rand_box
new_gt_boxes = []
new_width = r - l
new_height = b - t
for i in range(valid_mask.size):
if ious[i] > 0:
xmin = max(0., (gt[i, 1] - l) / new_width)
ymin = max(0., (gt[i, 2] - t) / new_height)
xmax = min(1., (gt[i, 3] - l) / new_width)
ymax = min(1., (gt[i, 4] - t) / new_height)
new_gt_boxes.append([gt[i, 0], xmin, ymin, xmax, ymax])
if not new_gt_boxes:
continue
new_gt_boxes = np.array(new_gt_boxes)
label = np.lib.pad(new_gt_boxes,
((0, label.shape[0]-new_gt_boxes.shape[0]), (0,0)), \
'constant', constant_values=(-1, -1))
samples.append((rand_box, label))
count += 1
return samples
|
check if overlap with any gt box is larger than threshold
|
def _check_satisfy(self, rand_box, gt_boxes):
"""
check if overlap with any gt box is larger than threshold
"""
l, t, r, b = rand_box
num_gt = gt_boxes.shape[0]
ls = np.ones(num_gt) * l
ts = np.ones(num_gt) * t
rs = np.ones(num_gt) * r
bs = np.ones(num_gt) * b
mask = np.where(ls < gt_boxes[:, 1])[0]
ls[mask] = gt_boxes[mask, 1]
mask = np.where(ts < gt_boxes[:, 2])[0]
ts[mask] = gt_boxes[mask, 2]
mask = np.where(rs > gt_boxes[:, 3])[0]
rs[mask] = gt_boxes[mask, 3]
mask = np.where(bs > gt_boxes[:, 4])[0]
bs[mask] = gt_boxes[mask, 4]
w = rs - ls
w[w < 0] = 0
h = bs - ts
h[h < 0] = 0
inter_area = h * w
union_area = np.ones(num_gt) * max(0, r - l) * max(0, b - t)
union_area += (gt_boxes[:, 3] - gt_boxes[:, 1]) * (gt_boxes[:, 4] - gt_boxes[:, 2])
union_area -= inter_area
ious = inter_area / union_area
ious[union_area <= 0] = 0
max_iou = np.amax(ious)
if max_iou < self.min_overlap:
return None
# check ground-truth constraint
if self.config['gt_constraint'] == 'center':
for i in range(ious.shape[0]):
if ious[i] > 0:
gt_x = (gt_boxes[i, 1] + gt_boxes[i, 3]) / 2.0
gt_y = (gt_boxes[i, 2] + gt_boxes[i, 4]) / 2.0
if gt_x < l or gt_x > r or gt_y < t or gt_y > b:
return None
elif self.config['gt_constraint'] == 'corner':
for i in range(ious.shape[0]):
if ious[i] > 0:
if gt_boxes[i, 1] < l or gt_boxes[i, 3] > r \
or gt_boxes[i, 2] < t or gt_boxes[i, 4] > b:
return None
return ious
|
generate random padding boxes according to parameters
if satisfactory padding is generated, apply it to the ground-truth as well
Parameters:
----------
label : numpy.array (n x 5 matrix)
ground-truths
Returns:
----------
list of (crop_box, label) tuples, if failed, return empty list []
|
def sample(self, label):
"""
generate random padding boxes according to parameters
if satisfactory padding is generated, apply it to the ground-truth as well
Parameters:
----------
label : numpy.array (n x 5 matrix)
ground-truths
Returns:
----------
list of (crop_box, label) tuples, if failed, return empty list []
"""
samples = []
count = 0
for trial in range(self.max_trials):
if count >= self.max_sample:
return samples
scale = np.random.uniform(self.min_scale, self.max_scale)
min_ratio = max(self.min_aspect_ratio, scale * scale)
max_ratio = min(self.max_aspect_ratio, 1. / scale / scale)
ratio = math.sqrt(np.random.uniform(min_ratio, max_ratio))
width = scale * ratio
if width < 1:
continue
height = scale / ratio
if height < 1:
continue
left = np.random.uniform(0., 1 - width)
top = np.random.uniform(0., 1 - height)
right = left + width
bot = top + height
rand_box = (left, top, right, bot)
valid_mask = np.where(label[:, 0] > -1)[0]
gt = label[valid_mask, :]
new_gt_boxes = []
for i in range(gt.shape[0]):
xmin = (gt[i, 1] - left) / width
ymin = (gt[i, 2] - top) / height
xmax = (gt[i, 3] - left) / width
ymax = (gt[i, 4] - top) / height
new_size = min(xmax - xmin, ymax - ymin)
if new_size < self.min_gt_scale:
new_gt_boxes = []
break
new_gt_boxes.append([gt[i, 0], xmin, ymin, xmax, ymax])
if not new_gt_boxes:
continue
new_gt_boxes = np.array(new_gt_boxes)
label = np.lib.pad(new_gt_boxes,
((0, label.shape[0]-new_gt_boxes.shape[0]), (0,0)), \
'constant', constant_values=(-1, -1))
samples.append((rand_box, label))
count += 1
return samples
|
Measure time cost of running a function
|
def measure_cost(repeat, scipy_trans_lhs, scipy_dns_lhs, func_name, *args, **kwargs):
"""Measure time cost of running a function
"""
mx.nd.waitall()
args_list = list(args)
start = time.time()
if scipy_trans_lhs:
args_list[0] = np.transpose(args_list[0]) if scipy_dns_lhs else sp.spmatrix.transpose(args_list[0])
for _ in range(repeat):
func_name(*args_list, **kwargs)
mx.nd.waitall()
end = time.time()
diff = end - start
return diff / repeat
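A usage sketch timing a dense dot product; the shapes are illustrative, and passing scipy_trans_lhs=False skips the scipy-specific transpose branch:
import mxnet as mx
lhs = mx.nd.random.uniform(shape=(512, 512))
rhs = mx.nd.random.uniform(shape=(512, 512))
avg = measure_cost(10, False, False, mx.nd.dot, lhs, rhs)
print('%f seconds per call' % avg)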
|
Print information about the annotation file.
:return:
|
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print('{}: {}'.format(key, value))
|
filtering parameters. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
|
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
filtering parameters. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
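A usage sketch, assuming `coco` is an instance of this annotation class loaded from a COCO-style JSON file (the category names used here are illustrative):

# ids of all categories named 'person', plus everything under the 'vehicle' supercategory
person_ids = coco.getCatIds(catNms=['person'])
vehicle_ids = coco.getCatIds(supNms=['vehicle'])
cats = coco.loadCats(person_ids + vehicle_ids)
print([cat['name'] for cat in cats])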
|
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
|
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
|
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
|
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
|
Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
|
def loadImgs(self, ids=[]):
"""
        Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
|
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
|
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
else:
raise Exception('datasetType not supported')
if datasetType == 'instances':
ax = plt.gca()
ax.set_autoscale_on(False)
polygons = []
color = []
for ann in anns:
c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
if 'segmentation' in ann:
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((int(len(seg)/2), 2))
polygons.append(Polygon(poly))
color.append(c)
else:
# mask
raise NotImplementedError("maskUtils disabled!")
if 'keypoints' in ann and type(ann['keypoints']) == list:
# turn skeleton into zero-based index
sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1
kp = np.array(ann['keypoints'])
x = kp[0::3]
y = kp[1::3]
v = kp[2::3]
for sk in sks:
if np.all(v[sk]>0):
plt.plot(x[sk],y[sk], linewidth=3, color=c)
plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)
plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
ax.add_collection(p)
p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print(ann['caption'])
|
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
|
def download(self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print('Please specify target directory')
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urlretrieve(img['coco_url'], fname)
print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time()- tic))
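A usage sketch (again assuming a loaded instance `coco`; the image ids are hypothetical):

# download two specific images into ./coco_images
# coco.download(tarDir='./coco_images', imgIds=[397133, 37777])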
|
Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
:param data (numpy.ndarray)
:return: annotations (python nested list)
|
def loadNumpyAnnotations(self, data):
"""
Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
:param data (numpy.ndarray)
:return: annotations (python nested list)
"""
print('Converting ndarray to lists...')
assert(type(data) == np.ndarray)
print(data.shape)
assert(data.shape[1] == 7)
N = data.shape[0]
ann = []
for i in range(N):
if i % 1000000 == 0:
print('{}/{}'.format(i,N))
ann += [{
'image_id' : int(data[i, 0]),
'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],
'score' : data[i, 5],
'category_id': int(data[i, 6]),
}]
return ann
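For example (a sketch; `coco` is a loaded instance of this class):

import numpy as np

# two detections: imageID, x1, y1, w, h, score, class
dets = np.array([[1., 10., 20., 30., 40., 0.9, 3.],
                 [2., 15., 25., 35., 45., 0.8, 7.]])
anns = coco.loadNumpyAnnotations(dets)
# anns[0] == {'image_id': 1, 'bbox': [10.0, 20.0, 30.0, 40.0], 'score': 0.9, 'category_id': 3}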
|
Convert an annotation which can be polygons or uncompressed RLE to RLE.
:return: RLE (run-length encoding)
|
def annToRLE(self, ann):
"""
        Convert an annotation which can be polygons or uncompressed RLE to RLE.
        :return: RLE (run-length encoding)
"""
t = self.imgs[ann['image_id']]
h, w = t['height'], t['width']
segm = ann['segmentation']
if type(segm) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
# rles = maskUtils.frPyObjects(segm, h, w)
# rle = maskUtils.merge(rles)
raise NotImplementedError("maskUtils disabled!")
elif type(segm['counts']) == list:
# uncompressed RLE
# rle = maskUtils.frPyObjects(segm, h, w)
raise NotImplementedError("maskUtils disabled!")
else:
# rle
rle = ann['segmentation']
return rle
|
Save cnn model
Returns
----------
callback: A callback function that can be passed as epoch_end_callback to fit
|
def save_model():
"""Save cnn model
Returns
----------
callback: A callback function that can be passed as epoch_end_callback to fit
"""
if not os.path.exists("checkpoint"):
os.mkdir("checkpoint")
return mx.callback.do_checkpoint("checkpoint/checkpoint", args.save_period)
|
Construct highway net
Parameters
----------
data : Symbol
    Input data
Returns
----------
Highway Networks
|
def highway(data):
"""Construct highway net
Parameters
----------
    data : Symbol
        Input data
Returns
----------
Highway Networks
"""
_data = data
high_weight = mx.sym.Variable('high_weight')
high_bias = mx.sym.Variable('high_bias')
high_fc = mx.sym.FullyConnected(data=data, weight=high_weight, bias=high_bias, num_hidden=300, name='high_fc')
high_relu = mx.sym.Activation(high_fc, act_type='relu')
high_trans_weight = mx.sym.Variable('high_trans_weight')
high_trans_bias = mx.sym.Variable('high_trans_bias')
high_trans_fc = mx.sym.FullyConnected(data=_data, weight=high_trans_weight, bias=high_trans_bias, num_hidden=300,
name='high_trans_sigmoid')
high_trans_sigmoid = mx.sym.Activation(high_trans_fc, act_type='sigmoid')
return high_relu * high_trans_sigmoid + _data * (1 - high_trans_sigmoid)
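A sketch of plugging the block into a symbolic graph; the input to highway() must already be 300-wide to match the hard-coded num_hidden=300 (the surrounding layer names are illustrative):

import mxnet as mx

data = mx.sym.Variable('data')                                   # e.g. pooled sentence features
pre = mx.sym.FullyConnected(data=data, num_hidden=300, name='pre_fc')
net = highway(pre)                                               # gated mix of transform and carry paths
cls = mx.sym.FullyConnected(data=net, num_hidden=2, name='cls')
softmax = mx.sym.SoftmaxOutput(data=cls, name='softmax')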
|
Train cnn model
Parameters
----------
symbol_data: symbol
train_iterator: DataIter
Train DataIter
valid_iterator: DataIter
Valid DataIter
data_column_names: list of str
Defaults to ('data') for a typical model used in image classification
target_names: list of str
Defaults to ('softmax_label') for a typical model used in image classification
|
def train(symbol_data, train_iterator, valid_iterator, data_column_names, target_names):
"""Train cnn model
Parameters
----------
symbol_data: symbol
train_iterator: DataIter
Train DataIter
valid_iterator: DataIter
Valid DataIter
data_column_names: list of str
Defaults to ('data') for a typical model used in image classification
target_names: list of str
Defaults to ('softmax_label') for a typical model used in image classification
"""
    devs = mx.cpu()  # default setting
    if args.gpus is not None:
        devs = [mx.gpu(int(i)) for i in args.gpus.split(',')]
module = mx.mod.Module(symbol_data, data_names=data_column_names, label_names=target_names, context=devs)
init_params = {
'vocab_embed_weight': {'uniform': 0.1},
'convolution0_weight': {'uniform': 0.1}, 'convolution0_bias': {'costant': 0},
'convolution1_weight': {'uniform': 0.1}, 'convolution1_bias': {'costant': 0},
'convolution2_weight': {'uniform': 0.1}, 'convolution2_bias': {'costant': 0},
'high_weight': {'uniform': 0.1}, 'high_bias': {'costant': 0},
'high_trans_weight': {'uniform': 0.1}, 'high_trans_bias': {'costant': -2},
'cls_weight': {'uniform': 0.1}, 'cls_bias': {'costant': 0},
}
# custom init_params
module.bind(data_shapes=train_iterator.provide_data, label_shapes=train_iterator.provide_label)
module.init_params(CustomInit(init_params))
lr_sch = mx.lr_scheduler.FactorScheduler(step=25000, factor=0.999)
module.init_optimizer(
optimizer='rmsprop', optimizer_params={'learning_rate': 0.0005, 'lr_scheduler': lr_sch})
def norm_stat(d):
return mx.nd.norm(d) / np.sqrt(d.size)
mon = mx.mon.Monitor(25000, norm_stat)
module.fit(train_data=train_iterator,
eval_data=valid_iterator,
eval_metric='acc',
kvstore=args.kv_store,
monitor=mon,
num_epoch=args.num_epochs,
batch_end_callback=mx.callback.Speedometer(args.batch_size, args.disp_batches),
epoch_end_callback=save_model())
|
Collate data into batch.
|
def default_batchify_fn(data):
"""Collate data into batch."""
if isinstance(data[0], nd.NDArray):
return nd.stack(*data)
elif isinstance(data[0], tuple):
data = zip(*data)
return [default_batchify_fn(i) for i in data]
else:
data = np.asarray(data)
return nd.array(data, dtype=data.dtype)
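For instance, stacking three (data, label) samples (a minimal sketch, assuming this function is importable):

from mxnet import nd

samples = [(nd.ones((3, 32, 32)) * i, nd.array([i])) for i in range(3)]
data, label = default_batchify_fn(samples)
print(data.shape, label.shape)   # (3, 3, 32, 32) (3, 1)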
|
Collate data into batch. Use shared memory for stacking.
|
def default_mp_batchify_fn(data):
"""Collate data into batch. Use shared memory for stacking."""
if isinstance(data[0], nd.NDArray):
out = nd.empty((len(data),) + data[0].shape, dtype=data[0].dtype,
ctx=context.Context('cpu_shared', 0))
return nd.stack(*data, out=out)
elif isinstance(data[0], tuple):
data = zip(*data)
return [default_mp_batchify_fn(i) for i in data]
else:
data = np.asarray(data)
return nd.array(data, dtype=data.dtype,
ctx=context.Context('cpu_shared', 0))
|
Move data into new context.
|
def _as_in_context(data, ctx):
"""Move data into new context."""
if isinstance(data, nd.NDArray):
return data.as_in_context(ctx)
elif isinstance(data, (list, tuple)):
return [_as_in_context(d, ctx) for d in data]
return data
|
Worker loop for multiprocessing DataLoader.
|
def worker_loop_v1(dataset, key_queue, data_queue, batchify_fn):
"""Worker loop for multiprocessing DataLoader."""
while True:
idx, samples = key_queue.get()
if idx is None:
break
batch = batchify_fn([dataset[i] for i in samples])
data_queue.put((idx, batch))
|
Fetcher loop for fetching data from the queue and putting it into the reorder dict.
|
def fetcher_loop_v1(data_queue, data_buffer, pin_memory=False,
pin_device_id=0, data_buffer_lock=None):
"""Fetcher loop for fetching data from queue and put in reorder dict."""
while True:
idx, batch = data_queue.get()
if idx is None:
break
if pin_memory:
batch = _as_in_context(batch, context.cpu_pinned(pin_device_id))
else:
batch = _as_in_context(batch, context.cpu())
if data_buffer_lock is not None:
with data_buffer_lock:
data_buffer[idx] = batch
else:
data_buffer[idx] = batch
|
Function for processing data in worker process.
|
def _worker_fn(samples, batchify_fn, dataset=None):
"""Function for processing data in worker process."""
# pylint: disable=unused-argument
    # each worker process must fork its own MXIndexedRecordIO handle;
    # keeping the dataset as a global variable saves significant overhead and is safe in the new process
global _worker_dataset
batch = batchify_fn([_worker_dataset[i] for i in samples])
buf = io.BytesIO()
ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(batch)
return buf.getvalue()
|
Send object
|
def send(self, obj):
"""Send object"""
buf = io.BytesIO()
ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(obj)
self.send_bytes(buf.getvalue())
|
Assign next batch workload to workers.
|
def _push_next(self):
"""Assign next batch workload to workers."""
r = next(self._iter, None)
if r is None:
return
self._key_queue.put((self._sent_idx, r))
self._sent_idx += 1
|
Shutdown internal workers by pushing terminate signals.
|
def shutdown(self):
"""Shutdown internal workers by pushing terminate signals."""
if not self._shutdown:
# send shutdown signal to the fetcher and join data queue first
            # Remark: loop_fetcher needs to be joined prior to the workers,
            # otherwise the fetcher may fail at getting data
self._data_queue.put((None, None))
self._fetcher.join()
# send shutdown signal to all worker processes
for _ in range(self._num_workers):
self._key_queue.put((None, None))
# force shut down any alive worker processes
for w in self._workers:
if w.is_alive():
w.terminate()
self._shutdown = True
|
Assign next batch workload to workers.
|
def _push_next(self):
"""Assign next batch workload to workers."""
r = next(self._iter, None)
if r is None:
return
async_ret = self._worker_pool.apply_async(
self._worker_fn, (r, self._batchify_fn, self._dataset))
self._data_buffer[self._sent_idx] = async_ret
self._sent_idx += 1
|
Returns ctype arrays for the key-value args, and whether string keys are used.
For internal use only.
|
def _ctype_key_value(keys, vals):
"""
    Returns ctype arrays for the key-value args, and whether string keys are used.
For internal use only.
"""
if isinstance(keys, (tuple, list)):
assert(len(keys) == len(vals))
c_keys = []
c_vals = []
use_str_keys = None
for key, val in zip(keys, vals):
c_key_i, c_val_i, str_keys_i = _ctype_key_value(key, val)
c_keys += c_key_i
c_vals += c_val_i
use_str_keys = str_keys_i if use_str_keys is None else use_str_keys
assert(use_str_keys == str_keys_i), "inconsistent types of keys detected."
c_keys_arr = c_array(ctypes.c_char_p, c_keys) if use_str_keys \
else c_array(ctypes.c_int, c_keys)
c_vals_arr = c_array(ctypes.c_void_p, c_vals)
return (c_keys_arr, c_vals_arr, use_str_keys)
assert(isinstance(keys, (int,) + string_types)), \
"unexpected type for keys: " + str(type(keys))
use_str_keys = isinstance(keys, string_types)
if isinstance(vals, NDArray):
c_keys = c_str_array([keys]) if use_str_keys \
else c_array_buf(ctypes.c_int, array('i', [keys]))
return (c_keys, c_handle_array([vals]), use_str_keys)
else:
for value in vals:
assert(isinstance(value, NDArray))
c_keys = c_str_array([keys] * len(vals)) if use_str_keys \
else c_array_buf(ctypes.c_int, array('i', [keys] * len(vals)))
return (c_keys, c_handle_array(vals), use_str_keys)
|
Returns ctype arrays for keys and values (converted to strings) in a dictionary.
|
def _ctype_dict(param_dict):
"""
    Returns ctype arrays for keys and values (converted to strings) in a dictionary.
"""
assert(isinstance(param_dict, dict)), \
"unexpected type for param_dict: " + str(type(param_dict))
c_keys = c_array(ctypes.c_char_p, [c_str(k) for k in param_dict.keys()])
c_vals = c_array(ctypes.c_char_p, [c_str(str(v)) for v in param_dict.values()])
return (c_keys, c_vals)
|
A wrapper for the user-defined handle.
|
def _updater_wrapper(updater):
"""A wrapper for the user-defined handle."""
def updater_handle(key, lhs_handle, rhs_handle, _):
""" ctypes function """
lhs = _ndarray_cls(NDArrayHandle(lhs_handle))
rhs = _ndarray_cls(NDArrayHandle(rhs_handle))
updater(key, lhs, rhs)
return updater_handle
|
Creates a new KVStore.
For single machine training, there are two commonly used types:
``local``: Copies all gradients to CPU memory and updates weights there.
``device``: Aggregates gradients and updates weights on GPUs. With this setting,
the KVStore also attempts to use GPU peer-to-peer communication,
potentially accelerating the communication.
For distributed training, KVStore also supports a number of types:
``dist_sync``: Behaves similarly to ``local`` but with one major difference.
With ``dist_sync``, batch-size now means the batch size used on each machine.
So if there are ``n`` machines and we use batch size ``b``,
then ``dist_sync`` behaves like ``local`` with batch size ``n * b``.
``dist_device_sync``: Identical to ``dist_sync`` with the difference similar
to ``device`` vs ``local``.
``dist_async``: Performs asynchronous updates.
The weights are updated whenever gradients are received from any machine.
No two updates happen on the same weight at the same time. However, the order is not
guaranteed.
Parameters
----------
name : {'local', 'device', 'nccl', 'dist_sync', 'dist_device_sync', 'dist_async'}
The type of KVStore.
Returns
-------
kv : KVStore
The created KVStore.
|
def create(name='local'):
"""Creates a new KVStore.
For single machine training, there are two commonly used types:
``local``: Copies all gradients to CPU memory and updates weights there.
``device``: Aggregates gradients and updates weights on GPUs. With this setting,
the KVStore also attempts to use GPU peer-to-peer communication,
potentially accelerating the communication.
For distributed training, KVStore also supports a number of types:
``dist_sync``: Behaves similarly to ``local`` but with one major difference.
With ``dist_sync``, batch-size now means the batch size used on each machine.
So if there are ``n`` machines and we use batch size ``b``,
then ``dist_sync`` behaves like ``local`` with batch size ``n * b``.
``dist_device_sync``: Identical to ``dist_sync`` with the difference similar
to ``device`` vs ``local``.
``dist_async``: Performs asynchronous updates.
The weights are updated whenever gradients are received from any machine.
No two updates happen on the same weight at the same time. However, the order is not
guaranteed.
Parameters
----------
name : {'local', 'device', 'nccl', 'dist_sync', 'dist_device_sync', 'dist_async'}
The type of KVStore.
Returns
-------
kv : KVStore
The created KVStore.
"""
if not isinstance(name, string_types):
raise TypeError('name must be a string')
handle = KVStoreHandle()
check_call(_LIB.MXKVStoreCreate(c_str(name),
ctypes.byref(handle)))
kv = KVStore(handle)
set_kvstore_handle(kv.handle)
return kv
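For example (a sketch):

import mxnet as mx

kv = mx.kv.create('local')      # single machine, CPU-side aggregation
# kv = mx.kv.create('device')   # single machine, aggregate on GPUs instead
print(kv.type, kv.rank, kv.num_workers)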
|
Initializes a single or a sequence of key-value pairs into the store.
For each key, one must `init` it before calling `push` or `pull`.
When multiple workers invoke `init` for the same key, only
the value supplied by worker with rank `0` is used. This function returns
after data has been initialized successfully.
Parameters
----------
key : str, int, or sequence of str or int
The keys.
value : NDArray, RowSparseNDArray or sequence of NDArray or RowSparseNDArray
Values corresponding to the keys.
Examples
--------
>>> # init a single key-value pair
>>> shape = (2,3)
>>> kv = mx.kv.create('local')
>>> kv.init('3', mx.nd.ones(shape)*2)
>>> a = mx.nd.zeros(shape)
>>> kv.pull('3', out=a)
>>> print a.asnumpy()
[[ 2. 2. 2.]
[ 2. 2. 2.]]
>>> # init a list of key-value pairs
>>> keys = ['5', '7', '9']
>>> kv.init(keys, [mx.nd.ones(shape)]*len(keys))
>>> # init a row_sparse value
>>> kv.init('4', mx.nd.ones(shape).tostype('row_sparse'))
>>> b = mx.nd.sparse.zeros('row_sparse', shape)
>>> kv.row_sparse_pull('4', row_ids=mx.nd.array([0, 1]), out=b)
>>> print b
<RowSparseNDArray 2x3 @cpu(0)>
|
def init(self, key, value):
""" Initializes a single or a sequence of key-value pairs into the store.
For each key, one must `init` it before calling `push` or `pull`.
When multiple workers invoke `init` for the same key, only
the value supplied by worker with rank `0` is used. This function returns
after data has been initialized successfully.
Parameters
----------
key : str, int, or sequence of str or int
The keys.
value : NDArray, RowSparseNDArray or sequence of NDArray or RowSparseNDArray
Values corresponding to the keys.
Examples
--------
>>> # init a single key-value pair
>>> shape = (2,3)
>>> kv = mx.kv.create('local')
>>> kv.init('3', mx.nd.ones(shape)*2)
>>> a = mx.nd.zeros(shape)
>>> kv.pull('3', out=a)
>>> print a.asnumpy()
[[ 2. 2. 2.]
[ 2. 2. 2.]]
>>> # init a list of key-value pairs
>>> keys = ['5', '7', '9']
>>> kv.init(keys, [mx.nd.ones(shape)]*len(keys))
>>> # init a row_sparse value
>>> kv.init('4', mx.nd.ones(shape).tostype('row_sparse'))
>>> b = mx.nd.sparse.zeros('row_sparse', shape)
>>> kv.row_sparse_pull('4', row_ids=mx.nd.array([0, 1]), out=b)
>>> print b
<RowSparseNDArray 2x3 @cpu(0)>
"""
ckeys, cvals, use_str_keys = _ctype_key_value(key, value)
if use_str_keys:
check_call(_LIB.MXKVStoreInitEx(self.handle, mx_uint(len(ckeys)), ckeys, cvals))
else:
check_call(_LIB.MXKVStoreInit(self.handle, mx_uint(len(ckeys)), ckeys, cvals))
|
Pushes a single or a sequence of key-value pairs into the store.
This function returns immediately after adding an operator to the engine.
The actual operation is executed asynchronously. If there are consecutive
pushes to the same key, there is no guarantee on the serialization of pushes.
The execution of a push does not guarantee that all previous pushes are
finished.
There is no synchronization between workers.
One can use ``_barrier()`` to sync all workers.
Parameters
----------
key : str, int, or sequence of str or int
Keys.
value : NDArray, RowSparseNDArray, list of NDArray or RowSparseNDArray,
or list of list of NDArray or RowSparseNDArray
Values corresponding to the keys.
priority : int, optional
The priority of the push operation.
Higher priority push operations are likely to be executed before
other push actions.
Examples
--------
>>> # push a single key-value pair
>>> kv.push('3', mx.nd.ones(shape)*8)
>>> kv.pull('3', out=a) # pull out the value
>>> print a.asnumpy()
[[ 8. 8. 8.]
[ 8. 8. 8.]]
>>> # aggregate the value and the push
>>> gpus = [mx.gpu(i) for i in range(4)]
>>> b = [mx.nd.ones(shape, gpu) for gpu in gpus]
>>> kv.push('3', b)
>>> kv.pull('3', out=a)
>>> print a.asnumpy()
[[ 4. 4. 4.]
[ 4. 4. 4.]]
>>> # push a list of keys.
>>> # single device
>>> keys = ['4', '5', '6']
>>> kv.push(keys, [mx.nd.ones(shape)]*len(keys))
>>> b = [mx.nd.zeros(shape)]*len(keys)
>>> kv.pull(keys, out=b)
>>> print b[1].asnumpy()
[[ 1. 1. 1.]
[ 1. 1. 1.]]
>>> # multiple devices:
>>> keys = ['7', '8', '9']
>>> b = [[mx.nd.ones(shape, gpu) for gpu in gpus]] * len(keys)
>>> kv.push(keys, b)
>>> kv.pull(keys, out=b)
>>> print b[1][1].asnumpy()
[[ 4. 4. 4.]
[ 4. 4. 4.]]
>>> # push a row_sparse value
>>> b = mx.nd.sparse.zeros('row_sparse', shape)
>>> kv.init('10', mx.nd.sparse.zeros('row_sparse', shape))
>>> kv.push('10', mx.nd.ones(shape).tostype('row_sparse'))
>>> # pull out the value
>>> kv.row_sparse_pull('10', row_ids=mx.nd.array([0, 1]), out=b)
>>> print b
<RowSparseNDArray 2x3 @cpu(0)>
|
def push(self, key, value, priority=0):
""" Pushes a single or a sequence of key-value pairs into the store.
This function returns immediately after adding an operator to the engine.
The actual operation is executed asynchronously. If there are consecutive
pushes to the same key, there is no guarantee on the serialization of pushes.
The execution of a push does not guarantee that all previous pushes are
finished.
There is no synchronization between workers.
One can use ``_barrier()`` to sync all workers.
Parameters
----------
key : str, int, or sequence of str or int
Keys.
value : NDArray, RowSparseNDArray, list of NDArray or RowSparseNDArray,
or list of list of NDArray or RowSparseNDArray
Values corresponding to the keys.
priority : int, optional
The priority of the push operation.
Higher priority push operations are likely to be executed before
other push actions.
Examples
--------
>>> # push a single key-value pair
>>> kv.push('3', mx.nd.ones(shape)*8)
>>> kv.pull('3', out=a) # pull out the value
>>> print a.asnumpy()
[[ 8. 8. 8.]
[ 8. 8. 8.]]
>>> # aggregate the value and the push
>>> gpus = [mx.gpu(i) for i in range(4)]
>>> b = [mx.nd.ones(shape, gpu) for gpu in gpus]
>>> kv.push('3', b)
>>> kv.pull('3', out=a)
>>> print a.asnumpy()
[[ 4. 4. 4.]
[ 4. 4. 4.]]
>>> # push a list of keys.
>>> # single device
>>> keys = ['4', '5', '6']
>>> kv.push(keys, [mx.nd.ones(shape)]*len(keys))
>>> b = [mx.nd.zeros(shape)]*len(keys)
>>> kv.pull(keys, out=b)
>>> print b[1].asnumpy()
[[ 1. 1. 1.]
[ 1. 1. 1.]]
>>> # multiple devices:
>>> keys = ['7', '8', '9']
>>> b = [[mx.nd.ones(shape, gpu) for gpu in gpus]] * len(keys)
>>> kv.push(keys, b)
>>> kv.pull(keys, out=b)
>>> print b[1][1].asnumpy()
[[ 4. 4. 4.]
[ 4. 4. 4.]]
>>> # push a row_sparse value
>>> b = mx.nd.sparse.zeros('row_sparse', shape)
>>> kv.init('10', mx.nd.sparse.zeros('row_sparse', shape))
>>> kv.push('10', mx.nd.ones(shape).tostype('row_sparse'))
>>> # pull out the value
>>> kv.row_sparse_pull('10', row_ids=mx.nd.array([0, 1]), out=b)
>>> print b
<RowSparseNDArray 2x3 @cpu(0)>
"""
ckeys, cvals, use_str_keys = _ctype_key_value(key, value)
if use_str_keys:
check_call(_LIB.MXKVStorePushEx(
self.handle, mx_uint(len(ckeys)), ckeys, cvals, ctypes.c_int(priority)))
else:
check_call(_LIB.MXKVStorePush(
self.handle, mx_uint(len(ckeys)), ckeys, cvals, ctypes.c_int(priority)))
|
Pulls a single value or a sequence of values from the store.
This function returns immediately after adding an operator to the engine.
Subsequent attempts to read from the `out` variable will be blocked until the
pull operation completes.
`pull` is executed asynchronously after all previous `pull` calls and only
the last `push` call for the same input key(s) are finished.
The returned values are guaranteed to be the latest values in the store.
pull with `RowSparseNDArray` is not supported for dist kvstore.
Please use ``row_sparse_pull`` instead.
Parameters
----------
key : str, int, or sequence of str or int
Keys.
out: NDArray or list of NDArray or list of list of NDArray
Values corresponding to the keys.
priority : int, optional
The priority of the pull operation.
Higher priority pull operations are likely to be executed before
other pull actions.
ignore_sparse: bool, optional, default True
Whether to ignore sparse arrays in the request.
Examples
--------
>>> # pull a single key-value pair
>>> a = mx.nd.zeros(shape)
>>> kv.pull('3', out=a)
>>> print a.asnumpy()
[[ 2. 2. 2.]
[ 2. 2. 2.]]
>>> # pull into multiple devices
>>> b = [mx.nd.ones(shape, gpu) for gpu in gpus]
>>> kv.pull('3', out=b)
>>> print b[1].asnumpy()
[[ 2. 2. 2.]
[ 2. 2. 2.]]
>>> # pull a list of key-value pairs.
>>> # On single device
>>> keys = ['5', '7', '9']
>>> b = [mx.nd.zeros(shape)]*len(keys)
>>> kv.pull(keys, out=b)
>>> print b[1].asnumpy()
[[ 2. 2. 2.]
[ 2. 2. 2.]]
>>> # On multiple devices
>>> keys = ['6', '8', '10']
>>> b = [[mx.nd.ones(shape, gpu) for gpu in gpus]] * len(keys)
>>> kv.pull(keys, out=b)
>>> print b[1][1].asnumpy()
[[ 2. 2. 2.]
[ 2. 2. 2.]]
|
def pull(self, key, out=None, priority=0, ignore_sparse=True):
""" Pulls a single value or a sequence of values from the store.
This function returns immediately after adding an operator to the engine.
Subsequent attempts to read from the `out` variable will be blocked until the
pull operation completes.
`pull` is executed asynchronously after all previous `pull` calls and only
the last `push` call for the same input key(s) are finished.
The returned values are guaranteed to be the latest values in the store.
pull with `RowSparseNDArray` is not supported for dist kvstore.
Please use ``row_sparse_pull`` instead.
Parameters
----------
key : str, int, or sequence of str or int
Keys.
out: NDArray or list of NDArray or list of list of NDArray
Values corresponding to the keys.
priority : int, optional
The priority of the pull operation.
Higher priority pull operations are likely to be executed before
other pull actions.
ignore_sparse: bool, optional, default True
Whether to ignore sparse arrays in the request.
Examples
--------
>>> # pull a single key-value pair
>>> a = mx.nd.zeros(shape)
>>> kv.pull('3', out=a)
>>> print a.asnumpy()
[[ 2. 2. 2.]
[ 2. 2. 2.]]
>>> # pull into multiple devices
>>> b = [mx.nd.ones(shape, gpu) for gpu in gpus]
>>> kv.pull('3', out=b)
>>> print b[1].asnumpy()
[[ 2. 2. 2.]
[ 2. 2. 2.]]
>>> # pull a list of key-value pairs.
>>> # On single device
>>> keys = ['5', '7', '9']
>>> b = [mx.nd.zeros(shape)]*len(keys)
>>> kv.pull(keys, out=b)
>>> print b[1].asnumpy()
[[ 2. 2. 2.]
[ 2. 2. 2.]]
>>> # On multiple devices
>>> keys = ['6', '8', '10']
>>> b = [[mx.nd.ones(shape, gpu) for gpu in gpus]] * len(keys)
>>> kv.pull(keys, out=b)
>>> print b[1][1].asnumpy()
[[ 2. 2. 2.]
[ 2. 2. 2.]]
"""
assert(out is not None)
ckeys, cvals, use_str_keys = _ctype_key_value(key, out)
if use_str_keys:
check_call(_LIB.MXKVStorePullWithSparseEx(self.handle, mx_uint(len(ckeys)), ckeys,
cvals, ctypes.c_int(priority),
ctypes.c_bool(ignore_sparse)))
else:
check_call(_LIB.MXKVStorePullWithSparse(self.handle, mx_uint(len(ckeys)), ckeys,
cvals, ctypes.c_int(priority),
ctypes.c_bool(ignore_sparse)))
|
Pulls a single RowSparseNDArray value or a sequence of RowSparseNDArray values \
from the store with specified row_ids. When there is only one row_id, KVStoreRowSparsePull \
is invoked just once and the result is broadcast to all the remaining outputs.
`row_sparse_pull` is executed asynchronously after all previous
`pull`/`row_sparse_pull` calls and the last `push` call for the
same input key(s) are finished.
The returned values are guaranteed to be the latest values in the store.
Parameters
----------
key : str, int, or sequence of str or int
Keys.
out: RowSparseNDArray or list of RowSparseNDArray or list of list of RowSparseNDArray
Values corresponding to the keys. The stype is expected to be row_sparse
priority : int, optional
The priority of the pull operation.
Higher priority pull operations are likely to be executed before
other pull actions.
row_ids : NDArray or list of NDArray
The row_ids for which to pull for each value. Each row_id is a 1-D NDArray \
whose values need not be unique or sorted.
Examples
--------
>>> shape = (3, 3)
>>> kv.init('3', mx.nd.ones(shape).tostype('row_sparse'))
>>> a = mx.nd.sparse.zeros('row_sparse', shape)
>>> row_ids = mx.nd.array([0, 2], dtype='int64')
>>> kv.row_sparse_pull('3', out=a, row_ids=row_ids)
>>> print a.asnumpy()
[[ 1. 1. 1.]
[ 0. 0. 0.]
[ 1. 1. 1.]]
>>> duplicate_row_ids = mx.nd.array([2, 2], dtype='int64')
>>> kv.row_sparse_pull('3', out=a, row_ids=duplicate_row_ids)
>>> print a.asnumpy()
[[ 0. 0. 0.]
[ 0. 0. 0.]
[ 1. 1. 1.]]
>>> unsorted_row_ids = mx.nd.array([1, 0], dtype='int64')
>>> kv.row_sparse_pull('3', out=a, row_ids=unsorted_row_ids)
>>> print a.asnumpy()
[[ 1. 1. 1.]
[ 1. 1. 1.]
[ 0. 0. 0.]]
|
def row_sparse_pull(self, key, out=None, priority=0, row_ids=None):
""" Pulls a single RowSparseNDArray value or a sequence of RowSparseNDArray values \
        from the store with specified row_ids. When there is only one row_id, KVStoreRowSparsePull \
        is invoked just once and the result is broadcast to all the remaining outputs.
`row_sparse_pull` is executed asynchronously after all previous
`pull`/`row_sparse_pull` calls and the last `push` call for the
same input key(s) are finished.
The returned values are guaranteed to be the latest values in the store.
Parameters
----------
key : str, int, or sequence of str or int
Keys.
out: RowSparseNDArray or list of RowSparseNDArray or list of list of RowSparseNDArray
Values corresponding to the keys. The stype is expected to be row_sparse
priority : int, optional
The priority of the pull operation.
Higher priority pull operations are likely to be executed before
other pull actions.
row_ids : NDArray or list of NDArray
            The row_ids for which to pull for each value. Each row_id is a 1-D NDArray \
            whose values need not be unique or sorted.
Examples
--------
>>> shape = (3, 3)
>>> kv.init('3', mx.nd.ones(shape).tostype('row_sparse'))
>>> a = mx.nd.sparse.zeros('row_sparse', shape)
>>> row_ids = mx.nd.array([0, 2], dtype='int64')
>>> kv.row_sparse_pull('3', out=a, row_ids=row_ids)
>>> print a.asnumpy()
[[ 1. 1. 1.]
[ 0. 0. 0.]
[ 1. 1. 1.]]
>>> duplicate_row_ids = mx.nd.array([2, 2], dtype='int64')
>>> kv.row_sparse_pull('3', out=a, row_ids=duplicate_row_ids)
>>> print a.asnumpy()
[[ 0. 0. 0.]
[ 0. 0. 0.]
[ 1. 1. 1.]]
>>> unsorted_row_ids = mx.nd.array([1, 0], dtype='int64')
>>> kv.row_sparse_pull('3', out=a, row_ids=unsorted_row_ids)
>>> print a.asnumpy()
[[ 1. 1. 1.]
[ 1. 1. 1.]
[ 0. 0. 0.]]
"""
assert(out is not None)
assert(row_ids is not None)
if isinstance(row_ids, NDArray):
row_ids = [row_ids]
assert(isinstance(row_ids, list)), \
"row_ids should be NDArray or list of NDArray"
first_out = out
# whether row_ids are the same
single_rowid = False
if len(row_ids) == 1 and isinstance(out, list):
single_rowid = True
first_out = [out[0]]
ckeys, cvals, use_str_keys = _ctype_key_value(key, first_out)
_, crow_ids, _ = _ctype_key_value(key, row_ids)
assert(len(crow_ids) == len(cvals)), \
"the number of row_ids doesn't match the number of values"
if use_str_keys:
check_call(_LIB.MXKVStorePullRowSparseEx(
self.handle, mx_uint(len(ckeys)), ckeys, cvals, crow_ids, ctypes.c_int(priority)))
else:
check_call(_LIB.MXKVStorePullRowSparse(
self.handle, mx_uint(len(ckeys)), ckeys, cvals, crow_ids, ctypes.c_int(priority)))
# the result can be copied to other devices without invoking row_sparse_pull
# if the indices are the same
if single_rowid:
for out_i in out[1:]:
out[0].copyto(out_i)
|
Specifies type of low-bit quantization for gradient compression \
and additional arguments depending on the type of compression being used.
2bit Gradient Compression takes a positive float `threshold`.
The technique works by thresholding values such that positive values in the
gradient above threshold will be set to threshold. Negative values whose absolute
values are higher than threshold, will be set to the negative of threshold.
Values whose absolute values are less than threshold will be set to 0.
By doing so, each value in the gradient is in one of three states. 2bits are
used to represent these states, and every 16 float values in the original
gradient can be represented using one float. This compressed representation
can reduce communication costs. The difference between these thresholded values and
original values is stored at the sender's end as residual and added to the
gradient in the next iteration.
When kvstore is 'local', gradient compression is used to reduce communication
between multiple devices (gpus). Gradient is quantized on each GPU which
computed the gradients, then sent to the GPU which merges the gradients. This
receiving GPU dequantizes the gradients and merges them. Note that this
increases memory usage on each GPU because of the residual array stored.
When kvstore is 'dist', gradient compression is used to reduce communication
from worker to server. Gradient is quantized on each worker which
computed the gradients, then sent to the server which dequantizes
this data and merges the gradients from each worker. Note that this
increases CPU memory usage on each worker because of the residual array stored.
Only worker to server communication is compressed in this setting.
If each machine has multiple GPUs, currently this GPU to GPU or GPU to CPU communication
is not compressed. Server to worker communication (in the case of pull)
is also not compressed.
To use 2bit compression, we need to specify `type` as `2bit`.
Only specifying `type` would use default value for the threshold.
To completely specify the arguments for 2bit compression, we would need to pass
a dictionary which includes `threshold` like:
{'type': '2bit', 'threshold': 0.5}
Parameters
----------
compression_params : dict
A dictionary specifying the type and parameters for gradient compression.
The key `type` in this dictionary is a
required string argument and specifies the type of gradient compression.
Currently `type` can be only `2bit`
Other keys in this dictionary are optional and specific to the type
of gradient compression.
|
def set_gradient_compression(self, compression_params):
""" Specifies type of low-bit quantization for gradient compression \
and additional arguments depending on the type of compression being used.
2bit Gradient Compression takes a positive float `threshold`.
The technique works by thresholding values such that positive values in the
gradient above threshold will be set to threshold. Negative values whose absolute
values are higher than threshold, will be set to the negative of threshold.
Values whose absolute values are less than threshold will be set to 0.
By doing so, each value in the gradient is in one of three states. 2bits are
used to represent these states, and every 16 float values in the original
gradient can be represented using one float. This compressed representation
can reduce communication costs. The difference between these thresholded values and
original values is stored at the sender's end as residual and added to the
gradient in the next iteration.
When kvstore is 'local', gradient compression is used to reduce communication
between multiple devices (gpus). Gradient is quantized on each GPU which
computed the gradients, then sent to the GPU which merges the gradients. This
receiving GPU dequantizes the gradients and merges them. Note that this
increases memory usage on each GPU because of the residual array stored.
        When kvstore is 'dist', gradient compression is used to reduce communication
        from worker to server. Gradient is quantized on each worker which
computed the gradients, then sent to the server which dequantizes
this data and merges the gradients from each worker. Note that this
increases CPU memory usage on each worker because of the residual array stored.
Only worker to server communication is compressed in this setting.
If each machine has multiple GPUs, currently this GPU to GPU or GPU to CPU communication
is not compressed. Server to worker communication (in the case of pull)
is also not compressed.
To use 2bit compression, we need to specify `type` as `2bit`.
Only specifying `type` would use default value for the threshold.
To completely specify the arguments for 2bit compression, we would need to pass
a dictionary which includes `threshold` like:
{'type': '2bit', 'threshold': 0.5}
Parameters
----------
compression_params : dict
A dictionary specifying the type and parameters for gradient compression.
The key `type` in this dictionary is a
required string argument and specifies the type of gradient compression.
Currently `type` can be only `2bit`
Other keys in this dictionary are optional and specific to the type
of gradient compression.
"""
if ('device' in self.type) or ('dist' in self.type): # pylint: disable=unsupported-membership-test
ckeys, cvals = _ctype_dict(compression_params)
check_call(_LIB.MXKVStoreSetGradientCompression(self.handle,
mx_uint(len(compression_params)),
ckeys, cvals))
else:
raise Exception('Gradient compression is not supported for this type of kvstore')
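For example, turning on 2bit compression (a sketch; a 'device' store is used here so the snippet runs on a single machine, but the same call applies to 'dist' stores):

import mxnet as mx

kv = mx.kv.create('device')
kv.set_gradient_compression({'type': '2bit', 'threshold': 0.5})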
|
Registers an optimizer with the kvstore.
When using a single machine, this function updates the local optimizer.
If using multiple machines and this operation is invoked from a worker node,
it will serialize the optimizer with pickle and send it to all servers.
The function returns after all servers have been updated.
Parameters
----------
optimizer : Optimizer
The new optimizer for the store
Examples
--------
>>> kv = mx.kv.create()
>>> shape = (2, 2)
>>> weight = mx.nd.zeros(shape)
>>> kv.init(3, weight)
>>> # set the optimizer for kvstore as the default SGD optimizer
>>> kv.set_optimizer(mx.optimizer.SGD())
>>> grad = mx.nd.ones(shape)
>>> kv.push(3, grad)
>>> kv.pull(3, out = weight)
>>> # weight is updated via gradient descent
>>> weight.asnumpy()
array([[-0.01, -0.01],
[-0.01, -0.01]], dtype=float32)
|
def set_optimizer(self, optimizer):
""" Registers an optimizer with the kvstore.
When using a single machine, this function updates the local optimizer.
If using multiple machines and this operation is invoked from a worker node,
        it will serialize the optimizer with pickle and send it to all servers.
The function returns after all servers have been updated.
Parameters
----------
optimizer : Optimizer
The new optimizer for the store
Examples
--------
>>> kv = mx.kv.create()
>>> shape = (2, 2)
>>> weight = mx.nd.zeros(shape)
>>> kv.init(3, weight)
>>> # set the optimizer for kvstore as the default SGD optimizer
>>> kv.set_optimizer(mx.optimizer.SGD())
>>> grad = mx.nd.ones(shape)
>>> kv.push(3, grad)
>>> kv.pull(3, out = weight)
>>> # weight is updated via gradient descent
>>> weight.asnumpy()
array([[-0.01, -0.01],
[-0.01, -0.01]], dtype=float32)
"""
is_worker = ctypes.c_int()
check_call(_LIB.MXKVStoreIsWorkerNode(ctypes.byref(is_worker)))
# pylint: disable=invalid-name
if 'dist' in self.type and is_worker.value: # pylint: disable=unsupported-membership-test
# send the optimizer to server
try:
                # use ASCII protocol 0, might be slower, but not a big deal
optim_str = py_str(pickle.dumps(optimizer, 0))
except:
raise
cmd = _get_kvstore_server_command_type('kController')
self._send_command_to_servers(cmd, optim_str)
if optimizer.multi_precision:
cmd = _get_kvstore_server_command_type('kSetMultiPrecision')
self._send_command_to_servers(cmd, '')
else:
self._set_updater(opt.get_updater(optimizer))
|
Returns the type of this kvstore.
Returns
-------
type : str
the string type
|
def type(self):
""" Returns the type of this kvstore.
Returns
-------
type : str
the string type
"""
kv_type = ctypes.c_char_p()
check_call(_LIB.MXKVStoreGetType(self.handle, ctypes.byref(kv_type)))
return py_str(kv_type.value)
|
Returns the rank of this worker node.
Returns
-------
rank : int
The rank of this node, which is in range [0, num_workers())
|
def rank(self):
""" Returns the rank of this worker node.
Returns
-------
rank : int
The rank of this node, which is in range [0, num_workers())
"""
rank = ctypes.c_int()
check_call(_LIB.MXKVStoreGetRank(self.handle, ctypes.byref(rank)))
return rank.value
|
Returns the number of worker nodes.
Returns
-------
size :int
The number of worker nodes.
|
def num_workers(self):
"""Returns the number of worker nodes.
Returns
-------
size :int
The number of worker nodes.
"""
size = ctypes.c_int()
check_call(_LIB.MXKVStoreGetGroupSize(self.handle, ctypes.byref(size)))
return size.value
|
Saves the optimizer (updater) state to a file. This is often used when checkpointing
the model during training.
Parameters
----------
fname : str
Path to the output states file.
dump_optimizer : bool, default False
Whether to also save the optimizer itself. This would also save optimizer
information such as learning rate and weight decay schedules.
|
def save_optimizer_states(self, fname, dump_optimizer=False):
"""Saves the optimizer (updater) state to a file. This is often used when checkpointing
the model during training.
Parameters
----------
fname : str
Path to the output states file.
dump_optimizer : bool, default False
Whether to also save the optimizer itself. This would also save optimizer
information such as learning rate and weight decay schedules.
"""
assert self._updater is not None, "Cannot save states for distributed training"
with open(fname, 'wb') as fout:
fout.write(self._updater.get_states(dump_optimizer))
|
Loads the optimizer (updater) state from the file.
Parameters
----------
fname : str
Path to input states file.
|
def load_optimizer_states(self, fname):
"""Loads the optimizer (updater) state from the file.
Parameters
----------
fname : str
Path to input states file.
"""
assert self._updater is not None, "Cannot load states for distributed training"
        with open(fname, 'rb') as fin:
            self._updater.set_states(fin.read())
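A checkpointing sketch for a local kvstore that owns the updater (the file name is illustrative):

import mxnet as mx

kv = mx.kv.create('local')
kv.set_optimizer(mx.optimizer.SGD(learning_rate=0.1))
kv.save_optimizer_states('optimizer.states')
# ... later, e.g. when resuming training ...
kv.load_optimizer_states('optimizer.states')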
|
Sets a push updater into the store.
This function only changes the local store. When running on multiple machines one must
use `set_optimizer`.
Parameters
----------
updater : function
The updater function.
Examples
--------
>>> def update(key, input, stored):
... print "update on key: %d" % key
... stored += input * 2
>>> kv._set_updater(update)
>>> kv.pull('3', out=a)
>>> print a.asnumpy()
[[ 4. 4. 4.]
[ 4. 4. 4.]]
>>> kv.push('3', mx.nd.ones(shape))
update on key: 3
>>> kv.pull('3', out=a)
>>> print a.asnumpy()
[[ 6. 6. 6.]
[ 6. 6. 6.]]
|
def _set_updater(self, updater):
"""Sets a push updater into the store.
This function only changes the local store. When running on multiple machines one must
use `set_optimizer`.
Parameters
----------
updater : function
The updater function.
Examples
--------
>>> def update(key, input, stored):
... print "update on key: %d" % key
... stored += input * 2
>>> kv._set_updater(update)
>>> kv.pull('3', out=a)
>>> print a.asnumpy()
[[ 4. 4. 4.]
[ 4. 4. 4.]]
>>> kv.push('3', mx.nd.ones(shape))
update on key: 3
>>> kv.pull('3', out=a)
>>> print a.asnumpy()
[[ 6. 6. 6.]
[ 6. 6. 6.]]
"""
self._updater = updater
# set updater with int keys
_updater_proto = ctypes.CFUNCTYPE(
None, ctypes.c_int, NDArrayHandle, NDArrayHandle, ctypes.c_void_p)
self._updater_func = _updater_proto(_updater_wrapper(updater))
# set updater with str keys
_str_updater_proto = ctypes.CFUNCTYPE(
None, ctypes.c_char_p, NDArrayHandle, NDArrayHandle, ctypes.c_void_p)
self._str_updater_func = _str_updater_proto(_updater_wrapper(updater))
check_call(_LIB.MXKVStoreSetUpdaterEx(self.handle, self._updater_func,
self._str_updater_func, None))
|
Sends a command to all server nodes.
Sending command to a server node will cause that server node to invoke
``KVStoreServer.controller`` to execute the command.
This function returns after the command has been executed on all server
nodes.
Parameters
----------
head : int
the head of the command.
body : str
the body of the command.
|
def _send_command_to_servers(self, head, body):
"""Sends a command to all server nodes.
Sending command to a server node will cause that server node to invoke
``KVStoreServer.controller`` to execute the command.
This function returns after the command has been executed on all server
nodes.
Parameters
----------
head : int
the head of the command.
body : str
the body of the command.
"""
check_call(_LIB.MXKVStoreSendCommmandToServers(
self.handle, mx_uint(head), c_str(body)))
|
Add a module to the chain.
Parameters
----------
module : BaseModule
The new module to add.
kwargs : ``**keywords``
All the keyword arguments are saved as meta information
for the added module. The currently known meta includes
- `take_labels`: indicating whether the module expects to
take labels when doing computation. Note any module in
the chain can take labels (not necessarily only the
topmost one), and they all take the same labels passed
from the original data batch for the `SequentialModule`.
Returns
-------
self
This function returns `self` to allow us to easily chain a
series of `add` calls.
Examples
--------
>>> # An example of adding two modules to a chain.
>>> seq_mod = mx.mod.SequentialModule()
>>> seq_mod.add(mod1)
>>> seq_mod.add(mod2)
|
def add(self, module, **kwargs):
"""Add a module to the chain.
Parameters
----------
module : BaseModule
The new module to add.
kwargs : ``**keywords``
All the keyword arguments are saved as meta information
for the added module. The currently known meta includes
            - `take_labels`: indicating whether the module expects to
              take labels when doing computation. Note any module in
              the chain can take labels (not necessarily only the
              topmost one), and they all take the same labels passed
              from the original data batch for the `SequentialModule`.
Returns
-------
self
This function returns `self` to allow us to easily chain a
series of `add` calls.
Examples
--------
        >>> # An example of adding two modules to a chain.
>>> seq_mod = mx.mod.SequentialModule()
>>> seq_mod.add(mod1)
>>> seq_mod.add(mod2)
"""
self._modules.append(module)
# a sanity check to avoid typo
for key in kwargs:
assert key in self._meta_keys, ('Unknown meta "%s", a typo?' % key)
self._metas.append(kwargs)
# after adding new modules, we are reset back to raw states, needs
# to bind, init_params, etc.
self.binded = False
self.params_initialized = False
self.optimizer_initialized = False
return self
|
Gets current parameters.
Returns
-------
(arg_params, aux_params)
A pair of dictionaries each mapping parameter names to NDArray values. This
is a merged dictionary of all the parameters in the modules.
|
def get_params(self):
"""Gets current parameters.
Returns
-------
(arg_params, aux_params)
A pair of dictionaries each mapping parameter names to NDArray values. This
is a merged dictionary of all the parameters in the modules.
"""
assert self.binded and self.params_initialized
arg_params = dict()
aux_params = dict()
for module in self._modules:
arg, aux = module.get_params()
arg_params.update(arg)
aux_params.update(aux)
return (arg_params, aux_params)
|
Initializes parameters.
Parameters
----------
initializer : Initializer
arg_params : dict
Default ``None``. Existing parameters. This has higher priority
than `initializer`.
aux_params : dict
Default ``None``. Existing auxiliary states. This has higher priority
than `initializer`.
allow_missing : bool
Allow missing values in `arg_params` and `aux_params` (if not ``None``).
In this case, missing values will be filled with `initializer`.
force_init : bool
Default ``False``.
allow_extra : boolean, optional
Whether allow extra parameters that are not needed by symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that is not needed by the executor.
|
def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
allow_missing=False, force_init=False, allow_extra=False):
"""Initializes parameters.
Parameters
----------
initializer : Initializer
arg_params : dict
Default ``None``. Existing parameters. This has higher priority
than `initializer`.
aux_params : dict
Default ``None``. Existing auxiliary states. This has higher priority
than `initializer`.
allow_missing : bool
Allow missing values in `arg_params` and `aux_params` (if not ``None``).
In this case, missing values will be filled with `initializer`.
force_init : bool
Default ``False``.
allow_extra : boolean, optional
Whether allow extra parameters that are not needed by symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that is not needed by the executor.
"""
if self.params_initialized and not force_init:
return
assert self.binded, 'call bind before initializing the parameters'
for module in self._modules:
module.init_params(initializer=initializer, arg_params=arg_params,
aux_params=aux_params, allow_missing=allow_missing,
force_init=force_init, allow_extra=allow_extra)
# make sure we do not have duplicated parameter names
def _check_name(known_names, new_names, modules, i):
"""Internal function to help checking duplicated names."""
for name in new_names:
assert not name in known_names, "Duplicated parameter names: " + \
('name "%s" in layer %d (%s) is already ' % (name, i, type(modules[i]))) + \
('used in layer %d (%s).' % (known_names[name],
type(modules[known_names[name]])))
known_names[name] = i
arg_names = dict()
aux_names = dict()
for i_layer, module in enumerate(self._modules):
arg_params, aux_params = module.get_params()
_check_name(arg_names, arg_params.keys(), self._modules, i_layer)
_check_name(aux_names, aux_params.keys(), self._modules, i_layer)
self.params_initialized = True
|
Binds the symbols to construct executors. This is necessary before one
can perform computation with the module.
Parameters
----------
data_shapes : list of (str, tuple)
Typically is `data_iter.provide_data`.
label_shapes : list of (str, tuple)
Typically is `data_iter.provide_label`.
for_training : bool
Default is ``True``. Whether the executors should be bound for training.
inputs_need_grad : bool
Default is ``False``. Whether the gradients to the input data need to be computed.
Typically this is not needed. But this might be needed when implementing composition
of modules.
force_rebind : bool
Default is ``False``. This function does nothing if the executors are already
bound. But with this ``True``, the executors will be forced to rebind.
shared_module : Module
Default is ``None``. Currently shared module is not supported for `SequentialModule`.
grad_req : str, list of str, dict of str to str
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(default to 'write').
Can be specified globally (str) or for each argument (list, dict).
|
def bind(self, data_shapes, label_shapes=None, for_training=True,
inputs_need_grad=False, force_rebind=False, shared_module=None,
grad_req='write'):
"""Binds the symbols to construct executors. This is necessary before one
can perform computation with the module.
Parameters
----------
data_shapes : list of (str, tuple)
Typically is `data_iter.provide_data`.
label_shapes : list of (str, tuple)
Typically is `data_iter.provide_label`.
for_training : bool
            Default is ``True``. Whether the executors should be bound for training.
inputs_need_grad : bool
Default is ``False``. Whether the gradients to the input data need to be computed.
Typically this is not needed. But this might be needed when implementing composition
of modules.
force_rebind : bool
Default is ``False``. This function does nothing if the executors are already
bound. But with this ``True``, the executors will be forced to rebind.
shared_module : Module
Default is ``None``. Currently shared module is not supported for `SequentialModule`.
grad_req : str, list of str, dict of str to str
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(default to 'write').
Can be specified globally (str) or for each argument (list, dict).
"""
if self.binded and not force_rebind:
self.logger.warning('Already bound, ignoring bind()')
return
if inputs_need_grad:
assert for_training is True
assert shared_module is None, 'Shared module is not supported'
assert len(self._modules) > 0, 'Attempting to bind an empty SequentialModule'
self.binded = True
# the same label shapes are used for all chained modules
self._label_shapes = label_shapes
my_data_shapes = data_shapes
anybody_ever_needs_label = False
for i_layer, module in enumerate(self._modules):
meta = self._metas[i_layer]
if SequentialModule.META_TAKE_LABELS in meta and \
meta[SequentialModule.META_TAKE_LABELS]:
my_label_shapes = label_shapes
anybody_ever_needs_label = True
else:
my_label_shapes = None
my_inputs_need_grad = bool(inputs_need_grad or
(for_training and i_layer > 0))
if meta.get(SequentialModule.META_AUTO_WIRING, False):
data_names = module.data_names
assert len(data_names) == len(my_data_shapes)
my_data_shapes = [(new_name, shape) for (new_name, (_, shape))
in zip(data_names, my_data_shapes)]
module.bind(data_shapes=my_data_shapes, label_shapes=my_label_shapes,
for_training=for_training, inputs_need_grad=my_inputs_need_grad,
force_rebind=force_rebind, shared_module=None, grad_req=grad_req)
# the output of the previous module is the data of the next module
my_data_shapes = module.output_shapes
if not anybody_ever_needs_label:
# then I do not need label either
self._label_shapes = None
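A bind sketch for a two-stage chain (module and layer names are illustrative; `auto_wiring` feeds the previous module's outputs in as this module's data):

import mxnet as mx

net1 = mx.sym.FullyConnected(mx.sym.Variable('data'), num_hidden=128, name='fc1')
net2 = mx.sym.SoftmaxOutput(mx.sym.FullyConnected(mx.sym.Variable('data'), num_hidden=10, name='fc2'),
                            name='softmax')
mod1 = mx.mod.Module(net1, label_names=None)
mod2 = mx.mod.Module(net2)
seq = mx.mod.SequentialModule()
seq.add(mod1).add(mod2, take_labels=True, auto_wiring=True)
seq.bind(data_shapes=[('data', (32, 100))],
         label_shapes=[('softmax_label', (32,))])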
|
Installs and initializes optimizers.
Parameters
----------
kvstore : str or KVStore
Default `'local'`.
optimizer : str or Optimizer
Default `'sgd'`
optimizer_params : dict
Default ``(('learning_rate', 0.01),)``. The default value is not a dictionary,
just to avoid pylint warning of dangerous default values.
force_init : bool
Default ``False``, indicating whether we should force re-initializing the
optimizer in the case an optimizer is already installed.
|
def init_optimizer(self, kvstore='local', optimizer='sgd',
optimizer_params=(('learning_rate', 0.01),),
force_init=False):
"""Installs and initializes optimizers.
Parameters
----------
kvstore : str or KVStore
Default `'local'`.
optimizer : str or Optimizer
Default `'sgd'`
optimizer_params : dict
Default ``(('learning_rate', 0.01),)``. The default value is not a dictionary,
just to avoid pylint warning of dangerous default values.
force_init : bool
Default ``False``, indicating whether we should force re-initializing the
optimizer in the case an optimizer is already installed.
"""
assert self.binded and self.params_initialized
if self.optimizer_initialized and not force_init:
self.logger.warning('optimizer already initialized, ignoring.')
return
for module in self._modules:
module.init_optimizer(kvstore=kvstore, optimizer=optimizer,
optimizer_params=optimizer_params, force_init=force_init)
self.optimizer_initialized = True
|
Forward computation.
Parameters
----------
data_batch : DataBatch
is_train : bool
Default is ``None``, in which case `is_train` is taken as ``self.for_training``.
|
def forward(self, data_batch, is_train=None):
"""Forward computation.
Parameters
----------
data_batch : DataBatch
is_train : bool
            Default is ``None``, in which case `is_train` is taken as ``self.for_training``.
"""
assert self.binded and self.params_initialized
# make a shallow copy, just to maintain necessary properties (if any) like
# bucket_key, pad, etc.
data_batch = copy.copy(data_batch)
for i_layer, module in enumerate(self._modules):
module.forward(data_batch, is_train=is_train)
if i_layer+1 == len(self._modules):
                # the last layer, no need to do the following
break
data_batch.data = module.get_outputs()
if hasattr(data_batch, 'provide_data'):
# need to update this, in case the internal module is using bucketing
# or whatever
data_names = [x[0] for x in module.output_shapes]
assert len(data_names) == len(data_batch.data)
data_batch.provide_data = [(name, x.shape) for name, x in
zip(data_names, data_batch.data)]
|
Backward computation.
|
def backward(self, out_grads=None):
"""Backward computation."""
assert self.binded and self.params_initialized
for i_layer, module in reversed(list(zip(range(len(self._modules)), self._modules))):
module.backward(out_grads=out_grads)
if i_layer == 0:
break
out_grads = module.get_input_grads()
|
Updates parameters according to installed optimizer and the gradient computed
in the previous forward-backward cycle.
|
def update(self):
"""Updates parameters according to installed optimizer and the gradient computed
in the previous forward-backward cycle.
"""
assert self.binded and self.params_initialized and self.optimizer_initialized
for module in self._modules:
module.update()
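Together, `forward`, `backward`, and `update` form one training step. A minimal
sketch, assuming `batch` is a DataBatch drawn from an iterator set up elsewhere:
seq.forward(batch, is_train=True)   # data flows module by module
seq.backward()                      # gradients flow back in reverse order
seq.update()                        # every module applies its optimizer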
|
Gets outputs from a previous forward computation.
Parameters
----------
merge_multi_context : bool
    Default is ``True``. In the case when data-parallelism is used, the outputs
    will be collected from multiple devices. A ``True`` value indicates that we
    should merge the collected results so that they look like the output of a
    single executor.
Returns
-------
list of NDArray or list of list of NDArray
If `merge_multi_context` is ``True``, it is like ``[out1,
out2]``. Otherwise, it is like ``[[out1_dev1, out1_dev2], [out2_dev1,
    out2_dev2]]``. All the output elements are `NDArray`.
|
def get_outputs(self, merge_multi_context=True):
"""Gets outputs from a previous forward computation.
Parameters
----------
merge_multi_context : bool
            Default is ``True``. In the case when data-parallelism is used, the outputs
            will be collected from multiple devices. A ``True`` value indicates that we
            should merge the collected results so that they look like the output of a
            single executor.
Returns
-------
list of NDArray or list of list of NDArray
If `merge_multi_context` is ``True``, it is like ``[out1,
out2]``. Otherwise, it is like ``[[out1_dev1, out1_dev2], [out2_dev1,
            out2_dev2]]``. All the output elements are `NDArray`.
"""
assert self.binded and self.params_initialized
return self._modules[-1].get_outputs(merge_multi_context=merge_multi_context)
|
Gets the gradients with respect to the inputs of the module.
Parameters
----------
merge_multi_context : bool
    Default is ``True``. In the case when data-parallelism is used, the outputs
    will be collected from multiple devices. A ``True`` value indicates that we
    should merge the collected results so that they look like the output of a
    single executor.
Returns
-------
list of NDArrays or list of list of NDArrays
If `merge_multi_context` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it
is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output
elements are `NDArray`.
|
def get_input_grads(self, merge_multi_context=True):
"""Gets the gradients with respect to the inputs of the module.
Parameters
----------
merge_multi_context : bool
            Default is ``True``. In the case when data-parallelism is used, the outputs
            will be collected from multiple devices. A ``True`` value indicates that we
            should merge the collected results so that they look like the output of a
            single executor.
Returns
-------
list of NDArrays or list of list of NDArrays
If `merge_multi_context` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it
is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output
elements are `NDArray`.
"""
assert self.binded and self.params_initialized and self.inputs_need_grad
return self._modules[0].get_input_grads(merge_multi_context=merge_multi_context)
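A hedged sketch of retrieving input gradients; it assumes `bind` was called with
`inputs_need_grad=True` and that `batch` comes from an iterator:
seq.forward(batch, is_train=True)
seq.backward()
input_grads = seq.get_input_grads()   # one NDArray per bound input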
|
Evaluates and accumulates evaluation metric on outputs of the last forward computation.
Parameters
----------
eval_metric : EvalMetric
labels : list of NDArray
    Typically ``data_batch.label``.
pre_sliced : bool
    Whether the labels are already sliced per device (default: False).
|
def update_metric(self, eval_metric, labels, pre_sliced=False):
"""Evaluates and accumulates evaluation metric on outputs of the last forward computation.
Parameters
----------
        eval_metric : EvalMetric
        labels : list of NDArray
            Typically ``data_batch.label``.
        pre_sliced : bool
            Whether the labels are already sliced per device (default: False).
"""
assert self.binded and self.params_initialized
for meta, module in zip(self._metas, self._modules):
if SequentialModule.META_TAKE_LABELS in meta and \
meta[SequentialModule.META_TAKE_LABELS]:
module.update_metric(eval_metric, labels, pre_sliced)
|
Installs monitor on all executors.
|
def install_monitor(self, mon):
"""Installs monitor on all executors."""
assert self.binded
for module in self._modules:
module.install_monitor(mon)
|
Generate the iterator of mnist dataset
|
def get_iterator(data_shape, use_caffe_data):
"""Generate the iterator of mnist dataset"""
def get_iterator_impl_mnist(args, kv):
"""return train and val iterators for mnist"""
# download data
get_mnist_ubyte()
        flat = len(data_shape) == 1
train = mx.io.MNISTIter(
image="data/train-images-idx3-ubyte",
label="data/train-labels-idx1-ubyte",
input_shape=data_shape,
batch_size=args.batch_size,
shuffle=True,
flat=flat,
num_parts=kv.num_workers,
part_index=kv.rank)
val = mx.io.MNISTIter(
image="data/t10k-images-idx3-ubyte",
label="data/t10k-labels-idx1-ubyte",
input_shape=data_shape,
batch_size=args.batch_size,
flat=flat,
num_parts=kv.num_workers,
part_index=kv.rank)
return (train, val)
    def get_iterator_impl_caffe(args, kv):
        """return train and val iterators for mnist read through the Caffe data layer"""
        flat = len(data_shape) == 1
train = mx.io.CaffeDataIter(
prototxt=
'layer { \
name: "mnist" \
type: "Data" \
top: "data" \
top: "label" \
include { \
phase: TRAIN \
} \
transform_param { \
scale: 0.00390625 \
} \
data_param { \
source: "mnist_train_lmdb" \
batch_size: 64 \
backend: LMDB \
} \
}',
flat=flat,
num_examples=60000
            # dtype omitted here to illustrate that float32 is the default
)
val = mx.io.CaffeDataIter(
prototxt=
'layer { \
name: "mnist" \
type: "Data" \
top: "data" \
top: "label" \
include { \
phase: TEST \
} \
transform_param { \
scale: 0.00390625 \
} \
data_param { \
source: "mnist_test_lmdb" \
batch_size: 100 \
backend: LMDB \
} \
}',
flat=flat,
num_examples=10000,
dtype="float32" # float32 is the default
)
return train, val
if use_caffe_data:
return get_iterator_impl_caffe
else:
return get_iterator_impl_mnist
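A usage sketch of the factory above; `args` (carrying `batch_size`) and the kvstore
are assumed to come from the surrounding training script:
kv = mx.kv.create('local')
make_iters = get_iterator(data_shape=(784,), use_caffe_data=False)
train_iter, val_iter = make_iters(args, kv)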
|
Run predictions on the audio files in the directory `prediction_dir`.
Parameters
----------
prediction_dir : string, default './Test'
    The directory containing the audio files on which predictions are to be made.
|
def predict(prediction_dir='./Test'):
"""The function is used to run predictions on the audio files in the directory `pred_directory`.
Parameters
----------
net:
The model that has been trained.
prediction_dir: string, default ./Test
The directory that contains the audio files on which predictions are to be made
"""
if not os.path.exists(prediction_dir):
warnings.warn("The directory on which predictions are to be made is not found!")
return
if len(os.listdir(prediction_dir)) == 0:
warnings.warn("The directory on which predictions are to be made is empty! Exiting...")
return
# Loading synsets
if not os.path.exists('./synset.txt'):
warnings.warn("The synset or labels for the dataset do not exist. Please run the training script first.")
return
with open("./synset.txt", "r") as f:
synset = [l.rstrip() for l in f]
net = get_net(len(synset))
print("Trying to load the model with the saved parameters...")
if not os.path.exists("./net.params"):
warnings.warn("The model does not have any saved parameters... Cannot proceed! Train the model first")
return
net.load_parameters("./net.params")
file_names = os.listdir(prediction_dir)
full_file_names = [os.path.join(prediction_dir, item) for item in file_names]
from transforms import MFCC
mfcc = MFCC()
print("\nStarting predictions for audio files in ", prediction_dir, " ....\n")
for filename in full_file_names:
        # res_type='kaiser_fast' is faster than 'kaiser_best'; use it to reduce the load time.
X1, _ = librosa.load(filename, res_type='kaiser_fast')
transformed_test_data = mfcc(mx.nd.array(X1))
output = net(transformed_test_data.reshape((1, -1)))
prediction = nd.argmax(output, axis=1)
        print(filename, " -> ", synset[int(prediction.asscalar())])
|
Process loop for generating data
Parameters
----------
proc_id: int
Process id
alive: multiprocessing.Value
variable for signaling whether process should continue or not
queue: multiprocessing.Queue
queue for passing data back
fn: function
function object that returns a sample to be pushed into the queue
|
def _proc_loop(proc_id, alive, queue, fn):
"""Thread loop for generating data
Parameters
----------
proc_id: int
Process id
alive: multiprocessing.Value
variable for signaling whether process should continue or not
queue: multiprocessing.Queue
queue for passing data back
fn: function
function object that returns a sample to be pushed into the queue
"""
print("proc {} started".format(proc_id))
try:
while alive.value:
data = fn()
put_success = False
while alive.value and not put_success:
try:
queue.put(data, timeout=0.5)
put_success = True
except QFullExcept:
# print("Queue Full")
pass
except KeyboardInterrupt:
print("W: interrupt received, stopping process {} ...".format(proc_id))
print("Closing process {}".format(proc_id))
queue.close()
|
Start processes if not already started
|
def _init_proc(self):
"""Start processes if not already started"""
if not self.proc:
self.proc = [
mp.Process(target=self._proc_loop, args=(i, self.alive, self.queue, self.fn))
for i in range(self.num_proc)
]
self.alive.value = True
for p in self.proc:
p.start()
|
Resets the generator by stopping all processes
|
def reset(self):
"""Resets the generator by stopping all processes"""
self.alive.value = False
qsize = 0
try:
while True:
self.queue.get(timeout=0.1)
qsize += 1
except QEmptyExcept:
pass
print("Queue size on reset: {}".format(qsize))
for i, p in enumerate(self.proc):
p.join()
self.proc.clear()
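A hypothetical skeleton showing how `_proc_loop`, `_init_proc`, and `reset` usually
hang together; the class name, `__init__`, and `get` are illustrative, not the
original class:
import multiprocessing as mp
from queue import Empty as QEmptyExcept, Full as QFullExcept

class SampleGenerator(object):
    def __init__(self, fn, num_proc=4, max_queue_size=64):
        self.fn = fn                        # callable producing one sample
        self.num_proc = num_proc
        self.queue = mp.Queue(maxsize=max_queue_size)
        self.alive = mp.Value('b', False)   # shared flag the workers poll
        self.proc = []

    _proc_loop = staticmethod(_proc_loop)   # worker body defined above
    # _init_proc and reset are the methods defined above

    def get(self):
        """Start workers lazily and block until a sample arrives."""
        self._init_proc()
        return self.queue.get()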
|
Create a base class with a metaclass.
|
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(type):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
return type.__new__(metaclass, 'temporary_class', (), {})
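For example, the following works identically under Python 2 and 3; `Meta` is an
illustrative metaclass that stamps a `tag` attribute on every class it creates:
class Meta(type):
    def __new__(mcs, name, bases, attrs):
        attrs.setdefault('tag', name.lower())
        return super(Meta, mcs).__new__(mcs, name, bases, attrs)

class Base(with_metaclass(Meta, object)):
    pass

print(Base.tag)   # -> 'base'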
|
Load library by searching possible path.
|
def _load_lib():
"""Load library by searching possible path."""
lib_path = libinfo.find_lib_path()
lib = ctypes.CDLL(lib_path[0], ctypes.RTLD_LOCAL)
# DMatrix functions
lib.MXGetLastError.restype = ctypes.c_char_p
return lib
|
Create ctypes array from a Python array.
Parameters
----------
ctype : ctypes data type
Data type of the array we want to convert to, such as mx_float.
values : tuple or list
Data content.
Returns
-------
out : ctypes array
Created ctypes array.
Examples
--------
>>> x = mx.base.c_array(mx.base.mx_float, [1, 2, 3])
>>> print(len(x))
3
>>> x[1]
2.0
|
def c_array(ctype, values):
"""Create ctypes array from a Python array.
Parameters
----------
ctype : ctypes data type
Data type of the array we want to convert to, such as mx_float.
values : tuple or list
Data content.
Returns
-------
out : ctypes array
Created ctypes array.
Examples
--------
>>> x = mx.base.c_array(mx.base.mx_float, [1, 2, 3])
    >>> print(len(x))
3
>>> x[1]
2.0
"""
out = (ctype * len(values))()
out[:] = values
return out
|
Create ctypes const void ** from a list of MXNet objects with handles.
Parameters
----------
objs : list of NDArray/Symbol.
MXNet objects.
Returns
-------
(ctypes.c_void_p * len(objs))
A void ** pointer that can be passed to C API.
|
def c_handle_array(objs):
"""Create ctypes const void ** from a list of MXNet objects with handles.
Parameters
----------
objs : list of NDArray/Symbol.
MXNet objects.
Returns
-------
(ctypes.c_void_p * len(objs))
A void ** pointer that can be passed to C API.
"""
arr = (ctypes.c_void_p * len(objs))()
arr[:] = [o.handle for o in objs]
return arr
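A short sketch (assumes mxnet is importable as mx); the resulting array can be
passed wherever the C API expects an array of NDArray handles:
import mxnet as mx

arrs = [mx.nd.zeros((2, 2)), mx.nd.ones((3,))]
handles = c_handle_array(arrs)    # ctypes void** over the NDArray handles
assert len(handles) == len(arrs)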
|
Convert a ctypes pointer to a numpy array.
The resulting NumPy array shares the memory with the pointer.
Parameters
----------
cptr : ctypes.POINTER(mx_float)
pointer to the memory region
shape : tuple
Shape of target `NDArray`.
Returns
-------
out : numpy_array
A numpy array : numpy array.
|
def ctypes2numpy_shared(cptr, shape):
"""Convert a ctypes pointer to a numpy array.
The resulting NumPy array shares the memory with the pointer.
Parameters
----------
cptr : ctypes.POINTER(mx_float)
pointer to the memory region
shape : tuple
Shape of target `NDArray`.
Returns
-------
out : numpy_array
A numpy array : numpy array.
"""
if not isinstance(cptr, ctypes.POINTER(mx_float)):
raise RuntimeError('expected float pointer')
size = 1
for s in shape:
size *= s
dbuffer = (mx_float * size).from_address(ctypes.addressof(cptr.contents))
return _np.frombuffer(dbuffer, dtype=_np.float32).reshape(shape)
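A sketch of the sharing behaviour; the buffer setup is illustrative (`mx_float` is
`ctypes.c_float` in mxnet.base):
import ctypes
import numpy as _np

buf = (mx_float * 6)(*range(6))
arr = ctypes2numpy_shared(ctypes.cast(buf, ctypes.POINTER(mx_float)), (2, 3))
arr[0, 0] = 42.0
assert buf[0] == 42.0    # the numpy view writes through: no copy was made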
|
Build argument docs in python style.
Parameters
----------
arg_names : list of str
Argument names.
arg_types : list of str
Argument type information.
arg_descs : list of str
Argument description information.
remove_dup : boolean, optional
    Whether to remove duplication or not.
Returns
-------
docstr : str
Python docstring of parameter sections.
|
def build_param_doc(arg_names, arg_types, arg_descs, remove_dup=True):
"""Build argument docs in python style.
arg_names : list of str
Argument names.
arg_types : list of str
Argument type information.
arg_descs : list of str
Argument description information.
    remove_dup : boolean, optional
        Whether to remove duplication or not.
Returns
-------
docstr : str
Python docstring of parameter sections.
"""
param_keys = set()
param_str = []
for key, type_info, desc in zip(arg_names, arg_types, arg_descs):
if key in param_keys and remove_dup:
continue
if key == 'num_args':
continue
param_keys.add(key)
ret = '%s : %s' % (key, type_info)
if len(desc) != 0:
ret += '\n ' + desc
param_str.append(ret)
doc_str = ('Parameters\n' +
'----------\n' +
'%s\n')
doc_str = doc_str % ('\n'.join(param_str))
return doc_str
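For example (the argument metadata is made up), the function renders a
numpydoc-style block:
doc = build_param_doc(['alpha', 'beta'],
                      ['float', 'int, optional'],
                      ['Scaling factor.', 'Number of repeats.'])
print(doc)   # prints a "Parameters / ----------" block with one entry per argument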
|
Append the definition position to each function contained in module.
Examples
--------
# Put the following codes at the end of a file
add_fileline_to_docstring(__name__)
|
def add_fileline_to_docstring(module, incursive=True):
"""Append the definition position to each function contained in module.
Examples
--------
# Put the following codes at the end of a file
add_fileline_to_docstring(__name__)
"""
def _add_fileline(obj):
"""Add fileinto to a object.
"""
if obj.__doc__ is None or 'From:' in obj.__doc__:
return
fname = inspect.getsourcefile(obj)
if fname is None:
return
try:
line = inspect.getsourcelines(obj)[-1]
except IOError:
return
obj.__doc__ += '\n\nFrom:%s:%d' % (fname, line)
if isinstance(module, str):
module = sys.modules[module]
for _, obj in inspect.getmembers(module):
if inspect.isbuiltin(obj):
continue
if inspect.isfunction(obj):
_add_fileline(obj)
if inspect.ismethod(obj):
_add_fileline(obj.__func__)
if inspect.isclass(obj) and incursive:
add_fileline_to_docstring(obj, False)
|