text_prompt | code_prompt |
---|---|
<SYSTEM_TASK:>
Generate the iterator for the MNIST dataset
<END_TASK>
<USER_TASK:>
Description:
def get_iterator(data_shape, use_caffe_data):
"""Generate the iterator of mnist dataset""" |
def get_iterator_impl_mnist(args, kv):
"""return train and val iterators for mnist"""
# download data
get_mnist_ubyte()
flat = len(data_shape) == 1
train = mx.io.MNISTIter(
image="data/train-images-idx3-ubyte",
label="data/train-labels-idx1-ubyte",
input_shape=data_shape,
batch_size=args.batch_size,
shuffle=True,
flat=flat,
num_parts=kv.num_workers,
part_index=kv.rank)
val = mx.io.MNISTIter(
image="data/t10k-images-idx3-ubyte",
label="data/t10k-labels-idx1-ubyte",
input_shape=data_shape,
batch_size=args.batch_size,
flat=flat,
num_parts=kv.num_workers,
part_index=kv.rank)
return (train, val)
def get_iterator_impl_caffe(args, kv):
flat = len(data_shape) == 1
train = mx.io.CaffeDataIter(
prototxt=
'layer { \
name: "mnist" \
type: "Data" \
top: "data" \
top: "label" \
include { \
phase: TRAIN \
} \
transform_param { \
scale: 0.00390625 \
} \
data_param { \
source: "mnist_train_lmdb" \
batch_size: 64 \
backend: LMDB \
} \
}',
flat=flat,
num_examples=60000
# float32 is the default, so left out here in order to illustrate
)
val = mx.io.CaffeDataIter(
prototxt=
'layer { \
name: "mnist" \
type: "Data" \
top: "data" \
top: "label" \
include { \
phase: TEST \
} \
transform_param { \
scale: 0.00390625 \
} \
data_param { \
source: "mnist_test_lmdb" \
batch_size: 100 \
backend: LMDB \
} \
}',
flat=flat,
num_examples=10000,
dtype="float32" # float32 is the default
)
return train, val
if use_caffe_data:
return get_iterator_impl_caffe
else:
return get_iterator_impl_mnist |
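A minimal usage sketch, assuming the surrounding training script's `args` (with a `batch_size` field) and a kvstore handle; an argparse.Namespace and a local kvstore stand in for them here:
import argparse
import mxnet as mx

args = argparse.Namespace(batch_size=64)    # stand-in for the script's parsed arguments
kv = mx.kvstore.create('local')             # provides num_workers and rank
train_iter, val_iter = get_iterator(data_shape=(784,), use_caffe_data=False)(args, kv)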
<SYSTEM_TASK:>
The function is used to run predictions on the audio files in the directory `prediction_dir`.
<END_TASK>
<USER_TASK:>
Description:
def predict(prediction_dir='./Test'):
"""The function is used to run predictions on the audio files in the directory `pred_directory`.
Parameters
----------
net:
The model that has been trained.
prediction_dir: string, default ./Test
The directory that contains the audio files on which predictions are to be made
""" |
if not os.path.exists(prediction_dir):
warnings.warn("The directory on which predictions are to be made is not found!")
return
if len(os.listdir(prediction_dir)) == 0:
warnings.warn("The directory on which predictions are to be made is empty! Exiting...")
return
# Loading synsets
if not os.path.exists('./synset.txt'):
warnings.warn("The synset or labels for the dataset do not exist. Please run the training script first.")
return
with open("./synset.txt", "r") as f:
synset = [l.rstrip() for l in f]
net = get_net(len(synset))
print("Trying to load the model with the saved parameters...")
if not os.path.exists("./net.params"):
warnings.warn("The model does not have any saved parameters... Cannot proceed! Train the model first")
return
net.load_parameters("./net.params")
file_names = os.listdir(prediction_dir)
full_file_names = [os.path.join(prediction_dir, item) for item in file_names]
from transforms import MFCC
mfcc = MFCC()
print("\nStarting predictions for audio files in ", prediction_dir, " ....\n")
for filename in full_file_names:
# Argument kaiser_fast to res_type is faster than 'kaiser_best'. To reduce the load time, passing kaiser_fast.
X1, _ = librosa.load(filename, res_type='kaiser_fast')
transformed_test_data = mfcc(mx.nd.array(X1))
output = net(transformed_test_data.reshape((1, -1)))
prediction = nd.argmax(output, axis=1)
print(filename, " -> ", synset[int(prediction.asscalar())]) |
<SYSTEM_TASK:>
Process loop for generating data
<END_TASK>
<USER_TASK:>
Description:
def _proc_loop(proc_id, alive, queue, fn):
"""Thread loop for generating data
Parameters
----------
proc_id: int
Process id
alive: multiprocessing.Value
variable for signaling whether process should continue or not
queue: multiprocessing.Queue
queue for passing data back
fn: function
function object that returns a sample to be pushed into the queue
""" |
print("proc {} started".format(proc_id))
try:
while alive.value:
data = fn()
put_success = False
while alive.value and not put_success:
try:
queue.put(data, timeout=0.5)
put_success = True
except QFullExcept:
# print("Queue Full")
pass
except KeyboardInterrupt:
print("W: interrupt received, stopping process {} ...".format(proc_id))
print("Closing process {}".format(proc_id))
queue.close() |
<SYSTEM_TASK:>
Start processes if not already started
<END_TASK>
<USER_TASK:>
Description:
def _init_proc(self):
"""Start processes if not already started""" |
if not self.proc:
self.proc = [
mp.Process(target=self._proc_loop, args=(i, self.alive, self.queue, self.fn))
for i in range(self.num_proc)
]
self.alive.value = True
for p in self.proc:
p.start() |
<SYSTEM_TASK:>
Resets the generator by stopping all processes
<END_TASK>
<USER_TASK:>
Description:
def reset(self):
"""Resets the generator by stopping all processes""" |
self.alive.value = False
qsize = 0
try:
while True:
self.queue.get(timeout=0.1)
qsize += 1
except QEmptyExcept:
pass
print("Queue size on reset: {}".format(qsize))
for i, p in enumerate(self.proc):
p.join()
self.proc.clear() |
<SYSTEM_TASK:>
Load library by searching possible path.
<END_TASK>
<USER_TASK:>
Description:
def _load_lib():
"""Load library by searching possible path.""" |
lib_path = libinfo.find_lib_path()
lib = ctypes.CDLL(lib_path[0], ctypes.RTLD_LOCAL)
# DMatrix functions
lib.MXGetLastError.restype = ctypes.c_char_p
return lib |
<SYSTEM_TASK:>
Create ctypes array from a Python array.
<END_TASK>
<USER_TASK:>
Description:
def c_array(ctype, values):
"""Create ctypes array from a Python array.
Parameters
----------
ctype : ctypes data type
Data type of the array we want to convert to, such as mx_float.
values : tuple or list
Data content.
Returns
-------
out : ctypes array
Created ctypes array.
Examples
--------
>>> x = mx.base.c_array(mx.base.mx_float, [1, 2, 3])
>>> print len(x)
3
>>> x[1]
2.0
""" |
out = (ctype * len(values))()
out[:] = values
return out |
<SYSTEM_TASK:>
Convert a ctypes pointer to a numpy array.
<END_TASK>
<USER_TASK:>
Description:
def ctypes2numpy_shared(cptr, shape):
"""Convert a ctypes pointer to a numpy array.
The resulting NumPy array shares the memory with the pointer.
Parameters
----------
cptr : ctypes.POINTER(mx_float)
pointer to the memory region
shape : tuple
Shape of target `NDArray`.
Returns
-------
out : numpy_array
A numpy array : numpy array.
""" |
if not isinstance(cptr, ctypes.POINTER(mx_float)):
raise RuntimeError('expected float pointer')
size = 1
for s in shape:
size *= s
dbuffer = (mx_float * size).from_address(ctypes.addressof(cptr.contents))
return _np.frombuffer(dbuffer, dtype=_np.float32).reshape(shape) |
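A small illustration of the shared-memory behaviour, assuming `mx_float` is `ctypes.c_float` as in `mxnet.base` (where this helper lives):
import ctypes
import mxnet as mx

buf = (mx.base.mx_float * 4)(1.0, 2.0, 3.0, 4.0)
cptr = ctypes.cast(buf, ctypes.POINTER(mx.base.mx_float))
arr = mx.base.ctypes2numpy_shared(cptr, (2, 2))
arr[0, 0] = 10.0     # writes through to the ctypes buffer
print(buf[0])        # 10.0 -- memory is shared, not copied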
<SYSTEM_TASK:>
Build argument docs in python style.
<END_TASK>
<USER_TASK:>
Description:
def build_param_doc(arg_names, arg_types, arg_descs, remove_dup=True):
"""Build argument docs in python style.
arg_names : list of str
Argument names.
arg_types : list of str
Argument type information.
arg_descs : list of str
Argument description information.
remove_dup : boolean, optional
Whether remove duplication or not.
Returns
-------
docstr : str
Python docstring of parameter sections.
""" |
param_keys = set()
param_str = []
for key, type_info, desc in zip(arg_names, arg_types, arg_descs):
if key in param_keys and remove_dup:
continue
if key == 'num_args':
continue
param_keys.add(key)
ret = '%s : %s' % (key, type_info)
if len(desc) != 0:
ret += '\n ' + desc
param_str.append(ret)
doc_str = ('Parameters\n' +
'----------\n' +
'%s\n')
doc_str = doc_str % ('\n'.join(param_str))
return doc_str |
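A small illustration with hypothetical argument lists (not taken from any real operator):
doc = build_param_doc(
    ['data', 'num_hidden'],
    ['NDArray', 'int'],
    ['Input data.', 'Number of hidden units.'])
print(doc)
# Parameters
# ----------
# data : NDArray
#     Input data.
# num_hidden : int
#     Number of hidden units.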
<SYSTEM_TASK:>
Append the definition position to each function contained in module.
<END_TASK>
<USER_TASK:>
Description:
def add_fileline_to_docstring(module, incursive=True):
"""Append the definition position to each function contained in module.
Examples
--------
# Put the following code at the end of a file
add_fileline_to_docstring(__name__)
""" |
def _add_fileline(obj):
"""Add fileinto to a object.
"""
if obj.__doc__ is None or 'From:' in obj.__doc__:
return
fname = inspect.getsourcefile(obj)
if fname is None:
return
try:
line = inspect.getsourcelines(obj)[-1]
except IOError:
return
obj.__doc__ += '\n\nFrom:%s:%d' % (fname, line)
if isinstance(module, str):
module = sys.modules[module]
for _, obj in inspect.getmembers(module):
if inspect.isbuiltin(obj):
continue
if inspect.isfunction(obj):
_add_fileline(obj)
if inspect.ismethod(obj):
_add_fileline(obj.__func__)
if inspect.isclass(obj) and incursive:
add_fileline_to_docstring(obj, False) |
<SYSTEM_TASK:>
Checks whether the NumPy compatibility is currently turned on.
<END_TASK>
<USER_TASK:>
Description:
def is_np_compat():
"""
Checks whether the NumPy compatibility is currently turned on.
NumPy-compatibility is turned off by default in backend.
Returns
-------
A bool value indicating whether the NumPy compatibility is currently on.
""" |
curr = ctypes.c_bool()
check_call(_LIB.MXIsNumpyCompatible(ctypes.byref(curr)))
return curr.value |
<SYSTEM_TASK:>
Wraps a function with an activated NumPy-compatibility scope. This ensures
<END_TASK>
<USER_TASK:>
Description:
def use_np_compat(func):
"""Wraps a function with an activated NumPy-compatibility scope. This ensures
that the execution of the function is guaranteed with NumPy compatible semantics,
such as zero-dim and zero size tensors.
Example::
import mxnet as mx
@mx.use_np_compat
def scalar_one():
return mx.nd.ones(())
print(scalar_one())
Parameters
----------
func : a user-provided callable function to be scoped by the NumPy compatibility state.
Returns
-------
Function
A function for wrapping the user functions in the NumPy compatibility scope.
""" |
@wraps(func)
def _with_np_compat(*args, **kwargs):
with np_compat(active=True):
return func(*args, **kwargs)
return _with_np_compat |
<SYSTEM_TASK:>
computes the empirical correlation coefficient
<END_TASK>
<USER_TASK:>
Description:
def corr(label, pred):
"""computes the empirical correlation coefficient""" |
numerator1 = label - np.mean(label, axis=0)
numerator2 = pred - np.mean(pred, axis=0)
numerator = np.mean(numerator1 * numerator2, axis=0)
denominator = np.std(label, axis=0) * np.std(pred, axis=0)
return np.mean(numerator / denominator) |
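A quick worked check, assuming numpy is imported as `np` as the function above expects:
label = np.array([[1.0], [2.0], [3.0]])
pred = np.array([[2.0], [4.0], [6.0]])
print(corr(label, pred))   # ~1.0 for perfectly (positively) correlated columns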
<SYSTEM_TASK:>
parses the trained .caffemodel file
<END_TASK>
<USER_TASK:>
Description:
def parse_caffemodel(file_path):
"""
parses the trained .caffemodel file
file_path: /path/to/trained-model.caffemodel
returns: layers
""" |
f = open(file_path, 'rb')
contents = f.read()
f.close()
net_param = caffe_pb2.NetParameter()
net_param.ParseFromString(contents)
layers = find_layers(net_param)
return layers |
<SYSTEM_TASK:>
Crop and normalize an image nd array.
<END_TASK>
<USER_TASK:>
Description:
def transform(data, target_wd, target_ht, is_train, box):
"""Crop and normnalize an image nd array.""" |
if box is not None:
x, y, w, h = box
data = data[y:min(y+h, data.shape[0]), x:min(x+w, data.shape[1])]
# Resize to target_wd * target_ht.
data = mx.image.imresize(data, target_wd, target_ht)
# Normalize in the same way as the pre-trained model.
data = data.astype(np.float32) / 255.0
data = (data - mx.nd.array([0.485, 0.456, 0.406])) / mx.nd.array([0.229, 0.224, 0.225])
if is_train:
if random.random() < 0.5:
data = nd.flip(data, axis=1)
data, _ = mx.image.random_crop(data, (224, 224))
else:
data, _ = mx.image.center_crop(data, (224, 224))
# Transpose from (target_wd, target_ht, 3)
# to (3, target_wd, target_ht).
data = nd.transpose(data, (2, 0, 1))
# If image is greyscale, repeat 3 times to get RGB image.
if data.shape[0] == 1:
data = nd.tile(data, (3, 1, 1))
return data.reshape((1,) + data.shape) |
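A usage sketch; the image path is hypothetical, and `box` would normally come from the CUB200 bounding-box annotations, so None is passed here:
img = mx.image.imread('example.jpg')                        # HWC uint8 NDArray
batch = transform(img, 256, 256, is_train=False, box=None)
print(batch.shape)                                          # (1, 3, 224, 224)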
<SYSTEM_TASK:>
Return training and testing iterator for the CUB200-2011 dataset.
<END_TASK>
<USER_TASK:>
Description:
def cub200_iterator(data_path, batch_k, batch_size, data_shape):
"""Return training and testing iterator for the CUB200-2011 dataset.""" |
return (CUB200Iter(data_path, batch_k, batch_size, data_shape, is_train=True),
CUB200Iter(data_path, batch_k, batch_size, data_shape, is_train=False)) |
<SYSTEM_TASK:>
Load and transform an image.
<END_TASK>
<USER_TASK:>
Description:
def get_image(self, img, is_train):
"""Load and transform an image.""" |
img_arr = mx.image.imread(img)
img_arr = transform(img_arr, 256, 256, is_train, self.boxes[img])
return img_arr |
<SYSTEM_TASK:>
Check the library for compile-time features. The list of features is maintained in libinfo.h and libinfo.cc
<END_TASK>
<USER_TASK:>
Description:
def feature_list():
"""
Check the library for compile-time features. The list of features is maintained in libinfo.h and libinfo.cc
Returns
-------
list
List of :class:`.Feature` objects
""" |
lib_features_c_array = ctypes.POINTER(Feature)()
lib_features_size = ctypes.c_size_t()
check_call(_LIB.MXLibInfoFeatures(ctypes.byref(lib_features_c_array), ctypes.byref(lib_features_size)))
features = [lib_features_c_array[i] for i in range(lib_features_size.value)]
return features |
<SYSTEM_TASK:>
Check for a particular feature by name
<END_TASK>
<USER_TASK:>
Description:
def is_enabled(self, feature_name):
"""
Check for a particular feature by name
Parameters
----------
feature_name: str
The name of a valid feature as string for example 'CUDA'
Returns
-------
Boolean
True if the feature is enabled, False if it is disabled; raises RuntimeError if the feature is not known
""" |
feature_name = feature_name.upper()
if feature_name not in self:
raise RuntimeError("Feature '{}' is unknown, known features are: {}".format(
feature_name, list(self.keys())))
return self[feature_name].enabled |
<SYSTEM_TASK:>
make a directory to store all caches
<END_TASK>
<USER_TASK:>
Description:
def cache_path(self):
"""
make a directory to store all caches
Returns
-------
str
cache path
""" |
cache_path = os.path.join(os.path.dirname(__file__), '..', 'cache')
if not os.path.exists(cache_path):
os.mkdir(cache_path)
return cache_path |
<SYSTEM_TASK:>
Helper function to create multiple random crop augmenters.
<END_TASK>
<USER_TASK:>
Description:
def CreateMultiRandCropAugmenter(min_object_covered=0.1, aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0), min_eject_coverage=0.3,
max_attempts=50, skip_prob=0):
"""Helper function to create multiple random crop augmenters.
Parameters
----------
min_object_covered : float or list of float, default=0.1
The cropped area of the image must contain at least this fraction of
any bounding box supplied. The value of this parameter should be non-negative.
In the case of 0, the cropped area does not need to overlap any of the
bounding boxes supplied.
min_eject_coverage : float or list of float, default=0.3
The minimum coverage of cropped sample w.r.t its original size. With this
constraint, objects that have marginal area after crop will be discarded.
aspect_ratio_range : tuple of floats or list of tuple of floats, default=(0.75, 1.33)
The cropped area of the image must have an aspect ratio = width / height
within this range.
area_range : tuple of floats or list of tuple of floats, default=(0.05, 1.0)
The cropped area of the image must contain a fraction of the supplied
image within in this range.
max_attempts : int or list of int, default=50
Number of attempts at generating a cropped/padded region of the image of the
specified constraints. After max_attempts failures, return the original image.
Examples
--------
>>> # An example of creating multiple random crop augmenters
>>> min_object_covered = [0.1, 0.3, 0.5, 0.7, 0.9] # use 5 augmenters
>>> aspect_ratio_range = (0.75, 1.33) # use same range for all augmenters
>>> area_range = [(0.1, 1.0), (0.2, 1.0), (0.2, 1.0), (0.3, 0.9), (0.5, 1.0)]
>>> min_eject_coverage = 0.3
>>> max_attempts = 50
>>> aug = mx.image.det.CreateMultiRandCropAugmenter(min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range, area_range=area_range,
min_eject_coverage=min_eject_coverage, max_attempts=max_attempts,
skip_prob=0)
>>> aug.dumps() # show some details
""" |
def align_parameters(params):
"""Align parameters as pairs"""
out_params = []
num = 1
for p in params:
if not isinstance(p, list):
p = [p]
out_params.append(p)
num = max(num, len(p))
# align for each param
for k, p in enumerate(out_params):
if len(p) != num:
assert len(p) == 1
out_params[k] = p * num
return out_params
aligned_params = align_parameters([min_object_covered, aspect_ratio_range, area_range,
min_eject_coverage, max_attempts])
augs = []
for moc, arr, ar, mec, ma in zip(*aligned_params):
augs.append(DetRandomCropAug(min_object_covered=moc, aspect_ratio_range=arr,
area_range=ar, min_eject_coverage=mec, max_attempts=ma))
return DetRandomSelectAug(augs, skip_prob=skip_prob) |
<SYSTEM_TASK:>
Create augmenters for detection.
<END_TASK>
<USER_TASK:>
Description:
def CreateDetAugmenter(data_shape, resize=0, rand_crop=0, rand_pad=0, rand_gray=0,
rand_mirror=False, mean=None, std=None, brightness=0, contrast=0,
saturation=0, pca_noise=0, hue=0, inter_method=2, min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33), area_range=(0.05, 3.0),
min_eject_coverage=0.3, max_attempts=50, pad_val=(127, 127, 127)):
"""Create augmenters for detection.
Parameters
----------
data_shape : tuple of int
Shape for output data
resize : int
Resize shorter edge if larger than 0 at the beginning
rand_crop : float
[0, 1], probability to apply random cropping
rand_pad : float
[0, 1], probability to apply random padding
rand_gray : float
[0, 1], probability to convert to grayscale for all channels
rand_mirror : bool
Whether to apply horizontal flip to image with probability 0.5
mean : np.ndarray or None
Mean pixel values for [r, g, b]
std : np.ndarray or None
Standard deviations for [r, g, b]
brightness : float
Brightness jittering range (percent)
contrast : float
Contrast jittering range (percent)
saturation : float
Saturation jittering range (percent)
hue : float
Hue jittering range (percent)
pca_noise : float
Pca noise level (percent)
inter_method : int, default=2(Area-based)
Interpolation method for all resizing operations
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Randomly select from the interpolation methods mentioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
min_object_covered : float
The cropped area of the image must contain at least this fraction of
any bounding box supplied. The value of this parameter should be non-negative.
In the case of 0, the cropped area does not need to overlap any of the
bounding boxes supplied.
min_eject_coverage : float
The minimum coverage of cropped sample w.r.t its original size. With this
constraint, objects that have marginal area after crop will be discarded.
aspect_ratio_range : tuple of floats
The cropped area of the image must have an aspect ratio = width / height
within this range.
area_range : tuple of floats
The cropped area of the image must contain a fraction of the supplied
image within in this range.
max_attempts : int
Number of attempts at generating a cropped/padded region of the image of the
specified constraints. After max_attempts failures, return the original image.
pad_val: float
Pixel value to be filled when padding is enabled. pad_val will automatically
be subtracted by mean and divided by std if applicable.
Examples
--------
>>> # An example of creating multiple augmenters
>>> augs = mx.image.CreateDetAugmenter(data_shape=(3, 300, 300), rand_crop=0.5,
... rand_pad=0.5, rand_mirror=True, mean=True, brightness=0.125, contrast=0.125,
... saturation=0.125, pca_noise=0.05, inter_method=10, min_object_covered=[0.3, 0.5, 0.9],
... area_range=(0.3, 3.0))
>>> # dump the details
>>> for aug in augs:
... aug.dumps()
""" |
auglist = []
if resize > 0:
auglist.append(DetBorrowAug(ResizeAug(resize, inter_method)))
if rand_crop > 0:
crop_augs = CreateMultiRandCropAugmenter(min_object_covered, aspect_ratio_range,
area_range, min_eject_coverage,
max_attempts, skip_prob=(1 - rand_crop))
auglist.append(crop_augs)
if rand_mirror > 0:
auglist.append(DetHorizontalFlipAug(0.5))
# apply random padding as late as possible to save computation
if rand_pad > 0:
pad_aug = DetRandomPadAug(aspect_ratio_range,
(1.0, area_range[1]), max_attempts, pad_val)
auglist.append(DetRandomSelectAug([pad_aug], 1 - rand_pad))
# force resize
auglist.append(DetBorrowAug(ForceResizeAug((data_shape[2], data_shape[1]), inter_method)))
auglist.append(DetBorrowAug(CastAug()))
if brightness or contrast or saturation:
auglist.append(DetBorrowAug(ColorJitterAug(brightness, contrast, saturation)))
if hue:
auglist.append(DetBorrowAug(HueJitterAug(hue)))
if pca_noise > 0:
eigval = np.array([55.46, 4.794, 1.148])
eigvec = np.array([[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]])
auglist.append(DetBorrowAug(LightingAug(pca_noise, eigval, eigvec)))
if rand_gray > 0:
auglist.append(DetBorrowAug(RandomGrayAug(rand_gray)))
if mean is True:
mean = np.array([123.68, 116.28, 103.53])
elif mean is not None:
assert isinstance(mean, np.ndarray) and mean.shape[0] in [1, 3]
if std is True:
std = np.array([58.395, 57.12, 57.375])
elif std is not None:
assert isinstance(std, np.ndarray) and std.shape[0] in [1, 3]
if mean is not None or std is not None:
auglist.append(DetBorrowAug(ColorNormalizeAug(mean, std)))
return auglist |
<SYSTEM_TASK:>
Calculate areas for multiple labels
<END_TASK>
<USER_TASK:>
Description:
def _calculate_areas(self, label):
"""Calculate areas for multiple labels""" |
heights = np.maximum(0, label[:, 3] - label[:, 1])
widths = np.maximum(0, label[:, 2] - label[:, 0])
return heights * widths |
<SYSTEM_TASK:>
Calculate intersect areas, normalized.
<END_TASK>
<USER_TASK:>
Description:
def _intersect(self, label, xmin, ymin, xmax, ymax):
"""Calculate intersect areas, normalized.""" |
left = np.maximum(label[:, 0], xmin)
right = np.minimum(label[:, 2], xmax)
top = np.maximum(label[:, 1], ymin)
bot = np.minimum(label[:, 3], ymax)
invalid = np.where(np.logical_or(left >= right, top >= bot))[0]
out = label.copy()
out[:, 0] = left
out[:, 1] = top
out[:, 2] = right
out[:, 3] = bot
out[invalid, :] = 0
return out |
<SYSTEM_TASK:>
Check if constraints are satisfied
<END_TASK>
<USER_TASK:>
Description:
def _check_satisfy_constraints(self, label, xmin, ymin, xmax, ymax, width, height):
"""Check if constrains are satisfied""" |
if (xmax - xmin) * (ymax - ymin) < 2:
return False # only 1 pixel
x1 = float(xmin) / width
y1 = float(ymin) / height
x2 = float(xmax) / width
y2 = float(ymax) / height
object_areas = self._calculate_areas(label[:, 1:])
valid_objects = np.where(object_areas * width * height > 2)[0]
if valid_objects.size < 1:
return False
intersects = self._intersect(label[valid_objects, 1:], x1, y1, x2, y2)
coverages = self._calculate_areas(intersects) / object_areas[valid_objects]
coverages = coverages[np.where(coverages > 0)[0]]
return coverages.size > 0 and np.amin(coverages) > self.min_object_covered |
<SYSTEM_TASK:>
Convert labels according to crop box
<END_TASK>
<USER_TASK:>
Description:
def _update_labels(self, label, crop_box, height, width):
"""Convert labels according to crop box""" |
xmin = float(crop_box[0]) / width
ymin = float(crop_box[1]) / height
w = float(crop_box[2]) / width
h = float(crop_box[3]) / height
out = label.copy()
out[:, (1, 3)] -= xmin
out[:, (2, 4)] -= ymin
out[:, (1, 3)] /= w
out[:, (2, 4)] /= h
out[:, 1:5] = np.maximum(0, out[:, 1:5])
out[:, 1:5] = np.minimum(1, out[:, 1:5])
coverage = self._calculate_areas(out[:, 1:]) * w * h / self._calculate_areas(label[:, 1:])
valid = np.logical_and(out[:, 3] > out[:, 1], out[:, 4] > out[:, 2])
valid = np.logical_and(valid, coverage > self.min_eject_coverage)
valid = np.where(valid)[0]
if valid.size < 1:
return None
out = out[valid, :]
return out |
<SYSTEM_TASK:>
Propose cropping areas
<END_TASK>
<USER_TASK:>
Description:
def _random_crop_proposal(self, label, height, width):
"""Propose cropping areas""" |
from math import sqrt
if not self.enabled or height <= 0 or width <= 0:
return ()
min_area = self.area_range[0] * height * width
max_area = self.area_range[1] * height * width
for _ in range(self.max_attempts):
ratio = random.uniform(*self.aspect_ratio_range)
if ratio <= 0:
continue
h = int(round(sqrt(min_area / ratio)))
max_h = int(round(sqrt(max_area / ratio)))
if round(max_h * ratio) > width:
# find smallest max_h satisfying round(max_h * ratio) <= width
max_h = int((width + 0.4999999) / ratio)
if max_h > height:
max_h = height
if h > max_h:
h = max_h
if h < max_h:
# generate random h in range [h, max_h]
h = random.randint(h, max_h)
w = int(round(h * ratio))
assert w <= width
# trying to fix rounding problems
area = w * h
if area < min_area:
h += 1
w = int(round(h * ratio))
area = w * h
if area > max_area:
h -= 1
w = int(round(h * ratio))
area = w * h
if not (min_area <= area <= max_area and 0 <= w <= width and 0 <= h <= height):
continue
y = random.randint(0, max(0, height - h))
x = random.randint(0, max(0, width - w))
if self._check_satisfy_constraints(label, x, y, x + w, y + h, width, height):
new_label = self._update_labels(label, (x, y, w, h), height, width)
if new_label is not None:
return (x, y, w, h, new_label)
return () |
<SYSTEM_TASK:>
Update label according to padding region
<END_TASK>
<USER_TASK:>
Description:
def _update_labels(self, label, pad_box, height, width):
"""Update label according to padding region""" |
out = label.copy()
out[:, (1, 3)] = (out[:, (1, 3)] * width + pad_box[0]) / pad_box[2]
out[:, (2, 4)] = (out[:, (2, 4)] * height + pad_box[1]) / pad_box[3]
return out |
<SYSTEM_TASK:>
Generate random padding region
<END_TASK>
<USER_TASK:>
Description:
def _random_pad_proposal(self, label, height, width):
"""Generate random padding region""" |
from math import sqrt
if not self.enabled or height <= 0 or width <= 0:
return ()
min_area = self.area_range[0] * height * width
max_area = self.area_range[1] * height * width
for _ in range(self.max_attempts):
ratio = random.uniform(*self.aspect_ratio_range)
if ratio <= 0:
continue
h = int(round(sqrt(min_area / ratio)))
max_h = int(round(sqrt(max_area / ratio)))
if round(h * ratio) < width:
h = int((width + 0.499999) / ratio)
if h < height:
h = height
if h > max_h:
h = max_h
if h < max_h:
h = random.randint(h, max_h)
w = int(round(h * ratio))
if (h - height) < 2 or (w - width) < 2:
continue # marginal padding is not helpful
y = random.randint(0, max(0, h - height))
x = random.randint(0, max(0, w - width))
new_label = self._update_labels(label, (x, y, w, h), height, width)
return (x, y, w, h, new_label)
return () |
<SYSTEM_TASK:>
Validate label and its shape.
<END_TASK>
<USER_TASK:>
Description:
def _check_valid_label(self, label):
"""Validate label and its shape.""" |
if len(label.shape) != 2 or label.shape[1] < 5:
msg = "Label with shape (1+, 5+) required, %s received." % str(label)
raise RuntimeError(msg)
# nest logical_and: a third positional argument would be treated as the `out` array
valid_label = np.where(np.logical_and(np.logical_and(label[:, 0] >= 0, label[:, 3] > label[:, 1]), label[:, 4] > label[:, 2]))[0]
if valid_label.size < 1:
raise RuntimeError('Invalid label occurs.') |
<SYSTEM_TASK:>
Helper function to estimate label shape
<END_TASK>
<USER_TASK:>
Description:
def _estimate_label_shape(self):
"""Helper function to estimate label shape""" |
max_count = 0
self.reset()
try:
while True:
label, _ = self.next_sample()
label = self._parse_label(label)
max_count = max(max_count, label.shape[0])
except StopIteration:
pass
self.reset()
return (max_count, label.shape[1]) |
<SYSTEM_TASK:>
Helper function to parse object detection label.
<END_TASK>
<USER_TASK:>
Description:
def _parse_label(self, label):
"""Helper function to parse object detection label.
Format for raw label:
n \t k \t ... \t [id \t xmin\t ymin \t xmax \t ymax \t ...] \t [repeat]
where n is the width of header, 2 or larger
k is the width of each object annotation, can be arbitrary, at least 5
""" |
if isinstance(label, nd.NDArray):
label = label.asnumpy()
raw = label.ravel()
if raw.size < 7:
raise RuntimeError("Label shape is invalid: " + str(raw.shape))
header_width = int(raw[0])
obj_width = int(raw[1])
if (raw.size - header_width) % obj_width != 0:
msg = "Label shape %s inconsistent with annotation width %d." \
%(str(raw.shape), obj_width)
raise RuntimeError(msg)
out = np.reshape(raw[header_width:], (-1, obj_width))
# remove bad ground-truths
valid = np.where(np.logical_and(out[:, 3] > out[:, 1], out[:, 4] > out[:, 2]))[0]
if valid.size < 1:
raise RuntimeError('Encounter sample with no valid label.')
return out[valid, :] |
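A minimal sketch of the label layout this parser expects, with illustrative values only:
import numpy as np
raw = np.array([2, 5,                      # header_width=2, each object annotation has width 5
                0, 0.1, 0.2, 0.6, 0.8])    # one object: [class_id, xmin, ymin, xmax, ymax]
parsed = raw[int(raw[0]):].reshape(-1, int(raw[1]))   # what the method returns after validity checks
print(parsed)                              # [[0.  0.1 0.2 0.6 0.8]]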
<SYSTEM_TASK:>
Reshape iterator for data_shape or label_shape.
<END_TASK>
<USER_TASK:>
Description:
def reshape(self, data_shape=None, label_shape=None):
"""Reshape iterator for data_shape or label_shape.
Parameters
----------
data_shape : tuple or None
Reshape the data_shape to the new shape if not None
label_shape : tuple or None
Reshape label shape to new shape if not None
""" |
if data_shape is not None:
self.check_data_shape(data_shape)
self.provide_data = [(self.provide_data[0][0], (self.batch_size,) + data_shape)]
self.data_shape = data_shape
if label_shape is not None:
self.check_label_shape(label_shape)
self.provide_label = [(self.provide_label[0][0], (self.batch_size,) + label_shape)]
self.label_shape = label_shape |
<SYSTEM_TASK:>
Override the helper function for batchifying data
<END_TASK>
<USER_TASK:>
Description:
def _batchify(self, batch_data, batch_label, start=0):
"""Override the helper function for batchifying data""" |
i = start
batch_size = self.batch_size
try:
while i < batch_size:
label, s = self.next_sample()
data = self.imdecode(s)
try:
self.check_valid_image([data])
label = self._parse_label(label)
data, label = self.augmentation_transform(data, label)
self._check_valid_label(label)
except RuntimeError as e:
logging.debug('Invalid image, skipping: %s', str(e))
continue
for datum in [data]:
assert i < batch_size, 'Batch size must be a multiple of augmenter output length'
batch_data[i] = self.postprocess_data(datum)
num_object = label.shape[0]
batch_label[i][0:num_object] = nd.array(label)
if num_object < batch_label[i].shape[0]:
batch_label[i][num_object:] = -1
i += 1
except StopIteration:
if not i:
raise StopIteration
return i |
<SYSTEM_TASK:>
Override the function for returning next batch.
<END_TASK>
<USER_TASK:>
Description:
def next(self):
"""Override the function for returning next batch.""" |
batch_size = self.batch_size
c, h, w = self.data_shape
# if last batch data is rolled over
if self._cache_data is not None:
# check both the data and label have values
assert self._cache_label is not None, "_cache_label didn't have values"
assert self._cache_idx is not None, "_cache_idx didn't have values"
batch_data = self._cache_data
batch_label = self._cache_label
i = self._cache_idx
else:
batch_data = nd.zeros((batch_size, c, h, w))
batch_label = nd.empty(self.provide_label[0][1])
batch_label[:] = -1
i = self._batchify(batch_data, batch_label)
# calculate the padding
pad = batch_size - i
# handle padding for the last batch
if pad != 0:
if self.last_batch_handle == 'discard':
raise StopIteration
# if the option is 'roll_over', throw StopIteration and cache the data
elif self.last_batch_handle == 'roll_over' and \
self._cache_data is None:
self._cache_data = batch_data
self._cache_label = batch_label
self._cache_idx = i
raise StopIteration
else:
_ = self._batchify(batch_data, batch_label, i)
if self.last_batch_handle == 'pad':
self._allow_read = False
else:
self._cache_data = None
self._cache_label = None
self._cache_idx = None
return io.DataBatch([batch_data], [batch_label], pad=pad) |
<SYSTEM_TASK:>
Override: transforms input data with the specified augmentations.
<END_TASK>
<USER_TASK:>
Description:
def augmentation_transform(self, data, label): # pylint: disable=arguments-differ
"""Override Transforms input data with specified augmentations.""" |
for aug in self.auglist:
data, label = aug(data, label)
return (data, label) |
<SYSTEM_TASK:>
Checks if the new label shape is valid
<END_TASK>
<USER_TASK:>
Description:
def check_label_shape(self, label_shape):
"""Checks if the new label shape is valid""" |
if not len(label_shape) == 2:
raise ValueError('label_shape should have length 2')
if label_shape[0] < self.label_shape[0]:
msg = 'Attempts to reduce label count from %d to %d, not allowed.' \
% (self.label_shape[0], label_shape[0])
raise ValueError(msg)
if label_shape[1] != self.provide_label[0][1][2]:
msg = 'label_shape object width inconsistent: %d vs %d.' \
% (self.provide_label[0][1][2], label_shape[1])
raise ValueError(msg) |
<SYSTEM_TASK:>
Enumerate a set of anchors for each aspect ratio wrt an anchor.
<END_TASK>
<USER_TASK:>
Description:
def _ratio_enum(anchor, ratios):
"""
Enumerate a set of anchors for each aspect ratio wrt an anchor.
""" |
w, h, x_ctr, y_ctr = AnchorGenerator._whctrs(anchor)
size = w * h
size_ratios = size / ratios
ws = np.round(np.sqrt(size_ratios))
hs = np.round(ws * ratios)
anchors = AnchorGenerator._mkanchors(ws, hs, x_ctr, y_ctr)
return anchors |
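A worked sketch of the arithmetic, assuming the conventional 16x16 base anchor (0, 0, 15, 15), i.e. w = h = 16 with center (7.5, 7.5):
import numpy as np
w, h = 16.0, 16.0
ratios = np.array([0.5, 1.0, 2.0])
size_ratios = (w * h) / ratios        # [512. 256. 128.]
ws = np.round(np.sqrt(size_ratios))   # [23. 16. 11.]
hs = np.round(ws * ratios)            # [12. 16. 22.]  -> three anchors sharing the same center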
<SYSTEM_TASK:>
Enumerate a set of anchors for each scale wrt an anchor.
<END_TASK>
<USER_TASK:>
Description:
def _scale_enum(anchor, scales):
"""
Enumerate a set of anchors for each scale wrt an anchor.
""" |
w, h, x_ctr, y_ctr = AnchorGenerator._whctrs(anchor)
ws = w * scales
hs = h * scales
anchors = AnchorGenerator._mkanchors(ws, hs, x_ctr, y_ctr)
return anchors |
<SYSTEM_TASK:>
set actual shape of data
<END_TASK>
<USER_TASK:>
Description:
def prepare_data(args):
"""
set actual shape of data
""" |
rnn_type = args.config.get("arch", "rnn_type")
num_rnn_layer = args.config.getint("arch", "num_rnn_layer")
num_hidden_rnn_list = json.loads(args.config.get("arch", "num_hidden_rnn_list"))
batch_size = args.config.getint("common", "batch_size")
if rnn_type == 'lstm':
init_c = [('l%d_init_c' % l, (batch_size, num_hidden_rnn_list[l]))
for l in range(num_rnn_layer)]
init_h = [('l%d_init_h' % l, (batch_size, num_hidden_rnn_list[l]))
for l in range(num_rnn_layer)]
elif rnn_type == 'bilstm':
forward_init_c = [('forward_l%d_init_c' % l, (batch_size, num_hidden_rnn_list[l]))
for l in range(num_rnn_layer)]
backward_init_c = [('backward_l%d_init_c' % l, (batch_size, num_hidden_rnn_list[l]))
for l in range(num_rnn_layer)]
init_c = forward_init_c + backward_init_c
forward_init_h = [('forward_l%d_init_h' % l, (batch_size, num_hidden_rnn_list[l]))
for l in range(num_rnn_layer)]
backward_init_h = [('backward_l%d_init_h' % l, (batch_size, num_hidden_rnn_list[l]))
for l in range(num_rnn_layer)]
init_h = forward_init_h + backward_init_h
elif rnn_type == 'gru':
init_h = [('l%d_init_h' % l, (batch_size, num_hidden_rnn_list[l]))
for l in range(num_rnn_layer)]
elif rnn_type == 'bigru':
forward_init_h = [('forward_l%d_init_h' % l, (batch_size, num_hidden_rnn_list[l]))
for l in range(num_rnn_layer)]
backward_init_h = [('backward_l%d_init_h' % l, (batch_size, num_hidden_rnn_list[l]))
for l in range(num_rnn_layer)]
init_h = forward_init_h + backward_init_h
else:
raise Exception('network type should be one of lstm, bilstm, gru, bigru')
if rnn_type == 'lstm' or rnn_type == 'bilstm':
init_states = init_c + init_h
elif rnn_type == 'gru' or rnn_type == 'bigru':
init_states = init_h
return init_states |
<SYSTEM_TASK:>
Check the difference between predictions from MXNet and CoreML.
<END_TASK>
<USER_TASK:>
Description:
def check_error(model, path, shapes, output = 'softmax_output', verbose = True):
"""
Check the difference between predictions from MXNet and CoreML.
""" |
coreml_model = _coremltools.models.MLModel(path)
input_data = {}
input_data_copy = {}
for ip in shapes:
input_data[ip] = _np.random.rand(*shapes[ip]).astype('f')
input_data_copy[ip] = _np.copy(input_data[ip])
dataIter = _mxnet.io.NDArrayIter(input_data_copy)
mx_out = model.predict(dataIter).flatten()
e_out_dict = coreml_model.predict(_mxnet_remove_batch(input_data))
e_out = e_out_dict[output].flatten()
error = _np.linalg.norm(e_out - mx_out)
if verbose:
print("First few predictions from CoreML : %s" % e_out[0:10])
print("First few predictions from MXNet : %s" % e_out[0:10])
print("L2 Error on random data %s" % error)
return error |
<SYSTEM_TASK:>
Sample from independent categorical distributions
<END_TASK>
<USER_TASK:>
Description:
def sample_categorical(prob, rng):
"""Sample from independent categorical distributions
Each batch is an independent categorical distribution.
Parameters
----------
prob : numpy.ndarray
Probability of the categorical distribution. Shape --> (batch_num, category_num)
rng : numpy.random.RandomState
Returns
-------
ret : numpy.ndarray
Sampling result. Shape --> (batch_num,)
""" |
ret = numpy.empty(prob.shape[0], dtype=numpy.float32)
for ind in range(prob.shape[0]):
ret[ind] = numpy.searchsorted(numpy.cumsum(prob[ind]), rng.rand()).clip(min=0.0, max=prob.shape[1] - 0.5)
return ret |
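A small usage sketch with illustrative probabilities:
import numpy
rng = numpy.random.RandomState(0)
prob = numpy.array([[0.2, 0.8],
                    [0.9, 0.1]])
print(sample_categorical(prob, rng))   # one sampled category index per row, e.g. [1. 0.]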
<SYSTEM_TASK:>
Sample from independent normal distributions
<END_TASK>
<USER_TASK:>
Description:
def sample_normal(mean, var, rng):
"""Sample from independent normal distributions
Each element is an independent normal distribution.
Parameters
----------
mean : numpy.ndarray
Means of the normal distribution. Shape --> (batch_num, sample_dim)
var : numpy.ndarray
Variance of the normal distribution. Shape --> (batch_num, sample_dim)
rng : numpy.random.RandomState
Returns
-------
ret : numpy.ndarray
The sampling result. Shape --> (batch_num, sample_dim)
""" |
ret = numpy.sqrt(var) * rng.randn(*mean.shape) + mean
return ret |
<SYSTEM_TASK:>
NCE-Loss layer under subword-units input.
<END_TASK>
<USER_TASK:>
Description:
def nce_loss_subwords(
data, label, label_mask, label_weight, embed_weight, vocab_size, num_hidden):
"""NCE-Loss layer under subword-units input.
""" |
# get subword-units embedding.
label_units_embed = mx.sym.Embedding(data=label,
input_dim=vocab_size,
weight=embed_weight,
output_dim=num_hidden)
# get valid subword-units embedding with the help of label_mask
# this is achieved by multiplying the padding (unused) units by zero in order to handle variable-length input.
label_units_embed = mx.sym.broadcast_mul(lhs=label_units_embed,
rhs=label_mask,
name='label_units_embed')
# sum over them to get label word embedding.
label_embed = mx.sym.sum(label_units_embed, axis=2, name='label_embed')
# by broadcast_mul and sum you can get prediction scores for all label_embed inputs,
# which is easy to feed into LogisticRegressionOutput and keeps the code concise.
data = mx.sym.Reshape(data=data, shape=(-1, 1, num_hidden))
pred = mx.sym.broadcast_mul(data, label_embed)
pred = mx.sym.sum(data=pred, axis=2)
return mx.sym.LogisticRegressionOutput(data=pred,
label=label_weight) |
<SYSTEM_TASK:>
Download the BSDS500 dataset and return train and test iters.
<END_TASK>
<USER_TASK:>
Description:
def get_dataset(prefetch=False):
"""Download the BSDS500 dataset and return train and test iters.""" |
if path.exists(data_dir):
print(
"Directory {} already exists, skipping.\n"
"To force download and extraction, delete the directory and re-run."
"".format(data_dir),
file=sys.stderr,
)
else:
print("Downloading dataset...", file=sys.stderr)
downloaded_file = download(dataset_url, dirname=datasets_tmpdir)
print("done", file=sys.stderr)
print("Extracting files...", end="", file=sys.stderr)
os.makedirs(data_dir)
os.makedirs(tmp_dir)
with zipfile.ZipFile(downloaded_file) as archive:
archive.extractall(tmp_dir)
shutil.rmtree(datasets_tmpdir)
shutil.copytree(
path.join(tmp_dir, "BSDS500-master", "BSDS500", "data", "images"),
path.join(data_dir, "images"),
)
shutil.copytree(
path.join(tmp_dir, "BSDS500-master", "BSDS500", "data", "groundTruth"),
path.join(data_dir, "groundTruth"),
)
shutil.rmtree(tmp_dir)
print("done", file=sys.stderr)
crop_size = 256
crop_size -= crop_size % upscale_factor
input_crop_size = crop_size // upscale_factor
input_transform = [CenterCropAug((crop_size, crop_size)), ResizeAug(input_crop_size)]
target_transform = [CenterCropAug((crop_size, crop_size))]
iters = (
ImagePairIter(
path.join(data_dir, "images", "train"),
(input_crop_size, input_crop_size),
(crop_size, crop_size),
batch_size,
color_flag,
input_transform,
target_transform,
),
ImagePairIter(
path.join(data_dir, "images", "test"),
(input_crop_size, input_crop_size),
(crop_size, crop_size),
test_batch_size,
color_flag,
input_transform,
target_transform,
),
)
return [PrefetchingIter(i) for i in iters] if prefetch else iters |
<SYSTEM_TASK:>
return one dict which contains "data" and "label"
<END_TASK>
<USER_TASK:>
Description:
def next(self):
"""return one dict which contains "data" and "label" """ |
if self.iter_next():
self.data, self.label = self._read()
return {self.data_name : self.data[0][1],
self.label_name : self.label[0][1]}
else:
raise StopIteration |
<SYSTEM_TASK:>
Construct symbol from onnx graph.
<END_TASK>
<USER_TASK:>
Description:
def from_onnx(self, graph):
"""Construct symbol from onnx graph.
Parameters
----------
graph : onnx protobuf object
The loaded onnx graph
Returns
-------
sym :symbol.Symbol
The returned mxnet symbol
params : dict
A dict of name: nd.array pairs, used as pretrained weights
""" |
# get input, output shapes
self.model_metadata = self.get_graph_metadata(graph)
# parse network inputs, aka parameters
for init_tensor in graph.initializer:
if not init_tensor.name.strip():
raise ValueError("Tensor's name is required.")
self._params[init_tensor.name] = self._parse_array(init_tensor)
# converting GraphProto message
for i in graph.input:
if i.name in self._params:
# i is a param instead of input
self._nodes[i.name] = symbol.Variable(name=i.name,
shape=self._params[i.name].shape)
else:
self._nodes[i.name] = symbol.Variable(name=i.name)
# constructing nodes, nodes are stored as directed acyclic graph
# converting NodeProto message
for node in graph.node:
op_name = node.op_type
node_name = node.name.strip()
node_name = node_name if node_name else None
onnx_attr = self._parse_attr(node.attribute)
inputs = [self._nodes[i] for i in node.input]
mxnet_sym = self._convert_operator(node_name, op_name, onnx_attr, inputs)
for k, i in zip(list(node.output), range(len(mxnet_sym.list_outputs()))):
self._nodes[k] = mxnet_sym[i]
# splitting params into args and aux params
for args in mxnet_sym.list_arguments():
if args in self._params:
self.arg_dict.update({args: nd.array(self._params[args])})
for aux in mxnet_sym.list_auxiliary_states():
if aux in self._params:
self.aux_dict.update({aux: nd.array(self._params[aux])})
# now return the outputs
out = [self._nodes[i.name] for i in graph.output]
if len(out) > 1:
out = symbol.Group(out)
else:
out = out[0]
return out, self.arg_dict, self.aux_dict |
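In practice this method is reached through MXNet's public ONNX import helper; the model path below is hypothetical:
import mxnet as mx
sym, arg_params, aux_params = mx.contrib.onnx.import_model('model.onnx')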
<SYSTEM_TASK:>
Get the model metadata from a given onnx graph.
<END_TASK>
<USER_TASK:>
Description:
def get_graph_metadata(self, graph):
"""
Get the model metadata from a given onnx graph.
""" |
_params = set()
for tensor_vals in graph.initializer:
_params.add(tensor_vals.name)
input_data = []
for graph_input in graph.input:
if graph_input.name not in _params:
shape = [val.dim_value for val in graph_input.type.tensor_type.shape.dim]
input_data.append((graph_input.name, tuple(shape)))
output_data = []
for graph_out in graph.output:
shape = [val.dim_value for val in graph_out.type.tensor_type.shape.dim]
output_data.append((graph_out.name, tuple(shape)))
metadata = {'input_tensor_data' : input_data,
'output_tensor_data' : output_data
}
return metadata |
<SYSTEM_TASK:>
Construct SymbolBlock from onnx graph.
<END_TASK>
<USER_TASK:>
Description:
def graph_to_gluon(self, graph, ctx):
"""Construct SymbolBlock from onnx graph.
Parameters
----------
graph : onnx protobuf object
The loaded onnx graph
ctx : Context or list of Context
Loads the model into one or many context(s).
Returns
-------
sym_block :gluon.nn.SymbolBlock
The returned gluon SymbolBlock
""" |
sym, arg_params, aux_params = self.from_onnx(graph)
metadata = self.get_graph_metadata(graph)
data_names = [input_tensor[0] for input_tensor in metadata['input_tensor_data']]
data_inputs = [symbol.var(data_name) for data_name in data_names]
from ....gluon import SymbolBlock
net = SymbolBlock(outputs=sym, inputs=data_inputs)
net_params = net.collect_params()
for param in arg_params:
if param in net_params:
net_params[param].shape = arg_params[param].shape
net_params[param]._load_init(arg_params[param], ctx=ctx)
for param in aux_params:
if param in net_params:
net_params[param].shape = aux_params[param].shape
net_params[param]._load_init(aux_params[param], ctx=ctx)
return net |
<SYSTEM_TASK:>
Reshapes both modules for new input shapes.
<END_TASK>
<USER_TASK:>
Description:
def reshape(self, data_shapes, label_shapes=None):
"""Reshapes both modules for new input shapes.
Parameters
----------
data_shapes : list of (str, tuple)
Typically is ``data_iter.provide_data``.
label_shapes : list of (str, tuple)
Typically is ``data_iter.provide_label``.
""" |
super(SVRGModule, self).reshape(data_shapes, label_shapes=label_shapes)
self._mod_aux.reshape(data_shapes, label_shapes=label_shapes) |
<SYSTEM_TASK:>
Installs and initializes SVRGOptimizer. The SVRGOptimizer is a wrapper class for a regular optimizer that is
<END_TASK>
<USER_TASK:>
Description:
def init_optimizer(self, kvstore='local', optimizer='sgd',
optimizer_params=(('learning_rate', 0.01),), force_init=False):
"""Installs and initializes SVRGOptimizer. The SVRGOptimizer is a wrapper class for a regular optimizer that is
passed in and a special AssignmentOptimizer to accumulate the full gradients. If KVStore is 'local' or None,
the full gradients will be accumulated locally without pushing to the KVStore. Otherwise, additional keys will
be pushed to accumulate the full gradients in the KVStore.
Parameters
----------
kvstore : str or KVStore
Default `'local'`.
optimizer : str or Optimizer
Default `'sgd'`
optimizer_params : dict
Default `(('learning_rate', 0.01),)`. The default value is not a dictionary,
just to avoid pylint warning of dangerous default values.
force_init : bool
Default ``False``, indicating whether we should force re-initializing the
optimizer in the case an optimizer is already installed.
""" |
# Init dict for storing average of full gradients for each device
self._param_dict = [{key: mx.nd.zeros(shape=value.shape, ctx=self._context[i])
for key, value in self.get_params()[0].items()} for i in range(self._ctx_len)]
svrg_optimizer = self._create_optimizer(_SVRGOptimizer.__name__, default_opt=optimizer,
kvstore=kvstore, optimizer_params=optimizer_params)
super(SVRGModule, self).init_optimizer(kvstore=kvstore, optimizer=svrg_optimizer,
optimizer_params=optimizer_params, force_init=force_init)
# Init additional keys for accumulating full grads in KVStore
if self._kvstore:
for idx, param_on_devs in enumerate(self._exec_group.param_arrays):
name = self._exec_group.param_names[idx]
self._kvstore.init(name + "_full", mx.nd.zeros(shape=self._arg_params[name].shape))
if self._update_on_kvstore:
self._kvstore.pull(name + "_full", param_on_devs, priority=-idx) |
<SYSTEM_TASK:>
Binds the symbols to construct executors for both modules. This is necessary before one
<END_TASK>
<USER_TASK:>
Description:
def bind(self, data_shapes, label_shapes=None, for_training=True,
inputs_need_grad=False, force_rebind=False, shared_module=None, grad_req='write'):
"""Binds the symbols to construct executors for both two modules. This is necessary before one
can perform computation with the SVRGModule.
Parameters
----------
data_shapes : list of (str, tuple)
Typically is ``data_iter.provide_data``.
label_shapes : list of (str, tuple)
Typically is ``data_iter.provide_label``.
for_training : bool
Default is ``True``. Whether the executors should be bound for training.
inputs_need_grad : bool
Default is ``False``. Whether the gradients to the input data need to be computed.
Typically this is not needed. But this might be needed when implementing composition
of modules.
force_rebind : bool
Default is ``False``. This function does nothing if the executors are already
bound. But with this ``True``, the executors will be forced to rebind.
shared_module : Module
Default is ``None``. This is used in bucketing. When not ``None``, the shared module
essentially corresponds to a different bucket -- a module with different symbol
but with the same sets of parameters (e.g. unrolled RNNs with different lengths).
""" |
# force rebinding is typically used when one want to switch from
# training to prediction phase.
super(SVRGModule, self).bind(data_shapes, label_shapes, for_training, inputs_need_grad, force_rebind,
shared_module, grad_req)
if for_training:
self._mod_aux.bind(data_shapes, label_shapes, for_training, inputs_need_grad, force_rebind, shared_module,
grad_req) |
<SYSTEM_TASK:>
Forward computation for both modules. It supports data batches with different shapes, such as
<END_TASK>
<USER_TASK:>
Description:
def forward(self, data_batch, is_train=None):
"""Forward computation for both two modules. It supports data batches with different shapes, such as
different batch sizes or different image sizes.
If reshaping of data batch relates to modification of symbol or module, such as
changing image layout ordering or switching from training to predicting, module
rebinding is required.
See Also
----------
:meth:`BaseModule.forward`.
Parameters
----------
data_batch : DataBatch
Could be anything with similar API implemented.
is_train : bool
Default is ``None``, which means ``is_train`` takes the value of ``self.for_training``.
""" |
super(SVRGModule, self).forward(data_batch, is_train)
if is_train:
self._mod_aux.forward(data_batch, is_train) |
<SYSTEM_TASK:>
Computes the gradients over all data w.r.t weights of past
<END_TASK>
<USER_TASK:>
Description:
def update_full_grads(self, train_data):
"""Computes the gradients over all data w.r.t weights of past
m epochs. For distributed env, it will accumulate full grads in the kvstore.
Parameters
----------
train_data: DataIter
Train data iterator
""" |
param_names = self._exec_group.param_names
arg, aux = self.get_params()
self._mod_aux.set_params(arg_params=arg, aux_params=aux)
train_data.reset()
nbatch = 0
padding = 0
for batch in train_data:
self._mod_aux.forward(batch, is_train=True)
self._mod_aux.backward()
nbatch += 1
for ctx in range(self._ctx_len):
for index, name in enumerate(param_names):
grads = self._mod_aux._exec_group.grad_arrays[index][ctx]
self._param_dict[ctx][name] = mx.nd.broadcast_add(self._param_dict[ctx][name], grads, axis=0)
padding = batch.pad
true_num_batch = nbatch - padding / train_data.batch_size
for name in param_names:
grad_list = []
for i in range(self._ctx_len):
self._param_dict[i][name] /= true_num_batch
grad_list.append(self._param_dict[i][name])
if self._kvstore:
# If in distributed mode, push a list of gradients from each worker/device to the KVStore
self._accumulate_kvstore(name, grad_list) |
<SYSTEM_TASK:>
Accumulate gradients over all data in the KVStore. In distributed setting, each worker sees a portion of
<END_TASK>
<USER_TASK:>
Description:
def _accumulate_kvstore(self, key, value):
"""Accumulate gradients over all data in the KVStore. In distributed setting, each worker sees a portion of
data. The full gradients will be aggregated from each worker in the KVStore.
Parameters
----------
key: int or str
Key in the KVStore.
value: NDArray, RowSparseNDArray
Average of the full gradients.
""" |
# Accumulate full gradients for current epochs
self._kvstore.push(key + "_full", value)
self._kvstore._barrier()
self._kvstore.pull(key + "_full", value)
self._allocate_gradients(key, value) |
<SYSTEM_TASK:>
Allocate average of full gradients accumulated in the KVStore to each device.
<END_TASK>
<USER_TASK:>
Description:
def _allocate_gradients(self, key, value):
"""Allocate average of full gradients accumulated in the KVStore to each device.
Parameters
----------
key: int or str
Key in the kvstore.
value: List of NDArray, List of RowSparseNDArray
A list of average of the full gradients in the KVStore.
""" |
for i in range(self._ctx_len):
self._param_dict[i][key] = value[i] / self._ctx_len |
<SYSTEM_TASK:>
Calculates gradients based on the SVRG update rule.
<END_TASK>
<USER_TASK:>
Description:
def _update_svrg_gradients(self):
"""Calculates gradients based on the SVRG update rule.
""" |
param_names = self._exec_group.param_names
for ctx in range(self._ctx_len):
for index, name in enumerate(param_names):
g_curr_batch_reg = self._exec_group.grad_arrays[index][ctx]
g_curr_batch_special = self._mod_aux._exec_group.grad_arrays[index][ctx]
g_special_weight_all_batch = self._param_dict[ctx][name]
g_svrg = self._svrg_grads_update_rule(g_curr_batch_reg, g_curr_batch_special,
g_special_weight_all_batch)
self._exec_group.grad_arrays[index][ctx] = g_svrg |
<SYSTEM_TASK:>
Prepares two modules for processing a data batch.
<END_TASK>
<USER_TASK:>
Description:
def prepare(self, data_batch, sparse_row_id_fn=None):
"""Prepares two modules for processing a data batch.
Usually involves switching bucket and reshaping.
For modules that contain `row_sparse` parameters in KVStore,
it prepares the `row_sparse` parameters based on the sparse_row_id_fn.
When KVStore is used to update parameters for multi-device or multi-machine training,
a copy of the parameters are stored in KVStore. Note that for `row_sparse` parameters,
the `update()` updates the copy of parameters in KVStore, but doesn't broadcast
the updated parameters to all devices / machines. The `prepare` function is used to
broadcast `row_sparse` parameters with the next batch of data.
Parameters
----------
data_batch : DataBatch
The current batch of data for forward computation.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
""" |
super(SVRGModule, self).prepare(data_batch, sparse_row_id_fn=sparse_row_id_fn)
self._mod_aux.prepare(data_batch, sparse_row_id_fn=sparse_row_id_fn) |
<SYSTEM_TASK:>
Get registrator function.
<END_TASK>
<USER_TASK:>
Description:
def get_register_func(base_class, nickname):
"""Get registrator function.
Parameters
----------
base_class : type
base class for classes that will be registered
nickname : str
nickname of base_class for logging
Returns
-------
a registrator function
""" |
if base_class not in _REGISTRY:
_REGISTRY[base_class] = {}
registry = _REGISTRY[base_class]
def register(klass, name=None):
"""Register functions"""
assert issubclass(klass, base_class), \
"Can only register subclass of %s"%base_class.__name__
if name is None:
name = klass.__name__
name = name.lower()
if name in registry:
warnings.warn(
"\033[91mNew %s %s.%s registered with name %s is"
"overriding existing %s %s.%s\033[0m"%(
nickname, klass.__module__, klass.__name__, name,
nickname, registry[name].__module__, registry[name].__name__),
UserWarning, stacklevel=2)
registry[name] = klass
return klass
register.__doc__ = "Register %s to the %s factory"%(nickname, nickname)
return register |
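A minimal usage sketch with hypothetical classes; it relies on the module-level _REGISTRY dict used by get_register_func:
class Initializer(object):
    pass

register = get_register_func(Initializer, 'initializer')

@register
class Uniform(Initializer):
    pass
# Uniform is now stored in the registry under its lowercase name, 'uniform'.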
<SYSTEM_TASK:>
Get registrator function that allow aliases.
<END_TASK>
<USER_TASK:>
Description:
def get_alias_func(base_class, nickname):
"""Get registrator function that allow aliases.
Parameters
----------
base_class : type
base class for classes that will be registered
nickname : str
nickname of base_class for logging
Returns
-------
a registrator function
""" |
register = get_register_func(base_class, nickname)
def alias(*aliases):
"""alias registrator"""
def reg(klass):
"""registrator function"""
for name in aliases:
register(klass, name)
return klass
return reg
return alias |
<SYSTEM_TASK:>
Pads all sentences to the same length. The length is defined by the longest sentence.
<END_TASK>
<USER_TASK:>
Description:
def pad_sentences(sentences, padding_word="</s>"):
"""Pads all sentences to the same length. The length is defined by the longest sentence.
Returns padded sentences.
""" |
sequence_length = max(len(x) for x in sentences)
padded_sentences = []
for i, sentence in enumerate(sentences):
num_padding = sequence_length - len(sentence)
new_sentence = sentence + [padding_word] * num_padding
padded_sentences.append(new_sentence)
return padded_sentences |
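A small example of the padding behaviour:
sentences = [["the", "movie", "was", "great"], ["terrible"]]
print(pad_sentences(sentences))
# [['the', 'movie', 'was', 'great'], ['terrible', '</s>', '</s>', '</s>']]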
<SYSTEM_TASK:>
Maps sentences and labels to vectors based on a vocabulary.
<END_TASK>
<USER_TASK:>
Description:
def build_input_data(sentences, labels, vocabulary):
"""Maps sentencs and labels to vectors based on a vocabulary.""" |
x = np.array([[vocabulary[word] for word in sentence] for sentence in sentences])
y = np.array(labels)
return [x, y] |
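Continuing the toy example, with a hypothetical vocabulary mapping words to integer ids:

vocabulary = {'</s>': 0, 'the': 1, 'cat': 2, 'sat': 3, 'hello': 4}
x, y = build_input_data(padded, [1, 0], vocabulary)
# x has shape (2, 3) of word ids, y has shape (2,) of labels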
<SYSTEM_TASK:>
Map sentences and labels to vectors based on a pretrained word2vec
<END_TASK>
<USER_TASK:>
Description:
def build_input_data_with_word2vec(sentences, labels, word2vec_list):
"""
Map sentences and labels to vectors based on a pretrained word2vec
""" |
x_vec = []
for sent in sentences:
vec = []
for word in sent:
if word in word2vec_list:
vec.append(word2vec_list[word])
else:
vec.append(word2vec_list['</s>'])
x_vec.append(vec)
x_vec = np.array(x_vec)
y_vec = np.array(labels)
return [x_vec, y_vec] |
<SYSTEM_TASK:>
Generates a batch iterator for a dataset.
<END_TASK>
<USER_TASK:>
Description:
def batch_iter(data, batch_size, num_epochs):
"""Generates a batch iterator for a dataset.""" |
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = (data_size + batch_size - 1) // batch_size  # ceiling division avoids a trailing empty batch
for epoch in range(num_epochs):
# Shuffle the data at each epoch
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index] |
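A small sketch of the generator's behaviour on dummy data:

for batch in batch_iter(list(range(10)), batch_size=4, num_epochs=1):
    print(batch)  # up to 4 shuffled elements per batch, reshuffled every epoch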
<SYSTEM_TASK:>
Load the pre-trained word2vec from file.
<END_TASK>
<USER_TASK:>
Description:
def load_pretrained_word2vec(infile):
"""Load the pre-trained word2vec from file.""" |
if isinstance(infile, str):
infile = open(infile)
word2vec_list = {}
for idx, line in enumerate(infile):
if idx == 0:
vocab_size, dim = line.strip().split()
else:
tks = line.strip().split()
word2vec_list[tks[0]] = list(map(float, tks[1:]))  # materialize the map so it remains usable under Python 3
return word2vec_list |
<SYSTEM_TASK:>
Implements forward computation.
<END_TASK>
<USER_TASK:>
Description:
def forward(self, is_train, req, in_data, out_data, aux):
"""Implements forward computation.
is_train : bool, whether forwarding for training or testing.
req : list of {'null', 'write', 'inplace', 'add'}, how to assign to out_data. 'null' means skip assignment, etc.
in_data : list of NDArray, input data.
out_data : list of NDArray, pre-allocated output buffers.
aux : list of NDArray, mutable auxiliary states. Usually not used.
""" |
data = in_data[0]
label = in_data[1]
pred = mx.nd.SoftmaxOutput(data, label)
self.assign(out_data[0], req[0], pred) |
<SYSTEM_TASK:>
Implements backward computation
<END_TASK>
<USER_TASK:>
Description:
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
"""Implements backward computation
req : list of {'null', 'write', 'inplace', 'add'}, how to assign to in_grad
out_grad : list of NDArray, gradient w.r.t. output data.
in_grad : list of NDArray, gradient w.r.t. input data. This is the output buffer.
""" |
label = in_data[1]
pred = out_data[0]
dx = pred - mx.nd.one_hot(label, 2)
pos_cls_weight = self.positive_cls_weight
scale_factor = ((1 + label * pos_cls_weight) / pos_cls_weight).reshape((pred.shape[0],1))
rescaled_dx = scale_factor * dx
self.assign(in_grad[0], req[0], rescaled_dx) |
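As a quick numeric check with a hypothetical `positive_cls_weight` of 5: a negative sample (label 0) has its gradient scaled by (1 + 0*5)/5 = 0.2, while a positive sample (label 1) is scaled by (1 + 1*5)/5 = 1.2, so gradients from the positive class are up-weighted relative to the negative class.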
<SYSTEM_TASK:>
A list of names for data required by this module.
<END_TASK>
<USER_TASK:>
Description:
def data_names(self):
"""A list of names for data required by this module.""" |
if self.binded:
return self._curr_module.data_names
else:
_, data_names, _ = self._call_sym_gen(self._default_bucket_key)
return data_names |
<SYSTEM_TASK:>
A list of names for the outputs of this module.
<END_TASK>
<USER_TASK:>
Description:
def output_names(self):
"""A list of names for the outputs of this module.""" |
if self.binded:
return self._curr_module.output_names
else:
symbol, _, _ = self._call_sym_gen(self._default_bucket_key)
return symbol.list_outputs() |
<SYSTEM_TASK:>
Sets value for states. Only one of states & value can be specified.
<END_TASK>
<USER_TASK:>
Description:
def set_states(self, states=None, value=None):
"""Sets value for states. Only one of states & values can be specified.
Parameters
----------
states : list of list of NDArrays
Source states arrays formatted like ``[[state1_dev1, state1_dev2],
[state2_dev1, state2_dev2]]``.
value : number
A single scalar value for all state arrays.
""" |
assert self.binded and self.params_initialized
self._curr_module.set_states(states, value) |
<SYSTEM_TASK:>
Binding for a `BucketingModule` means setting up the buckets and binding the
<END_TASK>
<USER_TASK:>
Description:
def bind(self, data_shapes, label_shapes=None, for_training=True,
inputs_need_grad=False, force_rebind=False, shared_module=None,
grad_req='write'):
"""Binding for a `BucketingModule` means setting up the buckets and binding the
executor for the default bucket key. Executors corresponding to other keys are
bound afterwards with `switch_bucket`.
Parameters
----------
data_shapes : list of (str, tuple)
This should correspond to the symbol for the default bucket.
label_shapes : list of (str, tuple)
This should correspond to the symbol for the default bucket.
for_training : bool
Default is ``True``.
inputs_need_grad : bool
Default is ``False``.
force_rebind : bool
Default is ``False``.
shared_module : BucketingModule
Default is ``None``. This value is currently not used.
grad_req : str, list of str, dict of str to str
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(default to 'write').
Can be specified globally (str) or for each argument (list, dict).
""" |
# in case we already initialized params, keep it
if self.params_initialized:
arg_params, aux_params = self.get_params()
# force rebinding is typically used when one wants to switch from
# training to prediction phase.
if force_rebind:
self._reset_bind()
if self.binded:
self.logger.warning('Already bound, ignoring bind()')
return
assert shared_module is None, 'shared_module for BucketingModule is not supported'
self.for_training = for_training
self.inputs_need_grad = inputs_need_grad
self.binded = True
self._grad_req = grad_req
symbol, data_names, label_names = self._call_sym_gen(self._default_bucket_key)
module = Module(symbol, data_names, label_names, logger=self.logger,
context=self._context, work_load_list=self._work_load_list,
fixed_param_names=self._fixed_param_names,
state_names=self._state_names,
group2ctxs=self._group2ctxs,
compression_params=self._compression_params)
module.bind(data_shapes, label_shapes, for_training, inputs_need_grad,
force_rebind=False, shared_module=None, grad_req=self._grad_req)
self._curr_module = module
self._curr_bucket_key = self._default_bucket_key
self._buckets[self._default_bucket_key] = module
# copy back saved params, if already initialized
if self.params_initialized:
self.set_params(arg_params, aux_params) |
<SYSTEM_TASK:>
Switches to a different bucket. This will change ``self.curr_module``.
<END_TASK>
<USER_TASK:>
Description:
def switch_bucket(self, bucket_key, data_shapes, label_shapes=None):
"""Switches to a different bucket. This will change ``self.curr_module``.
Parameters
----------
bucket_key : str (or any python object)
The key of the target bucket.
data_shapes : list of (str, tuple)
Typically ``data_batch.provide_data``.
label_shapes : list of (str, tuple)
Typically ``data_batch.provide_label``.
""" |
assert self.binded, 'call bind before switching bucket'
if bucket_key not in self._buckets:
symbol, data_names, label_names = self._call_sym_gen(bucket_key)
module = Module(symbol, data_names, label_names,
logger=self.logger, context=self._context,
work_load_list=self._work_load_list,
fixed_param_names=self._fixed_param_names,
state_names=self._state_names,
group2ctxs=self._group2ctxs,
compression_params=self._compression_params)
module.bind(data_shapes, label_shapes, self._curr_module.for_training,
self._curr_module.inputs_need_grad,
force_rebind=False, shared_module=self._buckets[self._default_bucket_key],
grad_req=self._grad_req)
if self._monitor is not None:
module.install_monitor(self._monitor)
self._buckets[bucket_key] = module
self._curr_module = self._buckets[bucket_key]
self._curr_bucket_key = bucket_key |
<SYSTEM_TASK:>
Mark NDArrays as variables to compute gradient for autograd.
<END_TASK>
<USER_TASK:>
Description:
def mark_variables(variables, gradients, grad_reqs='write'):
"""Mark NDArrays as variables to compute gradient for autograd.
Parameters
----------
variables: NDArray or list of NDArray
gradients: NDArray or list of NDArray
grad_reqs: str or list of str
""" |
if isinstance(variables, NDArray):
assert isinstance(gradients, NDArray)
variables = [variables]
gradients = [gradients]
if isinstance(grad_reqs, string_types):
grad_reqs = [_GRAD_REQ_MAP[grad_reqs]]*len(variables)
else:
grad_reqs = [_GRAD_REQ_MAP[i] for i in grad_reqs]
check_call(_LIB.MXAutogradMarkVariables(
len(variables),
c_handle_array(variables),
c_array_buf(mx_uint, array('I', grad_reqs)),
c_handle_array(gradients))) |
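A minimal sketch of marking a variable by hand, which mirrors what `x.attach_grad()` does under the hood; the toy values are illustrative:

import mxnet as mx
from mxnet import autograd

x = mx.nd.array([1.0, 2.0, 3.0])
gx = mx.nd.zeros_like(x)          # gradient buffer that backward will fill
autograd.mark_variables(x, gx)
with autograd.record():
    y = x * x
y.backward()
print(gx)                          # dy/dx = 2 * x -> [2. 4. 6.]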
<SYSTEM_TASK:>
parse head gradient for backward and grad.
<END_TASK>
<USER_TASK:>
Description:
def _parse_head(heads, head_grads):
"""parse head gradient for backward and grad.""" |
if isinstance(heads, NDArray):
heads = [heads]
if isinstance(head_grads, NDArray):
head_grads = [head_grads]
head_handles = c_handle_array(heads)
if head_grads is None:
hgrad_handles = ctypes.c_void_p(0)
else:
assert len(heads) == len(head_grads), \
"heads and head_grads must be lists of the same length"
hgrad_handles = c_array(NDArrayHandle,
[i.handle if i is not None else NDArrayHandle(0)
for i in head_grads])
return head_handles, hgrad_handles |
<SYSTEM_TASK:>
Compute the gradients of heads w.r.t previously marked variables.
<END_TASK>
<USER_TASK:>
Description:
def backward(heads, head_grads=None, retain_graph=False, train_mode=True): #pylint: disable=redefined-outer-name
"""Compute the gradients of heads w.r.t previously marked variables.
Parameters
----------
heads: NDArray or list of NDArray
Output NDArray(s)
head_grads: NDArray or list of NDArray or None
Gradients with respect to heads.
retain_graph: bool, optional
Whether to retain the computation graph so that backward can be run again over the same graph.
train_mode: bool, optional
Whether to do backward for training or predicting.
""" |
head_handles, hgrad_handles = _parse_head(heads, head_grads)
check_call(_LIB.MXAutogradBackwardEx(
len(head_handles),
head_handles,
hgrad_handles,
0,
ctypes.c_void_p(0),
ctypes.c_int(retain_graph),
ctypes.c_int(0),
ctypes.c_int(train_mode),
ctypes.c_void_p(0),
ctypes.c_void_p(0))) |
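Continuing the sketch above, the module-level `backward` can also be called directly with explicit head gradients:

with autograd.record():
    z = x * 2
autograd.backward([z], head_grads=[mx.nd.ones_like(z)])
print(gx)                          # dz/dx = 2 -> [2. 2. 2.]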
<SYSTEM_TASK:>
Not particularly fast code to parse the text file and load it into three NDArrays
<END_TASK>
<USER_TASK:>
Description:
def load_mldataset(filename):
"""Not particularly fast code to parse the text file and load it into three NDArray's
and product an NDArrayIter
""" |
user = []
item = []
score = []
with open(filename) as f:
for line in f:
tks = line.strip().split('\t')
if len(tks) != 4:
continue
user.append(int(tks[0]))
item.append(int(tks[1]))
score.append(float(tks[2]))
user = mx.nd.array(user)
item = mx.nd.array(item)
score = mx.nd.array(score)
return gluon.data.ArrayDataset(user, item, score) |
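A hypothetical driver around the returned dataset; the MovieLens path below is an assumption:

dataset = load_mldataset('./ml-100k/u.data')
loader = gluon.data.DataLoader(dataset, batch_size=64, shuffle=True)
for user, item, score in loader:
    pass  # feed the mini-batch to the model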
<SYSTEM_TASK:>
Read .caffemodel path and .params path as input from command line
<END_TASK>
<USER_TASK:>
Description:
def main():
"""Read .caffemodel path and .params path as input from command line
and use CaffeModelConverter to do the conversion""" |
parser = argparse.ArgumentParser(description='.caffemodel to MXNet .params converter.')
parser.add_argument('caffemodel', help='Path to the .caffemodel file to convert.')
parser.add_argument('output_file_name', help='Name of the output .params file.')
args = parser.parse_args()
converter = CaffeModelConverter()
converter.convert(args.caffemodel, args.output_file_name) |
<SYSTEM_TASK:>
Add a param to the .params file
<END_TASK>
<USER_TASK:>
Description:
def add_param(self, param_name, layer_index, blob_index):
"""Add a param to the .params file""" |
blobs = self.layers[layer_index].blobs
self.dict_param[param_name] = mx.nd.array(caffe.io.blobproto_to_array(blobs[blob_index])) |
<SYSTEM_TASK:>
Add an arg param. If there is no such param in the .caffemodel file, silently ignore it.
<END_TASK>
<USER_TASK:>
Description:
def add_optional_arg_param(self, param_name, layer_index, blob_index):
"""Add an arg param. If there is no such param in .caffemodel fie, silently ignore it.""" |
blobs = self.layers[layer_index].blobs
if blob_index < len(blobs):
self.add_arg_param(param_name, layer_index, blob_index) |
<SYSTEM_TASK:>
Helper function for assigning into dst depending on requirements.
<END_TASK>
<USER_TASK:>
Description:
def assign(self, dst, req, src):
"""Helper function for assigning into dst depending on requirements.""" |
if req == 'null':
return
elif req in ('write', 'inplace'):
dst[:] = src
elif req == 'add':
dst[:] += src |
<SYSTEM_TASK:>
infer_type interface. override to create new operators
<END_TASK>
<USER_TASK:>
Description:
def infer_type(self, in_type):
"""infer_type interface. override to create new operators
Parameters
----------
in_type : list of np.dtype
list of argument types in the same order as
declared in list_arguments.
Returns
-------
in_type : list
list of argument types. Can be modified from in_type.
out_type : list
list of output types calculated from in_type,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_type,
in the same order as declared in list_auxiliary_states.
""" |
return in_type, [in_type[0]]*len(self.list_outputs()), \
[in_type[0]]*len(self.list_auxiliary_states()) |
<SYSTEM_TASK:>
infer_storage_type interface. Used to infer storage type of
<END_TASK>
<USER_TASK:>
Description:
def infer_storage_type(self, in_stype):
"""infer_storage_type interface. Used to infer storage type of
inputs and outputs in the forward pass. When this interface is not implemented,
all stypes will be inferred as default.
Parameters
----------
in_stype : list of stypes, valid stypes are default, row_sparse and
csr
Returns
-------
in_stype : list
list of argument stypes.
out_stype : list
list of output types calculated from in_stype,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_stype,
in the same order as declared in list_auxiliary_states.
""" |
for i, stype in enumerate(in_stype):
assert stype == _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT], \
"Default infer_storage_type implementation doesnt allow non default stypes: " \
"found non default stype '%s' for in_stype[%d]. Please implement " \
"infer_storage_type and infer_storage_type_backward interface " \
"in your custom operator if you have non-default input/output stypes" % (stype, i)
return in_stype, \
[_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT]]*len(self.list_outputs()), \
[_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT]]*len(self.list_auxiliary_states()) |
<SYSTEM_TASK:>
infer_storage_type_backward interface. Used to infer storage
<END_TASK>
<USER_TASK:>
Description:
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
"""infer_storage_type_backward interface. Used to infer storage
type of inputs and outputs in the backward pass.
Will raise an error if undefined storage type is returned.
Returned lists have to be the same size as the input lists to infer_storage_type_backward,
otherwise an exception will be thrown. When this interface is not implemented,
all stypes will be inferred as default.
Parameters
----------
ograd_stype : list
list of output gradient storage types
in_stype : list
list of input storage types
out_stype : list
list of output storage types
igrad_stype : list
list of input gradient storage types
aux_stype : list
list of auxiliary storage types
Returns
-------
ograd_stype : list
list of inferred output gradient storage types
in_stype : list
list of inferred input storage types
out_stype : list
list of inferred output storage types
igrad_stype : list
list of inferred input gradient storage types
aux_stype : list
list of inferred storage types for auxiliary states
""" |
for i, stype in enumerate(ograd_stype):
assert stype == _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT], \
"Default infer_storage_type_backward implementation doesnt allow non default stypes: " \
"found non default stype '%s' for ograd_stype[%d]. Please implement " \
"infer_storage_type and infer_storage_type_backward interface " \
"in your custom operator if you have non-default output gradient stypes" % (stype, i)
for i, stype in enumerate(igrad_stype):
if stype == _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_UNDEFINED]:
stype = _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT]
assert stype == _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT], \
"Default infer_storage_type_backward implementation doesnt allow non default stypes: " \
"found non default stype '%s' for igrad_stype[%d]. Please implement " \
"infer_storage_type and infer_storage_type_backward interface " \
"in your custom operator if you have non-default input gradient stypes" % (stype, i)
stype_lists = [ograd_stype, in_stype, out_stype, igrad_stype, aux_stype]
for stype_list in stype_lists:
stype_list[:] = len(stype_list) * [_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT]]
return stype_lists[0], stype_lists[1], stype_lists[2], stype_lists[3], stype_lists[4] |
<SYSTEM_TASK:>
Closes the record and index files.
<END_TASK>
<USER_TASK:>
Description:
def close(self):
"""Closes the record and index files.""" |
if not self.is_open:
return
super(IndexCreator, self).close()
self.fidx.close() |
<SYSTEM_TASK:>
Returns the current position of read head.
<END_TASK>
<USER_TASK:>
Description:
def tell(self):
"""Returns the current position of read head.
""" |
pos = ctypes.c_size_t()
check_call(_LIB.MXRecordIOReaderTell(self.handle, ctypes.byref(pos)))
return pos.value |
<SYSTEM_TASK:>
Creates the index file from open record file
<END_TASK>
<USER_TASK:>
Description:
def create_index(self):
"""Creates the index file from open record file
""" |
self.reset()
counter = 0
pre_time = time.time()
while True:
if counter % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', counter)
pos = self.tell()
cont = self.read()
if cont is None:
break
key = self.key_type(counter)
self.fidx.write('%s\t%d\n'%(str(key), pos))
counter = counter + 1 |
<SYSTEM_TASK:>
build scala for scala docs, java docs, and clojure docs to use
<END_TASK>
<USER_TASK:>
Description:
def build_scala(app):
"""build scala for scala docs, java docs, and clojure docs to use""" |
if any(v in _BUILD_VER for v in ['1.2.', '1.3.', '1.4.']):
_run_cmd("cd %s/.. && make scalapkg" % app.builder.srcdir)
_run_cmd("cd %s/.. && make scalainstall" % app.builder.srcdir)
else:
_run_cmd("cd %s/../scala-package && mvn -B install -DskipTests" % app.builder.srcdir) |
<SYSTEM_TASK:>
build scala doc and then move the outdir
<END_TASK>
<USER_TASK:>
Description:
def build_scala_docs(app):
"""build scala doc and then move the outdir""" |
scala_path = app.builder.srcdir + '/../scala-package'
scala_doc_sources = 'find . -type f -name "*.scala" | egrep \"\.\/core|\.\/infer\" | egrep -v \"\/javaapi\" | egrep -v \"Suite\"'
scala_doc_classpath = ':'.join([
'`find native -name "*.jar" | grep "target/lib/" | tr "\\n" ":" `',
'`find macros -name "*.jar" | tr "\\n" ":" `',
'`find core -name "*.jar" | tr "\\n" ":" `',
'`find infer -name "*.jar" | tr "\\n" ":" `'
])
# There are unresolvable errors on mxnet 1.2.x. We are ignoring those errors while aborting the ci on newer versions
scala_ignore_errors = '; exit 0' if any(v in _BUILD_VER for v in ['1.2.', '1.3.']) else ''
_run_cmd('cd {}; scaladoc `{}` -classpath {} -feature -deprecation {}'
.format(scala_path, scala_doc_sources, scala_doc_classpath, scala_ignore_errors))
dest_path = app.builder.outdir + '/api/scala/docs'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
# 'index' and 'package.html' do not exist in later versions of scala; delete these after upgrading scala>2.12.x
scaladocs = ['index', 'index.html', 'org', 'lib', 'index.js', 'package.html']
for doc_file in scaladocs:
_run_cmd('cd ' + scala_path + ' && mv -f ' + doc_file + ' ' + dest_path + '; exit 0') |
<SYSTEM_TASK:>
build java docs and then move the outdir
<END_TASK>
<USER_TASK:>
Description:
def build_java_docs(app):
"""build java docs and then move the outdir""" |
java_path = app.builder.srcdir + '/../scala-package'
java_doc_sources = 'find . -type f -name "*.scala" | egrep \"\.\/core|\.\/infer\" | egrep \"\/javaapi\" | egrep -v \"Suite\"'
java_doc_classpath = ':'.join([
'`find native -name "*.jar" | grep "target/lib/" | tr "\\n" ":" `',
'`find macros -name "*.jar" | tr "\\n" ":" `',
'`find core -name "*.jar" | tr "\\n" ":" `',
'`find infer -name "*.jar" | tr "\\n" ":" `'
])
_run_cmd('cd {}; scaladoc `{}` -classpath {} -feature -deprecation'
.format(java_path, java_doc_sources, java_doc_classpath))
dest_path = app.builder.outdir + '/api/java/docs'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
javadocs = ['index', 'index.html', 'org', 'lib', 'index.js', 'package.html']
for doc_file in javadocs:
_run_cmd('cd ' + java_path + ' && mv -f ' + doc_file + ' ' + dest_path + '; exit 0') |
<SYSTEM_TASK:>
build clojure doc and then move the outdir
<END_TASK>
<USER_TASK:>
Description:
def build_clojure_docs(app):
"""build clojure doc and then move the outdir""" |
clojure_path = app.builder.srcdir + '/../contrib/clojure-package'
_run_cmd('cd ' + clojure_path + '; lein codox')
dest_path = app.builder.outdir + '/api/clojure/docs'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
clojure_doc_path = app.builder.srcdir + '/../contrib/clojure-package/target/doc'
_run_cmd('cd ' + clojure_doc_path + ' && cp -r * ' + dest_path + '; exit 0') |
<SYSTEM_TASK:>
Find tables in a markdown and then convert them into the rst format
<END_TASK>
<USER_TASK:>
Description:
def convert_table(app, docname, source):
"""Find tables in a markdown and then convert them into the rst format""" |
num_tables = 0
for i,j in enumerate(source):
table = []
output = ''
in_table = False
for l in j.split('\n'):
r = l.strip()
if r.startswith('|'):
table.append(r)
in_table = True
else:
if in_table is True:
converted = _convert_md_table_to_rst(table)
if converted == '':
print("Failed to convert the markdown table")
print(table)
else:
num_tables += 1
output += converted
in_table = False
table = []
output += l + '\n'
source[i] = output
if num_tables > 0:
print('Converted %d tables in %s' % (num_tables, docname)) |
<SYSTEM_TASK:>
An iterator that returns whether a line is within a code block
<END_TASK>
<USER_TASK:>
Description:
def _parse_code_lines(lines):
"""A iterator that returns if a line is within a code block
Returns
-------
iterator of (str, bool, str, int)
- line: the line
- in_code: if this line is in a code block
- lang: the code block language
- indent: the code indent
""" |
in_code = False
lang = None
indent = None
for l in lines:
m = _CODE_MARK.match(l)
if m is not None:
if not in_code:
if m.groups()[1].lower() in _LANGS:
lang = m.groups()[1].lower()
indent = len(m.groups()[0])
in_code = True
yield (l, in_code, lang, indent)
else:
yield (l, in_code, lang, indent)
lang = None
indent = None
in_code = False
else:
yield (l, in_code, lang, indent) |
<SYSTEM_TASK:>
split lines into code and non-code blocks
<END_TASK>
<USER_TASK:>
Description:
def _get_blocks(lines):
"""split lines into code and non-code blocks
Returns
-------
iterator of (bool, str, list of str)
- if it is a code block
- source language
- lines of source
""" |
cur_block = []
pre_lang = None
pre_in_code = None
for (l, in_code, cur_lang, _) in _parse_code_lines(lines):
if in_code != pre_in_code:
if pre_in_code and len(cur_block) >= 2:
cur_block = cur_block[1:-1] # remove ```
# remove empty lines at head
while len(cur_block) > 0:
if len(cur_block[0]) == 0:
cur_block.pop(0)
else:
break
# remove empty lines at tail
while len(cur_block) > 0:
if len(cur_block[-1]) == 0:
cur_block.pop()
else:
break
if len(cur_block):
yield (pre_in_code, pre_lang, cur_block)
cur_block = []
cur_block.append(l)
pre_lang = cur_lang
pre_in_code = in_code
if len(cur_block):
yield (pre_in_code, pre_lang, cur_block) |
<SYSTEM_TASK:>
Copies artifacts needed for website presentation
<END_TASK>
<USER_TASK:>
Description:
def copy_artifacts(app):
"""Copies artifacts needed for website presentation""" |
dest_path = app.builder.outdir + '/error'
source_path = app.builder.srcdir + '/build_version_doc/artifacts'
_run_cmd('cd ' + app.builder.srcdir)
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
_run_cmd('cp ' + source_path + '/404.html ' + dest_path)
_run_cmd('cp ' + source_path + '/api.html ' + dest_path)
dest_path = app.builder.outdir + '/_static'
_run_cmd('rm -rf ' + dest_path)
_run_cmd('mkdir -p ' + dest_path)
_run_cmd('cp ' + app.builder.srcdir + '/_static/mxnet.css ' + dest_path) |
<SYSTEM_TASK:>
Download caffe model into disk by the given meta info
<END_TASK>
<USER_TASK:>
Description:
def download_caffe_model(model_name, meta_info, dst_dir='./model'):
"""Download caffe model into disk by the given meta info """ |
if not os.path.isdir(dst_dir):
os.mkdir(dst_dir)
model_name = os.path.join(dst_dir, model_name)
assert 'prototxt' in meta_info, "missing prototxt url"
proto_url, proto_sha1 = meta_info['prototxt']
prototxt = mx.gluon.utils.download(proto_url,
model_name+'_deploy.prototxt',
sha1_hash=proto_sha1)
assert 'caffemodel' in meta_info, "missing caffemodel url"
caffemodel_url, caffemodel_sha1 = meta_info['caffemodel']
caffemodel = mx.gluon.utils.download(caffemodel_url,
model_name+'.caffemodel',
sha1_hash=caffemodel_sha1)
assert 'mean' in meta_info, 'no mean info'
mean = meta_info['mean']
if isinstance(mean[0], str):
mean_url, mean_sha1 = mean
mean = mx.gluon.utils.download(mean_url,
model_name+'_mean.binaryproto',
sha1_hash=mean_sha1)
return (prototxt, caffemodel, mean) |
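A sketch of the expected `meta_info` layout; the URLs and sha1 hashes below are placeholders, not real artifacts:

meta_info = {
    'prototxt': ('http://example.com/deploy.prototxt', '<sha1>'),
    'caffemodel': ('http://example.com/model.caffemodel', '<sha1>'),
    'mean': ('http://example.com/mean.binaryproto', '<sha1>'),
}
prototxt, caffemodel, mean = download_caffe_model('mymodel', meta_info)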
<SYSTEM_TASK:>
Download, convert and save a caffe model
<END_TASK>
<USER_TASK:>
Description:
def convert_caffe_model(model_name, meta_info, dst_dir='./model'):
"""Download, convert and save a caffe model""" |
(prototxt, caffemodel, mean) = download_caffe_model(model_name, meta_info, dst_dir)
model_name = os.path.join(dst_dir, model_name)
convert_model(prototxt, caffemodel, model_name)
if isinstance(mean, str):
mx_mean = model_name + '-mean.nd'
convert_mean(mean, mx_mean)
mean = mx_mean
return (model_name, mean) |
<SYSTEM_TASK:>
Run _func with multi-process using params.
<END_TASK>
<USER_TASK:>
Description:
def multi_p_run(tot_num, _func, worker, params, n_process):
"""
Run _func with multi-process using params.
""" |
from multiprocessing import Process, Queue
out_q = Queue()
procs = []
split_num = split_seq(list(range(0, tot_num)), n_process)
print(tot_num, ">>", split_num)
split_len = len(split_num)
if n_process > split_len:
n_process = split_len
for i in range(n_process):
_p = Process(target=_func,
args=(worker, split_num[i][0], split_num[i][1],
params, out_q))
_p.daemon = True
procs.append(_p)
_p.start()
try:
result = []
for i in range(n_process):
result.append(out_q.get())
for i in procs:
i.join()
except KeyboardInterrupt:
print('Killing all the children in the pool.')
for i in procs:
i.terminate()
i.join()
return -1
while not out_q.empty():
print(out_q.get(block=False))
return result |
<SYSTEM_TASK:>
create a namedtuple with default values
<END_TASK>
<USER_TASK:>
Description:
def namedtuple_with_defaults(typename, field_names, default_values=()):
""" create a namedtuple with default values """ |
T = collections.namedtuple(typename, field_names)
T.__new__.__defaults__ = (None, ) * len(T._fields)
if isinstance(default_values, collections.abc.Mapping):  # collections.Mapping was removed in Python 3.10
prototype = T(**default_values)
else:
prototype = T(*default_values)
T.__new__.__defaults__ = tuple(prototype)
return T |
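A quick illustration with a hypothetical tuple type:

Point = namedtuple_with_defaults('Point', ['x', 'y', 'z'], dict(z=0))
Point(1, 2)  # -> Point(x=1, y=2, z=0)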
<SYSTEM_TASK:>
merge dict a, b, with b overriding keys in a
<END_TASK>
<USER_TASK:>
Description:
def merge_dict(a, b):
""" merge dict a, b, with b overriding keys in a """ |
c = a.copy()
c.update(b)
return c |
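For example:

merge_dict({'lr': 0.1, 'wd': 1e-4}, {'lr': 0.01})
# -> {'lr': 0.01, 'wd': 0.0001}; both input dicts are left unmodified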
<SYSTEM_TASK:>
accept list of namedtuple, return a dict of zipped fields
<END_TASK>
<USER_TASK:>
Description:
def zip_namedtuple(nt_list):
""" accept list of namedtuple, return a dict of zipped fields """ |
if not nt_list:
return dict()
if not isinstance(nt_list, list):
nt_list = [nt_list]
for nt in nt_list:
assert type(nt) == type(nt_list[0])
ret = {k : [v] for k, v in nt_list[0]._asdict().items()}
for nt in nt_list[1:]:
for k, v in nt._asdict().items():
ret[k].append(v)
return ret |
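Combined with the namedtuple helper above, a hypothetical example:

Box = namedtuple_with_defaults('Box', ['label', 'score'])
zip_namedtuple([Box(0, 0.9), Box(1, 0.8)])
# -> {'label': [0, 1], 'score': [0.9, 0.8]}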