<SYSTEM_TASK:>
download and read data into numpy
<END_TASK>
<USER_TASK:>
Description:
def read_data(label, image):
"""
download and read data into numpy
""" |
base_url = 'http://yann.lecun.com/exdb/mnist/'
with gzip.open(download_file(base_url+label, os.path.join('data',label))) as flbl:
magic, num = struct.unpack(">II", flbl.read(8))
label = np.frombuffer(flbl.read(), dtype=np.int8)
with gzip.open(download_file(base_url+image, os.path.join('data',image)), 'rb') as fimg:
magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
image = np.frombuffer(fimg.read(), dtype=np.uint8).reshape(len(label), rows, cols)
return (label, image) |
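A minimal usage sketch for the function above, assuming `download_file` and the `gzip`, `struct`, `os`, and `numpy` imports are in scope; the file names follow the standard MNIST naming convention and are illustrative:

# Hypothetical usage: fetch and decode the MNIST training and test sets.
train_lbl, train_img = read_data('train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz')
val_lbl, val_img = read_data('t10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz')
print(train_img.shape, train_lbl.shape)  # expected: (60000, 28, 28) (60000,)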
<SYSTEM_TASK:>
check input imdbs, make sure they have the same classes
<END_TASK>
<USER_TASK:>
Description:
def _check_classes(self):
"""
check input imdbs, make sure they have the same classes
""" |
try:
self.classes = self.imdbs[0].classes
self.num_classes = len(self.classes)
except AttributeError:
# fine if no classes are provided
pass
if self.num_classes > 0:
for db in self.imdbs:
assert self.classes == db.classes, "Multiple imdbs must have the same classes" |
<SYSTEM_TASK:>
get total number of images, init indices
<END_TASK>
<USER_TASK:>
Description:
def _load_image_set_index(self, shuffle):
"""
get total number of images, init indices
Parameters
----------
shuffle : bool
whether to shuffle the initial indices
""" |
self.num_images = 0
for db in self.imdbs:
self.num_images += db.num_images
indices = list(range(self.num_images))
if shuffle:
random.shuffle(indices)
return indices |
<SYSTEM_TASK:>
given index, find out sub-db and sub-index
<END_TASK>
<USER_TASK:>
Description:
def _locate_index(self, index):
"""
given index, find out sub-db and sub-index
Parameters
----------
index : int
index of a specific image
Returns
----------
a tuple (sub-db, sub-index)
""" |
assert index >= 0 and index < self.num_images, "index out of range"
pos = self.image_set_index[index]
for k, v in enumerate(self.imdbs):
if pos >= v.num_images:
pos -= v.num_images
else:
return (k, pos) |
<SYSTEM_TASK:>
Callback to checkpoint Module to prefix every epoch.
<END_TASK>
<USER_TASK:>
Description:
def module_checkpoint(mod, prefix, period=1, save_optimizer_states=False):
"""Callback to checkpoint Module to prefix every epoch.
Parameters
----------
mod : subclass of BaseModule
The module to checkpoint.
prefix : str
The file prefix for this checkpoint.
period : int
How many epochs to wait before checkpointing. Defaults to 1.
save_optimizer_states : bool
Indicates whether or not to save optimizer states for continued training.
Returns
-------
callback : function
The callback function that can be passed as iter_end_callback to fit.
""" |
period = int(max(1, period))
# pylint: disable=unused-argument
def _callback(iter_no, sym=None, arg=None, aux=None):
"""The checkpoint function."""
if (iter_no + 1) % period == 0:
mod.save_checkpoint(prefix, iter_no + 1, save_optimizer_states)
return _callback |
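A hedged usage sketch: `mod` and `train_iter` are assumed to be an already constructed `mx.mod.Module` and a `DataIter`, and the prefix 'my_model' is a placeholder.

# Hypothetical usage: write 'my_model-*.params' every 2 epochs during fit.
ckpt_cb = module_checkpoint(mod, prefix='my_model', period=2, save_optimizer_states=True)
mod.fit(train_iter, num_epoch=10, epoch_end_callback=ckpt_cb)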
<SYSTEM_TASK:>
Callback to log the training evaluation result every period.
<END_TASK>
<USER_TASK:>
Description:
def log_train_metric(period, auto_reset=False):
"""Callback to log the training evaluation result every period.
Parameters
----------
period : int
The number of batches between logging the training evaluation metric.
auto_reset : bool
Reset the metric after each log.
Returns
-------
callback : function
The callback function that can be passed as iter_epoch_callback to fit.
""" |
def _callback(param):
"""The checkpoint function."""
if param.nbatch % period == 0 and param.eval_metric is not None:
name_value = param.eval_metric.get_name_value()
for name, value in name_value:
logging.info('Iter[%d] Batch[%d] Train-%s=%f',
param.epoch, param.nbatch, name, value)
if auto_reset:
param.eval_metric.reset_local()
return _callback |
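A hedged sketch of wiring this callback into training, with the same assumed `mod` and `train_iter` as in the previous sketch:

# Hypothetical usage: log the training metric every 50 batches and reset it after each log.
metric_cb = log_train_metric(period=50, auto_reset=True)
mod.fit(train_iter, num_epoch=10, batch_end_callback=metric_cb)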
<SYSTEM_TASK:>
install callback to executor.
<END_TASK>
<USER_TASK:>
Description:
def install(self, exe):
"""install callback to executor.
Supports installing to multiple exes.
Parameters
----------
exe : mx.executor.Executor
The Executor (returned by symbol.bind) to install to.
""" |
exe.set_monitor_callback(self.stat_helper, self.monitor_all)
self.exes.append(exe) |
<SYSTEM_TASK:>
Start collecting stats for current batch.
<END_TASK>
<USER_TASK:>
Description:
def tic(self):
"""Start collecting stats for current batch.
Call before calling forward.""" |
if self.step % self.interval == 0:
for exe in self.exes:
for array in exe.arg_arrays:
array.wait_to_read()
for array in exe.aux_arrays:
array.wait_to_read()
self.queue = []
self.activated = True
self.step += 1 |
<SYSTEM_TASK:>
End collecting for current batch and return results.
<END_TASK>
<USER_TASK:>
Description:
def toc(self):
"""End collecting for current batch and return results.
Call after computation of current batch.
Returns
-------
res : list of (step, statistic_name, value_string) tuples collected for this batch. """ |
if not self.activated:
return []
for exe in self.exes:
for array in exe.arg_arrays:
array.wait_to_read()
for array in exe.aux_arrays:
array.wait_to_read()
for exe in self.exes:
for name, array in zip(exe._symbol.list_arguments(), exe.arg_arrays):
if self.re_prog.match(name):
self.queue.append((self.step, name, self.stat_func(array)))
for name, array in zip(exe._symbol.list_auxiliary_states(), exe.aux_arrays):
if self.re_prog.match(name):
self.queue.append((self.step, name, self.stat_func(array)))
self.activated = False
res = []
if self.sort:
self.queue.sort(key=lambda x: x[1])
for n, k, v_list in self.queue:
if isinstance(v_list, NDArray):
v_list = [v_list]
assert isinstance(v_list, list)
s = ''
for v in v_list:
assert isinstance(v, NDArray)
if v.shape == (1,):
s += str(v.asscalar()) + '\t'
else:
s += str(v.asnumpy()) + '\t'
res.append((n, k, s))
self.queue = []
return res |
<SYSTEM_TASK:>
End collecting and print results.
<END_TASK>
<USER_TASK:>
Description:
def toc_print(self):
"""End collecting and print results.""" |
res = self.toc()
for n, k, v in res:
logging.info('Batch: {:7d} {:30s} {:s}'.format(n, k, v)) |
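`install`, `tic`, `toc`, and `toc_print` above are methods of the monitor utility; a hedged sketch of the usual loop around them, assuming `exe` is an Executor returned by `symbol.bind`/`simple_bind` whose input arrays are filled elsewhere, and `num_batches` is defined:

import numpy as np
import mxnet as mx

# Hypothetical usage: collect a norm-based statistic every 10 batches.
mon = mx.mon.Monitor(interval=10, stat_func=lambda x: mx.nd.norm(x) / np.sqrt(x.size))
mon.install(exe)
for _ in range(num_batches):
    mon.tic()                  # start collecting before forward
    exe.forward(is_train=True)
    exe.backward()
    mon.toc_print()            # print the statistics gathered for this batch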
<SYSTEM_TASK:>
Expand the pending files in the current stage.
<END_TASK>
<USER_TASK:>
Description:
def expand(x, pending, stage):
"""
Expand the pending files in the current stage.
Parameters
----------
x: str
The file to expand.
pending : str
The list of pending files to expand.
stage: str
The current stage for file expansion, used for matching the prefix of files.
""" |
if x in history and x not in ['mshadow/mshadow/expr_scalar-inl.h']: # MULTIPLE includes
return
if x in pending:
#print('loop found: {} in {}'.format(x, pending))
return
whtspace = ' ' * expand.treeDepth
expand.fileCount += 1
comment = u"//=====[{:3d}] STAGE:{:>4} {}EXPANDING: {} =====\n\n".format(expand.fileCount, stage, whtspace, x)
out.write(comment.encode('ascii'))
print(comment)
with open(x, 'rb') as x_h:
for line in x_h.readlines():
uline = line.decode('utf-8')
if '#define DMLC_LOG_STACK_TRACE 1' in uline.strip():
# Do not enable stacktrace logging
continue
if uline.find('#include') < 0:
out.write(line)
continue
if uline.strip().find('#include') > 0:
print(uline)
continue
m = re1.search(uline)
if not m:
m = re2.search(uline)
if m:
path = m.groups()[0]
else:
m = re3.search(uline)
if m:
path = 'execinfo.h'
else:
print(uline + ' not found')
continue
h = path.strip('./') if "../3rdparty/" not in path else path
if h.endswith('complex.h') and x.endswith('openblas_config.h'):
source = ''
elif h.startswith('ps/'):
source = '../3rdparty/ps-lite/include/' + h
else:
source = find_source(h, x, stage)
if not source:
if (h not in blacklist and
h not in sysheaders and
'mkl' not in h and
'nnpack' not in h and
'tensorrt' not in h and
not h.endswith('.cuh')): sysheaders.append(h)
else:
expand.treeDepth += 1
expand(source, pending + [x], stage)
expand.treeDepth -= 1
out.write(u"//===== EXPANDED : {} =====\n\n".format(x).encode('ascii'))
history.add(x) |
<SYSTEM_TASK:>
Load embedding vectors from the pre-trained token embedding file.
<END_TASK>
<USER_TASK:>
Description:
def _load_embedding(self, pretrained_file_path, elem_delim, init_unknown_vec, encoding='utf8'):
"""Load embedding vectors from the pre-trained token embedding file.
For every unknown token, if its representation `self.unknown_token` is encountered in the
pre-trained token embedding file, index 0 of `self.idx_to_vec` maps to the pre-trained token
embedding vector loaded from the file; otherwise, index 0 of `self.idx_to_vec` maps to the
text embedding vector initialized by `init_unknown_vec`.
If a token is encountered multiple times in the pre-trained text embedding file, only the
first-encountered token embedding vector will be loaded and the rest will be skipped.
""" |
pretrained_file_path = os.path.expanduser(pretrained_file_path)
if not os.path.isfile(pretrained_file_path):
raise ValueError('`pretrained_file_path` must be a valid path to '
'the pre-trained token embedding file.')
logging.info('Loading pre-trained token embedding vectors from %s', pretrained_file_path)
vec_len = None
all_elems = []
tokens = set()
loaded_unknown_vec = None
line_num = 0
with io.open(pretrained_file_path, 'r', encoding=encoding) as f:
for line in f:
line_num += 1
elems = line.rstrip().split(elem_delim)
assert len(elems) > 1, 'At line %d of the pre-trained text embedding file: the ' \
'data format of the pre-trained token embedding file %s ' \
'is unexpected.' % (line_num, pretrained_file_path)
token, elems = elems[0], [float(i) for i in elems[1:]]
if token == self.unknown_token and loaded_unknown_vec is None:
loaded_unknown_vec = elems
tokens.add(self.unknown_token)
elif token in tokens:
warnings.warn('At line %d of the pre-trained token embedding file: the '
'embedding vector for token %s has been loaded and a duplicate '
'embedding for the same token is seen and skipped.' %
(line_num, token))
elif len(elems) == 1:
warnings.warn('At line %d of the pre-trained text embedding file: token %s '
'with 1-dimensional vector %s is likely a header and is '
'skipped.' % (line_num, token, elems))
else:
if vec_len is None:
vec_len = len(elems)
# Reserve a vector slot for the unknown token at the very beginning because
# the unknown index is 0.
all_elems.extend([0] * vec_len)
else:
assert len(elems) == vec_len, \
'At line %d of the pre-trained token embedding file: the dimension ' \
'of token %s is %d but the dimension of previous tokens is %d. ' \
'Dimensions of all the tokens must be the same.' \
% (line_num, token, len(elems), vec_len)
all_elems.extend(elems)
self._idx_to_token.append(token)
self._token_to_idx[token] = len(self._idx_to_token) - 1
tokens.add(token)
self._vec_len = vec_len
self._idx_to_vec = nd.array(all_elems).reshape((-1, self.vec_len))
if loaded_unknown_vec is None:
self._idx_to_vec[C.UNKNOWN_IDX] = init_unknown_vec(shape=self.vec_len)
else:
self._idx_to_vec[C.UNKNOWN_IDX] = nd.array(loaded_unknown_vec) |
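`_load_embedding` is an internal helper; a hedged sketch of the public entry point that ends up calling it (the GloVe file name is one of the registered pretrained files, and creating the embedding downloads the vectors on first use):

from mxnet.contrib import text

# Hypothetical usage: build a 50-dimensional GloVe embedding; unknown tokens map to index 0.
glove = text.embedding.create('glove', pretrained_file_name='glove.6B.50d.txt')
print(glove.vec_len)            # 50
print(len(glove.idx_to_token))  # vocabulary size, including the unknown token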
<SYSTEM_TASK:>
Sets the mapping between token indices and token embedding vectors.
<END_TASK>
<USER_TASK:>
Description:
def _set_idx_to_vec_by_embeddings(self, token_embeddings, vocab_len, vocab_idx_to_token):
"""Sets the mapping between token indices and token embedding vectors.
Parameters
----------
token_embeddings : instance or list of `mxnet.contrib.text.embedding._TokenEmbedding`
One or multiple pre-trained token embeddings to load. If it is a list of multiple
embeddings, these embedding vectors will be concatenated for each token.
vocab_len : int
Length of vocabulary whose tokens are indexed in the token embedding.
vocab_idx_to_token: list of str
A list of indexed tokens in the vocabulary. These tokens are indexed in the token
embedding.
""" |
new_vec_len = sum(embed.vec_len for embed in token_embeddings)
new_idx_to_vec = nd.zeros(shape=(vocab_len, new_vec_len))
col_start = 0
# Concatenate all the embedding vectors in token_embeddings.
for embed in token_embeddings:
col_end = col_start + embed.vec_len
# Concatenate vectors of the unknown token.
new_idx_to_vec[0, col_start:col_end] = embed.idx_to_vec[0]
new_idx_to_vec[1:, col_start:col_end] = embed.get_vecs_by_tokens(vocab_idx_to_token[1:])
col_start = col_end
self._vec_len = new_vec_len
self._idx_to_vec = new_idx_to_vec |
<SYSTEM_TASK:>
Look up embedding vectors of tokens.
<END_TASK>
<USER_TASK:>
Description:
def get_vecs_by_tokens(self, tokens, lower_case_backup=False):
"""Look up embedding vectors of tokens.
Parameters
----------
tokens : str or list of strs
A token or a list of tokens.
lower_case_backup : bool, default False
If False, each token in the original case will be looked up; if True, each token in the
original case will be looked up first, and if it is not found in the keys of the property
`token_to_idx`, the token in the lower case will be looked up.
Returns
-------
mxnet.ndarray.NDArray:
The embedding vector(s) of the token(s). According to numpy conventions, if `tokens` is
a string, returns a 1-D NDArray of shape `self.vec_len`; if `tokens` is a list of
strings, returns a 2-D NDArray of shape=(len(tokens), self.vec_len).
""" |
to_reduce = False
if not isinstance(tokens, list):
tokens = [tokens]
to_reduce = True
if not lower_case_backup:
indices = [self.token_to_idx.get(token, C.UNKNOWN_IDX) for token in tokens]
else:
indices = [self.token_to_idx[token] if token in self.token_to_idx
else self.token_to_idx.get(token.lower(), C.UNKNOWN_IDX)
for token in tokens]
vecs = nd.Embedding(nd.array(indices), self.idx_to_vec, self.idx_to_vec.shape[0],
self.idx_to_vec.shape[1])
return vecs[0] if to_reduce else vecs |
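A hedged usage sketch, continuing the `glove` embedding from the earlier sketch:

# Hypothetical usage: a single token returns a 1-D vector, a list returns a 2-D matrix.
vec = glove.get_vecs_by_tokens('hello')
vecs = glove.get_vecs_by_tokens(['Hello', 'world'], lower_case_backup=True)
print(vec.shape, vecs.shape)  # (50,) (2, 50)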
<SYSTEM_TASK:>
Updates embedding vectors for tokens.
<END_TASK>
<USER_TASK:>
Description:
def update_token_vectors(self, tokens, new_vectors):
"""Updates embedding vectors for tokens.
Parameters
----------
tokens : str or a list of strs
A token or a list of tokens whose embedding vector are to be updated.
new_vectors : mxnet.ndarray.NDArray
An NDArray to be assigned to the embedding vectors of `tokens`. Its length must be equal
to the number of `tokens` and its width must be equal to the dimension of embeddings of
the glossary. If `tokens` is a singleton, it must be 1-D or 2-D. If `tokens` is a list
of multiple strings, it must be 2-D.
""" |
assert self.idx_to_vec is not None, 'The property `idx_to_vec` has not been properly set.'
if not isinstance(tokens, list) or len(tokens) == 1:
assert isinstance(new_vectors, nd.NDArray) and len(new_vectors.shape) in [1, 2], \
'`new_vectors` must be a 1-D or 2-D NDArray if `tokens` is a singleton.'
if not isinstance(tokens, list):
tokens = [tokens]
if len(new_vectors.shape) == 1:
new_vectors = new_vectors.expand_dims(0)
else:
assert isinstance(new_vectors, nd.NDArray) and len(new_vectors.shape) == 2, \
'`new_vectors` must be a 2-D NDArray if `tokens` is a list of multiple strings.'
assert new_vectors.shape == (len(tokens), self.vec_len), \
'The length of new_vectors must be equal to the number of tokens and the width of ' \
'new_vectors must be equal to the dimension of embeddings of the glossary.'
indices = []
for token in tokens:
if token in self.token_to_idx:
indices.append(self.token_to_idx[token])
else:
raise ValueError('Token %s is unknown. To update the embedding vector for an '
'unknown token, please specify it explicitly as the '
'`unknown_token` %s in `tokens`. This is to avoid unintended '
'updates.' % (token, self.idx_to_token[C.UNKNOWN_IDX]))
self._idx_to_vec[nd.array(indices)] = new_vectors |
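A hedged sketch using the same `glove` instance as above, assuming the instance exposes this method and that 'hello' is in the loaded vocabulary:

import mxnet.ndarray as nd

# Hypothetical usage: overwrite the stored vector of one known token with zeros.
glove.update_token_vectors('hello', nd.zeros((1, glove.vec_len)))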
<SYSTEM_TASK:>
Checks if a pre-trained token embedding file name is valid.
<END_TASK>
<USER_TASK:>
Description:
def _check_pretrained_file_names(cls, pretrained_file_name):
"""Checks if a pre-trained token embedding file name is valid.
Parameters
----------
pretrained_file_name : str
The pre-trained token embedding file.
""" |
embedding_name = cls.__name__.lower()
if pretrained_file_name not in cls.pretrained_file_name_sha1:
raise KeyError('Cannot find pretrained file %s for token embedding %s. Valid '
'pretrained files for embedding %s: %s' %
(pretrained_file_name, embedding_name, embedding_name,
', '.join(cls.pretrained_file_name_sha1.keys()))) |
<SYSTEM_TASK:>
Generate the implementation of step HMC
<END_TASK>
<USER_TASK:>
Description:
def step_HMC(exe, exe_params, exe_grads, label_key, noise_precision, prior_precision, L=10, eps=1E-6):
"""Generate the implementation of step HMC""" |
init_params = {k: v.copyto(v.context) for k, v in exe_params.items()}
end_params = {k: v.copyto(v.context) for k, v in exe_params.items()}
init_momentums = {k: mx.random.normal(0, 1, v.shape) for k, v in init_params.items()}
end_momentums = {k: v.copyto(v.context) for k, v in init_momentums.items()}
init_potential = calc_potential(exe, init_params, label_key, noise_precision, prior_precision)
# 0. Calculate Initial Energy and Kinetic
init_kinetic = sum([nd.sum(nd.square(momentum)) / 2.0
for momentum in init_momentums.values()]).asscalar()
# 1. Make a half step for momentum at the beginning
exe.copy_params_from(end_params)
exe.forward(is_train=True)
exe.backward()
for k, v in exe_grads.items():
v.wait_to_read()
for k, momentum in end_momentums.items():
momentum[:] = momentum - (eps / 2) * exe_grads[k]
# 2. Alternate full steps for position and momentum
for i in range(L):
# 2.1 Full step for position
for k, param in exe_params.items():
param[:] = param + eps * end_momentums[k]
# 2.2 Full step for the momentum, except at the end of trajectory we perform a half step
exe.forward(is_train=True)
exe.backward()
for v in exe_grads.values():
v.wait_to_read()
if i != L - 1:
for k, momentum in end_momentums.items():
momentum[:] = momentum - eps * exe_grads[k]
else:
for k, momentum in end_momentums.items():
# We should reverse the sign of the momentum at the end
momentum[:] = -(momentum - eps / 2.0 * exe_grads[k])
copy_param(exe, end_params)
# 3. Calculate acceptance ratio and accept/reject the move
end_potential = calc_potential(exe, end_params, label_key, noise_precision, prior_precision)
end_kinetic = sum([nd.sum(nd.square(momentum)) / 2.0
for momentum in end_momentums.values()]).asscalar()
# print init_potential, init_kinetic, end_potential, end_kinetic
r = numpy.random.rand(1)
if r < numpy.exp(-(end_potential + end_kinetic) + (init_potential + init_kinetic)):
exe.copy_params_from(end_params)
return end_params, 1
else:
exe.copy_params_from(init_params)
return init_params, 0 |
<SYSTEM_TASK:>
Generate the implementation of HMC
<END_TASK>
<USER_TASK:>
Description:
def HMC(sym, data_inputs, X, Y, X_test, Y_test, sample_num,
initializer=None, noise_precision=1 / 9.0, prior_precision=0.1,
learning_rate=1E-6, L=10, dev=mx.gpu()):
"""Generate the implementation of HMC""" |
label_key = list(set(data_inputs.keys()) - set(['data']))[0]
exe, exe_params, exe_grads, _ = get_executor(sym, dev, data_inputs, initializer)
exe.arg_dict['data'][:] = X
exe.arg_dict[label_key][:] = Y
sample_pool = []
accept_num = 0
start = time.time()
for i in range(sample_num):
sample_params, is_accept = step_HMC(exe, exe_params, exe_grads, label_key, noise_precision,
prior_precision, L, learning_rate)
accept_num += is_accept
if (i + 1) % 10 == 0:
sample_pool.append(sample_params)
if (i + 1) % 100000 == 0:
end = time.time()
print("Current Iter Num: %d" % (i + 1), "Time Spent: %f" % (end - start), "MSE:",
sample_test_regression(exe, X=X_test, Y=Y_test, sample_pool=sample_pool,
minibatch_size=Y.shape[0],
save_path='regression_HMC.txt'))
start = time.time()
exe.copy_params_from(sample_params)
print('accept ratio', accept_num / float(sample_num))
return sample_pool |
<SYSTEM_TASK:>
Generate the implementation of SGD
<END_TASK>
<USER_TASK:>
Description:
def SGD(sym, data_inputs, X, Y, X_test, Y_test, total_iter_num,
lr=None,
lr_scheduler=None, prior_precision=1,
out_grad_f=None,
initializer=None,
minibatch_size=100, dev=mx.gpu()):
"""Generate the implementation of SGD""" |
if out_grad_f is None:
label_key = list(set(data_inputs.keys()) - set(['data']))[0]
exe, params, params_grad, _ = get_executor(sym, dev, data_inputs, initializer)
optimizer = mx.optimizer.create('sgd', learning_rate=lr,
rescale_grad=X.shape[0] / minibatch_size,
lr_scheduler=lr_scheduler,
wd=prior_precision)
updater = mx.optimizer.get_updater(optimizer)
start = time.time()
for i in range(total_iter_num):
indices = numpy.random.randint(X.shape[0], size=minibatch_size)
X_batch = X[indices]
Y_batch = Y[indices]
exe.arg_dict['data'][:] = X_batch
if out_grad_f is None:
exe.arg_dict[label_key][:] = Y_batch
exe.forward(is_train=True)
exe.backward()
else:
exe.forward(is_train=True)
exe.backward(out_grad_f(exe.outputs, nd.array(Y_batch, ctx=dev)))
for k in params:
updater(k, params_grad[k], params[k])
if (i + 1) % 500 == 0:
end = time.time()
print("Current Iter Num: %d" % (i + 1), "Time Spent: %f" % (end - start))
sample_test_acc(exe, X=X_test, Y=Y_test, label_num=10, minibatch_size=100)
start = time.time()
return exe, params, params_grad |
<SYSTEM_TASK:>
Generate the implementation of SGLD
<END_TASK>
<USER_TASK:>
Description:
def SGLD(sym, X, Y, X_test, Y_test, total_iter_num,
data_inputs=None,
learning_rate=None,
lr_scheduler=None, prior_precision=1,
out_grad_f=None,
initializer=None,
minibatch_size=100, thin_interval=100, burn_in_iter_num=1000, task='classification',
dev=mx.gpu()):
"""Generate the implementation of SGLD""" |
if out_grad_f is None:
label_key = list(set(data_inputs.keys()) - set(['data']))[0]
exe, params, params_grad, _ = get_executor(sym, dev, data_inputs, initializer)
optimizer = mx.optimizer.create('sgld', learning_rate=learning_rate,
rescale_grad=X.shape[0] / minibatch_size,
lr_scheduler=lr_scheduler,
wd=prior_precision)
updater = mx.optimizer.get_updater(optimizer)
sample_pool = []
start = time.time()
for i in range(total_iter_num):
indices = numpy.random.randint(X.shape[0], size=minibatch_size)
X_batch = X[indices]
Y_batch = Y[indices]
exe.arg_dict['data'][:] = X_batch
if out_grad_f is None:
exe.arg_dict[label_key][:] = Y_batch
exe.forward(is_train=True)
exe.backward()
else:
exe.forward(is_train=True)
exe.backward(out_grad_f(exe.outputs, nd.array(Y_batch, ctx=dev)))
for k in params:
updater(k, params_grad[k], params[k])
if i < burn_in_iter_num:
continue
else:
if (i - burn_in_iter_num) % thin_interval == 0:
if optimizer.lr_scheduler is not None:
lr = optimizer.lr_scheduler(optimizer.num_update)
else:
lr = learning_rate
sample_pool.append([lr, copy_param(exe)])
if (i + 1) % 100000 == 0:
end = time.time()
if task == 'classification':
print("Current Iter Num: %d" % (i + 1), "Time Spent: %f" % (end - start))
test_correct, test_total, test_acc = \
sample_test_acc(exe, sample_pool=sample_pool, X=X_test, Y=Y_test, label_num=10,
minibatch_size=minibatch_size)
print("Test %d/%d=%f" % (test_correct, test_total, test_acc))
else:
print("Current Iter Num: %d" % (i + 1), "Time Spent: %f" % (end - start), "MSE:",
sample_test_regression(exe=exe, sample_pool=sample_pool,
X=X_test,
Y=Y_test, minibatch_size=minibatch_size,
save_path='regression_SGLD.txt'))
start = time.time()
return exe, sample_pool |
<SYSTEM_TASK:>
Get a list of architectures given our dockerfiles
<END_TASK>
<USER_TASK:>
Description:
def get_platforms(path: str = get_dockerfiles_path()) -> List[str]:
"""Get a list of architectures given our dockerfiles""" |
dockerfiles = glob.glob(os.path.join(path, "Dockerfile.*"))
dockerfiles = list(filter(lambda x: x[-1] != '~', dockerfiles))
files = list(map(lambda x: re.sub(r"Dockerfile.(.*)", r"\1", x), dockerfiles))
platforms = list(map(lambda x: os.path.split(x)[1], sorted(files)))
return platforms |
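A hedged sketch of the result, assuming a directory containing files such as Dockerfile.build.ubuntu_cpu and Dockerfile.build.ubuntu_gpu (both the path and the file names are illustrative):

# Hypothetical usage: list the per-platform Dockerfile suffixes.
platforms = get_platforms('docker')
print(platforms)  # e.g. ['build.ubuntu_cpu', 'build.ubuntu_gpu']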
<SYSTEM_TASK:>
Imports tagged container from the given docker registry
<END_TASK>
<USER_TASK:>
Description:
def load_docker_cache(tag, docker_registry) -> None:
"""Imports tagged container from the given docker registry""" |
if docker_registry:
# noinspection PyBroadException
try:
import docker_cache
logging.info('Docker cache download is enabled from registry %s', docker_registry)
docker_cache.load_docker_cache(registry=docker_registry, docker_tag=tag)
except Exception:
logging.exception('Unable to retrieve Docker cache. Continue without...')
else:
logging.info('Distributed docker cache disabled') |
<SYSTEM_TASK:>
Load data into sliced arrays.
<END_TASK>
<USER_TASK:>
Description:
def _load_data(batch, targets, major_axis):
"""Load data into sliced arrays.""" |
if isinstance(batch, list):
new_batch = []
for i in range(len(targets)):
new_batch.append([b.data[i] for b in batch])
new_targets = [[dst for _, dst in d_target] for d_target in targets]
_load_general(new_batch, new_targets, major_axis)
else:
_load_general(batch.data, targets, major_axis) |
<SYSTEM_TASK:>
Merge outputs that live on multiple contexts into one, so that they look
<END_TASK>
<USER_TASK:>
Description:
def _merge_multi_context(outputs, major_axis):
"""Merge outputs that lives on multiple context into one, so that they look
like living on one context.
""" |
rets = []
for tensors, axis in zip(outputs, major_axis):
if axis >= 0:
# pylint: disable=no-member,protected-access
if len(tensors) == 1:
rets.append(tensors[0])
else:
# Concatenate if necessary
rets.append(nd.concat(*[tensor.as_in_context(tensors[0].context)
for tensor in tensors],
dim=axis))
# pylint: enable=no-member,protected-access
else:
# negative axis means there is no batch_size axis, and all the
# results should be the same on each device. We simply take the
# first one, without checking that they are actually the same
rets.append(tensors[0])
return rets |
<SYSTEM_TASK:>
Prepare the group2contexts, duplicating the context
<END_TASK>
<USER_TASK:>
Description:
def _prepare_group2ctxs(group2ctxs, ctx_len):
"""Prepare the group2contexts, will duplicate the context
if some ctx_group map to only one context.
""" |
if group2ctxs is None:
return [None] * ctx_len
elif isinstance(group2ctxs, list):
assert(len(group2ctxs) == ctx_len), "length of group2ctxs\
should be %d" % ctx_len
return group2ctxs
elif isinstance(group2ctxs, dict):
ret = [{} for i in range(ctx_len)]
for k, v in group2ctxs.items():
ctxs = None
if isinstance(v, ctx.Context):
ctxs = [v] * ctx_len
else:
if len(v) == 1:
ctxs = v * ctx_len
else:
assert(len(v) == ctx_len), "length of group2ctxs[%s]\
should be %d or 1" % (k, ctx_len)
ctxs = v
for i in range(ctx_len):
ret[i][k] = ctxs[i]
return ret
else:
assert(False), "group2ctxs should be list of dict of str to context,\
or dict of str to context or list of context"
return False |
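A hedged sketch of the three accepted input forms, assuming two data-parallel executor contexts:

import mxnet as mx

# Hypothetical usage: each call returns one dict (or None) per executor context.
_prepare_group2ctxs(None, 2)                                   # -> [None, None]
_prepare_group2ctxs({'extra_dev': mx.cpu()}, 2)                # single context duplicated per executor
_prepare_group2ctxs({'extra_dev': [mx.cpu(0), mx.cpu(1)]}, 2)  # one context per executor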
<SYSTEM_TASK:>
Decide the slices for each context according to the workload.
<END_TASK>
<USER_TASK:>
Description:
def decide_slices(self, data_shapes):
"""Decide the slices for each context according to the workload.
Parameters
----------
data_shapes : list
list of (name, shape) specifying the shapes for the input data or label.
""" |
assert len(data_shapes) > 0
major_axis = [DataDesc.get_batch_axis(x.layout) for x in data_shapes]
for (name, shape), axis in zip(data_shapes, major_axis):
if axis == -1:
continue
batch_size = shape[axis]
if self.batch_size is not None:
assert batch_size == self.batch_size, ("all data must have the same batch size: "
+ ("batch_size = %d, but " % self.batch_size)
+ ("%s has shape %s" % (name, shape)))
else:
self.batch_size = batch_size
self.slices = _split_input_slice(self.batch_size, self.workload)
return major_axis |
<SYSTEM_TASK:>
Bind executors on their respective devices.
<END_TASK>
<USER_TASK:>
Description:
def bind_exec(self, data_shapes, label_shapes, shared_group=None, reshape=False):
"""Bind executors on their respective devices.
Parameters
----------
data_shapes : list
label_shapes : list
shared_group : DataParallelExecutorGroup
reshape : bool
""" |
assert reshape or not self.execs
self.batch_size = None
# calculate workload and bind executors
self.data_layouts = self.decide_slices(data_shapes)
if label_shapes is not None:
# call it to make sure labels have the same batch size as data
self.label_layouts = self.decide_slices(label_shapes)
for i in range(len(self.contexts)):
data_shapes_i = self._sliced_shape(data_shapes, i, self.data_layouts)
if label_shapes is not None:
label_shapes_i = self._sliced_shape(label_shapes, i, self.label_layouts)
else:
label_shapes_i = []
if reshape:
self.execs[i] = self._default_execs[i].reshape(
allow_up_sizing=True, **dict(data_shapes_i + label_shapes_i))
else:
self.execs.append(self._bind_ith_exec(i, data_shapes_i, label_shapes_i,
shared_group))
self.data_shapes = data_shapes
self.label_shapes = label_shapes
self.data_names = [i.name for i in self.data_shapes]
if label_shapes is not None:
self.label_names = [i.name for i in self.label_shapes]
self._collect_arrays() |
<SYSTEM_TASK:>
Reshape executors.
<END_TASK>
<USER_TASK:>
Description:
def reshape(self, data_shapes, label_shapes):
"""Reshape executors.
Parameters
----------
data_shapes : list
label_shapes : list
""" |
if data_shapes == self.data_shapes and label_shapes == self.label_shapes:
return
if self._default_execs is None:
self._default_execs = [i for i in self.execs]
self.bind_exec(data_shapes, label_shapes, reshape=True) |
<SYSTEM_TASK:>
Assign, i.e. copy parameters to all the executors.
<END_TASK>
<USER_TASK:>
Description:
def set_params(self, arg_params, aux_params, allow_extra=False):
"""Assign, i.e. copy parameters to all the executors.
Parameters
----------
arg_params : dict
A dictionary of name to `NDArray` parameter mapping.
aux_params : dict
A dictionary of name to `NDArray` auxiliary variable mapping.
allow_extra : boolean, optional
Whether allow extra parameters that are not needed by symbol.
If this is True, no error will be thrown when arg_params or aux_params
contain extra parameters that is not needed by the executor.
""" |
for exec_ in self.execs:
exec_.copy_params_from(arg_params, aux_params, allow_extra_params=allow_extra) |
<SYSTEM_TASK:>
Copy data from each executor to `arg_params` and `aux_params`.
<END_TASK>
<USER_TASK:>
Description:
def get_params(self, arg_params, aux_params):
""" Copy data from each executor to `arg_params` and `aux_params`.
Parameters
----------
arg_params : dict of str to NDArray
Target parameter arrays.
aux_params : dict of str to NDArray
Target aux arrays.
Notes
-----
- This function will update the NDArrays in arg_params and aux_params in place.
""" |
for name, block in zip(self.param_names, self.param_arrays):
weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
weight.astype(arg_params[name].dtype).copyto(arg_params[name])
for name, block in zip(self.aux_names, self.aux_arrays):
weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block)
weight.astype(aux_params[name].dtype).copyto(aux_params[name]) |
<SYSTEM_TASK:>
Split `data_batch` according to workload and run forward on each devices.
<END_TASK>
<USER_TASK:>
Description:
def forward(self, data_batch, is_train=None):
"""Split `data_batch` according to workload and run forward on each devices.
Parameters
----------
data_batch : DataBatch
Or any object implementing a similar interface.
is_train : bool
The hint for the backend, indicating whether we are during training phase.
Default is `None`, then the value `self.for_training` will be used.
Returns
-------
""" |
_load_data(data_batch, self.data_arrays, self.data_layouts)
if is_train is None:
is_train = self.for_training
if isinstance(data_batch, list):
if self.label_arrays is not None and data_batch is not None and data_batch[0].label:
_load_label(data_batch, self.label_arrays, self.label_layouts)
else:
if self.label_arrays is not None and data_batch.label:
_load_label(data_batch, self.label_arrays, self.label_layouts)
for exec_ in self.execs:
exec_.forward(is_train=is_train) |
<SYSTEM_TASK:>
Get the shapes of the outputs.
<END_TASK>
<USER_TASK:>
Description:
def get_output_shapes(self):
"""Get the shapes of the outputs.""" |
outputs = self.execs[0].outputs
shapes = [out.shape for out in outputs]
concat_shapes = []
for key, the_shape, axis in zip(self.symbol.list_outputs(), shapes, self.output_layouts):
the_shape = list(the_shape)
if axis >= 0:
the_shape[axis] = self.batch_size
concat_shapes.append((key, tuple(the_shape)))
return concat_shapes |
<SYSTEM_TASK:>
Get outputs of the previous forward computation.
<END_TASK>
<USER_TASK:>
Description:
def get_outputs(self, merge_multi_context=True, begin=0, end=None):
"""Get outputs of the previous forward computation.
If begin or end is specified, return [begin, end)-th outputs,
otherwise return all outputs.
Parameters
----------
merge_multi_context : bool
Default is `True`. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A `True` value indicates that we
should merge the collected results so that they look as if they came from a single
executor.
begin : int
starting index of returned outputs in all outputs
end : int or None
ending index (excluded) of returned outputs.
Returns
-------
If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise, it
is like ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``. All the output
elements are `NDArray`.
""" |
if end is None:
end = self.num_outputs
outputs = [[exec_.outputs[i] for exec_ in self.execs]
for i in range(begin, end)]
if merge_multi_context:
outputs = _merge_multi_context(outputs, self.output_layouts)
return outputs |
<SYSTEM_TASK:>
Set value for states. Only one of states & value can be specified.
<END_TASK>
<USER_TASK:>
Description:
def set_states(self, states=None, value=None):
"""Set value for states. Only one of states & value can be specified.
Parameters
----------
states : list of list of NDArrays
source states arrays formatted like [[state1_dev1, state1_dev2],
[state2_dev1, state2_dev2]].
value : number
a single scalar value for all state arrays.
""" |
if states is not None:
assert value is None, "Only one of states & value can be specified."
_load_general(states, self.state_arrays, (0,)*len(states))
else:
assert value is not None, "At least one of states & value must be specified."
assert states is None, "Only one of states & value can be specified."
for d_dst in self.state_arrays:
for dst in d_dst:
dst[:] = value |
<SYSTEM_TASK:>
Get the gradients with respect to the inputs of the module.
<END_TASK>
<USER_TASK:>
Description:
def get_input_grads(self, merge_multi_context=True):
"""Get the gradients with respect to the inputs of the module.
Parameters
----------
merge_multi_context : bool
Defaults to ``True``. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A `True` value indicates that we
should merge the collected results so that they look as if they came from a single
executor.
Returns
-------
If `merge_multi_context` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it
is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output
elements are `NDArray`.
""" |
assert self.inputs_need_grad
if merge_multi_context:
return _merge_multi_context(self.input_grad_arrays, self.data_layouts)
return self.input_grad_arrays |
<SYSTEM_TASK:>
Run backward on all devices. A backward should be called after
<END_TASK>
<USER_TASK:>
Description:
def backward(self, out_grads=None):
"""Run backward on all devices. A backward should be called after
a call to the forward function. Backward cannot be called unless
``self.for_training`` is ``True``.
Parameters
----------
out_grads : NDArray or list of NDArray, optional
Gradient on the outputs to be propagated back.
This parameter is only needed when bind is called
on outputs that are not a loss function.
""" |
assert self.for_training, 're-bind with for_training=True to run backward'
if out_grads is None:
out_grads = []
for i, (exec_, islice) in enumerate(zip(self.execs, self.slices)):
out_grads_slice = []
for grad, axis in zip(out_grads, self.output_layouts):
if axis >= 0:
# pylint: disable=no-member
og_my_slice = nd.slice_axis(grad, axis=axis, begin=islice.start,
end=islice.stop)
out_grads_slice.append(og_my_slice.as_in_context(self.contexts[i]))
# pylint: enable=no-member
else:
out_grads_slice.append(grad.copyto(self.contexts[i]))
exec_.backward(out_grads=out_grads_slice) |
<SYSTEM_TASK:>
Accumulate the performance according to `eval_metric` on all devices
<END_TASK>
<USER_TASK:>
Description:
def update_metric(self, eval_metric, labels, pre_sliced):
"""Accumulate the performance according to `eval_metric` on all devices
by comparing outputs from [begin, end) to labels. By default use all
outputs.
Parameters
----------
eval_metric : EvalMetric
The metric used for evaluation.
labels : list of NDArray
Typically comes from `label` of a `DataBatch`.
pre_sliced : bool
Whether labels are already sliced.
begin : int
Starting index of used outputs.
end : int or None
Ending index of used outputs.
""" |
for current_exec, (texec, islice) in enumerate(zip(self.execs, self.slices)):
if not pre_sliced:
labels_slice = []
for label, axis in zip(labels, self.label_layouts):
if axis == 0:
# slicing NDArray along axis 0 can avoid copying
labels_slice.append(label[islice])
elif axis > 0:
# pylint: disable=no-member
label_my_slice = nd.slice_axis(label, axis=axis, begin=islice.start,
end=islice.stop).as_in_context(label.context)
# pylint: enable=no-member
labels_slice.append(label_my_slice)
else:
labels_slice.append(label)
else:
labels_slice = labels[current_exec]
labels_ = OrderedDict(zip(self.label_names, labels_slice))
preds = OrderedDict(zip(self.output_names, texec.outputs))
eval_metric.update_dict(labels_, preds) |
<SYSTEM_TASK:>
Internal utility function to bind the i-th executor.
<END_TASK>
<USER_TASK:>
Description:
def _bind_ith_exec(self, i, data_shapes, label_shapes, shared_group):
"""Internal utility function to bind the i-th executor.
This function utilizes simple_bind python interface.
""" |
shared_exec = None if shared_group is None else shared_group.execs[i]
context = self.contexts[i]
shared_data_arrays = self.shared_data_arrays[i]
input_shapes = dict(data_shapes)
if label_shapes is not None:
input_shapes.update(dict(label_shapes))
input_types = {x.name: x.dtype for x in data_shapes}
if label_shapes is not None:
input_types.update({x.name: x.dtype for x in label_shapes})
group2ctx = self.group2ctxs[i]
executor = self.symbol.simple_bind(ctx=context, grad_req=self.grad_req,
type_dict=input_types, shared_arg_names=self.param_names,
shared_exec=shared_exec, group2ctx=group2ctx,
shared_buffer=shared_data_arrays, **input_shapes)
self._total_exec_bytes += int(executor.debug_str().split('\n')[-3].split()[1])
return executor |
<SYSTEM_TASK:>
Get the sliced shapes for the i-th executor.
<END_TASK>
<USER_TASK:>
Description:
def _sliced_shape(self, shapes, i, major_axis):
"""Get the sliced shapes for the i-th executor.
Parameters
----------
shapes : list of (str, tuple)
The original (name, shape) pairs.
i : int
Which executor we are dealing with.
""" |
sliced_shapes = []
for desc, axis in zip(shapes, major_axis):
shape = list(desc.shape)
if axis >= 0:
shape[axis] = self.slices[i].stop - self.slices[i].start
sliced_shapes.append(DataDesc(desc.name, tuple(shape), desc.dtype, desc.layout))
return sliced_shapes |
<SYSTEM_TASK:>
Get the canonical name for a symbol.
<END_TASK>
<USER_TASK:>
Description:
def get(self, name, hint):
"""Get the canonical name for a symbol.
This is the default implementation.
If the user specifies a name,
the user-specified name will be used.
When the user does not specify a name, we automatically generate a
name based on the hint string.
Parameters
----------
name : str or None
The name specified by the user.
hint : str
A hint string, which can be used to generate name.
Returns
-------
full_name : str
A canonical name for the symbol.
""" |
if name:
return name
if hint not in self._counter:
self._counter[hint] = 0
name = '%s%d' % (hint, self._counter[hint])
self._counter[hint] += 1
return name |
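A hedged sketch of the naming behavior, assuming this `get` belongs to `mxnet.name.NameManager`:

from mxnet.name import NameManager

# Hypothetical usage: repeated hints receive increasing suffixes; explicit names pass through.
nm = NameManager()
print(nm.get(None, 'conv'))       # 'conv0'
print(nm.get(None, 'conv'))       # 'conv1'
print(nm.get('my_conv', 'conv'))  # 'my_conv'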
<SYSTEM_TASK:>
same as mx.model.load_checkpoint, but does not load symnet and converts arrays to the given context
<END_TASK>
<USER_TASK:>
Description:
def load_param(params, ctx=None):
"""same as mx.model.load_checkpoint, but do not load symnet and will convert context""" |
if ctx is None:
ctx = mx.cpu()
save_dict = mx.nd.load(params)
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v.as_in_context(ctx)
if tp == 'aux':
aux_params[name] = v.as_in_context(ctx)
return arg_params, aux_params |
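A hedged usage sketch; the file name is a placeholder for a checkpoint written by mx.model.save_checkpoint:

import mxnet as mx

# Hypothetical usage: load saved weights directly onto a GPU context.
arg_params, aux_params = load_param('model-0010.params', ctx=mx.gpu(0))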
<SYSTEM_TASK:>
Deprecated. Please use cell.unroll instead
<END_TASK>
<USER_TASK:>
Description:
def rnn_unroll(cell, length, inputs=None, begin_state=None, input_prefix='', layout='NTC'):
"""Deprecated. Please use cell.unroll instead""" |
warnings.warn('rnn_unroll is deprecated. Please call cell.unroll directly.')
return cell.unroll(length=length, inputs=inputs, begin_state=begin_state,
input_prefix=input_prefix, layout=layout) |
<SYSTEM_TASK:>
Save checkpoint for model using RNN cells.
<END_TASK>
<USER_TASK:>
Description:
def save_rnn_checkpoint(cells, prefix, epoch, symbol, arg_params, aux_params):
"""Save checkpoint for model using RNN cells.
Unpacks weight before saving.
Parameters
----------
cells : mxnet.rnn.RNNCell or list of RNNCells
The RNN cells used by this symbol.
prefix : str
Prefix of model name.
epoch : int
The epoch number of the model.
symbol : Symbol
The input symbol
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
""" |
if isinstance(cells, BaseRNNCell):
cells = [cells]
for cell in cells:
arg_params = cell.unpack_weights(arg_params)
save_checkpoint(prefix, epoch, symbol, arg_params, aux_params) |
<SYSTEM_TASK:>
Load model checkpoint from file.
<END_TASK>
<USER_TASK:>
Description:
def load_rnn_checkpoint(cells, prefix, epoch):
"""Load model checkpoint from file.
Pack weights after loading.
Parameters
----------
cells : mxnet.rnn.RNNCell or list of RNNCells
The RNN cells used by this symbol.
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- symbol will be loaded from ``prefix-symbol.json``.
- parameters will be loaded from ``prefix-epoch.params``.
""" |
sym, arg, aux = load_checkpoint(prefix, epoch)
if isinstance(cells, BaseRNNCell):
cells = [cells]
for cell in cells:
arg = cell.pack_weights(arg)
return sym, arg, aux |
<SYSTEM_TASK:>
Make a callback to checkpoint Module to prefix every epoch.
<END_TASK>
<USER_TASK:>
Description:
def do_rnn_checkpoint(cells, prefix, period=1):
"""Make a callback to checkpoint Module to prefix every epoch.
unpacks weights used by cells before saving.
Parameters
----------
cells : mxnet.rnn.RNNCell or list of RNNCells
The RNN cells used by this symbol.
prefix : str
The file prefix to checkpoint to
period : int
How many epochs to wait before checkpointing. Default is 1.
Returns
-------
callback : function
The callback function that can be passed as iter_end_callback to fit.
""" |
period = int(max(1, period))
# pylint: disable=unused-argument
def _callback(iter_no, sym=None, arg=None, aux=None):
"""The checkpoint function."""
if (iter_no + 1) % period == 0:
save_rnn_checkpoint(cells, prefix, iter_no+1, sym, arg, aux)
return _callback |
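A hedged sketch of attaching this callback during training; the cell configuration and prefix are illustrative:

import mxnet as mx

# Hypothetical usage: unpack the fused LSTM weights before every checkpoint.
cell = mx.rnn.LSTMCell(num_hidden=256)
ckpt_cb = do_rnn_checkpoint(cell, prefix='rnn_model', period=1)
# later, assuming `mod` and `train_iter` exist:
# mod.fit(train_iter, num_epoch=10, epoch_end_callback=ckpt_cb)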
<SYSTEM_TASK:>
Activates or deactivates `HybridBlock` s recursively. Has no effect on
<END_TASK>
<USER_TASK:>
Description:
def hybridize(self, active=True, **kwargs):
"""Activates or deactivates `HybridBlock` s recursively. Has no effect on
non-hybrid children.
Parameters
----------
active : bool, default True
Whether to turn hybrid on or off.
**kwargs : string
Additional flags for hybridized operator.
""" |
if self._children and all(isinstance(c, HybridBlock) for c in self._children.values()):
warnings.warn(
"All children of this Sequential layer '%s' are HybridBlocks. Consider "
"using HybridSequential for the best performance."%self.prefix, stacklevel=2)
super(Sequential, self).hybridize(active, **kwargs) |
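A hedged sketch that triggers the warning above: a plain Sequential whose children are all hybridizable blocks.

import mxnet as mx

# Hypothetical usage: all children are HybridBlocks, so hybridize() suggests HybridSequential.
net = mx.gluon.nn.Sequential()
net.add(mx.gluon.nn.Dense(64, activation='relu'), mx.gluon.nn.Dense(10))
net.initialize()
net.hybridize()  # emits the warning defined above, then hybridizes the children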
<SYSTEM_TASK:>
Reads image specified by path into numpy.ndarray
<END_TASK>
<USER_TASK:>
Description:
def read_img(path):
""" Reads image specified by path into numpy.ndarray""" |
img = cv2.resize(cv2.imread(path, 0), (80, 30)).astype(np.float32) / 255
img = np.expand_dims(img.transpose(1, 0), 0)
return img |
<SYSTEM_TASK:>
Returns a tuple of names and zero arrays for LSTM init states
<END_TASK>
<USER_TASK:>
Description:
def lstm_init_states(batch_size):
""" Returns a tuple of names and zero arrays for LSTM init states""" |
hp = Hyperparams()
init_shapes = lstm.init_states(batch_size=batch_size, num_lstm_layer=hp.num_lstm_layer, num_hidden=hp.num_hidden)
init_names = [s[0] for s in init_shapes]
init_arrays = [mx.nd.zeros(x[1]) for x in init_shapes]
return init_names, init_arrays |
<SYSTEM_TASK:>
Loads the model from checkpoint specified by prefix and epoch, binds it
<END_TASK>
<USER_TASK:>
Description:
def load_module(prefix, epoch, data_names, data_shapes):
"""Loads the model from checkpoint specified by prefix and epoch, binds it
to an executor, and sets its parameters and returns a mx.mod.Module
""" |
sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
# We don't need CTC loss for prediction, just a simple softmax will suffice.
# We get the output of the layer just before the loss layer ('pred_fc') and add softmax on top
pred_fc = sym.get_internals()['pred_fc_output']
sym = mx.sym.softmax(data=pred_fc)
mod = mx.mod.Module(symbol=sym, context=mx.cpu(), data_names=data_names, label_names=None)
mod.bind(for_training=False, data_shapes=data_shapes)
mod.set_params(arg_params, aux_params, allow_missing=False)
return mod |
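A hedged end-to-end sketch wiring `lstm_init_states`, `read_img`, and `load_module` together; the image path and the checkpoint prefix/epoch are placeholders:

import mxnet as mx

# Hypothetical usage: run the OCR network on a single image.
init_names, init_arrays = lstm_init_states(batch_size=1)
img = read_img('sample.jpg')
data_names = ['data'] + init_names
data_shapes = [('data', img.shape)] + [(n, a.shape) for n, a in zip(init_names, init_arrays)]
mod = load_module('ocr', 100, data_names, data_shapes)
mod.forward(mx.io.DataBatch(data=[mx.nd.array(img)] + init_arrays), is_train=False)
prob = mod.get_outputs()[0].asnumpy()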
<SYSTEM_TASK:>
Load library by searching possible paths.
<END_TASK>
<USER_TASK:>
Description:
def _load_lib():
"""Load libary by searching possible path.""" |
lib_path = _find_lib_path()
lib = ctypes.cdll.LoadLibrary(lib_path[0])
# DMatrix functions
lib.MXGetLastError.restype = ctypes.c_char_p
return lib |
<SYSTEM_TASK:>
Load ndarray file and return as list of numpy array.
<END_TASK>
<USER_TASK:>
Description:
def load_ndarray_file(nd_bytes):
"""Load ndarray file and return as list of numpy array.
Parameters
----------
nd_bytes : str or bytes
The internal ndarray bytes
Returns
-------
out : dict of str to numpy array or list of numpy array
The output list or dict, depending on whether the saved type is list or dict.
""" |
handle = NDListHandle()
olen = mx_uint()
nd_bytes = bytearray(nd_bytes)
ptr = (ctypes.c_char * len(nd_bytes)).from_buffer(nd_bytes)
_check_call(_LIB.MXNDListCreate(
ptr, len(nd_bytes),
ctypes.byref(handle), ctypes.byref(olen)))
keys = []
arrs = []
for i in range(olen.value):
key = ctypes.c_char_p()
cptr = mx_float_p()
pdata = ctypes.POINTER(mx_uint)()
ndim = mx_uint()
_check_call(_LIB.MXNDListGet(
handle, mx_uint(i), ctypes.byref(key),
ctypes.byref(cptr), ctypes.byref(pdata), ctypes.byref(ndim)))
shape = tuple(pdata[:ndim.value])
dbuffer = (mx_float * np.prod(shape)).from_address(ctypes.addressof(cptr.contents))
ret = np.frombuffer(dbuffer, dtype=np.float32).reshape(shape)
ret = np.array(ret, dtype=np.float32)
keys.append(py_str(key.value))
arrs.append(ret)
_check_call(_LIB.MXNDListFree(handle))
if len(keys) == 0 or len(keys[0]) == 0:
return arrs
else:
return {keys[i] : arrs[i] for i in range(len(keys))} |
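A hedged usage sketch; 'mean.nd' is a placeholder for any file written by mx.nd.save:

# Hypothetical usage: read the raw bytes of a saved ndarray file and decode it.
with open('mean.nd', 'rb') as f:
    arrays = load_ndarray_file(f.read())  # dict of name -> numpy array, or a list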
<SYSTEM_TASK:>
Perform forward to get the output.
<END_TASK>
<USER_TASK:>
Description:
def forward(self, **kwargs):
"""Perform forward to get the output.
Parameters
----------
**kwargs
Keyword arguments of input variable name to data.
Examples
--------
>>> predictor.forward(data=mydata)
>>> out = predictor.get_output(0)
""" |
for k, v in kwargs.items():
if not isinstance(v, np.ndarray):
raise ValueError("Expect numpy ndarray as input")
v = np.asarray(v, dtype=np.float32, order='C')
_check_call(_LIB.MXPredSetInput(
self.handle, c_str(k),
v.ctypes.data_as(mx_float_p),
mx_uint(v.size)))
_check_call(_LIB.MXPredForward(self.handle)) |
<SYSTEM_TASK:>
Change the input shape of the predictor.
<END_TASK>
<USER_TASK:>
Description:
def reshape(self, input_shapes):
"""Change the input shape of the predictor.
Parameters
----------
input_shapes : dict of str to tuple
The new shape of input data.
Examples
--------
>>> predictor.reshape({'data':data_shape_tuple})
""" |
indptr = [0]
sdata = []
keys = []
for k, v in input_shapes.items():
if not isinstance(v, tuple):
raise ValueError("Expect input_shapes to be dict str->tuple")
keys.append(c_str(k))
sdata.extend(v)
indptr.append(len(sdata))
new_handle = PredictorHandle()
_check_call(_LIB.MXPredReshape(
mx_uint(len(indptr) - 1),
c_array(ctypes.c_char_p, keys),
c_array(mx_uint, indptr),
c_array(mx_uint, sdata),
self.handle,
ctypes.byref(new_handle)))
_check_call(_LIB.MXPredFree(self.handle))
self.handle = new_handle |
<SYSTEM_TASK:>
Get the index-th output.
<END_TASK>
<USER_TASK:>
Description:
def get_output(self, index):
"""Get the index-th output.
Parameters
----------
index : int
The index of output.
Returns
-------
out : numpy array.
The output array.
""" |
pdata = ctypes.POINTER(mx_uint)()
ndim = mx_uint()
_check_call(_LIB.MXPredGetOutputShape(
self.handle, index,
ctypes.byref(pdata),
ctypes.byref(ndim)))
shape = tuple(pdata[:ndim.value])
data = np.empty(shape, dtype=np.float32)
_check_call(_LIB.MXPredGetOutput(
self.handle, mx_uint(index),
data.ctypes.data_as(mx_float_p),
mx_uint(data.size)))
return data |
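`forward`, `reshape`, and `get_output` above are methods of the lightweight predictor wrapper; a hedged sketch of the surrounding workflow, with placeholder file names and input shape:

import numpy as np

# Hypothetical usage: build a Predictor from exported symbol/param files and run one input.
symbol_json = open('model-symbol.json').read()        # symbol as a JSON string
param_bytes = open('model-0001.params', 'rb').read()  # raw parameter bytes
pred = Predictor(symbol_json, param_bytes, {'data': (1, 3, 224, 224)})
pred.forward(data=np.random.rand(1, 3, 224, 224).astype(np.float32))
out = pred.get_output(0)  # numpy array holding the first output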
<SYSTEM_TASK:>
Begin an episode of a game instance. We can play the game for a maximum of
<END_TASK>
<USER_TASK:>
Description:
def begin_episode(self, max_episode_step=DEFAULT_MAX_EPISODE_STEP):
"""
Begin an episode of a game instance. We can play the game for a maximum of
`max_episode_step` steps and after that, we are forced to restart.
""" |
if self.episode_step > self.max_episode_step or self.ale.game_over():
self.start()
else:
for i in range(self.screen_buffer_length):
self.ale.act(0)
self.ale.getScreenGrayscale(self.screen_buffer[i % self.screen_buffer_length, :, :])
self.max_episode_step = max_episode_step
self.start_lives = self.ale.lives()
self.episode_reward = 0
self.episode_step = 0 |
<SYSTEM_TASK:>
Unrolls the recurrent cell for one time step.
<END_TASK>
<USER_TASK:>
Description:
def forward(self, inputs, states):
"""Unrolls the recurrent cell for one time step.
Parameters
----------
inputs : sym.Variable
Input symbol, 2D, of shape (batch_size * num_units).
states : list of sym.Variable
RNN state from previous step or the output of begin_state().
Returns
-------
output : Symbol
Symbol corresponding to the output from the RNN when unrolling
for a single time step.
states : list of Symbol
The new state of this RNN after this unrolling.
The type of this symbol is same as the output of `begin_state()`.
This can be used as an input state to the next time step
of this RNN.
See Also
--------
begin_state: This function can provide the states for the first time step.
unroll: This function unrolls an RNN for a given number of (>=1) time steps.
""" |
# pylint: disable= arguments-differ
self._counter += 1
return super(RecurrentCell, self).forward(inputs, states) |
<SYSTEM_TASK:>
Check that all input names are in symbol's arguments.
<END_TASK>
<USER_TASK:>
Description:
def _check_input_names(symbol, names, typename, throw):
"""Check that all input names are in symbol's arguments.""" |
args = symbol.list_arguments()
for name in names:
if name in args:
continue
candidates = [arg for arg in args if
not arg.endswith('_weight') and
not arg.endswith('_bias') and
not arg.endswith('_gamma') and
not arg.endswith('_beta')]
msg = "\033[91mYou created Module with Module(..., %s_names=%s) but " \
"input with name '%s' is not found in symbol.list_arguments(). " \
"Did you mean one of:\n\t%s\033[0m"%(
typename, str(names), name, '\n\t'.join(candidates))
if throw:
raise ValueError(msg)
else:
warnings.warn(msg) |
<SYSTEM_TASK:>
Check that input names matches input data descriptors.
<END_TASK>
<USER_TASK:>
Description:
def _check_names_match(data_names, data_shapes, name, throw):
"""Check that input names matches input data descriptors.""" |
actual = [x[0] for x in data_shapes]
if sorted(data_names) != sorted(actual):
msg = "Data provided by %s_shapes don't match names specified by %s_names (%s vs. %s)"%(
name, name, str(data_shapes), str(data_names))
if throw:
raise ValueError(msg)
else:
warnings.warn(msg) |
<SYSTEM_TASK:>
parse data_attrs into DataDesc format and check that names match
<END_TASK>
<USER_TASK:>
Description:
def _parse_data_desc(data_names, label_names, data_shapes, label_shapes):
"""parse data_attrs into DataDesc format and check that names match""" |
data_shapes = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in data_shapes]
_check_names_match(data_names, data_shapes, 'data', True)
if label_shapes is not None:
label_shapes = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in label_shapes]
_check_names_match(label_names, label_shapes, 'label', False)
else:
_check_names_match(label_names, [], 'label', False)
return data_shapes, label_shapes |
<SYSTEM_TASK:>
A convenient function that calls both ``forward`` and ``backward``.
<END_TASK>
<USER_TASK:>
Description:
def forward_backward(self, data_batch):
"""A convenient function that calls both ``forward`` and ``backward``.""" |
self.forward(data_batch, is_train=True)
self.backward() |
<SYSTEM_TASK:>
Runs prediction on ``eval_data`` and evaluates the performance according to
<END_TASK>
<USER_TASK:>
Description:
def score(self, eval_data, eval_metric, num_batch=None, batch_end_callback=None,
score_end_callback=None,
reset=True, epoch=0, sparse_row_id_fn=None):
"""Runs prediction on ``eval_data`` and evaluates the performance according to
the given ``eval_metric``.
Check out the `Module Tutorial <http://mxnet.io/tutorials/basic/module.html>`_ to see
an end-to-end use case.
Parameters
----------
eval_data : DataIter
Evaluation data to run prediction on.
eval_metric : EvalMetric or list of EvalMetrics
Evaluation metric to use.
num_batch : int
Number of batches to run. Defaults to ``None``, indicating run until the `DataIter`
finishes.
batch_end_callback : function
Could also be a list of functions.
reset : bool
Defaults to ``True``. Indicates whether we should reset `eval_data` before starting
evaluating.
epoch : int
Defaults to 0. For compatibility, this will be passed to callbacks (if any).
During training, this will correspond to the training epoch number.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
Examples
--------
>>> # An example of using score for prediction.
>>> # Evaluate accuracy on val_dataiter
>>> metric = mx.metric.Accuracy()
>>> mod.score(val_dataiter, metric)
>>> mod.score(val_dataiter, ['mse', 'acc'])
""" |
assert self.binded and self.params_initialized
if reset:
eval_data.reset()
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
eval_metric.reset()
actual_num_batch = 0
for nbatch, eval_batch in enumerate(eval_data):
if num_batch is not None and nbatch == num_batch:
break
self.prepare(eval_batch, sparse_row_id_fn=sparse_row_id_fn)
self.forward(eval_batch, is_train=False)
if isinstance(eval_batch, list):
self.update_metric(eval_metric, [eb.label for eb in eval_batch], pre_sliced=True)
else:
self.update_metric(eval_metric, eval_batch.label)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
for callback in _as_list(batch_end_callback):
callback(batch_end_params)
actual_num_batch += 1
if score_end_callback:
params = BatchEndParam(epoch=epoch,
nbatch=actual_num_batch,
eval_metric=eval_metric,
locals=locals())
for callback in _as_list(score_end_callback):
callback(params)
return eval_metric.get_name_value() |
<SYSTEM_TASK:>
Iterates over predictions.
<END_TASK>
<USER_TASK:>
Description:
def iter_predict(self, eval_data, num_batch=None, reset=True, sparse_row_id_fn=None):
"""Iterates over predictions.
Examples
--------
>>> for pred, i_batch, batch in module.iter_predict(eval_data):
... # pred is a list of outputs from the module
... # i_batch is an integer
... # batch is the data batch from the data iterator
Parameters
----------
eval_data : DataIter
Evaluation data to run prediction on.
num_batch : int
Default is ``None``, indicating running all the batches in the data iterator.
reset : bool
Default is ``True``, indicating whether we should reset the data iter before start
doing prediction.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
""" |
assert self.binded and self.params_initialized
if reset:
eval_data.reset()
for nbatch, eval_batch in enumerate(eval_data):
if num_batch is not None and nbatch == num_batch:
break
self.prepare(eval_batch, sparse_row_id_fn=sparse_row_id_fn)
self.forward(eval_batch, is_train=False)
pad = eval_batch.pad
outputs = [out[0:out.shape[0]-pad] for out in self.get_outputs()]
yield (outputs, nbatch, eval_batch) |
<SYSTEM_TASK:>
Runs prediction and collects the outputs.
<END_TASK>
<USER_TASK:>
Description:
def predict(self, eval_data, num_batch=None, merge_batches=True, reset=True,
always_output_list=False, sparse_row_id_fn=None):
"""Runs prediction and collects the outputs.
When `merge_batches` is ``True`` (by default), the return value will be a list
``[out1, out2, out3]``, where each element is formed by concatenating the outputs for
all the mini-batches. When `always_output_list` is ``False`` (as by default),
then in the case of a single output, `out1` is returned instead of ``[out1]``.
When `merge_batches` is ``False``, the return value will be a nested list like
``[[out1_batch1, out2_batch1], [out1_batch2], ...]``. This mode is useful because
in some cases (e.g. bucketing), the module does not necessarily produce the same
number of outputs.
The objects in the results have type `NDArray`. If you need to work with a numpy array,
just call ``.asnumpy()`` on each `NDArray`.
Parameters
----------
eval_data : DataIter or NDArray or numpy array
Evaluation data to run prediction on.
num_batch : int
Defaults to ``None``, indicating that all batches in the data iterator are run.
merge_batches : bool
Defaults to ``True``, see above for return values.
reset : bool
Defaults to ``True``, indicates whether we should reset the data iter before
doing prediction.
always_output_list : bool
Defaults to ``False``, see above for return values.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
Returns
-------
list of NDArray or list of list of NDArray
Prediction results.
Examples
--------
>>> # An example of using `predict` for prediction.
>>> # Predict on the first 10 batches of val_dataiter
>>> mod.predict(eval_data=val_dataiter, num_batch=10)
""" |
assert self.binded and self.params_initialized
if isinstance(eval_data, (ndarray.NDArray, np.ndarray)):
if isinstance(eval_data, np.ndarray):
eval_data = ndarray.array(eval_data)
self.forward(DataBatch([eval_data]))
return self.get_outputs()[0]
if not isinstance(eval_data, DataIter):
raise ValueError('eval_data must be of type NDArray or DataIter')
if reset:
eval_data.reset()
output_list = []
for nbatch, eval_batch in enumerate(eval_data):
if num_batch is not None and nbatch == num_batch:
break
self.prepare(eval_batch, sparse_row_id_fn=sparse_row_id_fn)
self.forward(eval_batch, is_train=False)
pad = eval_batch.pad
outputs = [out[0:out.shape[0]-pad].copy() for out in self.get_outputs()]
output_list.append(outputs)
if len(output_list) == 0:
return output_list
if merge_batches:
num_outputs = len(output_list[0])
for out in output_list:
assert len(out) == num_outputs, \
'Cannot merge batches, as num of outputs is not the same ' + \
'in mini-batches. Maybe bucketing is used?'
output_list2 = [ndarray.concatenate([out[i] for out in output_list])
for i in range(num_outputs)]
if num_outputs == 1 and not always_output_list:
return output_list2[0]
return output_list2
return output_list |
<SYSTEM_TASK:>
Loads model parameters from file.
<END_TASK>
<USER_TASK:>
Description:
def load_params(self, fname):
"""Loads model parameters from file.
Parameters
----------
fname : str
Path to input param file.
Examples
--------
>>> # An example of loading module parameters.
>>> mod.load_params('myfile')
""" |
save_dict = ndarray.load(fname)
arg_params = {}
aux_params = {}
for k, value in save_dict.items():
arg_type, name = k.split(':', 1)
if arg_type == 'arg':
arg_params[name] = value
elif arg_type == 'aux':
aux_params[name] = value
else:
raise ValueError("Invalid param file " + fname)
self.set_params(arg_params, aux_params) |
<SYSTEM_TASK:>
Find MXNet included header files.
<END_TASK>
<USER_TASK:>
Description:
def find_include_path():
"""Find MXNet included header files.
Returns
-------
incl_path : string
Path to the header files.
""" |
incl_from_env = os.environ.get('MXNET_INCLUDE_PATH')
if incl_from_env:
if os.path.isdir(incl_from_env):
if not os.path.isabs(incl_from_env):
logging.warning("MXNET_INCLUDE_PATH should be an absolute path, instead of: %s",
incl_from_env)
else:
return incl_from_env
else:
logging.warning("MXNET_INCLUDE_PATH '%s' doesn't exist", incl_from_env)
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
# include path in pip package
pip_incl_path = os.path.join(curr_path, 'include/')
if os.path.isdir(pip_incl_path):
return pip_incl_path
else:
# include path if build from source
src_incl_path = os.path.join(curr_path, '../../include/')
if os.path.isdir(src_incl_path):
return src_incl_path
else:
raise RuntimeError('Cannot find the MXNet include path in either ' + pip_incl_path +
' or ' + src_incl_path + '\n') |
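A minimal usage sketch, assuming the function is importable from this module (recent MXNet releases expose it as mxnet.libinfo.find_include_path, but treat that path as an assumption):
incl_path = find_include_path()
print("Compiling against MXNet headers in:", incl_path)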
<SYSTEM_TASK:>
Generate a greyscale captcha image representing number string
<END_TASK>
<USER_TASK:>
Description:
def image(self, captcha_str):
"""Generate a greyscale captcha image representing number string
Parameters
----------
captcha_str: str
string of characters for the captcha image
Returns
-------
numpy.ndarray
Generated greyscale image in np.ndarray float type with values normalized to [0, 1]
""" |
img = self.captcha.generate(captcha_str)
img = np.fromstring(img.getvalue(), dtype='uint8')
img = cv2.imdecode(img, cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img, (self.h, self.w))
img = img.transpose(1, 0)
img = np.multiply(img, 1 / 255.0)
return img |
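A hypothetical usage sketch; the enclosing class name and constructor arguments below are assumptions, only the image() call mirrors the method above:
gen = DigitCaptcha(font_paths=['/path/to/font.ttf'], h=30, w=80)  # hypothetical class and args
img = gen.image('4587')
print(img.shape, img.dtype, img.max())  # greyscale float array with values in [0, 1]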
<SYSTEM_TASK:>
Registers a new optimizer.
<END_TASK>
<USER_TASK:>
Description:
def register(klass):
"""Registers a new optimizer.
Once an optimizer is registered, we can create an instance of this
optimizer with `create_optimizer` later.
Examples
--------
>>> @mx.optimizer.Optimizer.register
... class MyOptimizer(mx.optimizer.Optimizer):
... pass
>>> optim = mx.optimizer.Optimizer.create_optimizer('MyOptimizer')
>>> print(type(optim))
<class '__main__.MyOptimizer'>
""" |
assert(isinstance(klass, type))
name = klass.__name__.lower()
if name in Optimizer.opt_registry:
warnings.warn('WARNING: New optimizer %s.%s is overriding '
'existing optimizer %s.%s' %
(klass.__module__, klass.__name__,
Optimizer.opt_registry[name].__module__,
Optimizer.opt_registry[name].__name__))
Optimizer.opt_registry[name] = klass
return klass |
<SYSTEM_TASK:>
Instantiates an optimizer with a given name and kwargs.
<END_TASK>
<USER_TASK:>
Description:
def create_optimizer(name, **kwargs):
"""Instantiates an optimizer with a given name and kwargs.
.. note:: We can use the alias `create` for ``Optimizer.create_optimizer``.
Parameters
----------
name: str
Name of the optimizer. Should be the name
of a subclass of Optimizer. Case insensitive.
kwargs: dict
Parameters for the optimizer.
Returns
-------
Optimizer
An instantiated optimizer.
Examples
--------
>>> sgd = mx.optimizer.Optimizer.create_optimizer('sgd')
>>> type(sgd)
<class 'mxnet.optimizer.SGD'>
>>> adam = mx.optimizer.create('adam', learning_rate=.1)
>>> type(adam)
<class 'mxnet.optimizer.Adam'>
""" |
if name.lower() in Optimizer.opt_registry:
return Optimizer.opt_registry[name.lower()](**kwargs)
else:
raise ValueError('Cannot find optimizer %s' % name) |
<SYSTEM_TASK:>
Creates auxiliary state for a given weight, including FP32 high
<END_TASK>
<USER_TASK:>
Description:
def create_state_multi_precision(self, index, weight):
"""Creates auxiliary state for a given weight, including FP32 high
precision copy if original weight is FP16.
This method is provided to perform automatic mixed precision training
for optimizers that do not support it themselves.
Parameters
----------
index : int
An unique index to identify the weight.
weight : NDArray
The weight.
Returns
-------
state : any obj
The state associated with the weight.
""" |
weight_master_copy = None
if self.multi_precision and weight.dtype == numpy.float16:
weight_master_copy = weight.astype(numpy.float32)
return (weight_master_copy,) + (self.create_state(index, weight_master_copy),)
if weight.dtype == numpy.float16 and not self.multi_precision:
warnings.warn("Accumulating with float16 in optimizer can lead to "
"poor accuracy or slow convergence. "
"Consider using multi_precision=True option of the "
"optimizer")
return self.create_state(index, weight) |
<SYSTEM_TASK:>
Updates the given parameter using the corresponding gradient and state.
<END_TASK>
<USER_TASK:>
Description:
def update_multi_precision(self, index, weight, grad, state):
"""Updates the given parameter using the corresponding gradient and state.
Mixed precision version.
Parameters
----------
index : int
The unique index of the parameter into the individual learning
rates and weight decays. Learning rates and weight decay
may be set via `set_lr_mult()` and `set_wd_mult()`, respectively.
weight : NDArray
The parameter to be updated.
grad : NDArray
The gradient of the objective with respect to this parameter.
state : any obj
The state returned by `create_state()`.
""" |
if self.multi_precision and weight.dtype == numpy.float16:
# Wrapper for mixed precision
weight_master_copy = state[0]
original_state = state[1]
grad32 = grad.astype(numpy.float32)
self.update(index, weight_master_copy, grad32, original_state)
cast(weight_master_copy, dtype=weight.dtype, out=weight)
else:
self.update(index, weight, grad, state) |
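A hedged end-to-end sketch of the mixed-precision path using the stock SGD optimizer; the shapes and hyper-parameters are arbitrary:
import mxnet as mx
import numpy as np

opt = mx.optimizer.SGD(learning_rate=0.1, multi_precision=True)
weight = mx.nd.ones((2, 2), dtype=np.float16)
grad = mx.nd.ones((2, 2), dtype=np.float16) * 0.5
state = opt.create_state_multi_precision(0, weight)  # (fp32 master copy, optimizer state)
opt.update_multi_precision(0, weight, grad, state)   # update runs in fp32, result cast back to fp16
print(weight.asnumpy())                              # roughly 0.95 everywhere for plain SGD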
<SYSTEM_TASK:>
Sets an individual learning rate multiplier for each parameter.
<END_TASK>
<USER_TASK:>
Description:
def set_lr_mult(self, args_lr_mult):
"""Sets an individual learning rate multiplier for each parameter.
If you specify a learning rate multiplier for a parameter, then
the learning rate for the parameter will be set as the product of
the global learning rate `self.lr` and its multiplier.
.. note:: The default learning rate multiplier of a `Variable`
can be set with `lr_mult` argument in the constructor.
Parameters
----------
args_lr_mult : dict of str/int to float
For each of its key-value entries, the learning rate multiplier for the
parameter specified in the key will be set as the given value.
You can specify the parameter with either its name or its index.
If you use the name, you should pass `sym` in the constructor,
and the name you specified in the key of `args_lr_mult` should match
the name of the parameter in `sym`. If you use the index, it should
correspond to the index of the parameter used in the `update` method.
Specifying a parameter by its index is only supported for backward
compatibility, and we recommend to use the name instead.
""" |
self.lr_mult = {}
if self.sym_info:
attr, arg_names = self.sym_info
for name in arg_names:
if name in attr and '__lr_mult__' in attr[name]:
self.lr_mult[name] = float(attr[name]['__lr_mult__'])
self.lr_mult.update(args_lr_mult) |
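A brief usage sketch; the parameter names are assumptions and must match names known to the optimizer (via the symbol attributes or param_idx2name):
import mxnet as mx

opt = mx.optimizer.SGD(learning_rate=0.1)
opt.set_lr_mult({'fc1_weight': 0.0,    # freeze fc1
                 'fc2_weight': 2.0})   # train fc2 at twice the global rate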
<SYSTEM_TASK:>
Sets an individual weight decay multiplier for each parameter.
<END_TASK>
<USER_TASK:>
Description:
def set_wd_mult(self, args_wd_mult):
"""Sets an individual weight decay multiplier for each parameter.
By default, if `param_idx2name` was provided in the
constructor, the weight decay multiplier is set to 0 for all
parameters whose names don't end with ``_weight`` or
``_gamma``.
.. note:: The default weight decay multiplier for a `Variable`
can be set with its `wd_mult` argument in the constructor.
Parameters
----------
args_wd_mult : dict of string/int to float
For each of its key-value entries, the weight decay multiplier for the
parameter specified in the key will be set as the given value.
You can specify the parameter with either its name or its index.
If you use the name, you should pass `sym` in the constructor,
and the name you specified in the key of `args_wd_mult` should match
the name of the parameter in `sym`. If you use the index, it should
correspond to the index of the parameter used in the `update` method.
Specifying a parameter by its index is only supported for backward
compatibility, and we recommend to use the name instead.
""" |
self.wd_mult = {}
for n in self.idx2name.values():
if not (n.endswith('_weight') or n.endswith('_gamma')):
self.wd_mult[n] = 0.0
if self.sym_info:
attr, arg_names = self.sym_info
for name in arg_names:
if name in attr and '__wd_mult__' in attr[name]:
self.wd_mult[name] = float(attr[name]['__wd_mult__'])
self.wd_mult.update(args_wd_mult) |
<SYSTEM_TASK:>
Sets the number of the currently handled device.
<END_TASK>
<USER_TASK:>
Description:
def _set_current_context(self, device_id):
"""Sets the number of the currently handled device.
Parameters
----------
device_id : int
The number of the current device.
""" |
if device_id not in self._all_index_update_counts:
self._all_index_update_counts[device_id] = {}
self._index_update_count = self._all_index_update_counts[device_id] |
<SYSTEM_TASK:>
Gets the learning rates given the indices of the weights.
<END_TASK>
<USER_TASK:>
Description:
def _get_lrs(self, indices):
"""Gets the learning rates given the indices of the weights.
Parameters
----------
indices : list of int
Indices corresponding to weights.
Returns
-------
lrs : list of float
Learning rates for those indices.
""" |
if self.lr_scheduler is not None:
lr = self.lr_scheduler(self.num_update)
else:
lr = self.lr
lrs = [lr for _ in indices]
for i, index in enumerate(indices):
if index in self.param_dict:
lrs[i] *= self.param_dict[index].lr_mult
elif index in self.lr_mult:
lrs[i] *= self.lr_mult[index]
elif index in self.idx2name:
lrs[i] *= self.lr_mult.get(self.idx2name[index], 1.0)
return lrs |
<SYSTEM_TASK:>
Gets updater states.
<END_TASK>
<USER_TASK:>
Description:
def get_states(self, dump_optimizer=False):
"""Gets updater states.
Parameters
----------
dump_optimizer : bool, default False
Whether to also save the optimizer itself. This would also save optimizer
information such as learning rate and weight decay schedules.
""" |
return pickle.dumps((self.states, self.optimizer) if dump_optimizer else self.states) |
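A sketch of persisting updater state to disk; mx.optimizer.get_updater wraps an optimizer in an Updater, and the file name is an assumption:
import mxnet as mx

updater = mx.optimizer.get_updater(mx.optimizer.SGD(learning_rate=0.1))
updater(0, mx.nd.ones((2,)), mx.nd.ones((2,)))      # one update so there is state to dump
blob = updater.get_states(dump_optimizer=False)     # pickled bytes
with open('updater.states', 'wb') as f:
    f.write(blob)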
<SYSTEM_TASK:>
Read from frames
<END_TASK>
<USER_TASK:>
Description:
def from_frames(self, path):
"""
Read from frames
""" |
frames_path = sorted([os.path.join(path, x) for x in os.listdir(path)])
frames = [ndimage.imread(frame_path) for frame_path in frames_path]
self.handle_type(frames)
return self |
<SYSTEM_TASK:>
Read from videos
<END_TASK>
<USER_TASK:>
Description:
def from_video(self, path):
"""
Read from videos
""" |
frames = self.get_video_frames(path)
self.handle_type(frames)
return self |
<SYSTEM_TASK:>
Preprocess from frames using face detector
<END_TASK>
<USER_TASK:>
Description:
def process_frames_face(self, frames):
"""
Preprocess from frames using face detector
""" |
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(self.face_predictor_path)
mouth_frames = self.get_frames_mouth(detector, predictor, frames)
self.face = np.array(frames)
self.mouth = np.array(mouth_frames)
if mouth_frames[0] is not None:
self.set_data(mouth_frames) |
<SYSTEM_TASK:>
Preprocess from frames using mouth detector
<END_TASK>
<USER_TASK:>
Description:
def process_frames_mouth(self, frames):
"""
Preprocess from frames using mouth detector
""" |
self.face = np.array(frames)
self.mouth = np.array(frames)
self.set_data(frames) |
<SYSTEM_TASK:>
Get frames using mouth crop
<END_TASK>
<USER_TASK:>
Description:
def get_frames_mouth(self, detector, predictor, frames):
"""
Get frames using mouth crop
""" |
mouth_width = 100
mouth_height = 50
horizontal_pad = 0.19
normalize_ratio = None
mouth_frames = []
for frame in frames:
dets = detector(frame, 1)
shape = None
for det in dets:
shape = predictor(frame, det)
i = -1
if shape is None: # Detector doesn't detect face, just return None
return [None]
mouth_points = []
for part in shape.parts():
i += 1
if i < 48: # Only take mouth region
continue
mouth_points.append((part.x, part.y))
np_mouth_points = np.array(mouth_points)
mouth_centroid = np.mean(np_mouth_points[:, -2:], axis=0)
if normalize_ratio is None:
mouth_left = np.min(np_mouth_points[:, :-1]) * (1.0 - horizontal_pad)
mouth_right = np.max(np_mouth_points[:, :-1]) * (1.0 + horizontal_pad)
normalize_ratio = mouth_width / float(mouth_right - mouth_left)
new_img_shape = (int(frame.shape[0] * normalize_ratio),
int(frame.shape[1] * normalize_ratio))
resized_img = imresize(frame, new_img_shape)
mouth_centroid_norm = mouth_centroid * normalize_ratio
mouth_l = int(mouth_centroid_norm[0] - mouth_width / 2)
mouth_r = int(mouth_centroid_norm[0] + mouth_width / 2)
mouth_t = int(mouth_centroid_norm[1] - mouth_height / 2)
mouth_b = int(mouth_centroid_norm[1] + mouth_height / 2)
mouth_crop_image = resized_img[mouth_t:mouth_b, mouth_l:mouth_r]
mouth_frames.append(mouth_crop_image)
return mouth_frames |
<SYSTEM_TASK:>
Not necessary in practice
<END_TASK>
<USER_TASK:>
Description:
def imagenet_clamp_batch(batch, low, high):
""" Not necessary in practice """ |
F.clip(batch[:, 0, :, :], low - 123.680, high - 123.680)
F.clip(batch[:, 1, :, :], low - 116.779, high - 116.779)
F.clip(batch[:, 2, :, :], low - 103.939, high - 103.939)
<SYSTEM_TASK:>
Function to evaluate accuracy of any data iterator passed to it as an argument
<END_TASK>
<USER_TASK:>
Description:
def evaluate_accuracy(data_iterator, net):
"""Function to evaluate accuracy of any data iterator passed to it as an argument""" |
acc = mx.metric.Accuracy()
for data, label in data_iterator:
output = net(data)
predictions = nd.argmax(output, axis=1)
predictions = predictions.reshape((-1, 1))
acc.update(preds=predictions, labels=label)
return acc.get()[1] |
<SYSTEM_TASK:>
Set size limit on bulk execution.
<END_TASK>
<USER_TASK:>
Description:
def set_bulk_size(size):
"""Set size limit on bulk execution.
Bulk execution bundles many operators to run together.
This can improve performance when running a lot of small
operators sequentially.
Parameters
----------
size : int
Maximum number of operators that can be bundled in a bulk.
Returns
-------
int
Previous bulk size.
""" |
prev = ctypes.c_int()
check_call(_LIB.MXEngineSetBulkSize(
ctypes.c_int(size), ctypes.byref(prev)))
return prev.value |
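A minimal sketch of the save-and-restore pattern the return value enables:
prev = set_bulk_size(64)   # bundle up to 64 small operators per bulk
# ... run a sequence of many small operators ...
set_bulk_size(prev)        # restore the previous limit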
<SYSTEM_TASK:>
calculate LM score of child beam by taking score from parent beam and bigram probability of last two chars
<END_TASK>
<USER_TASK:>
Description:
def applyLM(parentBeam, childBeam, classes, lm):
"""
calculate LM score of child beam by taking score from parent beam and bigram probability of last two chars
""" |
if lm and not childBeam.lmApplied:
c1 = classes[parentBeam.labeling[-1] if parentBeam.labeling else classes.index(' ')] # first char
c2 = classes[childBeam.labeling[-1]] # second char
lmFactor = 0.01 # influence of language model
bigramProb = lm.getCharBigram(c1, c2) ** lmFactor # probability of seeing first and second char next to each other
childBeam.prText = parentBeam.prText * bigramProb # probability of char sequence
childBeam.lmApplied = True |
<SYSTEM_TASK:>
add beam if it does not yet exist
<END_TASK>
<USER_TASK:>
Description:
def addBeam(beamState, labeling):
"""
add beam if it does not yet exist
""" |
if labeling not in beamState.entries:
beamState.entries[labeling] = BeamEntry() |
<SYSTEM_TASK:>
Beam search as described in the papers by Hwang et al. and Graves et al.
<END_TASK>
<USER_TASK:>
Description:
def ctcBeamSearch(mat, classes, lm, k, beamWidth):
"""
Beam search as described in the papers by Hwang et al. and Graves et al.
""" |
blankIdx = len(classes)
maxT, maxC = mat.shape
# initialise beam state
last = BeamState()
labeling = ()
last.entries[labeling] = BeamEntry()
last.entries[labeling].prBlank = 1
last.entries[labeling].prTotal = 1
# go over all time-steps
for t in range(maxT):
curr = BeamState()
# get beam-labelings of best beams
bestLabelings = last.sort()[0:beamWidth]
# go over best beams
for labeling in bestLabelings:
# probability of paths ending with a non-blank
prNonBlank = 0
# in case of non-empty beam
if labeling:
# probability of paths with repeated last char at the end
try:
prNonBlank = last.entries[labeling].prNonBlank * mat[t, labeling[-1]]
except FloatingPointError:
prNonBlank = 0
# probability of paths ending with a blank
prBlank = (last.entries[labeling].prTotal) * mat[t, blankIdx]
# add beam at current time-step if needed
addBeam(curr, labeling)
# fill in data
curr.entries[labeling].labeling = labeling
curr.entries[labeling].prNonBlank += prNonBlank
curr.entries[labeling].prBlank += prBlank
curr.entries[labeling].prTotal += prBlank + prNonBlank
curr.entries[labeling].prText = last.entries[labeling].prText # beam-labeling unchanged, so the LM score carries over from the previous time-step
curr.entries[labeling].lmApplied = True # LM already applied at previous time-step for this beam-labeling
# extend current beam-labeling
for c in range(maxC - 1):
# add new char to current beam-labeling
newLabeling = labeling + (c,)
# if new labeling contains duplicate char at the end, only consider paths ending with a blank
if labeling and labeling[-1] == c:
prNonBlank = mat[t, c] * last.entries[labeling].prBlank
else:
prNonBlank = mat[t, c] * last.entries[labeling].prTotal
# add beam at current time-step if needed
addBeam(curr, newLabeling)
# fill in data
curr.entries[newLabeling].labeling = newLabeling
curr.entries[newLabeling].prNonBlank += prNonBlank
curr.entries[newLabeling].prTotal += prNonBlank
# apply LM
applyLM(curr.entries[labeling], curr.entries[newLabeling], classes, lm)
# set new beam state
last = curr
# normalise LM scores according to beam-labeling-length
last.norm()
# sort by probability
bestLabelings = last.sort()[:k] # get most probable labeling
output = []
for bestLabeling in bestLabelings:
# map labels to chars
res = ''
for l in bestLabeling:
res += classes[l]
output.append(res)
return output |
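A toy sketch of the decoder on a 2-step probability matrix, assuming the BeamState/BeamEntry helpers defined alongside this function; the alphabet is 'ab' and the blank occupies the last column:
import numpy as np

mat = np.array([[0.4, 0.0, 0.6],   # columns: 'a', 'b', blank
                [0.4, 0.0, 0.6]])
print(ctcBeamSearch(mat, 'ab', None, k=1, beamWidth=2))  # expected: ['a'], the classic case where beam search beats greedy decoding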
<SYSTEM_TASK:>
return beam-labelings, sorted by probability
<END_TASK>
<USER_TASK:>
Description:
def sort(self):
"""
return beam-labelings, sorted by probability
""" |
beams = [v for (_, v) in self.entries.items()]
sortedBeams = sorted(beams, reverse=True, key=lambda x: x.prTotal*x.prText)
return [x.labeling for x in sortedBeams] |
<SYSTEM_TASK:>
The localisation network in lenet-stn; it improves accuracy by more than 1%
<END_TASK>
<USER_TASK:>
Description:
def get_loc(data, attr={'lr_mult':'0.01'}):
"""
The localisation network in lenet-stn; it improves accuracy by more than 1%
when num-epoch >= 15.
""" |
loc = mx.symbol.Convolution(data=data, num_filter=30, kernel=(5, 5), stride=(2,2))
loc = mx.symbol.Activation(data = loc, act_type='relu')
loc = mx.symbol.Pooling(data=loc, kernel=(2, 2), stride=(2, 2), pool_type='max')
loc = mx.symbol.Convolution(data=loc, num_filter=60, kernel=(3, 3), stride=(1,1), pad=(1, 1))
loc = mx.symbol.Activation(data = loc, act_type='relu')
loc = mx.symbol.Pooling(data=loc, global_pool=True, kernel=(2, 2), pool_type='avg')
loc = mx.symbol.Flatten(data=loc)
loc = mx.symbol.FullyConnected(data=loc, num_hidden=6, name="stn_loc", attr=attr)
return loc |
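A hedged sketch of feeding the localisation net into a SpatialTransformer; the 28x28 target shape is an assumption:
import mxnet as mx

data = mx.symbol.Variable('data')
stn = mx.symbol.SpatialTransformer(data=data, loc=get_loc(data),
                                   target_shape=(28, 28),
                                   transform_type='affine',
                                   sampler_type='bilinear')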
<SYSTEM_TASK:>
wrapper for initialize a detector
<END_TASK>
<USER_TASK:>
Description:
def get_detector(net, prefix, epoch, data_shape, mean_pixels, ctx, num_class,
nms_thresh=0.5, force_nms=True, nms_topk=400):
"""
wrapper for initialize a detector
Parameters:
----------
net : str
test network name
prefix : str
load model prefix
epoch : int
load model epoch
data_shape : int
resize image shape
mean_pixels : tuple (float, float, float)
mean pixel values (R, G, B)
ctx : mx.ctx
running context, mx.cpu() or mx.gpu(?)
num_class : int
number of classes
nms_thresh : float
non-maximum suppression threshold
force_nms : bool
force suppress different categories
""" |
if net is not None:
if isinstance(data_shape, tuple):
data_shape = data_shape[0]
net = get_symbol(net, data_shape, num_classes=num_class, nms_thresh=nms_thresh,
force_nms=force_nms, nms_topk=nms_topk)
detector = Detector(net, prefix, epoch, data_shape, mean_pixels, ctx=ctx)
return detector |
<SYSTEM_TASK:>
Parse a comma-separated shape string into a tuple of two ints
<END_TASK>
<USER_TASK:>
Description:
def parse_data_shape(data_shape_str):
"""Parse string to tuple or int""" |
ds = data_shape_str.strip().split(',')
if len(ds) == 1:
data_shape = (int(ds[0]), int(ds[0]))
elif len(ds) == 2:
data_shape = (int(ds[0]), int(ds[1]))
else:
raise ValueError("Unexpected data_shape: %s" % data_shape_str)
return data_shape |
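A quick sketch of the two accepted input forms:
print(parse_data_shape('300'))       # (300, 300)
print(parse_data_shape('512,300'))   # (512, 300)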
<SYSTEM_TASK:>
A LeNet-style net that takes the difference of consecutive frames as input.
<END_TASK>
<USER_TASK:>
Description:
def get_lenet():
""" A lenet style net, takes difference of each frame as input.
""" |
source = mx.sym.Variable("data")
source = (source - 128) * (1.0/128)
frames = mx.sym.SliceChannel(source, num_outputs=30)
diffs = [frames[i+1] - frames[i] for i in range(29)]
source = mx.sym.Concat(*diffs)
net = mx.sym.Convolution(source, kernel=(5, 5), num_filter=40)
net = mx.sym.BatchNorm(net, fix_gamma=True)
net = mx.sym.Activation(net, act_type="relu")
net = mx.sym.Pooling(net, pool_type="max", kernel=(2,2), stride=(2,2))
net = mx.sym.Convolution(net, kernel=(3, 3), num_filter=40)
net = mx.sym.BatchNorm(net, fix_gamma=True)
net = mx.sym.Activation(net, act_type="relu")
net = mx.sym.Pooling(net, pool_type="max", kernel=(2,2), stride=(2,2))
# first fullc
flatten = mx.symbol.Flatten(net)
flatten = mx.symbol.Dropout(flatten)
fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=600)
# Name the final layer as softmax so it auto matches the naming of data iterator
# Otherwise we can also change the provide_data in the data iter
return mx.symbol.LogisticRegressionOutput(data=fc1, name='softmax') |
<SYSTEM_TASK:>
Encode the labels into CDF targets.
<END_TASK>
<USER_TASK:>
Description:
def encode_label(label_data):
"""Run encoding to encode the label into the CDF target.
""" |
systole = label_data[:, 1]
diastole = label_data[:, 2]
systole_encode = np.array([
(x < np.arange(600)) for x in systole
], dtype=np.uint8)
diastole_encode = np.array([
(x < np.arange(600)) for x in diastole
], dtype=np.uint8)
return systole_encode, diastole_encode |
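A small sketch of the resulting CDF targets; the label values are made up:
import numpy as np

labels = np.array([[1.0, 70.5, 120.0]])     # [case_id, systole, diastole]
systole_t, diastole_t = encode_label(labels)
print(systole_t.shape)                      # (1, 600)
print(systole_t[0, 69:73])                  # [0 0 1 1] -- the step crosses the true volume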
<SYSTEM_TASK:>
Run a for loop with user-defined computation over NDArrays on dimension 0.
<END_TASK>
<USER_TASK:>
Description:
def foreach(body, data, init_states):
"""Run a for loop with user-defined computation over NDArrays on dimension 0.
This operator simulates a for loop and body has the computation for an iteration
of the for loop. It runs the computation in body on each slice from the input
NDArrays.
body takes two arguments as input and outputs a tuple of two elements,
as illustrated below::
out, states = body(data1, states)
data1 can be either an NDArray or a list of NDArrays. If data is an NDArray,
data1 is an NDArray. Otherwise, data1 is a list of NDArrays and has the same
size as data. states is a list of NDArrays and have the same size as init_states.
Similarly, out can be either an NDArray or a list of NDArrays, which are concatenated
as the first output of foreach; states from the last execution of body
are the second output of foreach.
The computation done by this operator is equivalent to the pseudo code below
when the input data is NDArray::
states = init_states
outs = []
for i in range(data.shape[0]):
s = data[i]
out, states = body(s, states)
outs.append(out)
outs = stack(*outs)
Parameters
----------
body : a Python function.
Define computation in an iteration.
data: an NDArray or a list of NDArrays.
The input data.
init_states: an NDArray or nested lists of NDArrays.
The initial values of the loop states.
name: string.
The name of the operator.
Returns
-------
outputs: an NDArray or nested lists of NDArrays.
The output data concatenated from the output of all iterations.
states: an NDArray or nested lists of NDArrays.
The loop states in the last iteration.
Examples
--------
>>> step = lambda data, states: (data + states[0], [states[0] * 2])
>>> data = mx.nd.random.uniform(shape=(2, 10))
>>> states = [mx.nd.random.uniform(shape=(10))]
>>> outs, states = mx.nd.contrib.foreach(step, data, states)
""" |
def check_input(inputs, in_type, msg):
is_NDArray_or_list = True
if isinstance(inputs, list):
for i in inputs:
if not isinstance(i, in_type):
is_NDArray_or_list = False
break
else:
is_NDArray_or_list = isinstance(inputs, in_type)
assert is_NDArray_or_list, msg
flatten, _ = _flatten(data, "foreach input")
check_input(flatten, ndarray.NDArray,
"data should be an NDArray or a nested list of NDArrays")
flatten, _ = _flatten(init_states, "foreach states")
check_input(flatten, ndarray.NDArray,
"init_states should be an NDArray or a nested list of NDArrays")
not_data_list = isinstance(data, ndarray.NDArray)
num_iters = data.shape[0] if not_data_list else data[0].shape[0]
states = init_states
outputs = []
for i in range(num_iters):
if not_data_list:
eles = data[i]
else:
eles = [d[i] for d in data]
outs, states = body(eles, states)
outs, out_fmt = _flatten(outs, "foreach output")
outputs.append(outs)
outputs = zip(*outputs)
tmp_outputs = []
for out in outputs:
tmp_outputs.append(ndarray.op.stack(*out))
outputs = tmp_outputs
outputs, _ = _regroup(outputs, out_fmt)
return (outputs, states) |
<SYSTEM_TASK:>
Performs an element-wise check to determine if the NDArray contains an infinite element
<END_TASK>
<USER_TASK:>
Description:
def isfinite(data):
"""Performs an element-wise check to determine if the NDArray contains an infinite element
or not.
Parameters
----------
data : NDArray
An N-D NDArray.
Returns
-------
output: NDArray
The output NDarray, with same shape as input, where 1 indicates the array element is
finite i.e. not equal to positive or negative infinity and 0 in places where it is
positive or negative infinity.
Examples
--------
>>> data = mx.nd.array([np.inf, -np.inf, np.NINF, -1])
>>> output = mx.nd.contrib.isfinite(data)
>>> output
[0. 0. 0. 1.]
<NDArray 4 @cpu(0)>
""" |
is_data_not_nan = data == data
is_data_not_infinite = data.abs() != np.inf
return ndarray.logical_and(is_data_not_infinite, is_data_not_nan) |
<SYSTEM_TASK:>
Get distance matrix given a matrix. Used in testing.
<END_TASK>
<USER_TASK:>
Description:
def get_distance_matrix(x):
"""Get distance matrix given a matrix. Used in testing.""" |
square = nd.sum(x ** 2.0, axis=1, keepdims=True)
distance_square = square + square.transpose() - (2.0 * nd.dot(x, x.transpose()))
return nd.sqrt(distance_square) |
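A quick check on a tiny matrix (nd is mxnet.ndarray, as in the function above):
from mxnet import nd

x = nd.array([[0.0, 0.0], [3.0, 4.0]])
print(get_distance_matrix(x).asnumpy())   # off-diagonal entries are 5.0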
<SYSTEM_TASK:>
Evaluate embeddings based on Recall@k.
<END_TASK>
<USER_TASK:>
Description:
def evaluate_emb(emb, labels):
"""Evaluate embeddings based on Recall@k.""" |
d_mat = get_distance_matrix(emb)
d_mat = d_mat.asnumpy()
labels = labels.asnumpy()
names = []
accs = []
for k in [1, 2, 4, 8, 16]:
names.append('Recall@%d' % k)
correct, cnt = 0.0, 0.0
for i in range(emb.shape[0]):
d_mat[i, i] = 1e10
nns = argpartition(d_mat[i], k)[:k]
if any(labels[i] == labels[nn] for nn in nns):
correct += 1
cnt += 1
accs.append(correct/cnt)
return names, accs |
<SYSTEM_TASK:>
Get learning rate based on schedule.
<END_TASK>
<USER_TASK:>
Description:
def get_lr(lr, epoch, steps, factor):
"""Get learning rate based on schedule.""" |
for s in steps:
if epoch >= s:
lr *= factor
return lr |
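A quick sketch of the step schedule:
print(get_lr(0.1, epoch=45, steps=[30, 60], factor=0.1))   # ~0.01  (decayed once)
print(get_lr(0.1, epoch=75, steps=[30, 60], factor=0.1))   # ~0.001 (decayed twice)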
<SYSTEM_TASK:>
Adds Symbol.WarpCTC on top of the pred symbol and returns the resulting symbol
<END_TASK>
<USER_TASK:>
Description:
def _add_warp_ctc_loss(pred, seq_len, num_label, label):
""" Adds Symbol.contrib.ctc_loss on top of pred symbol and returns the resulting symbol """ |
label = mx.sym.Reshape(data=label, shape=(-1,))
label = mx.sym.Cast(data=label, dtype='int32')
return mx.sym.WarpCTC(data=pred, label=label, label_length=num_label, input_length=seq_len) |
<SYSTEM_TASK:>
Adds Symbol.contrib.ctc_loss (the built-in MXNet CTC loss) on top of the pred symbol and returns the resulting symbol
<END_TASK>
<USER_TASK:>
Description:
def _add_mxnet_ctc_loss(pred, seq_len, label):
""" Adds Symbol.WapCTC on top of pred symbol and returns the resulting symbol """ |
pred_ctc = mx.sym.Reshape(data=pred, shape=(-4, seq_len, -1, 0))
loss = mx.sym.contrib.ctc_loss(data=pred_ctc, label=label)
ctc_loss = mx.sym.MakeLoss(loss)
softmax_class = mx.symbol.SoftmaxActivation(data=pred)
softmax_loss = mx.sym.MakeLoss(softmax_class)
softmax_loss = mx.sym.BlockGrad(softmax_loss)
return mx.sym.Group([softmax_loss, ctc_loss]) |
<SYSTEM_TASK:>
Adds CTC loss on top of pred symbol and returns the resulting symbol
<END_TASK>
<USER_TASK:>
Description:
def _add_ctc_loss(pred, seq_len, num_label, loss_type):
""" Adds CTC loss on top of pred symbol and returns the resulting symbol """ |
label = mx.sym.Variable('label')
if loss_type == 'warpctc':
print("Using WarpCTC Loss")
sm = _add_warp_ctc_loss(pred, seq_len, num_label, label)
else:
print("Using MXNet CTC Loss")
assert loss_type == 'ctc'
sm = _add_mxnet_ctc_loss(pred, seq_len, label)
return sm |