Convert gradients to grayscale. This gives a saliency map.
def to_grayscale(cv2im): """Convert gradients to grayscale. This gives a saliency map.""" # How strongly does each position activate the output grayscale_im = np.sum(np.abs(cv2im), axis=0) # Normalize between min and 99th percentile im_max = np.percentile(grayscale_im, 99) im_min = np.min(grayscale_im) grayscale_im = np.clip((grayscale_im - im_min) / (im_max - im_min), 0, 1) grayscale_im = np.expand_dims(grayscale_im, axis=0) return grayscale_im
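A minimal usage sketch for the helper above, assuming the gradients come from a backward pass over a 3 x 224 x 224 input image (the random array below simply stands in for real gradients):

import numpy as np

# Hypothetical gradient of a class score w.r.t. the input image, shaped (channels, height, width).
input_grad = np.random.randn(3, 224, 224)
saliency = to_grayscale(input_grad)
print(saliency.shape)                    # (1, 224, 224)
print(saliency.min(), saliency.max())    # values clipped to [0, 1]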
Helper function for checking shape of label and prediction Parameters ---------- labels : list of `NDArray` The labels of the data. preds : list of `NDArray` Predicted values. wrap : boolean If True, wrap labels/preds in a list if they are single NDArray shape : boolean If True, check the shape of labels and preds; Otherwise only check their length.
def check_label_shapes(labels, preds, wrap=False, shape=False): """Helper function for checking shape of label and prediction Parameters ---------- labels : list of `NDArray` The labels of the data. preds : list of `NDArray` Predicted values. wrap : boolean If True, wrap labels/preds in a list if they are single NDArray shape : boolean If True, check the shape of labels and preds; Otherwise only check their length. """ if not shape: label_shape, pred_shape = len(labels), len(preds) else: label_shape, pred_shape = labels.shape, preds.shape if label_shape != pred_shape: raise ValueError("Shape of labels {} does not match shape of " "predictions {}".format(label_shape, pred_shape)) if wrap: if isinstance(labels, ndarray.ndarray.NDArray): labels = [labels] if isinstance(preds, ndarray.ndarray.NDArray): preds = [preds] return labels, preds
Creates evaluation metric from metric names or instances of EvalMetric or a custom metric function. Parameters ---------- metric : str or callable Specifies the metric to create. This argument must be one of the below: - Name of a metric. - An instance of `EvalMetric`. - A list, each element of which is a metric or a metric name. - An evaluation function that computes custom metric for a given batch of labels and predictions. *args : list Additional arguments to metric constructor. Only used when metric is str. **kwargs : dict Additional arguments to metric constructor. Only used when metric is str Examples -------- >>> def custom_metric(label, pred): ... return np.mean(np.abs(label - pred)) ... >>> metric1 = mx.metric.create('acc') >>> metric2 = mx.metric.create(custom_metric) >>> metric3 = mx.metric.create([metric1, metric2, 'rmse'])
def create(metric, *args, **kwargs): """Creates evaluation metric from metric names or instances of EvalMetric or a custom metric function. Parameters ---------- metric : str or callable Specifies the metric to create. This argument must be one of the below: - Name of a metric. - An instance of `EvalMetric`. - A list, each element of which is a metric or a metric name. - An evaluation function that computes custom metric for a given batch of labels and predictions. *args : list Additional arguments to metric constructor. Only used when metric is str. **kwargs : dict Additional arguments to metric constructor. Only used when metric is str Examples -------- >>> def custom_metric(label, pred): ... return np.mean(np.abs(label - pred)) ... >>> metric1 = mx.metric.create('acc') >>> metric2 = mx.metric.create(custom_metric) >>> metric3 = mx.metric.create([metric1, metric2, 'rmse']) """ if callable(metric): return CustomMetric(metric, *args, **kwargs) elif isinstance(metric, list): composite_metric = CompositeEvalMetric() for child_metric in metric: composite_metric.add(create(child_metric, *args, **kwargs)) return composite_metric return _create(metric, *args, **kwargs)
Creates a custom evaluation metric that receives its inputs as numpy arrays. Parameters ---------- numpy_feval : callable(label, pred) Custom evaluation function that receives labels and predictions for a minibatch as numpy arrays and returns the corresponding custom metric as a floating point number. name : str, optional Name of the custom metric. allow_extra_outputs : bool, optional Whether prediction output is allowed to have extra outputs. This is useful in cases like RNN where states are also part of output which can then be fed back to the RNN in the next step. By default, extra outputs are not allowed. Returns ------- float Custom metric corresponding to the provided labels and predictions. Example ------- >>> def custom_metric(label, pred): ... return np.mean(np.abs(label-pred)) ... >>> metric = mx.metric.np(custom_metric)
def np(numpy_feval, name=None, allow_extra_outputs=False): """Creates a custom evaluation metric that receives its inputs as numpy arrays. Parameters ---------- numpy_feval : callable(label, pred) Custom evaluation function that receives labels and predictions for a minibatch as numpy arrays and returns the corresponding custom metric as a floating point number. name : str, optional Name of the custom metric. allow_extra_outputs : bool, optional Whether prediction output is allowed to have extra outputs. This is useful in cases like RNN where states are also part of output which can then be fed back to the RNN in the next step. By default, extra outputs are not allowed. Returns ------- float Custom metric corresponding to the provided labels and predictions. Example ------- >>> def custom_metric(label, pred): ... return np.mean(np.abs(label-pred)) ... >>> metric = mx.metric.np(custom_metric) """ def feval(label, pred): """Internal eval function.""" return numpy_feval(label, pred) feval.__name__ = numpy_feval.__name__ return CustomMetric(feval, name, allow_extra_outputs)
Save configurations of metric. Can be recreated from configs with metric.create(``**config``)
def get_config(self): """Save configurations of metric. Can be recreated from configs with metric.create(``**config``) """ config = self._kwargs.copy() config.update({ 'metric': self.__class__.__name__, 'name': self.name, 'output_names': self.output_names, 'label_names': self.label_names}) return config
Update the internal evaluation with named label and pred Parameters ---------- labels : OrderedDict of str -> NDArray name to array mapping for labels. preds : OrderedDict of str -> NDArray name to array mapping of predicted outputs.
def update_dict(self, label, pred): """Update the internal evaluation with named label and pred Parameters ---------- labels : OrderedDict of str -> NDArray name to array mapping for labels. preds : OrderedDict of str -> NDArray name to array mapping of predicted outputs. """ if self.output_names is not None: pred = [pred[name] for name in self.output_names] else: pred = list(pred.values()) if self.label_names is not None: label = [label[name] for name in self.label_names] else: label = list(label.values()) self.update(label, pred)
Resets the internal evaluation result to initial state.
def reset(self): """Resets the internal evaluation result to initial state.""" self.num_inst = 0 self.sum_metric = 0.0 self.global_num_inst = 0 self.global_sum_metric = 0.0
Gets the current evaluation result. Returns ------- names : list of str Name of the metrics. values : list of float Value of the evaluations.
def get(self): """Gets the current evaluation result. Returns ------- names : list of str Name of the metrics. values : list of float Value of the evaluations. """ if self.num_inst == 0: return (self.name, float('nan')) else: return (self.name, self.sum_metric / self.num_inst)
Gets the current global evaluation result. Returns ------- names : list of str Name of the metrics. values : list of float Value of the evaluations.
def get_global(self): """Gets the current global evaluation result. Returns ------- names : list of str Name of the metrics. values : list of float Value of the evaluations. """ if self._has_global_stats: if self.global_num_inst == 0: return (self.name, float('nan')) else: return (self.name, self.global_sum_metric / self.global_num_inst) else: return self.get()
Returns zipped name and value pairs. Returns ------- list of tuples A (name, value) tuple list.
def get_name_value(self): """Returns zipped name and value pairs. Returns ------- list of tuples A (name, value) tuple list. """ name, value = self.get() if not isinstance(name, list): name = [name] if not isinstance(value, list): value = [value] return list(zip(name, value))
Returns zipped name and value pairs for global results. Returns ------- list of tuples A (name, value) tuple list.
def get_global_name_value(self): """Returns zipped name and value pairs for global results. Returns ------- list of tuples A (name, value) tuple list. """ if self._has_global_stats: name, value = self.get_global() if not isinstance(name, list): name = [name] if not isinstance(value, list): value = [value] return list(zip(name, value)) else: return self.get_name_value()
Update various binary classification counts for a single (label, pred) pair. Parameters ---------- label : `NDArray` The labels of the data. pred : `NDArray` Predicted values.
def update_binary_stats(self, label, pred): """ Update various binary classification counts for a single (label, pred) pair. Parameters ---------- label : `NDArray` The labels of the data. pred : `NDArray` Predicted values. """ pred = pred.asnumpy() label = label.asnumpy().astype('int32') pred_label = numpy.argmax(pred, axis=1) check_label_shapes(label, pred) if len(numpy.unique(label)) > 2: raise ValueError("%s currently only supports binary classification." % self.__class__.__name__) pred_true = (pred_label == 1) pred_false = 1 - pred_true label_true = (label == 1) label_false = 1 - label_true true_pos = (pred_true * label_true).sum() false_pos = (pred_true * label_false).sum() false_neg = (pred_false * label_true).sum() true_neg = (pred_false * label_false).sum() self.true_positives += true_pos self.global_true_positives += true_pos self.false_positives += false_pos self.global_false_positives += false_pos self.false_negatives += false_neg self.global_false_negatives += false_neg self.true_negatives += true_neg self.global_true_negatives += true_neg
Calculate the Matthews correlation coefficient (MCC).
def matthewscc(self, use_global=False): """ Calculate the Matthews correlation coefficient (MCC). """ if use_global: if not self.global_total_examples: return 0. true_pos = float(self.global_true_positives) false_pos = float(self.global_false_positives) false_neg = float(self.global_false_negatives) true_neg = float(self.global_true_negatives) else: if not self.total_examples: return 0. true_pos = float(self.true_positives) false_pos = float(self.false_positives) false_neg = float(self.false_negatives) true_neg = float(self.true_negatives) terms = [(true_pos + false_pos), (true_pos + false_neg), (true_neg + false_pos), (true_neg + false_neg)] denom = 1. for t in filter(lambda t: t != 0., terms): denom *= t return ((true_pos * true_neg) - (false_pos * false_neg)) / math.sqrt(denom)
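For reference, MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)). Skipping zero-valued sums in the denominator, as the loop above does, is safe: whenever one of the four sums is zero the numerator is zero as well, so the result is 0 either way. A small hand check with arbitrary counts:

import math

tp, tn, fp, fn = 4., 3., 1., 2.
num = tp * tn - fp * fn                                          # 10.0
den = math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))   # sqrt(600)
print(num / den)                                                 # ~0.408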
Returns a new dataset with each sample transformed by the transformer function `fn`. Parameters ---------- fn : callable A transformer function that takes a sample as input and returns the transformed sample. lazy : bool, default True If False, transforms all samples at once. Otherwise, transforms each sample on demand. Note that if `fn` is stochastic, you must set lazy to True or you will get the same result on all epochs. Returns ------- Dataset The transformed dataset.
def transform(self, fn, lazy=True): """Returns a new dataset with each sample transformed by the transformer function `fn`. Parameters ---------- fn : callable A transformer function that takes a sample as input and returns the transformed sample. lazy : bool, default True If False, transforms all samples at once. Otherwise, transforms each sample on demand. Note that if `fn` is stochastic, you must set lazy to True or you will get the same result on all epochs. Returns ------- Dataset The transformed dataset. """ trans = _LazyTransformDataset(self, fn) if lazy: return trans return SimpleDataset([i for i in trans])
Returns a new dataset with the first element of each sample transformed by the transformer function `fn`. This is useful, for example, when you only want to transform the data while keeping the label as is. Parameters ---------- fn : callable A transformer function that takes the first element of a sample as input and returns the transformed element. lazy : bool, default True If False, transforms all samples at once. Otherwise, transforms each sample on demand. Note that if `fn` is stochastic, you must set lazy to True or you will get the same result on all epochs. Returns ------- Dataset The transformed dataset.
def transform_first(self, fn, lazy=True): """Returns a new dataset with the first element of each sample transformed by the transformer function `fn`. This is useful, for example, when you only want to transform the data while keeping the label as is. Parameters ---------- fn : callable A transformer function that takes the first element of a sample as input and returns the transformed element. lazy : bool, default True If False, transforms all samples at once. Otherwise, transforms each sample on demand. Note that if `fn` is stochastic, you must set lazy to True or you will get the same result on all epochs. Returns ------- Dataset The transformed dataset. """ return self.transform(_TransformFirstClosure(fn), lazy)
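A small usage sketch for transform and transform_first on a toy gluon SimpleDataset of (data, label) pairs; the doubling and scaling functions are purely illustrative:

from mxnet.gluon.data import SimpleDataset

pairs = SimpleDataset([(1, 0), (2, 1), (3, 0)])                    # (data, label) samples
both = pairs.transform(lambda data, label: (data * 2, label))      # transforms the whole sample
first_only = pairs.transform_first(lambda data: data * 10)         # leaves the label untouched
print(both[0])        # (2, 0)
print(first_only[0])  # (10, 0)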
Forward the image through the LSTM network model. Parameters ---------- img_ : numpy.ndarray The input image. Returns ---------- str The predicted label string.
def forward_ocr(self, img_): """Forward the image through the LSTM network model. Parameters ---------- img_ : numpy.ndarray The input image. Returns ---------- str The predicted label string. """ img_ = cv2.resize(img_, (80, 30)) img_ = img_.transpose(1, 0) print(img_.shape) img_ = img_.reshape((1, 80, 30)) print(img_.shape) # img_ = img_.reshape((80 * 30)) img_ = np.multiply(img_, 1 / 255.0) self.predictor.forward(data=img_, **self.init_state_dict) prob = self.predictor.get_output(0) label_list = [] for p in prob: print(np.argsort(p)) max_index = np.argsort(p)[::-1][0] label_list.append(max_index) return self.__get_string(label_list)
Return the caffe_pb2.NetParameter object defined in a prototxt file.
def read_prototxt(fname): """Return the caffe_pb2.NetParameter object defined in a prototxt file. """ proto = caffe_pb2.NetParameter() with open(fname, 'r') as f: text_format.Merge(str(f.read()), proto) return proto
Returns layers in a caffe_pb2.NetParameter object
def get_layers(proto): """Returns layers in a caffe_pb2.NetParameter object """ if len(proto.layer): return proto.layer elif len(proto.layers): return proto.layers else: raise ValueError('Invalid proto file.')
Return the layers defined in a binary caffemodel file (and their names when the caffe Python package is used).
def read_caffemodel(prototxt_fname, caffemodel_fname): """Return the layers defined in a binary caffemodel file (and their names when the caffe Python package is used). """ if use_caffe: caffe.set_mode_cpu() net = caffe.Net(prototxt_fname, caffemodel_fname, caffe.TEST) layer_names = net._layer_names layers = net.layers return (layers, layer_names) else: proto = caffe_pb2.NetParameter() with open(caffemodel_fname, 'rb') as f: proto.ParseFromString(f.read()) return (get_layers(proto), None)
Iterate over all layers
def layer_iter(layers, layer_names): """Iterate over all layers""" if use_caffe: for layer_idx, layer in enumerate(layers): layer_name = re.sub('[-/]', '_', layer_names[layer_idx]) layer_type = layer.type layer_blobs = layer.blobs yield (layer_name, layer_type, layer_blobs) else: for layer in layers: layer_name = re.sub('[-/]', '_', layer.name) layer_type = layer.type layer_blobs = layer.blobs yield (layer_name, layer_type, layer_blobs)
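A usage sketch tying these converter helpers together on the protobuf-only path (use_caffe False); the file names are hypothetical and the module-level globals (caffe_pb2, use_caffe, re) are assumed to be set up as in the snippets above:

# Hypothetical file names for an exported Caffe model.
layers, layer_names = read_caffemodel('deploy.prototxt', 'weights.caffemodel')
for name, layer_type, blobs in layer_iter(layers, layer_names):
    # Each entry is (sanitized layer name, layer type, list of weight blobs).
    print(name, layer_type, len(blobs))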
Set up the configuration of the profiler (only accepts keyword arguments). Parameters ---------- filename : string, output file for profile data profile_all : boolean, all profile types enabled profile_symbolic : boolean, whether to profile symbolic operators profile_imperative : boolean, whether to profile imperative operators profile_memory : boolean, whether to profile memory usage profile_api : boolean, whether to profile the C API contiguous_dump : boolean, whether to periodically dump profiling data to file dump_period : float, seconds between profile data dumps aggregate_stats : boolean, whether to maintain aggregate stats in memory for console dump. Has some negative performance impact. profile_process : string whether to profile the kvstore `server` or `worker`. The server can only be profiled when the kvstore is of type dist. If this is not passed, defaults to `worker`.
def set_config(**kwargs): """Set up the configure of profiler (only accepts keyword arguments). Parameters ---------- filename : string, output file for profile data profile_all : boolean, all profile types enabled profile_symbolic : boolean, whether to profile symbolic operators profile_imperative : boolean, whether to profile imperative operators profile_memory : boolean, whether to profile memory usage profile_api : boolean, whether to profile the C API contiguous_dump : boolean, whether to periodically dump profiling data to file dump_period : float, seconds between profile data dumps aggregate_stats : boolean, whether to maintain aggregate stats in memory for console dump. Has some negative performance impact. profile_process : string whether to profile kvstore `server` or `worker`. server can only be profiled when kvstore is of type dist. if this is not passed, defaults to `worker` """ kk = kwargs.keys() vv = kwargs.values() check_call(_LIB.MXSetProcessProfilerConfig(len(kwargs), c_str_array([key for key in kk]), c_str_array([str(val) for val in vv]), profiler_kvstore_handle))
Set up the configuration of the profiler (Deprecated). Parameters ---------- mode : string, optional Indicates whether to enable the profiler, can be 'symbolic', or 'all'. Defaults to `symbolic`. filename : string, optional The name of the output trace file. Defaults to 'profile.json'.
def profiler_set_config(mode='symbolic', filename='profile.json'): """Set up the configure of profiler (Deprecated). Parameters ---------- mode : string, optional Indicates whether to enable the profiler, can be 'symbolic', or 'all'. Defaults to `symbolic`. filename : string, optional The name of output trace file. Defaults to 'profile.json'. """ warnings.warn('profiler.profiler_set_config() is deprecated. ' 'Please use profiler.set_config() instead') keys = c_str_array([key for key in ["profile_" + mode, "filename"]]) values = c_str_array([str(val) for val in [True, filename]]) assert len(keys) == len(values) check_call(_LIB.MXSetProcessProfilerConfig(len(keys), keys, values, profiler_kvstore_handle))
Set up the profiler state to 'run' or 'stop'. Parameters ---------- state : string, optional Indicates whether to run the profiler, can be 'stop' or 'run'. Default is `stop`. profile_process : string whether to profile kvstore `server` or `worker`. server can only be profiled when kvstore is of type dist. if this is not passed, defaults to `worker`
def set_state(state='stop', profile_process='worker'): """Set up the profiler state to 'run' or 'stop'. Parameters ---------- state : string, optional Indicates whether to run the profiler, can be 'stop' or 'run'. Default is `stop`. profile_process : string whether to profile kvstore `server` or `worker`. server can only be profiled when kvstore is of type dist. if this is not passed, defaults to `worker` """ state2int = {'stop': 0, 'run': 1} profile_process2int = {'worker': 0, 'server': 1} check_call(_LIB.MXSetProcessProfilerState(ctypes.c_int(state2int[state]), profile_process2int[profile_process], profiler_kvstore_handle))
Dump profile and stop profiler. Use this to save profile in advance in case your program cannot exit normally. Parameters ---------- finished : boolean Indicates whether to stop statistic output (dumping) after this dump. Default is True profile_process : string whether to profile kvstore `server` or `worker`. server can only be profiled when kvstore is of type dist. if this is not passed, defaults to `worker`
def dump(finished=True, profile_process='worker'): """Dump profile and stop profiler. Use this to save profile in advance in case your program cannot exit normally. Parameters ---------- finished : boolean Indicates whether to stop statistic output (dumping) after this dump. Default is True profile_process : string whether to profile kvstore `server` or `worker`. server can only be profiled when kvstore is of type dist. if this is not passed, defaults to `worker` """ fin = 1 if finished is True else 0 profile_process2int = {'worker': 0, 'server': 1} check_call(_LIB.MXDumpProcessProfile(fin, profile_process2int[profile_process], profiler_kvstore_handle))
Return a printable string of aggregate profile stats. Parameters ---------- reset : boolean Indicates whether to clear the aggregate statistical data collected up to this point.
def dumps(reset=False): """Return a printable string of aggregate profile stats. Parameters ---------- reset : boolean Indicates whether to clear the aggregate statistical data collected up to this point. """ debug_str = ctypes.c_char_p() do_reset = 1 if reset is True else 0 check_call(_LIB.MXAggregateProfileStatsPrint(ctypes.byref(debug_str), int(do_reset))) return py_str(debug_str.value)
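Putting the profiler functions above together, a typical session might look like the following sketch (assumes an MXNet build with the native profiler enabled; the workload is just a dummy matrix product):

import mxnet as mx
from mxnet import profiler

profiler.set_config(profile_all=True, filename='profile.json', aggregate_stats=True)
profiler.set_state('run')

a = mx.nd.random.uniform(shape=(1000, 1000))
b = mx.nd.dot(a, a)
b.wait_to_read()               # make sure the work has actually executed

profiler.set_state('stop')
print(profiler.dumps())        # aggregate stats as a printable string
profiler.dump()                # write profile.json for the chrome tracing viewer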
Pause profiling. Parameters ---------- profile_process : string whether to profile kvstore `server` or `worker`. server can only be profiled when kvstore is of type dist. if this is not passed, defaults to `worker`
def pause(profile_process='worker'): """Pause profiling. Parameters ---------- profile_process : string whether to profile kvstore `server` or `worker`. server can only be profiled when kvstore is of type dist. if this is not passed, defaults to `worker` """ profile_process2int = {'worker': 0, 'server': 1} check_call(_LIB.MXProcessProfilePause(int(1), profile_process2int[profile_process], profiler_kvstore_handle))
Resume paused profiling. Parameters ---------- profile_process : string whether to profile kvstore `server` or `worker`. server can only be profiled when kvstore is of type dist. if this is not passed, defaults to `worker`
def resume(profile_process='worker'): """ Resume paused profiling. Parameters ---------- profile_process : string whether to profile kvstore `server` or `worker`. server can only be profiled when kvstore is of type dist. if this is not passed, defaults to `worker` """ profile_process2int = {'worker': 0, 'server': 1} check_call(_LIB.MXProcessProfilePause(int(0), profile_process2int[profile_process], profiler_kvstore_handle))
Set counter value. Parameters ---------- value : int Value for the counter
def set_value(self, value): """Set counter value. Parameters ---------- value : int Value for the counter """ check_call(_LIB.MXProfileSetCounter(self.handle, int(value)))
Increment the counter value. Parameters ---------- delta : int Amount to add to the counter.
def increment(self, delta=1): """Increment the counter value. Parameters ---------- delta : int Amount to add to the counter. """ check_call(_LIB.MXProfileAdjustCounter(self.handle, int(delta)))
Decrement the counter value. Parameters ---------- delta : int Amount to subtract from the counter.
def decrement(self, delta=1): """Decrement the counter value. Parameters ---------- delta : int Amount to subtract from the counter. """ check_call(_LIB.MXProfileAdjustCounter(self.handle, -int(delta)))
Set up the profiler state to record operator. Parameters ---------- scope : string, optional Indicates what scope the marker should refer to. Can be 'global', 'process', 'thread', 'task', and 'marker'. Default is `process`.
def mark(self, scope='process'): """Set up the profiler state to record operator. Parameters ---------- scope : string, optional Indicates what scope the marker should refer to. Can be 'global', 'process', 'thread', 'task', and 'marker'. Default is `process`. """ check_call(_LIB.MXProfileSetMarker(self.domain.handle, c_str(self.name), c_str(scope)))
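A hedged sketch of how the counter and marker methods above are typically reached through mx.profiler.Domain, Counter, and Marker; the domain and event names are illustrative and the profiler is assumed to be configured as in the earlier example:

from mxnet import profiler

domain = profiler.Domain('my_domain')
counter = profiler.Counter(domain, 'items_processed', value=0)
counter.increment(5)
counter.decrement(2)
counter.set_value(100)

marker = profiler.Marker(domain, 'epoch_start')
marker.mark('process')   # records an instant event in the 'process' scope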
r"""Get CUDA kernel from compiled module. Parameters ---------- name : str String name of the kernel. signature : str Function signature for the kernel. For example, if a kernel is declared as:: extern "C" __global__ void axpy(const float *x, double *y, int alpha) Then its signature should be:: const float *x, double *y, int alpha or:: const float *, double *, int Note that `*` in signature marks an argument as array and `const` marks an argument as constant (input) array. Returns ------- CudaKernel CUDA kernels that can be launched on GPUs.
def get_kernel(self, name, signature): r"""Get CUDA kernel from compiled module. Parameters ---------- name : str String name of the kernel. signature : str Function signature for the kernel. For example, if a kernel is declared as:: extern "C" __global__ void axpy(const float *x, double *y, int alpha) Then its signature should be:: const float *x, double *y, int alpha or:: const float *, double *, int Note that `*` in signature marks an argument as array and `const` marks an argument as constant (input) array. Returns ------- CudaKernel CUDA kernels that can be launched on GPUs. """ hdl = CudaKernelHandle() is_ndarray = [] is_const = [] dtypes = [] pattern = re.compile(r"""^\s*(const)?\s*([\w_]+)\s*(\*)?\s*([\w_]+)?\s*$""") args = re.sub(r"\s+", " ", signature).split(",") for arg in args: match = pattern.match(arg) if not match or match.groups()[1] == 'const': raise ValueError( 'Invalid function prototype "%s". Must be in the ' 'form of "(const) type (*) (name)"'%arg) is_const.append(bool(match.groups()[0])) dtype = match.groups()[1] is_ndarray.append(bool(match.groups()[2])) if dtype not in _DTYPE_CPP_TO_NP: raise TypeError( "Unsupported kernel argument type %s. Supported types are: %s."%( arg, ','.join(_DTYPE_CPP_TO_NP.keys()))) dtypes.append(_DTYPE_NP_TO_MX[_DTYPE_CPP_TO_NP[dtype]]) check_call(_LIB.MXRtcCudaKernelCreate( self.handle, c_str(name), len(dtypes), c_array_buf(ctypes.c_int, array('i', is_ndarray)), c_array_buf(ctypes.c_int, array('i', is_const)), c_array_buf(ctypes.c_int, array('i', dtypes)), ctypes.byref(hdl))) return CudaKernel(hdl, name, is_ndarray, dtypes)
Launch cuda kernel. Parameters ---------- args : tuple of NDArray or numbers List of arguments for kernel. NDArrays are expected for pointer types (e.g. `float*`, `double*`) while numbers are expected for non-pointer types (e.g. `int`, `float`). ctx : Context The context to launch kernel on. Must be GPU context. grid_dims : tuple of 3 integers Grid dimensions for CUDA kernel. block_dims : tuple of 3 integers Block dimensions for CUDA kernel. shared_mem : integer, optional Size of dynamically allocated shared memory. Defaults to 0.
def launch(self, args, ctx, grid_dims, block_dims, shared_mem=0): """Launch cuda kernel. Parameters ---------- args : tuple of NDArray or numbers List of arguments for kernel. NDArrays are expected for pointer types (e.g. `float*`, `double*`) while numbers are expected for non-pointer types (e.g. `int`, `float`). ctx : Context The context to launch kernel on. Must be GPU context. grid_dims : tuple of 3 integers Grid dimensions for CUDA kernel. block_dims : tuple of 3 integers Block dimensions for CUDA kernel. shared_mem : integer, optional Size of dynamically allocated shared memory. Defaults to 0. """ assert ctx.device_type == 'gpu', "Cuda kernel can only be launched on GPU" assert len(grid_dims) == 3, "grid_dims must be a tuple of 3 integers" assert len(block_dims) == 3, "grid_dims must be a tuple of 3 integers" assert len(args) == len(self._dtypes), \ "CudaKernel(%s) expects %d arguments but got %d"%( self._name, len(self._dtypes), len(args)) void_args = [] ref_holder = [] for i, (arg, is_nd, dtype) in enumerate(zip(args, self._is_ndarray, self._dtypes)): if is_nd: assert isinstance(arg, NDArray), \ "The %d-th argument is expected to be a NDArray but got %s"%( i, type(arg)) void_args.append(arg.handle) else: assert isinstance(arg, numeric_types), \ "The %d-th argument is expected to be a number, but got %s"%( i, type(arg)) ref_holder.append(np.array(arg, dtype=dtype)) void_args.append(ref_holder[-1].ctypes.data_as(ctypes.c_void_p)) check_call(_LIB.MXRtcCudaKernelCall( self.handle, ctx.device_id, c_array(ctypes.c_void_p, void_args), mx_uint(grid_dims[0]), mx_uint(grid_dims[1]), mx_uint(grid_dims[2]), mx_uint(block_dims[0]), mx_uint(block_dims[1]), mx_uint(block_dims[2]), mx_uint(shared_mem)))
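For context, get_kernel and launch are normally used through mx.rtc.CudaModule, roughly as in the sketch below (requires a GPU build and an available GPU; the axpy kernel is a standard toy example):

import mxnet as mx

source = r'''
extern "C" __global__ void axpy(const float *x, float *y, float alpha) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    y[i] += alpha * x[i];
}
'''
module = mx.rtc.CudaModule(source)
kernel = module.get_kernel('axpy', 'const float *x, float *y, float alpha')
x = mx.nd.ones((10,), ctx=mx.gpu(0))
y = mx.nd.zeros((10,), ctx=mx.gpu(0))
# One block of 10 threads covers the 10-element arrays.
kernel.launch([x, y, 3.0], mx.gpu(0), (1, 1, 1), (10, 1, 1))
print(y.asnumpy())   # all elements become 3.0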
Clear the internal statistics to initial state.
def reset(self): """Clear the internal statistics to initial state.""" if getattr(self, 'num', None) is None: self.num_inst = 0 self.sum_metric = 0.0 else: self.num_inst = [0] * self.num self.sum_metric = [0.0] * self.num self.records = dict() self.counts = dict()
Update internal records. This function only updates the internal buffer; sum_metric and num_inst are updated in the _update() function when get() is called to return results. Params: ---------- labels: mx.nd.array (n * 6) or (n * 5), difficult column is optional 2-d array of ground-truths, n objects (id-xmin-ymin-xmax-ymax-[difficult]) preds: mx.nd.array (m * 6) 2-d array of detections, m objects (id-score-xmin-ymin-xmax-ymax)
def update(self, labels, preds): """ Update internal records. This function now only update internal buffer, sum_metric and num_inst are updated in _update() function instead when get() is called to return results. Params: ---------- labels: mx.nd.array (n * 6) or (n * 5), difficult column is optional 2-d array of ground-truths, n objects(id-xmin-ymin-xmax-ymax-[difficult]) preds: mx.nd.array (m * 6) 2-d array of detections, m objects(id-score-xmin-ymin-xmax-ymax) """ def iou(x, ys): """ Calculate intersection-over-union overlap Params: ---------- x : numpy.array single box [xmin, ymin ,xmax, ymax] ys : numpy.array multiple box [[xmin, ymin, xmax, ymax], [...], ] Returns: ----------- numpy.array [iou1, iou2, ...], size == ys.shape[0] """ ixmin = np.maximum(ys[:, 0], x[0]) iymin = np.maximum(ys[:, 1], x[1]) ixmax = np.minimum(ys[:, 2], x[2]) iymax = np.minimum(ys[:, 3], x[3]) iw = np.maximum(ixmax - ixmin, 0.) ih = np.maximum(iymax - iymin, 0.) inters = iw * ih uni = (x[2] - x[0]) * (x[3] - x[1]) + (ys[:, 2] - ys[:, 0]) * \ (ys[:, 3] - ys[:, 1]) - inters ious = inters / uni ious[uni < 1e-12] = 0 # in case bad boxes return ious # independant execution for each image for i in range(labels[0].shape[0]): # get as numpy arrays label = labels[0][i].asnumpy() if np.sum(label[:, 0] >= 0) < 1: continue pred = preds[self.pred_idx][i].asnumpy() # calculate for each class while (pred.shape[0] > 0): cid = int(pred[0, 0]) indices = np.where(pred[:, 0].astype(int) == cid)[0] if cid < 0: pred = np.delete(pred, indices, axis=0) continue dets = pred[indices] pred = np.delete(pred, indices, axis=0) # sort by score, desceding dets = dets[dets[:,1].argsort()[::-1]] records = np.hstack((dets[:, 1][:, np.newaxis], np.zeros((dets.shape[0], 1)))) # ground-truths label_indices = np.where(label[:, 0].astype(int) == cid)[0] gts = label[label_indices, :] label = np.delete(label, label_indices, axis=0) if gts.size > 0: found = [False] * gts.shape[0] for j in range(dets.shape[0]): # compute overlaps ious = iou(dets[j, 2:], gts[:, 1:5]) ovargmax = np.argmax(ious) ovmax = ious[ovargmax] if ovmax > self.ovp_thresh: if (not self.use_difficult and gts.shape[1] >= 6 and gts[ovargmax, 5] > 0): pass else: if not found[ovargmax]: records[j, -1] = 1 # tp found[ovargmax] = True else: # duplicate records[j, -1] = 2 # fp else: records[j, -1] = 2 # fp else: # no gt, mark all fp records[:, -1] = 2 # ground truth count if (not self.use_difficult and gts.shape[1] >= 6): gt_count = np.sum(gts[:, 5] < 1) else: gt_count = gts.shape[0] # now we push records to buffer # first column: score, second column: tp/fp # 0: not set(matched to difficult or something), 1: tp, 2: fp records = records[np.where(records[:, -1] > 0)[0], :] if records.size > 0: self._insert(cid, records, gt_count) # add missing class if not present in prediction while (label.shape[0] > 0): cid = int(label[0, 0]) label_indices = np.where(label[:, 0].astype(int) == cid)[0] label = np.delete(label, label_indices, axis=0) if cid < 0: continue gt_count = label_indices.size self._insert(cid, np.array([[0, 0]]), gt_count)
update num_inst and sum_metric
def _update(self): """ update num_inst and sum_metric """ aps = [] for k, v in self.records.items(): recall, prec = self._recall_prec(v, self.counts[k]) ap = self._average_precision(recall, prec) aps.append(ap) if self.num is not None and k < (self.num - 1): self.sum_metric[k] = ap self.num_inst[k] = 1 if self.num is None: self.num_inst = 1 self.sum_metric = np.mean(aps) else: self.num_inst[-1] = 1 self.sum_metric[-1] = np.mean(aps)
get recall and precision from internal records
def _recall_prec(self, record, count): """ get recall and precision from internal records """ record = np.delete(record, np.where(record[:, 1].astype(int) == 0)[0], axis=0) sorted_records = record[record[:,0].argsort()[::-1]] tp = np.cumsum(sorted_records[:, 1].astype(int) == 1) fp = np.cumsum(sorted_records[:, 1].astype(int) == 2) if count <= 0: recall = tp * 0.0 else: recall = tp / float(count) prec = tp.astype(float) / (tp + fp) return recall, prec
calculate average precision Params: ---------- rec : numpy.array cumulated recall prec : numpy.array cumulated precision Returns: ---------- ap as float
def _average_precision(self, rec, prec): """ calculate average precision Params: ---------- rec : numpy.array cumulated recall prec : numpy.array cumulated precision Returns: ---------- ap as float """ # append sentinel values at both ends mrec = np.concatenate(([0.], rec, [1.])) mpre = np.concatenate(([0.], prec, [0.])) # compute precision integration ladder for i in range(mpre.size - 1, 0, -1): mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) # look for recall value changes i = np.where(mrec[1:] != mrec[:-1])[0] # sum (\delta recall) * prec ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) return ap
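A self-contained toy check of the same all-point interpolation used above (monotone precision envelope, then the sum of precision times recall increments); the recall/precision values are made up:

import numpy as np

rec = np.array([0.2, 0.4, 0.4, 0.8])
prec = np.array([1.0, 0.5, 0.66, 0.5])

mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
for i in range(mpre.size - 1, 0, -1):
    mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])      # precision envelope
idx = np.where(mrec[1:] != mrec[:-1])[0]                # recall change points
ap = np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])
print(ap)                                               # ~0.532 for these values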
Insert records according to key
def _insert(self, key, records, count): """ Insert records according to key """ if key not in self.records: assert key not in self.counts self.records[key] = records self.counts[key] = count else: self.records[key] = np.vstack((self.records[key], records)) assert key in self.counts self.counts[key] += count
calculate average precision, override the default one, special 11-point metric Params: ---------- rec : numpy.array cumulated recall prec : numpy.array cumulated precision Returns: ---------- ap as float
def _average_precision(self, rec, prec): """ calculate average precision, override the default one, special 11-point metric Params: ---------- rec : numpy.array cumulated recall prec : numpy.array cumulated precision Returns: ---------- ap as float """ ap = 0. for t in np.arange(0., 1.1, 0.1): if np.sum(rec >= t) == 0: p = 0 else: p = np.max(prec[rec >= t]) ap += p / 11. return ap
symbol: the pre-trained network symbol arg_params: the argument parameters of the pre-trained model num_classes: the number of classes for the fine-tune datasets layer_name: the layer name before the last fully-connected layer
def get_fine_tune_model(symbol, arg_params, num_classes, layer_name, dtype='float32'): """ symbol: the pre-trained network symbol arg_params: the argument parameters of the pre-trained model num_classes: the number of classes for the fine-tune datasets layer_name: the layer name before the last fully-connected layer """ all_layers = symbol.get_internals() net = all_layers[layer_name+'_output'] net = mx.symbol.FullyConnected(data=net, num_hidden=num_classes, name='fc') if dtype == 'float16': net = mx.sym.Cast(data=net, dtype=np.float32) net = mx.symbol.SoftmaxOutput(data=net, name='softmax') new_args = dict({k:arg_params[k] for k in arg_params if 'fc' not in k}) return (net, new_args)
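A hedged usage sketch for the fine-tuning helper; the checkpoint prefix and cut-point layer name below are hypothetical and depend on the pre-trained model you load:

import mxnet as mx

# 'resnet-50' and 'flatten0' are placeholders; use your own checkpoint prefix
# and the layer just before the original classifier.
sym, arg_params, aux_params = mx.model.load_checkpoint('resnet-50', 0)
net, new_args = get_fine_tune_model(sym, arg_params, num_classes=257,
                                    layer_name='flatten0')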
Description : generate list for lip images
def _list_images(self, root): """ Description : generate list for lip images """ self.labels = [] self.items = [] valid_unseen_sub_idx = [1, 2, 20, 22] skip_sub_idx = [21] if self._mode == 'train': sub_idx = ['s' + str(i) for i in range(1, 35) \ if i not in valid_unseen_sub_idx + skip_sub_idx] elif self._mode == 'valid': sub_idx = ['s' + str(i) for i in valid_unseen_sub_idx] folder_path = [] for i in sub_idx: folder_path.extend(glob.glob(os.path.join(root, i, "*"))) for folder in folder_path: filename = glob.glob(os.path.join(folder, "*")) if len(filename) != self._seq_len: continue filename.sort() label = os.path.split(folder)[-1] self.items.append((filename, label))
Description : Align to lip position
def align_generation(self, file_nm, padding=75): """ Description : Align to lip position """ align = Align(self._align_root + '/' + file_nm + '.align') return nd.array(align.sentence(padding))
Switch on/off verbose mode Parameters ---------- verbose : bool switch on/off verbose mode print_func : function A function that computes statistics of initialized arrays. Takes an `NDArray` and returns an `str`. Defaults to mean absolute value str((abs(x)/size(x)).asscalar()).
def set_verbosity(self, verbose=False, print_func=None): """Switch on/off verbose mode Parameters ---------- verbose : bool switch on/off verbose mode print_func : function A function that computes statistics of initialized arrays. Takes an `NDArray` and returns an `str`. Defaults to mean absolute value str((abs(x)/size(x)).asscalar()). """ self._verbose = verbose if print_func is None: def asum_stat(x): """returns |x|/size(x), async execution.""" return str((ndarray.norm(x)/sqrt(x.size)).asscalar()) print_func = asum_stat self._print_func = print_func return self
Internal verbose print function Parameters ---------- desc : InitDesc or str name of the array init : str initializer pattern arr : NDArray initialized array
def _verbose_print(self, desc, init, arr): """Internal verbose print function Parameters ---------- desc : InitDesc or str name of the array init : str initializer pattern arr : NDArray initialized array """ if self._verbose and self._print_func: logging.info('Initialized %s as %s: %s', desc, init, self._print_func(arr))
Legacy initialization method. Parameters ---------- name : str Name of corresponding NDArray. arr : NDArray NDArray to be initialized.
def _legacy_init(self, name, arr): """Legacy initialization method. Parameters ---------- name : str Name of corresponding NDArray. arr : NDArray NDArray to be initialized. """ warnings.warn( "\033[91mCalling initializer with init(str, NDArray) has been deprecated." \ "please use init(mx.init.InitDesc(...), NDArray) instead.\033[0m", DeprecationWarning, stacklevel=3) if not isinstance(name, string_types): raise TypeError('name must be string') if not isinstance(arr, NDArray): raise TypeError('arr must be NDArray') if name.startswith('upsampling'): self._init_bilinear(name, arr) elif name.startswith('stn_loc') and name.endswith('weight'): self._init_zero(name, arr) elif name.startswith('stn_loc') and name.endswith('bias'): self._init_loc_bias(name, arr) elif name.endswith('bias'): self._init_bias(name, arr) elif name.endswith('gamma'): self._init_gamma(name, arr) elif name.endswith('beta'): self._init_beta(name, arr) elif name.endswith('weight'): self._init_weight(name, arr) elif name.endswith("moving_mean"): self._init_zero(name, arr) elif name.endswith("moving_var"): self._init_one(name, arr) elif name.endswith("moving_inv_var"): self._init_zero(name, arr) elif name.endswith("moving_avg"): self._init_zero(name, arr) elif name.endswith('min'): self._init_zero(name, arr) elif name.endswith('max'): self._init_one(name, arr) else: self._init_default(name, arr)
Save the image list to disk. Parameters: ---------- fname : str saved filename root : str, optional root directory used to compute relative image paths shuffle : bool, optional whether to shuffle the list before saving
def save_imglist(self, fname=None, root=None, shuffle=False): """ save imglist to disk Parameters: ---------- fname : str saved filename """ def progress_bar(count, total, suffix=''): import sys bar_len = 24 filled_len = int(round(bar_len * count / float(total))) percents = round(100.0 * count / float(total), 1) bar = '=' * filled_len + '-' * (bar_len - filled_len) sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', suffix)) sys.stdout.flush() str_list = [] for index in range(self.num_images): progress_bar(index, self.num_images) label = self.label_from_index(index) if label.size < 1: continue path = self.image_path_from_index(index) if root: path = osp.relpath(path, root) str_list.append('\t'.join([str(index), str(2), str(label.shape[1])] \ + ["{0:.4f}".format(x) for x in label.ravel()] + [path,]) + '\n') if str_list: if shuffle: import random random.shuffle(str_list) if not fname: fname = self.name + '.lst' with open(fname, 'w') as f: for line in str_list: f.write(line) else: raise RuntimeError("No image in imdb")
load class names from text file Parameters: ---------- filename: str file stores class names dirname: str file directory
def _load_class_names(self, filename, dirname): """ load class names from text file Parameters: ---------- filename: str file stores class names dirname: str file directory """ full_path = osp.join(dirname, filename) classes = [] with open(full_path, 'r') as f: classes = [l.strip() for l in f.readlines()] return classes
download and read data into numpy
def read_data(label, image): """ download and read data into numpy """ base_url = 'http://yann.lecun.com/exdb/mnist/' with gzip.open(download_file(base_url+label, os.path.join('data',label))) as flbl: magic, num = struct.unpack(">II", flbl.read(8)) label = np.fromstring(flbl.read(), dtype=np.int8) with gzip.open(download_file(base_url+image, os.path.join('data',image)), 'rb') as fimg: magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16)) image = np.fromstring(fimg.read(), dtype=np.uint8).reshape(len(label), rows, cols) return (label, image)
create data iterator with NDArrayIter
def get_mnist_iter(args, kv): """ create data iterator with NDArrayIter """ (train_lbl, train_img) = read_data( 'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz') (val_lbl, val_img) = read_data( 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz') train = mx.io.NDArrayIter( to4d(train_img), train_lbl, args.batch_size, shuffle=True) val = mx.io.NDArrayIter( to4d(val_img), val_lbl, args.batch_size) return (train, val)
Function factory for file extension argparse assertion Args: extension (string): the file extension to assert Returns: string: the supplied extension, if assertion is successful.
def make_file_extension_assertion(extension): """Function factory for file extension argparse assertion Args: extension (string): the file extension to assert Returns: string: the supplied extension, if assertion is successful. """ def file_extension_assertion(file_path): base, ext = os.path.splitext(file_path) if ext.lower() != extension: raise argparse.ArgumentTypeError('File must have ' + extension + ' extension') return file_path return file_extension_assertion
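For example, the factory can be wired directly into argparse as a type check (a small sketch; the flag and file name are illustrative):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--input', type=make_file_extension_assertion('.jpg'),
                    help='input image (must be a .jpg file)')
args = parser.parse_args(['--input', 'photo.jpg'])
print(args.input)   # photo.jpg; a non-.jpg path would raise ArgumentTypeError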
Generates the colormap for visualizing the segmentation mask Args: num_colors (int): the number of colors to generate in the output palette Returns: list: a flat list of 3 * num_colors integers giving the RGB palette.
def get_palette(num_colors=256): """Generates the colormap for visualizing the segmentation mask Args: num_colors (int): the number of colors to generate in the output palette Returns: list: a flat list of 3 * num_colors integers giving the RGB palette """ pallete = [0]*(num_colors*3) for j in range(0, num_colors): lab = j pallete[j*3+0] = 0 pallete[j*3+1] = 0 pallete[j*3+2] = 0 i = 0 while (lab > 0): pallete[j*3+0] |= (((lab >> 0) & 1) << (7-i)) pallete[j*3+1] |= (((lab >> 1) & 1) << (7-i)) pallete[j*3+2] |= (((lab >> 2) & 1) << (7-i)) i = i + 1 lab >>= 3 return pallete
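The bit-interleaving above produces the familiar PASCAL-VOC-style color map; a quick check of the first few entries (flat RGB triples):

palette = get_palette(4)
print(palette[0:3])    # class 0 -> [0, 0, 0]
print(palette[3:6])    # class 1 -> [128, 0, 0]
print(palette[6:9])    # class 2 -> [0, 128, 0]
print(palette[9:12])   # class 3 -> [128, 128, 0]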
get the (1, 3, h, w) np.array data for the supplied image Args: img_path (string): the input image path Returns: np.array: image data in a (1, 3, h, w) shape
def get_data(img_path): """get the (1, 3, h, w) np.array data for the supplied image Args: img_path (string): the input image path Returns: np.array: image data in a (1, 3, h, w) shape """ mean = np.array([123.68, 116.779, 103.939]) # (R,G,B) img = Image.open(img_path) img = np.array(img, dtype=np.float32) reshaped_mean = mean.reshape(1, 1, 3) img = img - reshaped_mean img = np.swapaxes(img, 0, 2) img = np.swapaxes(img, 1, 2) img = np.expand_dims(img, axis=0) return img
Module main execution
def main(): """Module main execution""" # Initialization variables - update to change your model and execution context model_prefix = "FCN8s_VGG16" epoch = 19 # By default, MXNet will run on the CPU. Change to ctx = mx.gpu() to run on GPU. ctx = mx.cpu() fcnxs, fcnxs_args, fcnxs_auxs = mx.model.load_checkpoint(model_prefix, epoch) fcnxs_args["data"] = mx.nd.array(get_data(args.input), ctx) data_shape = fcnxs_args["data"].shape label_shape = (1, data_shape[2]*data_shape[3]) fcnxs_args["softmax_label"] = mx.nd.empty(label_shape, ctx) executor = fcnxs.bind(ctx, fcnxs_args, args_grad=None, grad_req="null", aux_states=fcnxs_auxs) executor.forward(is_train=False) output = executor.outputs[0] out_img = np.uint8(np.squeeze(output.asnumpy().argmax(axis=1))) out_img = Image.fromarray(out_img) out_img.putpalette(get_palette()) out_img.save(args.output)
check input imdbs, make sure they have same classes
def _check_classes(self): """ check input imdbs, make sure they have same classes """ try: self.classes = self.imdbs[0].classes self.num_classes = len(self.classes) except AttributeError: # fine, if no classes is provided pass if self.num_classes > 0: for db in self.imdbs: assert self.classes == db.classes, "Multiple imdb must have same classes"
get total number of images, init indices Parameters ---------- shuffle : bool whether to shuffle the initial indices
def _load_image_set_index(self, shuffle): """ get total number of images, init indices Parameters ---------- shuffle : bool whether to shuffle the initial indices """ self.num_images = 0 for db in self.imdbs: self.num_images += db.num_images indices = list(range(self.num_images)) if shuffle: random.shuffle(indices) return indices
given index, find out sub-db and sub-index Parameters ---------- index : int index of a specific image Returns ---------- a tuple (sub-db, sub-index)
def _locate_index(self, index): """ given index, find out sub-db and sub-index Parameters ---------- index : int index of a specific image Returns ---------- a tuple (sub-db, sub-index) """ assert index >= 0 and index < self.num_images, "index out of range" pos = self.image_set_index[index] for k, v in enumerate(self.imdbs): if pos >= v.num_images: pos -= v.num_images else: return (k, pos)
given image index, find out full path Parameters ---------- index: int index of a specific image Returns ---------- full path of this image
def image_path_from_index(self, index): """ given image index, find out full path Parameters ---------- index: int index of a specific image Returns ---------- full path of this image """ assert self.image_set_index is not None, "Dataset not initialized" pos = self.image_set_index[index] n_db, n_index = self._locate_index(index) return self.imdbs[n_db].image_path_from_index(n_index)
Callback to checkpoint Module to prefix every epoch. Parameters ---------- mod : subclass of BaseModule The module to checkpoint. prefix : str The file prefix for this checkpoint. period : int How many epochs to wait before checkpointing. Defaults to 1. save_optimizer_states : bool Indicates whether or not to save optimizer states for continued training. Returns ------- callback : function The callback function that can be passed as iter_end_callback to fit.
def module_checkpoint(mod, prefix, period=1, save_optimizer_states=False): """Callback to checkpoint Module to prefix every epoch. Parameters ---------- mod : subclass of BaseModule The module to checkpoint. prefix : str The file prefix for this checkpoint. period : int How many epochs to wait before checkpointing. Defaults to 1. save_optimizer_states : bool Indicates whether or not to save optimizer states for continued training. Returns ------- callback : function The callback function that can be passed as iter_end_callback to fit. """ period = int(max(1, period)) # pylint: disable=unused-argument def _callback(iter_no, sym=None, arg=None, aux=None): """The checkpoint function.""" if (iter_no + 1) % period == 0: mod.save_checkpoint(prefix, iter_no + 1, save_optimizer_states) return _callback
A callback that saves a model checkpoint every few epochs. Each checkpoint is made up of a couple of binary files: a model description file and a parameters (weights and biases) file. The model description file is named `prefix`--symbol.json and the parameters file is named `prefix`-`epoch_number`.params Parameters ---------- prefix : str Prefix for the checkpoint filenames. period : int, optional Interval (number of epochs) between checkpoints. Default `period` is 1. Returns ------- callback : function A callback function that can be passed as `epoch_end_callback` to fit. Example ------- >>> module.fit(iterator, num_epoch=n_epoch, ... epoch_end_callback = mx.callback.do_checkpoint("mymodel", 1)) Start training with [cpu(0)] Epoch[0] Resetting Data Iterator Epoch[0] Time cost=0.100 Saved checkpoint to "mymodel-0001.params" Epoch[1] Resetting Data Iterator Epoch[1] Time cost=0.060 Saved checkpoint to "mymodel-0002.params"
def do_checkpoint(prefix, period=1): """A callback that saves a model checkpoint every few epochs. Each checkpoint is made up of a couple of binary files: a model description file and a parameters (weights and biases) file. The model description file is named `prefix`--symbol.json and the parameters file is named `prefix`-`epoch_number`.params Parameters ---------- prefix : str Prefix for the checkpoint filenames. period : int, optional Interval (number of epochs) between checkpoints. Default `period` is 1. Returns ------- callback : function A callback function that can be passed as `epoch_end_callback` to fit. Example ------- >>> module.fit(iterator, num_epoch=n_epoch, ... epoch_end_callback = mx.callback.do_checkpoint("mymodel", 1)) Start training with [cpu(0)] Epoch[0] Resetting Data Iterator Epoch[0] Time cost=0.100 Saved checkpoint to "mymodel-0001.params" Epoch[1] Resetting Data Iterator Epoch[1] Time cost=0.060 Saved checkpoint to "mymodel-0002.params" """ period = int(max(1, period)) def _callback(iter_no, sym, arg, aux): """The checkpoint function.""" if (iter_no + 1) % period == 0: save_checkpoint(prefix, iter_no + 1, sym, arg, aux) return _callback
Callback to log the training evaluation result every period. Parameters ---------- period : int The number of batch to log the training evaluation metric. auto_reset : bool Reset the metric after each log. Returns ------- callback : function The callback function that can be passed as iter_epoch_callback to fit.
def log_train_metric(period, auto_reset=False): """Callback to log the training evaluation result every period. Parameters ---------- period : int The number of batch to log the training evaluation metric. auto_reset : bool Reset the metric after each log. Returns ------- callback : function The callback function that can be passed as iter_epoch_callback to fit. """ def _callback(param): """The checkpoint function.""" if param.nbatch % period == 0 and param.eval_metric is not None: name_value = param.eval_metric.get_name_value() for name, value in name_value: logging.info('Iter[%d] Batch[%d] Train-%s=%f', param.epoch, param.nbatch, name, value) if auto_reset: param.eval_metric.reset_local() return _callback
install callback to executor. Supports installing to multiple exes. Parameters ---------- exe : mx.executor.Executor The Executor (returned by symbol.bind) to install to.
def install(self, exe): """install callback to executor. Supports installing to multiple exes. Parameters ---------- exe : mx.executor.Executor The Executor (returned by symbol.bind) to install to. """ exe.set_monitor_callback(self.stat_helper, self.monitor_all) self.exes.append(exe)
Start collecting stats for current batch. Call before calling forward.
def tic(self): """Start collecting stats for current batch. Call before calling forward.""" if self.step % self.interval == 0: for exe in self.exes: for array in exe.arg_arrays: array.wait_to_read() for array in exe.aux_arrays: array.wait_to_read() self.queue = [] self.activated = True self.step += 1
End collecting for current batch and return results. Call after computation of current batch. Returns ------- res : list of (step, stat_name, stat_value_string) triples collected for this batch.
def toc(self): """End collecting for current batch and return results. Call after computation of current batch. Returns ------- res : list of """ if not self.activated: return [] for exe in self.exes: for array in exe.arg_arrays: array.wait_to_read() for array in exe.aux_arrays: array.wait_to_read() for exe in self.exes: for name, array in zip(exe._symbol.list_arguments(), exe.arg_arrays): if self.re_prog.match(name): self.queue.append((self.step, name, self.stat_func(array))) for name, array in zip(exe._symbol.list_auxiliary_states(), exe.aux_arrays): if self.re_prog.match(name): self.queue.append((self.step, name, self.stat_func(array))) self.activated = False res = [] if self.sort: self.queue.sort(key=lambda x: x[1]) for n, k, v_list in self.queue: if isinstance(v_list, NDArray): v_list = [v_list] assert isinstance(v_list, list) s = '' for v in v_list: assert isinstance(v, NDArray) if v.shape == (1,): s += str(v.asscalar()) + '\t' else: s += str(v.asnumpy()) + '\t' res.append((n, k, s)) self.queue = [] return res
End collecting and print results.
def toc_print(self): """End collecting and print results.""" res = self.toc() for n, k, v in res: logging.info('Batch: {:7d} {:30s} {:s}'.format(n, k, v))
make a random data iteration plan
def make_data_iter_plan(self): "make a random data iteration plan" # truncate each bucket into multiple of batch-size bucket_n_batches = [] for i in range(len(self.data)): bucket_n_batches.append(int(np.floor(len(self.data[i]) / self.batch_size))) self.data[i] = self.data[i][:int(bucket_n_batches[i]*self.batch_size)] bucket_plan = np.hstack([np.zeros(n, int)+i for i, n in enumerate(bucket_n_batches)]) np.random.shuffle(bucket_plan) bucket_idx_all = [np.random.permutation(len(x)) for x in self.data] self.bucket_plan = bucket_plan self.bucket_idx_all = bucket_idx_all self.bucket_curr_idx = [0 for x in self.data] self.data_buffer = [] self.label_buffer = [] for i_bucket in range(len(self.data)): if not self.model_parallel: data = np.zeros((self.batch_size, self.buckets[i_bucket])) label = np.zeros((self.batch_size, self.buckets[i_bucket])) self.data_buffer.append(data) self.label_buffer.append(label) else: data = np.zeros((self.buckets[i_bucket], self.batch_size)) self.data_buffer.append(data) if self.model_parallel: # Transpose data if model parallel for i in range(len(self.data)): bucket_data = self.data[i] self.data[i] = np.transpose(bucket_data)
Expand the pending files in the current stage. Parameters ---------- x: str The file to expand. pending : list of str The list of pending files to expand. stage: str The current stage for file expansion, used for matching the prefix of files.
def expand(x, pending, stage): """ Expand the pending files in the current stage. Parameters ---------- x: str The file to expand. pending : str The list of pending files to expand. stage: str The current stage for file expansion, used for matching the prefix of files. """ if x in history and x not in ['mshadow/mshadow/expr_scalar-inl.h']: # MULTIPLE includes return if x in pending: #print('loop found: {} in {}'.format(x, pending)) return whtspace = ' ' * expand.treeDepth expand.fileCount += 1 comment = u"//=====[{:3d}] STAGE:{:>4} {}EXPANDING: {} =====\n\n".format(expand.fileCount, stage, whtspace, x) out.write(comment.encode('ascii')) print(comment) with open(x, 'rb') as x_h: for line in x_h.readlines(): uline = line.decode('utf-8') if '#define DMLC_LOG_STACK_TRACE 1' in uline.strip(): # Do not enable stacktrace logging continue if uline.find('#include') < 0: out.write(line) continue if uline.strip().find('#include') > 0: print(uline) continue m = re1.search(uline) if not m: m = re2.search(uline) if m: path = m.groups()[0] else: m = re3.search(uline) if m: path = 'execinfo.h' else: print(uline + ' not found') continue h = path.strip('./') if "../3rdparty/" not in path else path if h.endswith('complex.h') and x.endswith('openblas_config.h'): source = '' elif h.startswith('ps/'): source = '../3rdparty/ps-lite/include/' + h else: source = find_source(h, x, stage) if not source: if (h not in blacklist and h not in sysheaders and 'mkl' not in h and 'nnpack' not in h and 'tensorrt' not in h and not h.endswith('.cuh')): sysheaders.append(h) else: expand.treeDepth += 1 expand(source, pending + [x], stage) expand.treeDepth -= 1 out.write(u"//===== EXPANDED : {} =====\n\n".format(x).encode('ascii')) history.add(x)
Dataset loader with preprocessing.
def get_imagenet_iterator(root, batch_size, num_workers, data_shape=224, dtype='float32'): """Dataset loader with preprocessing.""" train_dir = os.path.join(root, 'train') train_transform, val_transform = get_imagenet_transforms(data_shape, dtype) logging.info("Loading image folder %s, this may take a bit long...", train_dir) train_dataset = ImageFolderDataset(train_dir, transform=train_transform) train_data = DataLoader(train_dataset, batch_size, shuffle=True, last_batch='discard', num_workers=num_workers) val_dir = os.path.join(root, 'val') if not os.path.isdir(os.path.expanduser(os.path.join(root, 'val', 'n01440764'))): user_warning = 'Make sure validation images are stored in one subdir per category, a helper script is available at https://git.io/vNQv1' raise ValueError(user_warning) logging.info("Loading image folder %s, this may take a bit long...", val_dir) val_dataset = ImageFolderDataset(val_dir, transform=val_transform) val_data = DataLoader(val_dataset, batch_size, last_batch='keep', num_workers=num_workers) return DataLoaderIter(train_data, dtype), DataLoaderIter(val_data, dtype)
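A brief usage sketch, assuming an ImageNet-style layout with one subfolder per class under `train/` and `val/`; the `~/data/imagenet` path and the batch size are illustrative:

import os
import logging
logging.basicConfig(level=logging.INFO)

# Assumed layout: <root>/train/<class>/*.jpg and <root>/val/<class>/*.jpg
root = os.path.expanduser('~/data/imagenet')   # illustrative path
train_iter, val_iter = get_imagenet_iterator(root, batch_size=64, num_workers=4,
                                             data_shape=224, dtype='float32')
for batch in train_iter:
    # batch.data[0]: (64, 3, 224, 224) images, batch.label[0]: (64,) class ids
    break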
Creates an instance of token embedding. Creates a token embedding instance by loading embedding vectors from an externally hosted pre-trained token embedding file, such as those of GloVe and FastText. To get all the valid `embedding_name` and `pretrained_file_name`, use `mxnet.contrib.text.embedding.get_pretrained_file_names()`. Parameters ---------- embedding_name : str The token embedding name (case-insensitive). Returns ------- An instance of `mxnet.contrib.text.glossary._TokenEmbedding`: A token embedding instance that loads embedding vectors from an externally hosted pre-trained token embedding file.
def create(embedding_name, **kwargs): """Creates an instance of token embedding. Creates a token embedding instance by loading embedding vectors from an externally hosted pre-trained token embedding file, such as those of GloVe and FastText. To get all the valid `embedding_name` and `pretrained_file_name`, use `mxnet.contrib.text.embedding.get_pretrained_file_names()`. Parameters ---------- embedding_name : str The token embedding name (case-insensitive). Returns ------- An instance of `mxnet.contrib.text.glossary._TokenEmbedding`: A token embedding instance that loads embedding vectors from an externally hosted pre-trained token embedding file. """ create_text_embedding = registry.get_create_func(_TokenEmbedding, 'token embedding') return create_text_embedding(embedding_name, **kwargs)
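A short usage sketch; the file name below must be one of the names reported by `get_pretrained_file_names('glove')`, and the vectors are downloaded and cached on first use:

from mxnet.contrib import text

glove = text.embedding.create('glove', pretrained_file_name='glove.6B.50d.txt')
print(glove.vec_len)                       # 50
print(glove.get_vecs_by_tokens('hello'))   # a 1-D NDArray of length 50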
Get valid token embedding names and their pre-trained file names.

To load token embedding vectors from an externally hosted pre-trained token embedding file,
such as those of GloVe and FastText, one should use
`mxnet.contrib.text.embedding.create(embedding_name, pretrained_file_name)`. This method
returns all the valid names of `pretrained_file_name` for the specified `embedding_name`.
If `embedding_name` is set to None, this method returns all the valid names of
`embedding_name` with their associated `pretrained_file_name`.

Parameters
----------
embedding_name : str or None, default None
    The pre-trained token embedding name.

Returns
-------
dict or list:
    A list of all the valid pre-trained token embedding file names (`pretrained_file_name`)
    for the specified token embedding name (`embedding_name`). If the text embedding name is
    set to None, returns a dict mapping each valid token embedding name to a list of valid
    pre-trained files (`pretrained_file_name`). They can be plugged into
    `mxnet.contrib.text.embedding.create(embedding_name, pretrained_file_name)`.
def get_pretrained_file_names(embedding_name=None): """Get valid token embedding names and their pre-trained file names. To load token embedding vectors from an externally hosted pre-trained token embedding file, such as those of GloVe and FastText, one should use `mxnet.contrib.text.embedding.create(embedding_name, pretrained_file_name)`. This method returns all the valid names of `pretrained_file_name` for the specified `embedding_name`. If `embedding_name` is set to None, this method returns all the valid names of `embedding_name` with their associated `pretrained_file_name`. Parameters ---------- embedding_name : str or None, default None The pre-trained token embedding name. Returns ------- dict or list: A list of all the valid pre-trained token embedding file names (`pretrained_file_name`) for the specified token embedding name (`embedding_name`). If the text embeding name is set to None, returns a dict mapping each valid token embedding name to a list of valid pre-trained files (`pretrained_file_name`). They can be plugged into `mxnet.contrib.text.embedding.create(embedding_name, pretrained_file_name)`. """ text_embedding_reg = registry.get_registry(_TokenEmbedding) if embedding_name is not None: if embedding_name not in text_embedding_reg: raise KeyError('Cannot find `embedding_name` %s. Use ' '`get_pretrained_file_names(' 'embedding_name=None).keys()` to get all the valid embedding ' 'names.' % embedding_name) return list(text_embedding_reg[embedding_name].pretrained_file_name_sha1.keys()) else: return {embedding_name: list(embedding_cls.pretrained_file_name_sha1.keys()) for embedding_name, embedding_cls in registry.get_registry(_TokenEmbedding).items()}
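For example:

from mxnet.contrib import text

# With no argument: a dict mapping every registered embedding name to its files.
all_files = text.embedding.get_pretrained_file_names()
print(sorted(all_files.keys()))            # e.g. includes 'fasttext' and 'glove'

# With an embedding name: the list of valid pre-trained file names for it.
print(text.embedding.get_pretrained_file_names('glove')[:3])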
Load embedding vectors from the pre-trained token embedding file. For every unknown token, if its representation `self.unknown_token` is encountered in the pre-trained token embedding file, index 0 of `self.idx_to_vec` maps to the pre-trained token embedding vector loaded from the file; otherwise, index 0 of `self.idx_to_vec` maps to the text embedding vector initialized by `init_unknown_vec`. If a token is encountered multiple times in the pre-trained text embedding file, only the first-encountered token embedding vector will be loaded and the rest will be skipped.
def _load_embedding(self, pretrained_file_path, elem_delim, init_unknown_vec, encoding='utf8'): """Load embedding vectors from the pre-trained token embedding file. For every unknown token, if its representation `self.unknown_token` is encountered in the pre-trained token embedding file, index 0 of `self.idx_to_vec` maps to the pre-trained token embedding vector loaded from the file; otherwise, index 0 of `self.idx_to_vec` maps to the text embedding vector initialized by `init_unknown_vec`. If a token is encountered multiple times in the pre-trained text embedding file, only the first-encountered token embedding vector will be loaded and the rest will be skipped. """ pretrained_file_path = os.path.expanduser(pretrained_file_path) if not os.path.isfile(pretrained_file_path): raise ValueError('`pretrained_file_path` must be a valid path to ' 'the pre-trained token embedding file.') logging.info('Loading pre-trained token embedding vectors from %s', pretrained_file_path) vec_len = None all_elems = [] tokens = set() loaded_unknown_vec = None line_num = 0 with io.open(pretrained_file_path, 'r', encoding=encoding) as f: for line in f: line_num += 1 elems = line.rstrip().split(elem_delim) assert len(elems) > 1, 'At line %d of the pre-trained text embedding file: the ' \ 'data format of the pre-trained token embedding file %s ' \ 'is unexpected.' % (line_num, pretrained_file_path) token, elems = elems[0], [float(i) for i in elems[1:]] if token == self.unknown_token and loaded_unknown_vec is None: loaded_unknown_vec = elems tokens.add(self.unknown_token) elif token in tokens: warnings.warn('At line %d of the pre-trained token embedding file: the ' 'embedding vector for token %s has been loaded and a duplicate ' 'embedding for the same token is seen and skipped.' % (line_num, token)) elif len(elems) == 1: warnings.warn('At line %d of the pre-trained text embedding file: token %s ' 'with 1-dimensional vector %s is likely a header and is ' 'skipped.' % (line_num, token, elems)) else: if vec_len is None: vec_len = len(elems) # Reserve a vector slot for the unknown token at the very beggining because # the unknown index is 0. all_elems.extend([0] * vec_len) else: assert len(elems) == vec_len, \ 'At line %d of the pre-trained token embedding file: the dimension ' \ 'of token %s is %d but the dimension of previous tokens is %d. ' \ 'Dimensions of all the tokens must be the same.' \ % (line_num, token, len(elems), vec_len) all_elems.extend(elems) self._idx_to_token.append(token) self._token_to_idx[token] = len(self._idx_to_token) - 1 tokens.add(token) self._vec_len = vec_len self._idx_to_vec = nd.array(all_elems).reshape((-1, self.vec_len)) if loaded_unknown_vec is None: self._idx_to_vec[C.UNKNOWN_IDX] = init_unknown_vec(shape=self.vec_len) else: self._idx_to_vec[C.UNKNOWN_IDX] = nd.array(loaded_unknown_vec)
Sets the mapping between token indices and token embedding vectors. Parameters ---------- token_embeddings : instance or list `mxnet.contrib.text.embedding._TokenEmbedding` One or multiple pre-trained token embeddings to load. If it is a list of multiple embeddings, these embedding vectors will be concatenated for each token. vocab_len : int Length of vocabulary whose tokens are indexed in the token embedding. vocab_idx_to_token: list of str A list of indexed tokens in the vocabulary. These tokens are indexed in the token embedding.
def _set_idx_to_vec_by_embeddings(self, token_embeddings, vocab_len, vocab_idx_to_token):
    """Sets the mapping between token indices and token embedding vectors.

    Parameters
    ----------
    token_embeddings : instance or list `mxnet.contrib.text.embedding._TokenEmbedding`
        One or multiple pre-trained token embeddings to load. If it is a list of multiple
        embeddings, these embedding vectors will be concatenated for each token.
    vocab_len : int
        Length of vocabulary whose tokens are indexed in the token embedding.
    vocab_idx_to_token: list of str
        A list of indexed tokens in the vocabulary. These tokens are indexed in the token
        embedding.
    """
    new_vec_len = sum(embed.vec_len for embed in token_embeddings)
    new_idx_to_vec = nd.zeros(shape=(vocab_len, new_vec_len))

    col_start = 0
    # Concatenate all the embedding vectors in token_embeddings.
    for embed in token_embeddings:
        col_end = col_start + embed.vec_len
        # Concatenate vectors of the unknown token.
        new_idx_to_vec[0, col_start:col_end] = embed.idx_to_vec[0]
        new_idx_to_vec[1:, col_start:col_end] = embed.get_vecs_by_tokens(vocab_idx_to_token[1:])
        col_start = col_end

    self._vec_len = new_vec_len
    self._idx_to_vec = new_idx_to_vec
Look up embedding vectors of tokens. Parameters ---------- tokens : str or list of strs A token or a list of tokens. lower_case_backup : bool, default False If False, each token in the original case will be looked up; if True, each token in the original case will be looked up first, if not found in the keys of the property `token_to_idx`, the token in the lower case will be looked up. Returns ------- mxnet.ndarray.NDArray: The embedding vector(s) of the token(s). According to numpy conventions, if `tokens` is a string, returns a 1-D NDArray of shape `self.vec_len`; if `tokens` is a list of strings, returns a 2-D NDArray of shape=(len(tokens), self.vec_len).
def get_vecs_by_tokens(self, tokens, lower_case_backup=False): """Look up embedding vectors of tokens. Parameters ---------- tokens : str or list of strs A token or a list of tokens. lower_case_backup : bool, default False If False, each token in the original case will be looked up; if True, each token in the original case will be looked up first, if not found in the keys of the property `token_to_idx`, the token in the lower case will be looked up. Returns ------- mxnet.ndarray.NDArray: The embedding vector(s) of the token(s). According to numpy conventions, if `tokens` is a string, returns a 1-D NDArray of shape `self.vec_len`; if `tokens` is a list of strings, returns a 2-D NDArray of shape=(len(tokens), self.vec_len). """ to_reduce = False if not isinstance(tokens, list): tokens = [tokens] to_reduce = True if not lower_case_backup: indices = [self.token_to_idx.get(token, C.UNKNOWN_IDX) for token in tokens] else: indices = [self.token_to_idx[token] if token in self.token_to_idx else self.token_to_idx.get(token.lower(), C.UNKNOWN_IDX) for token in tokens] vecs = nd.Embedding(nd.array(indices), self.idx_to_vec, self.idx_to_vec.shape[0], self.idx_to_vec.shape[1]) return vecs[0] if to_reduce else vecs
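A usage sketch with a GloVe embedding (the file name is illustrative; any valid pre-trained file works):

from mxnet.contrib import text

glove = text.embedding.create('glove', pretrained_file_name='glove.6B.50d.txt')

# A single token yields a 1-D NDArray of shape (vec_len,).
vec = glove.get_vecs_by_tokens('computer')

# A list of tokens yields a 2-D NDArray; out-of-vocabulary tokens map to the
# unknown-token vector, and lower_case_backup retries 'Computer' as 'computer'.
vecs = glove.get_vecs_by_tokens(['Computer', 'chip'], lower_case_backup=True)
print(vec.shape, vecs.shape)               # (50,) (2, 50)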
Updates embedding vectors for tokens. Parameters ---------- tokens : str or a list of strs A token or a list of tokens whose embedding vector are to be updated. new_vectors : mxnet.ndarray.NDArray An NDArray to be assigned to the embedding vectors of `tokens`. Its length must be equal to the number of `tokens` and its width must be equal to the dimension of embeddings of the glossary. If `tokens` is a singleton, it must be 1-D or 2-D. If `tokens` is a list of multiple strings, it must be 2-D.
def update_token_vectors(self, tokens, new_vectors):
    """Updates embedding vectors for tokens.

    Parameters
    ----------
    tokens : str or a list of strs
        A token or a list of tokens whose embedding vector are to be updated.
    new_vectors : mxnet.ndarray.NDArray
        An NDArray to be assigned to the embedding vectors of `tokens`. Its length must be
        equal to the number of `tokens` and its width must be equal to the dimension of
        embeddings of the glossary. If `tokens` is a singleton, it must be 1-D or 2-D. If
        `tokens` is a list of multiple strings, it must be 2-D.
    """
    assert self.idx_to_vec is not None, 'The property `idx_to_vec` has not been properly set.'

    if not isinstance(tokens, list) or len(tokens) == 1:
        assert isinstance(new_vectors, nd.NDArray) and len(new_vectors.shape) in [1, 2], \
            '`new_vectors` must be a 1-D or 2-D NDArray if `tokens` is a singleton.'
        if not isinstance(tokens, list):
            tokens = [tokens]
        if len(new_vectors.shape) == 1:
            new_vectors = new_vectors.expand_dims(0)
    else:
        assert isinstance(new_vectors, nd.NDArray) and len(new_vectors.shape) == 2, \
            '`new_vectors` must be a 2-D NDArray if `tokens` is a list of multiple strings.'
    assert new_vectors.shape == (len(tokens), self.vec_len), \
        'The length of new_vectors must be equal to the number of tokens and the width of ' \
        'new_vectors must be equal to the dimension of embeddings of the glossary.'

    indices = []
    for token in tokens:
        if token in self.token_to_idx:
            indices.append(self.token_to_idx[token])
        else:
            raise ValueError('Token %s is unknown. To update the embedding vector for an '
                             'unknown token, please specify it explicitly as the '
                             '`unknown_token` %s in `tokens`. This is to avoid unintended '
                             'updates.' % (token, self.idx_to_token[C.UNKNOWN_IDX]))

    self._idx_to_vec[nd.array(indices)] = new_vectors
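A usage sketch (embedding file name illustrative):

from mxnet import nd
from mxnet.contrib import text

glove = text.embedding.create('glove', pretrained_file_name='glove.6B.50d.txt')

# A single known token accepts a 1-D vector or a (1, vec_len) matrix.
glove.update_token_vectors('computer', nd.ones((glove.vec_len,)))

# Multiple tokens require a 2-D NDArray with one row per token.
glove.update_token_vectors(['chip', 'silicon'], nd.zeros((2, glove.vec_len)))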
Checks if a pre-trained token embedding file name is valid. Parameters ---------- pretrained_file_name : str The pre-trained token embedding file.
def _check_pretrained_file_names(cls, pretrained_file_name): """Checks if a pre-trained token embedding file name is valid. Parameters ---------- pretrained_file_name : str The pre-trained token embedding file. """ embedding_name = cls.__name__.lower() if pretrained_file_name not in cls.pretrained_file_name_sha1: raise KeyError('Cannot find pretrained file %s for token embedding %s. Valid ' 'pretrained files for embedding %s: %s' % (pretrained_file_name, embedding_name, embedding_name, ', '.join(cls.pretrained_file_name_sha1.keys())))
Calculate gradient
def calc_grad(exe, exe_grads, params, X, Y, label_name=None, outgrad_f=None):
    """Calculate gradient"""
    exe.copy_params_from(params)
    exe.arg_dict['data'][:] = X
    if outgrad_f is None:
        exe.arg_dict[label_name][:] = Y
        exe.forward(is_train=True)
        exe.backward()
    else:
        exe.forward(is_train=True)
        exe.backward(outgrad_f(exe.outputs, Y))
    for k, v in exe_grads.items():
        v.wait_to_read()
Perform a single Hamiltonian Monte Carlo (HMC) step: a half-step momentum update, L leapfrog steps, and a Metropolis accept/reject decision.
def step_HMC(exe, exe_params, exe_grads, label_key, noise_precision, prior_precision, L=10, eps=1E-6): """Generate the implementation of step HMC""" init_params = {k: v.copyto(v.context) for k, v in exe_params.items()} end_params = {k: v.copyto(v.context) for k, v in exe_params.items()} init_momentums = {k: mx.random.normal(0, 1, v.shape) for k, v in init_params.items()} end_momentums = {k: v.copyto(v.context) for k, v in init_momentums.items()} init_potential = calc_potential(exe, init_params, label_key, noise_precision, prior_precision) # 0. Calculate Initial Energy and Kinetic init_kinetic = sum([nd.sum(nd.square(momentum)) / 2.0 for momentum in init_momentums.values()]).asscalar() # 1. Make a half step for momentum at the beginning exe.copy_params_from(end_params) exe.forward(is_train=True) exe.backward() for k, v in exe_grads.items(): v.wait_to_read() for k, momentum in end_momentums.items(): momentum[:] = momentum - (eps / 2) * exe_grads[k] # 2. Alternate full steps for position and momentum for i in range(L): # 2.1 Full step for position for k, param in exe_params.items(): param[:] = param + eps * end_momentums[k] # 2.2 Full step for the momentum, except at the end of trajectory we perform a half step exe.forward(is_train=True) exe.backward() for v in exe_grads.values(): v.wait_to_read() if i != L - 1: for k, momentum in end_momentums.items(): momentum[:] = momentum - eps * exe_grads[k] else: for k, momentum in end_momentums.items(): # We should reverse the sign of the momentum at the end momentum[:] = -(momentum - eps / 2.0 * exe_grads[k]) copy_param(exe, end_params) # 3. Calculate acceptance ratio and accept/reject the move end_potential = calc_potential(exe, end_params, label_key, noise_precision, prior_precision) end_kinetic = sum([nd.sum(nd.square(momentum)) / 2.0 for momentum in end_momentums.values()]).asscalar() # print init_potential, init_kinetic, end_potential, end_kinetic r = numpy.random.rand(1) if r < numpy.exp(-(end_potential + end_kinetic) + (init_potential + init_kinetic)): exe.copy_params_from(end_params) return end_params, 1 else: exe.copy_params_from(init_params) return init_params, 0
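For intuition, a self-contained NumPy sketch of the same scheme (half-step momentum update, L leapfrog steps, momentum negation, Metropolis test) on a toy 1-D standard-normal target; the potential U and its gradient here are illustrative choices, not part of the code above:

import numpy as np

def hmc_step_1d(theta, eps=0.1, L=10, rng=np.random):
    """One HMC step for a standard-normal target: U(theta) = theta**2 / 2."""
    U = lambda t: 0.5 * t ** 2          # potential energy (illustrative choice)
    grad_U = lambda t: t                # its gradient
    momentum = rng.normal()
    theta_new, p = theta, momentum
    p -= 0.5 * eps * grad_U(theta_new)  # half step for momentum at the beginning
    for i in range(L):
        theta_new += eps * p            # full step for position
        if i != L - 1:
            p -= eps * grad_U(theta_new)
    p -= 0.5 * eps * grad_U(theta_new)  # final half step for momentum
    p = -p                              # negate momentum to make the proposal symmetric
    current_h = U(theta) + 0.5 * momentum ** 2
    proposed_h = U(theta_new) + 0.5 * p ** 2
    if rng.rand() < np.exp(current_h - proposed_h):
        return theta_new, 1             # accept
    return theta, 0                     # reject

theta, samples = 0.0, []
for _ in range(2000):
    theta, _ = hmc_step_1d(theta)
    samples.append(theta)
print(np.mean(samples), np.var(samples))  # roughly 0 and 1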
Run Hamiltonian Monte Carlo (HMC) sampling and collect posterior samples of the network parameters.
def HMC(sym, data_inputs, X, Y, X_test, Y_test, sample_num, initializer=None, noise_precision=1 / 9.0, prior_precision=0.1, learning_rate=1E-6, L=10, dev=mx.gpu()): """Generate the implementation of HMC""" label_key = list(set(data_inputs.keys()) - set(['data']))[0] exe, exe_params, exe_grads, _ = get_executor(sym, dev, data_inputs, initializer) exe.arg_dict['data'][:] = X exe.arg_dict[label_key][:] = Y sample_pool = [] accept_num = 0 start = time.time() for i in range(sample_num): sample_params, is_accept = step_HMC(exe, exe_params, exe_grads, label_key, noise_precision, prior_precision, L, learning_rate) accept_num += is_accept if (i + 1) % 10 == 0: sample_pool.append(sample_params) if (i + 1) % 100000 == 0: end = time.time() print("Current Iter Num: %d" % (i + 1), "Time Spent: %f" % (end - start), "MSE:", sample_test_regression(exe, X=X_test, Y=Y_test, sample_pool=sample_pool, minibatch_size=Y.shape[0], save_path='regression_HMC.txt')) start = time.time() exe.copy_params_from(sample_params) print('accept ratio', accept_num / float(sample_num)) return sample_pool
Train the network with plain stochastic gradient descent (SGD) on random minibatches.
def SGD(sym, data_inputs, X, Y, X_test, Y_test, total_iter_num, lr=None, lr_scheduler=None, prior_precision=1, out_grad_f=None, initializer=None, minibatch_size=100, dev=mx.gpu()): """Generate the implementation of SGD""" if out_grad_f is None: label_key = list(set(data_inputs.keys()) - set(['data']))[0] exe, params, params_grad, _ = get_executor(sym, dev, data_inputs, initializer) optimizer = mx.optimizer.create('sgd', learning_rate=lr, rescale_grad=X.shape[0] / minibatch_size, lr_scheduler=lr_scheduler, wd=prior_precision) updater = mx.optimizer.get_updater(optimizer) start = time.time() for i in range(total_iter_num): indices = numpy.random.randint(X.shape[0], size=minibatch_size) X_batch = X[indices] Y_batch = Y[indices] exe.arg_dict['data'][:] = X_batch if out_grad_f is None: exe.arg_dict[label_key][:] = Y_batch exe.forward(is_train=True) exe.backward() else: exe.forward(is_train=True) exe.backward(out_grad_f(exe.outputs, nd.array(Y_batch, ctx=dev))) for k in params: updater(k, params_grad[k], params[k]) if (i + 1) % 500 == 0: end = time.time() print("Current Iter Num: %d" % (i + 1), "Time Spent: %f" % (end - start)) sample_test_acc(exe, X=X_test, Y=Y_test, label_num=10, minibatch_size=100) start = time.time() return exe, params, params_grad
Run stochastic gradient Langevin dynamics (SGLD) and collect thinned posterior samples after burn-in.
def SGLD(sym, X, Y, X_test, Y_test, total_iter_num, data_inputs=None, learning_rate=None, lr_scheduler=None, prior_precision=1, out_grad_f=None, initializer=None, minibatch_size=100, thin_interval=100, burn_in_iter_num=1000, task='classification', dev=mx.gpu()): """Generate the implementation of SGLD""" if out_grad_f is None: label_key = list(set(data_inputs.keys()) - set(['data']))[0] exe, params, params_grad, _ = get_executor(sym, dev, data_inputs, initializer) optimizer = mx.optimizer.create('sgld', learning_rate=learning_rate, rescale_grad=X.shape[0] / minibatch_size, lr_scheduler=lr_scheduler, wd=prior_precision) updater = mx.optimizer.get_updater(optimizer) sample_pool = [] start = time.time() for i in range(total_iter_num): indices = numpy.random.randint(X.shape[0], size=minibatch_size) X_batch = X[indices] Y_batch = Y[indices] exe.arg_dict['data'][:] = X_batch if out_grad_f is None: exe.arg_dict[label_key][:] = Y_batch exe.forward(is_train=True) exe.backward() else: exe.forward(is_train=True) exe.backward(out_grad_f(exe.outputs, nd.array(Y_batch, ctx=dev))) for k in params: updater(k, params_grad[k], params[k]) if i < burn_in_iter_num: continue else: if (i - burn_in_iter_num) % thin_interval == 0: if optimizer.lr_scheduler is not None: lr = optimizer.lr_scheduler(optimizer.num_update) else: lr = learning_rate sample_pool.append([lr, copy_param(exe)]) if (i + 1) % 100000 == 0: end = time.time() if task == 'classification': print("Current Iter Num: %d" % (i + 1), "Time Spent: %f" % (end - start)) test_correct, test_total, test_acc = \ sample_test_acc(exe, sample_pool=sample_pool, X=X_test, Y=Y_test, label_num=10, minibatch_size=minibatch_size) print("Test %d/%d=%f" % (test_correct, test_total, test_acc)) else: print("Current Iter Num: %d" % (i + 1), "Time Spent: %f" % (end - start), "MSE:", sample_test_regression(exe=exe, sample_pool=sample_pool, X=X_test, Y=Y_test, minibatch_size=minibatch_size, save_path='regression_SGLD.txt')) start = time.time() return exe, sample_pool
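The heart of the loop above is the `sgld` optimizer's noisy gradient step. A minimal NumPy sketch of that update rule (an illustration of the algorithm, not MXNet's optimizer implementation), where the N/batch_size factor mirrors `rescale_grad` and `weight_decay` mirrors `wd`:

import numpy as np

def sgld_update(theta, minibatch_grad, lr, N, batch_size, weight_decay=0.0,
                rng=np.random):
    """One SGLD step: half a gradient-descent step on the rescaled minibatch
    gradient plus the prior term, then Gaussian noise with variance lr."""
    grad = (N / float(batch_size)) * minibatch_grad + weight_decay * theta
    noise = rng.normal(0.0, np.sqrt(lr), size=np.shape(theta))
    return theta - 0.5 * lr * grad + noise

theta = np.zeros(5)
theta = sgld_update(theta, minibatch_grad=np.ones(5), lr=1e-4,
                    N=60000, batch_size=100)
print(theta)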
Run distilled SGLD: draw teacher samples with SGLD and distill them online into a student network.
def DistilledSGLD(teacher_sym, student_sym, teacher_data_inputs, student_data_inputs, X, Y, X_test, Y_test, total_iter_num, teacher_learning_rate, student_learning_rate, teacher_lr_scheduler=None, student_lr_scheduler=None, student_optimizing_algorithm='sgd', teacher_grad_f=None, student_grad_f=None, teacher_prior_precision=1, student_prior_precision=0.001, perturb_deviation=0.001, student_initializer=None, teacher_initializer=None, minibatch_size=100, task='classification', dev=mx.gpu()): """Generate the implementation of DistilledSGLD""" teacher_exe, teacher_params, teacher_params_grad, _ = \ get_executor(teacher_sym, dev, teacher_data_inputs, teacher_initializer) student_exe, student_params, student_params_grad, _ = \ get_executor(student_sym, dev, student_data_inputs, student_initializer) if teacher_grad_f is None: teacher_label_key = list(set(teacher_data_inputs.keys()) - set(['data']))[0] if student_grad_f is None: student_label_key = list(set(student_data_inputs.keys()) - set(['data']))[0] teacher_optimizer = mx.optimizer.create('sgld', learning_rate=teacher_learning_rate, rescale_grad=X.shape[0] / float(minibatch_size), lr_scheduler=teacher_lr_scheduler, wd=teacher_prior_precision) student_optimizer = mx.optimizer.create(student_optimizing_algorithm, learning_rate=student_learning_rate, rescale_grad=1.0 / float(minibatch_size), lr_scheduler=student_lr_scheduler, wd=student_prior_precision) teacher_updater = mx.optimizer.get_updater(teacher_optimizer) student_updater = mx.optimizer.get_updater(student_optimizer) start = time.time() for i in range(total_iter_num): # 1.1 Draw random minibatch indices = numpy.random.randint(X.shape[0], size=minibatch_size) X_batch = X[indices] Y_batch = Y[indices] # 1.2 Update teacher teacher_exe.arg_dict['data'][:] = X_batch if teacher_grad_f is None: teacher_exe.arg_dict[teacher_label_key][:] = Y_batch teacher_exe.forward(is_train=True) teacher_exe.backward() else: teacher_exe.forward(is_train=True) teacher_exe.backward( teacher_grad_f(teacher_exe.outputs, nd.array(Y_batch, ctx=dev))) for k in teacher_params: teacher_updater(k, teacher_params_grad[k], teacher_params[k]) # 2.1 Draw random minibatch and do random perturbation if task == 'classification': indices = numpy.random.randint(X.shape[0], size=minibatch_size) X_student_batch = X[indices] + numpy.random.normal(0, perturb_deviation, X_batch.shape).astype('float32') else: X_student_batch = mx.random.uniform(-6, 6, X_batch.shape, mx.cpu()) # 2.2 Get teacher predictions teacher_exe.arg_dict['data'][:] = X_student_batch teacher_exe.forward(is_train=False) teacher_pred = teacher_exe.outputs[0] teacher_pred.wait_to_read() # 2.3 Update student student_exe.arg_dict['data'][:] = X_student_batch if student_grad_f is None: student_exe.arg_dict[student_label_key][:] = teacher_pred student_exe.forward(is_train=True) student_exe.backward() else: student_exe.forward(is_train=True) student_exe.backward(student_grad_f(student_exe.outputs, teacher_pred)) for k in student_params: student_updater(k, student_params_grad[k], student_params[k]) if (i + 1) % 2000 == 0: end = time.time() if task == 'classification': print("Current Iter Num: %d" % (i + 1), "Time Spent: %f" % (end - start)) test_correct, test_total, test_acc = \ sample_test_acc(student_exe, X=X_test, Y=Y_test, label_num=10, minibatch_size=minibatch_size) train_correct, train_total, train_acc = \ sample_test_acc(student_exe, X=X, Y=Y, label_num=10, minibatch_size=minibatch_size) teacher_test_correct, teacher_test_total, teacher_test_acc = \ 
sample_test_acc(teacher_exe, X=X_test, Y=Y_test, label_num=10, minibatch_size=minibatch_size) teacher_train_correct, teacher_train_total, teacher_train_acc = \ sample_test_acc(teacher_exe, X=X, Y=Y, label_num=10, minibatch_size=minibatch_size) print("Student: Test ACC %d/%d=%f, Train ACC %d/%d=%f" % (test_correct, test_total, test_acc, train_correct, train_total, train_acc)) print("Teacher: Test ACC %d/%d=%f, Train ACC %d/%d=%f" \ % (teacher_test_correct, teacher_test_total, teacher_test_acc, teacher_train_correct, teacher_train_total, teacher_train_acc)) else: print("Current Iter Num: %d" % (i + 1), "Time Spent: %f" % (end - start), "MSE:", sample_test_regression(exe=student_exe, X=X_test, Y=Y_test, minibatch_size=minibatch_size, save_path='regression_DSGLD.txt')) start = time.time() return student_exe, student_params, student_params_grad
Get a list of architectures given our dockerfiles
def get_platforms(path: str = get_dockerfiles_path()) -> List[str]: """Get a list of architectures given our dockerfiles""" dockerfiles = glob.glob(os.path.join(path, "Dockerfile.*")) dockerfiles = list(filter(lambda x: x[-1] != '~', dockerfiles)) files = list(map(lambda x: re.sub(r"Dockerfile.(.*)", r"\1", x), dockerfiles)) platforms = list(map(lambda x: os.path.split(x)[1], sorted(files))) return platforms
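For example, assuming the dockerfiles directory contains `Dockerfile.build.ubuntu_cpu` and `Dockerfile.publish.ubuntu_cpu` (names illustrative):

# Returns the sorted platform suffixes: ['build.ubuntu_cpu', 'publish.ubuntu_cpu']
print(get_platforms())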
:return: docker tag to be used for the container
def get_docker_tag(platform: str, registry: str) -> str: """:return: docker tag to be used for the container""" platform = platform if any(x in platform for x in ['build.', 'publish.']) else 'build.{}'.format(platform) if not registry: registry = "mxnet_local" return "{0}/{1}".format(registry, platform)
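The tag is simply `<registry>/<platform>`, with a `build.` prefix added when the platform name carries no stage prefix; for example (registry name illustrative):

print(get_docker_tag('ubuntu_cpu', 'mxnetci'))           # mxnetci/build.ubuntu_cpu
print(get_docker_tag('publish.ubuntu_cpu', 'mxnetci'))   # mxnetci/publish.ubuntu_cpu
print(get_docker_tag('ubuntu_cpu', ''))                  # mxnet_local/build.ubuntu_cpu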
Build a container for the given platform :param platform: Platform :param docker_binary: docker binary to use (docker/nvidia-docker) :param registry: Dockerhub registry name :param num_retries: Number of retries to build the docker image :param no_cache: pass no-cache to docker to rebuild the images :return: Id of the top level image
def build_docker(platform: str, docker_binary: str, registry: str, num_retries: int, no_cache: bool) -> str: """ Build a container for the given platform :param platform: Platform :param docker_binary: docker binary to use (docker/nvidia-docker) :param registry: Dockerhub registry name :param num_retries: Number of retries to build the docker image :param no_cache: pass no-cache to docker to rebuild the images :return: Id of the top level image """ tag = get_docker_tag(platform=platform, registry=registry) logging.info("Building docker container tagged '%s' with %s", tag, docker_binary) # # We add a user with the same group as the executing non-root user so files created in the # container match permissions of the local user. Same for the group. # # These variables are used in the docker files to create user and group with these ids. # see: docker/install/ubuntu_adduser.sh # # cache-from is needed so we use the cached images tagged from the remote via # docker pull see: docker_cache.load_docker_cache # # This also prevents using local layers for caching: https://github.com/moby/moby/issues/33002 # So to use local caching, we should omit the cache-from by using --no-dockerhub-cache argument to this # script. # # This doesn't work with multi head docker files. # cmd = [docker_binary, "build", "-f", get_dockerfile(platform), "--build-arg", "USER_ID={}".format(os.getuid()), "--build-arg", "GROUP_ID={}".format(os.getgid())] if no_cache: cmd.append("--no-cache") elif registry: cmd.extend(["--cache-from", tag]) cmd.extend(["-t", tag, get_dockerfiles_path()]) @retry(subprocess.CalledProcessError, tries=num_retries) def run_cmd(): logging.info("Running command: '%s'", ' '.join(cmd)) check_call(cmd) run_cmd() # Get image id by reading the tag. It's guaranteed (except race condition) that the tag exists. Otherwise, the # check_call would have failed image_id = _get_local_image_id(docker_binary=docker_binary, docker_tag=tag) if not image_id: raise FileNotFoundError('Unable to find docker image id matching with {}'.format(tag)) return image_id
Get the image id of the local docker layer with the passed tag

:param docker_tag: docker tag
:return: Image id as string; raises RuntimeError if no image matches the tag
def _get_local_image_id(docker_binary, docker_tag):
    """
    Get the image id of the local docker layer with the passed tag
    :param docker_tag: docker tag
    :return: Image id as string; raises RuntimeError if no image matches the tag
    """
    cmd = [docker_binary, "images", "-q", docker_tag]
    image_id_b = check_output(cmd)
    image_id = image_id_b.decode('utf-8').strip()
    if not image_id:
        raise RuntimeError('Unable to find docker image id matching with tag {}'.format(docker_tag))
    return image_id
:return: ccache directory for the current platform
def default_ccache_dir() -> str: """:return: ccache directory for the current platform""" # Share ccache across containers if 'CCACHE_DIR' in os.environ: ccache_dir = os.path.realpath(os.environ['CCACHE_DIR']) try: os.makedirs(ccache_dir, exist_ok=True) return ccache_dir except PermissionError: logging.info('Unable to make dirs at %s, falling back to local temp dir', ccache_dir) # In osx tmpdir is not mountable by default import platform if platform.system() == 'Darwin': ccache_dir = "/tmp/_mxnet_ccache" os.makedirs(ccache_dir, exist_ok=True) return ccache_dir return os.path.join(os.path.expanduser("~"), ".ccache")
Run command in a container
def container_run(platform: str, nvidia_runtime: bool, docker_registry: str, shared_memory_size: str, local_ccache_dir: str, command: List[str], cleanup: Cleanup, environment: Dict[str, str], dry_run: bool = False) -> int: """Run command in a container""" container_wait_s = 600 # # Environment setup # environment.update({ 'CCACHE_MAXSIZE': '500G', 'CCACHE_TEMPDIR': '/tmp/ccache', # temp dir should be local and not shared 'CCACHE_DIR': '/work/ccache', # this path is inside the container as /work/ccache is # mounted 'CCACHE_LOGFILE': '/tmp/ccache.log', # a container-scoped log, useful for ccache # verification. }) # These variables are passed to the container to the process tree killer can find runaway # process inside the container # https://wiki.jenkins.io/display/JENKINS/ProcessTreeKiller # https://github.com/jenkinsci/jenkins/blob/578d6bacb33a5e99f149de504c80275796f0b231/core/src/main/java/hudson/model/Run.java#L2393 # jenkins_env_vars = ['BUILD_NUMBER', 'BUILD_ID', 'BUILD_TAG'] environment.update({k: os.environ[k] for k in jenkins_env_vars if k in os.environ}) environment.update({k: os.environ[k] for k in ['CCACHE_MAXSIZE'] if k in os.environ}) tag = get_docker_tag(platform=platform, registry=docker_registry) mx_root = get_mxnet_root() local_build_folder = buildir() # We need to create it first, otherwise it will be created by the docker daemon with root only permissions os.makedirs(local_build_folder, exist_ok=True) os.makedirs(local_ccache_dir, exist_ok=True) logging.info("Using ccache directory: %s", local_ccache_dir) docker_client = docker.from_env() # Equivalent command docker_cmd_list = [ get_docker_binary(nvidia_runtime), 'run', "--cap-add", "SYS_PTRACE", # Required by ASAN '--rm', '--shm-size={}'.format(shared_memory_size), # mount mxnet root '-v', "{}:/work/mxnet".format(mx_root), # mount mxnet/build for storing build '-v', "{}:/work/build".format(local_build_folder), '-v', "{}:/work/ccache".format(local_ccache_dir), '-u', '{}:{}'.format(os.getuid(), os.getgid()), '-e', 'CCACHE_MAXSIZE={}'.format(environment['CCACHE_MAXSIZE']), # temp dir should be local and not shared '-e', 'CCACHE_TEMPDIR={}'.format(environment['CCACHE_TEMPDIR']), # this path is inside the container as /work/ccache is mounted '-e', "CCACHE_DIR={}".format(environment['CCACHE_DIR']), # a container-scoped log, useful for ccache verification. 
'-e', "CCACHE_LOGFILE={}".format(environment['CCACHE_LOGFILE']), '-ti', tag] docker_cmd_list.extend(command) docker_cmd = ' \\\n\t'.join(docker_cmd_list) logging.info("Running %s in container %s", command, tag) logging.info("Executing the equivalent of:\n%s\n", docker_cmd) # return code of the command inside docker ret = 0 if not dry_run: ############################# # signal.pthread_sigmask(signal.SIG_BLOCK, {signal.SIGINT, signal.SIGTERM}) # noinspection PyShadowingNames runtime = None if nvidia_runtime: # noinspection PyShadowingNames # runc is default (docker info | grep -i runtime) runtime = 'nvidia' container = docker_client.containers.run( tag, runtime=runtime, detach=True, command=command, shm_size=shared_memory_size, user='{}:{}'.format(os.getuid(), os.getgid()), cap_add='SYS_PTRACE', volumes={ mx_root: {'bind': '/work/mxnet', 'mode': 'rw'}, local_build_folder: {'bind': '/work/build', 'mode': 'rw'}, local_ccache_dir: {'bind': '/work/ccache', 'mode': 'rw'}, }, environment=environment) try: logging.info("Started container: %s", trim_container_id(container.id)) # Race condition: # If the previous call is interrupted then it's possible that the container is not cleaned up # We avoid by masking the signals temporarily cleanup.add_container(container) signal.pthread_sigmask(signal.SIG_UNBLOCK, {signal.SIGINT, signal.SIGTERM}) # ############################# stream = container.logs(stream=True, stdout=True, stderr=True) sys.stdout.flush() for chunk in stream: sys.stdout.buffer.write(chunk) sys.stdout.buffer.flush() sys.stdout.flush() stream.close() try: logging.info("Waiting for status of container %s for %d s.", trim_container_id(container.id), container_wait_s) wait_result = container.wait(timeout=container_wait_s) logging.info("Container exit status: %s", wait_result) ret = wait_result.get('StatusCode', 200) if ret != 0: logging.error("Container exited with an error 😞") else: logging.info("Container exited with success 👍") except Exception as e: logging.exception(e) ret = 150 # Stop try: logging.info("Stopping container: %s", trim_container_id(container.id)) container.stop() except Exception as e: logging.exception(e) ret = 151 # Remove try: logging.info("Removing container: %s", trim_container_id(container.id)) container.remove() except Exception as e: logging.exception(e) ret = 152 cleanup.remove_container(container) containers = docker_client.containers.list() if containers: logging.info("Other running containers: %s", [trim_container_id(x.id) for x in containers]) except docker.errors.NotFound as e: logging.info("Container was stopped before cleanup started: %s", e) return ret
Imports tagged container from the given docker registry
def load_docker_cache(tag, docker_registry) -> None: """Imports tagged container from the given docker registry""" if docker_registry: # noinspection PyBroadException try: import docker_cache logging.info('Docker cache download is enabled from registry %s', docker_registry) docker_cache.load_docker_cache(registry=docker_registry, docker_tag=tag) except Exception: logging.exception('Unable to retrieve Docker cache. Continue without...') else: logging.info('Distributed docker cache disabled')
Load a list of arrays into a list of arrays specified by slices.
def _load_general(data, targets, major_axis): """Load a list of arrays into a list of arrays specified by slices.""" for d_src, d_targets, axis in zip(data, targets, major_axis): # pylint: disable=too-many-nested-blocks if isinstance(d_targets, nd.NDArray): d_src.copyto(d_targets) elif isinstance(d_src, (list, tuple)): for src, dst in zip(d_src, d_targets): src.copyto(dst) else: for slice_idx, d_dst in d_targets: if axis >= 0: # copy slice shape = d_src.shape do_crop = (slice_idx.start != 0 or shape[axis] != slice_idx.stop) # pylint: disable=no-member,protected-access if do_crop: if axis == 0: d_src[slice_idx.start:slice_idx.stop].copyto(d_dst) else: if d_src.context == d_dst.context: nd.slice_axis(d_src, axis=axis, begin=slice_idx.start, end=slice_idx.stop, out=d_dst) else: # on different device, crop and then do cross device copy d_dst_copy = nd.slice_axis(d_src, axis=axis, begin=slice_idx.start, end=slice_idx.stop) d_dst_copy.copyto(d_dst) else: d_src.copyto(d_dst) # pylint: enable=no-member,protected-access else: d_src.copyto(d_dst)
Load data into sliced arrays.
def _load_data(batch, targets, major_axis): """Load data into sliced arrays.""" if isinstance(batch, list): new_batch = [] for i in range(len(targets)): new_batch.append([b.data[i] for b in batch]) new_targets = [[dst for _, dst in d_target] for d_target in targets] _load_general(new_batch, new_targets, major_axis) else: _load_general(batch.data, targets, major_axis)
Merge outputs that live on multiple contexts into one, so that they look like they live on a single context.
def _merge_multi_context(outputs, major_axis): """Merge outputs that lives on multiple context into one, so that they look like living on one context. """ rets = [] for tensors, axis in zip(outputs, major_axis): if axis >= 0: # pylint: disable=no-member,protected-access if len(tensors) == 1: rets.append(tensors[0]) else: # Concatenate if necessary rets.append(nd.concat(*[tensor.as_in_context(tensors[0].context) for tensor in tensors], dim=axis)) # pylint: enable=no-member,protected-access else: # negative axis means the there is no batch_size axis, and all the # results should be the same on each device. We simply take the # first one, without checking they are actually the same rets.append(tensors[0]) return rets
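A small sketch of the merge for a two-device output with batch axis 0 (both halves are on CPU here, so it runs anywhere):

from mxnet import nd

# Per-device halves of a batch of 8 rows.
half_a = nd.arange(12).reshape((4, 3))
half_b = nd.arange(12).reshape((4, 3)) + 100

# axis >= 0: move everything to the first tensor's context and concatenate
# along the batch axis, exactly as the branch above does.
merged = nd.concat(*[t.as_in_context(half_a.context) for t in (half_a, half_b)], dim=0)
print(merged.shape)   # (8, 3)

# axis < 0 means the output has no batch axis (it is replicated per device),
# so only the first copy is kept.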
Prepare the group2ctxs mapping, duplicating the context if some ctx_group maps to only one context.
def _prepare_group2ctxs(group2ctxs, ctx_len): """Prepare the group2contexts, will duplicate the context if some ctx_group map to only one context. """ if group2ctxs is None: return [None] * ctx_len elif isinstance(group2ctxs, list): assert(len(group2ctxs) == ctx_len), "length of group2ctxs\ should be %d" % ctx_len return group2ctxs elif isinstance(group2ctxs, dict): ret = [{} for i in range(ctx_len)] for k, v in group2ctxs.items(): ctxs = None if isinstance(v, ctx.Context): ctxs = [v] * ctx_len else: if len(v) == 1: ctxs = v * ctx_len else: assert(len(v) == ctx_len), "length of group2ctxs[%s]\ should be %d or 1" % (k, ctx_len) ctxs = v for i in range(ctx_len): ret[i][k] = ctxs[i] return ret else: assert(False), "group2ctxs should be list of dict of str to context,\ or dict of str to context or list of context" return False
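To illustrate the accepted forms, a short sketch calling the helper directly (contexts are CPUs so it runs without GPUs):

import mxnet as mx

ctx_len = 2   # two data-parallel devices

# A single context (or a one-element list) is duplicated for every device,
# while a full-length list contributes one entry per device.
group2ctxs = {'dev1': mx.cpu(0), 'dev2': [mx.cpu(1), mx.cpu(2)]}
print(_prepare_group2ctxs(group2ctxs, ctx_len))
# -> [{'dev1': cpu(0), 'dev2': cpu(1)}, {'dev1': cpu(0), 'dev2': cpu(2)}]

# None disables context groups for every device.
print(_prepare_group2ctxs(None, ctx_len))   # -> [None, None]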
Decide the slices for each context according to the workload. Parameters ---------- data_shapes : list list of (name, shape) specifying the shapes for the input data or label.
def decide_slices(self, data_shapes): """Decide the slices for each context according to the workload. Parameters ---------- data_shapes : list list of (name, shape) specifying the shapes for the input data or label. """ assert len(data_shapes) > 0 major_axis = [DataDesc.get_batch_axis(x.layout) for x in data_shapes] for (name, shape), axis in zip(data_shapes, major_axis): if axis == -1: continue batch_size = shape[axis] if self.batch_size is not None: assert batch_size == self.batch_size, ("all data must have the same batch size: " + ("batch_size = %d, but " % self.batch_size) + ("%s has shape %s" % (name, shape))) else: self.batch_size = batch_size self.slices = _split_input_slice(self.batch_size, self.workload) return major_axis
Collect internal arrays from executors.
def _collect_arrays(self): """Collect internal arrays from executors.""" # convenient data structures self.data_arrays = [[(self.slices[i], e.arg_dict[name]) for i, e in enumerate(self.execs)] for name, _ in self.data_shapes] self.state_arrays = [[e.arg_dict[name] for e in self.execs] for name in self.state_names] if self.label_shapes is not None: self.label_arrays = [[(self.slices[i], e.arg_dict[name]) for i, e in enumerate(self.execs)] for name, _ in self.label_shapes] else: self.label_arrays = None self.param_arrays = [[exec_.arg_arrays[i] for exec_ in self.execs] for i, name in enumerate(self.arg_names) if name in self.param_names] if self.for_training: self.grad_arrays = [[exec_.grad_arrays[i] for exec_ in self.execs] for i, name in enumerate(self.arg_names) if name in self.param_names] else: self.grad_arrays = None data_names = [x[0] for x in self.data_shapes] if self.inputs_need_grad: self.input_grad_arrays = [[exec_.grad_arrays[self.arg_names.index(name)] for exec_ in self.execs] for name in data_names if name in self.arg_names] else: self.input_grad_arrays = None self.aux_arrays = [[exec_.aux_arrays[i] for exec_ in self.execs] for i in range(len(self.aux_names))]
Bind executors on their respective devices. Parameters ---------- data_shapes : list label_shapes : list shared_group : DataParallelExecutorGroup reshape : bool
def bind_exec(self, data_shapes, label_shapes, shared_group=None, reshape=False): """Bind executors on their respective devices. Parameters ---------- data_shapes : list label_shapes : list shared_group : DataParallelExecutorGroup reshape : bool """ assert reshape or not self.execs self.batch_size = None # calculate workload and bind executors self.data_layouts = self.decide_slices(data_shapes) if label_shapes is not None: # call it to make sure labels has the same batch size as data self.label_layouts = self.decide_slices(label_shapes) for i in range(len(self.contexts)): data_shapes_i = self._sliced_shape(data_shapes, i, self.data_layouts) if label_shapes is not None: label_shapes_i = self._sliced_shape(label_shapes, i, self.label_layouts) else: label_shapes_i = [] if reshape: self.execs[i] = self._default_execs[i].reshape( allow_up_sizing=True, **dict(data_shapes_i + label_shapes_i)) else: self.execs.append(self._bind_ith_exec(i, data_shapes_i, label_shapes_i, shared_group)) self.data_shapes = data_shapes self.label_shapes = label_shapes self.data_names = [i.name for i in self.data_shapes] if label_shapes is not None: self.label_names = [i.name for i in self.label_shapes] self._collect_arrays()
Reshape executors. Parameters ---------- data_shapes : list label_shapes : list
def reshape(self, data_shapes, label_shapes): """Reshape executors. Parameters ---------- data_shapes : list label_shapes : list """ if data_shapes == self.data_shapes and label_shapes == self.label_shapes: return if self._default_execs is None: self._default_execs = [i for i in self.execs] self.bind_exec(data_shapes, label_shapes, reshape=True)
Assign, i.e. copy parameters to all the executors.

Parameters
----------
arg_params : dict
    A dictionary of name to `NDArray` parameter mapping.
aux_params : dict
    A dictionary of name to `NDArray` auxiliary variable mapping.
allow_extra : boolean, optional
    Whether to allow extra parameters that are not needed by the symbol.
    If this is True, no error will be thrown when arg_params or aux_params
    contain extra parameters that are not needed by the executor.
def set_params(self, arg_params, aux_params, allow_extra=False): """Assign, i.e. copy parameters to all the executors. Parameters ---------- arg_params : dict A dictionary of name to `NDArray` parameter mapping. aux_params : dict A dictionary of name to `NDArray` auxiliary variable mapping. allow_extra : boolean, optional Whether allow extra parameters that are not needed by symbol. If this is True, no error will be thrown when arg_params or aux_params contain extra parameters that is not needed by the executor. """ for exec_ in self.execs: exec_.copy_params_from(arg_params, aux_params, allow_extra_params=allow_extra)
Copy data from each executor to `arg_params` and `aux_params`. Parameters ---------- arg_params : list of NDArray Target parameter arrays. aux_params : list of NDArray Target aux arrays. Notes ----- - This function will inplace update the NDArrays in arg_params and aux_params.
def get_params(self, arg_params, aux_params): """ Copy data from each executor to `arg_params` and `aux_params`. Parameters ---------- arg_params : list of NDArray Target parameter arrays. aux_params : list of NDArray Target aux arrays. Notes ----- - This function will inplace update the NDArrays in arg_params and aux_params. """ for name, block in zip(self.param_names, self.param_arrays): weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block) weight.astype(arg_params[name].dtype).copyto(arg_params[name]) for name, block in zip(self.aux_names, self.aux_arrays): weight = sum(w.copyto(ctx.cpu()) for w in block) / len(block) weight.astype(aux_params[name].dtype).copyto(aux_params[name])
Split `data_batch` according to workload and run forward on each device.

Parameters
----------
data_batch : DataBatch
    Or could be any object implementing a similar interface.
is_train : bool
    The hint for the backend, indicating whether we are in the training phase.
    Default is `None`, then the value `self.for_training` will be used.
def forward(self, data_batch, is_train=None): """Split `data_batch` according to workload and run forward on each devices. Parameters ---------- data_batch : DataBatch Or could be any object implementing similar interface. is_train : bool The hint for the backend, indicating whether we are during training phase. Default is `None`, then the value `self.for_training` will be used. Returns ------- """ _load_data(data_batch, self.data_arrays, self.data_layouts) if is_train is None: is_train = self.for_training if isinstance(data_batch, list): if self.label_arrays is not None and data_batch is not None and data_batch[0].label: _load_label(data_batch, self.label_arrays, self.label_layouts) else: if self.label_arrays is not None and data_batch.label: _load_label(data_batch, self.label_arrays, self.label_layouts) for exec_ in self.execs: exec_.forward(is_train=is_train)
Get the shapes of the outputs.
def get_output_shapes(self): """Get the shapes of the outputs.""" outputs = self.execs[0].outputs shapes = [out.shape for out in outputs] concat_shapes = [] for key, the_shape, axis in zip(self.symbol.list_outputs(), shapes, self.output_layouts): the_shape = list(the_shape) if axis >= 0: the_shape[axis] = self.batch_size concat_shapes.append((key, tuple(the_shape))) return concat_shapes