Columns: desc (string, lengths 3 to 26.7k), decl (string, lengths 11 to 7.89k), bodies (string, lengths 8 to 553k)
'.. todo:: WRITEME'
def get_design_matrix(self):
return self.X
'.. todo:: WRITEME Parameters dataset : Dataset The dataset to act on. can_fit : bool If True, the Preprocessor can adapt internal parameters based on the contents of dataset. Otherwise it must not fit any parameters, or must re-use old ones. Subclasses should still have this default to False, so that the behavior of the preprocessors is uniform. Notes Typical usage: .. code-block:: python # Learn PCA preprocessing and apply it to the training set my_pca_preprocessor.apply(training_set, can_fit = True) # Now apply the same transformation to the test set my_pca_preprocessor.apply(test_set, can_fit = False) This method must take a dataset, rather than a numpy ndarray, for a variety of reasons: - Preprocessors should work on any dataset, and not all datasets will store their data as ndarrays. - Preprocessors often need to change a dataset\'s metadata. For example, suppose you have a DenseDesignMatrix dataset of images. If you implement a fovea Preprocessor that reduces the dimensionality of images by sampling them finely near the center and coarsely with blurring at the edges, then your preprocessor will need to change the way that the dataset converts example vectors to images for visualization.'
def apply(self, dataset, can_fit=False):
raise NotImplementedError((str(type(self)) + ' does not implement an apply method.'))
'Do any necessary prep work to be able to support the "inverse" method later. The default implementation is a no-op.'
def invert(self):
pass
'.. todo:: WRITEME'
def apply(self, dataset, can_fit=False):
assert not can_fit
dataset.X = self.block.perform(dataset.X)
'.. todo:: WRITEME'
def apply(self, dataset, can_fit=False):
for item in self.items: item.apply(dataset, can_fit)
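A hedged usage sketch of chaining preprocessors as the loop above implies; the `items` keyword and the GlobalContrastNormalization name are assumptions, and only apply(dataset, can_fit) is taken from the code:
# Assumed import path and constructor signature; train_set/test_set are placeholders.
from pylearn2.datasets.preprocessing import Pipeline, GlobalContrastNormalization, ZCA

pipeline = Pipeline(items=[GlobalContrastNormalization(), ZCA()])
pipeline.apply(train_set, can_fit=True)    # fit statistics on the training set
pipeline.apply(test_set, can_fit=False)    # reuse the fitted statistics on held-out data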
'.. todo:: WRITEME'
def apply(self, dataset, can_fit=False):
X = dataset.get_topological_view() num_topological_dimensions = (len(X.shape) - 2) if (num_topological_dimensions != len(self.patch_shape)): raise ValueError(((((('ExtractGridPatches with ' + str(len(self.patch_shape))) + ' topological dimensions called on') + ' dataset with ') + str(num_topological_dimensions)) + '.')) num_patches = X.shape[0] max_strides = [(X.shape[0] - 1)] for i in xrange(num_topological_dimensions): patch_width = self.patch_shape[i] data_width = X.shape[(i + 1)] last_valid_coord = (data_width - patch_width) if (last_valid_coord < 0): raise ValueError(((((('On topological dimension ' + str(i)) + ', the data has width ') + str(data_width)) + ' but the requested patch width is ') + str(patch_width))) stride = self.patch_stride[i] if (stride == 0): max_stride_this_axis = 0 else: max_stride_this_axis = (last_valid_coord / stride) num_strides_this_axis = (max_stride_this_axis + 1) max_strides.append(max_stride_this_axis) num_patches *= num_strides_this_axis output_shape = [num_patches] for dim in self.patch_shape: output_shape.append(dim) output_shape.append(X.shape[(-1)]) output = numpy.zeros(output_shape, dtype=X.dtype) channel_slice = slice(0, X.shape[(-1)]) coords = ([0] * (num_topological_dimensions + 1)) keep_going = True i = 0 while keep_going: args = [coords[0]] for j in xrange(num_topological_dimensions): coord = (coords[(j + 1)] * self.patch_stride[j]) args.append(slice(coord, (coord + self.patch_shape[j]))) args.append(channel_slice) patch = X[args] output[i, :] = patch i += 1 j = 0 keep_going = False while (not keep_going): if (coords[(- (j + 1))] < max_strides[(- (j + 1))]): coords[(- (j + 1))] += 1 keep_going = True else: coords[(- (j + 1))] = 0 if (j == num_topological_dimensions): break j = (j + 1) dataset.set_topological_view(output) if (dataset.y is not None): dataset.y = numpy.repeat(dataset.y, (num_patches / X.shape[0]))
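To make the stride arithmetic above concrete, a small worked example (the numbers are illustrative, not from the source):
# 32x32 images, patch_shape=(8, 8), patch_stride=(8, 8)
image_width, patch_width, stride = 32, 8, 8
last_valid_coord = image_width - patch_width        # 24
strides_per_axis = last_valid_coord // stride + 1   # 4
patches_per_image = strides_per_axis ** 2           # 16 patches per example in 2-D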
'.. todo:: WRITEME'
def apply(self, dataset, can_fit=False):
patches = dataset.get_topological_view() num_topological_dimensions = (len(patches.shape) - 2) if (num_topological_dimensions != len(self.patch_shape)): raise ValueError(((((('ReassembleGridPatches with ' + str(len(self.patch_shape))) + ' topological dimensions called on dataset ') + ' with ') + str(num_topological_dimensions)) + '.')) num_patches = patches.shape[0] num_examples = num_patches for (im_dim, patch_dim) in zip(self.orig_shape, self.patch_shape): if ((im_dim % patch_dim) != 0): raise Exception((((('Trying to assemble patches of shape ' + str(self.patch_shape)) + ' into images of ') + 'shape ') + str(self.orig_shape))) patches_this_dim = (im_dim / patch_dim) if ((num_examples % patches_this_dim) != 0): raise Exception(((((('Trying to re-assemble ' + str(num_patches)) + ' patches of shape ') + str(self.patch_shape)) + ' into images of shape ') + str(self.orig_shape))) num_examples /= patches_this_dim reassembled_shape = [num_examples] for dim in self.orig_shape: reassembled_shape.append(dim) reassembled_shape.append(patches.shape[(-1)]) reassembled = numpy.zeros(reassembled_shape, dtype=patches.dtype) channel_slice = slice(0, patches.shape[(-1)]) coords = ([0] * (num_topological_dimensions + 1)) max_strides = [(num_examples - 1)] for (dim, pd) in zip(self.orig_shape, self.patch_shape): assert ((dim % pd) == 0) max_strides.append(((dim / pd) - 1)) keep_going = True i = 0 while keep_going: args = [coords[0]] for j in xrange(num_topological_dimensions): coord = coords[(j + 1)] args.append(slice((coord * self.patch_shape[j]), ((coord + 1) * self.patch_shape[j]))) next_shape_coord = reassembled.shape[(j + 1)] assert (((coord + 1) * self.patch_shape[j]) <= next_shape_coord) args.append(channel_slice) try: patch = patches[i, :] except IndexError: reraise_as(IndexError(((('Gave index of ' + str(i)) + ', : into thing of shape ') + str(patches.shape)))) reassembled[args] = patch i += 1 j = 0 keep_going = False while (not keep_going): if (coords[(- (j + 1))] < max_strides[(- (j + 1))]): coords[(- (j + 1))] += 1 keep_going = True else: coords[(- (j + 1))] = 0 if (j == num_topological_dimensions): break j = (j + 1) dataset.set_topological_view(reassembled) if (dataset.y is not None): dataset.y = dataset.y[::(patches.shape[0] / reassembled_shape[0])]
'.. todo:: WRITEME'
def apply(self, dataset, can_fit=False):
rng = copy.copy(self.start_rng) X = dataset.get_topological_view() num_topological_dimensions = (len(X.shape) - 2) if (num_topological_dimensions != len(self.patch_shape)): raise ValueError(((((('ExtractPatches with ' + str(len(self.patch_shape))) + ' topological dimensions called on ') + 'dataset with ') + str(num_topological_dimensions)) + '.')) output_shape = [self.num_patches] for dim in self.patch_shape: output_shape.append(dim) output_shape.append(X.shape[(-1)]) output = numpy.zeros(output_shape, dtype=X.dtype) channel_slice = slice(0, X.shape[(-1)]) for i in xrange(self.num_patches): args = [] args.append(rng.randint(X.shape[0])) for j in xrange(num_topological_dimensions): max_coord = (X.shape[(j + 1)] - self.patch_shape[j]) coord = rng.randint((max_coord + 1)) args.append(slice(coord, (coord + self.patch_shape[j]))) args.append(channel_slice) output[i, :] = X[args] dataset.set_topological_view(output) dataset.y = None
'.. todo:: WRITEME'
def __call__(self, batch):
if self.input_space:
    self.input_space.validate(batch)
squared_batch = batch ** 2
squared_norm = squared_batch.sum(axis=1)
norm = tensor.sqrt(squared_norm)
return batch / norm
'.. todo:: WRITEME'
def set_input_space(self, space):
self.input_space = space
'.. todo:: WRITEME'
def get_input_space(self):
if self.input_space is not None:
    return self.input_space
raise ValueError('No input space was specified for this Block (%s). '
                 'You can call set_input_space to correct that.' % str(self))
'.. todo:: WRITEME'
def get_output_space(self):
return self.get_input_space()
'.. todo:: WRITEME'
def apply(self, dataset, can_fit=False):
X = dataset.get_design_matrix()
X_norm = numpy.sqrt(numpy.sum(X ** 2, axis=1))
X /= X_norm[:, None]
dataset.set_design_matrix(X)
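The same row-wise unit normalization in plain numpy, with illustrative values:
import numpy as np

X = np.array([[3., 4.],
              [0., 2.]])
norms = np.sqrt((X ** 2).sum(axis=1))
X_unit = X / norms[:, None]   # rows become [0.6, 0.8] and [0., 1.], each with L2 norm 1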
'.. todo:: WRITEME'
def as_block(self):
return ExamplewiseUnitNormBlock()
'.. todo:: WRITEME'
def _multiply(self, batch):
if self.multiply is not None:
    batch *= self.multiply
return batch
'.. todo:: WRITEME'
def _add(self, batch):
if self.add is not None:
    batch += self.add
return batch
'.. todo:: WRITEME'
def __call__(self, batch):
if self.input_space:
    self.input_space.validate(batch)
if self._multiply_first:
    batch = self._add(self._multiply(batch))
else:
    batch = self._multiply(self._add(batch))
return batch
'.. todo:: WRITEME'
def inverse(self):
if self.multiply is not None and self._has_zeros:
    raise ZeroDivisionError('%s transformation not invertible due to '
                            '(near-) zeros in multiplicand' %
                            self.__class__.__name__)
else:
    mult_inverse = self.multiply ** (-1.0)
    return self.__class__(add=-self.add, multiply=mult_inverse,
                          multiply_first=not self._multiply_first)
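A quick numpy round-trip check of the inverse construction (negated add, reciprocal multiply, flipped order), with illustrative scalars:
import numpy as np

m, a = 2.0, 5.0
x = np.array([1.0, 3.0])
y = x * m + a                     # forward pass with multiply_first=True
x_back = (y + (-a)) * (1.0 / m)   # inverse: multiply_first flipped to False
assert np.allclose(x, x_back)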
'.. todo:: WRITEME'
def set_input_space(self, space):
self.input_space = space
'.. todo:: WRITEME'
def get_input_space(self):
if self.input_space is not None:
    return self.input_space
raise ValueError('No input space was specified for this Block (%s). '
                 'You can call set_input_space to correct that.' % str(self))
'.. todo:: WRITEME'
def get_output_space(self):
return self.get_input_space()
'.. todo:: WRITEME'
def apply(self, dataset, can_fit=True):
X = dataset.get_design_matrix()
if can_fit:
    self._mean = X.mean(axis=self._axis)
elif self._mean is None:
    raise ValueError('can_fit is False, but RemoveMean object has no '
                     'stored mean')
X -= self._mean
dataset.set_design_matrix(X)
'.. todo:: WRITEME'
def as_block(self):
if (self._mean is None): raise ValueError(("can't convert %s to block without fitting" % self.__class__.__name__)) return ExamplewiseAddScaleTransform(add=(- self._mean))
'.. todo:: WRITEME'
def apply(self, dataset, can_fit=False):
X = dataset.get_design_matrix()
if can_fit:
    self._mean = X.mean() if self._global_mean else X.mean(axis=0)
    self._std = X.std() if self._global_std else X.std(axis=0)
elif self._mean is None or self._std is None:
    raise ValueError('can_fit is False, but Standardize object has no '
                     'stored mean or standard deviation')
new = (X - self._mean) / (self._std_eps + self._std)
dataset.set_design_matrix(new)
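A numpy sketch of the standardization applied above; the 1e-4 value stands in for self._std_eps and is an assumption:
import numpy as np

X = np.array([[1., 2.],
              [3., 6.]])
mean, std = X.mean(axis=0), X.std(axis=0)   # per-feature statistics (global_* both False)
X_standardized = (X - mean) / (1e-4 + std)  # 1e-4 plays the role of _std_eps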
'.. todo:: WRITEME'
def as_block(self):
if ((self._mean is None) or (self._std is None)): raise ValueError(("can't convert %s to block without fitting" % self.__class__.__name__)) return ExamplewiseAddScaleTransform(add=(- self._mean), multiply=(self._std ** (-1)))
'.. todo:: WRITEME'
def __call__(self, batch):
if batch.ndim != 2:
    raise ValueError('Only two-dimensional tensors are supported')
return batch.dimshuffle(1, 0)[self._columns].dimshuffle(1, 0)
'.. todo:: WRITEME'
def inverse(self):
return ZeroColumnInsertBlock(self._columns, self._total)
'.. todo:: WRITEME'
def get_input_space(self):
return VectorSpace(dim=self._total)
'.. todo:: WRITEME'
def get_output_space(self):
return VectorSpace(dim=self._columns)
'.. todo:: WRITEME'
def __init__(self, columns, total):
self._columns = columns
self._total = total
'.. todo:: WRITEME'
def __call__(self, batch):
if batch.ndim != 2:
    raise ValueError('Only two-dimensional tensors are supported')
return insert_columns(batch, self._total, self._columns)
'.. todo:: WRITEME'
def inverse(self):
return ColumnSubsetBlock(self._columns, self._total)
'.. todo:: WRITEME'
def get_input_space(self):
return VectorSpace(dim=self._columns)
'.. todo:: WRITEME'
def get_output_space(self):
return VectorSpace(dim=self._total)
'.. todo:: WRITEME'
def apply(self, dataset, can_fit=False):
design_matrix = dataset.get_design_matrix()
mean = design_matrix.mean(axis=0)
var = design_matrix.var(axis=0)
columns, = numpy.where((var < self._eps) & (mean < self._eps))
self._block = ColumnSubsetBlock
'.. todo:: WRITEME'
def as_block(self):
if (self._block is None): raise ValueError(("can't convert %s to block without fitting" % self.__class__.__name__)) return self._block
'.. todo:: WRITEME'
def apply(self, dataset, can_fit=False):
X = dataset.get_design_matrix()
X = (X - self.map_from[0]) / numpy.diff(self.map_from)
X = X * numpy.diff(self.map_to) + self.map_to[0]
dataset.set_design_matrix(X)
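A worked example of the affine remapping above, sending pixel values from [0, 255] to [-1, 1]; the ranges are illustrative:
import numpy as np

map_from, map_to = (0., 255.), (-1., 1.)
X = np.array([[0., 127.5, 255.]])
X = (X - map_from[0]) / np.diff(map_from)   # -> [[0. , 0.5, 1. ]]
X = X * np.diff(map_to) + map_to[0]         # -> [[-1., 0., 1.]]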
'.. todo:: WRITEME'
def view_shape(self):
return self.orig_view_converter.shape
'.. todo:: WRITEME'
def design_mat_to_topo_view(self, X):
to_input = self.to_input(X) return self.orig_view_converter.design_mat_to_topo_view(to_input)
'.. todo:: WRITEME'
def design_mat_to_weights_view(self, X):
to_weights = self.to_weights(X) return self.orig_view_converter.design_mat_to_weights_view(to_weights)
'.. todo:: WRITEME'
def topo_view_to_design_mat(self, V):
return self.to_pca(self.orig_view_converter.topo_view_to_design_mat(V))
'.. todo:: WRITEME'
def get_formatted_batch(self, batch, dspace):
if isinstance(dspace, VectorSpace): dspace.np_validate(batch) return batch else: to_input = self.to_input(batch) return self.orig_view_converter.get_formatted_batch(to_input, dspace)
'.. todo:: WRITEME'
def apply(self, dataset, can_fit=False):
if (self._pca is None): if (not can_fit): raise ValueError('can_fit is False, but PCA preprocessor object has no fitted model stored') from pylearn2.models import pca self._pca = pca.CovEigPCA(num_components=self._num_components, whiten=self._whiten) self._pca.train(dataset.get_design_matrix()) self._transform_func = function([self._input], self._pca(self._input)) self._invert_func = function([self._output], self._pca.reconstruct(self._output)) self._convert_weights_func = function([self._output], self._pca.reconstruct(self._output, add_mean=False)) orig_data = dataset.get_design_matrix() dataset.set_design_matrix(self._transform_func(dataset.get_design_matrix())) proc_data = dataset.get_design_matrix() orig_var = orig_data.var(axis=0) proc_var = proc_data.var(axis=0) if ((not self._whiten) and can_fit): assert (proc_var[0] > orig_var.max()) log.info('original variance: {0}'.format(orig_var.sum())) log.info('processed variance: {0}'.format(proc_var.sum())) if hasattr(dataset, 'view_converter'): if (dataset.view_converter is not None): new_converter = PCA_ViewConverter(self._transform_func, self._invert_func, self._convert_weights_func, dataset.view_converter) dataset.view_converter = new_converter
'.. todo:: WRITEME'
def apply(self, dataset, can_fit=False):
X = dataset.get_topological_view() d = (len(X.shape) - 2) assert (d in [2, 3]) assert ((X.dtype == 'float32') or (X.dtype == 'float64')) if (d == 2): X = X.reshape([X.shape[0], X.shape[1], X.shape[2], 1, X.shape[3]]) kernel_size = 1 kernel_shape = [X.shape[(-1)]] for factor in self.sampling_factor: kernel_size *= factor kernel_shape.append(factor) if (d == 2): kernel_shape.append(1) kernel_shape.append(X.shape[(-1)]) kernel_value = (1.0 / float(kernel_size)) kernel = numpy.zeros(kernel_shape, dtype=X.dtype) for i in xrange(X.shape[(-1)]): kernel[i, :, :, :, i] = kernel_value from theano.tensor.nnet.Conv3D import conv3D X_var = tensor.TensorType(broadcastable=[(s == 1) for s in X.shape], dtype=X.dtype)() downsampled = conv3D(X_var, kernel, numpy.zeros(X.shape[(-1)], X.dtype), kernel_shape[1:(-1)]) f = function([X_var], downsampled) X = f(X) if (d == 2): X = X.reshape([X.shape[0], X.shape[1], X.shape[2], X.shape[4]]) dataset.set_topological_view(X)
'.. todo:: WRITEME'
def apply(self, dataset, can_fit=False):
if (self._batch_size is None): X = global_contrast_normalize(dataset.get_design_matrix(), scale=self._scale, subtract_mean=self._subtract_mean, use_std=self._use_std, sqrt_bias=self._sqrt_bias, min_divisor=self._min_divisor) dataset.set_design_matrix(X) else: data = dataset.get_design_matrix() data_size = data.shape[0] last = (numpy.floor((data_size / float(self._batch_size))) * self._batch_size) for i in xrange(0, data_size, self._batch_size): stop = (i + self._batch_size) log.info(('GCN processing data from %d to %d' % (i, stop))) X = data[i:stop] X = global_contrast_normalize(X, scale=self._scale, subtract_mean=self._subtract_mean, use_std=self._use_std, sqrt_bias=self._sqrt_bias, min_divisor=self._min_divisor) dataset.set_design_matrix(X, start=i)
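A simplified numpy sketch of what global_contrast_normalize does for one batch under default-like settings (subtract the per-example mean, then divide by a per-example norm clamped by min_divisor); the exact pylearn2 options (scale, use_std, sqrt_bias) are not reproduced here:
import numpy as np

def gcn_sketch(X, scale=1.0, min_divisor=1e-8):
    X = X - X.mean(axis=1, keepdims=True)                 # subtract_mean
    norms = np.sqrt((X ** 2).sum(axis=1, keepdims=True))  # per-example divisor
    norms[norms < min_divisor] = 1.0                      # avoid dividing by ~0
    return scale * X / norms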
'Performs matrix multiplication. Attempts to use the GPU if it\'s available. If the matrix multiplication is too big to fit on the GPU, this falls back to the CPU after issuing a warning. Parameters matrix_a : WRITEME matrix_b : WRITEME matrix_c : WRITEME'
@staticmethod
def _gpu_matrix_dot(matrix_a, matrix_b, matrix_c=None):
if (not hasattr(ZCA._gpu_matrix_dot, 'theano_func')): (ma, mb) = theano.tensor.matrices('A', 'B') mc = theano.tensor.dot(ma, mb) ZCA._gpu_matrix_dot.theano_func = theano.function([ma, mb], mc, allow_input_downcast=True) theano_func = ZCA._gpu_matrix_dot.theano_func try: if (matrix_c is None): return theano_func(matrix_a, matrix_b) else: matrix_c[...] = theano_func(matrix_a, matrix_b) return matrix_c except MemoryError: warnings.warn('Matrix multiplication too big to fit on GPU. Re-doing with CPU. Consider using THEANO_FLAGS="device=cpu" for your next preprocessor run') return numpy.dot(matrix_a, matrix_b, matrix_c)
'Performs the matrix multiplication M * D * M^T. First tries to do this on the GPU. If this throws a MemoryError, it falls back to the CPU, with a warning message. Parameters mat : WRITEME diags : WRITEME'
@staticmethod
def _gpu_mdmt(mat, diags):
floatX = theano.config.floatX if (not hasattr(ZCA._gpu_mdmt, 'theano_func')): t_mat = theano.tensor.matrix('M') t_diags = theano.tensor.vector('D') result = theano.tensor.dot((t_mat * t_diags), t_mat.T) ZCA._gpu_mdmt.theano_func = theano.function([t_mat, t_diags], result, allow_input_downcast=True) try: if (str(mat.dtype) != floatX): warnings.warn(('Implicitly converting mat from dtype=%s to %s for gpu' % (mat.dtype, floatX))) if (str(diags.dtype) != floatX): warnings.warn(('Implicitly converting diag from dtype=%s to %s for gpu' % (diags.dtype, floatX))) return ZCA._gpu_mdmt.theano_func(mat, diags) except MemoryError: warnings.warn('M * D * M^T was too big to fit on GPU. Re-doing with CPU. Consider using THEANO_FLAGS="device=cpu" for your next preprocessor run') return numpy.dot((mat * diags), mat.T)
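The identity exploited by _gpu_mdmt, namely that scaling the columns of M by D before multiplying by M^T equals M * diag(D) * M^T, can be checked in numpy:
import numpy as np

M = np.random.randn(4, 3)
d = np.random.rand(3)
assert np.allclose(np.dot(M * d, M.T), M @ np.diag(d) @ M.T)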
'Analogous to DenseDesignMatrix.use_design_loc(). If a matrices_save_path is set, when this ZCA is pickled, the internal parameter matrices will be saved separately to `matrices_save_path`, as a numpy .npz archive. This uses half the memory that a normal pickling does. Parameters matrices_save_path : WRITEME'
def set_matrices_save_path(self, matrices_save_path):
if matrices_save_path is not None:
    assert isinstance(matrices_save_path, str)
    matrices_save_path = os.path.abspath(matrices_save_path)
    if os.path.isdir(matrices_save_path):
        raise IOError('Matrix save path "%s" must not be an existing '
                      'directory.' % matrices_save_path)
    assert matrices_save_path[-1] not in ('/', '\\')
    if not os.path.isdir(os.path.split(matrices_save_path)[0]):
        raise IOError('Couldn\'t find parent directory:\n'
                      '\t"%s"\n'
                      '\tof matrix path\n'
                      '\t"%s"' % (os.path.split(matrices_save_path)[0],
                                  matrices_save_path))
self.matrices_save_path = matrices_save_path
'Used by pickle. Returns a dictionary to pickle in place of self.__dict__. If self.matrices_save_path is set, this saves the matrices P_ and inv_P_ separately in matrices_save_path as a .npz archive, which uses much less space & memory than letting pickle handle them.'
def __getstate__(self):
result = copy.copy(self.__dict__) if (self.matrices_save_path is not None): matrices = {'P_': self.P_} if (self.inv_P_ is not None): matrices['inv_P_'] = self.inv_P_ numpy.savez(self.matrices_save_path, **matrices) for (key, matrix) in matrices.items(): del result[key] return result
'Used to unpickle. Parameters state : dict The dictionary created by __getstate__, presumably unpickled from disk.'
def __setstate__(self, state):
if ('matrices_save_path' not in state): state['matrices_save_path'] = None if (state['matrices_save_path'] is not None): matrices = numpy.load(state['matrices_save_path']) state = dict((state.items() + matrices.items())) del matrices self.__dict__.update(state) if (not hasattr(self, 'inv_P_')): self.inv_P_ = None
'Fits this `ZCA` instance to a design matrix `X`. Parameters X : ndarray A matrix where each row is a datum. Notes Implementation details: Stores result as `self.P_`. If self.store_inverse is true, this also computes `self.inv_P_`.'
def fit(self, X):
assert X.dtype in ['float32', 'float64']
assert not contains_nan(X)
assert len(X.shape) == 2
n_samples = X.shape[0]
if self.copy:
    X = X.copy()
self.mean_ = numpy.mean(X, axis=0)
X -= self.mean_
log.info('computing zca of a {0} matrix'.format(X.shape))
t1 = time.time()
bias = self.filter_bias * scipy.sparse.identity(X.shape[1],
                                                theano.config.floatX)
covariance = ZCA._gpu_matrix_dot(X.T, X) / X.shape[0] + bias
t2 = time.time()
log.info('cov estimate took {0} seconds'.format(t2 - t1))
t1 = time.time()
eigs, eigv = linalg.eigh(covariance)
t2 = time.time()
log.info('eigh() took {0} seconds'.format(t2 - t1))
assert not contains_nan(eigs)
assert not contains_nan(eigv)
assert eigs.min() > 0
if self.n_components and self.n_drop_components:
    raise ValueError('Only one of n_components or n_drop_components '
                     'should be specified')
if self.n_components:
    eigs = eigs[-self.n_components:]
    eigv = eigv[:, -self.n_components:]
if self.n_drop_components:
    eigs = eigs[self.n_drop_components:]
    eigv = eigv[:, self.n_drop_components:]
t1 = time.time()
sqrt_eigs = numpy.sqrt(eigs)
try:
    self.P_ = ZCA._gpu_mdmt(eigv, 1.0 / sqrt_eigs)
except MemoryError:
    warnings.warn('Not enough memory to compute the ZCA whitening matrix '
                  'on the GPU. Re-doing with CPU.')
    self.P_ = numpy.dot(eigv * (1.0 / sqrt_eigs), eigv.T)
t2 = time.time()
assert not contains_nan(self.P_)
self.has_fit_ = True
if self.store_inverse:
    self.inv_P_ = ZCA._gpu_mdmt(eigv, sqrt_eigs)
else:
    self.inv_P_ = None
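For reference, the whitening matrix assembled above is the standard ZCA construction. With the bias-regularized covariance eigendecomposition

$$C = \tfrac{1}{n} X^\top X + b\,I = V \Lambda V^\top, \qquad P = V \Lambda^{-1/2} V^\top, \qquad P^{-1} = V \Lambda^{1/2} V^\top$$

Here $b$ is filter_bias, $P$ is stored as P_, and $P^{-1}$ as inv_P_ (an exact inverse only when no components are dropped); apply() later computes $(X - \mu)\,P$.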
'.. todo:: WRITEME'
def apply(self, dataset, can_fit=False):
if (not hasattr(ZCA, '_x_minus_mean_times_p')): x_symbol = tensor.matrix('X') mean_symbol = tensor.vector('mean') p_symbol = tensor.matrix('P_') new_x_symbol = tensor.dot((x_symbol - mean_symbol), p_symbol) ZCA._x_minus_mean_times_p = theano.function([x_symbol, mean_symbol, p_symbol], new_x_symbol) X = dataset.get_design_matrix() assert (X.dtype in ['float32', 'float64']) if (not self.has_fit_): assert can_fit self.fit(X) new_X = ZCA._gpu_matrix_dot((X - self.mean_), self.P_) dataset.set_design_matrix(new_X)
'.. todo:: WRITEME'
def inverse(self, X):
assert (X.ndim == 2) if (self.inv_P_ is None): warnings.warn('inv_P_ was None. Computing inverse of P_ now. This will take some time. For efficiency, it is recommended that in the future you compute the inverse in ZCA.fit() instead, by passing it store_inverse=True.') log.info('inverting...') self.inv_P_ = numpy.linalg.inv(self.P_) log.info('...done inverting') return (self._gpu_matrix_dot(X, self.inv_P_) + self.mean_)
'.. todo:: WRITEME properly Parameters X : WRITEME data with axis [b, 0, 1, c]'
def transform(self, x):
for i in self._channels: assert isinstance(i, int) assert ((i >= 0) and (i <= x.shape[3])) x[:, :, :, i] = lecun_lcn(x[:, :, :, i], self._img_shape, self._kernel_size, self._threshold) return x
'.. todo:: WRITEME'
def apply(self, dataset, can_fit=False):
axes = ['b', 0, 1, 'c'] data_size = dataset.X.shape[0] if (self._channels is None): self._channels last = (numpy.floor((data_size / float(self._batch_size))) * self._batch_size) for i in xrange(0, data_size, self._batch_size): stop = ((i + numpy.mod(data_size, self._batch_size)) if (i >= last) else (i + self._batch_size)) log.info('LCN processing data from {0} to {1}'.format(i, stop)) transformed = self.transform(convert_axes(dataset.get_topological_view(dataset.X[i:stop, :]), dataset.view_converter.axes, axes)) transformed = convert_axes(transformed, axes, dataset.view_converter.axes) if (self._batch_size != data_size): if isinstance(dataset.X, numpy.ndarray): transformed = convert_axes(transformed, dataset.view_converter.axes, ['b', 0, 1, 'c']) transformed = transformed.reshape(transformed.shape[0], ((transformed.shape[1] * transformed.shape[2]) * transformed.shape[3])) dataset.X[i:stop] = transformed else: dataset.set_topological_view(transformed, dataset.view_converter.axes, start=i) if (self._batch_size == data_size): dataset.set_topological_view(transformed, dataset.view_converter.axes)
'.. todo:: WRITEME'
def yuv_rgb(self, x):
y = x[:, :, :, 0] u = x[:, :, :, 1] v = x[:, :, :, 2] r = (y + (1.13983 * v)) g = ((y - (0.39465 * u)) - (0.5806 * v)) b = (y + (2.03211 * u)) x[:, :, :, 0] = r x[:, :, :, 1] = g x[:, :, :, 2] = b return x
'.. todo:: WRITEME'
def rgb_yuv(self, x):
r = x[:, :, :, 0] g = x[:, :, :, 1] b = x[:, :, :, 2] y = (((0.299 * r) + (0.587 * g)) + (0.114 * b)) u = ((((-0.14713) * r) - (0.28886 * g)) + (0.436 * b)) v = (((0.615 * r) - (0.51499 * g)) - (0.10001 * b)) x[:, :, :, 0] = y x[:, :, :, 1] = u x[:, :, :, 2] = v return x
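The two conversions above, written as the matrix transforms they implement (coefficients taken directly from the code):

$$\begin{pmatrix}Y\\U\\V\end{pmatrix} = \begin{pmatrix}0.299 & 0.587 & 0.114\\ -0.14713 & -0.28886 & 0.436\\ 0.615 & -0.51499 & -0.10001\end{pmatrix}\begin{pmatrix}R\\G\\B\end{pmatrix}, \qquad \begin{pmatrix}R\\G\\B\end{pmatrix} = \begin{pmatrix}1 & 0 & 1.13983\\ 1 & -0.39465 & -0.5806\\ 1 & 2.03211 & 0\end{pmatrix}\begin{pmatrix}Y\\U\\V\end{pmatrix}$$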
'.. todo:: WRITEME'
def transform(self, x, dataset_axes):
axes = ['b', 0, 1, 'c'] x = convert_axes(x, dataset_axes, axes) if self._rgb_yuv: x = self.rgb_yuv(x) else: x = self.yuv_rgb(x) x = convert_axes(x, axes, dataset_axes) return x
'.. todo:: WRITEME'
def apply(self, dataset, can_fit=False):
X = dataset.X data_size = X.shape[0] last = (numpy.floor((data_size / float(self._batch_size))) * self._batch_size) for i in xrange(0, data_size, self._batch_size): stop = ((i + numpy.mod(data_size, self._batch_size)) if (i >= last) else (i + self._batch_size)) log.info('RGB_YUV processing data from {0} to {1}'.format(i, stop)) data = dataset.get_topological_view(X[i:stop]) transformed = self.transform(data, dataset.view_converter.axes) if isinstance(dataset.X, numpy.ndarray): transformed = convert_axes(transformed, dataset.view_converter.axes, ['b', 0, 1, 'c']) transformed = transformed.reshape(transformed.shape[0], ((transformed.shape[1] * transformed.shape[2]) * transformed.shape[3])) dataset.X[i:stop] = transformed else: dataset.set_topological_view(transformed, dataset.view_converter.axes, start=i)
'.. todo:: WRITEME'
def apply(self, dataset, can_fit=False):
(w_rows, w_cols) = self.window_shape arr = dataset.get_topological_view() try: axes = dataset.view_converter.axes except AttributeError: reraise_as(NotImplementedError("I don't know how to tell what the axes of this kind of dataset are.")) needs_transpose = (not (axes[1:3] == (0, 1))) if needs_transpose: arr = numpy.transpose(arr, (axes.index('c'), axes.index(0), axes.index(1), axes.index('b'))) r_off = ((arr.shape[1] - w_rows) // 2) c_off = ((arr.shape[2] - w_cols) // 2) new_arr = arr[:, r_off:(r_off + w_rows), c_off:(c_off + w_cols), :] if needs_transpose: index_map = tuple((('c', 0, 1, 'b').index(axis) for axis in axes)) new_arr = numpy.transpose(new_arr, index_map) dataset.set_topological_view(new_arr, axes=axes)
'.. todo:: WRITEME'
def apply(self, dataset, can_fit=False):
start = self.start stop = self.stop rng = make_np_rng(self.seed, which_method='randint') X = dataset.X y = dataset.y if (y is not None): assert (X.shape[0] == y.shape[0]) for i in xrange(X.shape[0]): j = rng.randint(X.shape[0]) tmp = X[i, :].copy() X[i, :] = X[j, :].copy() X[j, :] = tmp if (y is not None): tmp = y[i, :].copy() y[i, :] = y[j, :].copy() y[j, :] = tmp assert (start >= 0) assert (stop > start) assert (stop <= X.shape[0]) dataset.X = X[start:stop, :] if (y is not None): dataset.y = y[start:stop, :]
'.. todo:: WRITEME'
def __init__(self, path, which_set):
Xs = io.loadmat(path) X = Xs[which_set] super(MatlabDataset, self).__init__(X=N.cast[config.floatX](X)) assert (not N.any(N.isnan(self.X)))
'.. todo:: WRITEME'
def get_design_matrix(self, topo=None):
if (topo is not None): return self.raw.get_design_matrix(topo) X = self.raw.get_design_matrix() return self.transformer.perform(X)
'Caches a file locally if possible. If caching was successful, or if the file was previously successfully cached, this method returns the path to the local copy of the file. If not, it returns the path to the original file. Parameters filename : string Remote file to cache locally Returns output : string Updated (if needed) filename to use to access the remote file.'
def cache_file(self, filename):
remote_name = string_utils.preprocess(filename) if (self.dataset_local_dir == ''): return filename common_msg = 'Message from Pylearn2 local cache of dataset(specified by the environment variable PYLEARN2_LOCAL_DATA_PATH): ' if (not os.path.exists(remote_name)): log.error(('Error : Specified file %s does not exist' % remote_name)) return filename if (not os.path.isfile(remote_name)): log.error(('Error : Specified name %s is not a file' % remote_name)) return filename if (not remote_name.startswith(self.dataset_remote_dir)): log.warning((common_msg + ('We cache in the local directory only what is under $PYLEARN2_DATA_PATH: %s' % remote_name))) return filename self.safe_mkdir(self.dataset_local_dir, ((((((((stat.S_IRUSR | stat.S_IWUSR) | stat.S_IXUSR) | stat.S_IRGRP) | stat.S_IWGRP) | stat.S_IXGRP) | stat.S_IROTH) | stat.S_IWOTH) | stat.S_IXOTH)) local_name = os.path.join(self.dataset_local_dir, os.path.relpath(remote_name, self.dataset_remote_dir)) local_folder = os.path.split(local_name)[0] try: self.safe_mkdir(local_folder) except Exception as e: log.warning(((common_msg + "While creating the directory %s, we got an error. We won't cache to the local disk.") % local_folder)) return filename if (not os.access(local_folder, os.W_OK)): log.warning((common_msg + ("Local folder %s isn't writable. This is needed for synchronization. We will use the remote version. Manually fix the permission." % local_folder))) return filename self.get_writelock(local_name) if (not os.path.exists(local_name)): if (not self.check_enough_space(remote_name, local_name)): log.warning((common_msg + ('File %s not cached: Not enough free space' % remote_name))) self.release_writelock() return filename self.copy_from_server_to_local(remote_name, local_name) log.info((common_msg + ('File %s has been locally cached to %s' % (remote_name, local_name)))) elif (os.path.getmtime(remote_name) > os.path.getmtime(local_name)): log.warning((common_msg + ('File %s in cache will not be used: The remote file (modified %s) is newer than the locally cached file %s (modified %s).' % (remote_name, time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(os.path.getmtime(remote_name))), local_name, time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(os.path.getmtime(local_name))))))) self.release_writelock() return filename elif (os.path.getsize(local_name) != os.path.getsize(remote_name)): log.warning((common_msg + ('File %s not cached: The remote file (%d bytes) is of a different size than the locally cached file %s (%d bytes). The local cache might be corrupt.' % (remote_name, os.path.getsize(remote_name), local_name, os.path.getsize(local_name))))) self.release_writelock() return filename elif (not os.access(local_name, os.R_OK)): log.warning((common_msg + ("File %s in cache isn't readable. We will use the remote version. Manually fix the permission." % local_name))) self.release_writelock() return filename else: log.debug(('File %s has previously been locally cached to %s' % (remote_name, local_name))) self.get_readlock(local_name) self.release_writelock() return local_name
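A hypothetical usage sketch of the caching flow above; LocalDatasetCache and the file path are placeholders, and only cache_file(filename) itself comes from the code:
cache = LocalDatasetCache()                                        # hypothetical owner of cache_file()
path = cache.cache_file('${PYLEARN2_DATA_PATH}/mnist/train.npy')   # placeholder remote path
# 'path' is the local copy when caching succeeded, otherwise the original filename.
data = serial.load(path)                                           # any reader of the file works here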
'Copies a remote file locally Parameters remote_fname : string Remote file to copy local_fname : string Path and name of the local copy to be made of the remote file.'
def copy_from_server_to_local(self, remote_fname, local_fname):
(head, tail) = os.path.split(local_fname) head += os.path.sep if (not os.path.exists(head)): os.makedirs(os.path.dirname(head)) command = ((('cp ' + remote_fname) + ' ') + local_fname) os.system(command) st = os.stat(remote_fname) os.chmod(local_fname, st.st_mode) try: os.chown(local_fname, (-1), st.st_gid) except OSError: pass dirs = os.path.dirname(local_fname).replace(self.dataset_local_dir, '') sep = dirs.split(os.path.sep) if (sep[0] == ''): sep = sep[1:] for i in range(len(sep)): orig_p = os.path.join(self.dataset_remote_dir, *sep[:(i + 1)]) new_p = os.path.join(self.dataset_local_dir, *sep[:(i + 1)]) orig_st = os.stat(orig_p) new_st = os.stat(new_p) if (not (new_st.st_mode & stat.S_IWGRP)): os.chmod(new_p, (new_st.st_mode | stat.S_IWGRP)) if (orig_st.st_gid != new_st.st_gid): try: os.chown(new_p, (-1), orig_st.st_gid) except OSError: pass
'Return free usage about the given path, in bytes Parameters path : string Folder for which to return disk usage Returns output : tuple Tuple containing total space in the folder and currently used space in the folder'
def disk_usage(self, path):
st = os.statvfs(path) total = (st.f_blocks * st.f_frsize) used = ((st.f_blocks - st.f_bfree) * st.f_frsize) return (total, used)
'Check if the given local folder has enough space to store the specified remote file Parameters remote_fname : string Path to the remote file local_fname : string Path to the local folder max_disk_usage : float Fraction indicating how much of the total space in the local folder can be used before the local cache must stop adding to it. Returns output : boolean True if there is enough space to store the remote file.'
def check_enough_space(self, remote_fname, local_fname, max_disk_usage=0.9):
storage_need = os.path.getsize(remote_fname) (storage_total, storage_used) = self.disk_usage(self.dataset_local_dir) return ((storage_used + storage_need) < (storage_total * max_disk_usage))
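A concrete instance of the 90% budget check above, with illustrative numbers:
GB = 1024 ** 3
storage_need = 2 * GB                                            # size of the remote file
storage_total, storage_used = 500 * GB, 460 * GB                 # local folder capacity and usage
enough = (storage_used + storage_need) < storage_total * 0.9     # False: 462 GB exceeds the 450 GB cap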
'Create the specified folder. If the parent folders do not exist, they are also created. If the folder already exists, nothing is done. Parameters folderName : string Name of the folder to create force_perm : mode to use for folder creation'
def safe_mkdir(self, folderName, force_perm=None):
if os.path.exists(folderName): return intermediaryFolders = folderName.split(os.path.sep) if (intermediaryFolders[(-1)] == ''): intermediaryFolders = intermediaryFolders[:(-1)] if force_perm: force_perm_path = folderName.split(os.path.sep) if (force_perm_path[(-1)] == ''): force_perm_path = force_perm_path[:(-1)] base = (len(force_perm_path) - len(intermediaryFolders)) for i in range(1, len(intermediaryFolders)): folderToCreate = os.path.sep.join(intermediaryFolders[:(i + 1)]) if os.path.exists(folderToCreate): continue os.mkdir(folderToCreate) if force_perm: os.chmod(folderToCreate, force_perm)
'Obtain a readlock on a file Parameters path : string Name of the file on which to obtain a readlock'
def get_readlock(self, path):
timestamp = int((time.time() * 1000000.0)) lockdirName = ('%s.readlock.%i.%i' % (path, self.pid, timestamp)) os.mkdir(lockdirName) atexit.register(self.release_readlock, lockdirName=lockdirName)
'Release a previously obtained readlock Parameters lockdirName : string Name of the previously obtained readlock'
def release_readlock(self, lockdirName):
if (os.path.exists(lockdirName) and os.path.isdir(lockdirName)): os.rmdir(lockdirName)
'Obtain a writelock on a file. Only one write lock may be held at any given time. Parameters filename : string Name of the file on which to obtain a writelock'
def get_writelock(self, filename):
compilelock.get_lock((filename + '.writelock'))
'Release the previously obtained writelock'
def release_writelock(self):
compilelock.release_lock()
'Partition the dataset according to cross-validation subsets and return the raw data in each subset.'
def get_data_subsets(self):
for subsets in self.subset_iterator: labels = None if (len(subsets) == 3): labels = ['train', 'valid', 'test'] elif (len(subsets) == 2): labels = ['train', 'test'] data_subsets = OrderedDict() for (i, subset) in enumerate(subsets): subset_data = tuple((data[subset] for data in self._data)) if (len(subset_data) == 2): (X, y) = subset_data else: (X,) = subset_data y = None data_subsets[labels[i]] = (X, y) (yield data_subsets)
'Create a DenseDesignMatrix for each dataset subset and apply any preprocessing to the child datasets.'
def __iter__(self):
for data_subsets in self.get_data_subsets(): datasets = {} for (label, data) in data_subsets.items(): (X, y) = data datasets[label] = DenseDesignMatrix(X=X, y=y) if (self.preprocessor is not None): self.preprocessor.apply(datasets['train'], can_fit=self.fit_preprocessor) for (label, dataset) in datasets.items(): if (label == 'train'): continue self.preprocessor.apply(dataset, can_fit=False) if (self.which_set is not None): for (label, dataset) in list(datasets.items()): if (label not in self.which_set): del datasets[label] del data_subsets[label] if (not len(datasets)): raise ValueError(('No matching dataset(s) for ' + '{}'.format(self.which_set))) if (not self.return_dict): datasets = list((datasets[label] for label in data_subsets.keys())) if (len(datasets) == 1): (datasets,) = datasets (yield datasets)
'Stratified cross-validation requires label information for examples. This function gets target values for a dataset, converting from one-hot encoding to a 1D array as needed. Parameters dataset : object Dataset containing target values for examples.'
@staticmethod
def get_y(dataset):
y = np.asarray(dataset.y) if (y.ndim > 1): assert np.array_equal(np.unique(y), [0, 1]) y = np.argmax(y, axis=1) return y
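The one-hot-to-label conversion performed above, shown on a small array:
import numpy as np

y_one_hot = np.array([[1, 0, 0],
                      [0, 0, 1],
                      [0, 1, 0]])
y = np.argmax(y_one_hot, axis=1)   # array([0, 2, 1])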
'Construct a Transformer dataset for each partition.'
def __iter__(self):
for (k, datasets) in enumerate(self.dataset_iterator): if isinstance(self.transformers, list): transformer = self.transformers[k] elif isinstance(self.transformers, StackedBlocksCV): transformer = self.transformers.select_fold(k) else: transformer = self.transformers if isinstance(datasets, list): for (i, dataset) in enumerate(datasets): datasets[i] = TransformerDataset(dataset, transformer) else: for (key, dataset) in datasets.items(): datasets[key] = TransformerDataset(dataset, transformer) (yield datasets)
'Add tracking to all trainers. Parameters trainers : list List of Train objects belonging to the parent TrainCV object.'
def setup(self, trainers):
for (k, trainer) in enumerate(trainers): if ((self.save_path is not None) and self.save_folds): (path, ext) = os.path.splitext(self.save_path) save_path = ((path + '-{}'.format(k)) + ext) else: save_path = None if (self.tag_key is not None): tag_key = '{}-{}'.format(self.tag_key, k) else: tag_key = None extension = MonitorBasedSaveBest(self.channel_name, save_path=save_path, store_best_model=True, higher_is_better=self.higher_is_better, tag_key=tag_key) trainer.extensions.append(extension)
'Save best model from each cross-validation fold. Parameters trainers : list List of Train objects belonging to the parent TrainCV object.'
def on_save(self, trainers):
if (self.save_path is None): return models = [] for trainer in trainers: for extension in trainer.extensions: if isinstance(extension, MonitorBasedSaveBest): models.append(extension.best_model) break assert (len(models) == len(trainers)) try: for trainer in trainers: trainer.dataset._serialization_guard = SerializationGuard() serial.save(self.save_path, models, on_overwrite='backup') finally: for trainer in trainers: trainer.dataset._serialization_guard = None
'Choose a single cross-validation fold to represent. Parameters k : int Index of selected fold.'
def select_fold(self, k):
return self._folds[k]
'Get input space.'
def get_input_space(self):
return self._folds[0][0].get_input_space()
'Get output space.'
def get_output_space(self):
return self._folds[0][(-1)].get_output_space()
'Set input space. Parameters space : WRITEME Input space.'
def set_input_space(self, space):
for fold in self._folds: this_space = space for layer in fold._layers: layer.set_input_space(this_space) this_space = layer.get_output_space()
'Set up the main loop.'
def setup(self):
self.setup_extensions()
'Set up extensions.'
def setup_extensions(self):
for extension in self.cv_extensions: extension.setup(self.trainers)
'Run main_loop of each trainer. Note: if you get PickleErrors when running in parallel, make sure you have `dill` installed. Parameters time_budget : int, optional The maximum number of seconds before interrupting training. Default is `None`, no time limit. parallel : bool, optional Whether to train subtrainers in parallel using IPython.parallel (default False). client_kwargs : dict, optional Keyword arguments for IPython.parallel Client. view_flags : dict, optional Flags for IPython.parallel LoadBalancedView.'
def main_loop(self, time_budget=None, parallel=False, client_kwargs=None, view_flags=None):
self.setup() if parallel: from IPython.parallel import Client def _train(trainer, time_budget=None): '\n Run main_loop of this trainer.\n\n Parameters\n ----------\n trainer : Train object\n Train object.\n time_budget : int, optional\n The maximum number of seconds before interrupting\n training. Default is `None`, no time limit.\n ' trainer.main_loop(time_budget) return trainer if (client_kwargs is None): client_kwargs = {} if (view_flags is None): view_flags = {} client = Client(**client_kwargs) view = client.load_balanced_view() view.set_flags(**view_flags) call = view.map(_train, self.trainers, ([time_budget] * len(self.trainers)), block=False) self.trainers = call.get() else: for trainer in self.trainers: trainer.main_loop(time_budget) self.save()
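A hedged usage sketch of the loop above; the TrainCV constructor arguments are placeholders, and only main_loop's own keywords come from the code:
train_cv = TrainCV(dataset_iterator=dataset_iterator, model=model,
                   algorithm=algorithm)      # assumed constructor; build these elsewhere
train_cv.main_loop(time_budget=3600)         # serial run with a one-hour budget
# Parallel run over an IPython.parallel cluster:
train_cv.main_loop(parallel=True, client_kwargs={'profile': 'default'})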
'Call on_save for Train and TrainCV extensions and serialize trained models if save_path is set.'
def save(self):
for trainer in self.trainers: for extension in trainer.extensions: extension.on_save(trainer.model, trainer.dataset, trainer.algorithm) for extension in self.cv_extensions: extension.on_save(self.trainers) if (self.save_path is not None): models = [trainer.model for trainer in self.trainers] try: for trainer in self.trainers: trainer.dataset._serialization_guard = SerializationGuard() if ((not self.allow_overwrite) and os.path.exists(self.save_path)): raise IOError('Trying to overwrite file when not allowed.') serial.save(self.save_path, models, on_overwrite='backup') finally: for trainer in self.trainers: trainer.dataset._serialization_guard = None
'Yield train/valid/test splits.'
def __iter__(self):
cv = list(super(ValidationKFold, self).__iter__()) for (train, valid, test) in get_k_fold_splits(cv): (yield (train, valid, test))
'Yield train/valid/test splits.'
def __iter__(self):
cv = list(super(StratifiedValidationKFold, self).__iter__()) for (train, valid, test) in get_k_fold_splits(cv): (yield (train, valid, test))
'Return train/valid/test splits. The validation set is generated by splitting the training set.'
def __iter__(self):
for (train, test) in super(ValidationShuffleSplit, self).__iter__(): n = len(np.arange(self.n)[train]) train_cv = ShuffleSplit(n, test_size=self.valid_size, random_state=self.random_state) (train, valid) = get_validation_set_from_train(train, train_cv) (yield (train, valid, test))
'Return train/valid/test splits. The validation set is generated by a stratified split of the training set.'
def __iter__(self):
for (train, test) in super(StratifiedValidationShuffleSplit, self).__iter__(): y = self.y[train] train_cv = StratifiedShuffleSplit(y, test_size=self.valid_size, random_state=self.random_state) (train, valid) = get_validation_set_from_train(train, train_cv) (yield (train, valid, test))
'Choose a single cross-validation fold to represent. Parameters k : int Index of selected fold.'
def select_fold(self, k):
return self._folds[k]
'Set input space. Parameters space : Space The input space for this layer.'
def set_input_space(self, space):
return [fold.set_input_space(space) for fold in self._folds]
'Get parameters.'
def get_params(self):
return self._folds[0].get_params()
'Get input space.'
def get_input_space(self):
return self._folds[0].get_input_space()
'Get output space.'
def get_output_space(self):
return self._folds[0].get_output_space()
'Get monitoring channels.'
def get_monitoring_channels(self):
return self._folds[0].get_monitoring_channels()
'Store the state of the parameters and ensure that they are the same as when the model was last monitored.'
def on_save(self, model, dataset, algorithm):
self.params_on_save = np.asarray(model.get_param_values()) param_pairs = zip(self.params_on_save, self.params_on_monitor) for (save_params, monitor_params) in param_pairs: assert np.array_equal(save_params, monitor_params)
'Store state of parameters'
def on_monitor(self, model, dataset, algorithm):
self.params_on_monitor = np.asarray(model.get_param_values())
'Return a hash based on the object ID (to avoid hashing unhashable namedtuple elements).'
def __hash__(self):
return hash(id(self))
'Modifies the parameters before a learning update is applied. This method acts *after* the model subclass\' _modify_updates method and any ModelExtensions that come earlier in the extensions list. Parameters updates : dict A dictionary mapping shared variables to symbolic values they will be updated to. model : Model The Model to act on'
def post_modify_updates(self, updates, model):
pass