'Get a handle for an HDF5 dataset, or load the entire dataset into memory. Parameters dataset : str Name or path of HDF5 dataset. load_all : bool, optional (default False) If true, load dataset into memory.'
def get_dataset(self, dataset, load_all=False):
if load_all:
    data = self._file[dataset][:]
else:
    data = self._file[dataset]
    data.ndim = len(data.shape)
return data
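For context, a small standalone sketch (using h5py directly, not part of pylearn2; the file name is made up) of the difference between the lazy handle and the fully loaded array that get_dataset can return:

import h5py
import numpy as np

# create a small example file (hypothetical path)
with h5py.File('example.h5', 'w') as f:
    f.create_dataset('X', data=np.random.rand(100, 10))

f = h5py.File('example.h5', 'r')
handle = f['X']        # lazy handle: rows are read from disk on demand
in_memory = f['X'][:]  # the [:] slice copies everything into a numpy array
print(type(handle), type(in_memory), in_memory.shape)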
'Get an iterator for this dataset. The FiniteDatasetIterator uses indexing that is not supported by HDF5 datasets, so we change the class to HDF5DatasetIterator to override the iterator.next method used in dataset iteration. Parameters WRITEME'
def iterator(self, *args, **kwargs):
iterator = super(HDF5DatasetDeprecated, self).iterator(*args, **kwargs)
iterator.__class__ = HDF5DatasetIterator
return iterator
'Set up dataset topological view, without building an in-memory design matrix. This is mostly copied from DenseDesignMatrix, except: * HDF5ViewConverter is used instead of DefaultViewConverter * Data specs are derived from topo_view, not X * NaN checks have been moved to HDF5DatasetIterator.next Note that y may be loaded into memory for reshaping if y.ndim != 2. Parameters V : ndarray Topological view. axes : tuple, optional (default (\'b\', 0, 1, \'c\')) Order of axes in topological view.'
def set_topological_view(self, V, axes=('b', 0, 1, 'c')):
shape = [V.shape[axes.index('b')],
         V.shape[axes.index(0)],
         V.shape[axes.index(1)],
         V.shape[axes.index('c')]]
self.view_converter = HDF5ViewConverter(shape[1:], axes=axes)
self.X = self.view_converter.topo_view_to_design_mat(V)
self.X_topo_space = self.view_converter.topo_space
X_space = VectorSpace(dim=V.shape[axes.index('b')])
X_source = 'features'
if self.y is None:
    space = X_space
    source = X_source
else:
    if self.y.ndim == 1:
        dim = 1
    else:
        dim = self.y.shape[-1]
    if getattr(self, 'y_labels', None) is not None:
        y_space = IndexSpace(dim=dim, max_labels=self.y_labels)
    elif getattr(self, 'max_labels', None) is not None:
        y_space = IndexSpace(dim=dim, max_labels=self.max_labels)
    else:
        y_space = VectorSpace(dim=dim)
    y_source = 'targets'
    space = CompositeSpace((X_space, y_space))
    source = (X_source, y_source)
self.data_specs = (space, source)
self.X_space = X_space
self._iter_data_specs = (X_space, X_source)
'Get the next subset of the dataset during dataset iteration. Converts index selections for batches to boolean selections that are supported by HDF5 datasets.'
def next(self):
next_index = self._subset_iterator.next()
sel = np.zeros(self.num_examples, dtype=bool)
sel[next_index] = True
next_index = sel
rval = []
for data, fn in safe_izip(self._raw_data, self._convert):
    try:
        this_data = data[next_index]
    except TypeError:
        if data.ndim > 1:
            this_data = data[next_index, :]
        else:
            raise
    if fn:
        this_data = fn(this_data)
    assert not contains_nan(this_data)
    rval.append(this_data)
rval = tuple(rval)
if not self._return_tuple and len(rval) == 1:
    rval, = rval
return rval
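A standalone sketch of the boolean-mask trick the iterator relies on (plain h5py rather than pylearn2; the file name is made up): instead of indexing the HDF5 dataset with an arbitrary list of row indices, build a boolean mask over all rows and select with it.

import h5py
import numpy as np

with h5py.File('batch_demo.h5', 'w') as f:
    dset = f.create_dataset('X', data=np.arange(20).reshape(10, 2))
    batch_indices = np.array([7, 2, 5])        # an arbitrary minibatch
    sel = np.zeros(dset.shape[0], dtype=bool)  # boolean selection over rows
    sel[batch_indices] = True
    batch = dset[sel]                          # rows 2, 5 and 7, in row order
    print(batch)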
'Generate a design matrix from the topological view. This override of DefaultViewConverter.topo_view_to_design_mat does not attempt to transpose the topological view, since transposition is not supported by HDF5 datasets. Parameters WRITEME'
def topo_view_to_design_mat(self, V):
v_shape = (V.shape[self.axes.index('b')],
           V.shape[self.axes.index(0)],
           V.shape[self.axes.index(1)],
           V.shape[self.axes.index('c')])
if np.any(np.asarray(self.shape) != np.asarray(v_shape[1:])):
    raise ValueError('View converter for views of shape batch size '
                     'followed by ' + str(self.shape) +
                     ' given tensor of shape ' + str(v_shape))
rval = HDF5TopoViewConverter(V, self.axes)
return rval
'Indexes the design matrix and transforms the requested batch from the topological view. Parameters item : slice or ndarray Batch selection. Either a slice or a boolean mask.'
def __getitem__(self, item):
sel = [slice(None)] * len(self.topo_view_shape)
sel[self.axes.index('b')] = item
sel = tuple(sel)
V = self.topo_view[sel]
batch_size = V.shape[self.axes.index('b')]
rval = np.zeros((batch_size, self.pixels_per_channel * self.n_channels),
                dtype=V.dtype)
for i in xrange(self.n_channels):
    ppc = self.pixels_per_channel
    sel = [slice(None)] * len(V.shape)
    sel[self.axes.index('c')] = i
    sel = tuple(sel)
    rval[:, i * ppc:(i + 1) * ppc] = V[sel].reshape(batch_size, ppc)
return rval
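A plain numpy sketch of the channel-by-channel flattening performed above, written for a batch already in ('b', 0, 1, 'c') order (all names below are illustrative):

import numpy as np

V = np.random.rand(2, 4, 4, 3)        # ('b', 0, 1, 'c')
batch_size, rows, cols, channels = V.shape
ppc = rows * cols                     # pixels per channel
rval = np.zeros((batch_size, ppc * channels), dtype=V.dtype)
for c in range(channels):
    rval[:, c * ppc:(c + 1) * ppc] = V[..., c].reshape(batch_size, ppc)
print(rval.shape)                     # (2, 48)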
'Sanity checks for X_labels and y_labels.'
def _check_labels(self):
if self.X_labels is not None:
    assert self.X is not None
    assert self.view_converter is None
    assert self.X.ndim <= 2
    assert np.all(self.X < self.X_labels)
if self.y_labels is not None:
    assert self.y is not None
    assert self.y.ndim <= 2
    assert np.all(self.y < self.y_labels)
'Returns all the data, as it is internally stored. The definition and format of these data are described in `self.get_data_specs()`. Returns data : numpy matrix or 2-tuple of matrices The data'
def get_data(self):
if self.y is None:
    return self.X
else:
    return (self.X, self.y)
'Calling this function changes the serialization behavior of the object permanently. If this function has been called, when the object is serialized, it will save the design matrix to `path` as a .npy file rather than pickling the design matrix along with the rest of the dataset object. This avoids pickle\'s unfortunate behavior of using 2X the RAM when unpickling. TODO: Get rid of this logic, use custom array-aware picklers (joblib, custom pylearn2 serialization format). Parameters path : str The path to save the design matrix to'
def use_design_loc(self, path):
if not path.endswith('.npy'):
    raise ValueError("path should end with '.npy'")
self.design_loc = path
'The index of the axis of the batches Returns axis : int The axis of a topological view of this dataset that corresponds to indexing over different examples.'
def get_topo_batch_axis(self):
axis = self.view_converter.axes.index('b')
return axis
'If called, when pickled the dataset will be saved using only 8 bits per element. .. todo:: Not sure this should be implemented as something a base dataset does. Perhaps as a mixin that specific datasets (i.e. CIFAR10) inherit from.'
def enable_compression(self):
self.compress = True
'.. todo:: WRITEME'
def __getstate__(self):
rval = copy.copy(self.__dict__)
if self.compress:
    rval['compress_min'] = rval['X'].min(axis=0)
    rval['X'] = rval['X'] - rval['compress_min']
    rval['compress_max'] = rval['X'].max(axis=0)
    rval['compress_max'][rval['compress_max'] == 0] = 1
    rval['X'] *= 255.0 / rval['compress_max']
    rval['X'] = np.cast['uint8'](rval['X'])
if self.design_loc is not None:
    np.save(self.design_loc, rval['X'])
    del rval['X']
return rval
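The 8-bit compression round trip performed by __getstate__ and undone in __setstate__ can be summarized by the following standalone numpy sketch (the helper names are made up):

import numpy as np

def compress(X):
    mn = X.min(axis=0)
    shifted = X - mn
    mx = shifted.max(axis=0)
    mx[mx == 0] = 1                    # avoid dividing by zero
    return (shifted * (255.0 / mx)).astype('uint8'), mn, mx

def decompress(X8, mn, mx):
    return np.float32(X8) * mx / 255.0 + mn

X = np.random.rand(6, 4).astype('float32')
X8, mn, mx = compress(X)
print(np.abs(decompress(X8, mn, mx) - X).max())   # small quantization error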
'.. todo:: WRITEME'
def __setstate__(self, d):
if d['design_loc'] is not None:
    if control.get_load_data():
        fname = cache.datasetCache.cache_file(d['design_loc'])
        d['X'] = np.load(fname)
    else:
        d['X'] = None
if d['compress']:
    X = d['X']
    mx = d['compress_max']
    mn = d['compress_min']
    del d['compress_max']
    del d['compress_min']
    d['X'] = 0
    self.__dict__.update(d)
    if X is not None:
        self.X = np.cast['float32'](X) * mx / 255.0 + mn
    else:
        self.X = None
else:
    self.__dict__.update(d)
if not all(m in d for m in ('data_specs', 'X_space',
                            '_iter_data_specs', 'X_topo_space')):
    X_space = VectorSpace(dim=self.X.shape[1])
    X_source = 'features'
    if self.y is None:
        space = X_space
        source = X_source
    else:
        y_space = VectorSpace(dim=self.y.shape[-1])
        y_source = 'targets'
        space = CompositeSpace((X_space, y_space))
        source = (X_source, y_source)
    self.data_specs = (space, source)
    self.X_space = X_space
    self._iter_data_specs = (X_space, X_source)
    view_converter = d.get('view_converter', None)
    if view_converter is not None:
        if not hasattr(view_converter, 'topo_space'):
            raise NotImplementedError('Not able to get a topo_space '
                                      'from this converter: %s'
                                      % view_converter)
        self.X_topo_space = view_converter.topo_space
'This function splits the dataset according to train_size, if it is set by the user, using the mode provided. Otherwise it uses train_prop to divide the dataset into a training and holdout validation set. This function returns the training and validation datasets. Parameters _mode : WRITEME train_size : int Number of examples that will be assigned to the training dataset. train_prop : float Proportion of the dataset to assign to the training split. Returns WRITEME'
def _apply_holdout(self, _mode='sequential', train_size=0, train_prop=0):
if train_size != 0:
    size = train_size
elif train_prop != 0:
    size = np.round(self.get_num_examples() * train_prop)
else:
    raise ValueError('Initialize either split ratio and split size to '
                     'non-zero value.')
if size < (self.get_num_examples() - size):
    dataset_iter = self.iterator(
        mode=_mode, batch_size=(self.get_num_examples() - size))
    valid = dataset_iter.next()
    train = dataset_iter.next()[:(self.get_num_examples() -
                                  valid.shape[0])]
else:
    dataset_iter = self.iterator(mode=_mode, batch_size=size)
    train = dataset_iter.next()
    valid = dataset_iter.next()[:(self.get_num_examples() -
                                  train.shape[0])]
return (train, valid)
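A hedged usage sketch of the holdout split (assumes a working Python 2 environment with pylearn2 and Theano installed; the data are random), showing how split_dataset_holdout drives _apply_holdout:

import numpy as np
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix

ds = DenseDesignMatrix(X=np.random.rand(100, 5).astype('float32'))
train, valid = ds.split_dataset_holdout(train_prop=0.8)  # sequential 80/20 split
print(train.shape, valid.shape)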
'This function splits the dataset into the given number of folds and returns an array of folds. Parameters nfolds : int, optional The number of folds for the validation set. Returns WRITEME'
def split_dataset_nfolds(self, nfolds=0):
folds_iter = self.iterator(mode='sequential', num_batches=nfolds)
folds = list(folds_iter)
return folds
'This function splits the dataset according to train_size, if it is set by the user. Otherwise it uses train_prop to divide the dataset into a training and holdout validation set. This function returns the training and validation datasets. Parameters train_size : int Number of examples that will be assigned to the training dataset. train_prop : float Proportion of the dataset to assign to the training split.'
def split_dataset_holdout(self, train_size=0, train_prop=0):
return self._apply_holdout('sequential', train_size, train_prop)
'This function splits the dataset into n folds using random slices and returns the folds. Parameters nfolds : int The number of folds for the dataset. rng : WRITEME Random number generation class to be used.'
def bootstrap_nfolds(self, nfolds, rng=None):
folds_iter = self.iterator(mode='random_slice', num_batches=nfolds, rng=rng)
folds = list(folds_iter)
return folds
'This function creates a random training and holdout validation split according to the train_size (or train_prop) defined by the user. Parameters train_size : int Number of examples that will be assigned to the training dataset. train_prop : float Proportion of the dataset to assign to the training split. rng : WRITEME Random number generation class to be used.'
def bootstrap_holdout(self, train_size=0, train_prop=0, rng=None):
return self._apply_holdout('random_slice', train_size, train_prop)
'If we view the dataset as providing a stream of random examples to read, the object returned uniquely identifies our current position in that stream.'
def get_stream_position(self):
return copy.copy(self.rng)
'.. todo:: WRITEME properly Return to a state specified by an object returned from get_stream_position. Parameters pos : object WRITEME'
def set_stream_position(self, pos):
self.rng = copy.copy(pos)
'Return to the default initial state of the random example stream.'
def restart_stream(self):
self.reset_RNG()
'Restore the default seed of the rng used for choosing random examples.'
def reset_RNG(self):
if 'default_rng' not in dir(self):
    self.default_rng = make_np_rng(None, [17, 2, 946],
                                   which_method='random_integers')
self.rng = copy.copy(self.default_rng)
'.. todo:: WRITEME Parameters preprocessor : object preprocessor object can_fit : bool, optional WRITEME'
def apply_preprocessor(self, preprocessor, can_fit=False):
preprocessor.apply(self, can_fit)
'Convert an array (or the entire dataset) to a topological view. Parameters mat : ndarray, 2-dimensional, optional An array containing a design matrix representation of training examples. If unspecified, the entire dataset (`self.X`) is used instead. This parameter is not named X because X is generally used to refer to the design matrix for the current problem. In this case we want to make it clear that `mat` need not be the design matrix defining the dataset.'
def get_topological_view(self, mat=None):
if self.view_converter is None:
    raise Exception('Tried to call get_topological_view on a dataset '
                    'that has no view converter')
if mat is None:
    mat = self.X
return self.view_converter.design_mat_to_topo_view(mat)
'Convert an array (or the entire dataset) to a destination space. Parameters mat : ndarray, 2-dimensional An array containing a design matrix representation of training examples. dspace : Space A Space we want the data in mat to be formatted in. It can be a VectorSpace for a design matrix output, a Conv2DSpace for a topological output for instance. Valid values depend on the type of `self.view_converter`. Returns WRITEME'
def get_formatted_view(self, mat, dspace):
if self.view_converter is None:
    raise Exception('Tried to call get_formatted_view on a dataset '
                    'that has no view converter')
self.X_space.np_validate(mat)
return self.view_converter.get_formatted_batch(mat, dspace)
'.. todo:: WRITEME properly Return a view of mat in the topology preserving format. Currently the same as get_topological_view. Parameters mat : ndarray, 2-dimensional WRITEME'
def get_weights_view(self, mat):
if self.view_converter is None:
    raise Exception('Tried to call get_weights_view on a dataset '
                    'that has no view converter')
return self.view_converter.design_mat_to_weights_view(mat)
'Sets the dataset to represent V, where V is a batch of topological views of examples. .. todo:: Why is this parameter named \'V\'? Parameters V : ndarray An array containing a design matrix representation of training examples. axes : tuple, optional The axes ordering of the provided topo_view. Must be some permutation of (\'b\', 0, 1, \'c\') where \'b\' indicates the axis indexing examples, 0 and 1 indicate the row/cols dimensions and \'c\' indicates the axis indexing color channels.'
def set_topological_view(self, V, axes=('b', 0, 1, 'c')):
if len(V.shape) != len(axes):
    raise ValueError('The topological view must have exactly 4 '
                     'dimensions, corresponding to %s' % str(axes))
assert not contains_nan(V)
rows = V.shape[axes.index(0)]
cols = V.shape[axes.index(1)]
channels = V.shape[axes.index('c')]
self.view_converter = DefaultViewConverter([rows, cols, channels],
                                           axes=axes)
self.X = self.view_converter.topo_view_to_design_mat(V)
self.X_topo_space = self.view_converter.topo_space
assert not contains_nan(self.X)
X_space = VectorSpace(dim=self.X.shape[1])
X_source = 'features'
if self.y is None:
    space = X_space
    source = X_source
else:
    if self.y.ndim == 1:
        dim = 1
    else:
        dim = self.y.shape[-1]
    if getattr(self, 'y_labels', None) is not None:
        y_space = IndexSpace(dim=dim, max_labels=self.y_labels)
    elif getattr(self, 'max_labels', None) is not None:
        y_space = IndexSpace(dim=dim, max_labels=self.max_labels)
    else:
        y_space = VectorSpace(dim=dim)
    y_source = 'targets'
    space = CompositeSpace((X_space, y_space))
    source = (X_source, y_source)
self.data_specs = (space, source)
self.X_space = X_space
self._iter_data_specs = (X_space, X_source)
'Return topo (a batch of examples in topology preserving format), in design matrix format. Parameters topo : ndarray, optional An array containing a topological representation of training examples. If unspecified, the entire dataset (`self.X`) is used instead. Returns WRITEME'
def get_design_matrix(self, topo=None):
if topo is not None:
    if self.view_converter is None:
        raise Exception('Tried to convert from topological_view to '
                        'design matrix using a dataset that has no '
                        'view converter')
    return self.view_converter.topo_view_to_design_mat(topo)
return self.X
'.. todo:: WRITEME Parameters X : ndarray WRITEME'
def set_design_matrix(self, X):
assert len(X.shape) == 2
assert not contains_nan(X)
self.X = X
'.. todo:: WRITEME'
def get_targets(self):
return self.y
'.. todo:: WRITEME Parameters batch_size : int WRITEME include_labels : bool WRITEME'
def get_batch_design(self, batch_size, include_labels=False):
try:
    idx = self.rng.randint(self.X.shape[0] - batch_size + 1)
except ValueError:
    if batch_size > self.X.shape[0]:
        reraise_as(ValueError('Requested %d examples from a dataset '
                              'containing only %d.'
                              % (batch_size, self.X.shape[0])))
    raise
rx = self.X[idx:idx + batch_size, :]
if include_labels:
    if self.y is None:
        return (rx, None)
    ry = self.y[idx:idx + batch_size]
    return (rx, ry)
rx = np.cast[config.floatX](rx)
return rx
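In plain numpy, the random contiguous batch selection above amounts to the following sketch (names are illustrative):

import numpy as np

rng = np.random.RandomState(0)
X = np.random.rand(100, 5)
batch_size = 10
idx = rng.randint(X.shape[0] - batch_size + 1)   # random start row
batch = X[idx:idx + batch_size, :]               # contiguous slice of rows
print(idx, batch.shape)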
'.. todo:: WRITEME Parameters batch_size : int WRITEME include_labels : bool WRITEME'
def get_batch_topo(self, batch_size, include_labels=False):
if include_labels:
    batch_design, labels = self.get_batch_design(batch_size, True)
else:
    batch_design = self.get_batch_design(batch_size)
rval = self.view_converter.design_mat_to_topo_view(batch_design)
if include_labels:
    return (rval, labels)
return rval
'.. todo:: WRITEME'
def view_shape(self):
return self.view_converter.view_shape()
'.. todo:: WRITEME'
def weights_view_shape(self):
return self.view_converter.weights_view_shape()
'.. todo:: WRITEME'
def has_targets(self):
return (self.y is not None)
'.. todo:: WRITEME properly Restricts the dataset to include only the examples in range(start, stop). Ignored if both arguments are None. Parameters start : int start index stop : int stop index'
def restrict(self, start, stop):
assert (start is None) == (stop is None)
if start is None:
    return
assert start >= 0
assert stop > start
assert stop <= self.X.shape[0]
assert self.X.shape[0] == self.y.shape[0]
self.X = self.X[start:stop, :]
if self.y is not None:
    self.y = self.y[start:stop, :]
assert self.X.shape[0] == self.y.shape[0]
assert self.X.shape[0] == stop - start
'.. todo:: WRITEME properly If y exists and is a vector of ints, converts it to a binary (one-hot) matrix. Otherwise raises a ValueError. Parameters min_class : int WRITEME'
def convert_to_one_hot(self, min_class=0):
if self.y is None:
    raise ValueError('Called convert_to_one_hot on a DenseDesignMatrix '
                     'with no labels.')
if self.y.ndim != 1:
    raise ValueError("Called convert_to_one_hot on a DenseDesignMatrix "
                     "whose labels aren't scalar.")
if 'int' not in str(self.y.dtype):
    raise ValueError("Called convert_to_one_hot on a DenseDesignMatrix "
                     "whose labels aren't integer-valued.")
self.y = self.y - min_class
if self.y.min() < 0:
    raise ValueError('We do not support negative classes. You can use '
                     'the min_class argument to remap negative classes '
                     'to positive values, but we require this to be done '
                     'explicitly so you are aware of the remapping.')
num_classes = self.y.max() + 1
y = np.zeros((self.y.shape[0], num_classes))
for i in xrange(self.y.shape[0]):
    y[i, self.y[i]] = 1
self.y = y
(init_space, source) = self.data_specs
(X_space, init_y_space) = init_space.components
new_y_space = VectorSpace(dim=num_classes)
new_space = CompositeSpace((X_space, new_y_space))
self.data_specs = (new_space, source)
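The conversion itself is ordinary one-hot encoding; a minimal numpy sketch:

import numpy as np

y = np.array([0, 2, 1, 2])                 # integer labels, min_class == 0
num_classes = y.max() + 1
one_hot = np.zeros((y.shape[0], num_classes))
one_hot[np.arange(y.shape[0]), y] = 1
print(one_hot)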
'.. todo:: WRITEME Parameters X : ndarray The data to be adjusted'
def adjust_for_viewer(self, X):
return (X / np.abs(X).max())
'.. todo:: WRITEME Parameters X : int WRITEME ref : float WRITEME per_example : object, optional WRITEME'
def adjust_to_be_viewed_with(self, X, ref, per_example=None):
if per_example is not None:
    logger.warning('ignoring per_example')
return np.clip(X / np.abs(ref).max(), -1.0, 1.0)
'Returns the data_specs specifying how the data is internally stored. This is the format the data returned by `self.get_data()` will be.'
def get_data_specs(self):
return self.data_specs
'.. todo:: WRITEME properly Change the axes of the view_converter, if any. This function is only useful if you intend to call self.iterator without data_specs, and with "topo=True", which is deprecated. Parameters axes : WRITEME WRITEME'
def set_view_converter_axes(self, axes):
assert self.view_converter is not None
self.view_converter.set_axes(axes)
self.X_topo_space = self.view_converter.topo_space
'Sanity checks for X_labels and y_labels.'
def _check_labels(self):
if self.X_labels is not None:
    assert self.X is not None
    assert self.view_converter is None
    assert self.X.ndim <= 2
if self.y_labels is not None:
    assert self.y is not None
    assert self.y.ndim <= 2
'.. todo:: WRITEME'
def set_design_matrix(self, X, start=0):
assert len(X.shape) == 2
assert not contains_nan(X)
DenseDesignMatrixPyTables.fill_hdf5(file_handle=self.h5file,
                                    data_x=X,
                                    start=start)
'Sets the dataset to represent V, where V is a batch of topological views of examples. .. todo:: Why is this parameter named \'V\'? Parameters V : ndarray An array containing a design matrix representation of training examples. axes : tuple, optional The axes ordering of the provided topo_view. Must be some permutation of (\'b\', 0, 1, \'c\') where \'b\' indicates the axis indexing examples, 0 and 1 indicate the row/cols dimensions and \'c\' indicates the axis indexing color channels. start : int The start index to write data.'
def set_topological_view(self, V, axes=('b', 0, 1, 'c'), start=0):
assert not contains_nan(V)
rows = V.shape[axes.index(0)]
cols = V.shape[axes.index(1)]
channels = V.shape[axes.index('c')]
self.view_converter = DefaultViewConverter([rows, cols, channels],
                                           axes=axes)
X = self.view_converter.topo_view_to_design_mat(V)
assert not contains_nan(X)
DenseDesignMatrixPyTables.fill_hdf5(file_handle=self.h5file,
                                    data_x=X,
                                    start=start)
'Initializes the hdf5 file into which the data will be stored. This must be called before calling fill_hdf5. Parameters path : string The name of the hdf5 file. shapes : tuple The shapes of X and y. title : string, optional Name of the dataset. e.g. For SVHN, set this to "SVHN Dataset". "Pytables Dataset" is used as title, by default. y_dtype : string, optional Either \'float\' or \'int\'. Decides the type of pytables atom used to store the y data. By default \'float\' type is used.'
def init_hdf5(self, path, shapes, title='Pytables Dataset', y_dtype='float'):
assert y_dtype in ['float', 'int'], "y_dtype can be 'float' or 'int' only"
(x_shape, y_shape) = shapes
ensure_tables()
h5file = tables.openFile(path, mode='w', title=title)
gcolumns = h5file.createGroup(h5file.root, 'Data', 'Data')
atom = (tables.Float32Atom() if config.floatX == 'float32'
        else tables.Float64Atom())
h5file.createCArray(gcolumns, 'X', atom=atom, shape=x_shape,
                    title='Data values', filters=self.filters)
if y_dtype != 'float':
    atom = (tables.Int32Atom() if config.floatX == 'float32'
            else tables.Int64Atom())
h5file.createCArray(gcolumns, 'y', atom=atom, shape=y_shape,
                    title='Data targets', filters=self.filters)
return (h5file, gcolumns)
'Saves the data to the hdf5 file. PyTables tends to crash if you write large amounts of data into them at once. As such this function writes data in batches. Parameters file_handle : hdf5 file handle Handle to an hdf5 object. data_x : nd array X data. Must be the same shape as specified to init_hdf5. data_y : nd array, optional y data. Must be the same shape as specified to init_hdf5. node : string, optional The hdf5 node into which the data should be stored. start : int The start index to write data. batch_size : int, optional The size of the batch to be saved.'
@staticmethod
def fill_hdf5(file_handle, data_x, data_y=None, node=None, start=0, batch_size=5000):
if node is None:
    node = file_handle.getNode('/', 'Data')
data_size = data_x.shape[0]
last = np.floor(data_size / float(batch_size)) * batch_size
for i in xrange(0, data_size, batch_size):
    stop = (i + np.mod(data_size, batch_size)) if i >= last else (i + batch_size)
    assert len(range(start + i, start + stop)) == len(range(i, stop))
    assert (start + stop) <= node.X.shape[0]
    node.X[start + i:start + stop, :] = data_x[i:stop, :]
    if data_y is not None:
        node.y[start + i:start + stop, :] = data_y[i:stop, :]
    file_handle.flush()
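A standalone PyTables sketch of the same batched-write strategy (written against the modern snake_case PyTables API, with a made-up file name): a large array is written into a CArray chunk by chunk, flushing after each chunk instead of doing one huge assignment.

import numpy as np
import tables

data = np.random.rand(10000, 32).astype('float32')
h5 = tables.open_file('batched_demo.h5', mode='w')
X = h5.create_carray(h5.root, 'X', atom=tables.Float32Atom(),
                     shape=data.shape)
batch = 2500
for i in range(0, data.shape[0], batch):
    X[i:i + batch, :] = data[i:i + batch, :]   # write one chunk
    h5.flush()                                 # flush between chunks
h5.close()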
'Resizes the X and y tables. This must be called before calling fill_hdf5. Parameters h5file : hdf5 file handle Handle to an hdf5 object. start : int The start index to write data. stop : int The index of the record following the last record to be written.'
def resize(self, h5file, start, stop):
ensure_tables()
data = h5file.getNode('/', 'Data')
try:
    gcolumns = h5file.createGroup('/', 'Data_', 'Data')
except tables.exceptions.NodeError:
    h5file.removeNode('/', 'Data_', 1)
    gcolumns = h5file.createGroup('/', 'Data_', 'Data')
start = 0 if start is None else start
stop = gcolumns.X.nrows if stop is None else stop
atom = (tables.Float32Atom() if config.floatX == 'float32'
        else tables.Float64Atom())
x = h5file.createCArray(gcolumns, 'X', atom=atom,
                        shape=(stop - start, data.X.shape[1]),
                        title='Data values', filters=self.filters)
if np.issubdtype(data.y, int):
    atom = (tables.Int32Atom() if config.floatX == 'float32'
            else tables.Int64Atom())
y = h5file.createCArray(gcolumns, 'y', atom=atom,
                        shape=(stop - start, data.y.shape[1]),
                        title='Data targets', filters=self.filters)
x[:] = data.X[start:stop]
y[:] = data.y[start:stop]
h5file.removeNode('/', 'Data', 1)
h5file.renameNode('/', 'Data', 'Data_')
h5file.flush()
return (h5file, gcolumns)
'.. todo:: WRITEME'
def view_shape(self):
return self.shape
'.. todo:: WRITEME'
def weights_view_shape(self):
return self.shape
'Returns a topological view/copy of design matrix. Parameters design_matrix: numpy.ndarray A design matrix with data in rows. Data is assumed to be laid out in memory according to the axis order (\'b\', \'c\', 0, 1) returns: numpy.ndarray A matrix with axis order given by self.axes and batch shape given by self.shape (if you reordered self.shape to match self.axes, as self.shape is always in \'c\', 0, 1 order). This will try to return a view into design_matrix if possible; otherwise it will allocate a new ndarray.'
def design_mat_to_topo_view(self, design_matrix):
if len(design_matrix.shape) != 2:
    raise ValueError('design_matrix must have 2 dimensions, but shape '
                     'was %s.' % str(design_matrix.shape))
expected_row_size = np.prod(self.shape)
if design_matrix.shape[1] != expected_row_size:
    raise ValueError("This DefaultViewConverter's self.shape = %s, for "
                     "a total size of %d, but the design_matrix's row "
                     "size was different (%d)."
                     % (str(self.shape), expected_row_size,
                        design_matrix.shape[1]))
bc01_shape = tuple([design_matrix.shape[0]] +
                   [self.shape[i] for i in (2, 0, 1)])
topo_array_bc01 = design_matrix.reshape(bc01_shape)
axis_order = [('b', 'c', 0, 1).index(axis) for axis in self.axes]
return topo_array_bc01.transpose(*axis_order)
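A numpy sketch of the reshape-then-transpose performed above: each row is first reinterpreted in ('b', 'c', 0, 1) order and then permuted into the requested axes (here ('b', 0, 1, 'c')); the shapes and names are illustrative.

import numpy as np

rows, cols, channels = 4, 4, 3
design = np.arange(2 * rows * cols * channels).reshape(2, -1)  # 2 examples
bc01 = design.reshape(2, channels, rows, cols)
axes = ('b', 0, 1, 'c')
topo = bc01.transpose([('b', 'c', 0, 1).index(ax) for ax in axes])
print(topo.shape)                                              # (2, 4, 4, 3)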
'.. todo:: WRITEME'
def design_mat_to_weights_view(self, X):
rval = self.design_mat_to_topo_view(X)
rval = np.transpose(rval, tuple(self.axes.index(axis)
                                for axis in ('b', 0, 1, 'c')))
return rval
'Returns a design matrix view/copy of topological matrix. Parameters topo_array: numpy.ndarray An N-D array with axis order given by self.axes. Non-batch axes\' dimension sizes must agree with corresponding sizes in self.shape. returns: numpy.ndarray A design matrix with data in rows. Data is laid out in memory according to the default axis order (\'b\', \'c\', 0, 1). This will try to return a view into topo_array if possible; otherwise it will allocate a new ndarray.'
def topo_view_to_design_mat(self, topo_array):
for shape_elem, axis in safe_zip(self.shape, (0, 1, 'c')):
    if topo_array.shape[self.axes.index(axis)] != shape_elem:
        raise ValueError(
            "topo_array's %s axis has a different size (%d) from the "
            "corresponding size (%d) in self.shape.\n"
            "  self.shape: %s (uses standard axis order: 0, 1, 'c')\n"
            "  self.axes: %s\n"
            "  topo_array.shape: %s (should be in self.axes' order)"
            % (axis, topo_array.shape[self.axes.index(axis)], shape_elem,
               self.shape, self.axes, topo_array.shape))
topo_array_bc01 = topo_array.transpose([self.axes.index(ax)
                                        for ax in ('b', 'c', 0, 1)])
return topo_array_bc01.reshape((topo_array_bc01.shape[0],
                                np.prod(topo_array_bc01.shape[1:])))
'.. todo:: WRITEME properly Reformat batch from the internal storage format into dspace.'
def get_formatted_batch(self, batch, dspace):
if isinstance(dspace, VectorSpace):
    return dspace.np_format_as(batch, dspace)
elif isinstance(dspace, Conv2DSpace):
    topo_batch = self.design_mat_to_topo_view(batch)
    if self.topo_space.axes != self.axes:
        warnings.warn('It looks like %s.axes has been changed directly, '
                      'please use the set_axes() method instead.'
                      % self.__class__.__name__)
        self._update_topo_space()
    return self.topo_space.np_format_as(topo_batch, dspace)
else:
    raise ValueError('%s does not know how to format a batch into '
                     '%s of type %s.'
                     % (self.__class__.__name__, dspace, type(dspace)))
'.. todo:: WRITEME'
def __setstate__(self, d):
if 'axes' not in d:
    d['axes'] = ['b', 0, 1, 'c']
self.__dict__.update(d)
if 'topo_space' not in self.__dict__:
    self._update_topo_space()
'Update self.topo_space from self.shape and self.axes'
def _update_topo_space(self):
(rows, cols, channels) = self.shape
self.topo_space = Conv2DSpace(shape=(rows, cols),
                              num_channels=channels,
                              axes=self.axes)
'.. todo:: WRITEME'
def set_axes(self, axes):
self.axes = axes
self._update_topo_space()
'.. todo:: WRITEME'
def __init__(self, num_examples, rng=(2013, 5, 17)):
rng = make_np_rng(rng, self._default_seed, which_method='uniform')
X = rng.uniform(-1, 1, size=(num_examples, 2))
y = _four_regions_labels(X)
super(FourRegions, self).__init__(X=X, y=y, y_labels=4)
'.. todo:: WRITEME'
def __init__(self, min_x=(-6.28), max_x=6.28, std=0.05, rng=None):
(self.min_x, self.max_x, self.std) = (min_x, max_x, std)
rng = make_np_rng(rng, [17, 2, 946], which_method=['uniform', 'randn'])
self.default_rng = copy.copy(rng)
self.rng = rng
'.. todo:: WRITEME'
def energy(self, mat):
x = mat[:, 0]
y = mat[:, 1]
rval = ((y - N.cos(x)) ** 2.0) / (2.0 * (self.std ** 2.0))
return rval
'.. todo:: WRITEME properly This dataset can generate an infinite number of examples. This function gives the pdf from which the examples are drawn.'
def pdf_func(self, mat):
x = mat[:, 0]
y = mat[:, 1]
rval = N.exp(-((y - N.cos(x)) ** 2.0) / (2.0 * (self.std ** 2.0)))
rval /= N.sqrt(2.0 * N.pi * (self.std ** 2.0))
rval /= (self.max_x - self.min_x)
rval *= (x < self.max_x)
rval *= (x > self.min_x)
return rval
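In plain numpy, the density implemented above (y normally distributed around cos(x) with scale std, x uniform on [min_x, max_x]) looks like this sketch:

import numpy as np

min_x, max_x, std = -6.28, 6.28, 0.05

def pdf(points):
    x, y = points[:, 0], points[:, 1]
    gauss = np.exp(-(y - np.cos(x)) ** 2 / (2.0 * std ** 2))
    gauss /= np.sqrt(2.0 * np.pi * std ** 2)
    support = ((x > min_x) & (x < max_x)) / (max_x - min_x)
    return gauss * support

print(pdf(np.array([[0.0, 1.0], [0.0, 0.5]])))  # on-curve point has higher density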
'.. todo:: WRITEME properly This dataset can generate an infinite number of examples. This function gives the energy function for the distribution from which the examples are drawn.'
def free_energy(self, X):
x = X[:, 0]
y = X[:, 1]
rval = T.sqr(y - T.cos(x)) / (2.0 * (self.std ** 2.0))
mask = x < self.max_x
mask = mask * (x > self.min_x)
rval = mask * rval + (1 - mask) * 1e+30
return rval
'.. todo:: WRITEME properly This dataset can generate an infinite number of examples. This function gives the pdf from which the examples are drawn.'
def pdf(self, X):
x = X[:, 0]
y = X[:, 1]
rval = T.exp(-T.sqr(y - T.cos(x)) / (2.0 * (self.std ** 2.0)))
rval /= N.sqrt(2.0 * N.pi * (self.std ** 2.0))
rval /= (self.max_x - self.min_x)
rval *= (x < self.max_x)
rval *= (x > self.min_x)
return rval
'.. todo:: WRITEME'
def get_stream_position(self):
return copy.copy(self.rng)
'.. todo:: WRITEME'
def set_stream_position(self, s):
self.rng = copy.copy(s)
'.. todo:: WRITEME'
def restart_stream(self):
self.reset_RNG()
'.. todo:: WRITEME'
def reset_RNG(self):
if 'default_rng' not in dir(self):
    self.default_rng = N.random.RandomState([17, 2, 946])
self.rng = copy.copy(self.default_rng)
'.. todo:: WRITEME'
def apply_preprocessor(self, preprocessor, can_fit=False):
raise NotImplementedError()
'.. todo:: WRITEME'
def get_batch_design(self, batch_size):
x = N.cast[config.floatX](self.rng.uniform(self.min_x, self.max_x,
                                           (batch_size, 1)))
y = N.cos(x) + N.cast[config.floatX](self.rng.randn(*x.shape)) * self.std
rval = N.hstack((x, y))
return rval
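Sampling a batch from this dataset is therefore just the following (numpy sketch, seed chosen arbitrarily):

import numpy as np

rng = np.random.RandomState(0)
x = rng.uniform(-6.28, 6.28, (5, 1))
y = np.cos(x) + rng.randn(5, 1) * 0.05   # cosine curve plus Gaussian noise
print(np.hstack((x, y)))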
'.. todo:: WRITEME'
def adjust_for_viewer(self, X):
return N.clip(((X * 2.0) - 1.0), (-1.0), 1.0)
'.. todo:: WRITEME'
def adjust_to_be_viewed_with(self, X, other, per_example=False):
return self.adjust_for_viewer(X)
'.. todo:: WRITEME'
def get_test_set(self):
args = {}
args.update(self.args)
del args['self']
args['which_set'] = 'test'
args['start'] = None
args['stop'] = None
args['fit_preprocessor'] = args['fit_test_preprocessor']
args['fit_test_preprocessor'] = None
return MNIST(**args)
'.. todo:: WRITEME'
def get_test_set(self):
return SVHN(which_set='test', path=self.path, center=self.center, scale=self.scale, start=self.start, stop=self.stop, axes=self.axes, preprocessor=self.preprocessor)
'.. todo:: WRITEME'
def make_data(self, which_set, path, shuffle=True):
sizes = {'train': 73257, 'test': 26032, 'extra': 531131,
         'train_all': 604388, 'valid': 6000, 'splitted_train': 598388}
image_size = 32 * 32 * 3
h_file_n = '{0}_32x32.h5'.format(os.path.join(path, 'h5', which_set))
(h5file, node) = self.init_hdf5(h_file_n,
                                ([sizes[which_set], image_size],
                                 [sizes[which_set], 1]),
                                title='SVHN Dataset', y_dtype='int')
rng = make_np_rng(None, 322, which_method='shuffle')

def design_matrix_view(data_x):
    """Reshape data_x to a design matrix view."""
    data_x = numpy.transpose(data_x, axes=[3, 2, 0, 1])
    data_x = data_x.reshape((data_x.shape[0], 32 * 32 * 3))
    return data_x

def load_data(path):
    """Loads data from mat files."""
    data = load(path)
    data_x = numpy.cast[config.floatX](data['X'])
    data_y = data['y']
    del data
    gc.collect()
    return (design_matrix_view(data_x), data_y)

def split_train_valid(path, num_valid_train=400, num_valid_extra=200):
    """
    Extract number of class balanced samples from train and extra
    sets for validation, and regard the remaining as new train set.

    Parameters
    ----------
    num_valid_train : int, optional
        Number of samples per class from train
    num_valid_extra : int, optional
        Number of samples per class from extra
    """
    data = load('{0}train_32x32.mat'.format(path))
    valid_index = []
    for i in xrange(1, 11):
        index = numpy.nonzero(data['y'] == i)[0]
        index.flags.writeable = 1
        rng.shuffle(index)
        valid_index.append(index[:num_valid_train])
    valid_index = set(numpy.concatenate(valid_index))
    train_index = set(numpy.arange(data['X'].shape[3])) - valid_index
    valid_index = list(valid_index)
    train_index = list(train_index)
    train_x = data['X'][:, :, :, train_index]
    train_y = data['y'][train_index, :]
    valid_x = data['X'][:, :, :, valid_index]
    valid_y = data['y'][valid_index, :]
    train_size = data['X'].shape[3]
    assert train_x.shape[3] == train_size - num_valid_train * 10
    assert train_y.shape[0] == train_size - num_valid_train * 10
    assert valid_x.shape[3] == num_valid_train * 10
    assert valid_y.shape[0] == num_valid_train * 10
    del data
    gc.collect()
    data = load('{0}extra_32x32.mat'.format(path))
    valid_index = []
    for i in xrange(1, 11):
        index = numpy.nonzero(data['y'] == i)[0]
        index.flags.writeable = 1
        rng.shuffle(index)
        valid_index.append(index[:num_valid_extra])
    valid_index = set(numpy.concatenate(valid_index))
    train_index = set(numpy.arange(data['X'].shape[3])) - valid_index
    valid_index = list(valid_index)
    train_index = list(train_index)
    train_x = numpy.concatenate((train_x, data['X'][:, :, :, train_index]),
                                axis=3)
    train_y = numpy.concatenate((train_y, data['y'][train_index, :]))
    valid_x = numpy.concatenate((valid_x, data['X'][:, :, :, valid_index]),
                                axis=3)
    valid_y = numpy.concatenate((valid_y, data['y'][valid_index, :]))
    extra_size = data['X'].shape[3]
    sizes['valid'] = (num_valid_train + num_valid_extra) * 10
    sizes['splitted_train'] = train_size + extra_size - sizes['valid']
    assert train_x.shape[3] == sizes['splitted_train']
    assert train_y.shape[0] == sizes['splitted_train']
    assert valid_x.shape[3] == sizes['valid']
    assert valid_y.shape[0] == sizes['valid']
    del data
    gc.collect()
    train_x = numpy.cast[config.floatX](train_x)
    valid_x = numpy.cast[config.floatX](valid_x)
    return ((design_matrix_view(train_x), train_y),
            (design_matrix_view(valid_x), valid_y))

if which_set in ['train', 'test']:
    (data_x, data_y) = load_data('{0}{1}_32x32.mat'.format(path, which_set))
elif which_set in ['splitted_train', 'valid']:
    (train_data, valid_data) = split_train_valid(path)
    if which_set == 'splitted_train':
        (data_x, data_y) = train_data
    else:
        (data_x, data_y) = valid_data
    del train_data
elif which_set in ['train_all', 'extra']:
    (data_x, data_y) = load_data('{0}extra_32x32.mat'.format(path))
    if which_set == 'train_all':
        (train_x, train_y) = load_data('{0}train_32x32.mat'.format(path))
        data_x = numpy.concatenate((data_x, train_x))
        data_y = numpy.concatenate((data_y, train_y))

assert data_x.shape[0] == sizes[which_set]
assert data_y.shape[0] == sizes[which_set]
if shuffle:
    index = range(data_x.shape[0])
    rng.shuffle(index)
    data_x = data_x[index, :]
    data_y = data_y[index, :]
data_y = data_y - 1
SVHN.fill_hdf5(h5file, data_x, data_y, node)
h5file.close()
'.. todo:: WRITEME'
def get_test_set(self):
return SVHN_On_Memory(which_set='test', path=self.path, center=self.center, scale=self.scale, start=self.start, stop=self.stop, axes=self.axes, preprocessor=self.preprocessor)
'.. todo:: WRITEME'
def make_data(self, which_set, path, shuffle=True):
sizes = {'train': 73257, 'test': 26032, 'extra': 531131,
         'train_all': 604388, 'valid': 6000, 'splitted_train': 598388}
image_size = 32 * 32 * 3
rng = make_np_rng(None, 322, which_method='shuffle')

def design_matrix_view(data_x):
    """Reshape data_x to a design matrix view."""
    data_x = numpy.transpose(data_x, axes=[3, 2, 0, 1])
    data_x = data_x.reshape((data_x.shape[0], 32 * 32 * 3))
    return data_x

def load_data(path):
    """Loads data from mat files."""
    data = load(path)
    data_x = numpy.cast[config.floatX](data['X'])
    data_y = data['y']
    del data
    gc.collect()
    return (design_matrix_view(data_x), data_y)

def split_train_valid(path, num_valid_train=400, num_valid_extra=200):
    """
    Extract number of class balanced samples from train and extra
    sets for validation, and regard the remaining as new train set.

    Parameters
    ----------
    num_valid_train : int, optional
        Number of samples per class from train
    num_valid_extra : int, optional
        Number of samples per class from extra
    """
    data = load('{0}train_32x32.mat'.format(path))
    valid_index = []
    for i in xrange(1, 11):
        index = numpy.nonzero(data['y'] == i)[0]
        index.flags.writeable = 1
        rng.shuffle(index)
        valid_index.append(index[:num_valid_train])
    valid_index = set(numpy.concatenate(valid_index))
    train_index = set(numpy.arange(data['X'].shape[3])) - valid_index
    valid_index = list(valid_index)
    train_index = list(train_index)
    train_x = data['X'][:, :, :, train_index]
    train_y = data['y'][train_index, :]
    valid_x = data['X'][:, :, :, valid_index]
    valid_y = data['y'][valid_index, :]
    train_size = data['X'].shape[3]
    assert train_x.shape[3] == train_size - num_valid_train * 10
    assert train_y.shape[0] == train_size - num_valid_train * 10
    assert valid_x.shape[3] == num_valid_train * 10
    assert valid_y.shape[0] == num_valid_train * 10
    del data
    gc.collect()
    data = load('{0}extra_32x32.mat'.format(path))
    valid_index = []
    for i in xrange(1, 11):
        index = numpy.nonzero(data['y'] == i)[0]
        index.flags.writeable = 1
        rng.shuffle(index)
        valid_index.append(index[:num_valid_extra])
    valid_index = set(numpy.concatenate(valid_index))
    train_index = set(numpy.arange(data['X'].shape[3])) - valid_index
    valid_index = list(valid_index)
    train_index = list(train_index)
    train_x = numpy.concatenate((train_x, data['X'][:, :, :, train_index]),
                                axis=3)
    train_y = numpy.concatenate((train_y, data['y'][train_index, :]))
    valid_x = numpy.concatenate((valid_x, data['X'][:, :, :, valid_index]),
                                axis=3)
    valid_y = numpy.concatenate((valid_y, data['y'][valid_index, :]))
    extra_size = data['X'].shape[3]
    sizes['valid'] = (num_valid_train + num_valid_extra) * 10
    sizes['splitted_train'] = train_size + extra_size - sizes['valid']
    assert train_x.shape[3] == sizes['splitted_train']
    assert train_y.shape[0] == sizes['splitted_train']
    assert valid_x.shape[3] == sizes['valid']
    assert valid_y.shape[0] == sizes['valid']
    del data
    gc.collect()
    train_x = numpy.cast[config.floatX](train_x)
    valid_x = numpy.cast[config.floatX](valid_x)
    # return the two splits as (x, y) pairs so the caller can unpack them
    return ((design_matrix_view(train_x), train_y),
            (design_matrix_view(valid_x), valid_y))

if which_set in ['train', 'test']:
    (data_x, data_y) = load_data('{0}{1}_32x32.mat'.format(path, which_set))
elif which_set in ['splitted_train', 'valid']:
    (train_data, valid_data) = split_train_valid(path)
    if which_set == 'splitted_train':
        (data_x, data_y) = train_data
    else:
        (data_x, data_y) = valid_data
    del train_data
elif which_set in ['train_all', 'extra']:
    (data_x, data_y) = load_data('{0}extra_32x32.mat'.format(path))
    if which_set == 'train_all':
        (train_x, train_y) = load_data('{0}train_32x32.mat'.format(path))
        data_x = numpy.concatenate((data_x, train_x))
        data_y = numpy.concatenate((data_y, train_y))

assert data_x.shape[0] == sizes[which_set]
assert data_y.shape[0] == sizes[which_set]
if shuffle:
    index = range(data_x.shape[0])
    rng.shuffle(index)
    data_x = data_x[index, :]
    data_y = data_y[index, :]
return (data_x, data_y)
'Return an iterator for this dataset'
def __iter__(self):
return self.iterator()
'Return an iterator for this dataset with the specified behaviour. Unspecified values are filled-in by the default. Parameters mode : str or object, optional One of \'sequential\', \'random_slice\', or \'random_uniform\', *or* a class that instantiates an iterator that returns slices or index sequences on every call to next(). batch_size : int, optional The size of an individual batch. Optional if `mode` is \'sequential\' and `num_batches` is specified (batch size will be calculated based on full dataset size). num_batches : int, optional The total number of batches. Unnecessary if `mode` is \'sequential\' and `batch_size` is specified (number of batches will be calculated based on full dataset size). rng : int, object or array_like, optional Either an instance of `numpy.random.RandomState` (or something with a compatible interface), or a seed value to be passed to the constructor to create a `RandomState`. See the docstring for `numpy.random.RandomState` for details on the accepted seed formats. If unspecified, defaults to using the dataset\'s own internal random number generator, which persists across iterations through the dataset and may potentially be shared by multiple iterator objects simultaneously (see "Notes" below). data_specs : (space, source) pair, optional `space` must be an instance of `Space` and `source` must be a string or tuple of string names such as \'features\' or \'targets\'. The source names specify where the data will come from and the Space specifies its format. When source is a tuple, there are some additional requirements: * `space` must be a `CompositeSpace`, with one sub-space corresponding to each source name. i.e., the specification must be flat. * None of the components of `space` may be a `CompositeSpace`. * Each corresponding (sub-space, source name) pair must be unique, but the same source name may be mapped to many sub-spaces (for example if one part of the model is fully connected and expects a `VectorSpace`, while another part is convolutional and expects a `Conv2DSpace`). If `data_specs` is not provided, the behaviour (which sources will be present, in which order and space, or whether an Exception will be raised) is not defined and may depend on the implementation of each `Dataset`. return_tuple : bool, optional In case `data_specs` consists of a single space and source, if `return_tuple` is True, the returned iterator will return a tuple of length 1 containing the minibatch of the data at each iteration. If False, it will return the minibatch itself. This flag has no effect if data_specs is composite. Default: False. Returns iter_obj : object An iterator object implementing the standard Python iterator protocol (i.e. it has an `__iter__` method that return the object itself, and a `next()` method that returns results until it raises `StopIteration`). The `next()` method returns a batch containing data for each of the sources required in `data_specs`, in the requested `Space`. Notes Arguments are passed as instantiation parameters to classes that derive from `pylearn2.utils.iteration.SubsetIterator`. Iterating simultaneously with multiple iterator objects sharing the same random number generator could lead to difficult-to-reproduce behaviour during training. It is therefore *strongly recommended* that each iterator be given its own random number generator with the `rng` parameter in such situations. 
When it is valid to call the `iterator` method with the default value for all arguments, it makes it possible to use the `Dataset` itself as an Python iterator, with the default implementation of `Dataset.__iter__`. For instance, `DenseDesignMatrix` supports a value of `None` for `data_specs`.'
def iterator(self, mode=None, batch_size=None, num_batches=None, rng=None, data_specs=None, return_tuple=False):
raise NotImplementedError()
'Fill-in unspecified attributes trying to set them to their default values or raising an error. Parameters mode : str or object, optional batch_size : int, optional num_batches : int, optional rng : int, object or array_like, optional data_specs : (space, source) pair, optional Refer to `dataset.iterator` for a detailed description of the parameters. Returns A tuple [mode, batch_size, num_batches, rng, data_specs]. All the element of the tuple are set to either the value provided as input or their default value. Specifically: mode : str or object If None, return self._iter_subset_class. If self._iter_subset_class is not defined raise an error. batch_size : int If None, return self._iter_batch_size. If self._iter_batch_size is not defined return None. num_batches : int If None, return self._iter_num_batches. If self._iter_num_batches is not defined return None. rng : int, object or array_like If None and mode.stochastic, return self.rng. data_specs : (space, source) pair If None, return self._iter_data_specs. If self._iter_data_specs is not defined raises an error.'
def _init_iterator(self, mode=None, batch_size=None, num_batches=None, rng=None, data_specs=None):
if data_specs is None:
    if hasattr(self, '_iter_data_specs'):
        data_specs = self._iter_data_specs
    else:
        raise ValueError('data_specs not provided and no default data '
                         'spec set for %s' % str(self))
if mode is None:
    if hasattr(self, '_iter_subset_class'):
        mode = self._iter_subset_class
    else:
        raise ValueError('iteration mode not provided and no default '
                         'mode set for %s' % str(self))
else:
    mode = resolve_iterator_class(mode)
if batch_size is None:
    batch_size = getattr(self, '_iter_batch_size', None)
if num_batches is None:
    num_batches = getattr(self, '_iter_num_batches', None)
if rng is None and mode.stochastic:
    rng = self.rng
return [mode, batch_size, num_batches, rng, data_specs]
'Shift and scale a tensor, mapping its data range to [-1, 1]. It makes it possible for the transformed tensor to be displayed with `pylearn2.gui.patch_viewer` tools. Default is to do nothing. Parameters X: `numpy.ndarray` a tensor in the same space as the data Returns `numpy.ndarray` X shifted and scaled by a transformation that maps the data range to [-1, 1]. Notes For example, for MNIST X will lie in [0,1] and the return value should be X*2-1'
def adjust_for_viewer(self, X):
return X
'Returns true if the dataset includes targets'
def has_targets(self):
raise NotImplementedError()
'Returns the index of the axis that corresponds to different examples in a batch when using topological_view. WARNING: This method is deprecated and will be unsupported after 27 July 2015. Some classes, e.g. DenseDesignMatrix, might still implement it, but it will not be part of the interface anymore.'
def get_topo_batch_axis(self):
warnings.warn('This method is deprecated and will be unsupported after '
              '27 July 2015')
raise NotImplementedError()
'Returns a randomly chosen batch of data formatted as a design matrix. This method is not guaranteed to have any particular properties like not repeating examples, etc. It is mostly useful for getting a single batch of data for a unit test or a quick-and-dirty visualization. Using this method for serious learning code is strongly discouraged. All code that depends on any particular example sampling properties should use Dataset.iterator. WARNING: This method is deprecated and will be unsupported after 27 July 27, 2015. Some classes, e.g. DenseDesignMatrix, might still implement it, but it will not be part of the interface anymore. .. todo:: Refactor to use `include_targets` rather than `include_labels`, to make the terminology more consistent with the rest of the library. Parameters batch_size : int The number of examples to include in the batch. include_labels : bool If True, returns the targets for the batch, as well as the features. Returns batch : member of feature space, or member of (feature, target) space. Either numpy value of the features, or a (features, targets) tuple of numpy values, depending on the value of `include_labels`.'
def get_batch_design(self, batch_size, include_labels=False):
warnings.warn('This method is deprecated and will be unsupported after '
              '27 July 2015')
raise NotImplementedError(str(type(self)) +
                          ' does not implement get_batch_design.')
'Returns a topology-preserving batch of data. This method is not guaranteed to have any particular properties like not repeating examples, etc. It is mostly useful for getting a single batch of data for a unit test or a quick-and-dirty visualization. Using this method for serious learning code is strongly discouraged. All code that depends on any particular example sampling properties should use Dataset.iterator. .. todo:: Refactor to use `include_targets` rather than `include_labels`, to make the terminology more consistent with the rest of the library. WARNING: This method is deprecated and will be unsupported after 27 July 27, 2015. Some classes, e.g. DenseDesignMatrix, might still implement it, but it will not be part of the interface anymore. Parameters batch_size : int The number of examples to include in the batch. include_labels : bool If True, returns the targets for the batch, as well as the features. Returns batch : member of feature space, or member of (feature, target) space. Either numpy value of the features, or a (features, targets) tuple of numpy values, depending on the value of `include_labels`.'
def get_batch_topo(self, batch_size, include_labels=False):
warnings.warn('This method is deprecated and will be unsupported after '
              '27 July 2015')
raise NotImplementedError()
'Returns the number of examples in the dataset Notes Infinite datasets have float(\'inf\') examples.'
def get_num_examples(self):
raise NotImplementedError()
'.. todo:: WRITEME'
def adjust_for_viewer(self, X):
rval = X.copy()
if not hasattr(self, 'center'):
    self.center = False
if not hasattr(self, 'rescale'):
    self.rescale = False
if not hasattr(self, 'gcn'):
    self.gcn = False
if self.gcn is not None:
    rval = X.copy()
    for i in xrange(rval.shape[0]):
        rval[i, :] /= numpy.abs(rval[i, :]).max()
    return rval
if not self.center:
    rval -= 127.5
if not self.rescale:
    rval /= 127.5
rval = numpy.clip(rval, -1.0, 1.0)
return rval
'.. todo:: WRITEME'
def adjust_to_be_viewed_with(self, X, orig, per_example=False):
rval = X.copy()
if not hasattr(self, 'center'):
    self.center = False
if not hasattr(self, 'rescale'):
    self.rescale = False
if not hasattr(self, 'gcn'):
    self.gcn = False
if self.gcn is not None:
    rval = X.copy()
    if per_example:
        for i in xrange(rval.shape[0]):
            rval[i, :] /= numpy.abs(orig[i, :]).max()
    else:
        rval /= numpy.abs(orig).max()
    rval = numpy.clip(rval, -1.0, 1.0)
    return rval
if not self.center:
    rval -= 127.5
if not self.rescale:
    rval /= 127.5
rval = numpy.clip(rval, -1.0, 1.0)
return rval
'.. todo:: WRITEME'
def get_test_set(self):
return CIFAR10(which_set='test', center=self.center, rescale=self.rescale, gcn=self.gcn, toronto_prepro=self.toronto_prepro, axes=self.axes)
'.. todo:: WRITEME'
def __init__(self, which_set, center=False, example_range=None):
if which_set == 'train':
    train = load('${PYLEARN2_DATA_PATH}/stl10/stl10_matlab/train.mat')
    self.class_names = [array[0].encode('utf-8')
                        for array in train['class_names'][0]]
    fold_indices = train['fold_indices']
    assert fold_indices.shape == (1, 10)
    self.fold_indices = np.zeros((10, 1000), dtype='uint16')
    for i in xrange(10):
        indices = fold_indices[0, i]
        assert indices.shape == (1000, 1)
        assert indices.dtype == 'uint16'
        self.fold_indices[i, :] = indices[:, 0]
    X = np.cast['float32'](train['X'])
    assert X.shape == (5000, 96 * 96 * 3)
    if example_range is not None:
        X = X[example_range[0]:example_range[1], :]
    y_labels = 10
    y = train['y'][:, 0] - 1
    assert y.shape == (5000,)
elif which_set == 'test':
    test = load('${PYLEARN2_DATA_PATH}/stl10/stl10_matlab/test.mat')
    self.class_names = [array[0].encode('utf-8')
                        for array in test['class_names'][0]]
    X = np.cast['float32'](test['X'])
    assert X.shape == (8000, 96 * 96 * 3)
    if example_range is not None:
        X = X[example_range[0]:example_range[1], :]
    y_labels = 10
    y = test['y'][:, 0] - 1
    assert y.shape == (8000,)
elif which_set == 'unlabeled':
    unlabeled = load('${PYLEARN2_DATA_PATH}/stl10/stl10_matlab/unlabeled.mat')
    X = unlabeled['X']
    assert X.shape == (96 * 96 * 3, 100000)
    assert X.dtype == 'uint8'
    if example_range is None:
        X = X.value
    else:
        X = X.value[:, example_range[0]:example_range[1]]
    X = np.cast['float32'](X.T)
    unlabeled.close()
    y_labels = None
    y = None
else:
    raise ValueError('"' + which_set + '" is not an STL10 dataset. '
                     'Recognized values are "train", "test", and '
                     '"unlabeled".')
if center:
    X -= 127.5
view_converter = dense_design_matrix.DefaultViewConverter((96, 96, 3))
super(STL10, self).__init__(X=X, y=y, y_labels=y_labels,
                            view_converter=view_converter)
for i in xrange(self.X.shape[0]):
    mat = X[i:i + 1, :]
    topo = self.get_topological_view(mat)
    for j in xrange(topo.shape[3]):
        temp = topo[0, :, :, j].T.copy()
        topo[0, :, :, j] = temp
    mat = self.get_design_matrix(topo)
    X[i:i + 1, :] = mat
assert not contains_nan(self.X)
'.. todo:: WRITEME'
def __init__(self, which_set, axes=['b', 0, 1, 'c']):
self.args = locals()
assert which_set in self.data_split.keys()
path = serial.preprocess('${PYLEARN2_DATA_PATH}/ocr_letters/letter.data')
with open(path, 'r') as data_f:
    data = data_f.readlines()
    data = [line.split(' DCTB ') for line in data]
data_x = [map(int, item[6:-1]) for item in data]
data_letters = [item[1] for item in data]
data_fold = [int(item[5]) for item in data]
letters = list(numpy.unique(data_letters))
data_y = [letters.index(item) for item in data_letters]
if which_set == 'train':
    split = slice(0, self.data_split['train'])
elif which_set == 'valid':
    split = slice(self.data_split['train'],
                  self.data_split['train'] + self.data_split['valid'])
elif which_set == 'test':
    split = slice(self.data_split['train'] + self.data_split['valid'],
                  (self.data_split['train'] + self.data_split['valid'] +
                   self.data_split['test']))
data_x = numpy.asarray(data_x[split])
data_y = numpy.asarray(data_y[split])
data_fold = numpy.asarray(data_fold[split])
assert data_x.shape[0] == data_y.shape[0]
assert data_x.shape[0] == self.data_split[which_set]
view_converter = dense_design_matrix.DefaultViewConverter((16, 8, 1), axes)
super(OCR, self).__init__(X=data_x, y=data_y, y_labels=len(letters),
                          view_converter=view_converter)
assert not contains_nan(self.X)
self.fold = data_fold
'.. todo:: WRITEME'
def get_test_set(self):
return OCR('test')
'Returns the test set.'
def get_test_set(self):
yaml = self.preprocessed_dataset.yaml_src
yaml = yaml.replace('train', 'test')
args = {}
args.update(self.args)
del args['self']
args['start'] = None
args['stop'] = None
args['preprocessed_dataset'] = yaml_parse.load(yaml)
return ZCA_Dataset(**args)
'Formats examples for use with PatchViewer Parameters X : 2d numpy array One example per row Returns output : 2d numpy array One example per row, rescaled so the maximum absolute value within each row is (almost) 1.'
def adjust_for_viewer(self, X):
rval = X.copy()
for i in xrange(rval.shape[0]):
    rval[i, :] /= np.abs(rval[i, :]).max() + 1e-12
return rval
'Adjusts `X` using the same transformation that would be applied to `other` if `other` were passed to `adjust_for_viewer`. This is useful for visualizing `X` alongside `other`. Parameters X : 2d ndarray Examples to be adjusted other : 2d ndarray Examples that define the scale per_example : bool Default: False. If True, compute the scale separately for each example. If False, compute one scale for the whole batch.'
def adjust_to_be_viewed_with(self, X, other, per_example=False):
assert X.shape == other.shape, (X.shape, other.shape)
rval = X.copy()
if per_example:
    for i in xrange(rval.shape[0]):
        rval[i, :] /= np.abs(other[i, :]).max()
else:
    rval /= np.abs(other).max()
rval = np.clip(rval, -1.0, 1.0)
return rval
'Map `X` back to the original space (before ZCA preprocessing) and adjust it for display with PatchViewer. Parameters X : 2d numpy array The examples to be mapped back and adjusted Returns output : 2d numpy array The examples in the original space, adjusted for display'
def mapback_for_viewer(self, X):
assert X.ndim == 2
rval = self.preprocessor.inverse(X)
rval = self.preprocessed_dataset.adjust_for_viewer(rval)
return rval
'Map `X` back to the original space (before ZCA preprocessing) Parameters X : 2d numpy array The examples to be mapped back Returns output : 2d numpy array The examples in the original space'
def mapback(self, X):
return self.preprocessor.inverse(X)
'Creates an NpyDataset object. Parameters file : file-like object or str A file-like object or string indicating a filename. Passed directly to `numpy.load`. mmap_mode : str, optional Memory mapping options for memory-mapping an array on disk, rather than loading it into memory. See the `numpy.load` docstring for details.'
def __init__(self, file, mmap_mode=None):
self._path = file
self._mmap_mode = mmap_mode
self._loaded = False
'.. todo:: WRITEME'
def _deferred_load(self):
self._loaded = True
loaded = numpy.load(self._path, mmap_mode=self._mmap_mode)
assert isinstance(loaded, numpy.ndarray), 'single arrays (.npy) only'
if len(loaded.shape) == 2:
    super(NpyDataset, self).__init__(X=loaded)
else:
    super(NpyDataset, self).__init__(topo_view=loaded)
'Creates an NpzDataset object. Parameters file : file-like object or str A file-like object or string indicating a filename. Passed directly to `numpy.load`. key : str A string indicating which key name to use to pull out the input data. target_key : str, optional A string indicating which key name to use to pull out the output data.'
def __init__(self, file, key, target_key=None):
loaded = numpy.load(file)
assert not isinstance(loaded, numpy.ndarray), \
    'zipped groups of arrays (.npz) only'
assert key in loaded, '%s not found in loaded NPZFile' % key
if target_key is not None:
    assert target_key in loaded, ('%s not found in loaded NPZFile'
                                  % target_key)
    y = loaded[target_key]
else:
    y = None
if len(loaded[key].shape) == 2:
    super(NpzDataset, self).__init__(X=loaded[key], y=y)
else:
    super(NpzDataset, self).__init__(topo_view=loaded[key], y=y)
loaded.close()
'Loads the data from a CSV file (ending with a \'.csv\' filename). Returns X : object The features of the dataset. y : object, optional The target variable of the model.'
def _load_data(self):
assert self.path.endswith('.csv')
if self.expect_headers:
    data = np.loadtxt(self.path, delimiter=self.delimiter, skiprows=1)
else:
    data = np.loadtxt(self.path, delimiter=self.delimiter)

def take_subset(X, y):
    """
    Takes a subset of the dataset if the start_fraction,
    end_fraction or start/stop parameter of the class is set.

    Parameters
    ----------
    X : object
        The features of the dataset.
    y : object, optional
        The target variable of the model.

    Returns
    -------
    X : object
        The subset of the features of the dataset.
    y : object, optional
        The subset of the target variable of the model.
    """
    if self.start_fraction is not None:
        n = X.shape[0]
        subset_end = int(self.start_fraction * n)
        X = X[0:subset_end, :]
        y = y[0:subset_end]
    elif self.end_fraction is not None:
        n = X.shape[0]
        subset_start = int((1 - self.end_fraction) * n)
        X = X[subset_start:]
        y = y[subset_start:]
    elif self.start is not None:
        X = X[self.start:self.stop]
        if y is not None:
            y = y[self.start:self.stop]
    return (X, y)

if self.expect_labels:
    y = data[:, 0:self.num_outputs]
    X = data[:, self.num_outputs:]
    y = y.reshape((y.shape[0], self.num_outputs))
else:
    X = data
    y = None
(X, y) = take_subset(X, y)
return (X, y)
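A minimal numpy sketch of the same loading path, assuming a hypothetical headerless comma-separated file 'data.csv' whose first column is the target:

import numpy as np

num_outputs = 1
data = np.loadtxt('data.csv', delimiter=',')       # hypothetical file
y = data[:, 0:num_outputs].reshape(-1, num_outputs)
X = data[:, num_outputs:]
print(X.shape, y.shape)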
'.. todo:: WRITEME'
def __init__(self):
view_converter = dense_design_matrix.DefaultViewConverter((32, 32, 3))
super(DebugDataset, self).__init__(X=N.asarray([[1.0, 0.0], [0.0, 1.0]]),
                                   view_converter=view_converter)
assert not N.any(N.isnan(self.X))