"""Compute and return eigen{values,vectors} of X's covariance matrix.

Parameters
----------
X : WRITEME

Returns
-------
WRITEME
    All eigenvalues, in decreasing order.
WRITEME
    Matrix containing the corresponding eigenvectors in its columns.
"""
def _cov_eigen(self, X):
raise NotImplementedError('Not implemented in _PCABase. Use a '
                          'subclass (and implement it there).')
'.. todo:: WRITEME'
def get_input_type(self):
return csr_matrix
'.. todo:: WRITEME'
def _cov_eigen(self, X):
n, d = X.shape
cov = numpy.zeros((d, d))
batch_size = self.minibatch_size
for i in xrange(0, n, batch_size):
    logger.info('\tprocessing example {0}'.format(i))
    end = min(n, i + batch_size)
    x = X[i:end, :].todense() - self.mean_
    assert x.shape[0] == end - i
    prod = numpy.dot(x.T, x)
    assert prod.shape == (d, d)
    cov += prod
cov /= n
logger.info('computing eigens')
v, W = linalg.eigh(cov, eigvals=(d - self.num_components, d - 1))
v, W = v[::-1], W[:, ::-1]
return v, W
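The same strategy can be illustrated outside the class: accumulate centered outer products over minibatches, normalize, then keep the leading eigenpairs in decreasing order. The following is a minimal NumPy sketch, not the pylearn2 implementation; the data, batch size, and number of components are made up for illustration.

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(100, 8)                     # toy design matrix of shape (n, d)
mean = X.mean(axis=0)
num_components, batch_size = 3, 32

d = X.shape[1]
cov = np.zeros((d, d))
for start in range(0, X.shape[0], batch_size):
    x = X[start:start + batch_size] - mean   # center each minibatch
    cov += x.T.dot(x)                         # accumulate outer products
cov /= X.shape[0]

v, W = np.linalg.eigh(cov)                    # eigenvalues in ascending order
v, W = v[::-1][:num_components], W[:, ::-1][:, :num_components]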
"""Compute the PCA transformation matrix.

Given a rectangular matrix :math:`X = USV` such that :math:`S` is a
diagonal matrix with :math:`X`'s singular values along its diagonal,
returns :math:`W = V^{-1}`. If a mean is provided, :math:`X` will not
be centered first.

Parameters
----------
X : numpy.ndarray
    Matrix of shape (n, d) on which to train PCA.
"""
def train(self, X):
assert sparse.issparse(X)
logger.info('computing mean')
self.mean_ = numpy.asarray(X.mean(axis=0))[0, :]
super(SparseMatPCA, self).train(X, mean=self.mean_)
'.. todo:: WRITEME'
def __call__(self, inputs):
self._update_cutoff()
Y = structured_dot(inputs, self.W[:, :self.component_cutoff])
Z = Y - tensor.dot(self.mean, self.W[:, :self.component_cutoff])
if self.whiten:
    Z /= tensor.sqrt(self.v[:self.component_cutoff])
return Z
'Returns a compiled theano function to compute a representation Parameters name : str WRITEME'
def function(self, name=None):
inputs = SparseType('csr', dtype=theano.config.floatX)()
return theano.function([inputs], self(inputs), name=name)
'Perform online computation of covariance matrix eigen{values,vectors}. Parameters X : WRITEME Returns WRITEME'
def _cov_eigen(self, X):
num_components = min(self.num_components, X.shape[1])
pca_estimator = PcaOnlineEstimator(X.shape[1],
                                   n_eigen=num_components,
                                   minibatch_size=self.minibatch_size,
                                   centering=False)
logger.debug('*' * 50)
for i in range(X.shape[0]):
    if (i + 1) % (X.shape[0] / 50) == 0:
        logger.debug('|')
    pca_estimator.observe(X[i, :])
v, W = pca_estimator.getLeadingEigen()
return v[::-1], W.T[:, ::-1]
'.. todo:: WRITEME'
def __call__(self, X):
X = X.T
m, n = X.shape
mean = X.mean(axis=0)
rval = N.zeros((n, n))
for i in xrange(0, m, self.batch_size):
    B = X[i:i + self.batch_size, :] - mean
    rval += N.dot(B.T, B)
return rval / float(m - 1)
'Perform direct computation of covariance matrix eigen{values,vectors}. Parameters X : WRITEME Returns WRITEME'
def _cov_eigen(self, X):
v, W = linalg.eigh(self.cov(X.T))
return v[::-1], W[:, ::-1]
'Compute covariance matrix eigen{values,vectors} via Singular Value Decomposition (SVD). Parameters X : WRITEME Returns WRITEME'
def _cov_eigen(self, X):
U, s, Vh = linalg.svd(X, full_matrices=False)
return s ** 2, Vh.T
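For reference, this relies on the identity behind the SVD shortcut: if X = U S Vh, the eigenvalues of X^T X are the squared singular values and the eigenvectors are the columns of Vh.T. A small NumPy check of that identity on toy data (the division by the number of samples that would turn X^T X into a covariance is omitted here, as it is in the method above):

import numpy as np

X = np.random.RandomState(0).randn(50, 6)
U, s, Vh = np.linalg.svd(X, full_matrices=False)
w, V = np.linalg.eigh(X.T.dot(X))
assert np.allclose(np.sort(s ** 2), np.sort(w))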
'.. todo:: WRITEME'
def train(self, X, mean=None):
warnings.warn('You should probably be using SparseMatPCA, unless '
              'your design matrix fits in memory.')
n, d = X.shape
mean = X.mean(axis=0)
mean_matrix = csr_matrix(mean.repeat(n).reshape((d, n))).T
X = X - mean_matrix
super(SparsePCA, self).train(X, mean=numpy.asarray(mean).squeeze())
'Perform direct computation of covariance matrix eigen{values,vectors}, given a scipy.sparse matrix. Parameters X : WRITEME Returns WRITEME'
def _cov_eigen(self, X):
v, W = eigen_symmetric(X.T.dot(X) / X.shape[0], k=self.num_components)
return v[::-1], W[:, ::-1]
"""Compute and return the PCA transformation of sparse data.

Precondition: `self.mean` has been subtracted from `inputs`. The reason
for this is that, as far as I can tell, there is no way in Theano to
subtract a vector from a sparse matrix without constructing an
intermediary dense matrix; even the hack used in `train()` won't do,
because there is no way to symbolically construct a sparse matrix by
repeating a vector (again, as far as I can tell).

Parameters
----------
inputs : scipy.sparse matrix object
    Sparse matrix of shape (n, d) on which to compute PCA.

Returns
-------
WRITEME
"""
def __call__(self, inputs):
self._update_cutoff()
Y = structured_dot(inputs, self.W[:, :self.component_cutoff])
if self.whiten:
    Y /= tensor.sqrt(self.v[:self.component_cutoff])
return Y
'Returns a compiled theano function to compute a representation Parameters name : str WRITEME Returns WRITEME'
def function(self, name=None):
inputs = SparseType('csr', dtype=theano.config.floatX)()
return theano.function([inputs], self(inputs), name=name)
'.. todo:: WRITEME'
def observe(self, x):
assert numpy.size(x) == self.n_dim
self.n_observations += 1
row = self.n_eigen + self.minibatch_index
self.Xt[row] = x
self.x_sum *= self.gamma
self.x_sum += x
normalizer = (1.0 - pow(self.gamma, self.n_observations)) / (1.0 - self.gamma)
if self.centering:
    self.Xt[row] -= self.x_sum / normalizer
rn = pow(self.gamma, (-0.5) * (self.minibatch_index + 1))
self.Xt[row] *= rn
self.G[:row + 1, row] = numpy.dot(self.Xt[:row + 1, :],
                                  self.Xt[row, :].transpose())
self.G[row, :row] = self.G[:row, row].transpose()
self.minibatch_index += 1
if self.minibatch_index == self.minibatch_size:
    self.reevaluate()
'.. todo:: WRITEME'
def reevaluate(self):
assert self.minibatch_index == self.minibatch_size
for i in range(self.n_eigen + self.minibatch_size):
    self.G[i, i] += self.regularizer
self.d, self.V = linalg.eigh(self.G)
self.Ut = numpy.dot(self.V[:, -self.n_eigen:].transpose(), self.Xt)
rn = pow(self.gamma, (-0.5) * (self.minibatch_index + 1))
inv_rn2 = 1.0 / (rn * rn)
self.Ut *= 1.0 / rn
self.d *= inv_rn2
self.Xt[:self.n_eigen, :] = self.Ut
for i in range(self.n_eigen):
    self.G[i, i] = self.d[-self.n_eigen + i]
self.minibatch_index = 0
'.. todo:: WRITEME'
def getLeadingEigen(self):
normalizer = ((1.0 - pow(self.gamma,
                         self.n_observations - self.minibatch_index))
              / (1.0 - self.gamma))
eigvals = self.d[-self.n_eigen:] / normalizer
eigvecs = numpy.zeros([self.n_eigen, self.n_dim])
for i in range(self.n_eigen):
    u = self.Ut[-self.n_eigen + i]
    eigvecs[i] = u / numpy.sqrt(numpy.dot(u, u))
return [eigvals, eigvecs]
'Returns rval : str A string representation of the object. In this case, just the class name.'
def __str__(self):
return 'Maxout'
'Tells the layer to use the specified input space. This resets parameters! The weight matrix is initialized with the size needed to receive input from this space. Parameters space : Space The Space that the input will lie in.'
def set_input_space(self, space):
self.input_space = space
if isinstance(space, VectorSpace):
    self.requires_reformat = False
    self.input_dim = space.dim
else:
    self.requires_reformat = True
    self.input_dim = space.get_total_dimension()
    self.desired_space = VectorSpace(self.input_dim)
if not (0 == (self.detector_layer_dim - self.pool_size) % self.pool_stride):
    if self.pool_stride == self.pool_size:
        raise ValueError('detector_layer_dim = %d, pool_size = %d. '
                         'Should be divisible but remainder is %d' %
                         (self.detector_layer_dim, self.pool_size,
                          self.detector_layer_dim % self.pool_size))
    raise ValueError()
self.h_space = VectorSpace(self.detector_layer_dim)
self.pool_layer_dim = (self.detector_layer_dim - self.pool_size) / self.pool_stride + 1
self.output_space = VectorSpace(self.pool_layer_dim)
rng = self.mlp.rng
if self.irange is not None:
    assert self.sparse_init is None
    W = (rng.uniform(-self.irange, self.irange,
                     (self.input_dim, self.detector_layer_dim)) *
         (rng.uniform(0.0, 1.0, (self.input_dim, self.detector_layer_dim))
          < self.include_prob))
else:
    assert self.sparse_init is not None
    W = np.zeros((self.input_dim, self.detector_layer_dim))

    def mask_rejects(idx, i):
        if self.mask_weights is None:
            return False
        return self.mask_weights[idx, i] == 0.0

    for i in xrange(self.detector_layer_dim):
        assert self.sparse_init <= self.input_dim
        for j in xrange(self.sparse_init):
            idx = rng.randint(0, self.input_dim)
            while W[idx, i] != 0 or mask_rejects(idx, i):
                idx = rng.randint(0, self.input_dim)
            W[idx, i] = rng.randn()
    W *= self.sparse_stdev
W = sharedX(W)
W.name = self.layer_name + '_W'
self.transformer = MatrixMul(W)
W, = self.transformer.get_params()
assert W.name is not None
if not hasattr(self, 'randomize_pools'):
    self.randomize_pools = False
if self.randomize_pools:
    permute = np.zeros((self.detector_layer_dim, self.detector_layer_dim))
    for j in xrange(self.detector_layer_dim):
        i = rng.randint(self.detector_layer_dim)
        permute[i, j] = 1
    self.permute = sharedX(permute)
if self.mask_weights is not None:
    expected_shape = (self.input_dim, self.detector_layer_dim)
    if expected_shape != self.mask_weights.shape:
        raise ValueError('Expected mask with shape ' + str(expected_shape) +
                         ' but got ' + str(self.mask_weights.shape))
    self.mask = sharedX(self.mask_weights)
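The pooled layer width computed above follows the usual 1-D pooling arithmetic; a standalone check with arbitrary numbers:

detector_layer_dim, pool_size, pool_stride = 240, 4, 2
assert (detector_layer_dim - pool_size) % pool_stride == 0
pool_layer_dim = (detector_layer_dim - pool_size) // pool_stride + 1
print(pool_layer_dim)  # 119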
'Replaces the values in `updates` if needed to enforce the options set in the __init__ method, including `mask_weights` Parameters updates : OrderedDict A dictionary mapping parameters (including parameters not belonging to this model) to updated values of those parameters. The dictionary passed in contains the updates proposed by the learning algorithm. This function modifies the dictionary directly. The modified version will be compiled and executed by the learning algorithm.'
def _modify_updates(self, updates):
if not hasattr(self, 'mask_weights'):
    self.mask_weights = None
if self.mask_weights is not None:
    W, = self.transformer.get_params()
    if W in updates:
        updates[W] = updates[W] * self.mask
'Tells the layer to use the specified input space. This resets parameters! The kernel tensor is initialized with the size needed to receive input from this space. Parameters space : Space The Space that the input will lie in.'
def set_input_space(self, space):
rng = self.mlp.rng
setup_detector_layer_c01b(layer=self, input_space=space, rng=rng)
detector_shape = self.detector_space.shape

def handle_pool_shape(idx):
    if self.pool_shape[idx] < 1:
        raise ValueError('bad pool shape: ' + str(self.pool_shape))
    if self.pool_shape[idx] > detector_shape[idx]:
        if self.fix_pool_shape:
            assert detector_shape[idx] > 0
            self.pool_shape[idx] = detector_shape[idx]
        else:
            raise ValueError('Pool shape exceeds detector layer shape '
                             'on axis %d' % idx)

map(handle_pool_shape, [0, 1])
assert self.pool_shape[0] == self.pool_shape[1]
assert self.pool_stride[0] == self.pool_stride[1]
assert all(isinstance(elem, py_integer_types) for elem in self.pool_stride)
if self.pool_stride[0] > self.pool_shape[0]:
    if self.fix_pool_stride:
        warnings.warn('Fixing the pool stride')
        ps = self.pool_shape[0]
        assert isinstance(ps, py_integer_types)
        self.pool_stride = [ps, ps]
    else:
        raise ValueError('Stride too big.')
assert all(isinstance(elem, py_integer_types) for elem in self.pool_stride)
dummy_detector = sharedX(self.detector_space.get_origin_batch(2)[0:16, :, :, :])
dummy_p = max_pool_c01b(c01b=dummy_detector,
                        pool_shape=self.pool_shape,
                        pool_stride=self.pool_stride)
dummy_p = dummy_p.eval()
self.output_space = Conv2DSpace(shape=[dummy_p.shape[1], dummy_p.shape[2]],
                                num_channels=self.num_channels,
                                axes=('c', 0, 1, 'b'))
logger.info('Output space: {0}'.format(self.output_space.shape))
'Tells the layer to use the specified input space. This resets parameters! The weight tensor is initialized with the size needed to receive input from this space. Parameters space : Space The Space that the input will lie in.'
def set_input_space(self, space):
self.input_space = space
if not isinstance(self.input_space, Conv2DSpace):
    raise TypeError('The input to a convolutional layer should be a '
                    'Conv2DSpace, but layer ' + self.layer_name +
                    ' got ' + str(type(self.input_space)))
self.desired_space = Conv2DSpace(shape=space.shape,
                                 channels=space.num_channels,
                                 axes=('c', 0, 1, 'b'))
ch = self.desired_space.num_channels
rem = ch % 4
if ch > 3 and rem != 0:
    self.dummy_channels = 4 - rem
else:
    self.dummy_channels = 0
self.dummy_space = Conv2DSpace(shape=space.shape,
                               channels=space.num_channels + self.dummy_channels,
                               axes=('c', 0, 1, 'b'))
rng = self.mlp.rng
output_shape = [int(np.ceil((i_sh + 2.0 * self.pad - k_sh) / float(k_st))) + 1
                for i_sh, k_sh, k_st in izip(self.input_space.shape,
                                             self.kernel_shape,
                                             self.kernel_stride)]

def handle_kernel_shape(idx):
    if self.kernel_shape[idx] < 1:
        raise ValueError('kernel must have strictly positive size on all '
                         'axes but has shape: ' + str(self.kernel_shape))
    if output_shape[idx] <= 0:
        if self.fix_kernel_shape:
            self.kernel_shape[idx] = self.input_space.shape[idx] + 2 * self.pad
            assert self.kernel_shape[idx] != 0
            output_shape[idx] = 1
            warnings.warn('Had to change the kernel shape to make '
                          'network feasible')
        else:
            raise ValueError('kernel too big for input '
                             '(even with zero padding)')

map(handle_kernel_shape, [0, 1])
self.detector_space = Conv2DSpace(shape=output_shape,
                                  num_channels=self.detector_channels,
                                  axes=('c', 0, 1, 'b'))
if self.pool_shape is not None:
    def handle_pool_shape(idx):
        if self.pool_shape[idx] < 1:
            raise ValueError('bad pool shape: ' + str(self.pool_shape))
        if self.pool_shape[idx] > output_shape[idx]:
            if self.fix_pool_shape:
                assert output_shape[idx] > 0
                self.pool_shape[idx] = output_shape[idx]
            else:
                raise ValueError('Pool shape exceeds detector layer shape '
                                 'on axis %d' % idx)

    map(handle_pool_shape, [0, 1])
    assert self.pool_shape[0] == self.pool_shape[1]
    assert self.pool_stride[0] == self.pool_stride[1]
    assert all(isinstance(elem, py_integer_types)
               for elem in self.pool_stride)
    if self.pool_stride[0] > self.pool_shape[0]:
        if self.fix_pool_stride:
            warnings.warn('Fixing the pool stride')
            ps = self.pool_shape[0]
            assert isinstance(ps, py_integer_types)
            self.pool_stride = [ps, ps]
        else:
            raise ValueError('Stride too big.')
    assert all(isinstance(elem, py_integer_types)
               for elem in self.pool_stride)
if self.irange is not None:
    self.transformer = local_c01b.make_random_local(
        input_groups=self.input_groups,
        irange=self.irange,
        input_axes=self.desired_space.axes,
        image_shape=self.desired_space.shape,
        output_axes=self.detector_space.axes,
        input_channels=self.dummy_space.num_channels,
        output_channels=self.detector_space.num_channels,
        kernel_shape=self.kernel_shape,
        kernel_stride=self.kernel_stride,
        pad=self.pad,
        partial_sum=self.partial_sum,
        rng=rng)
W, = self.transformer.get_params()
W.name = 'W'
if self.tied_b:
    self.b = sharedX(np.zeros(self.detector_space.num_channels) +
                     self.init_bias)
else:
    self.b = sharedX(self.detector_space.get_origin() + self.init_bias)
self.b.name = 'b'
logger.info('Input shape: {0}'.format(self.input_space.shape))
logger.info(self.layer_name +
            ' detector space: {0}'.format(self.detector_space.shape))
assert self.detector_space.num_channels >= 16
if self.pool_shape is None or np.prod(self.pool_shape) == 1:
    self.output_space = Conv2DSpace(shape=self.detector_space.shape,
                                    num_channels=self.num_channels,
                                    axes=('c', 0, 1, 'b'))
elif max_pool_c01b is not None:
    ds = self.detector_space
    dummy_detector = sharedX(ds.get_origin_batch(2)[0:16, :, :, :])
    dummy_p = max_pool_c01b(c01b=dummy_detector,
                            pool_shape=self.pool_shape,
                            pool_stride=self.pool_stride)
    dummy_p = dummy_p.eval()
    self.output_space = Conv2DSpace(shape=[dummy_p.shape[1],
                                           dummy_p.shape[2]],
                                    num_channels=self.num_channels,
                                    axes=('c', 0, 1, 'b'))
else:
    raise NotImplementedError('Pooling is not implemented for CPU')
logger.info('Output space: {0}'.format(self.output_space.shape))
'Returns norms : theano 4 tensor A theano expression for the norms of the different filters in the layer. TODO: explain significance of each of the 4 axes, and what order they\'ll be in.'
def get_filter_norms(self, W=None):
if W is None:
    W, = self.transformer.get_params()
assert W.ndim == 7
sq_W = T.sqr(W)
norms = T.sqrt(sq_W.sum(axis=(2, 3, 4)))
return norms
'(Symbolically) corrupt the inputs with a noise process. Parameters inputs : tensor_like, or list of tensor_likes Theano symbolic(s) representing a (list of) (mini)batch of inputs to be corrupted, with the first dimension indexing training examples and the second indexing data dimensions. Returns corrupted : tensor_like, or list of tensor_likes Theano symbolic(s) representing the corresponding corrupted inputs.'
def __call__(self, inputs):
if isinstance(inputs, tensor.Variable):
    return self._corrupt(inputs)
else:
    return [self._corrupt(inp) for inp in inputs]
'Corrupts a single tensor_like object. Parameters x : tensor_like Theano symbolic representing a (mini)batch of inputs to be corrupted, with the first dimension indexing training examples and the second indexing data dimensions. Returns corrupted : tensor_like Theano symbolic representing the corresponding corrupted input. Notes This is the method that all subclasses should implement. The logic in Corruptor.__call__ handles mapping over multiple tensor_like inputs.'
def _corrupt(self, x):
raise NotImplementedError()
'.. todo:: WRITEME'
def corruption_free_energy(self, corrupted_X, X):
raise NotImplementedError()
'.. todo:: WRITEME'
def __call__(self, inputs):
return inputs
'Corrupts a single tensor_like object. Parameters x : tensor_like Theano symbolic representing a (mini)batch of inputs to be corrupted, with the first dimension indexing training examples and the second indexing data dimensions. Returns corrupted : tensor_like Theano symbolic representing the corresponding corrupted input.'
def _corrupt(self, x):
return (self.s_rng.binomial(size=x.shape, n=1, p=(1 - self.corruption_level), dtype=theano.config.floatX) * x)
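A NumPy analogue of this masking (binomial) noise, for illustration only: each entry is independently zeroed with probability `corruption_level`. The symbolic Theano version above does the same thing on the computation graph.

import numpy as np

def binomial_corrupt(x, corruption_level, rng=np.random):
    # Keep each entry with probability 1 - corruption_level, zero it otherwise.
    mask = rng.binomial(n=1, p=1 - corruption_level, size=x.shape)
    return mask * x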
'Corrupts a single tensor_like object. Parameters x : tensor_like Theano symbolic representing a (mini)batch of inputs to be corrupted, with the first dimension indexing training examples and the second indexing data dimensions. Returns corrupted : tensor_like Theano symbolic representing the corresponding corrupted input.'
def _corrupt(self, x):
if self.corruption_level < 1e-05:
    return x
dropped = super(DropoutCorruptor, self)._corrupt(x)
return (1.0 / (1.0 - self.corruption_level)) * dropped
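The 1 / (1 - corruption_level) factor keeps the expected activation unchanged, since each unit survives with probability 1 - corruption_level. A quick numerical sanity check (values arbitrary):

import numpy as np

rng = np.random.RandomState(0)
x, p = np.ones(100000), 0.3
dropped = rng.binomial(1, 1 - p, size=x.shape) * x / (1 - p)
assert abs(dropped.mean() - x.mean()) < 0.02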
'Corrupts a single tensor_like object. Parameters x : tensor_like Theano symbolic representing a (mini)batch of inputs to be corrupted, with the first dimension indexing training examples and the second indexing data dimensions. Returns corrupted : tensor_like Theano symbolic representing the corresponding corrupted input.'
def _corrupt(self, x):
noise = self.s_rng.normal(size=x.shape, avg=0.0,
                          std=self.corruption_level,
                          dtype=theano.config.floatX)
return noise + x
'.. todo:: WRITEME'
def corruption_free_energy(self, corrupted_X, X):
axis = range(1, len(X.type.broadcastable))
rval = (T.sum(T.sqr(corrupted_X - X), axis=axis) /
        (2.0 * (self.corruption_level ** 2.0)))
assert len(rval.type.broadcastable) == 1
return rval
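The free energy above is just the squared Euclidean distance between the corrupted and clean examples, scaled by 1 / (2 * corruption_level^2) and reduced over all non-batch axes. A NumPy analogue for a 2-D batch, shown only to make the formula concrete:

import numpy as np

def gaussian_corruption_free_energy(corrupted_X, X, corruption_level):
    # One value per example: ||x_tilde - x||^2 / (2 * sigma^2).
    return ((corrupted_X - X) ** 2).sum(axis=1) / (2.0 * corruption_level ** 2)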
'Corrupts a single tensor_like object. Parameters x : tensor_like Theano symbolic representing a (mini)batch of inputs to be corrupted, with the first dimension indexing training examples and the second indexing data dimensions. Returns corrupted : tensor_like Theano symbolic representing the corresponding corrupted input.'
def _corrupt(self, x):
a = self.s_rng.binomial(size=x.shape, p=(1 - self.corruption_level),
                        dtype=theano.config.floatX)
b = self.s_rng.binomial(size=x.shape, p=0.5,
                        dtype=theano.config.floatX)
c = T.eq(a, 0) * b
return (x * a) + c
'Corrupts a single tensor_like object. Parameters x : tensor_like Theano symbolic representing a (mini)batch of inputs to be corrupted, with the first dimension indexing training examples and the second indexing data dimensions. Returns corrupted : tensor_like Theano symbolic representing the corresponding corrupted input.'
def _corrupt(self, x):
num_examples = x.shape[0]
num_classes = x.shape[1]
keep_mask = T.addbroadcast(
    self.s_rng.binomial(size=(num_examples, 1),
                        p=(1 - self.corruption_level),
                        dtype='int8'),
    1)
pvals = T.alloc(1.0 / num_classes, num_classes)
one_hot = self.s_rng.multinomial(size=(num_examples,), pvals=pvals)
return (keep_mask * x) + ((1 - keep_mask) * one_hot)
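A NumPy analogue of this one-hot corruption, purely for illustration: with probability `corruption_level` a row is replaced by a uniformly drawn one-hot vector, otherwise it is kept as-is.

import numpy as np

def one_hot_corrupt(x, corruption_level, rng=np.random):
    n, k = x.shape
    keep = rng.binomial(1, 1 - corruption_level, size=(n, 1))
    random_one_hot = np.eye(k)[rng.randint(k, size=n)]
    return keep * x + (1 - keep) * random_one_hot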
'Corrupts a single tensor_like object. Parameters x : tensor_like Theano symbolic representing a (mini)batch of inputs to be corrupted, with the first dimension indexing training examples and the second indexing data dimensions. Returns corrupted : tensor_like Theano symbolic representing the corresponding corrupted input.'
def _corrupt(self, x):
noise = self.s_rng.normal(size=x.shape, avg=0.0,
                          std=self.corruption_level,
                          dtype=theano.config.floatX)
return rescaled_softmax(x + noise)
'Corrupts a single tensor_like object. Parameters x : tensor_like Theano symbolic representing a (mini)batch of inputs to be corrupted, with the first dimension indexing training examples and the second indexing data dimensions. Returns corrupted : tensor_like Theano symbolic representing the corresponding corrupted input.'
def _corrupt(self, x):
return self.s_rng.binomial(size=x.shape, p=x, dtype=theano.config.floatX)
'Treats each row in matrix as a multinomial trial. Parameters x : tensor_like x must be a matrix where all elements are non-negative (with at least one non-zero element) Returns y : tensor_like y will have the same shape as x. Each row in y will be a one hot vector, and can be viewed as the outcome of the multinomial trial defined by the probabilities of that row in x.'
def _corrupt(self, x):
normalized = x / x.sum(axis=1, keepdims=True)
return self.s_rng.multinomial(pvals=normalized,
                              dtype=theano.config.floatX)
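The same per-row multinomial trial can be written in NumPy for clarity (a sketch only; the class does it symbolically): normalize each row to a probability vector, draw one outcome per row, and return the corresponding one-hot rows.

import numpy as np

def multinomial_trial_rows(x, rng=np.random):
    pvals = x / x.sum(axis=1, keepdims=True)
    idx = [rng.choice(x.shape[1], p=row) for row in pvals]
    return np.eye(x.shape[1], dtype=x.dtype)[idx]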
'Corrupts a single tensor_like object. Parameters x : tensor_like Theano symbolic representing a (mini)batch of inputs to be corrupted, with the first dimension indexing training examples and the second indexing data dimensions. Returns corrupted : tensor_like Theano symbolic representing the corresponding corrupted input.'
def _corrupt(self, x):
result = x
for c in reversed(self._corruptors):
    result = c(result)
return result
'.. todo:: WRITEME properly Parameters X : WRITEME Must contain only examples that lie on the hypersphere'
def free_energy(self, X):
return T.zeros_like(X[:, 0])
'.. todo:: WRITEME'
def log_prob(self, X):
return ((- self.free_energy(X)) - self.logZ)
'.. todo:: WRITEME'
def random_design_matrix(self, m):
Z = self.s_rng.normal(size=(m, self.dim), avg=0.0, std=1.0,
                      dtype=config.floatX)
Z.name = 'UH.rdm.Z'
sq_norm_Z = T.sum(T.sqr(Z), axis=1)
sq_norm_Z.name = 'UH.rdm.sq_norm_Z'
eps = 1e-06
mask = sq_norm_Z < eps
mask.name = 'UH.rdm.mask'
Z = (Z.T * (1.0 - mask) + mask).T
Z.name = 'UH.rdm.Z2'
sq_norm_Z = sq_norm_Z * (1.0 - mask) + self.dim * mask
sq_norm_Z.name = 'UH.rdm.sq_norm_Z2'
norm_Z = T.sqrt(sq_norm_Z)
norm_Z.name = 'UH.rdm.sq_norm_Z2'
rval = self.radius * (Z.T / norm_Z).T
rval.name = 'UH.rdm.rval'
return rval
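Stripped of the symbolic bookkeeping and the degenerate-norm guard, the sampler above is the standard trick of projecting standard Gaussian draws onto the sphere. A minimal NumPy sketch of that idea:

import numpy as np

def sample_on_sphere(m, dim, radius, rng=np.random):
    Z = rng.randn(m, dim)
    return radius * Z / np.linalg.norm(Z, axis=1, keepdims=True)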
'.. todo:: WRITEME'
def sample_integer(self, m):
return N.nonzero(self.rng.multinomial(pvals=self.pi, n=1, size=(m,)))[1]
'.. todo:: WRITEME'
def free_energy(self, X):
return (0.5 * T.sum(T.dot((X - self.mu), T.dot(self.sigma_inv, T.transpose((X - self.mu))))))
'.. todo:: WRITEME'
def log_prob(self, X):
return ((- self.free_energy(X)) - self.logZ)
'.. todo:: WRITEME'
def random_design_matrix(self, m):
Z = self.s_rng.normal(size=(m, self.mu.shape[0]), avg=0.0, std=1.0,
                      dtype=config.floatX)
return self.mu + T.dot(Z, self.L.T)
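This is the usual reparameterization x = mu + L z with z ~ N(0, I) and L a Cholesky-like factor of the covariance. A NumPy check that samples produced this way have roughly the requested covariance (mu and sigma below are made up):

import numpy as np

rng = np.random.RandomState(0)
mu = np.zeros(3)
sigma = np.array([[2.0, 0.5, 0.0],
                  [0.5, 1.0, 0.2],
                  [0.0, 0.2, 0.5]])
L = np.linalg.cholesky(sigma)
X = mu + rng.randn(100000, 3).dot(L.T)
assert np.allclose(np.cov(X.T), sigma, atol=0.05)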
'.. todo:: WRITEME properly Parameters X : WRITEME A theano variable containing a design matrix of observations of the random vector to condition on.'
def random_design_matrix(self, X):
Z = self.s_rng.normal(size=X.shape, avg=X,
                      std=(1.0 / T.sqrt(self.beta)),
                      dtype=config.floatX)
return Z
'.. todo:: WRITEME properly A property of conditional distributions P(Y|X) Return true if P(y|x) = P(x|y) for all x,y'
def is_symmetric(self):
return True
'Evaluates the log likelihood of a set of datapoints with respect to the probability distribution. Parameters x : numpy matrix The set of points for which you want to evaluate the log likelihood.'
def get_ll(self, x, batch_size=10):
inds = range(x.shape[0])
n_batches = int(numpy.ceil(float(len(inds)) / batch_size))
lls = []
for i in range(n_batches):
    lls.extend(self.lpdf(x[inds[i::n_batches]]))
return numpy.array(lls).mean()
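The strided slice `x[inds[i::n_batches]]` splits the examples into `n_batches` interleaved groups whose sizes differ by at most one, so extending `lls` batch by batch and averaging at the end covers every example exactly once. A quick check of the slicing pattern:

inds, n_batches = list(range(10)), 3
chunks = [inds[i::n_batches] for i in range(n_batches)]
assert sorted(sum(chunks, [])) == inds  # every index appears exactly once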
'.. todo:: WRITEME * What does this function do? * How should inputs be formatted? is it a single tensor, a list of tensors, a tuple of tensors?'
def __call__(self, inputs):
raise NotImplementedError(str(type(self)) +
                          ' does not implement Block.__call__')
'Returns a compiled theano function to compute a representation Parameters name : string, optional name of the function'
def function(self, name=None):
inputs = tensor.matrix()
if self.cpu_only:
    return theano.function([inputs], self(inputs), name=name,
                           mode=get_default_mode().excluding('gpu'))
else:
    return theano.function([inputs], self(inputs), name=name)
'.. todo:: WRITEME'
def perform(self, X):
if self.fn is None:
    self.fn = self.function('perform')
return self.fn(X)
'.. todo:: WRITEME'
def inverse(self):
raise NotImplementedError()
'.. todo:: WRITEME'
def set_input_space(self, space):
raise NotImplementedError(('%s does not implement set_input_space yet' % str(type(self))))
'.. todo:: WRITEME'
def get_input_space(self):
raise NotImplementedError(('%s does not implement get_input_space yet' % str(type(self))))
'.. todo:: WRITEME'
def get_output_space(self):
raise NotImplementedError(('%s does not implement get_output_space yet' % str(type(self))))
'.. todo:: WRITEME'
def layers(self):
return list(self._layers)
'.. todo:: WRITEME'
def __len__(self):
return len(self._layers)
'Return the output representation of all layers, including the inputs. Parameters inputs : tensor_like or list of tensor_likes Theano symbolic (or list thereof) representing the input minibatch(es) to be encoded. Assumed to be 2-tensors, with the first dimension indexing training examples and the second indexing data dimensions. Returns reconstructed : tensor_like or list of tensor_like A list of theano symbolic (or list thereof), each containing the representation at one level. The first element is the input.'
def __call__(self, inputs):
repr = [inputs]
for layer in self._layers:
    outputs = layer(repr[-1])
    repr.append(outputs)
return repr
'Compile a function computing representations on given layers. Parameters name : string, optional name of the function repr_index : int, optional Index of the hidden representation to return. 0 means the input, -1 the last output. sparse_input : bool, optional WRITEME Returns WRITEME'
def function(self, name=None, repr_index=(-1), sparse_input=False):
if sparse_input:
    inputs = SparseType('csr', dtype=theano.config.floatX)()
else:
    inputs = tensor.matrix()
return theano.function([inputs], outputs=self(inputs)[repr_index],
                       name=name)
'Compile a function concatenating representations on given layers. Parameters name : string, optional name of the function start_index : int, optional Index of the hidden representation to start the concatenation. 0 means the input, -1 the last output. end_index : int, optional Index of the hidden representation from which to stop the concatenation. We must have start_index < end_index. Returns WRITEME'
def concat(self, name=None, start_index=(-1), end_index=None):
inputs = tensor.matrix()
return theano.function([inputs],
                       outputs=tensor.concatenate(
                           self(inputs)[start_index:end_index]),
                       name=name)
'Add a new layer on top of the last one Parameters layer : WRITEME'
def append(self, layer):
self._layers.append(layer)
if self._params is not None:
    self._params.update(layer._params)
'.. todo:: WRITEME'
def get_input_space(self):
return self._layers[0].get_input_space()
'.. todo:: WRITEME'
def get_output_space(self):
return self._layers[(-1)].get_output_space()
'.. todo:: WRITEME'
def set_input_space(self, space):
for layer in self._layers:
    layer.set_input_space(space)
    space = layer.get_output_space()
'.. todo:: WRITEME'
def __eq__(self, other):
return ((type(self) == type(other)) and (self.ds == other.ds) and (self.stride == other.stride) and (self.start == other.start))
'.. todo:: WRITEME'
def __hash__(self):
return (((hash(type(self)) ^ hash(self.ds)) ^ hash(self.stride)) ^ hash(self.start))
'.. todo:: WRITEME'
def c_header_dirs(self):
return ([this_dir, config.pthreads.inc_dir] if config.pthreads.inc_dir else [this_dir])
'.. todo:: WRITEME'
def c_headers(self):
return ['nvmatrix.cuh', 'conv_util.cuh']
'.. todo:: WRITEME'
def c_lib_dirs(self):
return ([cuda_convnet_loc, config.pthreads.lib_dir] if config.pthreads.lib_dir else [cuda_convnet_loc])
'.. todo:: WRITEME'
def c_libraries(self):
return (['cuda_convnet', config.pthreads.lib] if config.pthreads.lib else ['cuda_convnet'])
'.. todo:: WRITEME'
def c_code_cache_version(self):
return (1,)
'.. todo:: WRITEME'
def _argument_contiguity_check(self, arg_name):
return ('\n if (!CudaNdarray_is_c_contiguous(%%(%(arg_name)s)s))\n {\n if (!(%(class_name_caps)s_COPY_NON_CONTIGUOUS)) {\n PyErr_SetString(PyExc_ValueError,\n "%(class)s: %(arg_name)s must be C contiguous");\n %%(fail)s;\n }\n }\n ' % {'class': self.__class__.__name__, 'arg_name': arg_name, 'class_name_caps': self.__class__.__name__.upper()})
'.. todo:: WRITEME'
def make_node(self, images, top_down):
images = as_cuda_ndarray_variable(images)
top_down = as_cuda_ndarray_variable(top_down)
assert images.ndim == 4
assert top_down.ndim == 4
channels_broadcastable = images.type.broadcastable[0]
batch_broadcastable = images.type.broadcastable[3]
rows_broadcastable = False
cols_broadcastable = False
houtput_broadcastable = (channels_broadcastable, rows_broadcastable,
                         cols_broadcastable, batch_broadcastable)
houtput_type = CudaNdarrayType(broadcastable=houtput_broadcastable)
houtput = houtput_type()
poutput_broadcastable = (channels_broadcastable, rows_broadcastable,
                         cols_broadcastable, batch_broadcastable)
poutput_type = CudaNdarrayType(broadcastable=poutput_broadcastable)
poutput = poutput_type()
return Apply(self, [images, top_down], [houtput, poutput])
'.. todo:: WRITEME'
def c_code(self, node, name, inputs, outputs, sub):
(images, top_down) = inputs (ptargets, htargets) = outputs fail = sub['fail'] num_braces = 0 if self.copy_non_contiguous: raise UnimplementedError() else: basic_setup = '#define PROBMAXPOOL_COPY_NON_CONTIGUOUS 0\n' setup_nv_images = (self._argument_contiguity_check('images') + '\n if (%(images)s->nd != 4)\n {\n PyErr_Format(PyExc_ValueError,\n "images must have nd=4, got nd=%%i", %(images)s->nd);\n %(fail)s;\n }\n\n { //setup_nv_images brace 1\n\n const int * images_dims = CudaNdarray_HOST_DIMS(%(images)s);\n const int img_channels = images_dims[0];\n const int imgSizeY = images_dims[1];\n const int imgSizeX = images_dims[2];\n const int batch_size = images_dims[3];\n\n if(imgSizeY != imgSizeX){\n PyErr_Format(PyExc_ValueError,\n "images must be square(dims[1] == dims[2]). Shape (%%i,%%i,%%i,%%i)",\n img_channels, imgSizeY, imgSizeX, batch_size);\n %(fail)s;\n }\n if(%(ds)s > imgSizeY){\n PyErr_Format(PyExc_ValueError,\n "ds(%%d) must be <= imgSizeX(%%d) and imgSizeY(%%d).",\n %(ds)s, imgSizeX, imgSizeY);\n %(fail)s;\n }\n if(%(start)s >= imgSizeX){\n PyErr_Format(PyExc_ValueError,\n "start is %%d but must be smaller then the images size of %%d x %%d.",\n %(start)s, imgSizeX, imgSizeY);\n %(fail)s;\n }\n\n NVMatrix nv_images(%(images)s, img_channels * imgSizeY * imgSizeX, batch_size,\n "ProbMaxPool:nv_images");\n ') num_braces += 1 setup_nv_top_down = (self._argument_contiguity_check('top_down') + '\n if (%(top_down)s->nd != 4)\n {\n PyErr_Format(PyExc_ValueError,\n "top_down must have nd=4, got nd=%%i", %(images)s->nd);\n %(fail)s;\n }\n\n { //setup_nv_images brace 1\n\n int _outputsX = ((int)(ceil((imgSizeY - %(start)s - %(ds)s) / ((float)%(stride)s)))) + 1;\n\n\n NVMatrix nv_top_down(%(top_down)s, img_channels * _outputsX * _outputsX, batch_size,\n "ProbMaxPool:nv_top_down");\n ') num_braces += 1 setup_nv_ptargets = '\n //int _outputsX = ((int)(ceil((imgSizeY - %(start)s - %(ds)s) / ((float)%(stride)s)))) + 1;\n\n int target_dims [] = {\n img_channels,\n _outputsX,\n _outputsX,\n batch_size };\n\n if (CudaNdarray_prep_output(& %(ptargets)s, 4, target_dims))\n {\n %(fail)s;\n }\n\n { // setup_nv_target brace # 1\n\n NVMatrix nv_ptargets(%(ptargets)s, target_dims[0] * target_dims[1] * target_dims[2],\n target_dims[3], "ProbMaxPool:nv_ptargets");\n\n ' num_braces += 1 setup_nv_htargets = '\n int target_dims [] = {\n img_channels,\n imgSizeX,\n imgSizeY,\n batch_size };\n\n if (CudaNdarray_prep_output(& %(htargets)s, 4, target_dims))\n {\n %(fail)s;\n }\n\n { // setup_nv_target brace # 1\n\n NVMatrix nv_htargets(%(htargets)s, target_dims[0] * target_dims[1] * target_dims[2],\n target_dims[3], "ProbMaxPool:nv_htargets");\n\n ' num_braces += 1 do_pool = '\n probabilisticPool(nv_images, nv_top_down, nv_ptargets, nv_htargets, img_channels, %(ds)s,\n %(start)s, %(stride)s, _outputsX, MaxPooler());\n ' braces = ('}' * num_braces) rval = ((((((basic_setup + setup_nv_images) + setup_nv_top_down) + setup_nv_ptargets) + setup_nv_htargets) + do_pool) + braces) start = self.start stride = self.stride ds = self.ds rval = (rval % locals()) return rval
'.. todo:: WRITEME'
def grad(self, inp, grads):
x, top_down = inp
p, h = self(x, top_down)
gp, gh = grads
gp_iszero = 0.0
gh_iszero = 0.0
if isinstance(gp.type, theano.gradient.DisconnectedType):
    gp = tensor.zeros_like(p)
    gp_iszero = 1.0
if isinstance(gh.type, theano.gradient.DisconnectedType):
    gh = tensor.zeros_like(h)
    gh_iszero = 1.0
gp = gpu_contiguous(gp)
gh = gpu_contiguous(gh)
gp_iszero = as_cuda_ndarray_variable(gp_iszero)
gh_iszero = as_cuda_ndarray_variable(gh_iszero)
return ProbMaxPoolGrad(self.ds, self.stride, self.start)(p, h, gp, gh,
                                                         gp_iszero,
                                                         gh_iszero)
'.. todo:: WRITEME'
def make_thunk(self, *args, **kwargs):
if not convnet_available():
    raise RuntimeError('Could not compile cuda_convnet')
return super(ProbMaxPool, self).make_thunk(*args, **kwargs)
'.. todo:: WRITEME'
def __eq__(self, other):
return ((type(self) == type(other)) and (self.ds == other.ds) and (self.stride == other.stride) and (self.start == other.start))
'.. todo:: WRITEME'
def __hash__(self):
return (((hash(type(self)) ^ hash(self.ds)) ^ hash(self.stride)) ^ hash(self.start))
'.. todo:: WRITEME'
def c_header_dirs(self):
return ([this_dir, config.pthreads.inc_dir] if config.pthreads.inc_dir else [this_dir])
'.. todo:: WRITEME'
def c_headers(self):
return ['nvmatrix.cuh', 'conv_util.cuh']
'.. todo:: WRITEME'
def c_lib_dirs(self):
return ([cuda_convnet_loc, config.pthreads.lib_dir] if config.pthreads.lib_dir else [cuda_convnet_loc])
'.. todo:: WRITEME'
def c_libraries(self):
return (['cuda_convnet', config.pthreads.lib] if config.pthreads.lib else ['cuda_convnet'])
'.. todo:: WRITEME'
def c_code_cache_version(self):
return (1,)
'.. todo:: WRITEME'
def _argument_contiguity_check(self, arg_name):
return ('\n if (!CudaNdarray_is_c_contiguous(%%(%(arg_name)s)s))\n {\n if (!(%(class_name_caps)s_COPY_NON_CONTIGUOUS)) {\n PyErr_SetString(PyExc_ValueError,\n "%(class)s: %(arg_name)s must be C contiguous");\n %%(fail)s;\n }\n }\n ' % {'class': self.__class__.__name__, 'arg_name': arg_name, 'class_name_caps': self.__class__.__name__.upper()})
'.. todo:: WRITEME'
def make_node(self, p, h, gp, gh, gp_iszero, gh_iszero):
p = as_cuda_ndarray_variable(p)
h = as_cuda_ndarray_variable(h)
gp = as_cuda_ndarray_variable(gp)
gh = as_cuda_ndarray_variable(gh)
assert p.ndim == 4
assert h.ndim == 4
assert gp.ndim == 4
assert gh.ndim == 4
try:
    nb_channel = int(get_scalar_constant_value(h.shape[0]))
    assert nb_channel % 16 == 0
except NotScalarConstantError:
    pass
return Apply(self, [p, h, gp, gh, gp_iszero, gh_iszero],
             [p.type(), h.type()])
'.. todo:: WRITEME'
def c_code(self, node, name, inputs, outputs, sub):
(p, h, gp, gh, gp_iszero, gh_iszero) = inputs (targets_z, targets_t) = outputs fail = sub['fail'] num_braces = 0 if self.copy_non_contiguous: raise UnimplementedError() else: basic_setup = '#define PROBMAXPOOLGRAD_COPY_NON_CONTIGUOUS 0\n' setup_nv_h = (self._argument_contiguity_check('h') + '\n if (%(h)s->nd != 4)\n {\n PyErr_Format(PyExc_ValueError,\n "h must have nd=4, got nd=%%i", %(h)s->nd);\n %(fail)s;\n }\n\n { //setup_nv_images brace 1\n\n const int * images_dims = CudaNdarray_HOST_DIMS(%(h)s);\n const int img_channels = images_dims[0];\n const int imgSizeY = images_dims[1];\n const int imgSizeX = images_dims[2];\n const int batch_size = images_dims[3];\n\n if(imgSizeY != imgSizeX){\n PyErr_Format(PyExc_ValueError,\n "images must be square(dims[1] == dims[2]). Shape (%%i,%%i,%%i,%%i)",\n img_channels, imgSizeY, imgSizeX, batch_size);\n %(fail)s;\n }\n if(%(ds)s > imgSizeY){\n PyErr_Format(PyExc_ValueError,\n "ds(%%d) must be <= imgSizeX(%%d) and imgSizeY(%%d).",\n %(ds)s, imgSizeX, imgSizeY);\n %(fail)s;\n }\n if (CudaNdarray_HOST_DIMS(%(h)s)[0] %% 16 != 0)\n {\n PyErr_Format(PyExc_ValueError,\n "h must have a number of channels that is a multiple of 16. Got %%d",\n CudaNdarray_HOST_DIMS(%(gh)s)[0]);\n %(fail)s;\n }\n\n\n NVMatrix nv_h(%(h)s, img_channels * imgSizeY * imgSizeX,\n batch_size, "ProbMaxPool:nv_h");\n\n ') num_braces += 1 setup_nv_p = (self._argument_contiguity_check('p') + '\n if (%(p)s->nd != 4)\n {\n PyErr_Format(PyExc_ValueError,\n "P must have nd=4, got nd=%%i", %(p)s->nd);\n %(fail)s;\n }\n\n { //setup_nv_images brace 1\n\n int _outputsX = ((int)(ceil((imgSizeY - %(start)s - %(ds)s) / ((float)%(stride)s)))) + 1;\n\n\n NVMatrix nv_p(%(p)s, img_channels * _outputsX * _outputsX, batch_size,\n "ProbMaxPool:nv_p");\n ') num_braces += 1 setup_nv_gh = (self._argument_contiguity_check('gh') + '\n if (%(gh)s->nd != 4)\n {\n PyErr_Format(PyExc_ValueError,\n "gh must have nd=4, got nd=%%i", %(gh)s->nd);\n %(fail)s;\n }\n if (CudaNdarray_HOST_DIMS(%(gh)s)[0] %% 16 != 0)\n {\n PyErr_Format(PyExc_ValueError,\n "gh must have a number of channels that is a multiple of 16. 
Got %%d",\n CudaNdarray_HOST_DIMS(%(gh)s)[0]);\n %(fail)s;\n }\n\n { //setup_nv_gh brace 1\n\n const int * gh_dims = CudaNdarray_HOST_DIMS(%(gh)s);\n const int gh_channels = gh_dims[0];\n const int ghSizeY = gh_dims[1];\n const int ghSizeX = gh_dims[2];\n\n NVMatrix nv_gh(%(gh)s, gh_channels * ghSizeY * ghSizeX,\n batch_size, "ProbMaxPool:nv_gh");\n ') num_braces += 1 setup_nv_gp = (self._argument_contiguity_check('gp') + '\n if (%(gp)s->nd != 4)\n {\n PyErr_Format(PyExc_ValueError,\n "gp must have nd=4, got nd=%%i", %(gp)s->nd);\n %(fail)s;\n }\n\n { //setup_nv_images brace 1\n\n int _outputsX = ((int)(ceil((imgSizeY - %(start)s - %(ds)s) / ((float)%(stride)s)))) + 1;\n\n\n NVMatrix nv_gp(%(gp)s, img_channels * _outputsX * _outputsX, batch_size,\n "ProbMaxPool:nv_gp");\n ') num_braces += 1 setup_nv_targets_z = '\n int target_z_dims [] = {\n img_channels,\n imgSizeX,\n imgSizeY,\n batch_size };\n\n if (CudaNdarray_prep_output(& %(targets_z)s, 4, target_z_dims))\n {\n %(fail)s;\n }\n\n { // setup_nv_target brace # 1\n\n NVMatrix nv_targets_z(%(targets_z)s,\n target_z_dims[0] * target_z_dims[1] * target_z_dims[2],\n target_z_dims[3], "ProbMaxPool:nv_targets_z");\n\n ' num_braces += 1 setup_nv_targets_t = '\n int target_t_dims [] = {\n img_channels,\n _outputsX,\n _outputsX,\n batch_size };\n\n if (CudaNdarray_prep_output(& %(targets_t)s, 4, target_t_dims))\n {\n %(fail)s;\n }\n\n { // setup_nv_target brace # 1\n\n NVMatrix nv_targets_t(%(targets_t)s, target_t_dims[0] * target_t_dims[1] * target_t_dims[2],\n target_t_dims[3], "ProbMaxPool:nv_targets_t");\n\n\n float * gp_iszero = CudaNdarray_DEV_DATA(%(gp_iszero)s);\n float * gh_iszero = CudaNdarray_DEV_DATA(%(gh_iszero)s);\n ' num_braces += 1 undo_pool = '\n localProbMaxUndo(nv_h, nv_p, nv_gh, nv_gp, nv_targets_z, nv_targets_t,\n %(ds)s, %(start)s, %(stride)s, _outputsX, imgSizeX, gp_iszero, gh_iszero);\n ' braces = ('}' * num_braces) rval = ((((((((basic_setup + setup_nv_h) + setup_nv_p) + setup_nv_gh) + setup_nv_gp) + setup_nv_targets_z) + setup_nv_targets_t) + undo_pool) + braces) start = self.start stride = self.stride ds = self.ds rval = (rval % locals()) return rval
'.. todo:: WRITEME'
def make_thunk(self, node, storage_map, compute_map, no_recycling):
if not convnet_available():
    raise RuntimeError('Could not compile cuda_convnet')
return super(ProbMaxPoolGrad, self).make_thunk(node, storage_map,
                                               compute_map, no_recycling)
'.. todo:: WRITEME'
def make_node(self, images, filters):
if not isinstance(images.type, CudaNdarrayType):
    raise TypeError('FilterActs: expected images.type to be '
                    'CudaNdarrayType, got ' + str(images.type))
if not isinstance(filters.type, CudaNdarrayType):
    raise TypeError('FilterActs: expected filters.type to be '
                    'CudaNdarrayType, got ' + str(filters.type))
assert images.ndim == 4
assert filters.ndim == 4
channels_broadcastable = filters.type.broadcastable[3]
batch_broadcastable = images.type.broadcastable[3]
rows_broadcastable = False
cols_broadcastable = False
targets_broadcastable = (channels_broadcastable, rows_broadcastable,
                         cols_broadcastable, batch_broadcastable)
targets_type = CudaNdarrayType(broadcastable=targets_broadcastable)
targets = targets_type()
return Apply(self, [images, filters], [targets])
'Useful with the hack in profilemode to print the MFlops'
def flops(self, inputs, outputs):
images, kerns = inputs
out, = outputs
assert images[0] == kerns[0]
flops = kerns[1] * kerns[2] * 2
flops *= out[1] * out[2]
flops *= images[0] * kerns[3] * images[3]
return flops
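As a back-of-the-envelope check of the count above: 2 FLOPs per multiply-add for each of the filter_rows * filter_cols taps, per output location, per (input channel, filter, batch element) triple. With made-up c01b shapes:

images = (3, 32, 32, 128)   # (channels, rows, cols, batch)
kerns = (3, 5, 5, 64)       # (channels, rows, cols, num_filters)
out = (64, 28, 28, 128)     # (num_filters, rows, cols, batch)
flops = (kerns[1] * kerns[2] * 2) * (out[1] * out[2]) * (images[0] * kerns[3] * images[3])
print(flops)                # 963379200, i.e. roughly 0.96 GFLOP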
'.. todo:: WRITEME'
def c_code(self, node, name, inputs, outputs, sub):
(images, filters) = inputs (targets,) = outputs fail = sub['fail'] basic_setup = '\n #define scaleTargets 0\n #define scaleOutput 1\n ' if self.dense_connectivity: basic_setup += '\n #define numGroups 1\n ' assert isinstance(self.pad, py_integer_types) assert (self.pad >= 0), 'pad must be non-negative' basic_setup += ('\n #define paddingStart (-%d)\n ' % self.pad) basic_setup += ('\n #define moduleStride %d\n ' % int(self.stride)) if self.copy_non_contiguous: raise UnimplementedError() else: basic_setup += '#define FILTERACTS_COPY_NON_CONTIGUOUS 0\n' num_braces = 0 setup_nv_images = (self._argument_contiguity_check('images') + '\n if (%(images)s->nd != 4)\n {\n PyErr_Format(PyExc_ValueError,\n "images must have nd=4, got nd=%%i", %(images)s->nd);\n %(fail)s;\n }\n\n { //setup_nv_images brace 1\n const int * images_dims = CudaNdarray_HOST_DIMS(%(images)s);\n const int img_channels = images_dims[0];\n const int imgSizeY = images_dims[1];\n const int imgSizeX = images_dims[2];\n const int batch_size = images_dims[3];\n NVMatrix nv_images(%(images)s, img_channels * imgSizeY * imgSizeX, batch_size,\n "filter_acts:nv_images");\n ') num_braces += 1 setup_nv_filters = (self._argument_contiguity_check('filters') + '\n if (%(filters)s->nd != 4)\n {\n PyErr_Format(PyExc_ValueError,\n "filters must have nd=4, got nd=%%i", %(filters)s->nd);\n %(fail)s;\n }\n\n { // setup_nv_filters brace 1\n const int * filters_dims = CudaNdarray_HOST_DIMS(%(filters)s);\n const int filter_channels = filters_dims[0];\n const int filter_rows = filters_dims[1];\n const int filter_cols = filters_dims[2];\n const int num_filters = filters_dims[3];\n\n if (numGroups * filter_channels != img_channels)\n {\n PyErr_Format(PyExc_ValueError,\n "# input channels mismatch. images have %%d but filters have %%d groups of %%d for a total of %%d.",\n img_channels, numGroups, filter_channels, numGroups * filter_channels);\n %(fail)s;\n }\n\n if ((num_filters %% (numGroups * 16)) != 0)\n {\n PyErr_Format(PyExc_ValueError,\n "Each group must have a multiple of 16 channels, but num_filters %%%% (numGroups * 16) = %%d %%%% ( %%d * 16) = %%d.",\n num_filters, numGroups, num_filters %% (numGroups * 16));\n %(fail)s;\n }\n\n if (filter_rows != filter_cols)\n {\n PyErr_Format(PyExc_ValueError,\n "filter must be square, but instead have shape (%%d, %%d)",\n filter_rows, filter_cols);\n %(fail)s;\n }\n else if (moduleStride > filter_rows) {\n PyErr_Format(PyExc_ValueError,\n "stride %%d greater than filter size (%%d, %%d)",\n moduleStride, filter_rows, filter_cols);\n %(fail)s;\n }\n\n { // setup_nv_filters brace 2\n\n\n NVMatrix nv_filters(%(filters)s, filter_channels * filter_rows *\n filter_cols, num_filters, "filter_acts:nv_filters");\n ') num_braces += 2 div_ms_y = '((imgSizeY - 2*paddingStart - filter_rows) / moduleStride)' div_ms_x = '((imgSizeX - 2*paddingStart - filter_cols) / moduleStride)' mod_ms_y = '((imgSizeY - 2*paddingStart - filter_rows) % moduleStride)' mod_ms_x = '((imgSizeX - 2*paddingStart - filter_cols) % moduleStride)' target_rows = ('%s + ((%s > 0) ? 1 : 0) + 1' % (div_ms_y, mod_ms_y)) target_cols = ('%s + ((%s > 0) ? 
1 : 0) + 1' % (div_ms_x, mod_ms_x)) setup_nv_targets = '\n\n\n int target_dims [] = {\n num_filters,\n %(target_rows)s,\n %(target_cols)s,\n batch_size };\n\n #define numModulesY target_dims[1]\n #define numModulesX target_dims[2]\n\n if (CudaNdarray_prep_output(& %(targets)s, 4, target_dims))\n {\n %(fail)s;\n }\n\n { // setup_nv_filters brace # 1\n\n NVMatrix nv_targets(%(targets)s, target_dims[0] * target_dims[1]\n * target_dims[2], target_dims[3], "filter_acts:nv_targets");\n\n ' num_braces += 1 do_convolution = '\n convFilterActs(nv_images, nv_filters, nv_targets,\n imgSizeY, numModulesY, numModulesX,\n paddingStart, moduleStride, img_channels,\n numGroups, scaleTargets, scaleOutput);\n ' braces = ('}' * num_braces) rval = (((((basic_setup + setup_nv_images) + setup_nv_filters) + setup_nv_targets) + do_convolution) + braces) rval = (rval % locals()) return rval
'.. todo:: WRITEME'
def c_code_cache_version(self):
return (10,)
'.. todo:: WRITEME'
def R_op(self, inputs, evals):
images, filters = inputs
images_ev, filters_ev = evals
if 'Cuda' not in str(type(images)):
    raise TypeError('inputs must be cuda')
if 'Cuda' not in str(type(filters)):
    raise TypeError('filters must be cuda')
if filters_ev is not None:
    sol = self(images, filters_ev)
else:
    sol = None
if images_ev is not None:
    if sol is not None:
        sol += self(images_ev, filters)
    else:
        sol = self(images_ev, filters)
return [sol]
'.. todo:: WRITEME'
def grad(self, inputs, dout):
images, filters = inputs
if 'Cuda' not in str(type(images)):
    raise TypeError('inputs must be cuda')
if 'Cuda' not in str(type(filters)):
    raise TypeError('filters must be cuda')
dout, = dout
dout = gpu_contiguous(dout)
if 'Cuda' not in str(type(dout)):
    raise TypeError('output gradients must be cuda')
ishape = images.shape[1:3]
fshape = filters.shape[1:3]
d_images = ImageActs(self.pad, self.partial_sum, self.stride)(
    dout, filters, ishape)
d_filters = WeightActs(self.pad, self.partial_sum, self.stride)(
    images, dout, fshape)[0]
return d_images, d_filters
'.. todo:: WRITEME'
def __hash__(self):
return hash((self._size_f, self._add_scale, self._pow_scale, self._blocked))
'.. todo:: WRITEME'
def __eq__(self, other):
return ((type(self) == type(other)) and (hash(self) == hash(other)))
'.. todo:: WRITEME'
def make_node(self, images):
if not isinstance(images.type, CudaNdarrayType):
    raise TypeError('CrossMapNorm: expected images.type to be '
                    'CudaNdarrayType, got ' + str(images.type))
assert images.ndim == 4
targets_broadcastable = images.type.broadcastable
targets_type = CudaNdarrayType(broadcastable=targets_broadcastable)
denoms = targets_type()
targets = targets_type()
return Apply(self, [images], [targets, denoms])
'.. todo:: WRITEME'
def c_code(self, node, name, inputs, outputs, sub):
images, = inputs
targets, denoms = outputs
fail = sub['fail']
num_braces = 0
size_f = self._size_f
add_scale = self._add_scale
pow_scale = self._pow_scale
blocked = 'true' if self._blocked else 'false'
class_name = self.__class__.__name__
class_name_upper = class_name.upper()
basic_setup = self._basic_setup
setup_nv_images = (contiguity_check('images') +
                   dimension_check('images', 4) +
                   self._images_setup)
num_braces += 2
setup_nv_targets = output_same_shape('targets', 'images')
num_braces += 1
setup_nv_denoms = output_same_shape('denoms', 'images')
num_braces += 1
do_normalize = ('\n convResponseNormCrossMap(nv_images, nv_denoms, '
                'nv_targets, numFilters, sizeF,\n addScale, powScale, '
                'blocked);\n ')
braces = '}' * num_braces + '\n'
rval = (basic_setup + setup_nv_images + setup_nv_targets +
        setup_nv_denoms + do_normalize + braces)
rval = rval % locals()
return rval
'.. todo:: WRITEME'
def grad(self, inputs, dout):
images, = inputs
acts, denoms = self(images)
dout, _ = dout
dout = as_cuda_ndarray_variable(dout)
dout = gpu_contiguous(dout)
grad_op = CrossMapNormUndo(self._size_f, self._add_scale,
                           self._pow_scale, self._blocked,
                           inplace=False)
return [grad_op(images, acts, denoms, dout)[0]]
'.. todo:: WRITEME'
def __str__(self):
return (self.__class__.__name__ + ('[size_f=%d,add_scale=%f,pow_scale=%f,blocked=%s]' % (self._size_f, self._add_scale, self._pow_scale, self._blocked)))
'.. todo:: WRITEME'
def c_code_cache_version(self):
return (6,)
'.. todo:: WRITEME'
def __hash__(self):
super_hash = super(CrossMapNormUndo, self).__hash__()
return hash((super_hash, self._inplace))