'Returns dim : int The number of elements in the input, if the input is a vector.'
def get_input_dim(self):
    if not isinstance(self.vis_space, VectorSpace):
        raise TypeError("Can't describe " + str(type(self.vis_space)) +
                        ' as a dimensionality number.')
    return self.vis_space.dim
'Returns dim : int The number of elements in the output, if the output is a vector.'
def get_output_dim(self):
    if not isinstance(self.hid_space, VectorSpace):
        raise TypeError("Can't describe " + str(type(self.hid_space)) +
                        ' as a dimensionality number.')
    return self.hid_space.dim
'.. todo:: WRITEME'
def get_input_space(self):
return self.vis_space
'.. todo:: WRITEME'
def get_output_space(self):
return self.hid_space
'.. todo:: WRITEME'
def get_params(self):
return [param for param in self._params]
'.. todo:: WRITEME'
def get_weights(self, borrow=False):
    weights, = self.transformer.get_params()
    return weights.get_value(borrow=borrow)
'.. todo:: WRITEME'
def get_weights_topo(self):
return self.transformer.get_weights_topo()
'.. todo:: WRITEME'
def get_weights_format(self):
return ['v', 'h']
'.. todo:: WRITEME'
def get_monitoring_channels(self, data):
    V = data
    theano_rng = make_theano_rng(None, 42, which_method='binomial')
    H = self.mean_h_given_v(V)
    h = H.mean(axis=0)
    return {'bias_hid_min': T.min(self.bias_hid),
            'bias_hid_mean': T.mean(self.bias_hid),
            'bias_hid_max': T.max(self.bias_hid),
            'bias_vis_min': T.min(self.bias_vis),
            'bias_vis_mean': T.mean(self.bias_vis),
            'bias_vis_max': T.max(self.bias_vis),
            'h_min': T.min(h),
            'h_mean': T.mean(h),
            'h_max': T.max(h),
            'reconstruction_error': self.reconstruction_error(V, theano_rng)}
'Get the data_specs describing the data used by get_monitoring_channels. This implementation returns a specification corresponding to unlabeled inputs. Returns WRITEME'
def get_monitoring_data_specs(self):
return (self.get_input_space(), self.get_input_source())
'Get the contrastive gradients given positive and negative phase visible units. Parameters pos_v : tensor_like Theano symbolic representing a minibatch on the visible units, with the first dimension indexing training examples and the second indexing data dimensions (usually actual training data). neg_v : tensor_like Theano symbolic representing a minibatch on the visible units, with the first dimension indexing training examples and the second indexing data dimensions (usually reconstructions of the data or sampler particles from a persistent Markov chain). Returns grads : list List of Theano symbolic variables representing gradients with respect to model parameters, in the same order as returned by `params()`. Notes `pos_v` and `neg_v` need not have the same first dimension, i.e. minibatch size.'
def ml_gradients(self, pos_v, neg_v):
    ml_cost = (self.free_energy_given_v(pos_v).mean() -
               self.free_energy_given_v(neg_v).mean())
    grads = tensor.grad(ml_cost, self.get_params(),
                        consider_constant=[pos_v, neg_v])
    return grads
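For orientation, a minimal usage sketch of how this gradient list could be wired into the SGD optimizer defined later in this file. The RBM instance `rbm` and optimizer instance `optimizer` are hypothetical names, not part of the source; the gradients come back in the same order as `get_params()`, so they can be passed straight to `updates(gradients=...)`.
import theano
from theano import tensor

pos_v = tensor.matrix('pos_v')   # minibatch of training data
neg_v = tensor.matrix('neg_v')   # negative-phase samples (e.g. PCD particles)
grads = rbm.ml_gradients(pos_v, neg_v)        # one gradient per parameter
updates = optimizer.updates(gradients=grads)  # dict of SGD updates
train_step = theano.function([pos_v, neg_v], updates=updates)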
'.. todo:: WRITEME properly A default learning rule based on SML'
def train_batch(self, dataset, batch_size):
    self.learn_mini_batch(dataset.get_batch_design(batch_size))
    return True
'.. todo:: WRITEME A default learning rule based on SML'
def learn_mini_batch(self, X):
    if not hasattr(self, 'learn_func'):
        self.redo_theano()
    rval = self.learn_func(X)
    return rval
'Compiles the theano function for the default learning rule'
def redo_theano(self):
    init_names = dir(self)
    minibatch = tensor.matrix()
    optimizer = _SGDOptimizer(self, self.base_lr, self.anneal_start)
    sampler = BlockGibbsSampler(
        self, 0.5 + np.zeros((self.nchains, self.get_input_dim())),
        self.rng, steps=self.sml_gibbs_steps)
    updates = training_updates(visible_batch=minibatch, model=self,
                               sampler=sampler, optimizer=optimizer)
    self.learn_func = theano.function([minibatch], updates=updates)
    final_names = dir(self)
    self.register_names_to_del([name for name in final_names
                                if name not in init_names])
'Do a round of block Gibbs sampling given a visible configuration Parameters v : tensor_like Theano symbolic representing the visible unit states for a batch of training examples (or negative phase particles), with the first dimension indexing training examples and the second indexing data dimensions. rng : RandomStreams object Random number generator to use for sampling the hidden and visible units. Returns v_sample : tensor_like Theano symbolic representing the new visible unit state after one round of Gibbs sampling. locals : dict Contains the following auxiliary state as keys (all symbolics except shape tuples): * `h_mean`: the returned value from `mean_h_given_v` * `h_mean_shape`: shape tuple indicating the size of `h_mean` and `h_sample` * `h_sample`: the stochastically sampled hidden units * `v_mean_shape`: shape tuple indicating the shape of `v_mean` and `v_sample` * `v_mean`: the returned value from `mean_v_given_h` * `v_sample`: the stochastically sampled visible units'
def gibbs_step_for_v(self, v, rng):
    h_mean = self.mean_h_given_v(v)
    assert h_mean.type.dtype == v.type.dtype
    h_sample = rng.binomial(size=h_mean.shape, n=1, p=h_mean,
                            dtype=h_mean.type.dtype)
    assert h_sample.type.dtype == v.type.dtype
    v_mean = self.mean_v_given_h(h_sample)
    assert v_mean.type.dtype == v.type.dtype
    v_sample = self.sample_visibles([v_mean], v_mean.shape, rng)
    assert v_sample.type.dtype == v.type.dtype
    return v_sample, locals()
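A chain of several Gibbs rounds is built by feeding each returned `v_sample` back in; a minimal symbolic sketch, assuming a binary RBM instance named `rbm` (hypothetical) and any Theano RandomStreams object:
from theano import tensor
from theano.tensor.shared_randomstreams import RandomStreams

theano_rng = RandomStreams(2015)
v0 = tensor.matrix('v0')
v1, _ = rbm.gibbs_step_for_v(v0, theano_rng)  # one round of block Gibbs
v2, _ = rbm.gibbs_step_for_v(v1, theano_rng)  # unroll a second round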
'Stochastically sample the visible units given hidden unit configurations for a set of training examples. Parameters params : list List of the necessary parameters to sample :math:`p(v|h)`. In the case of a binary-binary RBM this is a single-element list containing the symbolic representing :math:`p(v|h)`, as returned by `mean_v_given_h`. Returns vprime : tensor_like Theano symbolic representing stochastic samples from :math:`p(v|h)`'
def sample_visibles(self, params, shape, rng):
    v_mean = params[0]
    return as_floatX(rng.uniform(size=shape) < v_mean)
'Compute the affine function (linear map plus bias) that serves as input to the hidden layer in an RBM. Parameters v : tensor_like or list of tensor_likes Theano symbolic (or list thereof) representing the one or several minibatches on the visible units, with the first dimension indexing training examples and the second indexing data dimensions. Returns a : tensor_like or list of tensor_likes Theano symbolic (or list thereof) representing the input to each hidden unit for each training example.'
def input_to_h_from_v(self, v):
    if isinstance(v, tensor.Variable):
        return self.bias_hid + self.transformer.lmul(v)
    else:
        return [self.input_to_h_from_v(vis) for vis in v]
'Compute the affine function (linear map plus bias) that serves as input to the visible layer in an RBM. Parameters h : tensor_like or list of tensor_likes Theano symbolic (or list thereof) representing the one or several minibatches on the hidden units, with the first dimension indexing training examples and the second indexing data dimensions. Returns a : tensor_like or list of tensor_likes Theano symbolic (or list thereof) representing the input to each visible unit for each row of h.'
def input_to_v_from_h(self, h):
    if isinstance(h, tensor.Variable):
        return self.bias_vis + self.transformer.lmul_T(h)
    else:
        return [self.input_to_v_from_h(hid) for hid in h]
'Wrapper around mean_h_given_v method. Called when RBM is accessed by mlp.HiddenLayer.'
def upward_pass(self, v):
return self.mean_h_given_v(v)
'Compute the mean activation of the hidden units given visible unit configurations for a set of training examples. Parameters v : tensor_like or list of tensor_likes Theano symbolic (or list thereof) representing the visible unit states for a batch (or several) of training examples, with the first dimension indexing training examples and the second indexing data dimensions. Returns h : tensor_like or list of tensor_likes Theano symbolic (or list thereof) representing the mean (deterministic) hidden unit activations given the visible units.'
def mean_h_given_v(self, v):
    if isinstance(v, tensor.Variable):
        return nnet.sigmoid(self.input_to_h_from_v(v))
    else:
        return [self.mean_h_given_v(vis) for vis in v]
'Compute the mean activation of the visibles given hidden unit configurations for a set of training examples. Parameters h : tensor_like or list of tensor_likes Theano symbolic (or list thereof) representing the hidden unit states for a batch (or several) of training examples, with the first dimension indexing training examples and the second indexing hidden units. Returns vprime : tensor_like or list of tensor_likes Theano symbolic (or list thereof) representing the mean (deterministic) reconstruction of the visible units given the hidden units.'
def mean_v_given_h(self, h):
    if isinstance(h, tensor.Variable):
        return nnet.sigmoid(self.input_to_v_from_h(h))
    else:
        return [self.mean_v_given_h(hid) for hid in h]
'Calculate the free energy of a visible unit configuration by marginalizing over the hidden units. Parameters v : tensor_like Theano symbolic representing the visible unit states for a batch of training examples, with the first dimension indexing training examples and the second indexing data dimensions. Returns f : tensor_like 1-dimensional tensor (vector) representing the free energy associated with each row of v.'
def free_energy_given_v(self, v):
    sigmoid_arg = self.input_to_h_from_v(v)
    return (-tensor.dot(v, self.bias_vis) -
            nnet.softplus(sigmoid_arg).sum(axis=1))
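This is the standard binary-binary RBM identity F(v) = -v.b_vis - sum_j softplus(b_hid_j + (vW)_j). A small plain-NumPy sketch of the same formula, with illustrative shapes only (not the library's API), in case the symbolic version is hard to read:
import numpy as np

def softplus(x):
    return np.log1p(np.exp(x))

def free_energy(v, W, bias_vis, bias_hid):
    pre_h = bias_hid + v.dot(W)          # mirrors input_to_h_from_v
    return -v.dot(bias_vis) - softplus(pre_h).sum(axis=1)

rng = np.random.RandomState(0)
v = (rng.uniform(size=(5, 8)) > 0.5).astype(float)   # 5 examples, 8 visibles
W = rng.randn(8, 3) * 0.1                            # 3 hidden units
print(free_energy(v, W, np.zeros(8), np.zeros(3)).shape)  # (5,)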
'Calculate the free energy of a hidden unit configuration by marginalizing over the visible units. Parameters h : tensor_like Theano symbolic representing the hidden unit states, with the first dimension indexing training examples and the second indexing hidden units. Returns f : tensor_like 1-dimensional tensor (vector) representing the free energy associated with each row of h.'
def free_energy_given_h(self, h):
    sigmoid_arg = self.input_to_v_from_h(h)
    return (-tensor.dot(h, self.bias_hid) -
            nnet.softplus(sigmoid_arg).sum(axis=1))
'Forward propagate (symbolic) input through this module, obtaining a representation to pass on to layers above. This just aliases the `mean_h_given_v()` function for syntactic sugar/convenience.'
def __call__(self, v):
return self.mean_h_given_v(v)
'Compute the mean-squared error (mean over examples, sum over units) across a minibatch after a Gibbs step starting from the training data. Parameters v : tensor_like Theano symbolic representing the visible unit states for a batch of training examples, with the first dimension indexing training examples and the second indexing data dimensions. rng : RandomStreams object Random number generator to use for sampling the hidden and visible units. Returns mse : tensor_like 0-dimensional tensor (essentially a scalar) indicating the mean reconstruction error across the minibatch. Notes The reconstruction used to assess error samples only the hidden units. For the visible units, it uses the conditional mean. No sampling of the visible units is done, to reduce noise in the estimate.'
def reconstruction_error(self, v, rng):
    sample, _locals = self.gibbs_step_for_v(v, rng)
    return ((_locals['v_mean'] - v) ** 2).sum(axis=1).mean()
'.. todo:: WRITEME'
def _modify_updates(self, updates):
    if self.sigma_driver in updates:
        assert self.learn_sigma
        updates[self.sigma_driver] = T.clip(
            updates[self.sigma_driver],
            self.min_sigma / self.sigma_lr_scale,
            self.max_sigma / self.sigma_lr_scale)
'.. todo:: WRITEME'
def score(self, V):
return self.energy_function.score(V)
'.. todo:: WRITEME'
def P_H_given_V(self, V):
return self.energy_function.mean_H_given_V(V)
'.. todo:: WRITEME'
def mean_h_given_v(self, v):
return self.P_H_given_V(v)
'Compute the mean activation of the visibles given hidden unit configurations for a set of training examples. Parameters h : tensor_like Theano symbolic representing the hidden unit states for a batch of training examples, with the first dimension indexing training examples and the second indexing hidden units. Returns vprime : tensor_like Theano symbolic representing the mean (deterministic) reconstruction of the visible units given the hidden units.'
def mean_v_given_h(self, h):
return self.energy_function.mean_V_given_H(h)
'Calculate the free energy of a visible unit configuration by marginalizing over the hidden units. Parameters v : tensor_like Theano symbolic representing the visible unit states for a batch of training examples, with the first dimension indexing training examples and the second indexing data dimensions. Returns f : tensor_like 1-dimensional tensor representing the free energy of the visible unit configuration for each example in the batch'
def free_energy_given_v(self, V):
    # Equivalent explicit form, kept as a comment for reference:
    #     hid_inp = self.input_to_h_from_v(v)
    #     squared_term = ((self.bias_vis - v) ** 2.) / (2. * self.sigma)
    #     rval = squared_term.sum(axis=1) - nnet.softplus(hid_inp).sum(axis=1)
    #     assert len(rval.type.broadcastable) == 1
    return self.energy_function.free_energy(V)
'.. todo:: WRITEME'
def free_energy(self, V):
return self.energy_function.free_energy(V)
'Stochastically sample the visible units given hidden unit configurations for a set of training examples. Parameters params : list List of the necessary parameters to sample :math:`p(v|h)`. In the case of a Gaussian-binary RBM this is a single-element list containing the conditional mean. shape : WRITEME rng : WRITEME Returns vprime : tensor_like Theano symbolic representing stochastic samples from :math:`p(v|h)` Notes If `mean_vis` is specified as `True` in the constructor, this is equivalent to a call to `mean_v_given_h`.'
def sample_visibles(self, params, shape, rng):
    v_mean = params[0]
    if self.mean_vis:
        return v_mean
    else:
        zero_mean = rng.normal(size=shape) * self.sigma
        return zero_mean + v_mean
'.. todo:: WRITEME'
def gibbs_step_for_v(self, v, rng):
    batch_size = v.shape[0]
    h_mean = self.mean_h_given_v(v)
    h_mean_shape = (batch_size, self.nhid)
    h_sample = rng.binomial(size=h_mean_shape, n=1, p=h_mean,
                            dtype=h_mean.dtype)
    s_mu, s_var = self.mean_var_s_given_v_h1(v)
    s_mu_shape = (batch_size, self.nslab)
    s_sample = s_mu + rng.normal(size=s_mu_shape) * tensor.sqrt(s_var)
    v_mean, v_var = self.mean_var_v_given_h_s(h_sample, s_sample)
    v_mean_shape = (batch_size, self.nvis)
    v_sample = rng.normal(size=v_mean_shape) * tensor.sqrt(v_var) + v_mean
    del batch_size
    return v_sample, locals()
'.. todo:: WRITEME'
def sample_visibles(self, params, shape, rng):
raise NotImplementedError('mu_pooled_ssRBM.sample_visibles')
'.. todo:: WRITEME'
def input_to_h_from_v(self, v):
    D = self.Lambda
    alpha = self.alpha

    def sum_s(x):
        return x.reshape((-1, self.nhid, self.n_s_per_h)).sum(axis=2)

    return tensor.add(
        self.b,
        -0.5 * tensor.dot(v * v, D),
        sum_s(self.mu * tensor.dot(v, self.W)),
        sum_s(0.5 * tensor.sqr(tensor.dot(v, self.W)) / alpha))
'.. todo:: WRITEME'
def mean_var_v_given_h_s(self, h, s):
    v_var = 1 / (self.B + tensor.dot(h, self.Lambda.T))
    s3 = s.reshape((-1, self.nhid, self.n_s_per_h))
    hs = h.dimshuffle(0, 1, 'x') * s3
    v_mu = tensor.dot(hs.flatten(2), self.W.T) * v_var
    return v_mu, v_var
'.. todo:: WRITEME'
def mean_var_s_given_v_h1(self, v):
    alpha = self.alpha
    return self.mu + tensor.dot(v, self.W) / alpha, 1.0 / alpha
'.. todo:: WRITEME'
def mean_v_given_h(self, h):
raise NotImplementedError('mu_pooled_ssRBM.mean_v_given_h')
'.. todo:: WRITEME'
def free_energy_given_v(self, v):
    sigmoid_arg = self.input_to_h_from_v(v)
    return tensor.add(0.5 * (self.B * (v ** 2)).sum(axis=1),
                      -tensor.nnet.softplus(sigmoid_arg).sum(axis=1))
'.. todo:: WRITEME'
def expr(self, model, data, **kwargs):
    self.get_data_specs(model)[0].validate(data)
    X = data
    H = model.P_H_given_V(X)
    h = H.mean(axis=0)
    err = abs(h - self.target)
    dead = T.maximum(err - self.eps, 0.0)
    assert dead.ndim == 1
    rval = self.coeff * dead.mean()
    return rval
'.. todo:: WRITEME'
def get_data_specs(self, model):
return (model.get_input_space(), model.get_input_source())
'Return symbolic updates to apply.'
def updates(self):
raise NotImplementedError()
'Initializes parameter-specific learning rate dictionary and shared variables for the annealed base learning rate and iteration number. Parameters base_lr : float The base learning rate before annealing or parameter-specific scaling. kwargs : dict WRITEME Notes Parameter-specific learning rates can be set by passing keyword arguments <name>_lr, where name is the .name attribute of a given parameter.'
def learning_rates_setup(self, base_lr, **kwargs):
    self.learning_rates = {}
    self.base_lr = theano._asarray(base_lr, dtype=theano.config.floatX)
    lr_names_seen = set()
    for parameter in self.params:
        lr_name = '%s_lr' % parameter.name
        if lr_name in lr_names_seen:
            logger.warning('In SGDOptimizer, at least two parameters have '
                           'the same name. Both will be affected by the '
                           'keyword argument {0}.'.format(lr_name))
        lr_names_seen.add(lr_name)
        thislr = kwargs.get(lr_name, 1.0)
        self.learning_rates[parameter] = sharedX(thislr, lr_name)
    for lr_name in lr_names_seen:
        if lr_name in kwargs:
            kwargs.pop(lr_name)
    for kw in six.iterkeys(kwargs):
        if kw[-3:] == '_lr':
            logger.warning('In SGDOptimizer, keyword argument {0} will be '
                           'ignored, because no parameter was found with '
                           'name {1}.'.format(kw, kw[:-3]))
    self.iteration = sharedX(theano._asarray(0, dtype='int32'), name='iter')
    self.annealed = sharedX(base_lr, 'annealed')
'Compute a dictionary of shared variable updates related to annealing the learning rate. Parameters gradients : WRITEME Returns updates : dict A dictionary with the shared variables representing SGD metadata as keys and a symbolic expression of how they are to be updated as values.'
def learning_rate_updates(self, gradients):
    ups = {}
    if self.use_adagrad:
        learn_rates = []
        for param, gp in zip(self.params, gradients):
            acc = self.accumulators[param]
            ups[acc] = acc + (gp ** 2).sum()
            learn_rates.append(self.e0s[param] / (ups[acc] ** 0.5))
    else:
        if self.anneal_start is None:
            annealed = sharedX(self.base_lr)
        else:
            frac = self.anneal_start / (self.iteration + 1.0)
            annealed = tensor.minimum(as_floatX(frac), self.base_lr)
        ups[self.annealed] = annealed
        ups[self.iteration] = self.iteration + 1
        learn_rates = [annealed * self.learning_rates[p]
                       for p in self.params]
    return ups, learn_rates
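In the annealing branch the effective rate at iteration t is min(anneal_start / (t + 1), base_lr): it stays at base_lr early on and then decays like 1/t once anneal_start / (t + 1) falls below base_lr. A tiny NumPy sketch of that schedule, with illustrative constants only:
import numpy as np

base_lr, anneal_start = 0.1, 100.0
t = np.arange(2000)
annealed = np.minimum(anneal_start / (t + 1.0), base_lr)
print(annealed[0], annealed[999], annealed[1999])  # 0.1 0.1 0.05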
'Return symbolic updates to apply given a set of gradients on the parameters being optimized. Parameters gradients : list of tensor_likes List of symbolic gradients for the parameters contained in self.params, in the same order as in self.params. Returns updates : dict A dictionary with the shared variables in self.params as keys and a symbolic expression of how they are to be updated each SGD step as values. Notes `cost_updates` is a convenient helper function that takes all necessary gradients with respect to a given symbolic cost.'
def updates(self, gradients):
    ups = {}
    l_ups, learn_rates = self.learning_rate_updates(gradients)
    safe_update(ups, l_ups)
    p_up = dict(self.sgd_updates(self.params, gradients, learn_rates))
    safe_update(ups, p_up)
    for param, (p_min, p_max) in six.iteritems(self.clipping_values):
        p_min = tensor.as_tensor(p_min)
        p_max = tensor.as_tensor(p_max)
        dtype = param.dtype
        if p_min.dtype != dtype:
            p_min = tensor.cast(p_min, dtype)
        if p_max.dtype != dtype:
            p_max = tensor.cast(p_max, dtype)
        ups[param] = tensor.clip(ups[param], p_min, p_max)
    return ups
'Return symbolic updates to apply given a cost function. Parameters cost : tensor_like Symbolic cost with respect to which the gradients of the parameters should be taken. Should be 0-dimensional (scalar valued). Returns updates : dict A dictionary with the shared variables in self.params as keys and a symbolic expression of how they are to be updated each SGD step as values.'
def cost_updates(self, cost):
    grads = [tensor.grad(cost, p) for p in self.params]
    return self.updates(gradients=grads)
'Return a list of (parameter, update) pairs that can be used as updates in theano.function to implement stochastic gradient descent. Parameters params : list of Variable variables to adjust in order to minimize some cost grads : list of Variable the gradient on each param (with respect to some cost) stepsizes : symbolic scalar or list of one symbolic scalar per param step by this amount times the negative gradient on each iteration'
def sgd_updates(self, params, grads, stepsizes):
    try:
        iter(stepsizes)
    except Exception:
        stepsizes = [stepsizes for p in params]
    if len(params) != len(grads):
        raise ValueError('params and grads have different lens')
    updates = [(p, p - step * gp)
               for step, p, gp in zip(stepsizes, params, grads)]
    return updates
'.. todo:: WRITEME'
def sgd_momentum_updates(self, params, grads, stepsizes, momentum=0.9):
    try:
        iter(stepsizes)
    except Exception:
        stepsizes = [stepsizes for p in params]
    try:
        iter(momentum)
    except Exception:
        momentum = [momentum for p in params]
    if len(params) != len(grads):
        raise ValueError('params and grads have different lens')
    headings = [theano.shared(numpy.zeros_like(p.get_value(borrow=True)))
                for p in params]
    updates = []
    for s, p, gp, m, h in zip(stepsizes, params, grads, momentum, headings):
        updates.append((p, p + s * h))
        updates.append((h, m * h - (1.0 - m) * gp))
    return updates
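The rule keeps a per-parameter "heading" h that blends in each new gradient, h <- m*h - (1 - m)*g, and moves the parameter along it, p <- p + s*h. A minimal plain-Python illustration with made-up numbers (not part of the library); updating p before h reproduces the simultaneous-update semantics of the Theano updates list:
p, h = 1.0, 0.0        # parameter and its heading
step, m = 0.1, 0.9
for g in [2.0, 2.0, 2.0]:          # pretend gradients
    p = p + step * h               # parameter moves along the old heading
    h = m * h - (1.0 - m) * g      # heading blends in the new gradient
print(p, h)                        # p drifts downhill as h accumulates gradient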
'Fits the model to the given training data. Parameters X : ndarray 2D array, each row is one example y : ndarray vector of integer class labels'
def fit(self, X, y):
    if LogisticRegression is None:
        raise RuntimeError('sklearn not available.')
    min_y = y.min()
    max_y = y.max()
    assert min_y == 0
    num_classes = max_y + 1
    assert num_classes > 1
    logistics = []
    for c in xrange(num_classes):
        logger.info('fitting class {0}'.format(c))
        cur_y = (y == c).astype('int32')
        logistics.append(LogisticRegression(C=self.C).fit(X, cur_y))
    return Classifier(logistics)
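The same one-vs-rest recipe can be sketched directly with scikit-learn, outside the Classifier wrapper; the data below is random and purely illustrative:
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.RandomState(0)
X = rng.randn(100, 5)
y = rng.randint(0, 3, size=100)

# one binary logistic regression per class, fit on class-vs-rest labels
models = [LogisticRegression(C=1.0).fit(X, (y == c).astype('int32'))
          for c in range(y.max() + 1)]
scores = np.column_stack([m.decision_function(X) for m in models])
pred = scores.argmax(axis=1)   # predicted class = highest one-vs-rest score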
'.. todo:: WRITEME'
def predict(self, X):
return np.argmax((self.b + np.dot(X, self.W)), 1)
'Map inputs through the encoder function. Parameters inputs : tensor_like or list of tensor_likes Theano symbolic (or list thereof) representing the input minibatch(es) to be encoded. Assumed to be 2-tensors, with the first dimension indexing training examples and the second indexing data dimensions. Returns encoded : tensor_like or list of tensor_like Theano symbolic (or list thereof) representing the corresponding minibatch(es) after encoding.'
def encode(self, inputs):
raise NotImplementedError((str(type(self)) + ' does not implement encode.'))
'Map hidden representations through the decoder function. Parameters hiddens : tensor_like or list of tensor_likes Theano symbolic (or list thereof) representing the input minibatch(es) to be decoded. Assumed to be 2-tensors, with the first dimension indexing training examples and the second indexing data dimensions. Returns decoded : tensor_like or list of tensor_like Theano symbolic (or list thereof) representing the corresponding minibatch(es) after decoding.'
def decode(self, hiddens):
raise NotImplementedError((str(type(self)) + ' does not implement decode.'))
'Reconstruct (decode) the inputs after mapping through the encoder. Parameters inputs : tensor_like or list of tensor_likes Theano symbolic (or list thereof) representing the input minibatch(es) to be encoded and reconstructed. Assumed to be 2-tensors, with the first dimension indexing training examples and the second indexing data dimensions. Returns reconstructed : tensor_like or list of tensor_like Theano symbolic (or list thereof) representing the corresponding reconstructed minibatch(es) after encoding/decoding.'
def reconstruct(self, inputs):
return self.decode(self.encode(inputs))
'Forward propagate (symbolic) input through this module, obtaining a representation to pass on to layers above. This just aliases the `encode()` function for syntactic sugar/convenience.'
def __call__(self, inputs):
return self.encode(inputs)
'WRITEME'
def __init__(self, nvis, nhid, act_enc, act_dec, tied_weights=False, irange=0.001, istdev=None, rng=9001):
    super(Autoencoder, self).__init__()
    assert nvis >= 0, 'Number of visible units must be non-negative'
    assert nhid > 0, 'Number of hidden units must be positive'
    self.input_space = VectorSpace(nvis)
    self.output_space = VectorSpace(nhid)
    self.nvis = nvis
    self.nhid = nhid
    self.irange = irange
    self.istdev = istdev
    self.tied_weights = tied_weights
    self.rng = make_np_rng(rng, which_method='randn')
    self._initialize_hidbias()
    if nvis > 0:
        self._initialize_visbias(nvis)
        self._initialize_weights(nvis)
    else:
        self.visbias = None
        self.weights = None
    seed = int(self.rng.randint(2 ** 30))
    self.s_rng = make_theano_rng(seed, which_method='uniform')
    if tied_weights and self.weights is not None:
        self.w_prime = self.weights.T
    else:
        self._initialize_w_prime(nvis)

    def _resolve_callable(conf, conf_attr):
        # Map a string (or callable) activation spec to an actual callable.
        if conf[conf_attr] is None or conf[conf_attr] == 'linear':
            return None
        if hasattr(conf[conf_attr], '__call__'):
            return conf[conf_attr]
        elif (conf[conf_attr] in globals() and
              hasattr(globals()[conf[conf_attr]], '__call__')):
            return globals()[conf[conf_attr]]
        elif hasattr(tensor.nnet, conf[conf_attr]):
            return getattr(tensor.nnet, conf[conf_attr])
        elif hasattr(tensor, conf[conf_attr]):
            return getattr(tensor, conf[conf_attr])
        else:
            raise ValueError("Couldn't interpret %s value: '%s'"
                             % (conf_attr, conf[conf_attr]))

    self.act_enc = _resolve_callable(locals(), 'act_enc')
    self.act_dec = _resolve_callable(locals(), 'act_dec')
    self._params = [self.visbias, self.hidbias, self.weights]
    if not self.tied_weights:
        self._params.append(self.w_prime)
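A minimal usage sketch of this constructor, assuming the class is importable from its usual pylearn2 location; the dimensions and hyperparameters are illustrative only:
import theano
from theano import tensor
# assumed import path: from pylearn2.models.autoencoder import Autoencoder

ae = Autoencoder(nvis=784, nhid=256, act_enc='sigmoid', act_dec='sigmoid',
                 tied_weights=True, irange=0.01)
X = tensor.matrix('X')
reconstruct_fn = theano.function([X], ae.reconstruct(X))  # decode(encode(X))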
'.. todo:: WRITEME'
def _initialize_weights(self, nvis, rng=None, irange=None, istdev=None):
    if rng is None:
        rng = self.rng
    if irange is None:
        irange = self.irange
    if istdev is None:
        istdev = self.istdev
    if irange is not None:
        assert istdev is None
        W = rng.uniform(-irange, irange, (nvis, self.nhid))
    else:
        assert istdev is not None
        W = rng.randn(nvis, self.nhid) * istdev
    self.weights = sharedX(W, name='W', borrow=True)
'.. todo:: WRITEME'
def _initialize_hidbias(self):
self.hidbias = sharedX(numpy.zeros(self.nhid), name='hb', borrow=True)
'.. todo:: WRITEME'
def _initialize_visbias(self, nvis):
self.visbias = sharedX(numpy.zeros(nvis), name='vb', borrow=True)
'.. todo:: WRITEME'
def _initialize_w_prime(self, nvis, rng=None, irange=None, istdev=None):
    assert not self.tied_weights, ("Can't initialize w_prime in tied "
                                   "weights model; this method shouldn't "
                                   "have been called")
    if rng is None:
        rng = self.rng
    if irange is None:
        irange = self.irange
    if istdev is None:
        istdev = self.istdev
    if irange is not None:
        assert istdev is None
        W = (0.5 - rng.rand(self.nhid, nvis)) * irange
    else:
        assert istdev is not None
        W = rng.randn(self.nhid, nvis) * istdev
    self.w_prime = sharedX(W, name='Wprime', borrow=True)
'Create and initialize the necessary parameters to accept `nvis` sized inputs. Parameters nvis : int Number of visible units for the model. rng : RandomState object or seed, optional NumPy random number generator object (or seed to create one) used to initialize the model parameters. If not provided, the stored rng object (from the time of construction) will be used.'
def set_visible_size(self, nvis, rng=None):
    if self.weights is not None:
        raise ValueError('parameters of this model already initialized; '
                         'create a new object instead')
    if rng is not None:
        self.rng = rng
    else:
        rng = self.rng
    self._initialize_visbias(nvis)
    self._initialize_weights(nvis, rng)
    if not self.tied_weights:
        self._initialize_w_prime(nvis, rng)
    self._set_params()
'Single minibatch activation function. Parameters x : tensor_like Theano symbolic representing the input minibatch. Returns y : tensor_like (Symbolic) hidden unit activations given the input.'
def _hidden_activation(self, x):
    if self.act_enc is None:
        act_enc = lambda x: x
    else:
        act_enc = self.act_enc
    return act_enc(self._hidden_input(x))
'Given a single minibatch, computes the input to the activation nonlinearity without applying it. Parameters x : tensor_like Theano symbolic representing the input minibatch. Returns y : tensor_like (Symbolic) input flowing into the hidden layer nonlinearity.'
def _hidden_input(self, x):
return (self.hidbias + tensor.dot(x, self.weights))
'Wrapper to Autoencoder encode function. Called when autoencoder is accessed by mlp.PretrainedLayer Parameters inputs : WRITEME Returns WRITEME'
def upward_pass(self, inputs):
return self.encode(inputs)
'Map inputs through the encoder function. Parameters inputs : tensor_like or list of tensor_likes Theano symbolic (or list thereof) representing the input minibatch(es) to be encoded. Assumed to be 2-tensors, with the first dimension indexing training examples and the second indexing data dimensions. Returns encoded : tensor_like or list of tensor_like Theano symbolic (or list thereof) representing the corresponding minibatch(es) after encoding.'
def encode(self, inputs):
    if isinstance(inputs, tensor.Variable):
        return self._hidden_activation(inputs)
    else:
        return [self.encode(v) for v in inputs]
'Map hidden representations through the decoder function. Parameters hiddens : tensor_like or list of tensor_likes Theano symbolic (or list thereof) representing the input minibatch(es) to be decoded. Assumed to be 2-tensors, with the first dimension indexing training examples and the second indexing data dimensions. Returns decoded : tensor_like or list of tensor_like Theano symbolic (or list thereof) representing the corresponding minibatch(es) after decoding.'
def decode(self, hiddens):
    if self.act_dec is None:
        act_dec = lambda x: x
    else:
        act_dec = self.act_dec
    if isinstance(hiddens, tensor.Variable):
        return act_dec(self.visbias + tensor.dot(hiddens, self.w_prime))
    else:
        return [self.decode(v) for v in hiddens]
'.. todo:: WRITEME'
def get_weights(self, borrow=False):
return self.weights.get_value(borrow=borrow)
'.. todo:: WRITEME'
def get_weights_format(self):
return ['v', 'h']
'Reconstruct the inputs after corrupting and mapping through the encoder and decoder. Parameters inputs : tensor_like or list of tensor_likes Theano symbolic (or list thereof) representing the input minibatch(es) to be corrupted and reconstructed. Assumed to be 2-tensors, with the first dimension indexing training examples and the second indexing data dimensions. Returns reconstructed : tensor_like or list of tensor_like Theano symbolic (or list thereof) representing the corresponding reconstructed minibatch(es) after corruption and encoding/decoding.'
def reconstruct(self, inputs):
    corrupted = self.corruptor(inputs)
    return super(DenoisingAutoencoder, self).reconstruct(corrupted)
'Calculate (symbolically) the gradient of the hidden activations with respect to their pre-activations, as used by the contracting autoencoder penalty. Parameters inputs : tensor_like or list of tensor_likes Theano symbolic (or list thereof) representing the input minibatch(es) on which the penalty is calculated. Assumed to be 2-tensors, with the first dimension indexing training examples and the second indexing data dimensions. Returns act_grad : tensor_like 2-dimensional tensor representing dh/da for every pre/postsynaptic pair. This is easy to compute by taking the gradient of the sum of the hidden unit activations w.r.t. the presynaptic activity, since the gradient of hiddens.sum() with respect to hiddens is a matrix of ones. Notes Theano\'s differentiation capabilities do not currently allow (efficient) automatic evaluation of the Jacobian, mainly because of the immature state of the `scan` operator. Here we use a "semi-automatic" hack that works for hidden layers of the form :math:`s(Wx + b)`, where `s` is the activation function, :math:`W` is `self.weights`, and :math:`b` is `self.hidbias`, by only taking the derivative of :math:`s` with respect to :math:`a = Wx + b` and manually constructing the Jacobian from there. Because of this, the implementation depends *critically* on the _hidden_input() method implementing only an affine transformation by the weights (i.e. :math:`Wx + b`), and on the activation function `self.act_enc` applying an independent, elementwise operation.'
def _activation_grad(self, inputs):
    acts = self._hidden_input(inputs)
    hiddens = self.act_enc(acts)
    act_grad = tensor.grad(hiddens.sum(), acts)
    return act_grad
'Calculate (symbolically) the contracting autoencoder penalty term. Parameters inputs : tensor_like or list of tensor_likes Theano symbolic (or list thereof) representing the input minibatch(es) on which the penalty is calculated. Assumed to be 2-tensors, with the first dimension indexing training examples and the second indexing data dimensions. Returns jacobian : tensor_like 3-dimensional tensor representing, for each mini-batch example, the Jacobian matrix of the encoder transformation. You can then apply the penalty you want on it, or use the contraction_penalty method to have a default one.'
def jacobian_h_x(self, inputs):
    act_grad = self._activation_grad(inputs)
    jacobian = self.weights * act_grad.dimshuffle(0, 'x', 1)
    return jacobian
'Calculate (symbolically) the contracting autoencoder penalty term. Parameters data : tuple containing one tensor_like or list of tensor_likes Theano symbolic (or list thereof) representing the input minibatch(es) on which the penalty is calculated. Assumed to be 2-tensors, with the first dimension indexing training examples and the second indexing data dimensions. Returns penalty : tensor_like 0-dimensional tensor (scalar) representing the contraction penalty of the encoder transformation, averaged over the mini-batch. Add this to the output of a Cost object, such as SquaredError, to penalize it.'
def contraction_penalty(self, data):
    X = data
    act_grad = self._activation_grad(X)
    frob_norm = tensor.dot(tensor.sqr(act_grad),
                           tensor.sqr(self.weights).sum(axis=0))
    contract_penalty = frob_norm.sum() / X.shape[0]
    return tensor.cast(contract_penalty, X.dtype)
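The dot product here is the usual shortcut for the squared Frobenius norm of the per-example Jacobian: since J_n = W * act_grad[n] (broadcast over the weight rows), ||J_n||_F^2 = sum_j act_grad[n, j]^2 * sum_i W[i, j]^2. A small NumPy check of that identity, with illustrative shapes only:
import numpy as np

rng = np.random.RandomState(0)
act_grad = rng.rand(4, 3)     # (batch, nhid), stand-in for _activation_grad
W = rng.randn(5, 3)           # (nvis, nhid) weights

# explicit per-example Jacobian norm, as jacobian_h_x would build it
explicit = np.array([np.sum((W * g[None, :]) ** 2) for g in act_grad])
# shortcut used by contraction_penalty
shortcut = (act_grad ** 2).dot((W ** 2).sum(axis=0))
print(np.allclose(explicit, shortcut))  # True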
'.. todo:: WRITEME'
def contraction_penalty_data_specs(self):
return (self.get_input_space(), self.get_input_source())
'Stochastic approximation of Hessian Frobenius norm Parameters data : WRITEME Returns WRITEME'
def higher_order_penalty(self, data):
    X = data
    corrupted_inputs = [self.corruptor(X)
                        for times in range(self.num_corruptions)]
    hessian = tensor.concatenate(
        [self.jacobian_h_x(X) - self.jacobian_h_x(corrupted)
         for corrupted in corrupted_inputs])
    return (hessian ** 2).mean()
'.. todo:: WRITEME'
def higher_order_penalty_data_specs(self):
return (self.get_input_space(), self.get_input_source())
'.. todo:: WRITEME'
@functools.wraps(Autoencoder.encode)
def encode(self, inputs):
    current = inputs
    for encoder in self.autoencoders:
        current = encoder.encode(current)
    return current
'.. todo:: WRITEME'
@functools.wraps(Autoencoder.decode)
def decode(self, hiddens):
    current = hiddens
    for decoder in self.autoencoders[::-1]:
        current = decoder.decode(current)
    return current
'.. todo:: WRITEME'
@functools.wraps(Model.get_params)
def get_params(self):
    return reduce(operator.add,
                  [ae.get_params() for ae in self.autoencoders])
'.. todo:: WRITEME'
def _modify_updates(self, updates):
    for autoencoder in self.autoencoders:
        autoencoder.modify_updates(updates)
'Fit underlying estimators. Parameters X : array-like, shape = [n_samples, n_features] Data. y : array-like, shape = [n_samples] or [n_samples, n_classes] Multi-class targets. An indicator matrix turns on multilabel classification. Returns self'
def fit(self, X, y):
    super(DenseMulticlassSVM, self).fit(X, y)
    return self
'Returns the distance of each sample from the decision boundary for each class. Parameters X : array-like, shape = [n_samples, n_features] A 2D ndarray with each row containing the input features for one example. Returns T : array-like, shape = [n_samples, n_classes]'
def decision_function(self, X):
return np.column_stack([estimator.decision_function(X) for estimator in self.estimators_])
'.. todo:: WRITEME'
def get_output_channels(self):
return self.nhid
'.. todo:: WRITEME'
def redo_everything(self):
    self.W = shared(self.rng.randn(self.nhid, self.nvis), name='W')
    self.W.T.name = 'W.T'
'.. todo:: WRITEME'
def weights_format(self):
return ['h', 'v']
'.. todo:: WRITEME'
def optimize_gamma(self, example):
    Y = N.zeros((self.nvis,))
    Y[:] = example
    c = 1e-10 + N.square(self.W.get_value(borrow=True) - example).sum(axis=1)
    A = self.W.get_value(borrow=True).T / c
    x = feature_sign_search(A, Y, self.coeff)
    g = x / c
    return g
'.. todo:: WRITEME'
def train_batch(self, dataset, batch_size):
    X = dataset.get_design_matrix()
    m = X.shape[0]
    assert X.shape[1] == self.nvis
    gamma = N.zeros((batch_size, self.nhid))
    cur_gamma = T.vector(name='cur_gamma')
    cur_v = T.vector(name='cur_v')
    recons = T.dot(cur_gamma, self.W)
    recons.name = 'recons'
    recons_diffs = cur_v - recons
    recons_diffs.name = 'recons_diffs'
    recons_diff_sq = T.sqr(recons_diffs)
    recons_diff_sq.name = 'recons_diff'
    recons_error = T.sum(recons_diff_sq)
    recons_error.name = 'recons_error'
    dict_dists = T.sum(T.sqr(self.W - cur_v), axis=1)
    dict_dists.name = 'dict_dists'
    abs_gamma = abs(cur_gamma)
    abs_gamma.name = 'abs_gamma'
    weighted_dists = T.dot(abs_gamma, dict_dists)
    weighted_dists.name = 'weighted_dists'
    penalty = self.coeff * weighted_dists
    penalty.name = 'penalty'
    debug = 1e-10 * T.sum(dict_dists)
    debug.name = 'debug'
    J = recons_error + penalty + debug
    J.name = 'J'
    Jf = function([cur_v, cur_gamma], J)
    start = self.rng.randint(m - batch_size + 1)
    batch_X = X[start:start + batch_size, :]
    logger.info('optimizing gamma')
    for i in xrange(batch_size):
        gamma[i, :] = self.optimize_gamma(batch_X[i, :])
    logger.info('max min')
    logger.info(N.abs(gamma).min(axis=0).max())
    logger.info('min max')
    logger.info(N.abs(gamma).max(axis=0).max())
    logger.info('optimizing W')
    logger.warning("not tested since switching to Razvan's all-theano "
                   "implementation of linear cg")
    cg.linear_cg(J, [self.W], max_iters=3)
    err = 0.0
    for i in xrange(batch_size):
        err += Jf(batch_X[i, :], gamma[i, :])
    assert not N.isnan(err)
    assert not N.isinf(err)
    logger.info('err: {0}'.format(err))
    return True
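The per-example objective assembled above is J = ||v - gamma W||^2 + coeff * sum_h |gamma_h| * ||W_h - v||^2 (plus a tiny stabilizing term). A plain-NumPy sketch of evaluating J for one example, with made-up shapes and a coefficient chosen only for illustration:
import numpy as np

rng = np.random.RandomState(0)
nvis, nhid, coeff = 6, 4, 0.1
W = rng.randn(nhid, nvis)        # dictionary, one atom per row
v = rng.randn(nvis)              # one example
gamma = rng.randn(nhid) * 0.1    # sparse code (would come from optimize_gamma)

recons_error = np.sum((v - gamma.dot(W)) ** 2)
dict_dists = np.sum((W - v) ** 2, axis=1)       # distance of each atom to v
penalty = coeff * np.abs(gamma).dot(dict_dists)
J = recons_error + penalty + 1e-10 * dict_dists.sum()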
'Initialize the biases of the mapping units.'
def _initialize_mapbias(self):
self.mapbias = sharedX(numpy.zeros(self.nmap), name='mb', borrow=True)
'Initialize the biases of the first set of visible units.'
def _initialize_visbiasX(self, nvisx):
self.visbiasX = sharedX(numpy.zeros(nvisx), name='vbX', borrow=True)
'Initialize the biases of the second set of visible units.'
def _initialize_visbiasY(self, nvisy):
self.visbiasY = sharedX(numpy.zeros(nvisy), name='vbY', borrow=True)
'Creation of weight matrix wxf.'
def _initialize_wxf(self, nvisx, nfac, rng=None, irange=None):
    if rng is None:
        rng = self.rng
    if irange is None:
        irange = self.irange
    self.wxf = sharedX(rng.randn(nvisx, nfac) * irange, name='wxf',
                       borrow=True)
'Creation of weight matrix wyf.'
def _initialize_wyf(self, nvisy, nfac, rng=None, irange=None):
    if rng is None:
        rng = self.rng
    if irange is None:
        irange = self.irange
    self.wyf = sharedX(rng.randn(nvisy, nfac) * irange, name='wyf',
                       borrow=True)
'Creation of encoding weight matrix whf.'
def _initialize_whf(self, nmap, nfac, rng=None, irange=None):
    if rng is None:
        rng = self.rng
    if irange is None:
        irange = self.irange
    self.whf = sharedX(rng.randn(nmap, nfac) * irange, name='whf',
                       borrow=True)
'Creation of decoding weight matrix whf.'
def _initialize_whf_in(self, nmap, nfac, rng=None, irange=None):
    if rng is None:
        rng = self.rng
    if irange is None:
        irange = self.irange
    self.whf_in = sharedX(rng.randn(nmap, nfac) * irange, name='whf_in',
                          borrow=True)
'Applies the filters wxf to the first input and returns the corresponding factors'
def _factorsX(self, inputs):
return tensor.dot(inputs[0], self.wxf)
'Applies the filters wyf to the second input and returns the corresponding factors'
def _factorsY(self, inputs):
return tensor.dot(inputs[1], self.wyf)
'Returns the mapping units.'
def _mappings(self, inputs):
return (self.mapbias + tensor.dot((self._factorsX(inputs) * self._factorsY(inputs)), self.whf_in.T))
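The mapping units arise from a multiplicative (gated) interaction between the two factor projections. A shape-only NumPy sketch of the same computation, with illustrative dimensions (not the library's API):
import numpy as np

nvisx, nvisy, nfac, nmap, batch = 6, 7, 4, 3, 5
rng = np.random.RandomState(0)
x, y = rng.rand(batch, nvisx), rng.rand(batch, nvisy)
wxf, wyf = rng.randn(nvisx, nfac), rng.randn(nvisy, nfac)
whf_in, mapbias = rng.randn(nmap, nfac), np.zeros(nmap)

factors_x = x.dot(wxf)                                       # _factorsX
factors_y = y.dot(wyf)                                       # _factorsY
mappings = mapbias + (factors_x * factors_y).dot(whf_in.T)   # _mappings
print(mappings.shape)                                        # (batch, nmap)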
'Single minibatch activation function. Parameters inputs : tensor_like Theano symbolic representing the input minibatch, consisting of a tuple of spaces with sizes (nvisX, nvisY). Returns y : tensor_like (Symbolic) hidden unit activations given the input.'
def _hidden_activation(self, inputs):
    if self.act_enc is None:
        act_enc = lambda x: x
    else:
        act_enc = self.act_enc
    return act_enc(self._mappings(inputs))
'Returns the factors corresponding to the mapping units.'
def _factorsH(self, inputs):
return tensor.dot(self._hidden_activation(inputs), self.whf)
'Returns the reconstruction of \'x\' before applying the act_dec function Parameters inputs : tuple Tuple (length 2) of Theano symbolics representing the input minibatch(es) to be encoded. Assumed to be 2-tensors, with the first dimension indexing training examples and the second indexing the two data dimensions (X, Y).'
def decodeX(self, inputs):
return (self.visbiasX + tensor.dot((self._factorsY(inputs) * self._factorsH(inputs)), self.wxf.T))
'Returns the reconstruction of \'y\' before applying the act_dec function Parameters inputs : tuple Tuple (length 2) of Theano symbolics representing the input minibatch(es) to be encoded. Assumed to be 2-tensors, with the first dimension indexing training examples and the second indexing the two data dimensions (X, Y).'
def decodeY(self, inputs):
return (self.visbiasY + tensor.dot((self._factorsX(inputs) * self._factorsH(inputs)), self.wyf.T))