'.. todo:: WRITEME'
def _get_positive_phase(self, model, X, Y=None):
return (self._get_variational_pos(model, X, Y), OrderedDict())
'''.. todo:: WRITEME

    d/d theta log Z = (d/d theta Z) / Z
                    = (d/d theta sum_h sum_v exp(-E(v,h))) / Z
                    = (sum_h sum_v - exp(-E(v,h)) d/d theta E(v,h)) / Z
                    = - sum_h sum_v P(v,h) d/d theta E(v,h)
'''
def _get_negative_phase(self, model, X, Y=None):
layer_to_chains = model.make_layer_to_state(self.num_chains)

def recurse_check(l):
    if isinstance(l, (list, tuple)):
        for elem in l:
            recurse_check(elem)
    else:
        assert l.get_value().shape[0] == self.num_chains

recurse_check(layer_to_chains.values())
model.layer_to_chains = layer_to_chains

(updates, layer_to_chains) = model.get_sampling_updates(
    layer_to_chains, self.theano_rng,
    num_steps=self.num_gibbs_steps,
    return_layer_to_updated=True)

if self.toronto_neg:
    neg_phase_grads = self._get_toronto_neg(model, layer_to_chains)
else:
    neg_phase_grads = self._get_standard_neg(model, layer_to_chains)

return (neg_phase_grads, updates)
''
def __init__(self, num_chains, num_gibbs_steps, supervised=False):
self.__dict__.update(locals())
del self.self
self.theano_rng = MRG_RandomStreams(2012 + 10 + 14)
assert supervised in [True, False]
'The partition function makes this intractable. Parameters model : Model data : Batch in get_data_specs format Returns None : (always returns None because it\'s intractable)'
def expr(self, model, data):
if self.supervised:
    (X, Y) = data
    assert Y is not None
return None
'PCD approximation to the gradient of the bound. Keep in mind this is a cost, so we are upper bounding the negative log likelihood. Parameters model : DBM data : Batch in get_data_specs_format Returns grads : OrderedDict Dictionary mapping from parameters to (approximate) gradients updates : OrderedDict Dictionary containing the Gibbs sampling updates used to maintain the Markov chain used for PCD'
def get_gradients(self, model, data):
if self.supervised:
    (X, Y) = data
    assert Y is not None
    assert isinstance(model.hidden_layers[-1], dbm.Softmax)
else:
    X = data
    Y = None

q = model.mf(X, Y)

"""
Use the non-negativity of the KL divergence to construct a lower bound
on the log likelihood. We can drop all terms that are constant with
respect to the model parameters:

log P(v) = L(v, q) + KL(q || P(h|v))
L(v, q) = log P(v) - KL(q || P(h|v))
L(v, q) = log P(v) - sum_h q(h) log q(h) + q(h) log P(h | v)
L(v, q) = log P(v) + sum_h q(h) log P(h | v) + const
L(v, q) = log P(v) + sum_h q(h) log P(h, v) - sum_h q(h) log P(v) + C
L(v, q) = sum_h q(h) log P(h, v) + C
L(v, q) = sum_h q(h) - E(h, v) - log Z + C

so the cost we want to minimize is
expected_energy + log Z + C

Note: for the RBM, this bound is exact, since the KL divergence
goes to 0.
"""

variational_params = flatten(q)

expected_energy_q = model.expected_energy(X, q).mean()
params = list(model.get_params())
grads = T.grad(expected_energy_q, params,
               consider_constant=variational_params,
               disconnected_inputs='ignore')
gradients = OrderedDict(safe_zip(params, grads))

"""
d/d theta log Z = (d/d theta Z) / Z
                = (d/d theta sum_h sum_v exp(-E(v,h))) / Z
                = (sum_h sum_v - exp(-E(v,h)) d/d theta E(v,h)) / Z
                = - sum_h sum_v P(v,h) d/d theta E(v,h)
"""

layer_to_chains = model.make_layer_to_state(self.num_chains)

def recurse_check(l):
    if isinstance(l, (list, tuple)):
        for elem in l:
            recurse_check(elem)
    else:
        assert l.get_value().shape[0] == self.num_chains

recurse_check(layer_to_chains.values())
model.layer_to_chains = layer_to_chains

gsu = model.get_sampling_updates
(updates, layer_to_chains) = gsu(layer_to_chains, self.theano_rng,
                                 num_steps=self.num_gibbs_steps,
                                 return_layer_to_updated=True)

# This cost is hard-coded for one specific DBM architecture: a binary
# visible layer, two binary max-pooling layers with pool_size 1, and a
# Softmax label layer on top.
assert isinstance(model.visible_layer, dbm.BinaryVector)
assert isinstance(model.hidden_layers[0], dbm.BinaryVectorMaxPool)
assert model.hidden_layers[0].pool_size == 1
assert isinstance(model.hidden_layers[1], dbm.BinaryVectorMaxPool)
assert model.hidden_layers[1].pool_size == 1
assert isinstance(model.hidden_layers[2], dbm.Softmax)
assert len(model.hidden_layers) == 3

V_samples = layer_to_chains[model.visible_layer]
(H1_samples, H2_samples, Y_samples) = [layer_to_chains[layer]
                                       for layer in model.hidden_layers]

sa = model.hidden_layers[0].downward_state(H1_samples)
V_mf = model.visible_layer.inpaint_update(layer_above=model.hidden_layers[0],
                                          state_above=sa)

f = model.hidden_layers[0].mf_update
sb = model.visible_layer.upward_state(V_samples)
sa = model.hidden_layers[1].downward_state(H2_samples)
H1_mf = f(state_below=sb, state_above=sa,
          layer_above=model.hidden_layers[1])

f = model.hidden_layers[1].mf_update
sb = model.hidden_layers[0].upward_state(H1_samples)
sa = model.hidden_layers[2].downward_state(Y_samples)
H2_mf = f(state_below=sb, state_above=sa,
          layer_above=model.hidden_layers[2])

sb = model.hidden_layers[1].upward_state(H2_samples)
Y_mf = model.hidden_layers[2].mf_update(state_below=sb)

e1 = model.energy(V_samples, [H1_mf, H2_samples, Y_mf]).mean()
e2 = model.energy(V_mf, [H1_samples, H2_mf, Y_samples]).mean()
expected_energy_p = 0.5 * (e1 + e2)

constants = flatten([V_samples, V_mf, H1_samples, H1_mf,
                     H2_samples, H2_mf, Y_mf, Y_samples])

neg_phase_grads = OrderedDict(
    safe_zip(params, T.grad(-expected_energy_p, params,
                            consider_constant=constants)))

for param in list(gradients.keys()):
    gradients[param] = neg_phase_grads[param] + gradients[param]

return (gradients, updates)
'.. todo:: WRITEME'
def _get_positive_phase(self, model, X, Y=None):
return (self._get_variational_pos(model, X, Y), OrderedDict())
'''.. todo:: WRITEME

    d/d theta log Z = (d/d theta Z) / Z
                    = (d/d theta sum_h sum_v exp(-E(v,h))) / Z
                    = (sum_h sum_v - exp(-E(v,h)) d/d theta E(v,h)) / Z
                    = - sum_h sum_v P(v,h) d/d theta E(v,h)
'''
def _get_negative_phase(self, model, X, Y=None):
layer_to_clamp = OrderedDict([(model.visible_layer, True)])
layer_to_chains = model.make_layer_to_symbolic_state(self.num_chains,
                                                     self.theano_rng)
layer_to_chains[model.visible_layer] = X

if self.supervised:
    assert Y is not None
    assert isinstance(model.hidden_layers[-1], Softmax)
    layer_to_clamp[model.hidden_layers[-1]] = True
    layer_to_chains[model.hidden_layers[-1]] = Y

model.layer_to_chains = layer_to_chains

layer_to_chains = model.sampling_procedure.sample(
    layer_to_chains, self.theano_rng,
    layer_to_clamp=layer_to_clamp, num_steps=1)

layer_to_chains = model.sampling_procedure.sample(
    layer_to_chains, self.theano_rng,
    num_steps=self.num_gibbs_steps)

if self.toronto_neg:
    neg_phase_grads = self._get_toronto_neg(model, layer_to_chains)
else:
    neg_phase_grads = self._get_standard_neg(model, layer_to_chains)

return (neg_phase_grads, OrderedDict())
'Returns the expression for the Cost. Parameters model : Model data : Batch in get_data_specs format return_locals : bool If returns locals is True, returns (objective, locals()) Note that this means adding / removing / changing the value of local variables is an interface change. In particular, TorontoSparsity depends on "terms" and "H_hat" kwargs : optional keyword arguments for FixedVarDescr'
def expr(self, model, data, return_locals=False, **kwargs):
self.get_data_specs(model)[0].validate(data)
if self.supervised:
    (X, Y) = data
else:
    X = data
    Y = None
H_hat = model.mf(X, Y=Y)

terms = []
hidden_layers = model.hidden_layers
for (layer, mf_state, targets, coeffs) in safe_zip(hidden_layers, H_hat,
                                                   self.targets,
                                                   self.coeffs):
    try:
        cost = layer.get_l2_act_cost(mf_state, targets, coeffs)
    except NotImplementedError:
        if isinstance(coeffs, float) and coeffs == 0.0:
            cost = 0.0
        else:
            raise
    terms.append(cost)

objective = sum(terms)

if return_locals:
    return (objective, locals())
return objective
'Returns the FixedVarDescr object responsible for making sure the masks that determine which units are inputs and outputs are generated each time a minibatch is loaded. Parameters model : DBM data : Batch in get_data_specs format'
def get_fixed_var_descr(self, model, data):
(X, Y) = data
assert Y is not None

batch_size = model.batch_size

drop_mask_X = sharedX(model.get_input_space().get_origin_batch(batch_size))
drop_mask_X.name = 'drop_mask'

X_space = model.get_input_space()

updates = OrderedDict()
rval = FixedVarDescr()
inputs = [X, Y]

if not self.supervised:
    update_X = self.mask_gen(X, X_space=X_space)
else:
    drop_mask_Y = sharedX(np.ones(batch_size))
    drop_mask_Y.name = 'drop_mask_Y'
    (update_X, update_Y) = self.mask_gen(X, Y, X_space)
    updates[drop_mask_Y] = update_Y
    rval.fixed_vars['drop_mask_Y'] = drop_mask_Y

if self.mask_gen.sync_channels:
    n = update_X.ndim
    assert n == drop_mask_X.ndim - 1
    update_X.name = 'raw_update_X'
    zeros_like_X = T.zeros_like(X)
    zeros_like_X.name = 'zeros_like_X'
    update_X = zeros_like_X + update_X.dimshuffle(0, 1, 2, 'x')
    update_X.name = 'update_X'

updates[drop_mask_X] = update_X
rval.fixed_vars['drop_mask'] = drop_mask_X

if hasattr(model.inference_procedure, 'V_dropout'):
    include_prob = model.inference_procedure.include_prob
    include_prob_V = model.inference_procedure.include_prob_V
    include_prob_Y = model.inference_procedure.include_prob_Y

    theano_rng = make_theano_rng(None, 2012 + 10 + 20,
                                 which_method='binomial')
    for elem in flatten([model.inference_procedure.V_dropout]):
        updates[elem] = theano_rng.binomial(p=include_prob_V,
                                            size=elem.shape,
                                            dtype=elem.dtype,
                                            n=1) / include_prob_V
    if 'Softmax' in str(type(model.hidden_layers[-1])):
        hid = model.inference_procedure.H_dropout[:-1]
        y = model.inference_procedure.H_dropout[-1]
        updates[y] = theano_rng.binomial(p=include_prob_Y, size=y.shape,
                                         dtype=y.dtype,
                                         n=1) / include_prob_Y
    else:
        hid = model.inference_procedure.H_dropout
    for elem in flatten(hid):
        updates[elem] = theano_rng.binomial(p=include_prob,
                                            size=elem.shape,
                                            dtype=elem.dtype,
                                            n=1) / include_prob

rval.on_load_batch = [utils.function(inputs, updates=updates)]

return rval
'Returns the generalized pseudolikelihood given raw data, a mask, and the output of inference. Parameters dbm : DBM X : a batch of inputs V_hat_unmasked : A batch of reconstructions of X drop_mask : A batch of mask values state : Hidden states of the DBM Y : a batch of labels drop_mask_Y : A batch of Y mask values'
def get_inpaint_cost(self, dbm, X, V_hat_unmasked, drop_mask, state, Y, drop_mask_Y):
rval = dbm.visible_layer.recons_cost(X, V_hat_unmasked, drop_mask,
                                     use_sum=self.use_sum)
if self.supervised:
    scale = None
    if self.use_sum:
        scale = 1.0
    else:
        scale = 1.0 / float(dbm.get_input_space().get_total_dimension())
    Y_hat_unmasked = state['Y_hat_unmasked']
    rc = dbm.hidden_layers[-1].recons_cost
    rval = rval + rc(Y, Y_hat_unmasked, drop_mask_Y, scale)
return rval
'Returns the total cost, given the states produced by inference. This includes activity regularization costs, not just generalized pseudolikelihood costs. Parameters state : The state of the model after inference. new_state : OrderedDict The state of the model after inference with a different mask. dbm : DBM. X : A batch of input pixels. Y : A batch of output labels. drop_mask : A batch of mask values determining which pixels are inputs. drop_mask_Y : Theano matrix A batch of mask values determining which labels are inputs. new_drop_mask : The second mask. new_drop_mask_Y : The second label mask. return_locals : bool If True, return all local variables Returns cost : Theano expression for the cost locals : Optional If return_locals is True, returns the dictionary of all local variables. Note that this means all implementation changes are now API changes.'
def cost_from_states(self, state, new_state, dbm, X, Y, drop_mask, drop_mask_Y, new_drop_mask, new_drop_mask_Y, return_locals=False):
if not self.supervised:
    assert drop_mask_Y is None
    assert new_drop_mask_Y is None
if self.supervised:
    assert drop_mask_Y is not None
    if self.both_directions:
        assert new_drop_mask_Y is not None
    assert Y is not None

V_hat_unmasked = state['V_hat_unmasked']
assert V_hat_unmasked.ndim == X.ndim

if not hasattr(self, 'use_sum'):
    self.use_sum = False

inpaint_cost = self.get_inpaint_cost(dbm, X, V_hat_unmasked, drop_mask,
                                     state, Y, drop_mask_Y)

if not hasattr(self, 'both_directions'):
    self.both_directions = False

assert self.both_directions == (new_state is not None)

if new_state is not None:
    new_V_hat_unmasked = new_state['V_hat_unmasked']

    rc = dbm.visible_layer.recons_cost
    new_inpaint_cost = rc(X, new_V_hat_unmasked, new_drop_mask)
    if self.supervised:
        new_Y_hat_unmasked = new_state['Y_hat_unmasked']
        scale = None
        raise NotImplementedError("This branch appears to be broken, "
                                  "needs to define scale.")
        new_inpaint_cost = new_inpaint_cost + \
            dbm.hidden_layers[-1].recons_cost(Y, new_Y_hat_unmasked,
                                              new_drop_mask_Y, scale)
    inpaint_cost = 0.5 * inpaint_cost + 0.5 * new_inpaint_cost

total_cost = inpaint_cost

if not hasattr(self, 'range_rewards'):
    self.range_rewards = None
if self.range_rewards is not None:
    for (layer, mf_state, coeffs) in safe_izip(dbm.hidden_layers,
                                               state['H_hat'],
                                               self.range_rewards):
        try:
            layer_cost = layer.get_range_rewards(mf_state, coeffs)
        except NotImplementedError:
            if coeffs == 0.0:
                layer_cost = 0.0
            else:
                raise
        if layer_cost != 0.0:
            total_cost += layer_cost

if not hasattr(self, 'stdev_rewards'):
    self.stdev_rewards = None
if self.stdev_rewards is not None:
    assert False
    for (layer, mf_state, coeffs) in safe_izip(dbm.hidden_layers,
                                               state['H_hat'],
                                               self.stdev_rewards):
        try:
            layer_cost = layer.get_stdev_rewards(mf_state, coeffs)
        except NotImplementedError:
            if coeffs == 0.0:
                layer_cost = 0.0
            else:
                raise
        if layer_cost != 0.0:
            total_cost += layer_cost

l1_act_cost = None
if self.l1_act_targets is not None:
    l1_act_cost = 0.0
    if self.l1_act_eps is None:
        self.l1_act_eps = [None] * len(self.l1_act_targets)
    for (layer, mf_state, targets, coeffs, eps) in \
            safe_izip(dbm.hidden_layers, state['H_hat'],
                      self.l1_act_targets, self.l1_act_coeffs,
                      self.l1_act_eps):
        assert not isinstance(targets, str)
        try:
            layer_cost = layer.get_l1_act_cost(mf_state, targets,
                                               coeffs, eps)
        except NotImplementedError:
            if coeffs == 0.0:
                layer_cost = 0.0
            else:
                raise
        if layer_cost != 0.0:
            l1_act_cost += layer_cost
    total_cost += l1_act_cost

if not hasattr(self, 'hid_presynaptic_cost'):
    self.hid_presynaptic_cost = None
if self.hid_presynaptic_cost is not None:
    assert False
    for (c, s) in safe_izip(self.hid_presynaptic_cost, state['H_hat']):
        if c == 0.0:
            continue
        s = s[1]
        assert hasattr(s, 'owner')
        owner = s.owner
        assert owner is not None
        op = owner.op

        if not hasattr(op, 'scalar_op'):
            raise ValueError("Expected V_hat_unmasked to be generated "
                             "by an Elemwise op, got " + str(op) +
                             " of type " + str(type(op)))
        assert isinstance(op.scalar_op, T.nnet.sigm.ScalarSigmoid)
        (z,) = owner.inputs

        total_cost += c * T.sqr(z).mean()

if not hasattr(self, 'reweighted_act_targets'):
    self.reweighted_act_targets = None
reweighted_act_cost = None
if self.reweighted_act_targets is not None:
    reweighted_act_cost = 0.0
    warnings.warn("reweighted_act_cost is hardcoded for sigmoid layers "
                  "and doesn't check that this is what we get.")
    for (c, t, s) in safe_izip(self.reweighted_act_coeffs,
                               self.reweighted_act_targets,
                               state['H_hat']):
        if c == 0:
            continue
        (s, _) = s
        m = s.mean(axis=0)
        d = T.sqr(m - t)
        weight = 1.0 / (1e-7 + s * (1 - s))
        reweighted_act_cost += c * (weight * d).mean()
    total_cost += reweighted_act_cost

total_cost.name = 'total_cost(V_hat_unmasked = %s)' % V_hat_unmasked.name

if return_locals:
    return (total_cost, locals())
return total_cost
'Provides the mask for multi-prediction training. A 1 in the mask corresponds to a variable that should be used as an input to the inference process. A 0 corresponds to a variable that should be used as a prediction target of the multi-prediction training criterion. Parameters X : Variable A batch of input features to mask for multi-prediction training Y : Variable A batch of input class labels to mask for multi-prediction Training Returns drop_mask : Variable A Theano expression for a random binary mask in the same shape as `X` drop_mask_Y : Variable, only returned if `Y` is not None A Theano expression for a random binary mask in the same shape as `Y` Notes Calling this repeatedly will yield the same random numbers each time.'
def __call__(self, X, Y=None, X_space=None):
assert X_space is not None
self.called = True
assert X.dtype == config.floatX
theano_rng = make_theano_rng(getattr(self, 'seed', None), default_seed,
                             which_method='binomial')

if X.ndim == 2 and self.sync_channels:
    raise NotImplementedError()

p = self.drop_prob

if not hasattr(self, 'drop_prob_y') or self.drop_prob_y is None:
    yp = p
else:
    yp = self.drop_prob_y

batch_size = X_space.batch_size(X)

if self.balance:
    flip = theano_rng.binomial(size=(batch_size,), p=0.5, n=1,
                               dtype=X.dtype)

    yp = flip * (1 - p) + (1 - flip) * p

    dimshuffle_args = ['x'] * X.ndim

    if X.ndim == 2:
        dimshuffle_args[0] = 0
        assert not self.sync_channels
    else:
        dimshuffle_args[X_space.axes.index('b')] = 0
        if self.sync_channels:
            del dimshuffle_args[X_space.axes.index('c')]

    flip = flip.dimshuffle(*dimshuffle_args)

    p = flip * (1 - p) + (1 - flip) * p

size = tuple([X.shape[i] for i in xrange(X.ndim)])
if self.sync_channels:
    del size[X_space.axes.index('c')]

drop_mask = theano_rng.binomial(size=size, p=p, n=1, dtype=X.dtype)

X_name = make_name(X, 'anon_X')
drop_mask.name = 'drop_mask(%s)' % X_name

if Y is not None:
    assert isinstance(yp, float) or yp.ndim < 2
    drop_mask_Y = theano_rng.binomial(size=(batch_size,), p=yp, n=1,
                                      dtype=X.dtype)
    assert drop_mask_Y.ndim == 1
    Y_name = make_name(Y, 'anon_Y')
    drop_mask_Y.name = 'drop_mask_Y(%s)' % Y_name
    return (drop_mask, drop_mask_Y)

return drop_mask
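As a rough illustration of the mask semantics described above (each entry is an independent Bernoulli draw, and in the balanced mode the drop probability is flipped between p and 1 - p per example), here is a minimal NumPy sketch. It is independent of the Theano implementation above, and toy_drop_mask is a hypothetical helper, not part of pylearn2.

import numpy as np

def toy_drop_mask(X, p=0.5, balance=False, rng=None):
    # X: (batch, dim) array; returns a 0/1 mask with the same shape as X.
    rng = np.random.RandomState(0) if rng is None else rng
    probs = np.full((X.shape[0], 1), p)
    if balance:
        # per-example coin flip between p and 1 - p, mirroring the balanced mode
        flip = rng.binomial(1, 0.5, size=(X.shape[0], 1))
        probs = flip * (1 - p) + (1 - flip) * p
    return (rng.uniform(size=X.shape) < probs).astype(X.dtype)

mask = toy_drop_mask(np.zeros((4, 6)), p=0.3, balance=True)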
'Returns a theano expression for the cost function. Returns a symbolic expression for a cost function applied to the minibatch of data. Optionally, may return None. This represents that the cost function is intractable but may be optimized via the get_gradients method. Parameters model : a pylearn2 Model instance data : a batch in cost.get_data_specs() form kwargs : dict Optional extra arguments. Not used by the base class.'
def expr(self, model, data, **kwargs):
try:
    per_example = self.cost_per_example(model, data, **kwargs)
except NotImplementedError:
    raise NotImplementedError(str(type(self)) +
                              ' does not implement expr.')

if per_example is None:
    return None

assert per_example.ndim == 1

return per_example.mean()
'Returns a theano expression for the cost per example. This method is optional. Most training algorithms will work without it. Parameters model : Model data : a batch in cost.get_data_specs() form kwargs : dict Optional extra arguments to be used by 3rd party TrainingAlgorithm classes and/or FixedVarDescr. Returns cost_per_example : 1-D Theano tensor Each element of this vector gives the cost for one example. The overall cost is the mean of this vector.'
def cost_per_example(self, model, data, **kwargs):
raise NotImplementedError(str(type(self)) +
                          ' does not implement cost_per_example.')
'Provides the gradients of the cost function with respect to the model parameters. These are not necessarily those obtained by theano.tensor.grad --you may wish to use approximate or even intentionally incorrect gradients in some cases. Parameters model : a pylearn2 Model instance data : a batch in cost.get_data_specs() form kwargs : dict Optional extra arguments, not used by the base class. Returns gradients : OrderedDict a dictionary mapping from the model\'s parameters to their gradients The default implementation is to compute the gradients using T.grad applied to the value returned by expr. However, subclasses may return other values for the gradient. For example, an intractable cost may return a sampling-based approximation to its gradient. updates : OrderedDict a dictionary mapping shared variables to updates that must be applied to them each time these gradients are computed. This is to facilitate computation of sampling-based approximate gradients. The parameters should never appear in the updates dictionary. This would imply that computing their gradient changes their value, thus making the gradient value outdated.'
def get_gradients(self, model, data, **kwargs):
try:
    cost = self.expr(model=model, data=data, **kwargs)
except TypeError:
    message = 'Error while calling ' + str(type(self)) + '.expr'
    reraise_as(TypeError(message))

if cost is None:
    raise NotImplementedError(str(type(self)) +
                              ' represents an intractable cost and '
                              'does not provide a gradient '
                              'approximation scheme.')

params = list(model.get_params())

grads = T.grad(cost, params, disconnected_inputs='ignore')

gradients = OrderedDict(izip(params, grads))

updates = OrderedDict()

return (gradients, updates)
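The default implementation above is just symbolic differentiation of expr plus an empty updates dictionary. A minimal standalone Theano sketch of that pattern, with a hypothetical parameter and cost standing in for the model and for expr(model, data):

import numpy as np
import theano
import theano.tensor as T
from collections import OrderedDict

W = theano.shared(0.1 * np.ones((3, 2)), name='W')   # hypothetical parameter
X = T.matrix('X')
cost = T.sqr(T.dot(X, W)).mean()                      # stand-in for expr(model, data)

params = [W]
grads = T.grad(cost, params, disconnected_inputs='ignore')
gradients = OrderedDict(zip(params, grads))           # parameter -> gradient expression
updates = OrderedDict()                               # no sampling updates for a tractable cost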
'.. todo:: WRITEME .. todo:: how do you do prereqs in this setup? (I think PL changed it, not sure if there still is a way in this context) Returns a dictionary mapping channel names to expressions for channel values. Parameters model : Model the model to use to compute the monitoring channels data : batch (a member of self.get_data_specs()[0]) symbolic expressions for the monitoring data kwargs : dict used so that custom algorithms can use extra variables for monitoring. Returns rval : dict Maps channels names to expressions for channel values.'
def get_monitoring_channels(self, model, data, **kwargs):
self.get_data_specs(model)[0].validate(data)
return OrderedDict()
'Subclasses should override this if they need variables held constant across multiple updates to a minibatch. TrainingAlgorithms that do multiple updates to a minibatch should respect this. See the FixedVarDescr class for details. Parameters model : Model data : theano.gof.Variable or tuple A valid member of the Space used to train `model` with this cost. Returns fixed_var_descr : FixedVarDescr A description of how to hold the necessary variables constant'
def get_fixed_var_descr(self, model, data):
self.get_data_specs(model)[0].validate(data)
fixed_var_descr = FixedVarDescr()
return fixed_var_descr
'Returns a specification of the Space the data should lie in and its source (what part of the dataset it should come from). Parameters model : Model The model to train with this cost Returns data_specs : tuple The tuple should be of length two. The first element of the tuple should be a Space (possibly a CompositeSpace) describing how to format the data. The second element of the tuple describes the source of the data. It probably should be a string or nested tuple of strings. See Also For many common cases, rather than implementing this method yourself, you probably want to just inherit from `DefaultDataSpecsMixin` or NullDataSpecsMixin. Notes .. todo figure out return format for sure. PL seems to have documented this method incorrectly.'
def get_data_specs(self, model):
raise NotImplementedError(((str(type(self)) + ' does not implement ') + 'get_data_specs.'))
'Returns True if the cost is stochastic. Stochastic costs are incompatible with some optimization algorithms that make multiple updates per minibatch, such as algorithms that use line searches. These optimizations should raise a TypeError if given a stochastic Cost, or issue a warning if given a Cost whose `is_stochastic` method raises NotImplementedError. Returns is_stochastic : bool Whether the cost is stochastic. For example, dropout is stochastic.'
def is_stochastic(self):
raise NotImplementedError((str(type(self)) + ' needs to implement is_stochastic.'))
'Initialize the SumOfCosts object and make sure that the list of costs contains only Cost instances. Parameters costs : list List of Cost objects or (coeff, Cost) pairs'
def __init__(self, costs):
assert isinstance(costs, list)
assert len(costs) > 0

self.costs = []
self.coeffs = []

for cost in costs:
    if isinstance(cost, (list, tuple)):
        (coeff, cost) = cost
    else:
        coeff = 1.0
    self.coeffs.append(coeff)
    self.costs.append(cost)

    if not isinstance(cost, Cost):
        raise ValueError("one of the costs is not a Cost instance")

self.supervised = any([cost_.supervised for cost_ in self.costs])
'Returns the sum of the costs the SumOfCosts instance was given at initialization. Parameters model : pylearn2.models.model.Model the model for which we want to calculate the sum of costs data : flat tuple of tensor_like variables. data has to follow the format defined by self.get_data_specs(), but this format will always be a flat tuple.'
def expr(self, model, data, **kwargs):
self.get_data_specs(model)[0].validate(data)
(composite_specs, mapping) = self.get_composite_specs_and_mapping(model)
nested_data = mapping.nest(data)

costs = []
for (cost, cost_data) in safe_zip(self.costs, nested_data):
    costs.append(cost.expr(model, cost_data, **kwargs))
assert len(costs) > 0

if any([cost is None for cost in costs]):
    sum_of_costs = None
else:
    costs = [coeff * cost
             for (coeff, cost) in safe_zip(self.coeffs, costs)]
    assert len(costs) > 0
    sum_of_costs = reduce(lambda x, y: x + y, costs)

return sum_of_costs
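Numerically, the combination rule above is just sum_i coeff_i * cost_i, with the whole sum treated as intractable (None) if any term is None. A small sketch of that logic on plain Python numbers (toy_sum_of_costs is a hypothetical helper, not the pylearn2 class):

def toy_sum_of_costs(costs, coeffs):
    # costs: list of floats or None (None marks an intractable term)
    if any(c is None for c in costs):
        return None
    return sum(coeff * c for coeff, c in zip(coeffs, costs))

assert toy_sum_of_costs([2.0, 0.5], [1.0, 10.0]) == 7.0
assert toy_sum_of_costs([2.0, None], [1.0, 10.0]) is None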
'Build and return a composite data_specs of all costs. The returned space is a CompositeSpace, where the components are the spaces of each of self.costs, in the same order. The returned source is a tuple of the corresponding sources. Parameters model : pylearn2.models.Model'
def get_composite_data_specs(self, model):
spaces = []
sources = []
for cost in self.costs:
    (space, source) = cost.get_data_specs(model)
    spaces.append(space)
    sources.append(source)
composite_space = CompositeSpace(spaces)
sources = tuple(sources)
return (composite_space, sources)
'Build the composite data_specs and a mapping to flatten it, return both Build the composite data_specs described in `get_composite_specs`, and build a DataSpecsMapping that can convert between it and a flat equivalent version. In particular, it helps building a flat data_specs to request data, and nesting this data back to the composite data_specs, so it can be dispatched among the different sub-costs. Parameters model : pylearn2.models.Model Notes This is a helper function used by `get_data_specs` and `get_gradients`, and possibly other methods.'
def get_composite_specs_and_mapping(self, model):
(composite_space, sources) = self.get_composite_data_specs(model)
mapping = DataSpecsMapping((composite_space, sources))
return ((composite_space, sources), mapping)
'Get a flat data_specs containing all information for all sub-costs. Parameters model : pylearn2.models.Model TODO WRITEME Notes This data_specs should be non-redundant. It is built by flattening the composite data_specs returned by `get_composite_specs`. This is the format that SumOfCosts will request its data in. Then, this flat data tuple will be nested into the composite data_specs, in order to dispatch it among the different sub-costs.'
def get_data_specs(self, model):
(composite_specs, mapping) = self.get_composite_specs_and_mapping(model)
(composite_space, sources) = composite_specs
flat_composite_space = mapping.flatten(composite_space)
flat_sources = mapping.flatten(sources)
data_specs = (flat_composite_space, flat_sources)
return data_specs
'.. todo:: WRITEME Parameters model : Model data : theano.gof.Variable or tuple A valid member of the Space defined by self.get_data_specs(model)[0]'
def get_fixed_var_descr(self, model, data):
data_specs = self.get_data_specs(model)
data_specs[0].validate(data)
(composite_specs, mapping) = self.get_composite_specs_and_mapping(model)
nested_data = mapping.nest(data)

descrs = [cost.get_fixed_var_descr(model, cost_data)
          for (cost, cost_data) in safe_zip(self.costs, nested_data)]

return reduce(merge, descrs)
'Provides an implementation of `Cost.expr`. Returns data specifications corresponding to not using any data at all. Parameters model : pylearn2.models.Model'
def get_data_specs(self, model):
return (NullSpace(), '')
'Provides a default data specification. The cost requests input features from the model\'s input space and input source. `self` must contain a bool field called `supervised`. If this field is True, the cost requests targets as well. Parameters model : pylearn2.models.Model TODO WRITEME'
def get_data_specs(self, model):
if self.supervised:
    space = CompositeSpace([model.get_input_space(),
                            model.get_target_space()])
    sources = (model.get_input_source(), model.get_target_source())
    return (space, sources)
else:
    return (model.get_input_space(), model.get_input_source())
'Parameters variables : list list of tensor variables to be regularized p : int p in "L-p penalty"'
def __init__(self, variables, p):
self.variables = variables
self.p = p
'Return the L-p penalty term. The optional parameters are never used; they\'re only there to provide an interface that\'s consistent with the Cost superclass. Parameters model : a pylearn2 Model instance data : a batch in cost.get_data_specs() form kwargs : dict Optional extra arguments. Not used by the base class.'
def expr(self, model, data, **kwargs):
self.get_data_specs(model)[0].validate(data)
penalty = 0
for var in self.variables:
    # abs() keeps the penalty non-negative when p is odd
    penalty = penalty + abs(var ** self.p).sum()
return penalty
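The penalty above is the sum over variables of sum(|v ** p|). For reference, a NumPy version of the same quantity (a sketch of the formula only, not the symbolic Theano expression; lp_penalty is a hypothetical helper):

import numpy as np

def lp_penalty(variables, p):
    # sum of |v ** p| over all entries of all variables
    return sum(np.abs(v ** p).sum() for v in variables)

W = np.array([[1.0, -2.0], [0.5, 0.0]])
assert lp_penalty([W], 2) == 1.0 + 4.0 + 0.25   # L2-style penalty
assert lp_penalty([W], 1) == 1.0 + 2.0 + 0.5    # L1-style penalty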
'.. todo:: WRITEME Parameters method : a string specifying the name of the method of the model that should be called to generate the objective function. data_specs : a string specifying the name of a method/property of the model that describe the data specs required by method'
def __init__(self, method, data_specs=None):
self.method = method
self.data_specs = data_specs
'Patches calls through to a user-specified method of the model Parameters model : pylearn2.models.model.Model the model for which we want to calculate the sum of costs data : flat tuple of tensor_like variables. data has to follow the format defined by self.get_data_specs(), but this format will always be a flat tuple.'
def expr(self, model, data, *args, **kwargs):
self.get_data_specs(model)[0].validate(data)
fn = getattr(model, self.method)
return fn(data, *args, **kwargs)
'The cost of returning `output` when the truth was `target` Parameters target : Theano tensor The ground truth output : Theano tensor The model\'s output'
@staticmethod def cost(target, output):
raise NotImplementedError()
'The cost of reconstructing `data` using `model`. Parameters model : a GSN data : a batch of inputs to reconstruct. args : evidently ignored? kwargs : optional keyword arguments For use with third party TrainingAlgorithms or FixedVarDescr'
def expr(self, model, data, *args, **kwargs):
self.get_data_specs(model)[0].validate(data)
X = data
return self.cost(X, model.reconstruct(X))
'Symmetric reconstruction cost. Parameters x : tensor_like Theano symbolic representing the first input minibatch. Assumed to be 2-tensors, with the first dimension indexing training examples and the second indexing data dimensions. y : tensor_like Theano symbolic representing the second input minibatch. Assumed to be 2-tensors, with the first dimension indexing training examples and the second indexing data dimensions. rx : tensor_like Reconstruction of the first minibatch by the model. ry : tensor_like Reconstruction of the second minibatch by the model. Returns Cost : theano_like expression Representation of the cost'
@staticmethod def cost(x, y, rx, ry):
raise NotImplementedError
'Returns a theano expression for the cost function. Returns a symbolic expression for a cost function applied to the minibatch of data. Optionally, may return None. This represents that the cost function is intractable but may be optimized via the get_gradients method. Parameters model : a pylearn2 Model instance data : a batch in cost.get_data_specs() form kwargs : dict Optional extra arguments. Not used by the base class.'
def expr(self, model, data, *args, **kwargs):
self.get_data_specs(model)[0].validate(data)
(x, y) = data
input_space = model.get_input_space()
if not isinstance(input_space.components[0], VectorSpace):
    conv = input_space.components[0]
    vec = VectorSpace(conv.get_total_dimension())
    x = conv.format_as(x, vec)
if not isinstance(input_space.components[1], VectorSpace):
    conv = input_space.components[1]
    vec = VectorSpace(conv.get_total_dimension())
    y = conv.format_as(y, vec)
(rx, ry) = model.reconstructXY((x, y))
return self.cost(x, y, rx, ry)
'Summary (Definition of the cost). Mean squared reconstruction error. Parameters x : tensor_like Theano symbolic representing the first input minibatch. Assumed to be 2-tensors, with the first dimension indexing training examples and the second indexing data dimensions. y : tensor_like Theano symbolic representing the second input minibatch. Assumed to be 2-tensors, with the first dimension indexing training examples and the second indexing data dimensions. rx : tensor_like Reconstruction of the first minibatch by the model. ry : tensor_like Reconstruction of the second minibatch by the model. Returns Cost : theano_like expression Representation of the cost Notes Symmetric reconstruction cost as defined by Memisevic in: "Gradient-based learning of higher-order image features". This function only works with real valued data.'
@staticmethod def cost(x, y, rx, ry):
return ((0.5 * ((x - rx) ** 2)) + (0.5 * ((y - ry) ** 2))).sum(axis=1).mean()
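Since the cost above is defined element-wise, it is easy to sanity-check numerically. A hypothetical NumPy re-implementation of the same formula, 0.5*(x - rx)^2 + 0.5*(y - ry)^2 summed over features and averaged over examples:

import numpy as np

def sym_mse(x, y, rx, ry):
    return (0.5 * (x - rx) ** 2 + 0.5 * (y - ry) ** 2).sum(axis=1).mean()

x = np.array([[1.0, 2.0]]); rx = np.array([[0.0, 2.0]])
y = np.array([[3.0, 3.0]]); ry = np.array([[3.0, 1.0]])
# 0.5 * (1^2 + 0^2) + 0.5 * (0^2 + 2^2) = 0.5 + 2.0
assert sym_mse(x, y, rx, ry) == 2.5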
'Summary (Definition of the cost). Normalized mean squared reconstruction error. Values between 0 and 1. Parameters x : tensor_like Theano symbolic representing the first input minibatch. Assumed to be 2-tensors, with the first dimension indexing training examples and the second indexing data dimensions. y : tensor_like Theano symbolic representing the second input minibatch. Assumed to be 2-tensors, with the first dimension indexing training examples and the second indexing data dimensions. rx : tensor_like Reconstruction of the first minibatch by the model. ry : tensor_like Reconstruction of the second minibatch by the model. Returns Cost : theano_like expression Representation of the cost Notes Do not use this function to train, only to monitor the average percentage of reconstruction achieved when training on real valued data.'
@staticmethod def cost(x, y, rx, ry):
num = (0.5 * ((x - rx) ** 2) + 0.5 * ((y - ry) ** 2)).sum(axis=1).mean()
den = (0.5 * (x.norm(2, 1) ** 2) + 0.5 * (y.norm(2, 1) ** 2)).mean()
return num / den
'Computes the total cost contribution from one layer given the full output of the GSN. Parameters idx : int init_data and model_output both contain a subset of the layer activations at each time step. This is the index of the layer we want to evaluate the cost on WITHIN this subset. This is generally equal to the idx of the cost function within the GSNCost.costs list. costf : callable Function of two variables that computes the cost. The first argument is the target value, and the second argument is the predicted value. init_data : list of tensor_likes Although only the element at index "idx" is accessed/needed, this parameter is a list so that it can directly handle the data format from GSN.expr. model_output : list of list of tensor_likes The output of GSN.get_samples as called by GSNCost.expr.'
@staticmethod def _get_total_for_cost(idx, costf, init_data, model_output):
total = 0.0
for step in model_output:
    total += costf(init_data[idx], step[idx])
return total / len(model_output)
'.. todo:: WRITEME properly Handles the different GSNCost modes.'
def _get_samples_from_model(self, model, data):
layer_idxs = [idx for (idx, _, _) in self.costs]
zipped = safe_zip(layer_idxs, data)
if self.mode == 'joint':
    use = zipped
elif self.mode == 'supervised':
    use = zipped[:1]
elif self.mode == 'anti_supervised':
    use = zipped[1:]
else:
    raise ValueError('Unknown mode "%s" for GSNCost' % self.mode)

return model.get_samples(use, walkback=self.walkback,
                         indices=layer_idxs)
'Theano expression for the cost. Parameters model : GSN object WRITEME data : list of tensor_likes Data must be a list or tuple of the same length as self.costs. All elements in data must be a tensor_like (cannot be None). Returns y : tensor_like The actual cost that is backpropagated on.'
def expr(self, model, data):
self.get_data_specs(model)[0].validate(data)
output = self._get_samples_from_model(model, data)

total = 0.0
for (cost_idx, (_, coeff, costf)) in enumerate(self.costs):
    total += coeff * self._get_total_for_cost(cost_idx, costf,
                                              data, output)

coeff_sum = sum(coeff for (_, coeff, _) in self.costs)

return total / coeff_sum
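The total above is a coefficient-weighted average: each per-layer walkback cost is scaled by its coefficient and the result is divided by the sum of coefficients. A toy numeric sketch of that combination (toy_gsn_total is a hypothetical helper):

def toy_gsn_total(per_cost_values, coeffs):
    # per_cost_values[i] is the walkback-averaged cost for self.costs[i]
    total = sum(c * v for c, v in zip(coeffs, per_cost_values))
    return total / sum(coeffs)

# reconstruction cost 0.5 (coeff 1.0) and classification cost 0.25 (coeff 3.0)
assert toy_gsn_total([0.5, 0.25], [1.0, 3.0]) == 0.3125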
'Returns a theano expression for the cost function. Parameters model : MLP data : tuple Should be a valid occupant of CompositeSpace(model.get_input_space(), model.get_output_space()) Returns rval : theano.gof.Variable The cost obtained by calling model.cost_from_X(data)'
def expr(self, model, data, **kwargs):
(space, sources) = self.get_data_specs(model)
space.validate(data)
return model.cost_from_X(data)
'Returns a theano expression for the cost function. Parameters model : MLP data : tuple Should be a valid occupant of CompositeSpace(model.get_input_space(), model.get_output_space()) Returns total_cost : theano.gof.Variable coeff * sum(sqr(weights)) added up for each set of weights.'
def expr(self, model, data, **kwargs):
self.get_data_specs(model)[0].validate(data)

# sanity check: a symbolic scalar must not compare equal to 0.0, since
# the code below relies on `cost != 0.0` working for symbolic costs
assert T.scalar() != 0.0

def wrapped_layer_cost(layer, coeff):
    try:
        return layer.get_weight_decay(coeff)
    except NotImplementedError:
        if coeff == 0.0:
            return 0.0
        else:
            reraise_as(NotImplementedError(
                str(type(layer)) +
                ' does not implement get_weight_decay.'))

if isinstance(self.coeffs, list):
    warnings.warn('Coefficients should be given as a dictionary '
                  'with layer names as key. The support of '
                  'coefficients as list would be deprecated '
                  'from 03/06/2015')
    layer_costs = [wrapped_layer_cost(layer, coeff)
                   for (layer, coeff) in safe_izip(model.layers,
                                                   self.coeffs)]
    layer_costs = [cost for cost in layer_costs if cost != 0.0]
else:
    layer_costs = []
    for layer in model.layers:
        layer_name = layer.layer_name
        if layer_name in self.coeffs:
            cost = wrapped_layer_cost(layer, self.coeffs[layer_name])
            if cost != 0.0:
                layer_costs.append(cost)

if len(layer_costs) == 0:
    rval = T.as_tensor_variable(0.0)
    rval.name = '0_weight_decay'
    return rval
else:
    total_cost = reduce(operator.add, layer_costs)
    total_cost.name = 'MLP_WeightDecay'

assert total_cost.ndim == 0

total_cost.name = 'weight_decay'

return total_cost
'Returns a theano expression for the cost function. Parameters model : MLP data : tuple Should be a valid occupant of CompositeSpace(model.get_input_space(), model.get_output_space()) Returns total_cost : theano.gof.Variable coeff * sum(abs(weights)) added up for each set of weights.'
def expr(self, model, data, **kwargs):
assert T.scalar() != 0.0

self.get_data_specs(model)[0].validate(data)

if isinstance(self.coeffs, list):
    warnings.warn('Coefficients should be given as a dictionary '
                  'with layer names as key. The support of '
                  'coefficients as list would be deprecated '
                  'from 03/06/2015')
    layer_costs = [layer.get_l1_weight_decay(coeff)
                   for (layer, coeff) in safe_izip(model.layers,
                                                   self.coeffs)]
    layer_costs = [cost for cost in layer_costs if cost != 0.0]
else:
    layer_costs = []
    for layer in model.layers:
        layer_name = layer.layer_name
        if layer_name in self.coeffs:
            cost = layer.get_l1_weight_decay(self.coeffs[layer_name])
            if cost != 0.0:
                layer_costs.append(cost)

if len(layer_costs) == 0:
    rval = T.constant(0.0, dtype=theano.config.floatX)
    rval.name = '0_l1_penalty'
    return rval
else:
    total_cost = reduce(operator.add, layer_costs)
    total_cost.name = 'MLP_L1Penalty'

assert total_cost.ndim == 0

total_cost.name = 'l1_penalty'

return total_cost
'Computes `h` from the NCE paper. Parameters X : Theano matrix Batch of input data model : Model Any model with a `log_prob` method. Returns h : A theano symbol for the `h` function from the paper.'
def h(self, X, model):
return (- T.nnet.sigmoid(self.G(X, model)))
'Computes `G` from the NCE paper. Parameters X : Theano matrix Batch of input data model : Model Any model with a `log_prob` method. Returns G : A theano symbol for the `G` function from the paper.'
def G(self, X, model):
return (model.log_prob(X) - self.noise.log_prob(X))
'Computes the NCE objective. Parameters model : Model Any Model that implements a `log_probs` method. data : Theano matrix noisy_data : Theano matrix, optional The noise samples used for noise-contrastive estimation. Will be generated internally if not provided. The keyword argument allows FixedVarDescr to provide the same noise across several steps of a line search.'
def expr(self, model, data, noisy_data=None):
(space, source) = self.get_data_specs(model)
space.validate(data)
X = data
if X.name is None:
    X_name = 'X'
else:
    X_name = X.name

m_data = X.shape[0]
m_noise = m_data * self.noise_per_clean

if noisy_data is not None:
    space.validate(noisy_data)
    Y = noisy_data
else:
    Y = self.noise.random_design_matrix(m_noise)

# log h(x) = log sigmoid(G(x)) = -softplus(-G(x))
log_hx = -T.nnet.softplus(-self.G(X, model))
# log (1 - h(y)) = -softplus(G(y))
log_one_minus_hy = -T.nnet.softplus(self.G(Y, model))
rval = -T.mean(log_hx) - T.mean(log_one_minus_hy)

rval.name = 'NCE(' + X_name + ')'

return rval
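The two softplus terms above are a numerically stable way of writing the NCE objective -mean(log h(x)) - mean(log(1 - h(y))), with h = sigmoid(G) and G(x) = log p_model(x) - log p_noise(x). A small NumPy sketch of the same objective, where log_p_model and log_p_noise are hypothetical stand-ins for model.log_prob and self.noise.log_prob:

import numpy as np

def softplus(z):
    return np.logaddexp(0.0, z)           # log(1 + exp(z)), numerically stable

def nce_objective(log_p_model, log_p_noise, X_data, X_noise):
    G = lambda x: log_p_model(x) - log_p_noise(x)
    log_hx = -softplus(-G(X_data))        # log sigmoid(G(x))
    log_one_minus_hy = -softplus(G(X_noise))
    return -log_hx.mean() - log_one_minus_hy.mean()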
'A fake cost that we differentiate symbolically to derive the SML update rule. Parameters model : Model data : Batch in get_data_specs format Returns cost : 0-d Theano tensor The fake cost'
def _cost(self, model, data):
if not hasattr(self, 'sampler'):
    self.sampler = BlockGibbsSampler(
        rbm=model,
        particles=0.5 + np.zeros((self.nchains, model.get_input_dim())),
        rng=model.rng,
        steps=self.nsteps)

# Gibbs updates that advance the persistent negative-phase chains
sampler_updates = self.sampler.updates()

pos_v = data
neg_v = self.sampler.particles
ml_cost = (model.free_energy(pos_v).mean() -
           model.free_energy(neg_v).mean())
return ml_cost
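The value of ml_cost above is not a real likelihood; it is a "fake" expression whose gradient matches the SML/PCD estimate of the negative log-likelihood gradient, with the persistent Gibbs particles supplying the negative phase. A toy check with a single-parameter model whose free energy is F(v) = -theta * v (a hypothetical stand-in for the RBM free energy):

import numpy as np

theta = 0.3
free_energy = lambda v: -theta * v            # hypothetical toy model
pos_v = np.array([1.0, 1.0, 0.0])             # data batch
neg_v = np.array([0.0, 1.0, 0.0])             # persistent chain samples

ml_cost = free_energy(pos_v).mean() - free_energy(neg_v).mean()
# d ml_cost / d theta = -(mean(pos_v) - mean(neg_v)),
# i.e. the SML estimate of the negative log-likelihood gradient
grad_theta = -(pos_v.mean() - neg_v.mean())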
'.. todo:: WRITEME'
def set_params(self, params):
self._params = list(params)
'.. todo:: WRITEME'
def params(self):
return list(self._params)
'.. todo:: WRITEME'
def __str__(self):
return (self.__class__.__name__ + '{}')
'.. todo:: WRITEME'
def __add__(self, other):
return Sum([self, other])
'.. todo:: WRITEME'
def __radd__(self, other):
return Sum([other, self])
'.. todo:: WRITEME'
def lmul(self, x):
try:
    AT_xT = self.rmul_T(self.transpose_left(x, False))
    rval = self.transpose_right(AT_xT, True)
    return rval
except RuntimeError as e:
    if 'ecursion' in str(e):
        raise TypeError('either lmul or rmul_T must be implemented')
    raise
except TypeError as e:
    if 'either lmul' in str(e):
        raise TypeError('either lmul or rmul_T must be implemented')
'.. todo:: WRITEME'
def lmul_T(self, x):
A_xT = self.rmul(self.transpose_right(x, True))
rval = self.transpose_left(A_xT, True)
return rval
'.. todo:: WRITEME'
def rmul(self, x):
try:
    xT_AT = self.lmul_T(self.transpose_right(x, False))
    rval = self.transpose_left(xT_AT, False)
    return rval
except RuntimeError as e:
    if 'ecursion' in str(e):
        raise TypeError('either rmul or lmul_T must be implemented')
    raise
except TypeError as e:
    if 'either lmul' in str(e):
        raise TypeError('either rmul or lmul_T must be implemented')
'.. todo:: WRITEME'
def rmul_T(self, x):
xT_A = self.lmul(self.transpose_left(x, True))
rval = self.transpose_right(xT_A, True)
return rval
'.. todo:: WRITEME'
def transpose_left(self, x, T):
cshp = self.col_shape()
if T:
    ss = len(cshp)
else:
    ss = x.ndim - len(cshp)
pattern = list(range(ss, x.ndim)) + list(range(ss))
return x.transpose(pattern)
'.. todo:: WRITEME'
def transpose_right(self, x, T):
rshp = self.row_shape()
if T:
    ss = len(rshp)
else:
    ss = x.ndim - len(rshp)
pattern = list(range(ss, x.ndim)) + list(range(ss))
return x.transpose(pattern)
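Both helpers above rotate the axes of x so that either the row block or the column block of dimensions comes first: the pattern list(range(ss, ndim)) + list(range(ss)) is a cyclic shift of the axes by ss positions. A NumPy illustration of that pattern (the shapes are made up for the example):

import numpy as np

x = np.zeros((2, 3, 4, 5))           # e.g. row block (2, 3), column block (4, 5)
ss = 2                               # number of leading axes to move to the back
pattern = list(range(ss, x.ndim)) + list(range(ss))
assert x.transpose(pattern).shape == (4, 5, 2, 3)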
'.. todo:: WRITEME'
def split_left_shape(self, xshp, T):
if type(xshp) != tuple:
    raise TypeError('need tuple', xshp)
cshp = self.col_shape()
assert type(cshp) == tuple
if T:
    ss = len(cshp)
    (RR, CC) = (xshp[ss:], xshp[:ss])
else:
    ss = len(xshp) - len(cshp)
    (RR, CC) = (xshp[:ss], xshp[ss:])
if (len(CC) != len(cshp) or
        not all(isinstance(cc, theano.Variable) or cc == ci
                for (cc, ci) in zip(CC, cshp))):
    raise ValueError('invalid left shape',
                     dict(xshp=xshp, col_shape=cshp, xcols=CC, T=T))
if T:
    return (CC, RR)
else:
    return (RR, CC)
'.. todo:: WRITEME'
def split_right_shape(self, xshp, T):
if type(xshp) != tuple:
    raise TypeError('need tuple', xshp)
rshp = self.row_shape()
assert type(rshp) == tuple
if T:
    ss = len(xshp) - len(rshp)
    (RR, CC) = (xshp[ss:], xshp[:ss])
else:
    ss = len(rshp)
    (RR, CC) = (xshp[:ss], xshp[ss:])
if (len(RR) != len(rshp) or
        not all(isinstance(rr, theano.Variable) or rr == ri
                for (rr, ri) in zip(RR, rshp))):
    raise ValueError('invalid right shape',
                     dict(xshp=xshp, row_shape=rshp, xrows=RR, T=T))
if T:
    return (CC, RR)
else:
    return (RR, CC)
'.. todo:: WRITEME'
def transpose_left_shape(self, xshp, T):
(RR, CC) = self.split_left_shape(xshp, T)
return CC + RR
'.. todo:: WRITEME'
def transpose_right_shape(self, xshp, T):
(RR, CC) = self.split_right_shape(xshp, T)
return CC + RR
'.. todo:: WRITEME'
def is_valid_left_shape(self, xshp, T):
try:
    self.split_left_shape(xshp, T)
    return True
except ValueError:
    return False
'.. todo:: WRITEME'
def is_valid_right_shape(self, xshp, T):
try:
    self.split_right_shape(xshp, T)
    return True
except ValueError:
    return False
'.. todo:: WRITEME'
def row_shape(self):
raise NotImplementedError('override me')
'.. todo:: WRITEME'
def col_shape(self):
raise NotImplementedError('override me')
'.. todo:: WRITEME'
def transpose(self):
return TransposeTransform(self)
'.. todo:: WRITEME'
def transpose(self):
return self.base
'.. todo:: WRITEME'
def params(self):
return self.base.params()
'.. todo:: WRITEME'
def lmul(self, x):
return self.base.lmul_T(x)
'.. todo:: WRITEME'
def lmul_T(self, x):
return self.base.lmul(x)
'.. todo:: WRITEME'
def rmul(self, x):
return self.base.rmul_T(x)
'.. todo:: WRITEME'
def rmul_T(self, x):
return self.base.rmul(x)
'.. todo:: WRITEME'
def transpose_left(self, x, T):
return self.base.transpose_right(x, (not T))
'.. todo:: WRITEME'
def transpose_right(self, x, T):
return self.base.transpose_left(x, (not T))
'.. todo:: WRITEME'
def transpose_left_shape(self, x, T):
return self.base.transpose_right_shape(x, (not T))
'.. todo:: WRITEME'
def transpose_right_shape(self, x, T):
return self.base.transpose_left_shape(x, (not T))
'.. todo:: WRITEME'
def split_left_shape(self, x, T):
return self.base.split_right_shape(x, (not T))
'.. todo:: WRITEME'
def split_right_shape(self, x, T):
return self.base.split_left_shape(x, (not T))
'.. todo:: WRITEME'
def is_valid_left_shape(self, x, T):
return self.base.is_valid_right_shape(x, (not T))
'.. todo:: WRITEME'
def is_valid_right_shape(self, x, T):
return self.base.is_valid_left_shape(x, (not T))
'.. todo:: WRITEME'
def row_shape(self):
return self.base.col_shape()
'.. todo:: WRITEME'
def col_shape(self):
return self.base.row_shape()
'.. todo:: WRITEME'
def print_status(self):
return self.base.print_status()
'.. todo:: WRITEME'
def tile_columns(self):
return self.base.tile_columns()
'.. todo:: WRITEME'
def props(self):
return (self.n_levels,)
'.. todo:: WRITEME'
def __hash__(self):
return hash((type(self), self.props()))
'.. todo:: WRITEME'
def __eq__(self, other):
return ((type(self) == type(other)) and (self.props() == other.props()))
'.. todo:: WRITEME'
def __repr__(self):
return ('%s{n_levels=%s}' % (self.__class__.__name__, self.n_levels))
'.. todo:: WRITEME'
def infer_shape(self, node, input_shapes):
(xshp,) = input_shapes
out_shapes = [xshp]
while len(out_shapes) < self.n_levels:
    s = out_shapes[-1]
    out_shapes.append((s[0], s[1] // 2, s[2] // 2, s[3]))
return out_shapes
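Each pyramid level halves the two spatial dimensions (integer division) and keeps the batch and channel dimensions, so the output shapes can be computed ahead of time. A quick standalone check of the shape recurrence used above (pyramid_shapes is a hypothetical helper; the example shapes are made up):

def pyramid_shapes(xshp, n_levels):
    out_shapes = [xshp]
    while len(out_shapes) < n_levels:
        b, m, n, k = out_shapes[-1]
        out_shapes.append((b, m // 2, n // 2, k))
    return out_shapes

assert pyramid_shapes((10, 32, 32, 3), 3) == [(10, 32, 32, 3),
                                              (10, 16, 16, 3),
                                              (10, 8, 8, 3)]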
'.. todo:: WRITEME'
def make_node(self, x):
if self.n_levels < 1:
    raise ValueError('It does not make sense for GaussianPyramid to '
                     'generate %i levels' % self.n_levels)
x = as_tensor_variable(x)
return Apply(self, [x], [x.type() for i in range(self.n_levels)])
'.. todo:: WRITEME'
def perform(self, node, ins, outs):
(x,) = ins
outs[0][0] = z = x.copy()
(B, M, N, K) = x.shape
for level in range(1, self.n_levels):
    z0 = z[0]
    if z0.shape[0] <= 2 or z0.shape[1] <= 2:
        raise ValueError('Cannot downsample an image smaller than 3x3',
                         z0.shape)
    logger.info('{0} {1} {2}'.format(z0.shape, z0.dtype, z0.strides))
    out0 = cv.pyrDown(z0)
    assert out0.dtype == x.dtype
    if out0.ndim == 3:
        assert out0.shape[2] == x.shape[3]
    else:
        assert K == 1
    out = numpy.empty((x.shape[0], out0.shape[0], out0.shape[1], K),
                      dtype=out0.dtype)
    if K == 1:
        out[0][:, :, 0] = out0
    else:
        out[0] = out0
    for (i, zi) in enumerate(z[1:]):
        if K == 1:
            out[i][:, :, 0] = cv.pyrDown(z[i])
        else:
            out[i] = cv.pyrDown(z[i])
    outs[level][0] = out
    z = out
'This function returns (zlike) transpose(W(y)) Parameters zlike : WRITEME *inputs_1_to_n : WRITEME Returns WRITEME'
def transpose(zlike, *inputs_1_to_n):
raise NotImplementedError('override-me')
'.. todo:: WRITEME'
def grads_1_to_n(inputs, gzlist):
raise NotImplementedError('override-me')
'.. todo:: WRITEME'
def grad(self, inputs, gzlist):
if len(gzlist) > 1:
    raise NotImplementedError()
g_input0 = self.transpose(gzlist[0], *inputs[1:])
return [g_input0] + self.grads_1_to_n(inputs, gzlist)
'.. todo:: WRITEME'
def lmul(self, x):
return conv2d(x, self._filters, image_shape=self._img_shape, filter_shape=self._filters_shape, subsample=self._subsample, border_mode=self._border_mode)
'.. todo:: WRITEME'
def lmul_T(self, x):
dummy_v = tensor.tensor4()
z_hs = conv2d(dummy_v, self._filters,
              image_shape=self._img_shape,
              filter_shape=self._filters_shape,
              subsample=self._subsample,
              border_mode=self._border_mode)
(xfilters, xdummy) = z_hs.owner.op.grad((dummy_v, self._filters), (x,))
return xfilters
'.. todo:: WRITEME'
def row_shape(self):
return self._img_shape[1:]
'.. todo:: WRITEME'
def col_shape(self):
rows_cols = ConvOp.getOutputShape(self._img_shape[2:],
                                  self._filters_shape[2:],
                                  self._subsample,
                                  self._border_mode)
rval = (self._filters_shape[0],) + tuple(rows_cols)
return rval
'.. todo:: WRITEME'
def tile_columns(self, scale_each=True, **kwargs):
return tile_slices_to_image(
    self._filters.get_value()[:, :, ::-1, ::-1].transpose(0, 2, 3, 1),
    scale_each=scale_each,
    **kwargs)