Dataset columns: desc: string (lengths 3 to 26.7k), decl: string (lengths 11 to 7.89k), bodies: string (lengths 8 to 553k).
'.. todo:: WRITEME'
def downward_state(self, total_state):
    p, h = total_state
    if not hasattr(self, 'center'):
        self.center = False
    if self.center:
        return h - self.offset
    return h * self.copies
'.. todo:: WRITEME'
def get_monitoring_channels(self):
    W, = self.transformer.get_params()
    assert W.ndim == 2
    sq_W = T.sqr(W)
    row_norms = T.sqrt(sq_W.sum(axis=1))
    col_norms = T.sqrt(sq_W.sum(axis=0))
    return OrderedDict([('row_norms_min', row_norms.min()),
                        ('row_norms_mean', row_norms.mean()),
                        ('row_norms_max', row_norms.max()),
                        ('col_norms_min', col_norms.min()),
                        ('col_norms_mean', col_norms.mean()),
                        ('col_norms_max', col_norms.max())])
'.. todo:: WRITEME'
def get_monitoring_channels_from_state(self, state):
    P, H = state
    rval = OrderedDict()
    if self.pool_size == 1:
        vars_and_prefixes = [(P, '')]
    else:
        vars_and_prefixes = [(P, 'p_'), (H, 'h_')]
    for var, prefix in vars_and_prefixes:
        v_max = var.max(axis=0)
        v_min = var.min(axis=0)
        v_mean = var.mean(axis=0)
        v_range = v_max - v_min
        for key, val in [('max_x.max_u', v_max.max()), ('max_x.mean_u', v_max.mean()),
                         ('max_x.min_u', v_max.min()), ('min_x.max_u', v_min.max()),
                         ('min_x.mean_u', v_min.mean()), ('min_x.min_u', v_min.min()),
                         ('range_x.max_u', v_range.max()), ('range_x.mean_u', v_range.mean()),
                         ('range_x.min_u', v_range.min()), ('mean_x.max_u', v_mean.max()),
                         ('mean_x.mean_u', v_mean.mean()), ('mean_x.min_u', v_mean.min())]:
            rval[prefix + key] = val
    return rval
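The channels above are scalar summaries of per-unit batch statistics. A minimal NumPy sketch (illustration only; the array shape is hypothetical) of the same reductions:

import numpy as np

acts = np.random.rand(128, 500)   # hypothetical (batch, units) activations standing in for P or H

v_max = acts.max(axis=0)          # per-unit max over the batch
v_min = acts.min(axis=0)          # per-unit min over the batch
v_mean = acts.mean(axis=0)        # per-unit mean activation
v_range = v_max - v_min           # per-unit dynamic range

# each monitoring channel is a scalar summary of one of these vectors,
# e.g. 'max_x.mean_u' is the mean over units of the per-unit maxima
channels = {'max_x.mean_u': v_max.mean(), 'range_x.min_u': v_range.min()}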
'.. todo:: WRITEME'
def get_stdev_rewards(self, state, coeffs):
    rval = 0.0
    P, H = state
    self.output_space.validate(P)
    self.h_space.validate(H)
    if self.pool_size == 1:
        assert len(state) == 2
        if isinstance(coeffs, str):
            coeffs = float(coeffs)
        assert isinstance(coeffs, float)
        _, state = state
        state = [state]
        coeffs = [coeffs]
    else:
        assert all([len(elem) == 2 for elem in [state, coeffs]])
    for s, c in safe_zip(state, coeffs):
        assert all([isinstance(elem, float) for elem in [c]])
        if c == 0.0:
            continue
        mn = s.mean(axis=0)
        dev = s - mn
        stdev = T.sqrt(T.sqr(dev).mean(axis=0))
        rval += (0.5 - stdev).mean() * c
    return rval
'.. todo:: WRITEME'
def get_range_rewards(self, state, coeffs):
    rval = 0.0
    P, H = state
    self.output_space.validate(P)
    self.h_space.validate(H)
    if self.pool_size == 1:
        assert len(state) == 2
        if isinstance(coeffs, str):
            coeffs = float(coeffs)
        assert isinstance(coeffs, float)
        _, state = state
        state = [state]
        coeffs = [coeffs]
    else:
        assert all([len(elem) == 2 for elem in [state, coeffs]])
    for s, c in safe_zip(state, coeffs):
        assert all([isinstance(elem, float) for elem in [c]])
        if c == 0.0:
            continue
        mx = s.max(axis=0)
        assert hasattr(mx.owner.op, 'grad')
        assert mx.ndim == 1
        mn = s.min(axis=0)
        assert hasattr(mn.owner.op, 'grad')
        assert mn.ndim == 1
        r = mx - mn
        rval += (1 - r).mean() * c
    return rval
'.. todo:: WRITEME'
def get_l1_act_cost(self, state, target, coeff, eps=None):
    rval = 0.0
    P, H = state
    self.output_space.validate(P)
    self.h_space.validate(H)
    if self.pool_size == 1:
        assert len(state) == 2
        if not isinstance(target, float):
            raise TypeError('BinaryVectorMaxPool.get_l1_act_cost expected target of type float ' +
                            ' but an instance named ' + self.layer_name + ' got target ' +
                            str(target) + ' of type ' + str(type(target)))
        assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
        _, state = state
        state = [state]
        target = [target]
        coeff = [coeff]
        if eps is None:
            eps = [0.0]
        else:
            eps = [eps]
    else:
        assert all([len(elem) == 2 for elem in [state, target, coeff]])
        if eps is None:
            eps = [0.0, 0.0]
        if target[1] > target[0]:
            warnings.warn('Do you really want to regularize the detector units to be more active than the pooling units?')
    for s, t, c, e in safe_zip(state, target, coeff, eps):
        assert all([isinstance(elem, float) or hasattr(elem, 'dtype') for elem in [t, c, e]])
        if c == 0.0:
            continue
        m = s.mean(axis=0)
        assert m.ndim == 1
        rval += T.maximum(abs(m - t) - e, 0.0).mean() * c
    return rval
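The loop above charges coeff times the mean over units of max(|mean activation - target| - eps, 0). A small NumPy sketch of the same term, using made-up batch data and hyperparameters:

import numpy as np

acts = np.random.rand(128, 500)   # hypothetical (batch, units) detector activations
target, coeff, eps = 0.05, 0.001, 0.0

m = acts.mean(axis=0)             # per-unit mean activation over the batch
l1_act_cost = np.maximum(np.abs(m - target) - eps, 0.0).mean() * coeff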
'.. todo:: WRITEME'
def get_l2_act_cost(self, state, target, coeff):
    rval = 0.0
    P, H = state
    self.output_space.validate(P)
    self.h_space.validate(H)
    if self.pool_size == 1:
        assert len(state) == 2
        if not isinstance(target, float):
            raise TypeError('BinaryVectorMaxPool.get_l2_act_cost expected target of type float ' +
                            ' but an instance named ' + self.layer_name + ' got target ' +
                            str(target) + ' of type ' + str(type(target)))
        assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
        _, state = state
        state = [state]
        target = [target]
        coeff = [coeff]
    else:
        assert all([len(elem) == 2 for elem in [state, target, coeff]])
        if target[1] > target[0]:
            warnings.warn('Do you really want to regularize the detector units to be more active than the pooling units?')
    for s, t, c in safe_zip(state, target, coeff):
        assert all([isinstance(elem, float) or hasattr(elem, 'dtype') for elem in [t, c]])
        if c == 0.0:
            continue
        m = s.mean(axis=0)
        assert m.ndim == 1
        rval += T.square(m - t).mean() * c
    return rval
'.. todo:: WRITEME'
def sample(self, state_below=None, state_above=None, layer_above=None, theano_rng=None):
    if self.copies != 1:
        raise NotImplementedError()
    if theano_rng is None:
        raise ValueError('theano_rng is required; it just defaults to None so that it may appear after layer_above / state_above in the list.')
    if state_above is not None:
        msg = layer_above.downward_message(state_above)
    else:
        msg = None
    if self.requires_reformat:
        state_below = self.input_space.format_as(state_below, self.desired_space)
    z = self.transformer.lmul(state_below) + self.b
    p, h, p_sample, h_sample = max_pool_channels(z, self.pool_size, msg, theano_rng)
    return p_sample, h_sample
'.. todo:: WRITEME'
def downward_message(self, downward_state):
    self.h_space.validate(downward_state)
    rval = self.transformer.lmul_T(downward_state)
    if self.requires_reformat:
        rval = self.desired_space.format_as(rval, self.input_space)
    return rval * self.copies
'.. todo:: WRITEME'
def init_mf_state(self):
    z = (T.alloc(0.0, self.dbm.batch_size, self.detector_layer_dim).astype(self.b.dtype) +
         self.b.dimshuffle('x', 0))
    rval = max_pool_channels(z=z, pool_size=self.pool_size)
    return rval
'.. todo:: WRITEME'
def make_state(self, num_examples, numpy_rng):
    """
    Returns a shared variable containing an actual state
    (not a mean field state) for this variable.
    """
    if not hasattr(self, 'copies'):
        self.copies = 1
    if self.copies != 1:
        raise NotImplementedError()
    empty_input = self.h_space.get_origin_batch(num_examples)
    empty_output = self.output_space.get_origin_batch(num_examples)
    h_state = sharedX(empty_input)
    p_state = sharedX(empty_output)
    theano_rng = make_theano_rng(None, numpy_rng.randint(2 ** 16), which_method='binomial')
    default_z = T.zeros_like(h_state) + self.b
    p_exp, h_exp, p_sample, h_sample = max_pool_channels(z=default_z, pool_size=self.pool_size,
                                                         theano_rng=theano_rng)
    assert h_sample.dtype == default_z.dtype
    f = function([], updates=[(p_state, p_sample), (h_state, h_sample)])
    f()
    p_state.name = 'p_sample_shared'
    h_state.name = 'h_sample_shared'
    return p_state, h_state
'.. todo:: WRITEME'
def make_symbolic_state(self, num_examples, theano_rng):
    """
    Returns a theano symbolic variable containing an actual state
    (not a mean field state) for this variable.
    """
    if not hasattr(self, 'copies'):
        self.copies = 1
    if self.copies != 1:
        raise NotImplementedError()
    default_z = T.alloc(self.b, num_examples, self.detector_layer_dim)
    p_exp, h_exp, p_sample, h_sample = max_pool_channels(z=default_z, pool_size=self.pool_size,
                                                         theano_rng=theano_rng)
    assert h_sample.dtype == default_z.dtype
    return p_sample, h_sample
'.. todo:: WRITEME'
def expected_energy_term(self, state, average, state_below, average_below):
    self.input_space.validate(state_below)
    if self.requires_reformat:
        if not isinstance(state_below, tuple):
            for sb in get_debug_values(state_below):
                if sb.shape[0] != self.dbm.batch_size:
                    raise ValueError('self.dbm.batch_size is %d but got shape of %d' %
                                     (self.dbm.batch_size, sb.shape[0]))
                assert reduce(operator.mul, sb.shape[1:]) == self.input_dim
        state_below = self.input_space.format_as(state_below, self.desired_space)
    downward_state = self.downward_state(state)
    self.h_space.validate(downward_state)
    bias_term = T.dot(downward_state, self.b)
    weights_term = (self.transformer.lmul(state_below) * downward_state).sum(axis=1)
    rval = -bias_term - weights_term
    assert rval.ndim == 1
    return rval * self.copies
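A rough NumPy sketch (shapes and values are made up) of the per-example energy contribution computed above: minus the bias term h.b and minus the interaction term v^T W h:

import numpy as np

rng = np.random.RandomState(0)
v = rng.rand(10, 20)              # hypothetical (batch, nvis) state below
h = rng.rand(10, 30)              # hypothetical (batch, nhid) downward state
W = rng.randn(20, 30) * 0.01
b = np.zeros(30)

bias_term = h.dot(b)                        # (batch,)
weights_term = (v.dot(W) * h).sum(axis=1)   # (batch,), v^T W h per example
energy_term = -bias_term - weights_term     # this layer's contribution to the energy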
"""
Used to implement TorontoSparsity. Unclear exactly what properties of it are
important or how to implement it for other layers.

Properties it must have: output is same kind of data structure (ie, tuple of
theano 2-tensors) as mf_update.

Properties it probably should have for other layer types: an infinitesimal
change in state_below or the parameters should cause the same sign of change
in the output of linear_feed_forward_approximation and in mf_update. Should
not have any non-linearities that cause the gradient to shrink. Should
disregard top-down feedback.

Parameters
----------
state_below : WRITEME
"""
def linear_feed_forward_approximation(self, state_below):
    z = self.transformer.lmul(state_below) + self.b
    if self.pool_size != 1:
        raise NotImplementedError()
    return z, z
'.. todo:: WRITEME'
def mf_update(self, state_below, state_above, layer_above=None, double_weights=False, iter_name=None):
    self.input_space.validate(state_below)
    if self.requires_reformat:
        if not isinstance(state_below, tuple):
            for sb in get_debug_values(state_below):
                if sb.shape[0] != self.dbm.batch_size:
                    raise ValueError('self.dbm.batch_size is %d but got shape of %d' %
                                     (self.dbm.batch_size, sb.shape[0]))
                assert reduce(operator.mul, sb.shape[1:]) == self.input_dim
        state_below = self.input_space.format_as(state_below, self.desired_space)
    if iter_name is None:
        iter_name = 'anon'
    if state_above is not None:
        assert layer_above is not None
        msg = layer_above.downward_message(state_above)
        msg.name = 'msg_from_' + layer_above.layer_name + '_to_' + self.layer_name + '[' + iter_name + ']'
    else:
        msg = None
    if double_weights:
        state_below = 2.0 * state_below
        state_below.name = self.layer_name + '_' + iter_name + '_2state'
    z = self.transformer.lmul(state_below) + self.b
    if self.layer_name is not None and iter_name is not None:
        z.name = self.layer_name + '_' + iter_name + '_z'
    p, h = max_pool_channels(z, self.pool_size, msg)
    p.name = self.layer_name + '_p_' + iter_name
    h.name = self.layer_name + '_h_' + iter_name
    return p, h
'.. todo:: WRITEME'
def get_total_state_space(self):
return self.output_space
'.. todo:: WRITEME'
def get_monitoring_channels_from_state(self, state):
    mx = state.max(axis=1)
    return OrderedDict([('mean_max_class', mx.mean()),
                        ('max_max_class', mx.max()),
                        ('min_max_class', mx.min())])
'.. todo:: WRITEME'
def set_input_space(self, space):
    self.input_space = space
    if not isinstance(space, Space):
        raise TypeError('Expected Space, got ' + str(space) + ' of type ' + str(type(space)))
    self.input_dim = space.get_total_dimension()
    self.needs_reformat = not isinstance(space, VectorSpace)
    self.desired_space = VectorSpace(self.input_dim)
    if not self.needs_reformat:
        assert self.desired_space == self.input_space
    rng = self.dbm.rng
    if self.irange is not None:
        assert self.sparse_init is None
        W = rng.uniform(-self.irange, self.irange, (self.input_dim, self.n_classes))
    else:
        assert self.sparse_init is not None
        W = np.zeros((self.input_dim, self.n_classes))
        for i in xrange(self.n_classes):
            for j in xrange(self.sparse_init):
                idx = rng.randint(0, self.input_dim)
                while W[idx, i] != 0.0:
                    idx = rng.randint(0, self.input_dim)
                W[idx, i] = rng.randn() * self.sparse_istdev
    self.W = sharedX(W, 'softmax_W')
    self._params = [self.b, self.W]
'.. todo:: WRITEME'
def get_weights_topo(self):
    if not isinstance(self.input_space, Conv2DSpace):
        raise NotImplementedError()
    desired = self.W.get_value().T
    ipt = self.desired_space.format_as(desired, self.input_space)
    rval = Conv2DSpace.convert_numpy(ipt, self.input_space.axes, ('b', 0, 1, 'c'))
    return rval
'.. todo:: WRITEME'
def get_weights(self):
    if not isinstance(self.input_space, VectorSpace):
        raise NotImplementedError()
    return self.W.get_value()
'.. todo:: WRITEME'
def set_weights(self, weights):
self.W.set_value(weights)
'.. todo:: WRITEME'
def set_biases(self, biases, recenter=False):
    self.b.set_value(biases)
    if recenter:
        assert self.center
        self.offset.set_value((np.exp(biases) / np.exp(biases).sum()).astype(self.offset.dtype))
'.. todo:: WRITEME'
def get_biases(self):
return self.b.get_value()
'.. todo:: WRITEME'
def get_weights_format(self):
return ('v', 'h')
'.. todo:: WRITEME'
def sample(self, state_below=None, state_above=None, layer_above=None, theano_rng=None):
    if self.copies != 1:
        raise NotImplementedError('need to draw self.copies samples and average them together.')
    if state_above is not None:
        raise NotImplementedError()
    if theano_rng is None:
        raise ValueError('theano_rng is required; it just defaults to None so that it may appear after layer_above / state_above in the list.')
    self.input_space.validate(state_below)
    if not hasattr(self, 'needs_reformat'):
        self.needs_reformat = self.needs_reshape
        del self.needs_reshape
    if self.needs_reformat:
        state_below = self.input_space.format_as(state_below, self.desired_space)
    self.desired_space.validate(state_below)
    z = T.dot(state_below, self.W) + self.b
    h_exp = T.nnet.softmax(z)
    h_sample = theano_rng.multinomial(pvals=h_exp, dtype=h_exp.dtype)
    return h_sample
'.. todo:: WRITEME'
def mf_update(self, state_below, state_above=None, layer_above=None, double_weights=False, iter_name=None):
    if state_above is not None:
        raise NotImplementedError()
    if double_weights:
        raise NotImplementedError()
    self.input_space.validate(state_below)
    if not hasattr(self, 'needs_reformat'):
        self.needs_reformat = self.needs_reshape
        del self.needs_reshape
    if self.needs_reformat:
        state_below = self.input_space.format_as(state_below, self.desired_space)
    for value in get_debug_values(state_below):
        if value.shape[0] != self.dbm.batch_size:
            raise ValueError('state_below should have batch size ' + str(self.dbm.batch_size) +
                             ' but has ' + str(value.shape[0]))
    self.desired_space.validate(state_below)
    assert self.W.ndim == 2
    assert state_below.ndim == 2
    b = self.b
    Z = T.dot(state_below, self.W) + b
    rval = T.nnet.softmax(Z)
    for value in get_debug_values(rval):
        assert value.shape[0] == self.dbm.batch_size
    return rval
'.. todo:: WRITEME'
def downward_message(self, downward_state):
    if not hasattr(self, 'copies'):
        self.copies = 1
    rval = T.dot(downward_state, self.W.T) * self.copies
    rval = self.desired_space.format_as(rval, self.input_space)
    return rval
"""
The cost of reconstructing `Y` as `Y_hat`. Specifically, the negative log probability.
This cost is for use with multi-prediction training.

Parameters
----------
Y : target space batch
    The data labels
Y_hat_unmasked : target space batch
    The output of this layer's `mf_update`; the predicted values of `Y`. Even though
    the model is only predicting the dropped values, we take predictions for all the
    values here.
drop_mask_Y : 1-D theano tensor
    A batch of 0s/1s, with 1s indicating that variables have been dropped, and should
    be included in the reconstruction cost. One indicator per example in the batch,
    since each example in this layer only has one random variable in it.
scale : float
    Multiply the cost by this amount. We need to do this because the visible layer
    also goes into the cost. We use the mean over units and examples, so that the
    scale of the cost doesn't change too much with batch size or example size. We
    need to multiply this cost by scale to make sure that it is put on the same scale
    as the reconstruction cost for the visible units. ie, scale should be 1/nvis
"""
def recons_cost(self, Y, Y_hat_unmasked, drop_mask_Y, scale):
    Y_hat = Y_hat_unmasked
    assert hasattr(Y_hat, 'owner')
    owner = Y_hat.owner
    assert owner is not None
    op = owner.op
    if isinstance(op, Print):
        assert len(owner.inputs) == 1
        Y_hat, = owner.inputs
        owner = Y_hat.owner
        op = owner.op
    assert isinstance(op, T.nnet.Softmax)
    z, = owner.inputs
    assert z.ndim == 2
    z = z - z.max(axis=1).dimshuffle(0, 'x')
    log_prob = z - T.log(T.exp(z).sum(axis=1).dimshuffle(0, 'x'))
    log_prob_of = (Y * log_prob).sum(axis=1)
    masked = log_prob_of * drop_mask_Y
    assert masked.ndim == 1
    rval = masked.mean() * scale * self.copies
    return -rval
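A small NumPy check (not from the original source) of the row-max subtraction used above: it leaves the log-softmax unchanged while keeping exp() from overflowing on large logits:

import numpy as np

z = np.array([[1000.0, 1001.0, 999.0]])                    # large logits
naive = np.exp(z) / np.exp(z).sum(axis=1, keepdims=True)   # overflows: inf / inf -> nan

shifted = z - z.max(axis=1, keepdims=True)
log_prob = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
print(naive)               # [[nan nan nan]]
print(np.exp(log_prob))    # [[0.2447 0.6652 0.0900]] approximately, finite and correct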
'.. todo:: WRITEME'
def init_mf_state(self):
    rval = (T.nnet.softmax(self.b.dimshuffle('x', 0)) +
            T.alloc(0.0, self.dbm.batch_size, self.n_classes).astype(config.floatX))
    return rval
'.. todo:: WRITEME'
def make_state(self, num_examples, numpy_rng):
    """
    Returns a shared variable containing an actual state
    (not a mean field state) for this variable.
    """
    if self.copies != 1:
        raise NotImplementedError('need to make self.copies samples and average them together.')
    t1 = time.time()
    empty_input = self.output_space.get_origin_batch(num_examples)
    h_state = sharedX(empty_input)
    default_z = T.zeros_like(h_state) + self.b
    theano_rng = make_theano_rng(None, numpy_rng.randint(2 ** 16), which_method='binomial')
    h_exp = T.nnet.softmax(default_z)
    h_sample = theano_rng.multinomial(pvals=h_exp, dtype=h_exp.dtype)
    h_state = sharedX(self.output_space.get_origin_batch(num_examples))
    t2 = time.time()
    f = function([], updates=[(h_state, h_sample)])
    t3 = time.time()
    f()
    t4 = time.time()
    logger.info('{0}.make_state took {1}'.format(self, t4 - t1))
    logger.info('\tcompose time: {0}'.format(t2 - t1))
    logger.info('\tcompile time: {0}'.format(t3 - t2))
    logger.info('\texecute time: {0}'.format(t4 - t3))
    h_state.name = 'softmax_sample_shared'
    return h_state
'.. todo:: WRITEME'
def make_symbolic_state(self, num_examples, theano_rng):
    """
    Returns a symbolic variable containing an actual state
    (not a mean field state) for this variable.
    """
    if self.copies != 1:
        raise NotImplementedError('need to make self.copies samples and average them together.')
    default_z = T.alloc(self.b, num_examples, self.n_classes)
    h_exp = T.nnet.softmax(default_z)
    h_sample = theano_rng.multinomial(pvals=h_exp, dtype=h_exp.dtype)
    return h_sample
'.. todo:: WRITEME'
def get_weight_decay(self, coeff):
    if isinstance(coeff, str):
        coeff = float(coeff)
    assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
    return coeff * T.sqr(self.W).sum()
'.. todo:: WRITEME'
def upward_state(self, state):
    if self.center:
        return state - self.offset
    return state
'.. todo:: WRITEME'
def downward_state(self, state):
    if not hasattr(self, 'center'):
        self.center = False
    if self.center:
        # TODO: write a unit test verifying that inference or sampling
        # below a centered Softmax layer works
        return state - self.offset
    return state
'.. todo:: WRITEME'
def expected_energy_term(self, state, average, state_below, average_below):
    if self.center:
        state = state - self.offset
    self.input_space.validate(state_below)
    if self.needs_reformat:
        state_below = self.input_space.format_as(state_below, self.desired_space)
    self.desired_space.validate(state_below)
    bias_term = T.dot(state, self.b)
    weights_term = (T.dot(state_below, self.W) * state).sum(axis=1)
    rval = -bias_term - weights_term
    rval *= self.copies
    assert rval.ndim == 1
    return rval
'.. todo:: WRITEME'
def init_inpainting_state(self, Y, noise):
    if noise:
        theano_rng = make_theano_rng(None, 2012 + 10 + 30, which_method='binomial')
        return T.nnet.softmax(theano_rng.normal(avg=0.0, size=Y.shape, std=1.0, dtype='float32'))
    rval = T.nnet.softmax(self.b)
    if not hasattr(self, 'learn_init_inpainting_state'):
        self.learn_init_inpainting_state = 1
    if not self.learn_init_inpainting_state:
        rval = block_gradient(rval)
    return rval
'.. todo:: WRITEME'
def install_presynaptic_outputs(self, outputs_dict, batch_size):
    assert self.presynaptic_name not in outputs_dict
    outputs_dict[self.presynaptic_name] = self.output_space.make_shared_batch(batch_size, self.presynaptic_name)
'.. todo:: WRITEME'
def get_monitoring_channels(self):
    rval = OrderedDict()
    rval['beta_min'] = self.beta.min()
    rval['beta_mean'] = self.beta.mean()
    rval['beta_max'] = self.beta.max()
    return rval
'.. todo:: WRITEME'
def get_params(self):
    if self.mu is None:
        return [self.beta]
    return [self.beta, self.mu]
'.. todo:: WRITEME'
def get_lr_scalers(self):
    rval = OrderedDict()
    if self.nvis is None:
        rows, cols = self.space.shape
        num_loc = float(rows * cols)
    assert self.tie_beta in [None, 'locations']
    if self.beta_lr_scale == 'by_sharing':
        if self.tie_beta == 'locations':
            assert self.nvis is None
            rval[self.beta] = 1.0 / num_loc
    elif self.beta_lr_scale is None:
        pass
    else:
        rval[self.beta] = self.beta_lr_scale
    assert self.tie_mu in [None, 'locations']
    if self.tie_mu == 'locations':
        warn = True
        assert self.nvis is None
        rval[self.mu] = 1.0 / num_loc
        logger.warning('mu lr_scaler hardcoded to 1/sharing')
    return rval
'Set mean parameter Parameters bias: WRITEME Vector of size nvis'
def set_biases(self, bias):
self.mu = sharedX(bias, name='mu')
'Returns mu, broadcasted to have the same shape as a batch of data'
def broadcasted_mu(self):
    if self.tie_mu == 'locations':
        def f(x):
            if x == 'c':
                return 0
            return 'x'
        axes = [f(ax) for ax in self.axes]
        rval = self.mu.dimshuffle(*axes)
    else:
        assert self.tie_mu is None
        if self.nvis is None:
            axes = [0, 1, 2]
            axes.insert(self.axes.index('b'), 'x')
            rval = self.mu.dimshuffle(*axes)
        else:
            rval = self.mu.dimshuffle('x', 0)
    self.input_space.validate(rval)
    return rval
'Returns beta, broadcasted to have the same shape as a batch of data'
def broadcasted_beta(self):
return self.broadcast_beta(self.beta)
'.. todo:: WRITEME'
def broadcast_beta(self, beta):
    """
    Returns beta, broadcasted to have the same shape as a batch of data
    """
    if self.tie_beta == 'locations':
        def f(x):
            if x == 'c':
                return 0
            return 'x'
        axes = [f(ax) for ax in self.axes]
        rval = beta.dimshuffle(*axes)
    else:
        assert self.tie_beta is None
        if self.nvis is None:
            axes = [0, 1, 2]
            axes.insert(self.axes.index('b'), 'x')
            rval = beta.dimshuffle(*axes)
        else:
            rval = beta.dimshuffle('x', 0)
    self.input_space.validate(rval)
    return rval
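A sketch of the broadcasting pattern built here for the tie_beta == 'locations' case; the axes and sizes are hypothetical, and a NumPy reshape stands in for Theano's dimshuffle:

import numpy as np

axes = ('b', 0, 1, 'c')                              # hypothetical topological axis order
beta = np.arange(1.0, 5.0)                           # one precision per channel, shape (4,)

pattern = [0 if ax == 'c' else 'x' for ax in axes]   # ['x', 'x', 'x', 0], as built by f()

# dimshuffle(*pattern) inserts broadcastable axes everywhere except 'c':
broadcasted = beta.reshape(1, 1, 1, -1)
batch = np.ones((2, 3, 3, 4))                        # (b, 0, 1, c) batch of data
print((batch * broadcasted).shape)                   # (2, 3, 3, 4)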
'.. todo:: WRITEME'
def init_inpainting_state(self, V, drop_mask, noise=False, return_unmasked=False):
    """
    for Vv, drop_mask_v in get_debug_values(V, drop_mask):
        assert Vv.ndim == 4
        assert drop_mask_v.ndim in [3, 4]
        for i in xrange(drop_mask.ndim):
            if Vv.shape[i] != drop_mask_v.shape[i]:
                print(Vv.shape)
                print(drop_mask_v.shape)
                assert False
    """
    unmasked = self.broadcasted_mu()
    if drop_mask is None:
        assert not noise
        assert not return_unmasked
        return unmasked
    masked_mu = unmasked * drop_mask
    if not hasattr(self, 'learn_init_inpainting_state'):
        self.learn_init_inpainting_state = True
    if not self.learn_init_inpainting_state:
        masked_mu = block_gradient(masked_mu)
    masked_mu.name = 'masked_mu'
    if noise:
        theano_rng = make_theano_rng(None, 42, which_method='binomial')
        unmasked = theano_rng.normal(avg=0.0, std=1.0, size=masked_mu.shape, dtype=masked_mu.dtype)
        masked_mu = unmasked * drop_mask
        masked_mu.name = 'masked_noise'
    masked_V = V * (1 - drop_mask)
    rval = masked_mu + masked_V
    rval.name = 'init_inpainting_state'
    if return_unmasked:
        return rval, unmasked
    return rval
'.. todo:: WRITEME'
def expected_energy_term(self, state, average, state_below=None, average_below=None):
    assert state_below is None
    assert average_below is None
    self.space.validate(state)
    if average:
        raise NotImplementedError(str(type(self)) +
                                  " doesn't support integrating out variational parameters yet.")
    else:
        rval = 0.5 * (self.beta * T.sqr(state - self.mu)).sum(axis=self.axes_to_sum)
    assert rval.ndim == 1
    return rval
'.. todo:: WRITEME'
def inpaint_update(self, state_above, layer_above, drop_mask=None, V=None, return_unmasked=False):
    msg = layer_above.downward_message(state_above)
    mu = self.broadcasted_mu()
    z = msg + mu
    z.name = 'inpainting_z_[unknown_iter]'
    if drop_mask is not None:
        rval = drop_mask * z + (1 - drop_mask) * V
    else:
        rval = z
    rval.name = 'inpainted_V[unknown_iter]'
    if return_unmasked:
        return rval, z
    return rval
'.. todo:: WRITEME'
def sample(self, state_below=None, state_above=None, layer_above=None, theano_rng=None):
    assert state_below is None
    msg = layer_above.downward_message(state_above)
    mu = self.mu
    z = msg + mu
    rval = theano_rng.normal(size=z.shape, avg=z, dtype=z.dtype, std=1.0 / T.sqrt(self.beta))
    return rval
'.. todo:: WRITEME'
def recons_cost(self, V, V_hat_unmasked, drop_mask=None, use_sum=False):
return self._recons_cost(V=V, V_hat_unmasked=V_hat_unmasked, drop_mask=drop_mask, use_sum=use_sum, beta=self.beta)
'.. todo:: WRITEME'
def _recons_cost(self, V, V_hat_unmasked, beta, drop_mask=None, use_sum=False):
    V_hat = V_hat_unmasked
    assert V.ndim == V_hat.ndim
    # broadcast the beta that was passed in (it may differ from self.beta, e.g. for ensembles)
    beta = self.broadcast_beta(beta)
    unmasked_cost = 0.5 * beta * T.sqr(V - V_hat) - 0.5 * T.log(beta / (2 * np.pi))
    assert unmasked_cost.ndim == V_hat.ndim
    if drop_mask is None:
        masked_cost = unmasked_cost
    else:
        masked_cost = drop_mask * unmasked_cost
    if use_sum:
        return masked_cost.mean(axis=0).sum()
    return masked_cost.mean()
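For reference, the per-element expression above is the negative log density of a Gaussian with precision beta (beta = 1/sigma^2); a quick NumPy check with made-up numbers:

import numpy as np

beta, v, v_hat = 4.0, 0.3, 0.5
cost = 0.5 * beta * (v - v_hat) ** 2 - 0.5 * np.log(beta / (2 * np.pi))

sigma = 1.0 / np.sqrt(beta)
log_density = -0.5 * np.log(2 * np.pi * sigma ** 2) - (v - v_hat) ** 2 / (2 * sigma ** 2)
print(np.isclose(cost, -log_density))   # True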
'.. todo:: WRITEME'
def upward_state(self, total_state):
    if self.nvis is None and total_state.ndim != 4:
        raise ValueError('total_state should have 4 dimensions, has ' + str(total_state.ndim))
    assert total_state is not None
    V = total_state
    self.input_space.validate(V)
    upward_state = (V - self.broadcasted_mu()) * self.broadcasted_beta()
    return upward_state
'.. todo:: WRITEME'
def make_state(self, num_examples, numpy_rng):
    shape = [num_examples]
    if self.nvis is None:
        rows, cols = self.space.shape
        channels = self.space.num_channels
        shape.append(rows)
        shape.append(cols)
        shape.append(channels)
    else:
        shape.append(self.nvis)
    sample = numpy_rng.randn(*shape)
    sample *= 1.0 / np.sqrt(self.beta.get_value())
    sample += self.mu.get_value()
    rval = sharedX(sample, name='v_sample_shared')
    return rval
'.. todo:: WRITEME'
def install_presynaptic_outputs(self, outputs_dict, batch_size):
outputs_dict['output_V_weighted_pred_sum'] = self.space.make_shared_batch(batch_size)
'.. todo:: WRITEME'
def ensemble_prediction(self, symbolic, outputs_dict, ensemble):
    """
    Output a symbolic expression for V_hat_unmasked based on taking the
    geometric mean over the ensemble and renormalizing.
    n - 1 members of the ensemble have modified outputs_dict and the nth
    gives its prediction in "symbolic". The parameters for the nth one
    are currently loaded in the model.
    """
    weighted_pred_sum = outputs_dict['output_V_weighted_pred_sum'] + self.broadcasted_beta() * symbolic
    beta_sum = sum(ensemble.get_ensemble_variants(self.beta))
    unmasked_V_hat = weighted_pred_sum / self.broadcast_beta(beta_sum)
    return unmasked_V_hat
'.. todo:: WRITEME'
def ensemble_recons_cost(self, V, V_hat_unmasked, drop_mask=None, use_sum=False, ensemble=None):
    beta = sum(ensemble.get_ensemble_variants(self.beta)) / ensemble.num_copies
    return self._recons_cost(V=V, V_hat_unmasked=V_hat_unmasked, beta=beta,
                             drop_mask=drop_mask, use_sum=use_sum)
'.. todo:: WRITEME'
def broadcasted_bias(self):
    assert self.b.ndim == 1
    shuffle = ['x'] * 4
    shuffle[self.output_axes.index('c')] = 0
    return self.b.dimshuffle(*shuffle)
'.. todo:: WRITEME'
def get_total_state_space(self):
return CompositeSpace((self.h_space, self.output_space))
'.. todo:: WRITEME'
def set_input_space(self, space):
    """ Note: this resets parameters! """
    if not isinstance(space, Conv2DSpace):
        raise TypeError('ConvMaxPool can only act on a Conv2DSpace, but received ' +
                        str(type(space)) + ' as input.')
    self.input_space = space
    self.input_rows, self.input_cols = space.shape
    self.input_channels = space.num_channels
    if self.border_mode == 'valid':
        self.h_rows = self.input_rows - self.kernel_rows + 1
        self.h_cols = self.input_cols - self.kernel_cols + 1
    else:
        assert self.border_mode == 'full'
        self.h_rows = self.input_rows + self.kernel_rows - 1
        self.h_cols = self.input_cols + self.kernel_cols - 1
    if not (self.h_rows % self.pool_rows == 0):
        raise ValueError('h_rows = %d, pool_rows = %d. Should be divisible but remainder is %d' %
                         (self.h_rows, self.pool_rows, self.h_rows % self.pool_rows))
    assert self.h_cols % self.pool_cols == 0
    self.h_space = Conv2DSpace(shape=(self.h_rows, self.h_cols),
                               num_channels=self.output_channels, axes=self.output_axes)
    self.output_space = Conv2DSpace(shape=(self.h_rows / self.pool_rows, self.h_cols / self.pool_cols),
                                    num_channels=self.output_channels, axes=self.output_axes)
    logger.info('{0}: detector shape: {1} pool shape: {2}'.format(self.layer_name,
                                                                  self.h_space.shape,
                                                                  self.output_space.shape))
    if tuple(self.output_axes) == ('b', 0, 1, 'c'):
        self.max_pool = max_pool_b01c
    elif tuple(self.output_axes) == ('b', 'c', 0, 1):
        self.max_pool = max_pool
    else:
        raise NotImplementedError()
    if self.irange is not None:
        self.transformer = make_random_conv2D(self.irange, input_space=space,
                                              output_space=self.h_space,
                                              kernel_shape=(self.kernel_rows, self.kernel_cols),
                                              batch_size=self.dbm.batch_size,
                                              border_mode=self.border_mode, rng=self.dbm.rng)
    else:
        self.transformer = make_sparse_random_conv2D(self.sparse_init, input_space=space,
                                                     output_space=self.h_space,
                                                     kernel_shape=(self.kernel_rows, self.kernel_cols),
                                                     batch_size=self.dbm.batch_size,
                                                     border_mode=self.border_mode, rng=self.dbm.rng)
    self.transformer._filters.name = self.layer_name + '_W'
    W, = self.transformer.get_params()
    assert W.name is not None
    if self.center:
        p_ofs, h_ofs = self.init_mf_state()
        self.p_offset = sharedX(self.output_space.get_origin(), 'p_offset')
        self.h_offset = sharedX(self.h_space.get_origin(), 'h_offset')
        f = function([], updates={self.p_offset: p_ofs[0, :, :, :], self.h_offset: h_ofs[0, :, :, :]})
        f()
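A small standalone sketch (hypothetical helper, not part of the class) of the detector and pooling shape arithmetic above for the two border modes:

def conv_pool_shapes(input_rows, input_cols, kernel_rows, kernel_cols,
                     pool_rows, pool_cols, border_mode='valid'):
    # same arithmetic as set_input_space above
    if border_mode == 'valid':
        h_rows = input_rows - kernel_rows + 1
        h_cols = input_cols - kernel_cols + 1
    else:  # 'full'
        h_rows = input_rows + kernel_rows - 1
        h_cols = input_cols + kernel_cols - 1
    assert h_rows % pool_rows == 0 and h_cols % pool_cols == 0
    return (h_rows, h_cols), (h_rows // pool_rows, h_cols // pool_cols)

print(conv_pool_shapes(32, 32, 5, 5, 2, 2, 'valid'))   # ((28, 28), (14, 14))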
'.. todo:: WRITEME'
def get_params(self):
    assert self.b.name is not None
    W, = self.transformer.get_params()
    assert W.name is not None
    return [W, self.b]
'.. todo:: WRITEME'
def state_to_b01c(self, state):
    if tuple(self.output_axes) == ('b', 0, 1, 'c'):
        return state
    return [Conv2DSpace.convert(elem, self.output_axes, ('b', 0, 1, 'c')) for elem in state]
'.. todo:: WRITEME'
def get_range_rewards(self, state, coeffs):
    rval = 0.0
    if self.pool_rows == 1 and self.pool_cols == 1:
        assert len(state) == 2
        assert isinstance(coeffs, float)
        _, state = state
        state = [state]
        coeffs = [coeffs]
    else:
        assert all([len(elem) == 2 for elem in [state, coeffs]])
    for s, c in safe_zip(state, coeffs):
        if c == 0.0:
            continue
        assert self.h_space.axes == ('b', 'c', 0, 1)
        assert self.output_space.axes == ('b', 'c', 0, 1)
        mx = s.max(axis=3).max(axis=2).max(axis=0)
        assert hasattr(mx.owner.op, 'grad')
        mn = s.min(axis=3).max(axis=2).max(axis=0)
        assert hasattr(mn.owner.op, 'grad')
        assert mx.ndim == 1
        assert mn.ndim == 1
        r = mx - mn
        rval += (1.0 - r).mean() * c
    return rval
'.. todo:: WRITEME'
def get_l1_act_cost(self, state, target, coeff, eps):
    """
    target: if pools contain more than one element, should be a list with
    two elements. the first element is for the pooling units and
    the second for the detector units.
    """
    rval = 0.0
    if self.pool_rows == 1 and self.pool_cols == 1:
        assert len(state) == 2
        assert isinstance(target, float)
        assert isinstance(coeff, float)
        _, state = state
        state = [state]
        target = [target]
        coeff = [coeff]
        if eps is None:
            eps = 0.0
        eps = [eps]
    else:
        if eps is None:
            eps = [0.0, 0.0]
        assert all([len(elem) == 2 for elem in [state, target, coeff]])
        p_target, h_target = target
        if h_target > p_target and (coeff[0] != 0.0 and coeff[1] != 0.0):
            warnings.warn('Do you really want to regularize the detector units to be more active than the pooling units?')
    for s, t, c, e in safe_zip(state, target, coeff, eps):
        if c == 0.0:
            continue
        m = s.mean(axis=[ax for ax in range(4) if self.output_axes[ax] != 'c'])
        assert m.ndim == 1
        rval += T.maximum(abs(m - t) - e, 0.0).mean() * c
    return rval
'.. todo:: WRITEME'
def get_lr_scalers(self):
    if self.scale_by_sharing:
        h_rows, h_cols = self.h_space.shape
        num_h = float(h_rows * h_cols)
        return OrderedDict([(self.transformer._filters, 1.0 / num_h), (self.b, 1.0 / num_h)])
    else:
        return OrderedDict()
'.. todo:: WRITEME'
def upward_state(self, total_state):
    p, h = total_state
    if not hasattr(self, 'center'):
        self.center = False
    if self.center:
        p -= self.p_offset
        h -= self.h_offset
    return p
'.. todo:: WRITEME'
def downward_state(self, total_state):
    p, h = total_state
    if not hasattr(self, 'center'):
        self.center = False
    if self.center:
        p -= self.p_offset
        h -= self.h_offset
    return h
'.. todo:: WRITEME'
def get_monitoring_channels_from_state(self, state):
    P, H = state
    if tuple(self.output_axes) == ('b', 0, 1, 'c'):
        p_max = P.max(axis=(0, 1, 2))
        p_min = P.min(axis=(0, 1, 2))
        p_mean = P.mean(axis=(0, 1, 2))
    else:
        assert tuple(self.output_axes) == ('b', 'c', 0, 1)
        p_max = P.max(axis=(0, 2, 3))
        p_min = P.min(axis=(0, 2, 3))
        p_mean = P.mean(axis=(0, 2, 3))
    p_range = p_max - p_min
    rval = {'p_max_max': p_max.max(), 'p_max_mean': p_max.mean(), 'p_max_min': p_max.min(),
            'p_min_max': p_min.max(), 'p_min_mean': p_min.mean(), 'p_min_min': p_min.min(),
            'p_range_max': p_range.max(), 'p_range_mean': p_range.mean(), 'p_range_min': p_range.min(),
            'p_mean_max': p_mean.max(), 'p_mean_mean': p_mean.mean(), 'p_mean_min': p_mean.min()}
    return rval
'.. todo:: WRITEME'
def get_weight_decay(self, coeffs):
    W, = self.transformer.get_params()
    return coeffs * T.sqr(W).sum()
'.. todo:: WRITEME'
def mf_update(self, state_below, state_above, layer_above=None, double_weights=False, iter_name=None):
    self.input_space.validate(state_below)
    if iter_name is None:
        iter_name = 'anon'
    if state_above is not None:
        assert layer_above is not None
        msg = layer_above.downward_message(state_above)
        msg.name = 'msg_from_' + layer_above.layer_name + '_to_' + self.layer_name + '[' + iter_name + ']'
    else:
        msg = None
    if not hasattr(state_below, 'ndim'):
        raise TypeError('state_below should be a TensorType, got ' + str(state_below) +
                        ' of type ' + str(type(state_below)))
    if state_below.ndim != 4:
        raise ValueError('state_below should have ndim 4, has ' + str(state_below.ndim))
    if double_weights:
        state_below = 2.0 * state_below
        state_below.name = self.layer_name + '_' + iter_name + '_2state'
    z = self.transformer.lmul(state_below) + self.broadcasted_bias()
    if self.layer_name is not None and iter_name is not None:
        z.name = self.layer_name + '_' + iter_name + '_z'
    p, h = self.max_pool(z, (self.pool_rows, self.pool_cols), msg)
    p.name = self.layer_name + '_p_' + iter_name
    h.name = self.layer_name + '_h_' + iter_name
    return p, h
'.. todo:: WRITEME'
def sample(self, state_below=None, state_above=None, layer_above=None, theano_rng=None):
    if state_above is not None:
        msg = layer_above.downward_message(state_above)
        try:
            self.output_space.validate(msg)
        except TypeError as e:
            reraise_as(TypeError(str(type(layer_above)) +
                                 '.downward_message gave something that was not the right type: ' + str(e)))
    else:
        msg = None
    z = self.transformer.lmul(state_below) + self.broadcasted_bias()
    p, h, p_sample, h_sample = self.max_pool(z, (self.pool_rows, self.pool_cols), msg, theano_rng)
    return p_sample, h_sample
'.. todo:: WRITEME'
def downward_message(self, downward_state):
    self.h_space.validate(downward_state)
    return self.transformer.lmul_T(downward_state)
'.. todo:: WRITEME'
def set_batch_size(self, batch_size):
self.transformer.set_batch_size(batch_size)
'.. todo:: WRITEME'
def get_weights_topo(self):
    outp, inp, rows, cols = range(4)
    raw = self.transformer._filters.get_value()
    return np.transpose(raw, (outp, rows, cols, inp))
'.. todo:: WRITEME'
def init_mf_state(self):
    default_z = self.broadcasted_bias()
    shape = {'b': self.dbm.batch_size,
             0: self.h_space.shape[0],
             1: self.h_space.shape[1],
             'c': self.h_space.num_channels}
    default_z += T.alloc(*([0.0] + [shape[elem] for elem in self.h_space.axes])).astype(default_z.dtype)
    assert default_z.ndim == 4
    p, h = self.max_pool(z=default_z, pool_shape=(self.pool_rows, self.pool_cols))
    return p, h
'.. todo:: WRITEME'
def make_state(self, num_examples, numpy_rng):
    """
    Returns a shared variable containing an actual state
    (not a mean field state) for this variable.
    """
    t1 = time.time()
    empty_input = self.h_space.get_origin_batch(self.dbm.batch_size)
    h_state = sharedX(empty_input)
    default_z = T.zeros_like(h_state) + self.broadcasted_bias()
    theano_rng = make_theano_rng(None, numpy_rng.randint(2 ** 16), which_method='binomial')
    p_exp, h_exp, p_sample, h_sample = self.max_pool(z=default_z,
                                                     pool_shape=(self.pool_rows, self.pool_cols),
                                                     theano_rng=theano_rng)
    p_state = sharedX(self.output_space.get_origin_batch(self.dbm.batch_size))
    t2 = time.time()
    f = function([], updates=[(p_state, p_sample), (h_state, h_sample)])
    t3 = time.time()
    f()
    t4 = time.time()
    logger.info('{0}.make_state took {1}'.format(self, t4 - t1))
    logger.info('\tcompose time: {0}'.format(t2 - t1))
    logger.info('\tcompile time: {0}'.format(t3 - t2))
    logger.info('\texecute time: {0}'.format(t4 - t3))
    p_state.name = 'p_sample_shared'
    h_state.name = 'h_sample_shared'
    return p_state, h_state
'.. todo:: WRITEME'
def expected_energy_term(self, state, average, state_below, average_below):
    self.input_space.validate(state_below)
    downward_state = self.downward_state(state)
    self.h_space.validate(downward_state)
    bias_term = (downward_state * self.broadcasted_bias()).sum(axis=(1, 2, 3))
    weights_term = (self.transformer.lmul(state_below) * downward_state).sum(axis=(1, 2, 3))
    rval = -bias_term - weights_term
    assert rval.ndim == 1
    return rval
'.. todo:: WRITEME'
def broadcasted_bias(self):
    if self.b.ndim != 1:
        raise NotImplementedError()
    shuffle = ['x'] * 4
    shuffle[self.output_axes.index('c')] = 0
    return self.b.dimshuffle(*shuffle)
'.. todo:: WRITEME'
def get_total_state_space(self):
return CompositeSpace((self.h_space, self.output_space))
'.. todo:: WRITEME'
def set_input_space(self, space):
    """ Note: this resets parameters! """
    setup_detector_layer_c01b(layer=self, input_space=space, rng=self.dbm.rng)
    if not (tuple(space.axes) == ('c', 0, 1, 'b')):
        raise AssertionError("You're not using c01b inputs. Ian is enforcing c01b inputs while developing his pipeline to make sure it runs at maximal speed. If you really don't want to use c01b inputs, you can remove this check and things should work. If they don't work it's only because they're not tested.")
    if self.dummy_channels != 0:
        raise NotImplementedError(str(type(self)) + ' does not support adding dummy channels for cuda-convnet compatibility yet, you must implement that feature or use inputs with <=3 channels or a multiple of 4 channels')
    self.input_rows = self.input_space.shape[0]
    self.input_cols = self.input_space.shape[1]
    self.h_rows = self.detector_space.shape[0]
    self.h_cols = self.detector_space.shape[1]
    if not (self.h_rows % self.pool_rows == 0):
        raise ValueError(self.layer_name +
                         ': h_rows = %d, pool_rows = %d. Should be divisible but remainder is %d' %
                         (self.h_rows, self.pool_rows, self.h_rows % self.pool_rows))
    assert self.h_cols % self.pool_cols == 0
    self.h_space = Conv2DSpace(shape=(self.h_rows, self.h_cols),
                               num_channels=self.output_channels, axes=self.output_axes)
    self.output_space = Conv2DSpace(shape=(self.h_rows / self.pool_rows, self.h_cols / self.pool_cols),
                                    num_channels=self.output_channels, axes=self.output_axes)
    logger.info('{0} : detector shape: {1} pool shape: {2}'.format(self.layer_name,
                                                                   self.h_space.shape,
                                                                   self.output_space.shape))
    assert tuple(self.output_axes) == ('c', 0, 1, 'b')
    self.max_pool = max_pool_c01b
    if self.center:
        p_ofs, h_ofs = self.init_mf_state()
        self.p_offset = sharedX(self.output_space.get_origin(), 'p_offset')
        self.h_offset = sharedX(self.h_space.get_origin(), 'h_offset')
        f = function([], updates={self.p_offset: p_ofs[:, :, :, 0], self.h_offset: h_ofs[:, :, :, 0]})
        f()
'.. todo:: WRITEME'
def get_params(self):
    assert self.b.name is not None
    W, = self.transformer.get_params()
    assert W.name is not None
    return [W, self.b]
'.. todo:: WRITEME'
def state_to_b01c(self, state):
    if tuple(self.output_axes) == ('b', 0, 1, 'c'):
        return state
    return [Conv2DSpace.convert(elem, self.output_axes, ('b', 0, 1, 'c')) for elem in state]
'.. todo:: WRITEME'
def get_range_rewards(self, state, coeffs):
    rval = 0.0
    if self.pool_rows == 1 and self.pool_cols == 1:
        assert len(state) == 2
        assert isinstance(coeffs, float)
        _, state = state
        state = [state]
        coeffs = [coeffs]
    else:
        assert all([len(elem) == 2 for elem in [state, coeffs]])
    for s, c in safe_zip(state, coeffs):
        if c == 0.0:
            continue
        assert self.h_space.axes == ('b', 'c', 0, 1)
        assert self.output_space.axes == ('b', 'c', 0, 1)
        mx = s.max(axis=3).max(axis=2).max(axis=0)
        assert hasattr(mx.owner.op, 'grad')
        mn = s.min(axis=3).max(axis=2).max(axis=0)
        assert hasattr(mn.owner.op, 'grad')
        assert mx.ndim == 1
        assert mn.ndim == 1
        r = mx - mn
        rval += (1.0 - r).mean() * c
    return rval
"""
.. todo:: WRITEME properly

Parameters
----------
state : WRITEME
target : WRITEME
    if pools contain more than one element, should be a list with two elements.
    the first element is for the pooling units and the second for the detector units.
coeff : WRITEME
eps : WRITEME
"""
def get_l1_act_cost(self, state, target, coeff, eps):
    rval = 0.0
    if self.pool_rows == 1 and self.pool_cols == 1:
        assert len(state) == 2
        assert isinstance(target, float)
        assert isinstance(coeff, float)
        _, state = state
        state = [state]
        target = [target]
        coeff = [coeff]
        if eps is None:
            eps = 0.0
        eps = [eps]
    else:
        if eps is None:
            eps = [0.0, 0.0]
        assert all([len(elem) == 2 for elem in [state, target, coeff]])
        p_target, h_target = target
        if h_target > p_target and (coeff[0] != 0.0 and coeff[1] != 0.0):
            warnings.warn('Do you really want to regularize the detector units to be more active than the pooling units?')
    for s, t, c, e in safe_zip(state, target, coeff, eps):
        if c == 0.0:
            continue
        m = s.mean(axis=[ax for ax in range(4) if self.output_axes[ax] != 'c'])
        assert m.ndim == 1
        rval += T.maximum(abs(m - t) - e, 0.0).mean() * c
    return rval
'.. todo:: WRITEME'
def get_lr_scalers(self):
    rval = OrderedDict()
    if self.scale_by_sharing:
        h_rows, h_cols = self.h_space.shape
        num_h = float(h_rows * h_cols)
        rval[self.transformer._filters] = 1.0 / num_h
        rval[self.b] = 1.0 / num_h
    return rval
'.. todo:: WRITEME'
def upward_state(self, total_state):
    p, h = total_state
    if not hasattr(self, 'center'):
        self.center = False
    if self.center:
        p -= self.p_offset
        h -= self.h_offset
    return p
'.. todo:: WRITEME'
def downward_state(self, total_state):
    p, h = total_state
    if not hasattr(self, 'center'):
        self.center = False
    if self.center:
        p -= self.p_offset
        h -= self.h_offset
    return h
'.. todo:: WRITEME'
def get_monitoring_channels_from_state(self, state):
    P, H = state
    axes = tuple([i for i, ax in enumerate(self.output_axes) if ax != 'c'])
    p_max = P.max(axis=axes)
    p_min = P.min(axis=axes)
    p_mean = P.mean(axis=axes)
    p_range = p_max - p_min
    rval = {'p_max_max': p_max.max(), 'p_max_mean': p_max.mean(), 'p_max_min': p_max.min(),
            'p_min_max': p_min.max(), 'p_min_mean': p_min.mean(), 'p_min_min': p_min.min(),
            'p_range_max': p_range.max(), 'p_range_mean': p_range.mean(), 'p_range_min': p_range.min(),
            'p_mean_max': p_mean.max(), 'p_mean_mean': p_mean.mean(), 'p_mean_min': p_mean.min()}
    return rval
'.. todo:: WRITEME'
def get_weight_decay(self, coeffs):
    W, = self.transformer.get_params()
    return coeffs * T.sqr(W).sum()
'.. todo:: WRITEME'
def mf_update(self, state_below, state_above, layer_above=None, double_weights=False, iter_name=None):
    self.input_space.validate(state_below)
    if iter_name is None:
        iter_name = 'anon'
    if state_above is not None:
        assert layer_above is not None
        msg = layer_above.downward_message(state_above)
        msg.name = 'msg_from_' + layer_above.layer_name + '_to_' + self.layer_name + '[' + iter_name + ']'
    else:
        msg = None
    if not hasattr(state_below, 'ndim'):
        raise TypeError('state_below should be a TensorType, got ' + str(state_below) +
                        ' of type ' + str(type(state_below)))
    if state_below.ndim != 4:
        raise ValueError('state_below should have ndim 4, has ' + str(state_below.ndim))
    if double_weights:
        state_below = 2.0 * state_below
        state_below.name = self.layer_name + '_' + iter_name + '_2state'
    z = self.transformer.lmul(state_below) + self.broadcasted_bias()
    if self.layer_name is not None and iter_name is not None:
        z.name = self.layer_name + '_' + iter_name + '_z'
    p, h = self.max_pool(z, (self.pool_rows, self.pool_cols), msg)
    p.name = self.layer_name + '_p_' + iter_name
    h.name = self.layer_name + '_h_' + iter_name
    return p, h
'.. todo:: WRITEME'
def sample(self, state_below=None, state_above=None, layer_above=None, theano_rng=None):
    raise NotImplementedError('Need to update for C01B')
    # unreachable: pre-C01B implementation retained below
    if state_above is not None:
        msg = layer_above.downward_message(state_above)
        try:
            self.output_space.validate(msg)
        except TypeError as e:
            reraise_as(TypeError(str(type(layer_above)) +
                                 '.downward_message gave something that was not the right type: ' + str(e)))
    else:
        msg = None
    z = self.transformer.lmul(state_below) + self.broadcasted_bias()
    p, h, p_sample, h_sample = self.max_pool(z, (self.pool_rows, self.pool_cols), msg, theano_rng)
    return p_sample, h_sample
'.. todo:: WRITEME'
def downward_message(self, downward_state):
    self.h_space.validate(downward_state)
    return self.transformer.lmul_T(downward_state)
'.. todo:: WRITEME'
def set_batch_size(self, batch_size):
self.transformer.set_batch_size(batch_size)
'.. todo:: WRITEME'
def get_weights_topo(self):
return self.transformer.get_weights_topo()
'.. todo:: WRITEME'
def init_mf_state(self):
    default_z = self.broadcasted_bias()
    shape = {'b': self.dbm.batch_size,
             0: self.h_space.shape[0],
             1: self.h_space.shape[1],
             'c': self.h_space.num_channels}
    default_z += T.alloc(*([0.0] + [shape[elem] for elem in self.h_space.axes])).astype(default_z.dtype)
    assert default_z.ndim == 4
    p, h = self.max_pool(z=default_z, pool_shape=(self.pool_rows, self.pool_cols))
    return p, h
'.. todo:: WRITEME properly Returns a shared variable containing an actual state (not a mean field state) for this variable.'
def make_state(self, num_examples, numpy_rng):
    raise NotImplementedError('Need to update for C01B')
    # unreachable: pre-C01B implementation retained below
    t1 = time.time()
    empty_input = self.h_space.get_origin_batch(self.dbm.batch_size)
    h_state = sharedX(empty_input)
    default_z = T.zeros_like(h_state) + self.broadcasted_bias()
    theano_rng = make_theano_rng(None, numpy_rng.randint(2 ** 16), which_method='binomial')
    p_exp, h_exp, p_sample, h_sample = self.max_pool(z=default_z,
                                                     pool_shape=(self.pool_rows, self.pool_cols),
                                                     theano_rng=theano_rng)
    p_state = sharedX(self.output_space.get_origin_batch(self.dbm.batch_size))
    t2 = time.time()
    f = function([], updates=[(p_state, p_sample), (h_state, h_sample)])
    t3 = time.time()
    f()
    t4 = time.time()
    logger.info('{0}.make_state took {1}'.format(self, t4 - t1))
    logger.info('\tcompose time: {0}'.format(t2 - t1))
    logger.info('\tcompile time: {0}'.format(t3 - t2))
    logger.info('\texecute time: {0}'.format(t4 - t3))
    p_state.name = 'p_sample_shared'
    h_state.name = 'h_sample_shared'
    return p_state, h_state
'.. todo:: WRITEME'
def expected_energy_term(self, state, average, state_below, average_below):
    raise NotImplementedError('Need to update for C01B')
    # unreachable: pre-C01B implementation retained below
    self.input_space.validate(state_below)
    downward_state = self.downward_state(state)
    self.h_space.validate(downward_state)
    bias_term = (downward_state * self.broadcasted_bias()).sum(axis=(1, 2, 3))
    weights_term = (self.transformer.lmul(state_below) * downward_state).sum(axis=(1, 2, 3))
    rval = -bias_term - weights_term
    assert rval.ndim == 1
    return rval
'.. todo:: WRITEME'
def get_weights(self):
    if self.requires_reformat:
        raise NotImplementedError()
    W, = self.transformer.get_params()
    W = W.get_value()
    x = input('multiply by beta?')
    if x == 'y':
        beta = self.input_layer.beta.get_value()
        return (W.T * beta).T
    assert x == 'n'
    return W
'.. todo:: WRITEME'
def set_weights(self, weights):
    raise NotImplementedError('beta would make get_weights for visualization not correspond to set_weights')
    # unreachable below
    W, = self.transformer.get_params()
    W.set_value(weights)
'.. todo:: WRITEME'
def set_biases(self, biases, recenter=False):
    self.b.set_value(biases)
    if recenter:
        assert self.center
        if self.pool_size != 1:
            raise NotImplementedError()
        self.offset.set_value(sigmoid_numpy(self.b.get_value()))
'.. todo:: WRITEME'
def get_biases(self):
return (self.b.get_value() - self.beta_bias().eval())
'.. todo:: WRITEME'
def sample(self, state_below=None, state_above=None, layer_above=None, theano_rng=None):
    raise NotImplementedError('need to account for beta')
    # unreachable below
    if self.copies != 1:
        raise NotImplementedError()
    if theano_rng is None:
        raise ValueError('theano_rng is required; it just defaults to None so that it may appear after layer_above / state_above in the list.')
    if state_above is not None:
        msg = layer_above.downward_message(state_above)
    else:
        msg = None
    if self.requires_reformat:
        state_below = self.input_space.format_as(state_below, self.desired_space)
    z = self.transformer.lmul(state_below) + self.b
    p, h, p_sample, h_sample = max_pool_channels(z, self.pool_size, msg, theano_rng)
    return p_sample, h_sample