'.. todo:: WRITEME'
def get_weights_topo(self):
    if not isinstance(self.input_space, Conv2DSpace):
        raise NotImplementedError()
    W, = self.transformer.get_params()
    W = W.T
    W = W.reshape((self.detector_layer_dim, self.input_space.shape[0],
                   self.input_space.shape[1], self.input_space.nchannels))
    W = Conv2DSpace.convert(W, self.input_space.axes, ('b', 0, 1, 'c'))
    return function([], W)()
'.. todo:: WRITEME'
def upward_state(self, total_state):
return total_state
'.. todo:: WRITEME'
def downward_state(self, total_state):
return total_state
'.. todo:: WRITEME'
def get_monitoring_channels(self):
    W, = self.transformer.get_params()
    assert W.ndim == 2
    sq_W = T.sqr(W)
    row_norms = T.sqrt(sq_W.sum(axis=1))
    col_norms = T.sqrt(sq_W.sum(axis=0))
    return OrderedDict([('row_norms_min', row_norms.min()),
                        ('row_norms_mean', row_norms.mean()),
                        ('row_norms_max', row_norms.max()),
                        ('col_norms_min', col_norms.min()),
                        ('col_norms_mean', col_norms.mean()),
                        ('col_norms_max', col_norms.max())])
'.. todo:: WRITEME'
def get_monitoring_channels_from_state(self, state):
    P = state
    rval = OrderedDict()
    vars_and_prefixes = [(P, '')]
    for var, prefix in vars_and_prefixes:
        v_max = var.max(axis=0)
        v_min = var.min(axis=0)
        v_mean = var.mean(axis=0)
        v_range = v_max - v_min
        for key, val in [('max_x.max_u', v_max.max()),
                         ('max_x.mean_u', v_max.mean()),
                         ('max_x.min_u', v_max.min()),
                         ('min_x.max_u', v_min.max()),
                         ('min_x.mean_u', v_min.mean()),
                         ('min_x.min_u', v_min.min()),
                         ('range_x.max_u', v_range.max()),
                         ('range_x.mean_u', v_range.mean()),
                         ('range_x.min_u', v_range.min()),
                         ('mean_x.max_u', v_mean.max()),
                         ('mean_x.mean_u', v_mean.mean()),
                         ('mean_x.min_u', v_mean.min())]:
            rval[prefix + key] = val
    return rval
'.. todo:: WRITEME'
def sample(self, state_below=None, state_above=None, layer_above=None, theano_rng=None):
    if theano_rng is None:
        raise ValueError("theano_rng is required; it just defaults to None "
                         "so that it may appear after layer_above / "
                         "state_above in the list.")
    if state_above is not None:
        msg = layer_above.downward_message(state_above)
    else:
        msg = None
    if self.requires_reformat:
        state_below = self.input_space.format_as(state_below,
                                                 self.desired_space)
    z = self.transformer.lmul(state_below) + self.b
    if msg is not None:
        z = z + msg
    on_prob = T.nnet.sigmoid(2.0 * self.beta * z)
    samples = theano_rng.binomial(p=on_prob, n=1, size=on_prob.shape,
                                  dtype=on_prob.dtype) * 2.0 - 1.0
    return samples
'.. todo:: WRITEME'
def downward_message(self, downward_state):
    rval = self.transformer.lmul_T(downward_state)
    if self.requires_reformat:
        rval = self.desired_space.format_as(rval, self.input_space)
    return rval
'.. todo:: WRITEME'
def init_mf_state(self):
    z = T.alloc(0.0, self.dbm.batch_size, self.dim).astype(self.b.dtype) + \
        self.b.dimshuffle('x', 0)
    rval = T.tanh(self.beta * z)
    return rval
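# Why tanh here but sigmoid(2*beta*z) in sample() above: for an Ising unit
# s in {-1, +1} with P(s = +1) = sigmoid(2*beta*z), the expectation is
#     E[s] = (+1)*sigmoid(2*beta*z) + (-1)*(1 - sigmoid(2*beta*z))
#          = 2*sigmoid(2*beta*z) - 1
#          = tanh(beta*z),
# so the mean-field state is exactly the expected value of the sampled state.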
'.. todo:: WRITEME properly Returns a shared variable containing an actual state (not a mean field state) for this variable.'
def make_state(self, num_examples, numpy_rng):
    driver = numpy_rng.uniform(0.0, 1.0, (num_examples, self.dim))
    on_prob = sigmoid_numpy(2.0 * self.beta.get_value() * self.b.get_value())
    sample = 2.0 * (driver < on_prob) - 1.0
    rval = sharedX(sample, name='v_sample_shared')
    return rval
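# A minimal, self-contained numpy sketch of the uniform-driver sampling
# pattern used in make_state above. sigmoid_np is a stand-in for pylearn2's
# sigmoid_numpy helper, and the toy_* values are made up for illustration.
import numpy as np

def sigmoid_np(x):
    return 1.0 / (1.0 + np.exp(-x))

toy_rng = np.random.RandomState(0)
toy_beta = 1.0
toy_b = np.array([0.5, -0.5, 2.0])
toy_on_prob = sigmoid_np(2.0 * toy_beta * toy_b)     # P(s = +1) per unit
toy_driver = toy_rng.uniform(0.0, 1.0, (10000, 3))
toy_sample = 2.0 * (toy_driver < toy_on_prob) - 1.0  # entries are -1. or +1.
# column means approach 2 * on_prob - 1 = tanh(beta * b)
print(toy_sample.mean(axis=0))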
'.. todo:: WRITEME'
def make_symbolic_state(self, num_examples, theano_rng):
    mean = T.nnet.sigmoid(2.0 * self.beta * self.b)
    rval = theano_rng.binomial(size=(num_examples, self.nvis), p=mean)
    rval = 2.0 * rval - 1.0
    return rval
'.. todo:: WRITEME'
def expected_energy_term(self, state, average, state_below, average_below):
    self.input_space.validate(state_below)
    if self.requires_reformat:
        if not isinstance(state_below, tuple):
            for sb in get_debug_values(state_below):
                if sb.shape[0] != self.dbm.batch_size:
                    raise ValueError("self.dbm.batch_size is %d but got "
                                     "shape of %d" % (self.dbm.batch_size,
                                                      sb.shape[0]))
                assert reduce(operator.mul, sb.shape[1:]) == self.input_dim
        state_below = self.input_space.format_as(state_below,
                                                 self.desired_space)
    bias_term = T.dot(state, self.b)
    weights_term = (self.transformer.lmul(state_below) * state).sum(axis=1)
    rval = -bias_term - weights_term
    rval *= self.beta
    assert rval.ndim == 1
    return rval
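# In equation form, the value returned above for each example is
#     beta * ( - state . b  -  (state_below W) . state ),
# i.e. the bias and interaction terms of the energy that involve this
# layer, scaled by the inverse temperature beta.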
'''.. todo:: WRITEME properly

Used to implement TorontoSparsity. Unclear exactly what properties of it
are important or how to implement it for other layers.

Properties it must have:
    output is same kind of data structure (ie, tuple of theano 2-tensors)
    as mf_update.

Properties it probably should have for other layer types:
    An infinitesimal change in state_below or the parameters should cause
    the same sign of change in the output of
    linear_feed_forward_approximation and in mf_update.

    Should not have any non-linearities that cause the gradient to shrink.

    Should disregard top-down feedback.'''
def linear_feed_forward_approximation(self, state_below):
    z = self.beta * (self.transformer.lmul(state_below) + self.b)
    if self.pool_size != 1:
        raise NotImplementedError()
    return z, z
'.. todo:: WRITEME'
def mf_update(self, state_below, state_above, layer_above=None, double_weights=False, iter_name=None):
    self.input_space.validate(state_below)
    if self.requires_reformat:
        if not isinstance(state_below, tuple):
            for sb in get_debug_values(state_below):
                if sb.shape[0] != self.dbm.batch_size:
                    raise ValueError("self.dbm.batch_size is %d but got "
                                     "shape of %d" % (self.dbm.batch_size,
                                                      sb.shape[0]))
                assert reduce(operator.mul, sb.shape[1:]) == self.input_dim
        state_below = self.input_space.format_as(state_below,
                                                 self.desired_space)
    if iter_name is None:
        iter_name = 'anon'
    if state_above is not None:
        assert layer_above is not None
        msg = layer_above.downward_message(state_above)
        msg.name = 'msg_from_' + layer_above.layer_name + '_to_' + \
                   self.layer_name + '[' + iter_name + ']'
    else:
        msg = None
    if double_weights:
        state_below = 2.0 * state_below
        state_below.name = self.layer_name + '_' + iter_name + '_2state'
    z = self.transformer.lmul(state_below) + self.b
    if self.layer_name is not None and iter_name is not None:
        z.name = self.layer_name + '_' + iter_name + '_z'
    if msg is not None:
        z = z + msg
    h = T.tanh(self.beta * z)
    return h
'.. todo:: WRITEME'
def finalize_initialization(self):
    if self.sampling_b_stdev is not None:
        self.noisy_sampling_b = sharedX(
            np.zeros((self.layer_above.dbm.batch_size, self.nvis)))
    updates = OrderedDict()
    updates[self.boltzmann_bias] = self.boltzmann_bias
    updates[self.layer_above.W] = self.layer_above.W
    self.enforce_constraints()
'.. todo:: WRITEME'
def _modify_updates(self, updates):
    beta = self.beta
    if beta in updates:
        updated_beta = updates[beta]
        updates[beta] = T.clip(updated_beta, 1.0, 1000.0)
    if any(constraint is not None for constraint in
           [self.min_ising_b, self.max_ising_b]):
        bmn = self.min_ising_b
        if bmn is None:
            bmn = -1000000.0
        bmx = self.max_ising_b
        if bmx is None:
            bmx = 1000000.0
        wmn_above = self.layer_above.min_ising_W
        if wmn_above is None:
            wmn_above = -1000000.0
        wmx_above = self.layer_above.max_ising_W
        if wmx_above is None:
            wmx_above = 1000000.0
        b = updates[self.boltzmann_bias]
        W_above = updates[self.layer_above.W]
        ising_b = 0.5 * b + 0.25 * W_above.sum(axis=1)
        ising_b = T.clip(ising_b, bmn, bmx)
        ising_W_above = 0.25 * W_above
        ising_W_above = T.clip(ising_W_above, wmn_above, wmx_above)
        bhn = 2.0 * (ising_b - ising_W_above.sum(axis=1))
        updates[self.boltzmann_bias] = bhn
    if self.noisy_sampling_b is not None:
        theano_rng = make_theano_rng(None, self.dbm.rng.randint(2 ** 16),
                                     which_method='normal')
        b = updates[self.boltzmann_bias]
        W_above = updates[self.layer_above.W]
        ising_b = 0.5 * b + 0.25 * W_above.sum(axis=1)
        noisy_sampling_b = theano_rng.normal(
            avg=ising_b.dimshuffle('x', 0),
            std=self.sampling_b_stdev,
            size=self.noisy_sampling_b.shape,
            dtype=ising_b.dtype)
        updates[self.noisy_sampling_b] = noisy_sampling_b
'.. todo:: WRITEME'
def resample_bias_noise(self, batch_size_changed=False):
    if batch_size_changed:
        self.resample_fn = None
    if self.resample_fn is None:
        updates = OrderedDict()
        if self.sampling_b_stdev is not None:
            self.noisy_sampling_b = sharedX(
                np.zeros((self.dbm.batch_size, self.nvis)))
        if self.noisy_sampling_b is not None:
            theano_rng = make_theano_rng(None,
                                         self.dbm.rng.randint(2 ** 16),
                                         which_method='normal')
            b = self.boltzmann_bias
            W_above = self.layer_above.W
            ising_b = 0.5 * b + 0.25 * W_above.sum(axis=1)
            noisy_sampling_b = theano_rng.normal(
                avg=ising_b.dimshuffle('x', 0),
                std=self.sampling_b_stdev,
                size=self.noisy_sampling_b.shape,
                dtype=ising_b.dtype)
            updates[self.noisy_sampling_b] = noisy_sampling_b
        self.resample_fn = function([], updates=updates)
    self.resample_fn()
'.. todo:: WRITEME'
def get_biases(self):
    warnings.warn("BoltzmannIsingVisible.get_biases returns the "
                  "BOLTZMANN biases, is that what we want?")
    return self.boltzmann_bias.get_value()
'.. todo:: WRITEME'
def set_biases(self, biases, recenter=False):
assert False
'.. todo:: WRITEME'
def ising_bias(self, for_sampling=False):
    if for_sampling and self.layer_above.sampling_b_stdev is not None:
        return self.noisy_sampling_b
    return 0.5 * self.boltzmann_bias + 0.25 * self.layer_above.W.sum(axis=1)
'.. todo:: WRITEME'
def ising_bias_numpy(self):
return ((0.5 * self.boltzmann_bias.get_value()) + (0.25 * self.layer_above.W.get_value().sum(axis=1)))
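# Where the 0.5 / 0.25 coefficients come from: substituting h = (s + 1)/2
# (spins s in {-1, +1}) into the Boltzmann energy gives
#     b_i * h_i        = (b_i / 2) * s_i + const
#     W_ij * h_i * h_j = (W_ij / 4) * s_i * s_j
#                        + (W_ij / 4) * s_i + (W_ij / 4) * s_j + const,
# so collecting the linear terms, the equivalent Ising bias of unit i is
#     ising_b_i = 0.5 * b_i + 0.25 * sum_j W_ij
# (the sum running over the weights to the layer above, as in the code
# above), and the Ising couplings are 0.25 * W (see ising_weights below).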
'.. todo:: WRITEME'
def upward_state(self, total_state):
return total_state
'.. todo:: WRITEME'
def get_params(self):
    rval = [self.boltzmann_bias]
    if self.learn_beta:
        rval.append(self.beta)
    return rval
'.. todo:: WRITEME'
def sample(self, state_below=None, state_above=None, layer_above=None, theano_rng=None):
    assert state_below is None
    msg = layer_above.downward_message(state_above, for_sampling=True)
    bias = self.ising_bias(for_sampling=True)
    z = msg + bias
    phi = T.nnet.sigmoid(2.0 * self.beta * z)
    rval = theano_rng.binomial(size=phi.shape, p=phi, dtype=phi.dtype, n=1)
    return rval * 2.0 - 1.0
'.. todo:: WRITEME'
def make_state(self, num_examples, numpy_rng):
    driver = numpy_rng.uniform(0.0, 1.0, (num_examples, self.nvis))
    on_prob = sigmoid_numpy(2.0 * self.beta.get_value() *
                            self.ising_bias_numpy())
    sample = 2.0 * (driver < on_prob) - 1.0
    rval = sharedX(sample, name='v_sample_shared')
    return rval
'.. todo:: WRITEME'
def make_symbolic_state(self, num_examples, theano_rng):
    mean = T.nnet.sigmoid(2.0 * self.beta * self.ising_bias())
    rval = theano_rng.binomial(size=(num_examples, self.nvis), p=mean)
    rval = 2.0 * rval - 1.0
    return rval
'.. todo:: WRITEME'
def mf_update(self, state_above, layer_above):
    msg = layer_above.downward_message(state_above, for_sampling=True)
    bias = self.ising_bias(for_sampling=True)
    z = msg + bias
    rval = T.tanh(self.beta * z)
    return rval
'.. todo:: WRITEME'
def expected_energy_term(self, state, average, state_below=None, average_below=None):
    assert state_below is None
    assert average_below is None
    assert average in [True, False]
    self.space.validate(state)
    rval = -(self.beta * T.dot(state, self.ising_bias()))
    assert rval.ndim == 1
    return rval
'.. todo:: WRITEME'
def get_monitoring_channels(self):
    rval = OrderedDict()
    ising_b = self.ising_bias()
    rval['ising_b_min'] = ising_b.min()
    rval['ising_b_max'] = ising_b.max()
    rval['beta'] = self.beta
    if hasattr(self, 'noisy_sampling_b'):
        rval['noisy_sampling_b_min'] = self.noisy_sampling_b.min()
        rval['noisy_sampling_b_max'] = self.noisy_sampling_b.max()
    return rval
'.. todo:: WRITEME'
def get_lr_scalers(self):
    if not hasattr(self, 'W_lr_scale'):
        self.W_lr_scale = None
    if not hasattr(self, 'b_lr_scale'):
        self.b_lr_scale = None
    if not hasattr(self, 'beta_lr_scale'):
        self.beta_lr_scale = None
    rval = OrderedDict()
    if self.W_lr_scale is not None:
        W = self.W
        rval[W] = self.W_lr_scale
    if self.b_lr_scale is not None:
        rval[self.boltzmann_b] = self.b_lr_scale
    if self.beta_lr_scale is not None:
        rval[self.beta] = self.beta_lr_scale
    return rval
'.. todo:: WRITEME properly Note: this resets parameters!'
def set_input_space(self, space):
    self.input_space = space
    if isinstance(space, VectorSpace):
        self.requires_reformat = False
        self.input_dim = space.dim
    else:
        self.requires_reformat = True
        self.input_dim = space.get_total_dimension()
        self.desired_space = VectorSpace(self.input_dim)
    self.output_space = VectorSpace(self.dim)
    rng = self.dbm.rng
    if self.irange is not None:
        assert self.sparse_init is None
        W = rng.uniform(-self.irange, self.irange,
                        (self.input_dim, self.dim)) * \
            (rng.uniform(0.0, 1.0, (self.input_dim, self.dim))
             < self.include_prob)
    else:
        assert self.sparse_init is not None
        W = np.zeros((self.input_dim, self.dim))
        W *= self.sparse_stdev
    W = sharedX(W)
    W.name = self.layer_name + '_W'
    self.W = W
    self.boltzmann_b = sharedX(np.zeros((self.dim,)) + self.init_bias,
                               name=self.layer_name + '_b')
'.. todo:: WRITEME'
def finalize_initialization(self):
    if self.sampling_b_stdev is not None:
        self.noisy_sampling_b = sharedX(
            np.zeros((self.dbm.batch_size, self.dim)))
    if self.sampling_W_stdev is not None:
        self.noisy_sampling_W = sharedX(
            np.zeros((self.input_dim, self.dim)), 'noisy_sampling_W')
    updates = OrderedDict()
    updates[self.boltzmann_b] = self.boltzmann_b
    updates[self.W] = self.W
    if self.layer_above is not None:
        updates[self.layer_above.W] = self.layer_above.W
    self.enforce_constraints()
'.. todo:: WRITEME'
def _modify_updates(self, updates):
    beta = self.beta
    if beta in updates:
        updated_beta = updates[beta]
        updates[beta] = T.clip(updated_beta, 1.0, 1000.0)
    if any(constraint is not None for constraint in
           [self.min_ising_b, self.max_ising_b,
            self.min_ising_W, self.max_ising_W]):
        bmn = self.min_ising_b
        if bmn is None:
            bmn = -1000000.0
        bmx = self.max_ising_b
        if bmx is None:
            bmx = 1000000.0
        wmn = self.min_ising_W
        if wmn is None:
            wmn = -1000000.0
        wmx = self.max_ising_W
        if wmx is None:
            wmx = 1000000.0
        if self.layer_above is not None:
            wmn_above = self.layer_above.min_ising_W
            if wmn_above is None:
                wmn_above = -1000000.0
            wmx_above = self.layer_above.max_ising_W
            if wmx_above is None:
                wmx_above = 1000000.0
        W = updates[self.W]
        ising_W = 0.25 * W
        ising_W = T.clip(ising_W, wmn, wmx)
        b = updates[self.boltzmann_b]
        if self.layer_above is not None:
            W_above = updates[self.layer_above.W]
            ising_b = 0.5 * b + 0.25 * W.sum(axis=0) + \
                0.25 * W_above.sum(axis=1)
        else:
            ising_b = 0.5 * b + 0.25 * W.sum(axis=0)
        ising_b = T.clip(ising_b, bmn, bmx)
        if self.layer_above is not None:
            ising_W_above = 0.25 * W_above
            ising_W_above = T.clip(ising_W_above, wmn_above, wmx_above)
            bhn = 2.0 * (ising_b - ising_W.sum(axis=0) -
                         ising_W_above.sum(axis=1))
        else:
            bhn = 2.0 * (ising_b - ising_W.sum(axis=0))
        Wn = 4.0 * ising_W
        updates[self.W] = Wn
        updates[self.boltzmann_b] = bhn
    if self.noisy_sampling_W is not None:
        theano_rng = make_theano_rng(None, self.dbm.rng.randint(2 ** 16),
                                     which_method='normal')
        W = updates[self.W]
        ising_W = 0.25 * W
        noisy_sampling_W = theano_rng.normal(avg=ising_W,
                                             std=self.sampling_W_stdev,
                                             size=ising_W.shape,
                                             dtype=ising_W.dtype)
        updates[self.noisy_sampling_W] = noisy_sampling_W
        b = updates[self.boltzmann_b]
        if self.layer_above is not None:
            W_above = updates[self.layer_above.W]
            ising_b = 0.5 * b + 0.25 * W.sum(axis=0) + \
                0.25 * W_above.sum(axis=1)
        else:
            ising_b = 0.5 * b + 0.25 * W.sum(axis=0)
        noisy_sampling_b = theano_rng.normal(
            avg=ising_b.dimshuffle('x', 0),
            std=self.sampling_b_stdev,
            size=self.noisy_sampling_b.shape,
            dtype=ising_b.dtype)
        updates[self.noisy_sampling_b] = noisy_sampling_b
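# Summary of the parameter mapping that _modify_updates clips and inverts:
#   Boltzmann -> Ising:  ising_W = 0.25 * W
#                        ising_b = 0.5 * b + 0.25 * W.sum(axis=0)
#                                  [+ 0.25 * W_above.sum(axis=1) if there is
#                                   a layer above]
#   Ising -> Boltzmann:  W = 4.0 * ising_W
#                        b = 2.0 * (ising_b - ising_W.sum(axis=0)
#                                   [- ising_W_above.sum(axis=1)])
# Clipping in Ising coordinates and mapping back produces the Wn / bhn
# update expressions above.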
'.. todo:: WRITEME'
def resample_bias_noise(self, batch_size_changed=False):
    if batch_size_changed:
        self.resample_fn = None
    if self.resample_fn is None:
        updates = OrderedDict()
        if self.sampling_b_stdev is not None:
            self.noisy_sampling_b = sharedX(
                np.zeros((self.dbm.batch_size, self.dim)))
        if self.noisy_sampling_b is not None:
            theano_rng = make_theano_rng(None,
                                         self.dbm.rng.randint(2 ** 16),
                                         which_method='normal')
            b = self.boltzmann_b
            if self.layer_above is not None:
                W_above = self.layer_above.W
                ising_b = 0.5 * b + 0.25 * self.W.sum(axis=0) + \
                    0.25 * W_above.sum(axis=1)
            else:
                ising_b = 0.5 * b + 0.25 * self.W.sum(axis=0)
            noisy_sampling_b = theano_rng.normal(
                avg=ising_b.dimshuffle('x', 0),
                std=self.sampling_b_stdev,
                size=self.noisy_sampling_b.shape,
                dtype=ising_b.dtype)
            updates[self.noisy_sampling_b] = noisy_sampling_b
        self.resample_fn = function([], updates=updates)
    self.resample_fn()
'.. todo:: WRITEME'
def get_total_state_space(self):
return VectorSpace(self.dim)
'.. todo:: WRITEME'
def get_params(self):
    assert self.boltzmann_b.name is not None
    W = self.W
    assert W.name is not None
    rval = [W]
    assert not isinstance(rval, set)
    rval = list(rval)
    assert self.boltzmann_b not in rval
    rval.append(self.boltzmann_b)
    if self.learn_beta:
        rval.append(self.beta)
    return rval
'.. todo:: WRITEME'
def ising_weights(self, for_sampling=False):
    if not hasattr(self, 'sampling_W_stdev'):
        self.sampling_W_stdev = None
    if for_sampling and self.sampling_W_stdev is not None:
        return self.noisy_sampling_W
    return 0.25 * self.W
'.. todo:: WRITEME'
def ising_b(self, for_sampling=False):
    if not hasattr(self, 'sampling_b_stdev'):
        self.sampling_b_stdev = None
    if for_sampling and self.sampling_b_stdev is not None:
        return self.noisy_sampling_b
    elif self.layer_above is not None:
        return 0.5 * self.boltzmann_b + 0.25 * self.W.sum(axis=0) + \
            0.25 * self.layer_above.W.sum(axis=1)
    else:
        return 0.5 * self.boltzmann_b + 0.25 * self.W.sum(axis=0)
'.. todo:: WRITEME'
def ising_b_numpy(self):
    if self.layer_above is not None:
        return 0.5 * self.boltzmann_b.get_value() + \
            0.25 * self.W.get_value().sum(axis=0) + \
            0.25 * self.layer_above.W.get_value().sum(axis=1)
    else:
        return 0.5 * self.boltzmann_b.get_value() + \
            0.25 * self.W.get_value().sum(axis=0)
'.. todo:: WRITEME'
def get_weight_decay(self, coeff):
    if isinstance(coeff, str):
        coeff = float(coeff)
    assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
    W = self.W
    return coeff * T.sqr(W).sum()
'.. todo:: WRITEME'
def get_weights(self):
    warnings.warn("BoltzmannIsingHidden.get_weights returns the "
                  "BOLTZMANN weights, is that what we want?")
    W = self.W
    return W.get_value()
'.. todo:: WRITEME'
def set_weights(self, weights):
    warnings.warn("BoltzmannIsingHidden.set_weights sets the BOLTZMANN "
                  "weights, is that what we want?")
    W = self.W
    W.set_value(weights)
'.. todo:: WRITEME'
def set_biases(self, biases, recenter=False):
    self.boltzmann_b.set_value(biases)
    assert not recenter
'.. todo:: WRITEME'
def get_biases(self):
    warnings.warn("BoltzmannIsingHidden.get_biases returns the "
                  "BOLTZMANN biases, is that what we want?")
    return self.boltzmann_b.get_value()
'.. todo:: WRITEME'
def get_weights_format(self):
return ('v', 'h')
'.. todo:: WRITEME'
def get_weights_topo(self):
    warnings.warn("BoltzmannIsingHidden.get_weights_topo returns the "
                  "BOLTZMANN weights, is that what we want?")
    if not isinstance(self.input_space, Conv2DSpace):
        raise NotImplementedError()
    W = self.W
    W = W.T
    W = W.reshape((self.detector_layer_dim, self.input_space.shape[0],
                   self.input_space.shape[1], self.input_space.nchannels))
    W = Conv2DSpace.convert(W, self.input_space.axes, ('b', 0, 1, 'c'))
    return function([], W)()
'.. todo:: WRITEME'
def upward_state(self, total_state):
return total_state
'.. todo:: WRITEME'
def downward_state(self, total_state):
return total_state
'.. todo:: WRITEME'
def get_monitoring_channels(self):
    W = self.W
    assert W.ndim == 2
    sq_W = T.sqr(W)
    row_norms = T.sqrt(sq_W.sum(axis=1))
    col_norms = T.sqrt(sq_W.sum(axis=0))
    rval = OrderedDict([('boltzmann_row_norms_min', row_norms.min()),
                        ('boltzmann_row_norms_mean', row_norms.mean()),
                        ('boltzmann_row_norms_max', row_norms.max()),
                        ('boltzmann_col_norms_min', col_norms.min()),
                        ('boltzmann_col_norms_mean', col_norms.mean()),
                        ('boltzmann_col_norms_max', col_norms.max())])
    ising_W = self.ising_weights()
    rval['ising_W_min'] = ising_W.min()
    rval['ising_W_max'] = ising_W.max()
    ising_b = self.ising_b()
    rval['ising_b_min'] = ising_b.min()
    rval['ising_b_max'] = ising_b.max()
    if hasattr(self, 'noisy_sampling_W'):
        rval['noisy_sampling_W_min'] = self.noisy_sampling_W.min()
        rval['noisy_sampling_W_max'] = self.noisy_sampling_W.max()
        rval['noisy_sampling_b_min'] = self.noisy_sampling_b.min()
        rval['noisy_sampling_b_max'] = self.noisy_sampling_b.max()
    return rval
'.. todo:: WRITEME'
def get_monitoring_channels_from_state(self, state):
    P = state
    rval = OrderedDict()
    vars_and_prefixes = [(P, '')]
    for var, prefix in vars_and_prefixes:
        v_max = var.max(axis=0)
        v_min = var.min(axis=0)
        v_mean = var.mean(axis=0)
        v_range = v_max - v_min
        for key, val in [('max_x.max_u', v_max.max()),
                         ('max_x.mean_u', v_max.mean()),
                         ('max_x.min_u', v_max.min()),
                         ('min_x.max_u', v_min.max()),
                         ('min_x.mean_u', v_min.mean()),
                         ('min_x.min_u', v_min.min()),
                         ('range_x.max_u', v_range.max()),
                         ('range_x.mean_u', v_range.mean()),
                         ('range_x.min_u', v_range.min()),
                         ('mean_x.max_u', v_mean.max()),
                         ('mean_x.mean_u', v_mean.mean()),
                         ('mean_x.min_u', v_mean.min())]:
            rval[prefix + key] = val
    return rval
'.. todo:: WRITEME'
def sample(self, state_below=None, state_above=None, layer_above=None, theano_rng=None):
    if theano_rng is None:
        raise ValueError("theano_rng is required; it just defaults to None "
                         "so that it may appear after layer_above / "
                         "state_above in the list.")
    if state_above is not None:
        msg = layer_above.downward_message(state_above, for_sampling=True)
    else:
        msg = None
    if self.requires_reformat:
        state_below = self.input_space.format_as(state_below,
                                                 self.desired_space)
    z = T.dot(state_below, self.ising_weights(for_sampling=True)) + \
        self.ising_b(for_sampling=True)
    if msg is not None:
        z = z + msg
    on_prob = T.nnet.sigmoid(2.0 * self.beta * z)
    samples = theano_rng.binomial(p=on_prob, n=1, size=on_prob.shape,
                                  dtype=on_prob.dtype) * 2.0 - 1.0
    return samples
'.. todo:: WRITEME'
def downward_message(self, downward_state, for_sampling=False):
    rval = T.dot(downward_state,
                 self.ising_weights(for_sampling=for_sampling).T)
    if self.requires_reformat:
        rval = self.desired_space.format_as(rval, self.input_space)
    return rval
'.. todo:: WRITEME'
def init_mf_state(self):
    z = T.alloc(0.0, self.dbm.batch_size,
                self.dim).astype(self.boltzmann_b.dtype) + \
        self.ising_b().dimshuffle('x', 0)
    rval = T.tanh(self.beta * z)
    return rval
'.. todo:: WRITEME properly Returns a shared variable containing an actual state (not a mean field state) for this variable.'
def make_state(self, num_examples, numpy_rng):
    driver = numpy_rng.uniform(0.0, 1.0, (num_examples, self.dim))
    on_prob = sigmoid_numpy(2.0 * self.beta.get_value() *
                            self.ising_b_numpy())
    sample = 2.0 * (driver < on_prob) - 1.0
    rval = sharedX(sample, name='v_sample_shared')
    return rval
'.. todo:: WRITEME'
def make_symbolic_state(self, num_examples, theano_rng):
    mean = T.nnet.sigmoid(2.0 * self.beta * self.ising_b())
    rval = theano_rng.binomial(size=(num_examples, self.dim), p=mean)
    rval = 2.0 * rval - 1.0
    return rval
'.. todo:: WRITEME'
def expected_energy_term(self, state, average, state_below, average_below):
    self.input_space.validate(state_below)
    if self.requires_reformat:
        if not isinstance(state_below, tuple):
            for sb in get_debug_values(state_below):
                if sb.shape[0] != self.dbm.batch_size:
                    raise ValueError("self.dbm.batch_size is %d but got "
                                     "shape of %d" % (self.dbm.batch_size,
                                                      sb.shape[0]))
                assert reduce(operator.mul, sb.shape[1:]) == self.input_dim
        state_below = self.input_space.format_as(state_below,
                                                 self.desired_space)
    bias_term = T.dot(state, self.ising_b())
    weights_term = (T.dot(state_below, self.ising_weights()) *
                    state).sum(axis=1)
    rval = -bias_term - weights_term
    rval *= self.beta
    assert rval.ndim == 1
    return rval
'''.. todo:: WRITEME properly

Used to implement TorontoSparsity. Unclear exactly what properties of it
are important or how to implement it for other layers.

Properties it must have:
    output is same kind of data structure (ie, tuple of theano 2-tensors)
    as mf_update.

Properties it probably should have for other layer types:
    An infinitesimal change in state_below or the parameters should cause
    the same sign of change in the output of
    linear_feed_forward_approximation and in mf_update.

    Should not have any non-linearities that cause the gradient to shrink.

    Should disregard top-down feedback.'''
def linear_feed_forward_approximation(self, state_below):
    z = self.beta * (T.dot(state_below, self.ising_weights()) +
                     self.ising_b())
    return z
'.. todo:: WRITEME'
def mf_update(self, state_below, state_above, layer_above=None, double_weights=False, iter_name=None):
    self.input_space.validate(state_below)
    if self.requires_reformat:
        if not isinstance(state_below, tuple):
            for sb in get_debug_values(state_below):
                if sb.shape[0] != self.dbm.batch_size:
                    raise ValueError("self.dbm.batch_size is %d but got "
                                     "shape of %d" % (self.dbm.batch_size,
                                                      sb.shape[0]))
                assert reduce(operator.mul, sb.shape[1:]) == self.input_dim
        state_below = self.input_space.format_as(state_below,
                                                 self.desired_space)
    if iter_name is None:
        iter_name = 'anon'
    if state_above is not None:
        assert layer_above is not None
        msg = layer_above.downward_message(state_above)
        msg.name = 'msg_from_' + layer_above.layer_name + '_to_' + \
                   self.layer_name + '[' + iter_name + ']'
    else:
        msg = None
    if double_weights:
        state_below = 2.0 * state_below
        state_below.name = self.layer_name + '_' + iter_name + '_2state'
    z = T.dot(state_below, self.ising_weights()) + self.ising_b()
    if self.layer_name is not None and iter_name is not None:
        z.name = self.layer_name + '_' + iter_name + '_z'
    if msg is not None:
        z = z + msg
    h = T.tanh(self.beta * z)
    return h
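# The mean-field update computed above is, in equation form,
#     h_new = tanh( beta * ( state_below . ising_W + ising_b + msg ) ),
# where msg is the downward message from the layer above (absent when there
# is no layer above) and state_below is doubled first when double_weights
# is set.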
'.. todo:: WRITEME'
def get_l2_act_cost(self, state, target, coeff):
    avg = state.mean(axis=0)
    diff = avg - target
    return coeff * T.sqr(diff).mean()
'Returns the DBM that this layer belongs to, or None if it has not been assigned to a DBM yet.'
def get_dbm(self):
    if hasattr(self, 'dbm'):
        return self.dbm
    return None
'Assigns this layer to a DBM. Parameters dbm : WRITEME'
def set_dbm(self, dbm):
    assert self.get_dbm() is None
    self.dbm = dbm
'Returns the Space that the layer\'s total state lives in.'
def get_total_state_space(self):
raise NotImplementedError(((str(type(self)) + ' does not implement ') + 'get_total_state_space()'))
'.. todo:: WRITEME'
def get_monitoring_channels(self):
return OrderedDict()
'.. todo:: WRITEME'
def get_monitoring_channels_from_state(self, state):
return OrderedDict()
'''Takes total_state and turns it into the state that layer_above should
see when computing P( layer_above | this_layer).

So far this has two uses:

* If this layer consists of a detector sub-layer h that is pooled into a
  pooling layer p, then total_state = (p,h) but layer_above should only
  see p.
* If the conditional P( layer_above | this_layer) depends on parameters of
  this_layer, sometimes you can play games with the state to avoid needing
  the layers to communicate. So far the only instance of this usage is
  when the visible layer is N( Wh, beta). This makes the hidden layer be
  sigmoid( v beta W + b). Rather than having the hidden layer explicitly
  know about beta, we can just pass v beta as the upward state.

Parameters
----------
total_state : WRITEME

Notes
-----
This method should work both for computing sampling updates and for
computing mean field updates. So far I haven't encountered a case where it
needs to do different things for those two contexts.'''
def upward_state(self, total_state):
return total_state
'Returns a shared variable containing an actual state (not a mean field state) for this variable. Parameters num_examples : WRITEME numpy_rng : WRITEME Returns WRITEME'
def make_state(self, num_examples, numpy_rng):
raise NotImplementedError(("%s doesn't implement make_state" % type(self)))
'Returns a theano symbolic variable containing an actual state (not a mean field state) for this variable. Parameters num_examples : WRITEME numpy_rng : WRITEME Returns WRITEME'
def make_symbolic_state(self, num_examples, theano_rng):
raise NotImplementedError(("%s doesn't implement make_symbolic_state" % type(self)))
'''Returns an expression for samples of this layer's state, conditioned on
the layers above and below. Should be valid as an update to the shared
variable returned by self.make_state.

Parameters
----------
state_below : WRITEME
    Corresponds to layer_below.upward_state(full_state_below), where
    full_state_below is the same kind of object as you get out of
    layer_below.make_state
state_above : WRITEME
    Corresponds to layer_above.downward_state(full_state_above)
theano_rng : WRITEME
    An MRG_RandomStreams instance

Returns
-------
WRITEME

Notes
-----
This can return multiple expressions if this layer's total state consists
of more than one shared variable.'''
def sample(self, state_below=None, state_above=None, layer_above=None, theano_rng=None):
    if hasattr(self, 'get_sampling_updates'):
        raise AssertionError("Looks like " + str(type(self)) +
                             " needs to rename get_sampling_updates "
                             "to sample.")
    raise NotImplementedError("%s doesn't implement sample" % type(self))
'''Returns a term of the expected energy of the entire model.

This term should correspond to the expected value of terms of the energy
function that:

- involve this layer only
- if there is a layer below, include terms that involve both this layer
  and the layer below

Do not include terms that involve the layer below only. Do not include any
terms that involve the layer above, if it exists, in any way (the
interface doesn't let you see the layer above anyway).

Parameters
----------
state_below : WRITEME
    Upward state of the layer below.
state : WRITEME
    Total state of this layer
average_below : bool
    If True, the layer below is one of the variables to integrate over in
    the expectation, and state_below gives its variational parameters. If
    False, that layer is to be held constant and state_below gives a set
    of assignments to it.
average : bool
    Like average_below, but for 'state' rather than 'state_below'.

Returns
-------
rval : tensor_like
    A 1D theano tensor giving the expected energy term for each example'''
def expected_energy_term(self, state, average, state_below, average_below):
raise NotImplementedError((str(type(self)) + ' does not implement expected_energy_term.'))
'Some layers\' initialization depends on layer above being initialized, which is why this method is called after `set_input_space` has been called.'
def finalize_initialization(self):
pass
'Returns the total state of the layer. Returns total_state : member of the input space The total state of the layer.'
def get_total_state_space(self):
return self.get_input_space()
'.. todo:: WRITEME'
def downward_state(self, total_state):
return total_state
'.. todo:: WRITEME'
def get_stdev_rewards(self, state, coeffs):
raise NotImplementedError((str(type(self)) + ' does not implement get_stdev_rewards'))
'.. todo:: WRITEME'
def get_range_rewards(self, state, coeffs):
raise NotImplementedError((str(type(self)) + ' does not implement get_range_rewards'))
'.. todo:: WRITEME'
def get_l1_act_cost(self, state, target, coeff, eps):
raise NotImplementedError((str(type(self)) + ' does not implement get_l1_act_cost'))
'.. todo:: WRITEME'
def get_l2_act_cost(self, state, target, coeff):
raise NotImplementedError((str(type(self)) + ' does not implement get_l2_act_cost'))
'Returns biases : ndarray The numpy value of the biases'
def get_biases(self):
return self.bias.get_value()
'.. todo:: WRITEME'
def set_biases(self, biases, recenter=False):
    self.bias.set_value(biases)
    if recenter:
        assert self.center
        self.offset.set_value(sigmoid_numpy(self.bias.get_value()))
'.. todo:: WRITEME'
def upward_state(self, total_state):
    if not hasattr(self, 'center'):
        self.center = False
    if self.center:
        rval = total_state - self.offset
    else:
        rval = total_state
    if not hasattr(self, 'copies'):
        self.copies = 1
    return rval * self.copies
'.. todo:: WRITEME'
def get_params(self):
return [self.bias]
'.. todo:: WRITEME'
def sample(self, state_below=None, state_above=None, layer_above=None, theano_rng=None):
    assert state_below is None
    if self.copies != 1:
        raise NotImplementedError()
    msg = layer_above.downward_message(state_above)
    bias = self.bias
    z = msg + bias
    phi = T.nnet.sigmoid(z)
    rval = theano_rng.binomial(size=phi.shape, p=phi, dtype=phi.dtype, n=1)
    return rval
'.. todo:: WRITEME'
def mf_update(self, state_above, layer_above):
    msg = layer_above.downward_message(state_above)
    mu = self.bias
    z = msg + mu
    rval = T.nnet.sigmoid(z)
    return rval
'.. todo:: WRITEME'
def make_state(self, num_examples, numpy_rng):
    if not hasattr(self, 'copies'):
        self.copies = 1
    if self.copies != 1:
        raise NotImplementedError()
    driver = numpy_rng.uniform(0.0, 1.0, (num_examples, self.nvis))
    mean = sigmoid_numpy(self.bias.get_value())
    sample = driver < mean
    rval = sharedX(sample, name='v_sample_shared')
    return rval
'.. todo:: WRITEME'
def make_symbolic_state(self, num_examples, theano_rng):
    if not hasattr(self, 'copies'):
        self.copies = 1
    if self.copies != 1:
        raise NotImplementedError()
    mean = T.nnet.sigmoid(self.bias)
    rval = theano_rng.binomial(size=(num_examples, self.nvis), p=mean,
                               dtype=theano.config.floatX)
    return rval
'.. todo:: WRITEME'
def expected_energy_term(self, state, average, state_below=None, average_below=None):
    if self.center:
        state = state - self.offset
    assert state_below is None
    assert average_below is None
    assert average in [True, False]
    self.space.validate(state)
    rval = -T.dot(state, self.bias)
    assert rval.ndim == 1
    return rval * self.copies
'.. todo:: WRITEME'
def init_inpainting_state(self, V, drop_mask, noise=False, return_unmasked=False):
    assert drop_mask is None or drop_mask.ndim > 1
    unmasked = T.nnet.sigmoid(self.bias.dimshuffle('x', 0))
    assert unmasked.ndim == 2
    assert hasattr(unmasked.owner.op, 'scalar_op')
    if drop_mask is not None:
        masked_mean = unmasked * drop_mask
    else:
        masked_mean = unmasked
    if not hasattr(self, 'learn_init_inpainting_state'):
        self.learn_init_inpainting_state = 0
    if not self.learn_init_inpainting_state:
        masked_mean = block_gradient(masked_mean)
    masked_mean.name = 'masked_mean'
    if noise:
        theano_rng = theano.sandbox.rng_mrg.MRG_RandomStreams(42)
        unmasked = T.nnet.sigmoid(theano_rng.normal(
            avg=0.0, std=1.0, size=masked_mean.shape,
            dtype=masked_mean.dtype))
        masked_mean = unmasked * drop_mask
        masked_mean.name = 'masked_noise'
    if drop_mask is None:
        rval = masked_mean
    else:
        masked_V = V * (1 - drop_mask)
        rval = masked_mean + masked_V
    rval.name = 'init_inpainting_state'
    if return_unmasked:
        assert unmasked.ndim > 1
        return rval, unmasked
    return rval
'.. todo:: WRITEME'
def inpaint_update(self, state_above, layer_above, drop_mask=None, V=None, return_unmasked=False):
    msg = layer_above.downward_message(state_above)
    mu = self.bias
    z = msg + mu
    z.name = 'inpainting_z_[unknown_iter]'
    unmasked = T.nnet.sigmoid(z)
    if drop_mask is not None:
        rval = drop_mask * unmasked + (1 - drop_mask) * V
    else:
        rval = unmasked
    rval.name = 'inpainted_V[unknown_iter]'
    if return_unmasked:
        owner = unmasked.owner
        assert owner is not None
        op = owner.op
        assert hasattr(op, 'scalar_op')
        assert isinstance(op.scalar_op, T.nnet.sigm.ScalarSigmoid)
        return rval, unmasked
    return rval
'.. todo:: WRITEME'
def recons_cost(self, V, V_hat_unmasked, drop_mask=None, use_sum=False):
    if use_sum:
        raise NotImplementedError()
    V_hat = V_hat_unmasked
    assert hasattr(V_hat, 'owner')
    owner = V_hat.owner
    assert owner is not None
    op = owner.op
    block_grad = False
    if is_block_gradient(op):
        assert isinstance(op.scalar_op, theano.scalar.Identity)
        block_grad = True
        real, = owner.inputs
        owner = real.owner
        op = owner.op
    if not hasattr(op, 'scalar_op'):
        raise ValueError("Expected V_hat_unmasked to be generated by an "
                         "Elemwise op, got " + str(op) + " of type " +
                         str(type(op)))
    assert isinstance(op.scalar_op, T.nnet.sigm.ScalarSigmoid)
    z, = owner.inputs
    if block_grad:
        z = block_gradient(z)
    if V.ndim != V_hat.ndim:
        raise ValueError("V and V_hat_unmasked should have same ndim, "
                         "but are %d and %d." % (V.ndim, V_hat.ndim))
    unmasked_cost = V * T.nnet.softplus(-z) + (1 - V) * T.nnet.softplus(z)
    assert unmasked_cost.ndim == V_hat.ndim
    if drop_mask is None:
        masked_cost = unmasked_cost
    else:
        masked_cost = drop_mask * unmasked_cost
    return masked_cost.mean()
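# The softplus expression above is the usual binary cross-entropy written in
# terms of the pre-sigmoid activation z (recovered from V_hat's graph):
# since V_hat = sigmoid(z), log(V_hat) = -softplus(-z) and
# log(1 - V_hat) = -softplus(z), so
#     -[ V*log(V_hat) + (1 - V)*log(1 - V_hat) ]
#         = V*softplus(-z) + (1 - V)*softplus(z),
# which is numerically better behaved than taking logs of the sigmoid.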
'.. todo:: WRITEME'
def get_lr_scalers(self):
    if not hasattr(self, 'W_lr_scale'):
        self.W_lr_scale = None
    if not hasattr(self, 'b_lr_scale'):
        self.b_lr_scale = None
    rval = OrderedDict()
    if self.W_lr_scale is not None:
        W, = self.transformer.get_params()
        rval[W] = self.W_lr_scale
    if self.b_lr_scale is not None:
        rval[self.b] = self.b_lr_scale
    return rval
'.. todo:: WRITEME Notes This resets parameters!'
def set_input_space(self, space):
    self.input_space = space
    if isinstance(space, VectorSpace):
        self.requires_reformat = False
        self.input_dim = space.dim
    else:
        self.requires_reformat = True
        self.input_dim = space.get_total_dimension()
        self.desired_space = VectorSpace(self.input_dim)
    if not (self.detector_layer_dim % self.pool_size == 0):
        raise ValueError("detector_layer_dim = %d, pool_size = %d. "
                         "Should be divisible but remainder is %d" %
                         (self.detector_layer_dim, self.pool_size,
                          self.detector_layer_dim % self.pool_size))
    self.h_space = VectorSpace(self.detector_layer_dim)
    self.pool_layer_dim = self.detector_layer_dim / self.pool_size
    self.output_space = VectorSpace(self.pool_layer_dim)
    rng = self.dbm.rng
    if self.irange is not None:
        assert self.sparse_init is None
        W = rng.uniform(-self.irange, self.irange,
                        (self.input_dim, self.detector_layer_dim)) * \
            (rng.uniform(0.0, 1.0,
                         (self.input_dim, self.detector_layer_dim))
             < self.include_prob)
    else:
        assert self.sparse_init is not None
        W = np.zeros((self.input_dim, self.detector_layer_dim))

        def mask_rejects(idx, i):
            if self.mask_weights is None:
                return False
            return self.mask_weights[idx, i] == 0.0

        for i in xrange(self.detector_layer_dim):
            assert self.sparse_init <= self.input_dim
            for j in xrange(self.sparse_init):
                idx = rng.randint(0, self.input_dim)
                while W[idx, i] != 0 or mask_rejects(idx, i):
                    idx = rng.randint(0, self.input_dim)
                W[idx, i] = rng.randn()
        W *= self.sparse_stdev
    W = sharedX(W)
    W.name = self.layer_name + '_W'
    self.transformer = MatrixMul(W)
    W, = self.transformer.get_params()
    assert W.name is not None
    if self.mask_weights is not None:
        expected_shape = (self.input_dim, self.detector_layer_dim)
        if expected_shape != self.mask_weights.shape:
            raise ValueError("Expected mask with shape " +
                             str(expected_shape) + " but got " +
                             str(self.mask_weights.shape))
        self.mask = sharedX(self.mask_weights)
'.. todo:: WRITEME'
def get_total_state_space(self):
return CompositeSpace((self.output_space, self.h_space))
'.. todo:: WRITEME'
def get_params(self):
    assert self.b.name is not None
    W, = self.transformer.get_params()
    assert W.name is not None
    rval = self.transformer.get_params()
    assert not isinstance(rval, set)
    rval = list(rval)
    assert self.b not in rval
    rval.append(self.b)
    return rval
'.. todo:: WRITEME'
def get_weight_decay(self, coeff):
    if isinstance(coeff, str):
        coeff = float(coeff)
    assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
    W, = self.transformer.get_params()
    return coeff * T.sqr(W).sum()
'.. todo:: WRITEME'
def get_weights(self):
    if self.requires_reformat:
        raise NotImplementedError()
    W, = self.transformer.get_params()
    return W.get_value()
'.. todo:: WRITEME'
def set_weights(self, weights):
    W, = self.transformer.get_params()
    W.set_value(weights)
'.. todo:: WRITEME'
def set_biases(self, biases, recenter=False):
    self.b.set_value(biases)
    if recenter:
        assert self.center
        if self.pool_size != 1:
            raise NotImplementedError()
        self.offset.set_value(sigmoid_numpy(self.b.get_value()))
'.. todo:: WRITEME'
def get_biases(self):
return self.b.get_value()
'.. todo:: WRITEME'
def get_weights_format(self):
return ('v', 'h')
'.. todo:: WRITEME'
def get_weights_view_shape(self):
    total = self.detector_layer_dim
    cols = self.pool_size
    if cols == 1:
        raise NotImplementedError()
    rows = total / cols
    return rows, cols
'.. todo:: WRITEME'
def get_weights_topo(self):
    if not isinstance(self.input_space, Conv2DSpace):
        raise NotImplementedError()
    W, = self.transformer.get_params()
    W = W.T
    W = W.reshape((self.detector_layer_dim, self.input_space.shape[0],
                   self.input_space.shape[1],
                   self.input_space.num_channels))
    W = Conv2DSpace.convert(W, self.input_space.axes, ('b', 0, 1, 'c'))
    return function([], W)()
'.. todo:: WRITEME'
def upward_state(self, total_state):
    p, h = total_state
    self.h_space.validate(h)
    self.output_space.validate(p)
    if not hasattr(self, 'center'):
        self.center = False
    if self.center:
        return p - self.offset
    if not hasattr(self, 'copies'):
        self.copies = 1
    return p * self.copies