desc | decl | bodies
---|---|---|
'.. todo::
WRITEME'
| def downward_message(self, downward_state):
| rval = self.transformer.lmul_T(downward_state)
if self.requires_reformat:
rval = self.desired_space.format_as(rval, self.input_space)
return (rval * self.copies)
|
'.. todo::
WRITEME'
| def init_mf_state(self):
| z = ((T.alloc(0.0, self.dbm.batch_size, self.detector_layer_dim).astype(self.b.dtype) + self.b.dimshuffle('x', 0)) + self.beta_bias())
rval = max_pool_channels(z=z, pool_size=self.pool_size)
return rval
|
'.. todo::
WRITEME properly
Returns a shared variable containing an actual state
(not a mean field state) for this variable.'
| def make_state(self, num_examples, numpy_rng):
| raise NotImplementedError('need to account for beta')
if (not hasattr(self, 'copies')):
self.copies = 1
if (self.copies != 1):
raise NotImplementedError()
empty_input = self.h_space.get_origin_batch(num_examples)
empty_output = self.output_space.get_origin_batch(num_examples)
h_state = sharedX(empty_input)
p_state = sharedX(empty_output)
theano_rng = make_theano_rng(None, numpy_rng.randint((2 ** 16)), which_method='binomial')
default_z = (T.zeros_like(h_state) + self.b)
(p_exp, h_exp, p_sample, h_sample) = max_pool_channels(z=default_z, pool_size=self.pool_size, theano_rng=theano_rng)
assert (h_sample.dtype == default_z.dtype)
f = function([], updates=[(p_state, p_sample), (h_state, h_sample)])
f()
p_state.name = 'p_sample_shared'
h_state.name = 'h_sample_shared'
return (p_state, h_state)
|
'.. todo::
WRITEME'
| def expected_energy_term(self, state, average, state_below, average_below):
| raise NotImplementedError('need to account for beta, and maybe some other stuff')
self.input_space.validate(state_below)
if self.requires_reformat:
if (not isinstance(state_below, tuple)):
for sb in get_debug_values(state_below):
if (sb.shape[0] != self.dbm.batch_size):
raise ValueError(('self.dbm.batch_size is %d but got shape of %d' % (self.dbm.batch_size, sb.shape[0])))
assert (reduce(operator.mul, sb.shape[1:]) == self.input_dim)
state_below = self.input_space.format_as(state_below, self.desired_space)
downward_state = self.downward_state(state)
self.h_space.validate(downward_state)
bias_term = T.dot(downward_state, self.b)
weights_term = (self.transformer.lmul(state_below) * downward_state).sum(axis=1)
rval = ((- bias_term) - weights_term)
assert (rval.ndim == 1)
return (rval * self.copies)
|
'.. todo::
WRITEME properly
Used to implement TorontoSparsity. Unclear exactly what properties of it are
important or how to implement it for other layers.
Properties it must have:
output is same kind of data structure (ie, tuple of theano 2-tensors)
as mf_update
Properties it probably should have for other layer types:
An infinitesimal change in state_below or the parameters should cause the same sign of change
in the output of linear_feed_forward_approximation and in mf_update
Should not have any non-linearities that cause the gradient to shrink
Should disregard top-down feedback'
| def linear_feed_forward_approximation(self, state_below):
| raise NotImplementedError('need to account for beta')
z = (self.transformer.lmul(state_below) + self.b)
if (self.pool_size != 1):
raise NotImplementedError()
return (z, z)
|
'.. todo::
WRITEME'
| def beta_bias(self):
| (W,) = self.transformer.get_params()
beta = self.input_layer.beta
assert (beta.ndim == 1)
return ((-0.5) * T.dot(beta, T.sqr(W)))
|
'.. todo::
WRITEME'
| def mf_update(self, state_below, state_above, layer_above=None, double_weights=False, iter_name=None):
| self.input_space.validate(state_below)
if self.requires_reformat:
if (not isinstance(state_below, tuple)):
for sb in get_debug_values(state_below):
if (sb.shape[0] != self.dbm.batch_size):
raise ValueError(('self.dbm.batch_size is %d but got shape of %d' % (self.dbm.batch_size, sb.shape[0])))
assert (reduce(operator.mul, sb.shape[1:]) == self.input_dim)
state_below = self.input_space.format_as(state_below, self.desired_space)
if (iter_name is None):
iter_name = 'anon'
if (state_above is not None):
assert (layer_above is not None)
msg = layer_above.downward_message(state_above)
msg.name = (((((('msg_from_' + layer_above.layer_name) + '_to_') + self.layer_name) + '[') + iter_name) + ']')
else:
msg = None
if double_weights:
state_below = (2.0 * state_below)
state_below.name = (((self.layer_name + '_') + iter_name) + '_2state')
z = ((self.transformer.lmul(state_below) + self.b) + self.beta_bias())
if ((self.layer_name is not None) and (iter_name is not None)):
z.name = (((self.layer_name + '_') + iter_name) + '_z')
(p, h) = max_pool_channels(z, self.pool_size, msg)
p.name = ((self.layer_name + '_p_') + iter_name)
h.name = ((self.layer_name + '_h_') + iter_name)
return (p, h)
|
'.. todo::
WRITEME'
| def set_input_space(self, space):
| self.input_space = space
if (not isinstance(space, CompositeSpace)):
assert (self.inputs_to_components is None)
self.routing_needed = False
elif (self.inputs_to_components is None):
self.routing_needed = False
else:
self.routing_needed = True
assert (max(self.inputs_to_components) < space.num_components)
self.components_to_inputs = OrderedDict()
for i in xrange(self.num_components):
inputs = []
for j in xrange(space.num_components):
if (i in self.inputs_to_components[j]):
inputs.append(j)
if (len(inputs) < space.num_components):
self.components_to_inputs[i] = inputs
for (i, component) in enumerate(self.components):
if (self.routing_needed and (i in self.components_to_inputs)):
cur_space = space.restrict(self.components_to_inputs[i])
else:
cur_space = space
component.set_input_space(cur_space)
self.output_space = CompositeSpace([component.get_output_space() for component in self.components])
|
'.. todo::
WRITEME'
| def make_state(self, num_examples, numpy_rng):
| return tuple((component.make_state(num_examples, numpy_rng) for component in self.components))
|
'.. todo::
WRITEME'
| def get_total_state_space(self):
| return CompositeSpace([component.get_total_state_space() for component in self.components])
|
'.. todo::
WRITEME'
| def set_batch_size(self, batch_size):
| for component in self.components:
component.set_batch_size(batch_size)
|
'.. todo::
WRITEME'
| def set_dbm(self, dbm):
| for component in self.components:
component.set_dbm(dbm)
|
'.. todo::
WRITEME'
| def mf_update(self, state_below, state_above, layer_above=None, double_weights=False, iter_name=None):
| rval = []
for (i, component) in enumerate(self.components):
if (self.routing_needed and (i in self.components_to_inputs)):
cur_state_below = self.input_space.restrict_batch(state_below, self.components_to_inputs[i])
else:
cur_state_below = state_below
class RoutingLayer(object):
def __init__(self, idx, layer):
self.__dict__.update(locals())
del self.self
self.layer_name = ((('route_' + str(idx)) + '_') + layer.layer_name)
def downward_message(self, state):
return self.layer.downward_message(state)[self.idx]
if (layer_above is not None):
cur_layer_above = RoutingLayer(i, layer_above)
else:
cur_layer_above = None
mf_update = component.mf_update(state_below=cur_state_below, state_above=state_above, layer_above=cur_layer_above, double_weights=double_weights, iter_name=iter_name)
rval.append(mf_update)
return tuple(rval)
|
'.. todo::
WRITEME'
| def init_mf_state(self):
| return tuple([component.init_mf_state() for component in self.components])
|
'.. todo::
WRITEME'
| def get_weight_decay(self, coeffs):
| return sum([component.get_weight_decay(coeff) for (component, coeff) in safe_zip(self.components, coeffs)])
|
'.. todo::
WRITEME'
| def upward_state(self, total_state):
| return tuple([component.upward_state(elem) for (component, elem) in safe_zip(self.components, total_state)])
|
'.. todo::
WRITEME'
| def downward_state(self, total_state):
| return tuple([component.downward_state(elem) for (component, elem) in safe_zip(self.components, total_state)])
|
'.. todo::
WRITEME'
| def downward_message(self, downward_state):
| if isinstance(self.input_space, CompositeSpace):
num_input_components = self.input_space.num_components
else:
num_input_components = 1
rval = ([None] * num_input_components)
def add(x, y):
if (x is None):
return y
if (y is None):
return x
return (x + y)
for (i, packed) in enumerate(safe_zip(self.components, downward_state)):
(component, state) = packed
if (self.routing_needed and (i in self.components_to_inputs)):
input_idx = self.components_to_inputs[i]
else:
input_idx = range(num_input_components)
partial_message = component.downward_message(state)
if (len(input_idx) == 1):
partial_message = [partial_message]
assert (len(input_idx) == len(partial_message))
for (idx, msg) in safe_zip(input_idx, partial_message):
rval[idx] = add(rval[idx], msg)
if (len(rval) == 1):
rval = rval[0]
else:
rval = tuple(rval)
self.input_space.validate(rval)
return rval
|
'.. todo::
WRITEME'
| def get_l1_act_cost(self, state, target, coeff, eps):
| return sum([comp.get_l1_act_cost(s, t, c, e) for (comp, s, t, c, e) in safe_zip(self.components, state, target, coeff, eps)])
|
'.. todo::
WRITEME'
| def get_range_rewards(self, state, coeffs):
| return sum([comp.get_range_rewards(s, c) for (comp, s, c) in safe_zip(self.components, state, coeffs)])
|
'.. todo::
WRITEME'
| def get_params(self):
| return reduce((lambda x, y: safe_union(x, y)), [component.get_params() for component in self.components])
|
'.. todo::
WRITEME'
| def get_weights_topo(self):
| logger.info('Get topological weights for which layer?')
for (i, component) in enumerate(self.components):
logger.info('{0} {1}'.format(i, component.layer_name))
x = input()
return self.components[int(x)].get_weights_topo()
|
'.. todo::
WRITEME'
| def get_monitoring_channels_from_state(self, state):
| rval = OrderedDict()
for (layer, s) in safe_zip(self.components, state):
d = layer.get_monitoring_channels_from_state(s)
for key in d:
rval[((layer.layer_name + '_') + key)] = d[key]
return rval
|
'.. todo::
WRITEME'
| def sample(self, state_below=None, state_above=None, layer_above=None, theano_rng=None):
| rval = []
for (i, component) in enumerate(self.components):
if (self.routing_needed and (i in self.components_to_inputs)):
cur_state_below = self.input_space.restrict_batch(state_below, self.components_to_inputs[i])
else:
cur_state_below = state_below
class RoutingLayer(object):
def __init__(self, idx, layer):
self.__dict__.update(locals())
del self.self
self.layer_name = ((('route_' + str(idx)) + '_') + layer.layer_name)
def downward_message(self, state):
return self.layer.downward_message(state)[self.idx]
if (layer_above is not None):
cur_layer_above = RoutingLayer(i, layer_above)
else:
cur_layer_above = None
sample = component.sample(state_below=cur_state_below, state_above=state_above, layer_above=cur_layer_above, theano_rng=theano_rng)
rval.append(sample)
return tuple(rval)
|
'Associates the SamplingProcedure with a specific DBM.
Parameters
dbm : pylearn2.models.dbm.DBM instance
The model to perform sampling from.'
| def set_dbm(self, dbm):
| self.dbm = dbm
|
'Samples from self.dbm using `layer_to_state` as starting values.
Parameters
layer_to_state : dict
Maps the DBM\'s Layer instances to theano variables representing
batches of samples of them.
theano_rng : theano.sandbox.rng_mrg.MRG_RandomStreams
Random number generator
layer_to_clamp : dict, optional
Maps Layers to bools. If a layer is not in the dictionary,
defaults to False. True indicates that this layer should be
clamped, so we are sampling from a conditional distribution
rather than the joint distribution.
num_steps : int, optional
Number of steps of the sampling procedure. It samples `num_steps`
times and returns the last sample.
Returns
layer_to_updated_state : dict
Maps the DBM\'s Layer instances to theano variables representing
batches of updated samples of them.'
| def sample(self, layer_to_state, theano_rng, layer_to_clamp=None, num_steps=1):
| raise NotImplementedError(((str(type(self)) + ' does not implement ') + 'sample.'))
|
'.. todo::
WRITEME'
| def sample(self, layer_to_state, theano_rng, layer_to_clamp=None, num_steps=1):
| assert isinstance(num_steps, py_integer_types)
assert (num_steps > 0)
if (num_steps != 1):
for i in xrange(num_steps):
layer_to_state = self.sample(layer_to_state, theano_rng, layer_to_clamp, num_steps=1)
return layer_to_state
assert (len(self.dbm.hidden_layers) > 0)
if (layer_to_clamp is None):
layer_to_clamp = OrderedDict()
for key in layer_to_clamp:
assert ((key is self.dbm.visible_layer) or (key in self.dbm.hidden_layers))
for layer in ([self.dbm.visible_layer] + self.dbm.hidden_layers):
if (layer not in layer_to_clamp):
layer_to_clamp[layer] = False
layer_to_updated = OrderedDict()
for (i, this_layer) in list(enumerate(self.dbm.hidden_layers))[::2]:
if (i == 0):
layer_below = self.dbm.visible_layer
else:
layer_below = self.dbm.hidden_layers[(i - 1)]
state_below = layer_to_state[layer_below]
state_below = layer_below.upward_state(state_below)
if ((i + 1) < len(self.dbm.hidden_layers)):
layer_above = self.dbm.hidden_layers[(i + 1)]
state_above = layer_to_state[layer_above]
state_above = layer_above.downward_state(state_above)
else:
state_above = None
layer_above = None
if layer_to_clamp[this_layer]:
this_state = layer_to_state[this_layer]
this_sample = this_state
else:
this_sample = this_layer.sample(state_below=state_below, state_above=state_above, layer_above=layer_above, theano_rng=theano_rng)
layer_to_updated[this_layer] = this_sample
vis_state = layer_to_state[self.dbm.visible_layer]
if layer_to_clamp[self.dbm.visible_layer]:
vis_sample = vis_state
else:
first_hid = self.dbm.hidden_layers[0]
state_above = layer_to_updated[first_hid]
state_above = first_hid.downward_state(state_above)
vis_sample = self.dbm.visible_layer.sample(state_above=state_above, layer_above=first_hid, theano_rng=theano_rng)
layer_to_updated[self.dbm.visible_layer] = vis_sample
for (i, this_layer) in list(enumerate(self.dbm.hidden_layers))[1::2]:
layer_below = self.dbm.hidden_layers[(i - 1)]
state_below = layer_to_updated[layer_below]
state_below = layer_below.upward_state(state_below)
if ((i + 1) < len(self.dbm.hidden_layers)):
layer_above = self.dbm.hidden_layers[(i + 1)]
state_above = layer_to_updated[layer_above]
state_above = layer_above.downward_state(state_above)
else:
state_above = None
layer_above = None
if layer_to_clamp[this_layer]:
this_state = layer_to_state[this_layer]
this_sample = this_state
else:
this_sample = this_layer.sample(state_below=state_below, state_above=state_above, layer_above=layer_above, theano_rng=theano_rng)
layer_to_updated[this_layer] = this_sample
assert all([(layer in layer_to_updated) for layer in layer_to_state])
assert all([(layer in layer_to_state) for layer in layer_to_updated])
assert all([((layer_to_state[layer] is layer_to_updated[layer]) == layer_to_clamp[layer]) for layer in layer_to_state])
return layer_to_updated
|
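The procedure above is the standard even-odd block Gibbs sweep for a DBM: the even-indexed hidden layers are sampled first (they are conditionally independent given the odd ones), then the visible layer together with the odd-indexed hidden layers. A minimal plain-Python sketch of just that scheduling, with made-up layer names and no Theano:

    # Sketch of the even/odd update order used by GibbsEvenOdd.sample above.
    # The layer names are hypothetical; only the index arithmetic mirrors the code.
    hidden_layers = ['h1', 'h2', 'h3', 'h4']
    even_phase = [name for i, name in list(enumerate(hidden_layers))[::2]]
    odd_phase = [name for i, name in list(enumerate(hidden_layers))[1::2]]
    print(even_phase)               # ['h1', 'h3'] sampled first, in parallel
    print(['visible'] + odd_phase)  # ['visible', 'h2', 'h4'] sampled second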
'.. todo::
WRITEME'
| def get_output_dim(self):
| return self.nhid
|
'.. todo::
WRITEME'
| def get_output_channels(self):
| return self.nhid
|
'.. todo::
WRITEME'
| def normalize_W(self):
| W = self.W.get_value(borrow=True)
norms = N.sqrt(N.square(W).sum(axis=0))
self.W.set_value((W / norms), borrow=True)
|
'.. todo::
WRITEME'
| def redo_everything(self):
| self.W = shared(N.cast[floatX](self.rng.randn(self.nvis, self.nhid)), name='W')
self.pred_W = shared(self.W.get_value(borrow=False), name='pred_W')
self.pred_b = shared(N.zeros(self.nhid, dtype=floatX), name='pred_b')
self.pred_g = shared(N.ones(self.nhid, dtype=floatX), name='pred_g')
self.normalize_W()
self.p = shared((N.zeros(self.nhid, dtype=floatX) + N.cast[floatX](self.init_p)), name='p')
self.lamda = shared((N.zeros(self.nhid, dtype=floatX) + N.cast[floatX](self.init_lambda)), name='lambda')
self.alpha = self.init_alpha
self.failure_rate = 0.5
self.examples_seen = 0
self.batches_seen = 0
self.redo_theano()
|
'.. todo::
WRITEME'
| def recons_error(self, v, h):
| recons = T.dot(self.W, h)
diffs = (recons - v)
rval = (T.dot(diffs, diffs) / N.cast[floatX](self.nvis))
return rval
|
'.. todo::
WRITEME'
| def recons_error_batch(self, V, H):
| recons = T.dot(H, self.W.T)
diffs = (recons - V)
rval = T.mean(T.sqr(diffs))
return rval
|
'.. todo::
WRITEME'
| def sparsity_penalty(self, v, h):
| sparsity_measure = ((((h * T.log(h)) - (h * T.log(self.p))) - h) + self.p)
rval = (T.dot(self.lamda, sparsity_measure) / N.cast[floatX](self.nhid))
return rval
|
'.. todo::
WRITEME'
| def sparsity_penalty_batch(self, V, H):
| sparsity_measure = ((((H * T.log(H)) - (H * T.log(self.p))) - H) + self.p)
sparsity_measure_exp = T.mean(sparsity_measure, axis=0)
rval = (T.dot(self.lamda, sparsity_measure_exp) / N.cast[floatX](self.nhid))
return rval
|
'.. todo::
WRITEME'
| def coding_obj(self, v, h):
| return (self.recons_error(v, h) + self.sparsity_penalty(v, h))
|
'.. todo::
WRITEME'
| def coding_obj_batch(self, V, H):
| return (self.recons_error_batch(V, H) + self.sparsity_penalty_batch(V, H))
|
'.. todo::
WRITEME'
| def predict(self, V):
| rval = (T.nnet.sigmoid((T.dot(V, self.pred_W) + self.pred_b)) * self.pred_g)
assert (rval.type.dtype == V.type.dtype)
return rval
|
'.. todo::
WRITEME'
| def redo_theano(self):
| self.h = shared(N.zeros(self.nhid, dtype=floatX), name='h')
self.v = shared(N.zeros(self.nvis, dtype=floatX), name='v')
input_v = T.vector()
assert (input_v.type.dtype == floatX)
self.init_h_v = function([input_v], updates={self.h: self.predict(input_v), self.v: input_v})
coding_obj = self.coding_obj(self.v, self.h)
assert (len(coding_obj.type.broadcastable) == 0)
coding_grad = T.grad(coding_obj, self.h)
assert (len(coding_grad.type.broadcastable) == 1)
self.coding_obj_grad = function([], [coding_obj, coding_grad])
self.new_h = shared(N.zeros(self.nhid, dtype=floatX), name='new_h')
alpha = T.scalar(name='alpha')
outside_grad = T.vector(name='outside_grad')
new_h = T.clip((self.h * T.exp(((- alpha) * outside_grad))), 1e-10, 10000.0)
new_obj = self.coding_obj(self.v, new_h)
self.try_step = function([alpha, outside_grad], updates={self.new_h: new_h}, outputs=new_obj)
self.accept_h = function([], updates={self.h: self.new_h})
self.get_h = function([], self.h)
V = T.matrix(name='V')
H = T.matrix(name='H')
coding_obj_batch = self.coding_obj_batch(V, H)
self.code_learning_obj = function([V, H], coding_obj_batch)
learning_grad = T.grad(coding_obj_batch, self.W)
self.code_learning_step = function([V, H, alpha], updates={self.W: (self.W - (alpha * learning_grad))})
pred_obj = T.mean(T.sqr((self.predict(V) - H)))
predictor_params = [self.pred_W, self.pred_b, self.pred_g]
pred_grads = T.grad(pred_obj, wrt=predictor_params)
predictor_updates = {}
for (param, grad) in zip(predictor_params, pred_grads):
predictor_updates[param] = (param - (alpha * grad))
predictor_updates[self.pred_g] = T.clip(predictor_updates[self.pred_g], N.cast[floatX](0.5), N.cast[floatX](1000.0))
self.train_predictor = function([V, H, alpha], updates=predictor_updates)
|
'.. todo::
WRITEME'
| def weights_format(self):
| return ['v', 'h']
|
'.. todo::
WRITEME'
| def error_func(self, x):
| batch_size = x.shape[0]
H = N.zeros((batch_size, self.nhid), dtype=floatX)
for i in xrange(batch_size):
assert (self.alpha > 9e-08)
H[i, :] = self.optimize_h(x[i, :])
assert (self.alpha > 9e-08)
return self.code_learning_obj(x, H)
|
'.. todo::
WRITEME'
| def record_monitoring_error(self, dataset, batch_size, batches):
| logger.info('running on monitoring set')
assert (self.error_record_mode == self.ERROR_RECORD_MODE_MONITORING)
w = self.W.get_value(borrow=True)
logger.info('weights summary: ({0}, {1}, {2})'.format(w.min(), w.mean(), w.max()))
errors = []
if self.instrumented:
self.clear_instruments()
for i in xrange(batches):
x = dataset.get_batch_design(batch_size)
error = self.error_func(x)
errors.append(error)
if self.instrumented:
self.update_instruments(x)
self.error_record.append((self.examples_seen, self.batches_seen, N.asarray(errors).mean()))
if self.instrumented:
self.instrument_record.begin_report(examples_seen=self.examples_seen, batches_seen=self.batches_seen)
self.make_instrument_report()
self.instrument_record.end_report()
self.clear_instruments()
logger.info('monitoring set done')
|
'.. todo::
WRITEME'
| def infer_h(self, v):
| return self.optimize_h(v)
|
'.. todo::
WRITEME'
| def optimize_h(self, v):
| assert (self.alpha > 9e-08)
self.init_h_v(v)
first = True
while True:
(obj, grad) = self.coding_obj_grad()
if first:
first = False
assert (not N.any(N.isnan(obj)))
assert (not N.any(N.isnan(grad)))
if (N.abs(grad).max() < self.tol):
break
cur_alpha = N.cast[floatX]((self.alpha + 0.0))
new_obj = self.try_step(cur_alpha, grad)
assert (not N.isnan(new_obj))
self.failure_rate = (((1.0 - self.time_constant) * self.failure_rate) + (self.time_constant * float((new_obj > obj))))
assert (self.alpha > 9e-08)
if ((self.failure_rate > 0.6) and (self.alpha > 1e-07)):
self.alpha *= 0.9
elif (self.failure_rate < 0.3):
self.alpha *= 1.1
assert (self.alpha > 9e-08)
while (new_obj >= obj):
cur_alpha *= 0.9
if (cur_alpha < 1e-12):
self.accept_h()
return self.get_h()
new_obj = self.try_step(cur_alpha, grad)
assert (not N.isnan(new_obj))
self.accept_h()
return self.get_h()
|
'.. todo::
WRITEME'
| def train_batch(self, dataset, batch_size):
| self.learn_mini_batch(dataset.get_batch_design(batch_size))
return True
|
'.. todo::
WRITEME'
| def learn_mini_batch(self, x):
| assert (self.alpha > 9e-08)
batch_size = x.shape[0]
H = N.zeros((batch_size, self.nhid), dtype=floatX)
for i in xrange(batch_size):
assert (self.alpha > 9e-08)
H[i, :] = self.optimize_h(x[i, :])
assert (self.alpha > 9e-08)
self.code_learning_step(x, H, self.learning_rate)
self.normalize_W()
self.train_predictor(x, H, self.predictor_learning_rate)
self.examples_seen += x.shape[0]
self.batches_seen += 1
|
'Returns the MLP that this layer belongs to.
Returns
mlp : MLP
The MLP that this layer belongs to, or None if it has not been
assigned to an MLP yet.'
| def get_mlp(self):
| if hasattr(self, 'mlp'):
return self.mlp
return None
|
'Assigns this layer to an MLP. This layer will then use the MLP\'s
random number generator, batch size, etc. This layer\'s name must
be unique within the MLP.
Parameters
mlp : MLP'
| def set_mlp(self, mlp):
| assert (self.get_mlp() is None)
self.mlp = mlp
|
'Returns monitoring channels.
Parameters
state_below : member of self.input_space
A minibatch of states that this Layer took as input.
Most of the time providing state_below is unnecessary when
state is given.
state : member of self.output_space
A minibatch of states that this Layer took on during fprop.
Provided externally so that we don\'t need to make a second
expression for it. This helps keep the Theano graph smaller
so that function compilation runs faster.
targets : member of self.output_space
Should be None unless this is the last layer.
If specified, it should be a minibatch of targets for the
last layer.
Returns
channels : OrderedDict
A dictionary mapping channel names to monitoring channels of
interest for this layer.'
| def get_layer_monitoring_channels(self, state_below=None, state=None, targets=None):
| return OrderedDict()
|
'Does the forward prop transformation for this layer.
Parameters
state_below : member of self.input_space
A minibatch of states of the layer below.
Returns
state : member of self.output_space
A minibatch of states of this layer.'
| def fprop(self, state_below):
| raise NotImplementedError((str(type(self)) + ' does not implement fprop.'))
|
'The cost of outputting Y_hat when the true output is Y.
Parameters
Y : theano.gof.Variable
The targets
Y_hat : theano.gof.Variable
The predictions.
Assumed to be the output of the layer\'s `fprop` method.
The implementation is permitted to do things like look at the
ancestors of `Y_hat` in the theano graph. This is useful for
e.g. computing numerically stable *log* probabilities when
`Y_hat` is the *probability*.
Returns
cost : theano.gof.Variable
A Theano scalar describing the cost.'
| def cost(self, Y, Y_hat):
| raise NotImplementedError((str(type(self)) + ' does not implement mlp.Layer.cost.'))
|
'The final scalar cost computed from the cost matrix
Parameters
cost_matrix : WRITEME
Examples
>>> # C = model.cost_matrix(Y, Y_hat)
>>> # Do something with C like setting some values to 0
>>> # cost = model.cost_from_cost_matrix(C)'
| def cost_from_cost_matrix(self, cost_matrix):
| raise NotImplementedError((str(type(self)) + ' does not implement mlp.Layer.cost_from_cost_matrix.'))
|
'The element wise cost of outputting Y_hat when the true output is Y.
Parameters
Y : WRITEME
Y_hat : WRITEME
Returns
WRITEME'
| def cost_matrix(self, Y, Y_hat):
| raise NotImplementedError((str(type(self)) + ' does not implement mlp.Layer.cost_matrix'))
|
'Sets the weights of the layer.
Parameters
weights : ndarray
A numpy ndarray containing the desired weights of the layer. This
docstring is provided by the Layer base class. Layer subclasses
should add their own docstring explaining the subclass-specific
format of the ndarray.'
| def set_weights(self, weights):
| raise NotImplementedError((str(type(self)) + ' does not implement set_weights.'))
|
'Returns the value of the biases of the layer.
Returns
biases : ndarray
A numpy ndarray containing the biases of the layer. This docstring
is provided by the Layer base class. Layer subclasses should add
their own docstring explaining the subclass-specific format of the
ndarray.'
| def get_biases(self):
| raise NotImplementedError((str(type(self)) + ' does not implement get_biases (perhaps because the class has no biases).'))
|
'Sets the biases of the layer.
Parameters
biases : ndarray
A numpy ndarray containing the desired biases of the layer. This
docstring is provided by the Layer base class. Layer subclasses
should add their own docstring explaining the subclass-specific
format of the ndarray.'
| def set_biases(self, biases):
| raise NotImplementedError((str(type(self)) + ' does not implement set_biases (perhaps because the class has no biases).'))
|
'Returns a description of how to interpret the weights of the layer.
Returns
format: tuple
Either (\'v\', \'h\') or (\'h\', \'v\').
(\'v\', \'h\') means a weight matrix of shape
(num visible units, num hidden units),
while (\'h\', \'v\') means the transpose of it.'
| def get_weights_format(self):
| raise NotImplementedError
|
'Provides an expression for a squared L2 penalty on the weights.
Parameters
coeff : float or tuple
The coefficient on the weight decay penalty for this layer.
This docstring is provided by the Layer base class. Individual
Layer subclasses should add their own docstring explaining the
format of `coeff` for that particular layer. For most ordinary
layers, `coeff` is a single float to multiply by the weight
decay term. Layers containing many pieces may take a tuple or
nested tuple of floats, and should explain the semantics of
the different elements of the tuple.
Returns
weight_decay : theano.gof.Variable
An expression for the weight decay penalty term for this
layer.'
| def get_weight_decay(self, coeff):
| raise NotImplementedError((str(type(self)) + ' does not implement get_weight_decay.'))
|
'Provides an expression for an L1 penalty on the weights.
Parameters
coeff : float or tuple
The coefficient on the L1 weight decay penalty for this layer.
This docstring is provided by the Layer base class. Individual
Layer subclasses should add their own docstring explaining the
format of `coeff` for that particular layer. For most ordinary
layers, `coeff` is a single float to multiply by the weight
decay term. Layers containing many pieces may take a tuple or
nested tuple of floats, and should explain the semantics of
the different elements of the tuple.
Returns
weight_decay : theano.gof.Variable
An expression for the L1 weight decay penalty term for this
layer.'
| def get_l1_weight_decay(self, coeff):
| raise NotImplementedError((str(type(self)) + ' does not implement get_l1_weight_decay.'))
|
'Tells the layer to prepare for input formatted according to the
given space.
Parameters
space : Space
The Space the input to this layer will lie in.
Notes
This usually resets parameters.'
| def set_input_space(self, space):
| raise NotImplementedError((str(type(self)) + ' does not implement set_input_space.'))
|
'.. todo::
WRITEME'
| def setup_rng(self):
| assert (not self._nested), "Nested MLPs should use their parent's RNG"
if (self.seed is None):
self.seed = [2013, 1, 4]
self.rng = np.random.RandomState(self.seed)
|
'Tells each layer what its input space should be.
Notes
This usually resets the layer\'s parameters!'
| def _update_layer_input_spaces(self):
| layers = self.layers
try:
layers[0].set_input_space(self.get_input_space())
except BadInputSpaceError as e:
raise TypeError((((((((((('Layer 0 (' + str(layers[0])) + ' of type ') + str(type(layers[0]))) + ") does not support the MLP's ") + 'specified input space (') + str(self.get_input_space())) + ' of type ') + str(type(self.get_input_space()))) + '). Original exception: ') + str(e)))
for i in xrange(1, len(layers)):
layers[i].set_input_space(layers[(i - 1)].get_output_space())
|
'Add new layers on top of the existing hidden layers
Parameters
layers : WRITEME'
| def add_layers(self, layers):
| existing_layers = self.layers
assert (len(existing_layers) > 0)
for layer in layers:
assert (layer.get_mlp() is None)
layer.set_mlp(self)
if ((not self._nested) or hasattr(self, 'input_space')):
layer.set_input_space(existing_layers[(-1)].get_output_space())
existing_layers.append(layer)
assert (layer.layer_name not in self.layer_names)
self.layer_names.add(layer.layer_name)
|
'Freezes some of the parameters (new theano functions that implement
learning will not use them; existing theano functions will continue
to modify them).
Parameters
parameter_set : set
Set of parameters to freeze.'
| def freeze(self, parameter_set):
| self.freeze_set = self.freeze_set.union(parameter_set)
|
'Returns data specs requiring both inputs and targets.
Returns
data_specs: TODO
The data specifications for both inputs and targets.'
| def get_monitoring_data_specs(self):
| if (not self.monitor_targets):
return (self.get_input_space(), self.get_input_source())
space = CompositeSpace((self.get_input_space(), self.get_target_space()))
source = (self.get_input_source(), self.get_target_source())
return (space, source)
|
'Returns the output of the MLP, when applying dropout to the input and
intermediate layers.
Parameters
state_below : WRITEME
The input to the MLP
default_input_include_prob : WRITEME
input_include_probs : WRITEME
default_input_scale : WRITEME
input_scales : WRITEME
per_example : bool, optional
Sample a different mask value for every example in a batch.
Defaults to `True`. If `False`, sample one mask per mini-batch.
Notes
Each input to each layer is randomly included or
excluded for each example. The probability of inclusion is independent
for each input and each example. Each layer uses
`default_input_include_prob` unless that layer\'s name appears as a key
in input_include_probs, in which case the input inclusion probability
is given by the corresponding value.
Each feature is also multiplied by a scale factor. The scale factor for
each layer\'s input scale is determined by the same scheme as the input
probabilities.'
| def dropout_fprop(self, state_below, default_input_include_prob=0.5, input_include_probs=None, default_input_scale=2.0, input_scales=None, per_example=True):
| if (input_include_probs is None):
input_include_probs = {}
if (input_scales is None):
input_scales = {}
self._validate_layer_names(list(input_include_probs.keys()))
self._validate_layer_names(list(input_scales.keys()))
theano_rng = MRG_RandomStreams(max(self.rng.randint((2 ** 15)), 1))
for layer in self.layers:
layer_name = layer.layer_name
if (layer_name in input_include_probs):
include_prob = input_include_probs[layer_name]
else:
include_prob = default_input_include_prob
if (layer_name in input_scales):
scale = input_scales[layer_name]
else:
scale = default_input_scale
state_below = self.apply_dropout(state=state_below, include_prob=include_prob, theano_rng=theano_rng, scale=scale, mask_value=layer.dropout_input_mask_value, input_space=layer.get_input_space(), per_example=per_example)
state_below = layer.fprop(state_below)
return state_below
|
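As a rough numpy illustration of the masking scheme the docstring describes (a sketch, not pylearn2's Theano implementation): each unit of each example is kept independently with probability include_prob and the surviving units are rescaled, so with include_prob=0.5 the conventional scale of 2.0 leaves the expected activation unchanged.

    import numpy as np

    rng = np.random.RandomState(0)

    def dropout_mask_and_scale(state_below, include_prob=0.5, scale=2.0):
        # One independent Bernoulli(include_prob) decision per unit and per example,
        # matching the per_example=True behaviour described above.
        mask = rng.binomial(n=1, p=include_prob, size=state_below.shape)
        return state_below * mask * scale

    x = rng.randn(4, 3)  # toy (batch, features) input
    print(dropout_mask_and_scale(x))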
'Forward propagate through the network with a dropout mask
determined by an integer (the binary representation of
which is used to generate the mask).
Parameters
state_below : tensor_like
The (symbolic) output state of the layer below.
mask : int
An integer indexing possible binary masks. It should be
< 2 ** get_total_input_dimension(masked_input_layers)
and greater than or equal to 0.
masked_input_layers : list, optional
A list of layer names to mask. If `None`, the input to all layers
(including the first hidden layer) is masked.
default_input_scale : float, optional
The amount to scale inputs in masked layers that do not appear in
`input_scales`. Defaults to 2.
input_scales : dict, optional
A dictionary mapping layer names to floating point numbers
indicating how much to scale input to a given layer.
Returns
masked_output : tensor_like
The output of the forward propagation of the masked network.'
| def masked_fprop(self, state_below, mask, masked_input_layers=None, default_input_scale=2.0, input_scales=None):
| if (input_scales is not None):
self._validate_layer_names(input_scales)
else:
input_scales = {}
if (masked_input_layers is not None):
self._validate_layer_names(masked_input_layers)
else:
masked_input_layers = self.layer_names
if any(((n not in masked_input_layers) for n in input_scales)):
layers = [n for n in input_scales if (n not in masked_input_layers)]
raise ValueError(('input scales provided for layer not masked: %s' % ', '.join(layers)))
num_inputs = self.get_total_input_dimension(masked_input_layers)
assert (mask >= 0), 'Mask must be a non-negative integer.'
if ((mask > 0) and (math.log(mask, 2) >= num_inputs)):
raise ValueError(('mask value of %d too large; only %d inputs to layers (%s)' % (mask, num_inputs, ', '.join(masked_input_layers))))
def binary_string(x, length, dtype):
'Create the binary representation of an integer `x`, padded to
`length`, with dtype `dtype`.
Parameters
length : WRITEME
dtype : WRITEME
Returns
WRITEME'
s = np.empty(length, dtype=dtype)
for i in range((length - 1), (-1), (-1)):
if ((x // (2 ** i)) == 1):
s[i] = 1
else:
s[i] = 0
x = (x % (2 ** i))
return s
remaining_mask = mask
for layer in self.layers:
if (layer.layer_name in masked_input_layers):
scale = input_scales.get(layer.layer_name, default_input_scale)
n_inputs = layer.get_input_space().get_total_dimension()
layer_dropout_mask = (remaining_mask & ((2 ** n_inputs) - 1))
remaining_mask >>= n_inputs
mask = binary_string(layer_dropout_mask, n_inputs, 'uint8')
shape = layer.get_input_space().get_origin_batch(1).shape
s_mask = T.as_tensor_variable(mask).reshape(shape)
if (layer.dropout_input_mask_value == 0):
state_below = ((state_below * s_mask) * scale)
else:
state_below = T.switch(s_mask, (state_below * scale), layer.dropout_input_mask_value)
state_below = layer.fprop(state_below)
return state_below
|
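The loop above peels n_inputs bits off the integer mask for each masked layer. A toy plain-Python sketch of that bit arithmetic, with invented layer sizes:

    # How masked_fprop splits one integer mask across layers (illustrative sizes).
    layer_input_dims = [3, 2]  # first layer sees 3 inputs, second sees 2
    mask = 0b10110             # must be < 2 ** (3 + 2) = 32

    remaining = mask
    for n_inputs in layer_input_dims:
        layer_mask = remaining & ((2 ** n_inputs) - 1)
        remaining >>= n_inputs
        bits = [(layer_mask >> i) & 1 for i in range(n_inputs)]
        print(n_inputs, bits)  # per-unit keep/drop bits for this layer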
'.. todo::
WRITEME'
| def _validate_layer_names(self, layers):
| if any(((layer not in self.layer_names) for layer in layers)):
unknown_names = [layer for layer in layers if (layer not in self.layer_names)]
raise ValueError(('MLP has no layer(s) named %s' % ', '.join(unknown_names)))
|
'Get the total number of inputs to the layers whose
names are listed in `layers`. Used for computing the
total number of dropout masks.
Parameters
layers : WRITEME
Returns
WRITEME'
| def get_total_input_dimension(self, layers):
| self._validate_layer_names(layers)
total = 0
for layer in self.layers:
if (layer.layer_name in layers):
total += layer.get_input_space().get_total_dimension()
return total
|
'.. todo::
WRITEME
Parameters
state: WRITEME
include_prob : WRITEME
scale : WRITEME
theano_rng : WRITEME
input_space : WRITEME
mask_value : WRITEME
per_example : bool, optional
Sample a different mask value for every example in a batch.
Defaults to `True`. If `False`, sample one mask per mini-batch.'
| def apply_dropout(self, state, include_prob, scale, theano_rng, input_space, mask_value=0, per_example=True):
| if (include_prob in [None, 1.0, 1]):
return state
assert (scale is not None)
if isinstance(state, tuple):
return tuple((self.apply_dropout(substate, include_prob, scale, theano_rng, component, mask_value, per_example) for (substate, component) in safe_zip(state, input_space.components)))
if per_example:
mask = theano_rng.binomial(p=include_prob, size=state.shape, dtype=state.dtype)
else:
batch = input_space.get_origin_batch(1)
mask = theano_rng.binomial(p=include_prob, size=batch.shape, dtype=state.dtype)
rebroadcast = T.Rebroadcast(*zip(xrange(batch.ndim), [(s == 1) for s in batch.shape]))
mask = rebroadcast(mask)
if (mask_value == 0):
rval = ((state * mask) * scale)
else:
rval = T.switch(mask, (state * scale), mask_value)
return T.cast(rval, state.dtype)
|
'Computes self.cost, but takes data=(X, Y) rather than Y_hat as an
argument.
This is just a wrapper around self.cost that computes Y_hat by
calling Y_hat = self.fprop(X)
Parameters
data : WRITEME'
| def cost_from_X(self, data):
| self.cost_from_X_data_specs()[0].validate(data)
(X, Y) = data
Y_hat = self.fprop(X)
return self.cost(Y, Y_hat)
|
'Returns the data specs needed by cost_from_X.
This is useful if cost_from_X is used in a MethodCost.'
| def cost_from_X_data_specs(self):
| space = CompositeSpace((self.get_input_space(), self.get_target_space()))
source = (self.get_input_source(), self.get_target_source())
return (space, source)
|
'Summarizes the MLP by printing the size and format of the input to all
layers. Feel free to add reasonably concise info as needed.'
| def __str__(self):
| rval = []
for layer in self.layers:
rval.append(layer.layer_name)
input_space = layer.get_input_space()
rval.append(('\tInput space: ' + str(input_space)))
rval.append(('\tTotal input dimension: ' + str(input_space.get_total_dimension())))
rval = '\n'.join(rval)
return rval
|
'.. todo::
WRITEME'
| @wraps(Layer.set_biases)
def set_biases(self, biases):
| self.b.set_value(biases)
|
'.. todo::
WRITEME'
| @wraps(Layer.get_biases)
def get_biases(self):
| return self.b.get_value()
|
'Parameters
state_below : member of input_space
Returns
output : theano matrix
Affine transformation of state_below'
| def _linear_part(self, state_below):
| self.input_space.validate(state_below)
if self.requires_reformat:
state_below = self.input_space.format_as(state_below, self.desired_space)
z = self.transformer.lmul(state_below)
if self.use_bias:
z += self.b
if (self.layer_name is not None):
z.name = (self.layer_name + '_z')
return z
|
'Returns a batch (vector) of
mean across units of KL divergence for each example.
Parameters
Y : theano.gof.Variable
Targets
Y_hat : theano.gof.Variable
Output of `fprop`
mean across units, mean across batch of KL divergence
Notes
Uses KL(P || Q) where P is defined by Y and Q is defined by Y_hat
Currently Y must be purely binary. If it\'s not, you\'ll still
get the right gradient, but the value in the monitoring channel
will be wrong.
Y_hat must be generated by fprop, i.e., it must be a symbolic
sigmoid.
p log p - p log q + (1-p) log (1-p) - (1-p) log (1-q)
For binary p, some terms drop out:
- p log q - (1-p) log (1-q)
- p log sigmoid(z) - (1-p) log sigmoid(-z)
p softplus(-z) + (1-p) softplus(z)'
| @wraps(Layer.cost)
def cost(self, Y, Y_hat):
| total = self.kl(Y=Y, Y_hat=Y_hat)
ave = total.mean()
return ave
|
'Computes the KL divergence.
Parameters
Y : Variable
targets for the sigmoid outputs. Currently Y must be purely binary.
If it\'s not, you\'ll still get the right gradient, but the
value in the monitoring channel will be wrong.
Y_hat : Variable
predictions made by the sigmoid layer. Y_hat must be generated by
fprop, i.e., it must be a symbolic sigmoid.
Returns
ave : Variable
average kl divergence between Y and Y_hat.
Notes
Warning: This function expects a sigmoid nonlinearity in the
output layer and it uses kl function under pylearn2/expr/nnet/.
Returns a batch (vector) of mean across units of KL
divergence for each example,
KL(P || Q) where P is defined by Y and Q is defined by Y_hat:
p log p - p log q + (1-p) log (1-p) - (1-p) log (1-q)
For binary p, some terms drop out:
- p log q - (1-p) log (1-q)
- p log sigmoid(z) - (1-p) log sigmoid(-z)
p softplus(-z) + (1-p) softplus(z)'
| def kl(self, Y, Y_hat):
| batch_axis = self.output_space.get_batch_axis()
div = kl(Y=Y, Y_hat=Y_hat, batch_axis=batch_axis)
return div
|
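The identity quoted in the notes above, -p log sigmoid(z) - (1-p) log sigmoid(-z) = p softplus(-z) + (1-p) softplus(z), is what makes the cost numerically stable when Y_hat is a symbolic sigmoid of z. A quick numpy check of the identity, independent of pylearn2's kl helper:

    import numpy as np

    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    def softplus(z):
        # numerically stable log(1 + exp(z))
        return np.maximum(z, 0.0) + np.log1p(np.exp(-np.abs(z)))

    z = np.array([-30.0, -2.0, 0.0, 2.0, 30.0])  # pre-sigmoid activations
    p = np.array([0.0, 1.0, 1.0, 0.0, 1.0])      # binary targets
    naive = -p * np.log(sigmoid(z)) - (1 - p) * np.log(sigmoid(-z))
    stable = p * softplus(-z) + (1 - p) * softplus(z)
    print(np.allclose(naive, stable))            # True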
'Returns monitoring channels when using the layer to do detection
of binary events.
Parameters
state : theano.gof.Variable
Output of `fprop`
target : theano.gof.Variable
The targets from the dataset
Returns
channels : OrderedDict
Dictionary mapping channel names to Theano channel values.'
| def get_detection_channels_from_state(self, state, target):
| rval = OrderedDict()
y_hat = (state > 0.5)
y = (target > 0.5)
wrong_bit = T.cast(T.neq(y, y_hat), state.dtype)
rval['01_loss'] = wrong_bit.mean()
rval['kl'] = self.cost(Y_hat=state, Y=target)
y = T.cast(y, state.dtype)
y_hat = T.cast(y_hat, state.dtype)
tp = (y * y_hat).sum()
fp = ((1 - y) * y_hat).sum()
precision = compute_precision(tp, fp)
recall = compute_recall(y, tp)
f1 = compute_f1(precision, recall)
rval['precision'] = precision
rval['recall'] = recall
rval['f1'] = f1
tp = (y * y_hat).sum(axis=0)
fp = ((1 - y) * y_hat).sum(axis=0)
precision = compute_precision(tp, fp)
rval['per_output_precision_max'] = precision.max()
rval['per_output_precision_mean'] = precision.mean()
rval['per_output_precision_min'] = precision.min()
recall = compute_recall(y, tp)
rval['per_output_recall_max'] = recall.max()
rval['per_output_recall_mean'] = recall.mean()
rval['per_output_recall_min'] = recall.min()
f1 = compute_f1(precision, recall)
rval['per_output_f1_max'] = f1.max()
rval['per_output_f1_mean'] = f1.mean()
rval['per_output_f1_min'] = f1.min()
return rval
|
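The channels above threshold state and target at 0.5 and report precision, recall and F1. The compute_precision, compute_recall and compute_f1 helpers are pylearn2 internals; the numpy sketch below simply restates the usual definitions they are assumed to implement:

    import numpy as np

    y = np.array([1, 0, 1, 1, 0], dtype=float)      # thresholded targets
    y_hat = np.array([1, 1, 1, 0, 0], dtype=float)  # thresholded predictions

    tp = (y * y_hat).sum()
    fp = ((1 - y) * y_hat).sum()
    precision = tp / (tp + fp)
    recall = tp / y.sum()
    f1 = 2.0 * precision * recall / (precision + recall)
    print(precision, recall, f1)  # 0.666..., 0.666..., 0.666...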
'Applies the nonlinearity over the convolutional layer.
Parameters
linear_response: Variable
linear response of the layer.
Returns
p: Variable
the response of the layer after the activation function
is applied over.'
| def apply(self, linear_response):
| p = linear_response
return p
|
'Computes the monitoring channels which does not require targets.
Parameters
state : member of self.output_space
A minibatch of states that this Layer took on during fprop.
Provided externally so that we don\'t need to make a second
expression for it. This helps keep the Theano graph smaller
so that function compilation runs faster.
Returns
rval : OrderedDict
A dictionary mapping channel names to monitoring channels of
interest for this layer.'
| def _get_monitoring_channels_for_activations(self, state):
| rval = OrderedDict({})
mx = state.max(axis=0)
mean = state.mean(axis=0)
mn = state.min(axis=0)
rg = (mx - mn)
rval['range_x_max_u'] = rg.max()
rval['range_x_mean_u'] = rg.mean()
rval['range_x_min_u'] = rg.min()
rval['max_x_max_u'] = mx.max()
rval['max_x_mean_u'] = mx.mean()
rval['max_x_min_u'] = mx.min()
rval['mean_x_max_u'] = mean.max()
rval['mean_x_mean_u'] = mean.mean()
rval['mean_x_min_u'] = mean.min()
rval['min_x_max_u'] = mn.max()
rval['min_x_mean_u'] = mn.mean()
rval['min_x_min_u'] = mn.min()
return rval
|
'Override the default get_monitoring_channels_from_state function.
Parameters
state : member of self.output_space
A minibatch of states that this Layer took on during fprop.
Provided externally so that we don\'t need to make a second
expression for it. This helps keep the Theano graph smaller
so that function compilation runs faster.
target : member of self.output_space
Should be None unless this is the last layer.
If specified, it should be a minibatch of targets for the
last layer.
cost_fn : theano computational graph or None
This is the theano computational graph of a cost function.
Returns
rval : OrderedDict
A dictionary mapping channel names to monitoring channels of
interest for this layer.'
| def get_monitoring_channels_from_state(self, state, target, cost_fn=None):
| rval = self._get_monitoring_channels_for_activations(state)
return rval
|
'The cost of outputting Y_hat when the true output is Y.
Parameters
Y : theano.gof.Variable
Output of `fprop`
Y_hat : theano.gof.Variable
Targets
batch_axis : integer
axis representing batch dimension
Returns
cost : theano.gof.Variable
0-D tensor describing the cost'
| def cost(self, Y, Y_hat, batch_axis):
| raise NotImplementedError((str(type(self)) + ' does not implement cost function.'))
|
'Notes
Mean squared error across examples in a batch'
| @wraps(ConvNonlinearity.cost, append=True)
def cost(self, Y, Y_hat, batch_axis):
| return T.sum(T.mean(T.sqr((Y - Y_hat)), axis=batch_axis))
|
'Parameters
left_slope : float, optional
left slope for the linear response of the rectifier function.
default is 0.0.'
| def __init__(self, left_slope=0.0):
| self.non_lin_name = 'rectifier'
self.left_slope = left_slope
|
'Applies the rectifier nonlinearity over the convolutional layer.'
| @wraps(ConvNonlinearity.apply)
def apply(self, linear_response):
| p = ((linear_response * (linear_response > 0.0)) + ((self.left_slope * linear_response) * (linear_response < 0.0)))
return p
|
'Applies the sigmoid nonlinearity over the convolutional layer.'
| @wraps(ConvNonlinearity.apply)
def apply(self, linear_response):
| p = T.nnet.sigmoid(linear_response)
return p
|
'Notes
Cost mean across units, mean across batch of KL divergence
KL(P || Q) where P is defined by Y and Q is defined by Y_hat
KL(P || Q) = p log p - p log q + (1-p) log (1-p) - (1-p) log (1-q)'
| @wraps(ConvNonlinearity.cost, append=True)
def cost(self, Y, Y_hat, batch_axis):
| ave_total = kl(Y=Y, Y_hat=Y_hat, batch_axis=batch_axis)
ave = ave_total.mean()
return ave
|
'Applies the tanh nonlinearity over the convolutional layer.'
| @wraps(ConvNonlinearity.apply)
def apply(self, linear_response):
| p = T.tanh(linear_response)
return p
|
'This function initializes the transformer of the class. Re-running
this function will reset the transformer.
Parameters
rng : object
random number generator object.'
| def initialize_transformer(self, rng):
| if (self.irange is not None):
assert (self.sparse_init is None)
self.transformer = conv2d.make_random_conv2D(irange=self.irange, input_space=self.input_space, output_space=self.detector_space, kernel_shape=self.kernel_shape, subsample=self.kernel_stride, border_mode=self.border_mode, rng=rng)
elif (self.sparse_init is not None):
self.transformer = conv2d.make_sparse_random_conv2D(num_nonzero=self.sparse_init, input_space=self.input_space, output_space=self.detector_space, kernel_shape=self.kernel_shape, subsample=self.kernel_stride, border_mode=self.border_mode, rng=rng)
else:
raise ValueError('irange and sparse_init cannot both be None')
|
'Initializes the output space of the ConvElemwise layer by taking
pooling operator and the hyperparameters of the convolutional layer
into consideration as well.'
| def initialize_output_space(self):
| dummy_batch_size = self.mlp.batch_size
if (dummy_batch_size is None):
dummy_batch_size = 2
dummy_detector = sharedX(self.detector_space.get_origin_batch(dummy_batch_size))
if (self.pool_type is not None):
assert (self.pool_type in ['max', 'mean'])
if (self.pool_type == 'max'):
dummy_p = max_pool(bc01=dummy_detector, pool_shape=self.pool_shape, pool_stride=self.pool_stride, image_shape=self.detector_space.shape)
elif (self.pool_type == 'mean'):
dummy_p = mean_pool(bc01=dummy_detector, pool_shape=self.pool_shape, pool_stride=self.pool_stride, image_shape=self.detector_space.shape)
dummy_p = dummy_p.eval()
self.output_space = Conv2DSpace(shape=[dummy_p.shape[2], dummy_p.shape[3]], num_channels=self.output_channels, axes=('b', 'c', 0, 1))
else:
dummy_detector = dummy_detector.eval()
self.output_space = Conv2DSpace(shape=[dummy_detector.shape[2], dummy_detector.shape[3]], num_channels=self.output_channels, axes=('b', 'c', 0, 1))
logger.info('Output space: {0}'.format(self.output_space.shape))
|
'Note: this function will reset the parameters!'
| @wraps(Layer.set_input_space)
def set_input_space(self, space):
| self.input_space = space
if (not isinstance(space, Conv2DSpace)):
raise BadInputSpaceError(((((self.__class__.__name__ + '.set_input_space expected a Conv2DSpace, got ') + str(space)) + ' of type ') + str(type(space))))
rng = self.mlp.rng
if (self.border_mode == 'valid'):
output_shape = [(int(((self.input_space.shape[0] - self.kernel_shape[0]) / self.kernel_stride[0])) + 1), (int(((self.input_space.shape[1] - self.kernel_shape[1]) / self.kernel_stride[1])) + 1)]
elif (self.border_mode == 'full'):
output_shape = [(int(((self.input_space.shape[0] + self.kernel_shape[0]) / self.kernel_stride[0])) - 1), (int(((self.input_space.shape[1] + self.kernel_shape[1]) / self.kernel_stride[1])) - 1)]
self.detector_space = Conv2DSpace(shape=output_shape, num_channels=self.output_channels, axes=('b', 'c', 0, 1))
self.initialize_transformer(rng)
(W,) = self.transformer.get_params()
W.name = (self.layer_name + '_W')
if self.tied_b:
self.b = sharedX((np.zeros(self.detector_space.num_channels) + self.init_bias))
else:
self.b = sharedX((self.detector_space.get_origin() + self.init_bias))
self.b.name = (self.layer_name + '_b')
logger.info('Input shape: {0}'.format(self.input_space.shape))
logger.info('Detector space: {0}'.format(self.detector_space.shape))
self.initialize_output_space()
|
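The detector-space shape above follows standard convolution arithmetic: 'valid' mode gives floor((input - kernel) / stride) + 1 per spatial dimension, while 'full' mode gives (input + kernel) / stride - 1 as written. A small self-contained check of those formulas with illustrative numbers:

    def conv_output_shape(in_shape, kernel_shape, stride, border_mode='valid'):
        # Mirrors the shape arithmetic in set_input_space above.
        if border_mode == 'valid':
            return [int((i - k) / s) + 1 for i, k, s in zip(in_shape, kernel_shape, stride)]
        elif border_mode == 'full':
            return [int((i + k) / s) - 1 for i, k, s in zip(in_shape, kernel_shape, stride)]
        raise ValueError(border_mode)

    print(conv_output_shape([32, 32], [5, 5], [1, 1], 'valid'))  # [28, 28]
    print(conv_output_shape([32, 32], [5, 5], [1, 1], 'full'))   # [36, 36]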
'Notes
The cost method calls `self.nonlin.cost`'
| @wraps(Layer.cost, append=True)
def cost(self, Y, Y_hat):
| batch_axis = self.output_space.get_batch_axis()
return self.nonlin.cost(Y=Y, Y_hat=Y_hat, batch_axis=batch_axis)
|
'Provides an expression for a squared L2 penalty on the weights,
which is the weighted sum of the squared L2 penalties of the layer
components.
Parameters
coeff : float or tuple/list
The coefficient on the squared L2 weight decay penalty for
this layer. If a single value is provided, this coefficient is
used for each component layer. If a list of tuple of
coefficients is given they are passed on to the component
layers in the given order.
Returns
weight_decay : theano.gof.Variable
An expression for the squared L2 weight decay penalty term for
this layer.'
| def get_weight_decay(self, coeff):
| return self._weight_decay_aggregate('get_weight_decay', coeff)
|
'Provides an expression for a squared L1 penalty on the weights,
which is the weighted sum of the squared L1 penalties of the layer
components.
Parameters
coeff : float or tuple/list
The coefficient on the L1 weight decay penalty for this layer.
If a single value is provided, this coefficient is used for
each component layer. If a list of tuple of coefficients is
given they are passed on to the component layers in the
given order.
Returns
weight_decay : theano.gof.Variable
An expression for the L1 weight decay penalty term for this
layer.'
| def get_l1_weight_decay(self, coeff):
| return self._weight_decay_aggregate('get_l1_weight_decay', coeff)
|
'Compute the PCA transformation matrix.
Given a rectangular matrix :math:`X = USV` such that :math:`S` is a
diagonal matrix with :math:`X`\'s singular values along its diagonal,
returns :math:`W = V^{-1}`.
If `mean` is provided, it is subtracted from :math:`X` in place of the data mean.
Parameters
X : numpy.ndarray
Matrix of shape (n, d) on which to train PCA
mean : numpy.ndarray, optional
Feature means of shape (d,)'
| def train(self, X, mean=None):
| if (self.num_components is None):
self.num_components = X.shape[1]
if (mean is None):
mean = X.mean(axis=0)
X = (X - mean)
(v, W) = self._cov_eigen(X)
self.W = sharedX(W, name='W')
self.v = sharedX(v, name='v')
self.mean = sharedX(mean, name='mean')
self._update_cutoff()
component_cutoff = self.component_cutoff.get_value(borrow=True)
self.v.set_value(self.v.get_value(borrow=True)[:component_cutoff])
self.W.set_value(self.W.get_value(borrow=True)[:, :component_cutoff])
|
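A compact numpy sketch of the same recipe (center the data, eigendecompose the covariance, keep the leading components, optionally whiten by the square roots of the eigenvalues). This is a generic PCA illustration under those assumptions, not the `_cov_eigen` routine the class actually calls:

    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.randn(100, 5)  # toy data of shape (n, d)

    mean = X.mean(axis=0)
    Xc = X - mean
    v, W = np.linalg.eigh(np.cov(Xc, rowvar=False))  # eigenvalues in ascending order
    order = np.argsort(v)[::-1]                      # largest variance first
    v, W = v[order], W[:, order]

    num_components = 3
    W = W[:, :num_components]
    Y = Xc.dot(W)                                      # projected data
    Y_white = Xc.dot(W / np.sqrt(v[:num_components]))  # whitened, as in __call__
    print(Y.shape, Y_white.shape)                      # (100, 3) (100, 3)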
'Compute and return the PCA transformation of the current data.
Parameters
inputs : numpy.ndarray
Matrix of shape (n, d) on which to compute PCA
Returns
WRITEME'
| def __call__(self, inputs):
| self._update_cutoff()
normalized_mean = (inputs - self.mean)
normalized_mean.name = 'normalized_mean'
W = self.W[:, :self.component_cutoff]
if self.whiten:
W = (W / tensor.sqrt(self.v[:self.component_cutoff]))
Y = tensor.dot(normalized_mean, W)
return Y
|
'Compute and return the matrix one should multiply with to get the
PCA/whitened data
Returns
WRITEME'
| def get_weights(self):
| self._update_cutoff()
component_cutoff = self.component_cutoff.get_value()
W = self.W.get_value(borrow=False)
W = W[:, :component_cutoff]
if self.whiten:
W /= N.sqrt(self.v.get_value(borrow=False)[:component_cutoff])
return W
|
'Given a PCA transformation of the current data, compute and return
the reconstruction of the original input
Parameters
inputs : WRITEME
add_mean : bool, optional
WRITEME
Returns
WRITEME'
| def reconstruct(self, inputs, add_mean=True):
| self._update_cutoff()
if self.whiten:
inputs *= tensor.sqrt(self.v[:self.component_cutoff])
X = tensor.dot(inputs, self.W[:, :self.component_cutoff].T)
if add_mean:
X = (X + self.mean)
return X
|
'Update component cutoff shared var, based on current parameters.'
| def _update_cutoff(self):
| assert ((self.num_components is not None) and (self.num_components > 0)), 'Number of components requested must be >= 1'
v = self.v.get_value(borrow=True)
var_mask = ((v / v.sum()) > self.min_variance)
assert numpy.any(var_mask), 'No components exceed the given min. variance'
var_cutoff = (1 + numpy.where(var_mask)[0].max())
self.component_cutoff.set_value(min(var_cutoff, self.num_components))
|
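A toy numpy illustration of the cutoff rule implemented above: components are kept while their fraction of the total variance exceeds min_variance, and the count is additionally capped at num_components (the numbers are made up):

    import numpy as np

    v = np.array([5.0, 3.0, 1.5, 0.4, 0.1])  # toy eigenvalue spectrum
    min_variance = 0.05
    num_components = 4

    var_mask = (v / v.sum()) > min_variance   # fraction of variance per component
    var_cutoff = 1 + np.where(var_mask)[0].max()
    print(min(var_cutoff, num_components))    # 3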