'Returns an instance of pylearn2.space.Space describing the format of the vector space that the model outputs (this is a generalization of get_output_dim)'
def get_output_space(self):
return self.output_space
'Returns an instance of pylearn2.space.Space describing the format that the targets should be in, which may be different from the output space. Calls get_output_space() unless _target_space exists.'
def get_target_space(self):
if hasattr(self, '_target_space'):
    return self._target_space
else:
    return self.get_output_space()
'Returns a string, stating the source for the input. By default the model expects only one input source, which is called \'features\'.'
def get_input_source(self):
if hasattr(self, 'input_source'):
    return self.input_source
else:
    return 'features'
'Returns a string, stating the source for the output. By default the model expects only one output source, which is called \'targets\'.'
def get_target_source(self):
if hasattr(self, 'target_source'):
    return self.target_source
else:
    return 'targets'
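For orientation, a minimal sketch (not from the source) of how these getters pair up into the (space, source) tuples that pylearn2 data_specs use; `model` here is any hypothetical Model instance:

# Hypothetical illustration of data_specs pairs built from the getters above.
input_specs = (model.get_input_space(), model.get_input_source())    # e.g. (VectorSpace(784), 'features')
target_specs = (model.get_target_space(), model.get_target_source()) # e.g. (VectorSpace(10), 'targets')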
'Compute the free energy of data examples, if this model has probabilistic semantics. Parameters V : tensor_like, 2-dimensional A batch of i.i.d. examples with examples indexed along the first axis and features along the second. This is data on which the monitoring quantities will be calculated (e.g., a validation set). Returns free_energy : tensor, 1-dimensional A (symbolic) vector of free energies for each data example in `V`, i.e. `free_energy[i] = F(V[i])`.'
def free_energy(self, V):
raise NotImplementedError()
'Returns the parameters that define the model. Returns params : list A list of (Theano shared variable) parameters of the model. Notes By default, this returns a copy of the _params attribute, which individual models can simply fill with the list of model parameters. Alternatively, models may override `get_params`, so this should be considered the public interface to model parameters -- directly accessing or modifying _params is at-your-own-risk, as it may or may not exist. This is the main mechanism by which generic training algorithms like SGD know which values to update, however, even model parameters that should not be learned ought to be included here, so that the model\'s parameter set is more predictable. Parameters may be included here but held constant during learning via the `modify_updates` method.'
def get_params(self):
return list(self._params)
'Returns numerical values for the parameters that define the model. Parameters borrow : bool, optional Flag to be passed to the `.get_value()` method of the shared variable. If `False`, a copy will always be returned. Returns params : list A list of `numpy.ndarray` objects containing the current parameters of the model.'
def get_param_values(self, borrow=False):
assert not isinstance(self.get_params(), set)
return [param.get_value(borrow=borrow) for param in self.get_params()]
'Sets the values of the parameters that define the model Parameters values : list list of ndarrays borrow : bool The `borrow` flag to use with `set_value`.'
def set_param_values(self, values, borrow=False):
for param, value in zip(self.get_params(), values):
    param.set_value(value, borrow=borrow)
'Returns all parameters flattened into a single vector. Returns params : ndarray 1-D array of all parameter values.'
def get_param_vector(self):
values = self.get_param_values()
values = [value.reshape(value.size) for value in values]
return np.concatenate(values, axis=0)
'Sets all parameters from a single flat vector. Format is consistent with `get_param_vector`. Parameters vector : ndarray 1-D array of all parameter values.'
def set_param_vector(self, vector):
params = self.get_params()
cur_values = self.get_param_values()
pos = 0
for param, value in safe_zip(params, cur_values):
    size = value.size
    new_value = vector[pos:pos + size]
    param.set_value(new_value.reshape(*value.shape))
    pos += size
assert pos == vector.size
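As a rough illustration (assumptions: `model` is any Model instance with parameters already allocated), `get_param_vector` and `set_param_vector` are inverses of each other:

import numpy as np

# Hypothetical round-trip check on the flat parameter vector.
vec = model.get_param_vector()        # flat 1-D copy of all parameter values
model.set_param_vector(vec * 0.5)     # scale every parameter
assert np.allclose(model.get_param_vector(), vec * 0.5)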
'Re-compiles all Theano functions used internally by the model. Notes This function is often called after a model is unpickled from disk, since Theano functions are not pickled. However, it is not always called. This allows scripts like show_weights.py to rapidly unpickle a model and inspect its weights without needing to recompile all of its learning machinery. All Theano functions compiled by this method should be registered with the register_names_to_del method.'
def redo_theano(self):
pass
'Returns the number of visible units of the model. Deprecated; this assumes the model operates on a vector. Use get_input_space instead. This method may be removed on or after 2015-05-25.'
def get_input_dim(self):
raise NotImplementedError()
'Returns the number of output units of the model. Deprecated; this assumes the model operates on a vector. Use get_output_space instead. This method may be removed on or after 2015-05-25.'
def get_output_dim(self):
raise NotImplementedError()
'This is the method that pickle/cPickle uses to determine what portion of the model to serialize. We remove all fields listed in `self.names_to_del`. In particular, this should include all Theano functions, since they do not play nice with pickling.'
def __getstate__(self):
self._disallow_censor_updates()
d = OrderedDict()
names_to_del = getattr(self, 'names_to_del', set())
names_to_keep = set(self.__dict__.keys()).difference(names_to_del)
for name in names_to_keep:
    d[name] = self.__dict__[name]
return d
'Specifies the batch size to use with compute.test_value Returns test_batch_size : int Number of examples to use in batches with compute.test_value Notes The model specifies the number of examples in case it needs a fixed batch size or to keep the memory usage of testing under control.'
def get_test_batch_size(self):
return self._test_batch_size
'Print the versions of the various Python packages and basic information about the experiment setup (e.g. CPU, OS). Parameters print_theano_config : bool TODO WRITEME Notes Example output: .. code-block:: none numpy:1.6.1 | pylearn:a6e634b83d | pylearn2:57a156beb0 CPU: x86_64 OS: Linux-2.6.35.14-106.fc14.x86_64-x86_64-with-fedora-14-Laughlin'
def print_versions(self, print_theano_config=False):
self.libv.print_versions()
self.libv.print_exp_env_info(print_theano_config)
'Register names of fields that should not be pickled. Parameters names : iterable A collection of strings indicating names of fields on this object that should not be pickled. Notes All names registered will be deleted from the dictionary returned by the model\'s `__getstate__` method (unless a particular model overrides this method).'
def register_names_to_del(self, names):
if isinstance(names, six.string_types):
    names = [names]
try:
    assert all(isinstance(n, six.string_types) for n in iter(names))
except (TypeError, AssertionError):
    reraise_as(ValueError('Invalid names argument'))
if not hasattr(self, 'names_to_del'):
    self.names_to_del = set()
self.names_to_del = self.names_to_del.union(names)
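A sketch of the intended usage, under the assumption that a subclass compiles a Theano function inside redo_theano and wants it excluded from pickling. The attribute name `_batch_fn` and the method `some_symbolic_output` are made up for illustration; `theano` and `theano.tensor as T` are assumed to be imported:

def redo_theano(self):
    X = T.matrix()
    # compile an internal helper (hypothetical symbolic output method)
    self._batch_fn = theano.function([X], self.some_symbolic_output(X))
    # ensure __getstate__ drops the compiled function before pickling
    self.register_names_to_del(['_batch_fn'])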
'Enforces all constraints encoded by self.modify_updates.'
def enforce_constraints(self):
params = self.get_params()
updates = OrderedDict(izip_no_length_check(params, params))
self.modify_updates(updates)
f = function([], updates=updates)
f()
'A "scratch-space" for storing model metadata. Returns tag : defaultdict A defaultdict with "dict" as the default constructor. This lets you do things like `model.tag[ext_name][quantity_name]` without the annoyance of first initializing the dict `model.tag[ext_name]`. Notes Nothing critical to the implementation of a particular model or training algorithm in the library should get stored in `tag`. This is mainly for extensions or user code to take advantage of, and segregate such things from actual model implementation attributes.'
@property
def tag(self):
if not hasattr(self, '_tag'):
    self._tag = defaultdict(dict)
return self._tag
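For example (the extension and quantity names here are hypothetical), an extension can stash bookkeeping on a model without pre-creating the inner dict:

# Hypothetical usage: no need to initialize model.tag['my_extension'] first.
model.tag['my_extension']['epochs_seen'] = 5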
'Creates the Autoencoder objects needed by the GSN. Parameters layer_sizes : WRITEME activation_funcs : WRITEME tied : WRITEME'
@staticmethod
def _make_aes(layer_sizes, activation_funcs, tied=True):
aes = []
assert len(activation_funcs) == len(layer_sizes)
for i in xrange(len(layer_sizes) - 1):
    act_enc = activation_funcs[i + 1]
    act_dec = act_enc if i != 0 else activation_funcs[0]
    aes.append(Autoencoder(layer_sizes[i], layer_sizes[i + 1],
                           act_enc, act_dec, tied_weights=tied))
return aes
'An easy (and recommended) way to initialize a GSN. Parameters layer_sizes : list A list of integers. The i_th element in the list is the size of the i_th layer of the network, and the network will have len(layer_sizes) layers. activation_funcs : list activation_funcs must be a list of the same length as layer_sizes where the i_th element is the activation function for the i_th layer. Each component of the list must refer to an activation function in such a way that the Autoencoder class recognizes the function. Valid values include a callable (which takes a symbolic tensor), a string that refers to a Theano activation function, or None (which gives the identity function). preact_corruptors : list preact_corruptors follows exactly the same format as the activations_func argument. postact_corruptors : list postact_corruptors follows exactly the same format as the activations_func argument. layer_samplers : list layer_samplers follows exactly the same format as the activations_func argument. tied : bool Indicates whether the network should use tied weights. Notes The GSN class applies functions in the following order: - pre-activation corruption - activation - clamping applied - sampling - post-activation corruption All setting and returning of values occurs after applying the activation function (or clamping if clamping is used) but before applying sampling.'
@classmethod
def new(cls, layer_sizes, activation_funcs, pre_corruptors, post_corruptors, layer_samplers, tied=True):
args = [layer_sizes, pre_corruptors, post_corruptors, layer_samplers]
if not all(isinstance(arg, list) for arg in args):
    raise TypeError('All arguments except for tied must be lists')
if not all(len(arg) == len(args[0]) for arg in args):
    lengths = map(len, args)
    raise ValueError('All list arguments must be of the same length. ' +
                     'Current lengths are %s' % lengths)
aes = cls._make_aes(layer_sizes, activation_funcs, tied=tied)
return cls(aes, preact_cors=pre_corruptors, postact_cors=post_corruptors,
           layer_samplers=layer_samplers)
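A hedged construction sketch. The layer sizes and activation strings are illustrative, and the corruptor class names (`GaussianCorruptor`, `SaltPepperCorruptor` from pylearn2.corruption) are assumed to be available in this pylearn2 version; `None` entries rely on the identity behavior described in the docstring:

# Hypothetical 784-1000-1000 GSN.
gsn = GSN.new(layer_sizes=[784, 1000, 1000],
              activation_funcs=[None, 'tanh', 'tanh'],
              pre_corruptors=[None, GaussianCorruptor(0.75), GaussianCorruptor(0.75)],
              post_corruptors=[SaltPepperCorruptor(0.5), None, None],
              layer_samplers=[None, None, None],
              tied=False)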
'.. todo:: WRITEME'
@functools.wraps(Model.get_params)
def get_params(self):
params = set()
for ae in self.aes:
    params.update(ae.get_params())
return list(params)
'Returns how many layers the GSN has.'
@property
def nlayers(self):
return (len(self.aes) + 1)
'This runs the GSN on input \'minibatch\' and returns all of the activations at every time step. Parameters minibatch : see parameter description in _set_activations walkback : int How many walkback steps to perform. clamped : list of theano tensors or None. clamped must be None or a list of len(minibatch) where each element is a Theano tensor or None. Each Theano tensor should be 1 for indices where the value should be clamped and 0 for where the value should not be clamped. Returns steps : list of list of tensor_likes A list of the activations at each time step. The activations themselves are lists of tensor_like symbolic variables. A time step consists of a call to the _update function (so updating both the even and odd layers). When there is no walkback, the GSN runs long enough for signal from the bottom layer to propagate to the top layer and then back to the bottom. The walkback parameter adds single steps on top of the default.'
def _run(self, minibatch, walkback=0, clamped=None):
set_idxs = safe_zip(*minibatch)[0]
if self.nlayers == 2 and len(set_idxs) == 2:
    if clamped is None:
        raise ValueError('Setting both layers of 2 layer GSN without '
                         'clamping causes one layer to overwrite the '
                         'other. The value for layer 0 will not be used.')
    else:
        warnings.warn('Setting both layers of 2 layer GSN with clamping '
                      'may not be valid, depending on what clamping is '
                      'done')
diff = lambda L: [L[i] - L[i - 1] for i in xrange(1, len(L))]
if 1 in diff(sorted(set_idxs)):
    warnings.warn('Adjacent layers in the GSN are being set. There is a'
                  ' significant possibility that some of the set values'
                  ' are not being used and are just overwriting each '
                  'other. This is dependent on both the ordering of the '
                  'even and odd steps as well as the proximity to the '
                  'edge of the network.\n It is recommended to read the '
                  'source to ensure the behavior is understood if setting '
                  'adjacent layers.')
self._set_activations(minibatch)
steps = [self.activations[:]]
self.apply_postact_corruption(self.activations, xrange(self.nlayers))
if clamped is not None:
    vals = safe_zip(*minibatch)[1]
    clamped = safe_zip(set_idxs, vals, clamped)
for _ in xrange(len(self.aes) + walkback):
    steps.append(self._update(self.activations, clamped=clamped))
return steps
'Compiles, wraps, and caches Theano functions for non-symbolic calls to get_samples. Parameters indices : WRITEME clamped : WRITEME'
def _make_or_get_compiled(self, indices, clamped=False):
def compile_f_init():
    mb = T.matrices(len(indices))
    zipped = safe_zip(indices, mb)
    f_init = theano.function(mb,
                             self._set_activations(zipped, corrupt=True),
                             allow_input_downcast=True)

    def wrap_f_init(*args):
        data = f_init(*args)
        length = len(data) / 2
        return data[:length], data[length:]

    return wrap_f_init

def compile_f_step():
    prev = T.matrices(self.nlayers)
    if clamped:
        _initial = T.matrices(len(indices))
        _clamps = T.matrices(len(indices))
        z = self._update(copy.copy(prev),
                         clamped=safe_zip(indices, _initial, _clamps),
                         return_activations=True)
        f = theano.function(prev + _initial + _clamps, z,
                            on_unused_input='ignore',
                            allow_input_downcast=True)
    else:
        z = self._update(copy.copy(prev), return_activations=True)
        f = theano.function(prev, z, on_unused_input='ignore',
                            allow_input_downcast=True)

    def wrapped(*args):
        data = f(*args)
        length = len(data) / 2
        return data[:length], data[length:]

    return wrapped

state = (self._corrupt_switch, self._sample_switch, self._bias_switch)
if hasattr(self, '_compiled_cache') and state == self._compiled_cache[0]:
    if indices == self._compiled_cache[1]:
        return self._compiled_cache[2:]
    else:
        f_init = compile_f_init()
        cc = self._compiled_cache
        self._compiled_cache = (state, indices, f_init, cc[3])
        return self._compiled_cache[2:]
else:
    f_init = compile_f_init()
    f_step = compile_f_step()
    self._compiled_cache = (state, indices, f_init, f_step)
    return self._compiled_cache[2:]
'Runs minibatch through GSN and returns reconstructed data. Parameters minibatch : see parameter description in _set_activations In addition to the description in get_samples, the tensor_likes in the list should be replaced by numpy matrices if symbolic=False. walkback : int How many walkback steps to perform. This is both how many extra samples to take as well as how many extra reconstructed points to train off of. See description in _run. This parameter controls how many samples you get back. indices : None or list of ints, optional Indices of the layers that should be returned for each time step. If indices is None, then get_samples returns the values for all of the layers which were initially specified (by minibatch). symbolic : bool, optional Whether the input (minibatch) contains Theano (symbolic) tensors or actual (numpy) arrays. This flag is needed because Theano cannot compile the large computational graphs that walkback creates. include_first : bool, optional Whether to include the initial activations (ie just the input) in the output. This is useful for visualization, but can screw up training due to some cost functions failing on perfect reconstruction. clamped : list of tensor_likes, optional See description on _run. Theano symbolics should be replaced by numpy matrices if symbolic=False. Length must be the same as length of minibatch. Returns reconstructions : list of tensor_likes A list of length 1 + number of layers + walkback that contains the samples generated by the GSN. The layers returned at each time step is decided by the indices parameter (and defaults to the layers specified in minibatch). If include_first is True, then the list will be 1 element longer (inserted at beginning) than specified above.'
def get_samples(self, minibatch, walkback=0, indices=None, symbolic=True, include_first=False, clamped=None):
if walkback > 8 and symbolic:
    warnings.warn('Running GSN in symbolic mode (needed for training) '
                  'with a lot of walkback. Theano may take a very long '
                  'time to compile this computational graph. If '
                  'compiling is taking too long, then reduce the amount '
                  'of walkback.')
input_idxs = safe_zip(*minibatch)[0]
if indices is None:
    indices = input_idxs
if not symbolic:
    vals = safe_zip(*minibatch)[1]
    f_init, f_step = self._make_or_get_compiled(input_idxs,
                                                clamped=(clamped is not None))
    if clamped is None:
        get_args = lambda x: x
    else:
        mb_values = [mb[1] for mb in minibatch]
        get_args = lambda x: x + mb_values + clamped
    precor, activations = f_init(*vals)
    results = [precor]
    for _ in xrange(len(self.aes) + walkback):
        precor, activations = f_step(*get_args(activations))
        results.append(precor)
else:
    results = self._run(minibatch, walkback=walkback, clamped=clamped)
if not include_first:
    results = results[1:]
return [[step[i] for i in indices] for step in results]
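A minimal numeric sampling sketch under assumed shapes; the array `X` and layer sizes are illustrative, and `gsn` is a hypothetical GSN instance:

import numpy as np

# Hypothetical: initialize the visible layer, run 4 extra walkback steps, and
# get visible-layer samples back as numpy arrays (symbolic=False avoids
# compiling one huge graph).
X = np.random.rand(20, 784).astype(np.float32)
samples = gsn.get_samples([(0, X)], walkback=4, indices=[0], symbolic=False)
# samples is a list over time steps; each element holds the requested layers.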
'.. todo:: WRITEME'
@functools.wraps(Autoencoder.reconstruct)
def reconstruct(self, minibatch):
assert len(minibatch) == 1
idx = minibatch[0][0]
return self.get_samples(minibatch, walkback=0, indices=[idx])
'As specified by StackedBlocks, this returns the output representation of all layers. This occurs at the final time step. Parameters minibatch : WRITEME Returns WRITEME'
def __call__(self, minibatch):
return self._run(minibatch)[(-1)]
'Initializes the GSN as specified by minibatch. Parameters minibatch : list of (int, tensor_like) The minibatch parameter must be a list of tuples of form (int, tensor_like), where the int component represents the index of the layer (so 0 for visible, -1 for top/last layer) and the tensor_like represents the activation at that level. Layer indices not included in the minibatch will be set to 0. For tuples included in the minibatch, the tensor_like component can actually be None; this will result in that layer getting set to 0 initially. set_val : bool, optional Determines whether the method sets self.activations. corrupt : bool, optional Instructs the method to return both a non-corrupted and corrupted set of activations rather than just non-corrupted. Notes This method creates a new list, not modifying an existing list. This method also does the first odd step in the network.'
def _set_activations(self, minibatch, set_val=True, corrupt=False):
activations = [None] * self.nlayers
mb_size = minibatch[0][1].shape[0]
first_layer_size = self.aes[0].weights.shape[0]
activations[0] = T.alloc(0, mb_size, first_layer_size)
for i in xrange(1, len(activations)):
    activations[i] = T.zeros_like(T.dot(activations[i - 1],
                                        self.aes[i - 1].weights))
for i, val in minibatch:
    if val is not None:
        activations[i] = val
indices = [t[0] for t in minibatch if t[1] is not None]
self._update_odds(activations, skip_idxs=indices, corrupt=False)
if set_val:
    self.activations = activations
if corrupt:
    return (activations +
            self.apply_postact_corruption(activations[:],
                                          xrange(len(activations))))
else:
    return activations
'Updates just the odd layers of the network. Parameters activations : list List of symbolic tensors representing the current activations. skip_idxs : list List of integers representing which odd indices should not be updated. This parameter exists so that _set_activations can solve the tricky problem of initializing the network when both even and odd layers are being assigned. corrupt : bool, optional Whether or not to apply post-activation corruption to the odd layers. This parameter does not alter the return value of this method but does modify the activations parameter in place. clamped : list, optional See description for _apply_clamping.'
def _update_odds(self, activations, skip_idxs=frozenset(), corrupt=True, clamped=None):
odds = filter(lambda i: i not in skip_idxs, range(1, len(activations), 2))
self._update_activations(activations, odds)
if clamped is not None:
    self._apply_clamping(activations, clamped)
odds_copy = [(i, activations[i]) for i in xrange(1, len(activations), 2)]
if corrupt:
    self.apply_postact_corruption(activations, odds)
return odds_copy
'Updates just the even layers of the network. Parameters See all of the parameter descriptions for _update_odds.'
def _update_evens(self, activations, clamped=None):
evens = xrange(0, len(activations), 2)
self._update_activations(activations, evens)
if clamped is not None:
    self._apply_clamping(activations, clamped)
evens_copy = [(i, activations[i]) for i in evens]
self.apply_postact_corruption(activations, evens)
return evens_copy
'See Figure 1 in "Deep Generative Stochastic Networks as Generative Models" by Bengio, Thibodeau-Laufer. This and _update_activations implement exactly that, which is essentially forward propagating the neural network in both directions. Parameters activations : list of tensors List of activations at time step t - 1. clamped : list See description on _apply_clamping return_activations : bool If true, then this method returns both the activation values after the activation function has been applied and the values after the sampling + post-activation corruption has been applied. If false, then only return the values after the activation function has been applied (no corrupted version). This parameter is only set to True when compiling the functions needed by get_samples. Regardless of this parameter setting, the sampling/post-activation corruption noise is still added in-place to activations. Returns y : list of tensors List of activations at time step t (prior to adding postact noise). Notes The return value is generally not equal to the value of activations at the end of this method. The return value contains all layers without sampling/post-activation noise, but the activations value contains noise on the odd layers (necessary to compute the even layers).'
def _update(self, activations, clamped=None, return_activations=False):
evens_copy = self._update_evens(activations, clamped=clamped)
odds_copy = self._update_odds(activations, clamped=clamped)
precor = [None] * len(self.activations)
for idx, val in evens_copy + odds_copy:
    assert precor[idx] is None
    precor[idx] = val
assert None not in precor
if return_activations:
    return precor + activations
else:
    return precor
'Resets the value of some layers within the network. Parameters activations : list List of symbolic tensors representing the current activations. clamped : list of (int, matrix, matrix or None) tuples The first component of each tuple is an int representing the index of the layer to clamp. The second component is a matrix of the initial values for that layer (ie what we are resetting the values to). The third component is a matrix mask indicating which indices in the minibatch to clamp (1 indicates clamping, 0 indicates not). The value of None is equivalent to the 0 matrix (so no clamping). If symbolic is true then matrices are Theano tensors, otherwise they should be numpy matrices. symbolic : bool, optional Whether to execute with symbolic Theano tensors or numpy matrices.'
@staticmethod
def _apply_clamping(activations, clamped, symbolic=True):
for idx, initial, clamp in clamped:
    if clamp is None:
        continue
    if symbolic:
        activations[idx] = T.switch(clamp, initial, activations[idx])
    else:
        # numpy has no `switch`; np.where is the equivalent elementwise select
        activations[idx] = np.where(clamp, initial, activations[idx])
return activations
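The clamping rule is elementwise: where the mask is 1 the initial value is kept, where it is 0 the freshly computed activation is kept. A small numpy sketch of the same semantics (values are illustrative):

import numpy as np

clamp = np.array([[1., 0., 1.]])
initial = np.array([[0.2, 0.4, 0.6]])
current = np.array([[9., 9., 9.]])
result = np.where(clamp, initial, current)   # -> [[0.2, 9.0, 0.6]]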
'Applies a list of corruptor functions to all layers. Parameters activations : list of tensor_likes Generally gsn.activations corruptors : list of callables Generally gsn.postact_cors or gsn.preact_cors idx_iter : iterable An iterable of indices into self.activations. The indexes indicate which layers the post activation corruptors should be applied to.'
@staticmethod
def _apply_corruption(activations, corruptors, idx_iter):
assert len(corruptors) == len(activations)
for i in idx_iter:
    activations[i] = corruptors[i](activations[i])
return activations
'.. todo:: WRITEME'
def apply_sampling(self, activations, idx_iter):
if self._sample_switch:
    self._apply_corruption(activations, self._layer_samplers, idx_iter)
return activations
'.. todo:: WRITEME'
def apply_postact_corruption(self, activations, idx_iter, sample=True):
if sample:
    self.apply_sampling(activations, idx_iter)
if self._corrupt_switch:
    self._apply_corruption(activations, self._postact_cors, idx_iter)
return activations
'.. todo:: WRITEME'
def apply_preact_corruption(self, activations, idx_iter):
if self._corrupt_switch:
    self._apply_corruption(activations, self._preact_cors, idx_iter)
return activations
'Actually computes the activations for all indices in idx_iter. This method computes the values for a layer by computing a linear combination of the neighboring layers (dictated by the weight matrices), applying the pre-activation corruption, and then applying the layer\'s activation function. Parameters activations : list of tensor_likes The activations to update (could be self.activations). Updates in-place. idx_iter : iterable An iterable of indices into self.activations. The indexes indicate which layers should be updated. Must be able to iterate over idx_iter multiple times.'
def _update_activations(self, activations, idx_iter):
from_above = lambda i: ((self.aes[i].visbias if self._bias_switch else 0) +
                        T.dot(activations[i + 1], self.aes[i].w_prime))
from_below = lambda i: ((self.aes[i - 1].hidbias if self._bias_switch else 0) +
                        T.dot(activations[i - 1], self.aes[i - 1].weights))
for i in idx_iter:
    if i == 0:
        activations[i] = from_above(i)
    elif i == len(activations) - 1:
        activations[i] = from_below(i)
    else:
        activations[i] = from_below(i) + from_above(i)
self.apply_preact_corruption(activations, idx_iter)
for i in idx_iter:
    if i == 0:
        act_func = self.aes[0].act_dec
    else:
        act_func = self.aes[i - 1].act_enc
    if act_func is not None:
        activations[i] = act_func(activations[i])
'\'convert\' essentially serves as the constructor for JointGSN. Parameters gsn : GSN input_idx : int The index of the layer which serves as the "input" to the network. During classification, this layer will be given. Defaults to 0. label_idx : int The index of the layer which serves as the "output" of the network. This label is predicted during classification. Defaults to top layer of network.'
@classmethod
def convert(cls, gsn, input_idx=0, label_idx=None):
gsn = copy.copy(gsn)
gsn.__class__ = cls
gsn.input_idx = input_idx
gsn.label_idx = label_idx or (gsn.nlayers - 1)
return gsn
'Utility method that calculates how much walkback is needed to get at least \'trials\' samples. Parameters trials : WRITEME'
def calc_walkback(self, trials):
wb = trials - len(self.aes)
if wb <= 0:
    return 0
else:
    return wb
'See classify method. Returns the prediction vector aggregated over all time steps where axis 0 is the minibatch item and axis 1 is the output for the label.'
def _get_aggregate_classification(self, minibatch, trials=10, skip=0):
clamped = np.ones(minibatch.shape, dtype=np.float32)
data = self.get_samples([(self.input_idx, minibatch)],
                        walkback=self.calc_walkback(trials + skip),
                        indices=[self.label_idx],
                        clamped=[clamped],
                        symbolic=False)
data = np.asarray(data[skip:(skip + trials)])[:, 0, :, :]
return data.mean(axis=0)
'Classifies a minibatch. This method clamps minibatch at self.input_idx and then runs the GSN. The first \'skip\' predictions are skipped and the next \'trials\' predictions are averaged and then arg-maxed to make a final prediction. The prediction vectors are the activations at self.label_idx. Parameters minibatch : numpy matrix WRITEME trials : int WRITEME skip : int WRITEME Notes This method creates a fairly large 3D tensor during classification, so one should watch memory use. The easiest way to limit memory consumption is to classify just minibatches rather than the whole test set at once. The large tensor is of size (skip + trials) * mb_size * num labels. .. warning:: This method does not directly control whether or not corruption and sampling is applied during classification. These are decided by self._corrupt_switch and self._sample_switch.'
def classify(self, minibatch, trials=10, skip=0):
mean = self._get_aggregate_classification(minibatch, trials=trials, skip=skip)
am = np.argmax(mean, axis=1)
labels = np.zeros_like(mean)
labels[np.arange(labels.shape[0]), am] = 1.0
return labels
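A hedged usage sketch; `joint_gsn`, `test_images`, and `test_labels` are hypothetical names, with `test_labels` assumed to hold integer class ids:

# Classify 100 examples, skipping the first 2 noisy predictions and averaging 10.
X = test_images[:100]                       # assumed (100, 784) float32 matrix
Y_pred = joint_gsn.classify(X, trials=10, skip=2)   # one-hot predictions
accuracy = (Y_pred.argmax(axis=1) == test_labels[:100]).mean()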
'Clamps labels and generates samples. Parameters labels : WRITEME trials : WRITEME'
def get_samples_from_labels(self, labels, trials=5):
clamped = np.ones(labels.shape, dtype=np.float32)
data = self.get_samples([(self.label_idx, labels)],
                        walkback=self.calc_walkback(trials),
                        indices=[self.input_idx],
                        clamped=[clamped],
                        symbolic=False)
return np.array(data)[:, 0, :, :]
'Get all layers in this model. Returns layers : list'
def get_all_layers(self):
return ([self.visible_layer] + self.hidden_layers)
'Compute the energy of current model with visible and hidden samples. Parameters V : tensor_like Theano batch of visible unit observations (must be SAMPLES, not mean field parameters) hidden : list List, one element per hidden layer, of batches of samples (must be SAMPLES, not mean field parameters) Returns rval : tensor_like Vector containing the energy of each sample Notes Applying this function to non-sample theano variables is not guaranteed to give you an expected energy in general, so don\'t use this that way.'
def energy(self, V, hidden):
terms = []
terms.append(self.visible_layer.expected_energy_term(state=V, average=False))
assert len(self.hidden_layers) > 0
terms.append(self.hidden_layers[0].expected_energy_term(
    state_below=self.visible_layer.upward_state(V),
    state=hidden[0], average_below=False, average=False))
for i in xrange(1, len(self.hidden_layers)):
    layer = self.hidden_layers[i]
    samples_below = hidden[i - 1]
    layer_below = self.hidden_layers[i - 1]
    samples_below = layer_below.upward_state(samples_below)
    samples = hidden[i]
    terms.append(layer.expected_energy_term(state_below=samples_below,
                                            state=samples,
                                            average_below=False,
                                            average=False))
assert len(terms) > 0
rval = reduce(operator.add, terms)
assert rval.ndim == 1
return rval
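For reference, the quantity being assembled term-by-term is the usual DBM energy. For a two-hidden-layer DBM with visible bias b, hidden biases c^{(1)}, c^{(2)} and weights W^{(1)}, W^{(2)}, the standard convention (a sketch, not a formula quoted from the source) is:

E(v, h^{(1)}, h^{(2)}) = -b^\top v - v^\top W^{(1)} h^{(1)} - c^{(1)\top} h^{(1)} - h^{(1)\top} W^{(2)} h^{(2)} - c^{(2)\top} h^{(2)}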
'Perform mean field inference, using the model\'s inference procedure.'
def mf(self, *args, **kwargs):
self.setup_inference_procedure()
return self.inference_procedure.mf(*args, **kwargs)
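A brief usage sketch, assuming `dbm` is a constructed DBM and that the input Space provides make_theano_batch (as pylearn2 spaces generally do):

# Hypothetical: build a symbolic batch and run mean field inference.
X = dbm.get_input_space().make_theano_batch()
H_hat = dbm.mf(X)                                   # one mean field state per hidden layer
full_history = dbm.mf(X, return_history=True)       # every iteration's states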
'Compute the energy of current model with the visible samples and variational parameters. Parameters V : tensor_like Theano batch of visible unit observations (must be SAMPLES, not mean field parameters: the random variables in the expectation are the hiddens only) mf_hidden : list List, one element per hidden layer, of batches of variational parameters (must be VARIATIONAL PARAMETERS, not samples. Layers with analytically determined variance parameters for their mean field parameters will use those to integrate over the variational distribution, so it\'s not generally the same thing as measuring the energy at a point.) Returns rval : tensor_like Vector containing the expected energy of each example under the corresponding variational distribution.'
def expected_energy(self, V, mf_hidden):
self.visible_layer.space.validate(V)
assert isinstance(mf_hidden, (list, tuple))
assert len(mf_hidden) == len(self.hidden_layers)
terms = []
terms.append(self.visible_layer.expected_energy_term(state=V, average=False))
assert len(self.hidden_layers) > 0
terms.append(self.hidden_layers[0].expected_energy_term(
    state_below=self.visible_layer.upward_state(V),
    average_below=False, state=mf_hidden[0], average=True))
for i in xrange(1, len(self.hidden_layers)):
    layer = self.hidden_layers[i]
    layer_below = self.hidden_layers[i - 1]
    mf_below = mf_hidden[i - 1]
    mf_below = layer_below.upward_state(mf_below)
    mf = mf_hidden[i]
    terms.append(layer.expected_energy_term(state_below=mf_below, state=mf,
                                            average_below=True, average=True))
assert len(terms) > 0
rval = reduce(operator.add, terms)
assert rval.ndim == 1
return rval
'Set the random number generator for the model.'
def setup_rng(self):
self.rng = make_np_rng(None, [2012, 10, 17], which_method='uniform')
'Set the inference procedure for the model. Defaults to `WeightDoubling`.'
def setup_inference_procedure(self):
if not hasattr(self, 'inference_procedure') or self.inference_procedure is None:
    self.inference_procedure = WeightDoubling()
    self.inference_procedure.set_dbm(self)
'Set the sampling procedure for the model. Defaults to `GibbsEvenOdd`.'
def setup_sampling_procedure(self):
if not hasattr(self, 'sampling_procedure') or self.sampling_procedure is None:
    self.sampling_procedure = GibbsEvenOdd()
    self.sampling_procedure.set_dbm(self)
'.. todo:: WRITEME'
def get_output_space(self):
return self.hidden_layers[(-1)].get_output_space()
'Tells each layer what its input space should be. Notes This usually resets the layer\'s parameters!'
def _update_layer_input_spaces(self):
visible_layer = self.visible_layer
hidden_layers = self.hidden_layers
self.hidden_layers[0].set_input_space(visible_layer.space)
for i in xrange(1, len(hidden_layers)):
    hidden_layers[i].set_input_space(
        hidden_layers[i - 1].get_output_space())
for layer in self.get_all_layers():
    layer.finalize_initialization()
'Add new layers on top of the existing hidden layers Parameters layers : list layers to be added'
def add_layers(self, layers):
if not hasattr(self, 'rng'):
    self.setup_rng()
hidden_layers = self.hidden_layers
assert len(hidden_layers) > 0
for layer in layers:
    assert layer.get_dbm() is None
    layer.set_dbm(self)
    layer.set_input_space(hidden_layers[-1].get_output_space())
    hidden_layers.append(layer)
    assert layer.layer_name not in self.layer_names
    self.layer_names.add(layer.layer_name)
'.. todo:: WRITEME'
def freeze(self, parameter_set):
if not hasattr(self, 'freeze_set'):
    self.freeze_set = set([])
self.freeze_set = self.freeze_set.union(parameter_set)
'.. todo:: WRITEME'
def get_params(self):
rval = []
for param in self.visible_layer.get_params():
    assert param.name is not None
rval = self.visible_layer.get_params()
for layer in self.hidden_layers:
    for param in layer.get_params():
        if param.name is None:
            raise ValueError('All of your parameters should have names, '
                             'but one of ' + layer.layer_name + "'s doesn't")
    layer_params = layer.get_params()
    assert not isinstance(layer_params, set)
    for param in layer_params:
        if param not in rval:
            rval.append(param)
if not hasattr(self, 'freeze_set'):
    self.freeze_set = set([])
rval = [elem for elem in rval if elem not in self.freeze_set]
assert all([elem.name is not None for elem in rval])
return rval
'.. todo:: WRITEME'
def set_batch_size(self, batch_size):
self.batch_size = batch_size
self.force_batch_size = batch_size
for layer in self.hidden_layers:
    layer.set_batch_size(batch_size)
if not hasattr(self, 'inference_procedure'):
    self.setup_inference_procedure()
self.inference_procedure.set_batch_size(batch_size)
'.. todo:: WRITEME'
def get_input_space(self):
return self.visible_layer.space
'.. todo:: WRITEME'
def get_lr_scalers(self):
rval = OrderedDict()
params = self.get_params()
for layer in self.hidden_layers + [self.visible_layer]:
    contrib = layer.get_lr_scalers()
    assert not any([key in rval for key in contrib])
    assert all([key in params for key in contrib])
    rval.update(contrib)
assert all([isinstance(val, float) for val in rval.values()])
return rval
'.. todo:: WRITEME'
def get_weights(self):
return self.hidden_layers[0].get_weights()
'.. todo:: WRITEME'
def get_weights_view_shape(self):
return self.hidden_layers[0].get_weights_view_shape()
'.. todo:: WRITEME'
def get_weights_format(self):
return self.hidden_layers[0].get_weights_format()
'.. todo:: WRITEME'
def get_weights_topo(self):
return self.hidden_layers[0].get_weights_topo()
'Makes and returns a dictionary mapping layers to states. By states, we mean here a real assignment, not a mean field state. For example, for a layer containing binary random variables, the state will be a shared variable containing values in {0,1}, not [0,1]. The visible layer will be included. Uses a dictionary so it is easy to unambiguously index a layer without needing to remember rules like vis layer = 0, hiddens start at 1, etc. Parameters num_examples : int Number of examples to make up the state rng : MRG_RandomStreams Random number generator, if None then use model\'s rng'
def make_layer_to_state(self, num_examples, rng=None):
layers = [self.visible_layer] + self.hidden_layers
if rng is None:
    rng = self.rng
states = [layer.make_state(num_examples, rng) for layer in layers]

def recurse_check(layer, state):
    if isinstance(state, (list, tuple)):
        for elem in state:
            recurse_check(layer, elem)
    else:
        val = state.get_value()
        m = val.shape[0]
        if m != num_examples:
            raise ValueError(layer.layer_name + ' gave state with ' +
                             str(m) + ' examples in some component. '
                             'We requested ' + str(num_examples))

for layer, state in safe_zip(layers, states):
    recurse_check(layer, state)
rval = OrderedDict(safe_zip(layers, states))
return rval
'Makes and returns a dictionary mapping layers to states. By states, we mean here a real assignment, not a mean field state. For example, for a layer containing binary random variables, the state will be a shared variable containing values in {0,1}, not [0,1]. The visible layer will be included. Uses a dictionary so it is easy to unambiguously index a layer without needing to remember rules like vis layer = 0, hiddens start at 1, etc. Parameters num_examples : int Number of examples to make up the state rng : MRG_RandomStreams Random number generator Notes This method returns a symbolic expression of the state, while `make_layer_to_state` returns a certain shared variable.'
def make_layer_to_symbolic_state(self, num_examples, rng=None):
layers = [self.visible_layer] + self.hidden_layers
assert rng is not None
states = [layer.make_symbolic_state(num_examples, rng) for layer in layers]
zipped = safe_zip(layers, states)
rval = OrderedDict(zipped)
return rval
'This method is for getting an updates dictionary for a theano function. It thus implies that the samples are represented as shared variables. If you want an expression for a sampling step applied to arbitrary theano variables, use the `DBM.sampling_procedure.sample` method. This is a wrapper around that method. Parameters layer_to_state : dict Dictionary mapping the SuperDBM_Layer instances contained in self to shared variables representing batches of samples of them. (you can allocate one by calling self.make_layer_to_state) theano_rng : MRG_RandomStreams Random number generator layer_to_clamp : dict, optional Dictionary mapping layers to bools. If a layer is not in the dictionary, defaults to False. True indicates that this layer should be clamped, so we are sampling from a conditional distribution rather than the joint distribution. num_steps : int, optional Steps of the sampling procedure. It samples for `num_steps` times and uses the last sample. return_layer_to_updated : bool, optional Whether to additionally return the dictionary of updated samples. Returns rval : dict Dictionary mapping each shared variable to an expression to update it. Repeatedly applying these updates does MCMC sampling. Notes The specific sampling schedule used by default is to sample all of the even-indexed layers of model.hidden_layers, then the visible layer and all the odd-indexed layers.'
def get_sampling_updates(self, layer_to_state, theano_rng, layer_to_clamp=None, num_steps=1, return_layer_to_updated=False):
updated = self.sampling_procedure.sample(layer_to_state, theano_rng,
                                         layer_to_clamp, num_steps)
rval = OrderedDict()

def add_updates(old, new):
    if isinstance(old, (list, tuple)):
        for old_elem, new_elem in safe_izip(old, new):
            add_updates(old_elem, new_elem)
    else:
        rval[old] = new

if layer_to_clamp is None:
    layer_to_clamp = OrderedDict()
for key in layer_to_clamp:
    assert key is self.visible_layer or key in self.hidden_layers
for layer in [self.visible_layer] + self.hidden_layers:
    if layer not in layer_to_clamp:
        layer_to_clamp[layer] = False
for layer in layer_to_state:
    old = layer_to_state[layer]
    new = updated[layer]
    if layer_to_clamp[layer]:
        assert new is old
    else:
        add_updates(old, new)
assert isinstance(self.hidden_layers, list)
if return_layer_to_updated:
    return rval, updated
return rval
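A hedged sketch of how these updates are typically turned into a Gibbs sampler; `dbm` is a hypothetical trained DBM, and MRG_RandomStreams is assumed to come from theano.sandbox.rng_mrg:

import theano
from theano.sandbox.rng_mrg import MRG_RandomStreams

# Persistent chains live in the shared variables from make_layer_to_state.
layer_to_state = dbm.make_layer_to_state(num_examples=100)
theano_rng = MRG_RandomStreams(2015)
updates = dbm.get_sampling_updates(layer_to_state, theano_rng, num_steps=1)
sample_step = theano.function([], updates=updates)
for _ in range(50):      # 50 rounds of Gibbs sampling on the shared chains
    sample_step()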
'.. todo:: WRITEME'
def get_monitoring_channels(self, data):
space, source = self.get_monitoring_data_specs()
space.validate(data)
X = data
history = self.mf(X, return_history=True)
q = history[-1]
rval = OrderedDict()
ch = self.visible_layer.get_monitoring_channels()
for key in ch:
    rval['vis_' + key] = ch[key]
for state, layer in safe_zip(q, self.hidden_layers):
    ch = layer.get_monitoring_channels()
    for key in ch:
        rval[layer.layer_name + '_' + key] = ch[key]
    ch = layer.get_monitoring_channels_from_state(state)
    for key in ch:
        rval['mf_' + layer.layer_name + '_' + key] = ch[key]
if len(history) > 1:
    prev_q = history[-2]
    flat_q = flatten(q)
    flat_prev_q = flatten(prev_q)
    mx = None
    for new, old in safe_zip(flat_q, flat_prev_q):
        cur_mx = abs(new - old).max()
        if new is old:
            logger.error('{0} is {1}'.format(new, old))
            assert False
        if mx is None:
            mx = cur_mx
        else:
            mx = T.maximum(mx, cur_mx)
    rval['max_var_param_diff'] = mx
    for layer, new, old in safe_zip(self.hidden_layers, q, prev_q):
        sum_diff = 0.0
        for sub_new, sub_old in safe_zip(flatten(new), flatten(old)):
            sum_diff += abs(sub_new - sub_old).sum()
        denom = (self.batch_size *
                 layer.get_total_state_space().get_total_dimension())
        denom = np.cast[config.floatX](denom)
        rval['mean_' + layer.layer_name + '_var_param_diff'] = \
            sum_diff / denom
return rval
'Get the data_specs describing the data for get_monitoring_channels. This implementation returns a specification corresponding to unlabeled inputs.'
def get_monitoring_data_specs(self):
return (self.get_input_space(), self.get_input_source())
'.. todo:: WRITEME'
def get_test_batch_size(self):
return self.batch_size
'Reconstruct the visible variables. Returns recons : tensor_like Unmasked reconstructed visible variables.'
def reconstruct(self, V):
H = self.mf(V)[0]
downward_state = self.hidden_layers[0].downward_state(H)
recons = self.visible_layer.inpaint_update(
    layer_above=self.hidden_layers[0],
    state_above=downward_state,
    drop_mask=None, V=None)
return recons
'Does the inference required for multi-prediction training, using the model\'s inference procedure.'
def do_inpainting(self, *args, **kwargs):
self.setup_inference_procedure()
return self.inference_procedure.do_inpainting(*args, **kwargs)
'Associates the InferenceProcedure with a specific DBM. Parameters dbm : pylearn2.models.dbm.DBM instance The model to perform inference in.'
def set_dbm(self, dbm):
self.dbm = dbm
'Perform mean field inference. Subclasses must implement. Parameters V : Input space batch The values of the input features modeled by the DBM. Y : (Optional) Target space batch The values of the labels modeled by the DBM. Must be omitted if the DBM does not model labels. If the DBM does model labels, they may be included to perform inference over the hidden layers only, or included to perform inference over the labels. return_history : (Optional) bool Default: False If True, returns the full sequence of mean field updates. niter : (Optional) int block_grad : (Optional) int Default: None If not None, blocks the gradient after `block_grad` iterations, so that only the last `niter` - `block_grad` iterations need to be stored when using the backpropagation algorithm. Returns result : list If not `return_history` (default), a list with one element per inferred layer, containing the full mean field state of that layer. Otherwise, a list of such lists, with the outer list containing one element for each step of inference.'
def mf(self, V, Y=None, return_history=False, niter=None, block_grad=None):
raise NotImplementedError((str(type(self)) + ' does not implement mf.'))
'Inference using "the multi-inference trick." See "Multi-prediction deep Boltzmann machines", Goodfellow et al 2013. Subclasses may implement this method, however it is not needed for any training algorithm, and only expected to work at evaluation time if the model was trained with multi-prediction training. Parameters V : input space batch return_history : bool If True, returns the complete history of the mean field iterations, rather than just the final values niter : int The number of mean field iterations to run block_grad : int If not None, block the gradient after this number of iterations Returns result : list A list of mean field states, or if return_history is True, a list of such lists with one element per mean field iteration'
def multi_infer(self, V, return_history=False, niter=None, block_grad=None):
raise NotImplementedError((str(type(self)) + ' does not implement multi_infer.'))
'Does the inference required for multi-prediction training. If you use this method in your research work, please cite: Multi-prediction deep Boltzmann machines. Ian J. Goodfellow, Mehdi Mirza, Aaron Courville, and Yoshua Bengio. NIPS 2013. Gives the mean field expression for units masked out by drop_mask. Uses self.niter mean field updates. Comes in two variants, unsupervised and supervised: * unsupervised: Y and drop_mask_Y are not passed to the method. The method produces V_hat, an inpainted version of V * supervised: Y and drop_mask_Y are passed to the method. The method produces V_hat and Y_hat Parameters V : tensor_like Theano batch in `model.input_space` Y : tensor_like Theano batch in `model.output_space`, i.e. in the output space of the last hidden layer. (It\'s not really a hidden layer anymore, but oh well. It\'s convenient to code it this way because the labels are sort of "on top" of everything else.) *** Y is always assumed to be a matrix of one-hot category labels. *** drop_mask : tensor_like Theano batch in `model.input_space`. Should be all binary, with 1s indicating that the corresponding element of X should be "dropped", i.e. hidden from the algorithm and filled in as part of the inpainting process drop_mask_Y : tensor_like Theano vector. Since we assume Y is a one-hot matrix, each row is a single categorical variable. `drop_mask_Y` is a binary mask specifying which *rows* to drop. return_history : bool, optional WRITEME noise : bool, optional WRITEME niter : int, optional WRITEME block_grad : WRITEME Returns WRITEME'
def do_inpainting(self, V, Y=None, drop_mask=None, drop_mask_Y=None, return_history=False, noise=False, niter=None, block_grad=None):
raise NotImplementedError((str(type(self)) + ' does not implement do_inpainting.'))
'.. todo:: WRITEME properly Gives the mean field expression for units masked out by drop_mask. Uses self.niter mean field updates. If you use this method in your research work, please cite: Multi-prediction deep Boltzmann machines. Ian J. Goodfellow, Mehdi Mirza, Aaron Courville, and Yoshua Bengio. NIPS 2013. Comes in two variants, unsupervised and supervised: * unsupervised: Y and drop_mask_Y are not passed to the method. The method produces V_hat, an inpainted version of V. * supervised: Y and drop_mask_Y are passed to the method. The method produces V_hat and Y_hat Parameters V : tensor_like Theano batch in `model.input_space` Y : tensor_like Theano batch in `model.output_space`, i.e. in the output space of the last hidden layer. (It\'s not really a hidden layer anymore, but oh well. It\'s convenient to code it this way because the labels are sort of "on top" of everything else.) *** Y is always assumed to be a matrix of one-hot category labels. *** drop_mask : tensor_like Theano batch in `model.input_space`. Should be all binary, with 1s indicating that the corresponding element of X should be "dropped", i.e. hidden from the algorithm and filled in as part of the inpainting process drop_mask_Y : tensor_like Theano vector. Since we assume Y is a one-hot matrix, each row is a single categorical variable. `drop_mask_Y` is a binary mask specifying which *rows* to drop. return_history : bool, optional WRITEME noise : bool, optional WRITEME niter : int, optional WRITEME block_grad : WRITEME Returns WRITEME'
def do_inpainting(self, V, Y=None, drop_mask=None, drop_mask_Y=None, return_history=False, noise=False, niter=None, block_grad=None):
dbm = self.dbm
# TODO: Should add unit test that calling this with a batch of
# different inputs should yield the same output for each
# if noise is False and drop_mask is all 1s
if niter is None:
    niter = dbm.niter
assert drop_mask is not None
assert return_history in [True, False]
assert noise in [True, False]
if Y is None:
    if drop_mask_Y is not None:
        raise ValueError('do_inpainting got drop_mask_Y but not Y.')
elif drop_mask_Y is None:
    raise ValueError('do_inpainting got Y but not drop_mask_Y.')
if Y is not None:
    assert isinstance(dbm.hidden_layers[-1], Softmax)
    if drop_mask_Y.ndim != 1:
        raise ValueError('do_inpainting assumes Y is a matrix of one-hot '
                         'labels, so each example is only one variable. '
                         'drop_mask_Y should therefore be a vector, but we '
                         'got something with ndim ' + str(drop_mask_Y.ndim))
    drop_mask_Y = drop_mask_Y.dimshuffle(0, 'x')
orig_V = V
orig_drop_mask = drop_mask
history = []
V_hat, V_hat_unmasked = dbm.visible_layer.init_inpainting_state(
    V, drop_mask, noise, return_unmasked=True)
assert V_hat_unmasked.ndim > 1
H_hat = []
for i in xrange(0, len(dbm.hidden_layers) - 1):
    if i == 0:
        H_hat.append(dbm.hidden_layers[i].mf_update(
            state_above=None, double_weights=True,
            state_below=dbm.visible_layer.upward_state(V_hat),
            iter_name='0'))
    else:
        H_hat.append(dbm.hidden_layers[i].mf_update(
            state_above=None, double_weights=True,
            state_below=dbm.hidden_layers[i - 1].upward_state(H_hat[i - 1]),
            iter_name='0'))
if len(dbm.hidden_layers) > 1:
    H_hat.append(dbm.hidden_layers[-1].mf_update(
        state_above=None,
        state_below=dbm.hidden_layers[-2].upward_state(H_hat[-1])))
else:
    H_hat.append(dbm.hidden_layers[-1].mf_update(
        state_above=None,
        state_below=dbm.visible_layer.upward_state(V_hat)))
if Y is not None:
    Y_hat_unmasked = dbm.hidden_layers[-1].init_inpainting_state(Y, noise)
    dirty_term = drop_mask_Y * Y_hat_unmasked
    clean_term = (1 - drop_mask_Y) * Y
    Y_hat = dirty_term + clean_term
    H_hat[-1] = Y_hat
    if len(dbm.hidden_layers) > 1:
        i = len(dbm.hidden_layers) - 2
        if i == 0:
            H_hat[i] = dbm.hidden_layers[i].mf_update(
                state_above=Y_hat, layer_above=dbm.hidden_layers[-1],
                state_below=dbm.visible_layer.upward_state(V_hat),
                iter_name='0')
        else:
            H_hat[i] = dbm.hidden_layers[i].mf_update(
                state_above=Y_hat, layer_above=dbm.hidden_layers[-1],
                state_below=dbm.hidden_layers[i - 1].upward_state(H_hat[i - 1]),
                iter_name='0')

def update_history():
    assert V_hat_unmasked.ndim > 1
    d = {'V_hat': V_hat, 'H_hat': list(H_hat),
         'V_hat_unmasked': V_hat_unmasked}
    if Y is not None:
        d['Y_hat_unmasked'] = Y_hat_unmasked
        d['Y_hat'] = H_hat[-1]
    history.append(d)

if block_grad == 1:
    V_hat = block_gradient(V_hat)
    V_hat_unmasked = block_gradient(V_hat_unmasked)
    H_hat = block(H_hat)
update_history()
for i in xrange(niter - 1):
    for j in xrange(0, len(H_hat), 2):
        if j == 0:
            state_below = dbm.visible_layer.upward_state(V_hat)
        else:
            state_below = dbm.hidden_layers[j - 1].upward_state(H_hat[j - 1])
        if j == len(H_hat) - 1:
            state_above = None
            layer_above = None
        else:
            state_above = dbm.hidden_layers[j + 1].downward_state(H_hat[j + 1])
            layer_above = dbm.hidden_layers[j + 1]
        H_hat[j] = dbm.hidden_layers[j].mf_update(
            state_below=state_below, state_above=state_above,
            layer_above=layer_above)
        if Y is not None and j == len(dbm.hidden_layers) - 1:
            Y_hat_unmasked = H_hat[j]
            H_hat[j] = drop_mask_Y * H_hat[j] + (1 - drop_mask_Y) * Y
    V_hat, V_hat_unmasked = dbm.visible_layer.inpaint_update(
        state_above=dbm.hidden_layers[0].downward_state(H_hat[0]),
        layer_above=dbm.hidden_layers[0],
        V=V, drop_mask=drop_mask, return_unmasked=True)
    V_hat.name = 'V_hat[%d](V_hat = %s)' % (i, V_hat.name)
    for j in xrange(1, len(H_hat), 2):
        state_below = dbm.hidden_layers[j - 1].upward_state(H_hat[j - 1])
        if j == len(H_hat) - 1:
            state_above = None
            layer_above = None
        else:
            state_above = dbm.hidden_layers[j + 1].downward_state(H_hat[j + 1])
            layer_above = dbm.hidden_layers[j + 1]
        H_hat[j] = dbm.hidden_layers[j].mf_update(
            state_below=state_below, state_above=state_above,
            layer_above=layer_above)
        if Y is not None and j == len(dbm.hidden_layers) - 1:
            Y_hat_unmasked = H_hat[j]
            H_hat[j] = drop_mask_Y * H_hat[j] + (1 - drop_mask_Y) * Y
    if block_grad == i:
        V_hat = block_gradient(V_hat)
        V_hat_unmasked = block_gradient(V_hat_unmasked)
        H_hat = block(H_hat)
    update_history()
assert V is orig_V
assert drop_mask is orig_drop_mask
Y_hat = H_hat[-1]
assert V in theano.gof.graph.ancestors([V_hat])
if Y is not None:
    assert V in theano.gof.graph.ancestors([Y_hat])
if return_history:
    return history
else:
    if Y is not None:
        return V_hat, Y_hat
    return V_hat
'Gives the mean field expression for units masked out by drop_mask. Uses self.niter mean field updates. Comes in two variants, unsupervised and supervised: * unsupervised: Y and drop_mask_Y are not passed to the method. The method produces V_hat, an inpainted version of V. * supervised: Y and drop_mask_Y are passed to the method. The method produces V_hat and Y_hat. If you use this method in your research work, please cite: Multi-prediction deep Boltzmann machines. Ian J. Goodfellow, Mehdi Mirza, Aaron Courville, and Yoshua Bengio. NIPS 2013. Parameters V : tensor_like Theano batch in `model.input_space` Y : tensor_like Theano batch in model.output_space, ie, in the output space of the last hidden layer (it\'s not really a hidden layer anymore, but oh well. It\'s convenient to code it this way because the labels are sort of "on top" of everything else). *** Y is always assumed to be a matrix of one-hot category labels. *** drop_mask : tensor_like A theano batch in `model.input_space`. Should be all binary, with 1s indicating that the corresponding element of X should be "dropped", ie, hidden from the algorithm and filled in as part of the inpainting process drop_mask_Y : tensor_like Theano vector. Since we assume Y is a one-hot matrix, each row is a single categorical variable. `drop_mask_Y` is a binary mask specifying which *rows* to drop.'
def do_inpainting(self, V, Y=None, drop_mask=None, drop_mask_Y=None, return_history=False, noise=False, niter=None, block_grad=None):
dbm = self.dbm
# TODO: Should add unit test that calling this with a batch of
# different inputs should yield the same output for each
# if noise is False and drop_mask is all 1s
if niter is None:
    niter = dbm.niter
assert drop_mask is not None
assert return_history in [True, False]
assert noise in [True, False]
if Y is None:
    if drop_mask_Y is not None:
        raise ValueError('do_inpainting got drop_mask_Y but not Y.')
elif drop_mask_Y is None:
    raise ValueError('do_inpainting got Y but not drop_mask_Y.')
if Y is not None:
    assert isinstance(dbm.hidden_layers[-1], Softmax)
    if drop_mask_Y.ndim != 1:
        raise ValueError('do_inpainting assumes Y is a matrix of one-hot '
                         'labels, so each example is only one variable. '
                         'drop_mask_Y should therefore be a vector, but we '
                         'got something with ndim ' + str(drop_mask_Y.ndim))
    drop_mask_Y = drop_mask_Y.dimshuffle(0, 'x')
orig_V = V
orig_drop_mask = drop_mask
history = []
V_hat, V_hat_unmasked = dbm.visible_layer.init_inpainting_state(
    V, drop_mask, noise, return_unmasked=True)
assert V_hat_unmasked.ndim > 1
H_hat = [None] + [layer.init_mf_state() for layer in dbm.hidden_layers[1:]]
if Y is not None:
    Y_hat_unmasked = dbm.hidden_layers[-1].init_inpainting_state(Y, noise)
    Y_hat = drop_mask_Y * Y_hat_unmasked + (1 - drop_mask_Y) * Y
    H_hat[-1] = Y_hat

def update_history():
    assert V_hat_unmasked.ndim > 1
    d = {'V_hat': V_hat, 'H_hat': H_hat, 'V_hat_unmasked': V_hat_unmasked}
    if Y is not None:
        d['Y_hat_unmasked'] = Y_hat_unmasked
        d['Y_hat'] = H_hat[-1]
    history.append(d)

update_history()
for i in xrange(niter):
    for j in xrange(0, len(H_hat), 2):
        if j == 0:
            state_below = dbm.visible_layer.upward_state(V_hat)
        else:
            state_below = dbm.hidden_layers[j - 1].upward_state(H_hat[j - 1])
        if j == len(H_hat) - 1:
            state_above = None
            layer_above = None
        else:
            state_above = dbm.hidden_layers[j + 1].downward_state(H_hat[j + 1])
            layer_above = dbm.hidden_layers[j + 1]
        H_hat[j] = dbm.hidden_layers[j].mf_update(
            state_below=state_below, state_above=state_above,
            layer_above=layer_above)
        if Y is not None and j == len(dbm.hidden_layers) - 1:
            Y_hat_unmasked = H_hat[j]
            H_hat[j] = drop_mask_Y * H_hat[j] + (1 - drop_mask_Y) * Y
    V_hat, V_hat_unmasked = dbm.visible_layer.inpaint_update(
        state_above=dbm.hidden_layers[0].downward_state(H_hat[0]),
        layer_above=dbm.hidden_layers[0],
        V=V, drop_mask=drop_mask, return_unmasked=True)
    V_hat.name = 'V_hat[%d](V_hat = %s)' % (i, V_hat.name)
    for j in xrange(1, len(H_hat), 2):
        state_below = dbm.hidden_layers[j - 1].upward_state(H_hat[j - 1])
        if j == len(H_hat) - 1:
            state_above = None
            layer_above = None
        else:
            state_above = dbm.hidden_layers[j + 1].downward_state(H_hat[j + 1])
            layer_above = dbm.hidden_layers[j + 1]
        H_hat[j] = dbm.hidden_layers[j].mf_update(
            state_below=state_below, state_above=state_above,
            layer_above=layer_above)
        if Y is not None and j == len(dbm.hidden_layers) - 1:
            Y_hat_unmasked = H_hat[j]
            H_hat[j] = drop_mask_Y * H_hat[j] + (1 - drop_mask_Y) * Y
    if block_grad == i + 1:
        V_hat = block_gradient(V_hat)
        V_hat_unmasked = block_gradient(V_hat_unmasked)
        H_hat = block(H_hat)
    update_history()
assert V is orig_V
assert drop_mask is orig_drop_mask
Y_hat = H_hat[-1]
assert V in theano.gof.graph.ancestors([V_hat])
if Y is not None:
    assert V in theano.gof.graph.ancestors([Y_hat])
if return_history:
    return history
else:
    if Y is not None:
        return V_hat, Y_hat
    return V_hat
Runs mean field inference over the hidden layers of the DBM given a visible batch V. Layers are updated for `niter` iterations of alternating sweeps (even iterations go bottom-up, odd iterations top-down); if Y is given, the top (label) layer stays clamped to Y. Returns the final list of hidden-layer states, or the list of states after every iteration if `return_history` is True.
@functools.wraps(InferenceProcedure.mf)
def mf(self, V, Y=None, return_history=False, niter=None, block_grad=None):
dbm = self.dbm
assert Y not in [True, False, 0, 1]
assert return_history in [True, False, 0, 1]
if Y is not None:
    dbm.hidden_layers[-1].get_output_space().validate(Y)
if niter is None:
    niter = dbm.niter
H_hat = [None] + [layer.init_mf_state() for layer in dbm.hidden_layers[1:]]
if Y is not None:
    H_hat[-1] = Y
history = [list(H_hat)]
assert (niter > 1) == (len(dbm.hidden_layers) > 1)
for i in xrange(niter):
    # Even iterations sweep the layers bottom-up, odd iterations top-down.
    if i % 2 == 0:
        start = 0
        stop = len(H_hat)
        inc = 1
    else:
        start = len(H_hat) - 1
        stop = -1
        inc = -1
    for j in xrange(start, stop, inc):
        if j == 0:
            state_below = dbm.visible_layer.upward_state(V)
        else:
            state_below = dbm.hidden_layers[j - 1].upward_state(H_hat[j - 1])
        if j == len(H_hat) - 1:
            state_above = None
            layer_above = None
        else:
            state_above = dbm.hidden_layers[j + 1].downward_state(H_hat[j + 1])
            layer_above = dbm.hidden_layers[j + 1]
        H_hat[j] = dbm.hidden_layers[j].mf_update(state_below=state_below,
                                                  state_above=state_above,
                                                  layer_above=layer_above)
        if Y is not None:
            # Keep the label layer clamped to the observed Y.
            H_hat[-1] = Y
    if Y is not None:
        H_hat[-1] = Y
    if block_grad == i + 1:
        H_hat = block(H_hat)
    history.append(list(H_hat))
for layer, state in safe_izip(dbm.hidden_layers, H_hat):
    upward_state = layer.upward_state(state)
    layer.get_output_space().validate(upward_state)
if Y is not None:
    assert all([elem[-1] is Y for elem in history])
    assert H_hat[-1] is Y
if return_history:
    return history
else:
    return H_hat
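As a rough illustration of how this inference routine is typically driven, the sketch below assumes `dbm` is an already-constructed pylearn2 DBM whose `inference_procedure` is an instance of this class; the variable names are illustrative only and not part of the original code.

    # Hypothetical usage sketch (assumes a trained DBM called `dbm`).
    X = dbm.get_input_space().make_theano_batch(name='X')

    # One mean field state per hidden layer; the format of each entry
    # (a single tensor, or e.g. a (p, h) pair for pooled layers) is
    # layer-specific.
    H_hat = dbm.inference_procedure.mf(X)

    # With return_history=True, the list of layer states after every
    # mean field iteration is returned instead.
    history = dbm.inference_procedure.mf(X, return_history=True)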
Gives the mean field expression for units masked out by `drop_mask`. Uses `self.niter` mean field updates.

Comes in two variants, unsupervised and supervised:

* unsupervised: Y and drop_mask_Y are not passed to the method. The method produces V_hat, an inpainted version of V.
* supervised: Y and drop_mask_Y are passed to the method. The method produces V_hat and Y_hat.

If you use this method in your research work, please cite: Multi-prediction deep Boltzmann machines. Ian J. Goodfellow, Mehdi Mirza, Aaron Courville, and Yoshua Bengio. NIPS 2013.

Parameters
V : tensor_like
    Theano batch in `model.input_space`.
Y : tensor_like
    Theano batch in `model.output_space`, i.e., in the output space of the last hidden layer. (It's not really a hidden layer anymore, but it's convenient to code it this way because the labels sit "on top" of everything else.) Y is always assumed to be a matrix of one-hot category labels.
drop_mask : tensor_like
    A Theano batch in `model.input_space`. Should be all binary, with 1s indicating that the corresponding element of V should be "dropped", i.e., hidden from the algorithm and filled in as part of the inpainting process.
drop_mask_Y : tensor_like
    Theano vector. Since we assume Y is a one-hot matrix, each row is a single categorical variable. `drop_mask_Y` is a binary mask specifying which *rows* to drop.
def do_inpainting(self, V, Y=None, drop_mask=None, drop_mask_Y=None, return_history=False, noise=False, niter=None, block_grad=None):
if Y is not None:
    assert isinstance(self.dbm.hidden_layers[-1], Softmax)
model = self.dbm
# TODO: Should add unit test that calling this with a batch of different
# inputs should yield the same output for each if noise is False and
# drop_mask is all 1s.
if niter is None:
    niter = model.niter
assert drop_mask is not None
assert return_history in [True, False]
assert noise in [True, False]
if Y is None:
    if drop_mask_Y is not None:
        raise ValueError('do_inpainting got drop_mask_Y but not Y.')
elif drop_mask_Y is None:
    raise ValueError('do_inpainting got Y but not drop_mask_Y.')
if Y is not None:
    assert isinstance(model.hidden_layers[-1], Softmax)
    if drop_mask_Y.ndim != 1:
        raise ValueError('do_inpainting assumes Y is a matrix of one-hot '
                         'labels, so each example is only one variable. '
                         'drop_mask_Y should therefore be a vector, but we '
                         'got something with ndim ' + str(drop_mask_Y.ndim))
    drop_mask_Y = drop_mask_Y.dimshuffle(0, 'x')
orig_V = V
orig_drop_mask = drop_mask
history = []
V_hat, V_hat_unmasked = model.visible_layer.init_inpainting_state(
    V, drop_mask, noise, return_unmasked=True)
assert V_hat_unmasked.ndim > 1
H_hat = [None] + [layer.init_mf_state() for layer in model.hidden_layers[1:]]
if Y is not None:
    Y_hat_unmasked = model.hidden_layers[-1].init_inpainting_state(Y, noise)
    Y_hat = drop_mask_Y * Y_hat_unmasked + (1 - drop_mask_Y) * Y
    H_hat[-1] = Y_hat

def update_history():
    assert V_hat_unmasked.ndim > 1
    d = {'V_hat': V_hat, 'H_hat': H_hat, 'V_hat_unmasked': V_hat_unmasked}
    if Y is not None:
        d['Y_hat_unmasked'] = Y_hat_unmasked
        d['Y_hat'] = H_hat[-1]
    history.append(d)

update_history()
for i in xrange(niter):
    # Even iterations sweep the hidden layers bottom-up and refresh the
    # visible inpainting first (after the initial iteration); odd
    # iterations sweep top-down and refresh the visibles afterwards.
    if i % 2 == 0:
        start = 0
        stop = len(H_hat)
        inc = 1
        if i > 0:
            V_hat, V_hat_unmasked = model.visible_layer.inpaint_update(
                state_above=model.hidden_layers[0].downward_state(H_hat[0]),
                layer_above=model.hidden_layers[0],
                V=V, drop_mask=drop_mask, return_unmasked=True)
            V_hat.name = 'V_hat[%d](V_hat = %s)' % (i, V_hat.name)
    else:
        start = len(H_hat) - 1
        stop = -1
        inc = -1
    for j in xrange(start, stop, inc):
        if j == 0:
            state_below = model.visible_layer.upward_state(V_hat)
        else:
            state_below = model.hidden_layers[j - 1].upward_state(H_hat[j - 1])
        if j == len(H_hat) - 1:
            state_above = None
            layer_above = None
        else:
            state_above = model.hidden_layers[j + 1].downward_state(H_hat[j + 1])
            layer_above = model.hidden_layers[j + 1]
        H_hat[j] = model.hidden_layers[j].mf_update(state_below=state_below,
                                                    state_above=state_above,
                                                    layer_above=layer_above)
        if Y is not None and j == len(model.hidden_layers) - 1:
            Y_hat_unmasked = H_hat[j]
            H_hat[j] = drop_mask_Y * H_hat[j] + (1 - drop_mask_Y) * Y
    if i % 2 == 1:
        V_hat, V_hat_unmasked = model.visible_layer.inpaint_update(
            state_above=model.hidden_layers[0].downward_state(H_hat[0]),
            layer_above=model.hidden_layers[0],
            V=V, drop_mask=drop_mask, return_unmasked=True)
        V_hat.name = 'V_hat[%d](V_hat = %s)' % (i, V_hat.name)
    if block_grad == i + 1:
        V_hat = block_gradient(V_hat)
        V_hat_unmasked = block_gradient(V_hat_unmasked)
        H_hat = block(H_hat)
    update_history()
assert V is orig_V
assert drop_mask is orig_drop_mask
Y_hat = H_hat[-1]
assert V in theano.gof.graph.ancestors([V_hat])
if Y is not None:
    assert V in theano.gof.graph.ancestors([Y_hat])
if return_history:
    return history
else:
    if Y is not None:
        return (V_hat, Y_hat)
    return V_hat
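A minimal sketch of the two calling conventions described in the docstring above. It assumes `dbm` is an already-built DBM with a VectorSpace visible layer and a Softmax label layer that uses this inference procedure; the mask variables are illustrative placeholders (in practice a cost such as the MP-DBM inpainting cost constructs them).

    import theano.tensor as T

    X = dbm.get_input_space().make_theano_batch(name='X')
    Y = dbm.hidden_layers[-1].get_output_space().make_theano_batch(name='Y')
    drop_mask = T.matrix('drop_mask')      # 1 = visible unit is hidden and must be inpainted
    drop_mask_Y = T.vector('drop_mask_Y')  # 1 = this example's label must be inpainted

    # Supervised variant: returns the inpainted visibles and the label estimates.
    V_hat, Y_hat = dbm.inference_procedure.do_inpainting(
        X, Y=Y, drop_mask=drop_mask, drop_mask_Y=drop_mask_Y)

    # Unsupervised variant: omit Y and drop_mask_Y; only V_hat is returned.
    V_hat = dbm.inference_procedure.do_inpainting(X, drop_mask=drop_mask)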
Runs one step of block Gibbs sampling with the visible layer clamped to `inputs` and returns the upward state of the topmost layer.
def __call__(self, inputs):
space = self.dbm.get_input_space()
num_examples = space.batch_size(inputs)
last_layer = self.dbm.get_all_layers()[-1]
layer_to_chains = self.dbm.make_layer_to_symbolic_state(num_examples,
                                                        self.theano_rng)
layer_to_chains[self.dbm.visible_layer] = inputs
layer_to_clamp = OrderedDict([(self.dbm.visible_layer, True)])
layer_to_chains = self.dbm.sampling_procedure.sample(
    layer_to_state=layer_to_chains,
    theano_rng=self.theano_rng,
    layer_to_clamp=layer_to_clamp,
    num_steps=1)
rval = layer_to_chains[last_layer]
rval = last_layer.upward_state(rval)
return rval
Returns the input space of the underlying DBM.
def get_input_space(self):
return self.dbm.get_input_space()
Returns the output space of the underlying DBM.
def get_output_space(self):
return self.dbm.get_output_space()
Returns the current numerical value of the biases.
def get_biases(self):
return self.bias.get_value()
Sets the biases to the given numerical values. If `recenter` is True, the centering offsets are reset to match the new biases.
def set_biases(self, biases, recenter=False):
self.bias.set_value(biases)
if recenter:
    assert self.center
    self.offset.set_value(sigmoid_numpy(self.bias.get_value()))
Returns the piece of the state that this layer presents to the layer above; here the upward state is simply the total state, unchanged.
def upward_state(self, total_state):
return total_state
Returns the parameters of this layer: the bias, plus the inverse temperature `beta` if it is being learned.
def get_params(self):
rval = [self.bias]
if self.learn_beta:
    rval.append(self.beta)
return rval
Mean field update for this layer of ±1 units: the expected state is the tanh of `beta` times the bias plus the downward message from the layer above.
def mf_update(self, state_above, layer_above):
msg = layer_above.downward_message(state_above)
bias = self.bias
z = msg + bias
rval = T.tanh(self.beta * z)
return rval
Samples a ±1 state for each unit, drawing +1 with probability sigmoid(2 * beta * (bias + downward message)).
def sample(self, state_below=None, state_above=None, layer_above=None, theano_rng=None):
assert state_below is None
msg = layer_above.downward_message(state_above)
bias = self.bias
z = msg + bias
phi = T.nnet.sigmoid(2. * self.beta * z)
rval = theano_rng.binomial(size=phi.shape, p=phi, dtype=phi.dtype, n=1)
return rval * 2. - 1.
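The tanh in `mf_update` and the sigmoid in `sample` describe the same Bernoulli distribution over a spin unit s in {-1, +1}. Writing z for the bias plus the downward message, the mean field value follows directly from the sampling probability (a quick check using only the expressions above):

    P(s = +1) = \sigma(2 \beta z)
    \mathbb{E}[s] = (+1)\,\sigma(2 \beta z) + (-1)\,(1 - \sigma(2 \beta z))
                  = 2\,\sigma(2 \beta z) - 1
                  = \tanh(\beta z)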
Creates a shared variable holding `num_examples` rows of ±1 states sampled from the layer's distribution under the current bias and `beta`.
def make_state(self, num_examples, numpy_rng):
driver = numpy_rng.uniform(0., 1., (num_examples, self.nvis))
on_prob = sigmoid_numpy(2. * self.beta.get_value() * self.bias.get_value())
sample = 2. * (driver < on_prob) - 1.
rval = sharedX(sample, name='v_sample_shared')
return rval
Returns a symbolic batch of `num_examples` rows of ±1 states sampled with `theano_rng` from the layer's distribution under the current bias and `beta`.
def make_symbolic_state(self, num_examples, theano_rng):
mean = T.nnet.sigmoid(2. * self.beta * self.bias)
rval = theano_rng.binomial(size=(num_examples, self.nvis), p=mean)
rval = 2. * rval - 1.
return rval
Returns this layer's bias contribution to the expected energy, one value per example.
def expected_energy_term(self, state, average, state_below=None, average_below=None):
assert state_below is None
assert average_below is None
assert average in [True, False]
self.space.validate(state)
rval = -(self.beta * T.dot(state, self.bias))
assert rval.ndim == 1
return rval
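Concretely, assuming `beta` is a scalar inverse temperature, this is the bias part of the Ising-style energy, one scalar per example k in the batch:

    E_k = -\beta \sum_i b_i \, s_{k,i}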
Returns a dictionary mapping the weights and biases to their learning rate scales (`W_lr_scale` and `b_lr_scale`), when those are set.
def get_lr_scalers(self):
if not hasattr(self, 'W_lr_scale'):
    self.W_lr_scale = None
if not hasattr(self, 'b_lr_scale'):
    self.b_lr_scale = None
rval = OrderedDict()
if self.W_lr_scale is not None:
    W, = self.transformer.get_params()
    rval[W] = self.W_lr_scale
if self.b_lr_scale is not None:
    rval[self.b] = self.b_lr_scale
return rval
Tells the layer to use the specified input space and allocates its weight matrix accordingly. Notes Note: this resets parameters!
def set_input_space(self, space):
self.input_space = space
if isinstance(space, VectorSpace):
    self.requires_reformat = False
    self.input_dim = space.dim
else:
    self.requires_reformat = True
    self.input_dim = space.get_total_dimension()
    self.desired_space = VectorSpace(self.input_dim)
self.output_space = VectorSpace(self.dim)
rng = self.dbm.rng
if self.irange is not None:
    assert self.sparse_init is None
    W = (rng.uniform(-self.irange, self.irange, (self.input_dim, self.dim)) *
         (rng.uniform(0., 1., (self.input_dim, self.dim)) < self.include_prob))
else:
    assert self.sparse_init is not None
    W = np.zeros((self.input_dim, self.dim))
    W *= self.sparse_stdev
W = sharedX(W)
W.name = self.layer_name + '_W'
self.transformer = MatrixMul(W)
W, = self.transformer.get_params()
assert W.name is not None
Returns the space of this layer's total state: a VectorSpace of dimension `dim`.
def get_total_state_space(self):
return VectorSpace(self.dim)
Returns the parameters of this layer: the weights, the bias, and the inverse temperature `beta` if it is being learned.
def get_params(self):
assert self.b.name is not None
W, = self.transformer.get_params()
assert W.name is not None
rval = self.transformer.get_params()
assert not isinstance(rval, set)
rval = list(rval)
assert self.b not in rval
rval.append(self.b)
if self.learn_beta:
    rval.append(self.beta)
return rval
Returns a weight decay penalty: `coeff` times the sum of the squared weights.
def get_weight_decay(self, coeff):
if isinstance(coeff, str):
    coeff = float(coeff)
assert isinstance(coeff, float) or hasattr(coeff, 'dtype')
W, = self.transformer.get_params()
return coeff * T.sqr(W).sum()
Returns the current numerical value of the weight matrix.
def get_weights(self):
if self.requires_reformat:
    raise NotImplementedError()
W, = self.transformer.get_params()
return W.get_value()
Sets the weight matrix to the given numerical values.
def set_weights(self, weights):
W, = self.transformer.get_params()
W.set_value(weights)
Sets the biases to the given numerical values. If `recenter` is True, the centering offsets are reset to match the new biases (only supported when `pool_size` is 1).
def set_biases(self, biases, recenter=False):
self.b.set_value(biases)
if recenter:
    assert self.center
    if self.pool_size != 1:
        raise NotImplementedError()
    self.offset.set_value(sigmoid_numpy(self.b.get_value()))
Returns the current numerical value of the biases.
def get_biases(self):
return self.b.get_value()
Returns the axis format of the weight matrix: ('v', 'h'), i.e., the visible/input dimension first and the hidden dimension second.
def get_weights_format(self):
return ('v', 'h')