'Sets up the main loop. This is also called at the start of the main loop, so you need only call it if you\'re using a driver script that replaces the main loop with something else.'
def setup(self):
self.model.monitor = Monitor.get_monitor(self.model)
self.model.monitor.time_budget_exceeded = False
if self.algorithm is not None:
    self.algorithm.setup(model=self.model, dataset=self.dataset)
self.setup_extensions()
self.model.enforce_constraints()
'Repeatedly runs an epoch of the training algorithm, runs any epoch-level callbacks, and saves the model. Parameters time_budget : int, optional The maximum number of seconds before interrupting training. Default is `None`, no time limit.'
def main_loop(self, time_budget=None):
t0 = datetime.now()
self.setup()
if self.algorithm is None:
    extension_continue = self.run_callbacks_and_monitoring()
    continue_learning = (self.model.continue_learning() and
                         extension_continue)
    assert continue_learning in [True, False, 0, 1]
    while continue_learning:
        if self.exceeded_time_budget(t0, time_budget):
            break
        rval = self.model.train_all(dataset=self.dataset)
        if rval is not None:
            raise ValueError('Model.train_all should not return anything. '
                             'Use Model.continue_learning to control whether '
                             'learning continues.')
        self.model.monitor.report_epoch()
        extension_continue = self.run_callbacks_and_monitoring()
        freq = self.save_freq
        if freq > 0 and self.model.monitor.get_epochs_seen() % freq == 0:
            self.save()
        continue_learning = (self.model.continue_learning() and
                             extension_continue)
        assert continue_learning in [True, False, 0, 1]
else:
    if not hasattr(self.model, 'monitor'):
        raise RuntimeError('The algorithm is responsible for setting up the '
                           'Monitor, but failed to.')
    if len(self.model.monitor._datasets) > 0:
        self.training_seconds.__doc__ = (
            'The number of seconds that were spent in actual training during '
            'the most recent epoch. This excludes seconds that were spent '
            'running callbacks for the extensions, computing monitoring '
            'channels, etc.')
        self.model.monitor.add_channel(
            name='training_seconds_this_epoch',
            ipt=None,
            val=self.training_seconds,
            data_specs=(NullSpace(), ''),
            dataset=self.model.monitor._datasets[0])
        self.total_seconds.__doc__ = (
            'The number of seconds that were spent on the entirety of '
            'processing for the previous epoch. This includes not only '
            'training but also the computation of the monitoring channels, '
            'running TrainExtension callbacks, etc. This value is reported '
            'for the *previous* epoch because the amount of time spent on '
            'monitoring for this epoch is not known until the monitoring '
            'channels have already been reported.')
        self.model.monitor.add_channel(
            name='total_seconds_last_epoch',
            ipt=None,
            val=self.total_seconds,
            data_specs=(NullSpace(), ''),
            dataset=self.model.monitor._datasets[0])
    extension_continue = self.run_callbacks_and_monitoring()
    continue_learning = (self.algorithm.continue_learning(self.model) and
                         extension_continue)
    assert continue_learning in [True, False, 0, 1]
    while continue_learning:
        if self.exceeded_time_budget(t0, time_budget):
            break
        with log_timing(log, None, level=logging.DEBUG,
                        callbacks=[self.total_seconds.set_value]):
            with log_timing(log, None, final_msg='Time this epoch:',
                            callbacks=[self.training_seconds.set_value]):
                rval = self.algorithm.train(dataset=self.dataset)
            if rval is not None:
                raise ValueError('TrainingAlgorithm.train should not return '
                                 'anything. Use '
                                 'TrainingAlgorithm.continue_learning to '
                                 'control whether learning continues.')
            self.model.monitor.report_epoch()
            extension_continue = self.run_callbacks_and_monitoring()
            if (self.save_freq > 0 and
                    self.model.monitor.get_epochs_seen() %
                    self.save_freq == 0):
                self.save()
            continue_learning = (self.algorithm.continue_learning(self.model)
                                 and extension_continue)
            assert continue_learning in [True, False, 0, 1]
self.model.monitor.training_succeeded = True
if self.save_freq > 0:
    self.save()
'Runs the monitor, then calls Extension.on_monitor for all extensions. Returns continue_learning : bool If `False`, signals that at least one train extension wants to stop learning.'
def run_callbacks_and_monitoring(self):
self.model.monitor()
continue_learning = True
for extension in self.extensions:
    try:
        extension.on_monitor(self.model, self.dataset, self.algorithm)
    except TypeError:
        logging.warning('Failure during callback ' + str(extension))
        raise
    except StopIteration:
        log.info('Extension requested training halt.')
        continue_learning = False
return continue_learning
'Saves the model.'
def save(self):
for extension in self.extensions:
    extension.on_save(self.model, self.dataset, self.algorithm)
if self.save_path is not None:
    with log_timing(log, 'Saving to ' + self.save_path):
        if (self.first_save and (not self.allow_overwrite) and
                os.path.exists(self.save_path)):
            raise IOError('Trying to overwrite file when not allowed.')
        try:
            self.dataset._serialization_guard = SerializationGuard()
            serial.save(self.save_path, self.model, on_overwrite='backup')
        finally:
            self.dataset._serialization_guard = None
    self.first_save = False
'This method is called when someone attempts to serialize the object. This method raises an exception to prevent the serialization from occurring.'
def __getstate__(self):
raise IOError('You tried to serialize something that should not be serialized.')
'Initializes the formatter given the number of max labels.'
def __init__(self, max_labels, dtype=None):
try:
    np.empty(max_labels)
except (ValueError, TypeError):
    reraise_as(ValueError("%s got bad max_labels argument '%s'" %
                          (self.__class__.__name__, str(max_labels))))
self._max_labels = max_labels
if dtype is None:
    self._dtype = config.floatX
else:
    try:
        np.dtype(dtype)
    except TypeError:
        reraise_as(TypeError('%s got bad dtype identifier %s' %
                             (self.__class__.__name__, str(dtype))))
    self._dtype = dtype
'Formats a given array of target labels into a one-hot vector. If labels appear multiple times, their value in the one-hot vector is incremented. Parameters targets : ndarray A 1D array of targets, or a batch (2D array) where each row is a list of targets. mode : string The way in which to convert the labels to arrays. Takes three different options: - "concatenate" : concatenates the one-hot vectors from multiple labels - "stack" : returns a matrix where each row is the one-hot vector of a label - "merge" : merges the one-hot vectors together to form a vector where the elements are the result of an indicator function NB: As the result of an indicator function, the result is the same in case a label is duplicated in the input. sparse : bool If True, the return value is a sparse matrix. Note that if sparse is True, then mode cannot be \'stack\' because sparse matrices need to be 2D. Returns one_hot : a NumPy array (can be 1D-3D depending on settings) where normally the first axis indexes the batch items, the second axis the labels, and the third axis the one-hot vectors. Can be dense or sparse.'
def format(self, targets, mode='stack', sparse=False):
if mode not in ('concatenate', 'stack', 'merge'):
    raise ValueError("%s got bad mode argument '%s'" %
                     (self.__class__.__name__, str(mode)))
elif mode == 'stack' and sparse:
    raise ValueError('Sparse matrices need to be 2D, hence they '
                     'cannot be stacked')
if targets.ndim > 2:
    raise ValueError('Targets needs to be 1D or 2D, but received %d '
                     'dimensions' % targets.ndim)
if 'int' not in str(targets.dtype):
    raise TypeError('need an integer array for targets')
if sparse:
    if not scipy_available:
        raise RuntimeError('The converting of indices to a sparse one-hot '
                           'vector requires scipy to be installed')
    if mode == 'concatenate':
        one_hot = scipy.sparse.csr_matrix(
            (np.ones(targets.size, dtype=self._dtype),
             (targets.flatten() + np.arange(targets.size) *
              self._max_labels) % (self._max_labels * targets.shape[1]),
             np.arange(targets.shape[0] + 1) * targets.shape[1]),
            (targets.shape[0], self._max_labels * targets.shape[1]))
    elif mode == 'merge':
        one_hot = scipy.sparse.csr_matrix(
            (np.ones(targets.size),
             targets.flatten(),
             np.arange(targets.shape[0] + 1) * targets.shape[1]),
            (targets.shape[0], self._max_labels))
else:
    one_hot = np.zeros(targets.shape + (self._max_labels,),
                       dtype=self._dtype)
    shape = (np.prod(one_hot.shape[:-1]), one_hot.shape[-1])
    one_hot.reshape(shape)[np.arange(shape[0]), targets.flatten()] = 1
    if mode == 'concatenate':
        shape = one_hot.shape[-3:-2] + (reduce(mul, one_hot.shape[-2:], 1),)
        one_hot = one_hot.reshape(shape)
    elif mode == 'merge':
        one_hot = np.minimum(one_hot.sum(axis=one_hot.ndim - 2), 1)
return one_hot
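# Illustrative sketch (not part of the original source): the three dense modes
# of the formatter above, reproduced with plain NumPy for max_labels = 3 and
# the batch of label lists [[0, 2], [1, 1]].
import numpy as np

targets = np.array([[0, 2], [1, 1]])
max_labels = 3

stack = np.zeros(targets.shape + (max_labels,))            # shape (2, 2, 3)
stack.reshape(-1, max_labels)[np.arange(targets.size), targets.flatten()] = 1

concatenate = stack.reshape(targets.shape[0], -1)          # shape (2, 6)

merge = np.minimum(stack.sum(axis=1), 1)                    # shape (2, 3)
# "merge" acts as an indicator: the duplicated label 1 in the second row
# still yields a single 1 in that position.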
'Return the one-hot transformation as a symbolic expression. If labels appear multiple times, their value in the one-hot vector is incremented. Parameters targets : tensor_like, 1- or 2-dimensional, integer dtype A symbolic tensor representing labels as integers between 0 and `max_labels` - 1, `max_labels` supplied at formatter construction. mode : string The way in which to convert the labels to arrays. Takes three different options: - "concatenate" : concatenates the one-hot vectors from multiple labels - "stack" : returns a matrix where each row is the one-hot vector of a label - "merge" : merges the one-hot vectors together to form a vector where the elements are the result of an indicator function NB: As the result of an indicator function the result is the same in case a label is duplicated in the input. sparse : bool If true then the return value is sparse matrix. Note that if sparse is True, then mode cannot be \'stack\' because sparse matrices need to be 2D Returns one_hot : TensorVariable, 1, 2 or 3-dimensional, sparse or dense A symbolic tensor representing a one-hot encoding of the supplied labels.'
def theano_expr(self, targets, mode='stack', sparse=False):
if mode not in ('concatenate', 'stack', 'merge'):
    raise ValueError("%s got bad mode argument '%s'" %
                     (self.__class__.__name__, str(mode)))
elif mode == 'stack' and sparse:
    raise ValueError('Sparse matrices need to be 2D, hence they '
                     'cannot be stacked')
squeeze_required = False
if targets.ndim != 2:
    if targets.ndim == 1:
        squeeze_required = True
        targets = targets.dimshuffle('x', 0)
    else:
        raise ValueError('targets tensor must be 1 or 2-dimensional')
if 'int' not in str(targets.dtype):
    raise TypeError('need an integer tensor for targets')
if sparse:
    if mode == 'concatenate':
        one_hot = theano.sparse.CSR(
            tensor.ones_like(targets, dtype=self._dtype).flatten(),
            (targets.flatten() + tensor.arange(targets.size) *
             self._max_labels) % (self._max_labels * targets.shape[1]),
            tensor.arange(targets.shape[0] + 1) * targets.shape[1],
            tensor.stack(targets.shape[0],
                         self._max_labels * targets.shape[1]))
    else:
        one_hot = theano.sparse.CSR(
            tensor.ones_like(targets, dtype=self._dtype).flatten(),
            targets.flatten(),
            tensor.arange(targets.shape[0] + 1) * targets.shape[1],
            tensor.stack(targets.shape[0], self._max_labels))
else:
    if mode == 'concatenate':
        one_hot = tensor.zeros((targets.shape[0] * targets.shape[1],
                                self._max_labels), dtype=self._dtype)
        one_hot = tensor.set_subtensor(
            one_hot[tensor.arange(targets.size), targets.flatten()], 1)
        one_hot = one_hot.reshape((targets.shape[0],
                                   targets.shape[1] * self._max_labels))
    elif mode == 'merge':
        one_hot = tensor.zeros((targets.shape[0], self._max_labels),
                               dtype=self._dtype)
        one_hot = tensor.set_subtensor(
            one_hot[tensor.arange(targets.size) % targets.shape[0],
                    targets.T.flatten()], 1)
    else:
        one_hot = tensor.zeros((targets.shape[0], targets.shape[1],
                                self._max_labels), dtype=self._dtype)
        one_hot = tensor.set_subtensor(
            one_hot[tensor.arange(targets.shape[0]).reshape(
                        (targets.shape[0], 1)),
                    tensor.arange(targets.shape[1]),
                    targets], 1)
if squeeze_required:
    if one_hot.ndim == 2:
        one_hot = one_hot.reshape((one_hot.shape[1],))
    if one_hot.ndim == 3:
        one_hot = one_hot.reshape((one_hot.shape[1], one_hot.shape[2]))
return one_hot
'.. todo:: WRITEME'
def __getstate__(self):
state = self.__dict__.copy()
if '_compiled_functions' in state:
    del state['_compiled_functions']
return state
'Builds a nested tuple of integers representing the mapping Parameters space : WRITEME source : WRITEME Returns WRITEME'
def _fill_mapping(self, space, source):
if isinstance(space, NullSpace):
    assert source == ''
    return None
elif not isinstance(space, CompositeSpace):
    if isinstance(source, (tuple, list)):
        source, = source
    if (space, source) in self.specs_to_index:
        spec_index = self.specs_to_index[(space, source)]
    else:
        spec_index = self.n_unique_specs
        self.specs_to_index[(space, source)] = spec_index
        self.n_unique_specs += 1
    return spec_index
else:
    spec_mapping = tuple(
        self._fill_mapping(sub_space, sub_source)
        for sub_space, sub_source in safe_zip(space.components, source))
    return spec_mapping
'Auxiliary recursive function used by self.flatten Parameters nested : WRITEME mapping : WRITEME rval : WRITEME Returns WRITEME'
def _fill_flat(self, nested, mapping, rval):
if isinstance(nested, CompositeSpace):
    nested = tuple(nested.components)
if mapping is None:
    if not isinstance(nested, NullSpace):
        assert not nested, (
            'The following element is mapped to NullSpace, so it should '
            'evaluate to False (for instance, None, an empty string or an '
            'empty tuple), but is %s' % nested)
    return
if isinstance(mapping, int):
    idx = mapping
    if isinstance(nested, (tuple, list)):
        if len(nested) != 1:
            raise ValueError('When mapping is an int, we expect nested to '
                             'be a single element. But mapping is ' +
                             str(mapping) + ' and nested is a tuple of '
                             'length ' + str(len(nested)))
        nested, = nested
    if rval[idx] is None:
        rval[idx] = nested
    else:
        assert rval[idx] == nested, (
            'This mapping was built with the same element occurring more '
            'than once in the nested representation, but current nested '
            'sequence has different values (%s and %s) at these positions.'
            % (rval[idx], nested))
else:
    for sub_nested, sub_mapping in safe_zip(nested, mapping):
        self._fill_flat(sub_nested, sub_mapping, rval)
'Iterate jointly through nested and spec_mapping, returns a flat tuple. The integer in spec_mapping corresponding to each element in nested represents the index of that element in the returned sequence. If the original data_specs had duplicate elements at different places, then "nested" also have to have equal elements at these positions. "nested" can be a nested tuple, or composite space. If it is a composite space, a flattened composite space will be returned. If `return_tuple` is True, a tuple is always returned (tuple of non-composite Spaces if nested is a Space, empty tuple if all Spaces are NullSpaces, length-1 tuple if there is only one non-composite Space, etc.). Parameters nested : WRITEME return_tuple : WRITEME Returns WRITEME'
def flatten(self, nested, return_tuple=False):
rval = [None] * self.n_unique_specs
self._fill_flat(nested, self.spec_mapping, rval)
assert None not in rval, (
    'This mapping is invalid, as it did not contain all numbers from 0 to '
    '%i (or None was in nested), nested: %s' %
    (self.n_unique_specs - 1, nested))
if return_tuple:
    return tuple(rval)
if len(rval) == 1:
    return rval[0]
if isinstance(nested, (tuple, list)):
    return tuple(rval)
elif isinstance(nested, Space):
    return CompositeSpace(rval)
'Auxiliary recursive function used by self.nest Parameters flat : WRITEME mapping : WRITEME Returns WRITEME'
def _make_nested_tuple(self, flat, mapping):
if mapping is None:
    return None
if isinstance(mapping, int):
    idx = mapping
    if isinstance(flat, (tuple, list)):
        assert 0 <= idx < len(flat)
        return flat[idx]
    else:
        assert idx == 0
        return flat
else:
    return tuple(self._make_nested_tuple(flat, sub_mapping)
                 for sub_mapping in mapping)
'Auxiliary recursive function used by self.nest Parameters flat : WRITEME mapping : WRITEME Returns WRITEME'
def _make_nested_space(self, flat, mapping):
if isinstance(mapping, int):
    idx = mapping
    if isinstance(flat, CompositeSpace):
        assert 0 <= idx < len(flat.components)
        return flat.components[idx]
    else:
        assert idx == 0
        return flat
else:
    return CompositeSpace([self._make_nested_space(flat, sub_mapping)
                           for sub_mapping in mapping])
'Iterate through spec_mapping, building a nested tuple from "flat". The length of "flat" should be equal to self.n_unique_specs. Parameters flat : Space or tuple WRITEME Returns WRITEME'
def nest(self, flat):
if isinstance(flat, Space):
    if isinstance(flat, CompositeSpace):
        assert len(flat.components) == self.n_unique_specs
    else:
        assert self.n_unique_specs == 1
    return self._make_nested_space(flat, self.spec_mapping)
else:
    if isinstance(flat, (list, tuple)):
        assert len(flat) == self.n_unique_specs
    else:
        assert self.n_unique_specs == 1
    return self._make_nested_tuple(flat, self.spec_mapping)
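# Conceptual sketch (hypothetical values, not the pylearn2 API): the spec
# mapping is a nested tuple of integers, and nesting simply looks each index
# up in the flat tuple, so a duplicated index reuses the same flat element.
def nest_with_mapping(flat, mapping):
    if isinstance(mapping, int):
        return flat[mapping]
    return tuple(nest_with_mapping(flat, sub) for sub in mapping)

flat = ('features', 'targets')
mapping = (0, (1, 0))
assert nest_with_mapping(flat, mapping) == ('features', ('targets', 'features'))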
'.. todo:: WRITEME'
def __eq__(self, other):
return ((type(self) == type(other)) and (self.ndim == other.ndim) and (self.axis == other.axis) and (self.fill == other.fill))
'.. todo:: WRITEME'
def __hash__(self):
return (((hash(type(self)) ^ hash(self.ndim)) ^ hash(self.axis)) ^ hash(self.fill))
'.. todo:: WRITEME'
def make_node(self, x, new_length, insert_at):
x_ = tensor.as_tensor_variable(x)
new_length_ = tensor.as_tensor_variable(new_length)
insert_at_ = tensor.as_tensor_variable(insert_at)
assert x_.ndim == self.ndim, (
    '%s instance expected x.ndim = %d, got %d' %
    (self.__class__.__name__, self.ndim, x.ndim))
assert new_length_.ndim == 0, 'new_length must be a scalar'
assert insert_at_.ndim == 1, 'insert_at must be vector'
assert (new_length_.dtype.startswith('int') or
        new_length.dtype.startswith('uint')), 'new_length must be integer type'
assert (insert_at_.dtype.startswith('int') or
        insert_at_.dtype.startswith('uint')), 'insert_at must be integer type'
return theano.Apply(self, inputs=[x_, new_length_, insert_at_],
                    outputs=[x_.type()])
'.. todo:: WRITEME'
def perform(self, node, inputs, output_storage):
x, new_length, nonconstants = inputs
nonconstant_set = set(nonconstants)
constant = sorted(set(xrange(new_length)) - nonconstant_set)
assert x.shape[self.axis] == len(nonconstant_set), (
    'x.shape[%d] != len(set(nonconstants))' % self.axis)
assert new_length >= x.shape[self.axis], (
    'number of items along axis in new array is less than old array')
new_shape = (x.shape[:self.axis] + (int(new_length),) +
             x.shape[self.axis + 1:])
z = output_storage[0][0] = np.empty(new_shape, dtype=x.dtype)
z[index_along_axis(nonconstants, self.ndim, self.axis)] = x
z[index_along_axis(constant, self.ndim, self.axis)] = self.fill
'.. todo:: WRITEME'
def grad(self, inputs, gradients):
x, new_length, nonconstants = inputs
d_out = gradients[0]
swap = list(range(self.ndim))
swap.remove(self.axis)
swap.insert(0, self.axis)
return [d_out.dimshuffle(swap)[nonconstants].dimshuffle(swap),
        grad_not_implemented(self, 1, new_length),
        grad_not_implemented(self, 2, nonconstants)]
'.. todo:: WRITEME'
def __str__(self):
return ('%s{ndim=%d,axis=%d,fill=%s}' % (self.__class__.__name__, self.ndim, self.axis, str(self.fill)))
'.. todo:: WRITEME'
def __enter__(self):
if isinstance(self._f, six.string_types):
    self._handle = open(self._f, self._mode, self._buffering)
else:
    self._handle = self._f
return self._handle
'.. todo:: WRITEME'
def __exit__(self, exc_type, exc_value, traceback):
if (self._handle is not self._f): self._handle.close()
'.. todo:: WRITEME'
def make_node(self, xin):
xout = xin.type.make_variable()
return theano.gof.Apply(op=self, inputs=[xin], outputs=[xout])
'.. todo:: WRITEME'
def perform(self, node, inputs, output_storage):
xin, = inputs
xout, = output_storage
xout[0] = xin
self.callback(xin)
'.. todo:: WRITEME'
def grad(self, inputs, output_gradients):
return output_gradients
'.. todo:: WRITEME'
def R_op(self, inputs, eval_points):
return [x for x in eval_points]
'.. todo:: WRITEME'
def __eq__(self, other):
return ((type(self) == type(other)) and (self.callback == other.callback))
'.. todo:: WRITEME'
def hash(self):
return hash(self.callback)
'.. todo:: WRITEME'
def __hash__(self):
return self.hash()
'Report being disconnected to all inputs in order to have no gradient at all. Parameters node : WRITEME'
def connection_pattern(self, node):
return [[False]]
'Report being disconnected to all inputs in order to have no gradient at all. Parameters inputs : WRITEME output_gradients : WRITEME'
def grad(self, inputs, output_gradients):
return [theano.gradient.DisconnectedType()()]
'Retrieves description of the next batch of examples. Returns next_batch : `slice` or list of int An object describing the indices in the dataset of a batch of data. Either a `slice` object or a list of integers specifying individual indices of examples. Raises StopIteration When there are no more batches to return.'
def next(self):
raise NotImplementedError()
'The (maximum) number of examples in each batch. Returns batch_size : int The (maximum) number of examples in each batch. This is either as specified via the constructor, or inferred from the dataset size and the number of batches requested.'
@property
def batch_size(self):
return self._batch_size
'The total number of batches that the iterator will ever return. Returns num_batches : int The total number of batches the iterator will ever return. This is either as specified via the constructor, or inferred from the dataset size and the batch size.'
@property
def num_batches(self):
return self._num_batches
'The total number of examples over which the iterator operates. Returns num_examples : int The total number of examples over which the iterator operates. May be less than the dataset size.'
@property
def num_examples(self):
return (self.batch_size * self.num_batches)
'Whether every batch will be the same size. Returns uneven : bool `True` if returned batches may be of differing sizes, `False` otherwise.'
@property
def uneven(self):
raise NotImplementedError()
'Number of examples that will be visited by the iterator. (May be lower than dataset_size)'
@property
def num_examples(self):
product = self.batch_size * self.num_batches
if product > self._dataset_size:
    return self.batch_size * (self.num_batches - 1)
else:
    return product
'Returns the next batch of _base_iterator. Raises StopIteration When _base_iterator reaches the end of the dataset. Notes Uneven batches may be discarded, and StopIteration may be raised without having iterated through every example.'
def next(self):
length = -1
while length != self.batch_size:
    batch = self._base_iterator.next()
    if isinstance(batch, slice):
        length = batch.stop - batch.start
    else:
        length = len(batch)
return batch
'Retrieves the next batch of examples. Returns next_batch : object An object representing a mini-batch of data, conforming to the space specified in the `data_specs` constructor argument to this iterator. Will be a tuple if more than one data source was specified or if the constructor parameter `return_tuple` was `True`. Raises StopIteration When there are no more batches to return.'
@wraps(SubsetIterator.next)
def next(self):
next_index = self._subset_iterator.next()
if hasattr(self._dataset, 'get'):
    rval = self._next(next_index)
else:
    rval = self._fallback_next(next_index)
if not self._return_tuple and len(rval) == 1:
    rval, = rval
return rval
'Get information about the experimental environment such as the cpu, os and the hostname of the machine on which the experiment is running.'
def _get_exp_env_info(self):
self.exp_env_info['host'] = socket.gethostname()
self.exp_env_info['cpu'] = platform.processor()
self.exp_env_info['os'] = platform.platform()
if 'theano' in sys.modules:
    self.exp_env_info['theano_config'] = sys.modules['theano'].config
else:
    self.exp_env_info['theano_config'] = None
'Get version of Python packages.'
def _get_lib_versions(self):
repos = os.getenv('PYLEARN2_TRACK_MODULES', '')
default_repos = 'pylearn2:theano:numpy:scipy'
repos = default_repos + ':' + repos
repos = set(repos.split(':'))
for repo in repos:
    try:
        if repo == '':
            continue
        __import__(repo)
        if hasattr(sys.modules[repo], '__version__'):
            v = sys.modules[repo].__version__
            if v != 'unknown':
                self.versions[repo] = v
                continue
        self.versions[repo] = self._get_git_version(
            self._get_module_parent_path(sys.modules[repo]))
    except ImportError:
        self.versions[repo] = None
known = copy.copy(self.versions)
unknown = [k for k, w in known.items() if not w]
known = dict((k, w) for k, w in known.items() if w)
self.str_versions = ' | '.join(
    ['%s:%s' % (k, w) for k, w in sorted(six.iteritems(known))] +
    ['%s:?' % ','.join(sorted(unknown))])
'Return version of the Python packages as a string. e.g. numpy:1.6.1 | pylearn:a6e634b83d | pylearn2:57a156beb0'
def __str__(self):
return self.str_versions
'Return the git revision of a repository with the letter \'M\' appended to the revision if the repo was modified. e.g. 10d3046e85 M Parameters root : str Root folder of the repository Returns rval : str or None A string with the revision hash, or None if it could not be retrieved (e.g. if it is not actually a git repository)'
def _get_git_version(self, root):
if not os.path.isdir(os.path.join(root, '.git')):
    return None
cwd_backup = os.getcwd()
try:
    os.chdir(root)
    sub_p = subprocess.Popen(['git', 'rev-parse', 'HEAD'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    version = sub_p.communicate()[0][0:10].strip()
    sub_p = subprocess.Popen(['git', 'diff', '--name-only'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    modified = sub_p.communicate()[0]
    if len(modified):
        version += ' M'
    return version
except Exception:
    pass
finally:
    try:
        os.chdir(cwd_backup)
    except Exception:
        warnings.warn('Could not chdir back to ' + cwd_backup)
'Same as `_get_git_version` but for a Mercurial repository.'
def _get_hg_version(self, root):
if not os.path.isdir(os.path.join(root, '.hg')):
    return None
cwd_backup = os.getcwd()
try:
    os.chdir(root)
    sub_p = subprocess.Popen(['hg', 'parents'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    sub_p_output = sub_p.communicate()[0]
finally:
    os.chdir(cwd_backup)
first_line = sub_p_output.split('\n')[0]
return first_line.split(':')[2][0:10]
'Return path to a given module.'
def _get_module_path(self, module):
return os.path.realpath(module.__path__[0])
'Return path to the parent directory of a given module.'
def _get_module_parent_path(self, module):
return os.path.dirname(self._get_module_path(module))
'Print version of the Python packages as a string. e.g. numpy:1.6.1 | pylearn:a6e634b83d | pylearn2:57a156beb0'
def print_versions(self):
logger.info(self.__str__())
'Return basic information about the experiment setup such as the hostname of the machine the experiment was run on, the operating system installed on the machine. Parameters print_theano_config : bool, optional If True, information about the theano configuration will be displayed.'
def print_exp_env_info(self, print_theano_config=False):
logger.info('HOST: {0}'.format(self.exp_env_info['host']))
logger.info('CPU: {0}'.format(self.exp_env_info['cpu']))
logger.info('OS: {0}'.format(self.exp_env_info['os']))
if print_theano_config:
    logger.info(self.exp_env_info['theano_config'])
'Generator function to iterate through all minibatches'
def __iter__(self):
counter = [0, 0, 0]
for chosen in self.permut:
    index = counter[chosen]
    minibatch = self.dataset[chosen][index * self.batch_size:
                                     (index + 1) * self.batch_size]
    counter[chosen] = (counter[chosen] + 1) % self.limit[chosen]
    yield minibatch
'Return length of the weighted union'
def __len__(self):
return self.length
'Same generator as __iter__, but yields only the chosen indices.'
def by_index(self):
counter = [0, 0, 0]
for chosen in self.permut:
    index = counter[chosen]
    counter[chosen] = (counter[chosen] + 1) % self.limit[chosen]
    yield chosen, index
'Format the specified record as text. Parameters record : object A LogRecord object with the appropriate attributes. Returns s : str A string containing the formatted log message. Notes The record\'s attribute dictionary is used as the operand to a string formatting operation which yields the returned string. Before formatting the dictionary, a couple of preparatory steps are carried out. The message attribute of the record is computed using LogRecord.getMessage(). If the formatting string uses the time (as determined by a call to usesTime(), formatTime() is called to format the event time. If there is exception information, it is formatted using formatException() and appended to the message.'
def format(self, record):
record.message = record.getMessage()
if hasattr(self, 'usesTime') and self.usesTime():
    record.asctime = self.formatTime(record, self.datefmt)
emit_special = (self._only_from is None or
                record.name.startswith(self._only_from))
if record.levelno == logging.INFO and emit_special:
    s = self._info_fmt % record.__dict__
else:
    s = self._fmt % record.__dict__
if record.exc_info:
    if not record.exc_text:
        record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
    if s[-1:] != '\n':
        s = s + '\n'
    try:
        s = s + record.exc_text
    except UnicodeError:
        s = s + record.exc_text.decode(sys.getfilesystemencoding())
return s
'.. todo:: WRITEME'
@property
def stdout(self):
return (sys.stdout if (self._stdout is None) else self._stdout)
'.. todo:: WRITEME'
@property
def stderr(self):
return (sys.stderr if (self._stderr is None) else self._stderr)
'Flushes the stream.'
def flush(self):
for stream in (self.stdout, self.stderr): stream.flush()
'Emit a record. If a formatter is specified, it is used to format the record. The record is then written to the stream with a trailing newline. If exception information is present, it is formatted using traceback.print_exception and appended to the stream. If the stream has an \'encoding\' attribute, it is used to determine how to do the output to the stream. Parameters record : WRITEME'
def emit(self, record):
try:
    msg = self.format(record)
    if record.levelno > logging.INFO:
        stream = self.stderr
    else:
        stream = self.stdout
    fs = u'%s\n'
    if not getattr(logging, '_unicode', True):
        stream.write(fs % msg)
    else:
        try:
            if (isinstance(msg, six.text_type) and
                    getattr(stream, 'encoding', None)):
                try:
                    stream.write(fs % msg)
                except UnicodeEncodeError:
                    stream.write((fs % msg).encode(stream.encoding))
            else:
                stream.write(fs % msg)
        except (UnicodeError, TypeError):
            stream.write((fs % msg).encode('UTF-8'))
    self.flush()
except (KeyboardInterrupt, SystemExit):
    raise
except:
    self.handleError(record)
'Initialize the dA class by specifying the number of visible units (the dimension d of the input), the number of hidden units (the dimension d\' of the latent or hidden space) and the corruption level. The constructor also receives symbolic variables for the input, weights and bias. Such symbolic variables are useful when, for example, the input is the result of some computations, or when weights are shared between the dA and an MLP layer. When dealing with SdAs this always happens: the dA on layer 2 gets as input the output of the dA on layer 1, and the weights of the dA are used in the second stage of training to construct an MLP. :type numpy_rng: numpy.random.RandomState :param numpy_rng: numpy random number generator used to generate weights :type theano_rng: theano.tensor.shared_randomstreams.RandomStreams :param theano_rng: Theano random generator; if None is given one is generated based on a seed drawn from `rng` :type input: theano.tensor.TensorType :param input: a symbolic description of the input or None for standalone dA :type n_visible: int :param n_visible: number of visible units :type n_hidden: int :param n_hidden: number of hidden units :type W: theano.tensor.TensorType :param W: Theano variable pointing to a set of weights that should be shared between the dA and another architecture; if the dA should be standalone set this to None :type bhid: theano.tensor.TensorType :param bhid: Theano variable pointing to a set of bias values (for hidden units) that should be shared between the dA and another architecture; if the dA should be standalone set this to None :type bvis: theano.tensor.TensorType :param bvis: Theano variable pointing to a set of bias values (for visible units) that should be shared between the dA and another architecture; if the dA should be standalone set this to None'
def __init__(self, numpy_rng, theano_rng=None, input=None, n_visible=784, n_hidden=500, W=None, bhid=None, bvis=None):
self.n_visible = n_visible
self.n_hidden = n_hidden
if not theano_rng:
    theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
if not W:
    initial_W = numpy.asarray(
        numpy_rng.uniform(
            low=-4 * numpy.sqrt(6.0 / (n_hidden + n_visible)),
            high=4 * numpy.sqrt(6.0 / (n_hidden + n_visible)),
            size=(n_visible, n_hidden)),
        dtype=theano.config.floatX)
    W = theano.shared(value=initial_W, name='W', borrow=True)
if not bvis:
    bvis = theano.shared(value=numpy.zeros(n_visible,
                                           dtype=theano.config.floatX),
                         borrow=True)
if not bhid:
    bhid = theano.shared(value=numpy.zeros(n_hidden,
                                           dtype=theano.config.floatX),
                         name='b', borrow=True)
self.W = W
self.b = bhid
self.b_prime = bvis
self.W_prime = self.W.T
self.theano_rng = theano_rng
if input is None:
    self.x = T.dmatrix(name='input')
else:
    self.x = input
self.params = [self.W, self.b, self.b_prime]
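# Worked example (not from the original source): the weight initialization
# interval used above, 4 * sqrt(6 / (n_hidden + n_visible)), evaluated for the
# default layer sizes from the signature.
import numpy

n_visible, n_hidden = 784, 500
bound = 4 * numpy.sqrt(6.0 / (n_hidden + n_visible))
print(bound)  # ~0.2734; W is drawn uniformly from [-bound, bound]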
'This function keeps ``1 - corruption_level`` entries of the input unchanged and zeroes out a randomly selected subset of size ``corruption_level``. Note: the first argument of theano.rng.binomial is the shape (size) of the random numbers it should produce, the second argument is the number of trials, and the third argument is the probability of success of any trial. This produces an array of 0s and 1s, where 1 occurs with probability ``1 - corruption_level`` and 0 with probability ``corruption_level``. The binomial function returns the int64 data type by default; int64 multiplied by the input type (floatX) always returns float64. To keep all data in floatX when floatX is float32, we set the dtype of the binomial to floatX. Since the value of the binomial is always 0 or 1, this does not change the result. This is needed to allow the GPU to work correctly, as it only supports float32 for now.'
def get_corrupted_input(self, input, corruption_level):
return (self.theano_rng.binomial(size=input.shape, n=1, p=(1 - corruption_level), dtype=theano.config.floatX) * input)
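# NumPy analogue (illustration only, not the Theano code above) of the masking
# corruption: keep each entry with probability 1 - corruption_level, zero it
# otherwise.
import numpy as np

rng = np.random.RandomState(0)
x = rng.rand(2, 5).astype('float32')
corruption_level = 0.3
mask = rng.binomial(n=1, p=1 - corruption_level, size=x.shape).astype('float32')
tilde_x = mask * x  # roughly 30% of the entries end up zeroed out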
'Computes the values of the hidden layer'
def get_hidden_values(self, input):
return T.nnet.sigmoid((T.dot(input, self.W) + self.b))
'Computes the reconstructed input given the values of the hidden layer'
def get_reconstructed_input(self, hidden):
return T.nnet.sigmoid((T.dot(hidden, self.W_prime) + self.b_prime))
'This function computes the cost and the updates for one training step of the dA.'
def get_cost_updates(self, corruption_level, learning_rate):
tilde_x = self.get_corrupted_input(self.x, corruption_level)
y = self.get_hidden_values(tilde_x)
z = self.get_reconstructed_input(y)
L = -T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1 - z), axis=1)
cost = T.mean(L)
gparams = T.grad(cost, self.params)
updates = [(param, param - learning_rate * gparam)
           for param, gparam in zip(self.params, gparams)]
return (cost, updates)
'RBM constructor. Defines the parameters of the model along with basic operations for inferring hidden from visible (and vice-versa), as well as for performing CD updates. :param input: None for standalone RBMs or symbolic variable if RBM is part of a larger graph. :param n_visible: number of visible units :param n_hidden: number of hidden units :param W: None for standalone RBMs or symbolic variable pointing to a shared weight matrix in case RBM is part of a DBN network; in a DBN, the weights are shared between RBMs and layers of a MLP :param hbias: None for standalone RBMs or symbolic variable pointing to a shared hidden units bias vector in case RBM is part of a different network :param vbias: None for standalone RBMs or a symbolic variable pointing to a shared visible units bias'
def __init__(self, input=None, n_visible=784, n_hidden=500, W=None, hbias=None, vbias=None, numpy_rng=None, theano_rng=None):
self.n_visible = n_visible
self.n_hidden = n_hidden
if numpy_rng is None:
    numpy_rng = numpy.random.RandomState(1234)
if theano_rng is None:
    theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
if W is None:
    initial_W = numpy.asarray(
        numpy_rng.uniform(
            low=-4 * numpy.sqrt(6.0 / (n_hidden + n_visible)),
            high=4 * numpy.sqrt(6.0 / (n_hidden + n_visible)),
            size=(n_visible, n_hidden)),
        dtype=theano.config.floatX)
    W = theano.shared(value=initial_W, name='W', borrow=True)
if hbias is None:
    hbias = theano.shared(value=numpy.zeros(n_hidden,
                                            dtype=theano.config.floatX),
                          name='hbias', borrow=True)
if vbias is None:
    vbias = theano.shared(value=numpy.zeros(n_visible,
                                            dtype=theano.config.floatX),
                          name='vbias', borrow=True)
self.input = input
if not input:
    self.input = T.matrix('input')
self.W = W
self.hbias = hbias
self.vbias = vbias
self.theano_rng = theano_rng
self.params = [self.W, self.hbias, self.vbias]
'Function to compute the free energy'
def free_energy(self, v_sample):
wx_b = T.dot(v_sample, self.W) + self.hbias
vbias_term = T.dot(v_sample, self.vbias)
hidden_term = T.sum(T.log(1 + T.exp(wx_b)), axis=1)
return -hidden_term - vbias_term
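# NumPy check (illustration only) of the free energy computed above:
# F(v) = -vbias . v - sum_j log(1 + exp(hbias_j + (v W)_j)).
import numpy as np

rng = np.random.RandomState(0)
v = rng.binomial(1, 0.5, size=(3, 6)).astype('float64')  # 3 samples, 6 visible
W = rng.randn(6, 4) * 0.1                                # 4 hidden units
hbias = np.zeros(4)
vbias = np.zeros(6)

wx_b = v.dot(W) + hbias
free_energy = -np.log1p(np.exp(wx_b)).sum(axis=1) - v.dot(vbias)
print(free_energy.shape)  # (3,) -- one scalar free energy per sample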
'This function propagates the visible units activation upwards to the hidden units Note that we return also the pre-sigmoid activation of the layer. As it will turn out later, due to how Theano deals with optimizations, this symbolic variable will be needed to write down a more stable computational graph (see details in the reconstruction cost function)'
def propup(self, vis):
pre_sigmoid_activation = T.dot(vis, self.W) + self.hbias
return [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]
'This function infers state of hidden units given visible units'
def sample_h_given_v(self, v0_sample):
pre_sigmoid_h1, h1_mean = self.propup(v0_sample)
h1_sample = self.theano_rng.binomial(size=h1_mean.shape, n=1, p=h1_mean,
                                     dtype=theano.config.floatX)
return [pre_sigmoid_h1, h1_mean, h1_sample]
'This function propagates the hidden units activation downwards to the visible units Note that we return also the pre_sigmoid_activation of the layer. As it will turn out later, due to how Theano deals with optimizations, this symbolic variable will be needed to write down a more stable computational graph (see details in the reconstruction cost function)'
def propdown(self, hid):
pre_sigmoid_activation = T.dot(hid, self.W.T) + self.vbias
return [pre_sigmoid_activation, T.nnet.sigmoid(pre_sigmoid_activation)]
'This function infers state of visible units given hidden units'
def sample_v_given_h(self, h0_sample):
pre_sigmoid_v1, v1_mean = self.propdown(h0_sample)
v1_sample = self.theano_rng.binomial(size=v1_mean.shape, n=1, p=v1_mean,
                                     dtype=theano.config.floatX)
return [pre_sigmoid_v1, v1_mean, v1_sample]
'This function implements one step of Gibbs sampling, starting from the hidden state'
def gibbs_hvh(self, h0_sample):
pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h0_sample)
pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v1_sample)
return [pre_sigmoid_v1, v1_mean, v1_sample,
        pre_sigmoid_h1, h1_mean, h1_sample]
'This function implements one step of Gibbs sampling, starting from the visible state'
def gibbs_vhv(self, v0_sample):
pre_sigmoid_h1, h1_mean, h1_sample = self.sample_h_given_v(v0_sample)
pre_sigmoid_v1, v1_mean, v1_sample = self.sample_v_given_h(h1_sample)
return [pre_sigmoid_h1, h1_mean, h1_sample,
        pre_sigmoid_v1, v1_mean, v1_sample]
'This function implements one step of CD-k or PCD-k :param lr: learning rate used to train the RBM :param persistent: None for CD. For PCD, shared variable containing old state of Gibbs chain. This must be a shared variable of size (batch size, number of hidden units). :param k: number of Gibbs steps to do in CD-k/PCD-k Returns a proxy for the cost and the updates dictionary. The dictionary contains the update rules for weights and biases but also an update of the shared variable used to store the persistent chain, if one is used.'
def get_cost_updates(self, lr=0.1, persistent=None, k=1):
pre_sigmoid_ph, ph_mean, ph_sample = self.sample_h_given_v(self.input)
if persistent is None:
    chain_start = ph_sample
else:
    chain_start = persistent
([pre_sigmoid_nvs, nv_means, nv_samples,
  pre_sigmoid_nhs, nh_means, nh_samples], updates) = theano.scan(
    self.gibbs_hvh,
    outputs_info=[None, None, None, None, None, chain_start],
    n_steps=k,
    name='gibbs_hvh')
chain_end = nv_samples[-1]
cost = (T.mean(self.free_energy(self.input)) -
        T.mean(self.free_energy(chain_end)))
gparams = T.grad(cost, self.params, consider_constant=[chain_end])
for gparam, param in zip(gparams, self.params):
    updates[param] = param - gparam * T.cast(lr, dtype=theano.config.floatX)
if persistent:
    updates[persistent] = nh_samples[-1]
    monitoring_cost = self.get_pseudo_likelihood_cost(updates)
else:
    monitoring_cost = self.get_reconstruction_cost(updates,
                                                   pre_sigmoid_nvs[-1])
return (monitoring_cost, updates)
'Stochastic approximation to the pseudo-likelihood'
def get_pseudo_likelihood_cost(self, updates):
bit_i_idx = theano.shared(value=0, name='bit_i_idx')
xi = T.round(self.input)
fe_xi = self.free_energy(xi)
xi_flip = T.set_subtensor(xi[:, bit_i_idx], 1 - xi[:, bit_i_idx])
fe_xi_flip = self.free_energy(xi_flip)
cost = T.mean(self.n_visible * T.log(T.nnet.sigmoid(fe_xi_flip - fe_xi)))
updates[bit_i_idx] = (bit_i_idx + 1) % self.n_visible
return cost
'Approximation to the reconstruction error. Note that this function requires the pre-sigmoid activation as input. To understand why this is so, you need to understand a bit about how Theano works. Whenever you compile a Theano function, the computational graph that you pass as input gets optimized for speed and stability. This is done by replacing several parts of the subgraph with others. One such optimization expresses terms of the form log(sigmoid(x)) in terms of softplus. We need this optimization for the cross-entropy since the sigmoid of numbers larger than 30 (or even less than that) turns to 1, and numbers smaller than -30 turn to 0, which in turn forces Theano to compute log(0), and therefore we get either -inf or NaN as the cost. If the value is expressed in terms of softplus, we do not get this undesirable behaviour. This optimization usually works fine, but here we have a special case. The sigmoid is applied inside the scan op, while the log is outside. Therefore Theano will only see log(scan(..)) instead of log(sigmoid(..)) and will not apply the wanted optimization. We cannot simply replace the sigmoid inside scan with something else, because it only needs to be replaced on the last step. Therefore the easiest and most efficient way is to also return the pre-sigmoid activation as an output of scan, and apply both the log and the sigmoid outside scan, such that Theano can catch and optimize the expression.'
def get_reconstruction_cost(self, updates, pre_sigmoid_nv):
cross_entropy = T.mean(
    T.sum(self.input * T.log(T.nnet.sigmoid(pre_sigmoid_nv)) +
          (1 - self.input) * T.log(1 - T.nnet.sigmoid(pre_sigmoid_nv)),
          axis=1))
return cross_entropy
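# Numeric illustration (not from the original source) of the stability issue
# described above: for very negative x, computing log(sigmoid(x)) directly
# overflows exp and yields log(0) = -inf, while the softplus form
# log(sigmoid(x)) = -log(1 + exp(-x)) stays finite.
import numpy as np

x = np.array([-800.0, 0.0, 800.0])
naive = np.log(1.0 / (1.0 + np.exp(-x)))   # [-inf, -0.693, 0.]; exp overflows
softplus_form = -np.logaddexp(0.0, -x)     # [-800., -0.693, -0.]; finite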
'Initialize the cA class by specifying the number of visible units (the dimension d of the input), the number of hidden units (the dimension d\' of the latent or hidden space) and the contraction level. The constructor also receives symbolic variables for the input, weights and bias. :type numpy_rng: numpy.random.RandomState :param numpy_rng: numpy random number generator used to generate weights :type theano_rng: theano.tensor.shared_randomstreams.RandomStreams :param theano_rng: Theano random generator; if None is given one is generated based on a seed drawn from `rng` :type input: theano.tensor.TensorType :param input: a symbolic description of the input or None for standalone cA :type n_visible: int :param n_visible: number of visible units :type n_hidden: int :param n_hidden: number of hidden units :type n_batchsize: int :param n_batchsize: number of examples per batch :type W: theano.tensor.TensorType :param W: Theano variable pointing to a set of weights that should be shared between the cA and another architecture; if the cA should be standalone set this to None :type bhid: theano.tensor.TensorType :param bhid: Theano variable pointing to a set of bias values (for hidden units) that should be shared between the cA and another architecture; if the cA should be standalone set this to None :type bvis: theano.tensor.TensorType :param bvis: Theano variable pointing to a set of bias values (for visible units) that should be shared between the cA and another architecture; if the cA should be standalone set this to None'
def __init__(self, numpy_rng, input=None, n_visible=784, n_hidden=100, n_batchsize=1, W=None, bhid=None, bvis=None):
self.n_visible = n_visible
self.n_hidden = n_hidden
self.n_batchsize = n_batchsize
if not W:
    initial_W = numpy.asarray(
        numpy_rng.uniform(
            low=-4 * numpy.sqrt(6.0 / (n_hidden + n_visible)),
            high=4 * numpy.sqrt(6.0 / (n_hidden + n_visible)),
            size=(n_visible, n_hidden)),
        dtype=theano.config.floatX)
    W = theano.shared(value=initial_W, name='W', borrow=True)
if not bvis:
    bvis = theano.shared(value=numpy.zeros(n_visible,
                                           dtype=theano.config.floatX),
                         borrow=True)
if not bhid:
    bhid = theano.shared(value=numpy.zeros(n_hidden,
                                           dtype=theano.config.floatX),
                         name='b', borrow=True)
self.W = W
self.b = bhid
self.b_prime = bvis
self.W_prime = self.W.T
if input is None:
    self.x = T.dmatrix(name='input')
else:
    self.x = input
self.params = [self.W, self.b, self.b_prime]
'Computes the values of the hidden layer'
def get_hidden_values(self, input):
return T.nnet.sigmoid((T.dot(input, self.W) + self.b))
'Computes the jacobian of the hidden layer with respect to the input, reshapes are necessary for broadcasting the element-wise product on the right axis'
def get_jacobian(self, hidden, W):
return (T.reshape((hidden * (1 - hidden)), (self.n_batchsize, 1, self.n_hidden)) * T.reshape(W, (1, self.n_visible, self.n_hidden)))
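# Shape sketch (illustration only, ignoring the bias for brevity): for a
# sigmoid layer h = sigmoid(x W), d h_j / d x_i = h_j * (1 - h_j) * W_ij, so
# broadcasting (batch, 1, n_hidden) against (1, n_visible, n_hidden) gives one
# (n_visible, n_hidden) Jacobian per example, as in the Theano code above.
import numpy as np

batch, n_visible, n_hidden = 2, 5, 3
rng = np.random.RandomState(0)
W = rng.randn(n_visible, n_hidden)
x = rng.rand(batch, n_visible)
h = 1.0 / (1.0 + np.exp(-x.dot(W)))
J = (h * (1 - h)).reshape(batch, 1, n_hidden) * W.reshape(1, n_visible, n_hidden)
print(J.shape)  # (2, 5, 3)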
'Computes the reconstructed input given the values of the hidden layer'
def get_reconstructed_input(self, hidden):
return T.nnet.sigmoid((T.dot(hidden, self.W_prime) + self.b_prime))
'This function computes the cost and the updates for one training step of the cA.'
def get_cost_updates(self, contraction_level, learning_rate):
y = self.get_hidden_values(self.x)
z = self.get_reconstructed_input(y)
J = self.get_jacobian(y, self.W)
self.L_rec = -T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1 - z),
                    axis=1)
self.L_jacob = T.sum(J ** 2) // self.n_batchsize
cost = T.mean(self.L_rec) + contraction_level * T.mean(self.L_jacob)
gparams = T.grad(cost, self.params)
updates = []
for param, gparam in zip(self.params, gparams):
    updates.append((param, param - learning_rate * gparam))
return (cost, updates)
':param shared_positions: theano ndarray shared var with many particle [initial] positions :param energy_fn: callable such that energy_fn(positions) returns theano vector of energies. The len of this vector is the batchsize. The sum of this energy vector must be differentiable (with theano.tensor.grad) with respect to the positions for HMC sampling to work.'
@classmethod
def new_from_shared_positions(cls, shared_positions, energy_fn,
                              initial_stepsize=0.01,
                              target_acceptance_rate=0.9, n_steps=20,
                              stepsize_dec=0.98, stepsize_min=0.001,
                              stepsize_max=0.25, stepsize_inc=1.02,
                              avg_acceptance_slowness=0.9, seed=12345):
stepsize = sharedX(initial_stepsize, 'hmc_stepsize')
avg_acceptance_rate = sharedX(target_acceptance_rate, 'avg_acceptance_rate')
s_rng = theano.sandbox.rng_mrg.MRG_RandomStreams(seed)
accept, final_pos = hmc_move(s_rng, shared_positions, energy_fn,
                             stepsize, n_steps)
simulate_updates = hmc_updates(
    shared_positions, stepsize, avg_acceptance_rate,
    final_pos=final_pos, accept=accept,
    stepsize_min=stepsize_min, stepsize_max=stepsize_max,
    stepsize_inc=stepsize_inc, stepsize_dec=stepsize_dec,
    target_acceptance_rate=target_acceptance_rate,
    avg_acceptance_slowness=avg_acceptance_slowness)
simulate = function([], [], updates=simulate_updates)
return cls(positions=shared_positions, stepsize=stepsize,
           stepsize_min=stepsize_min, stepsize_max=stepsize_max,
           avg_acceptance_rate=avg_acceptance_rate,
           target_acceptance_rate=target_acceptance_rate,
           s_rng=s_rng, _updates=simulate_updates, simulate=simulate)
'Returns a new position obtained after `n_steps` of HMC simulation. Parameters kwargs : dictionary The `kwargs` dictionary is passed to the shared variable (self.positions) `get_value()` function. For example, to avoid copying the shared variable value, consider passing `borrow=True`. Returns rval : numpy matrix A numpy matrix with dimensions similar to `initial_position`.'
def draw(self, **kwargs):
self.simulate()
return self.positions.get_value(borrow=False)
'This class is made to support a variable number of layers. :type numpy_rng: numpy.random.RandomState :param numpy_rng: numpy random number generator used to draw initial weights :type theano_rng: theano.tensor.shared_randomstreams.RandomStreams :param theano_rng: Theano random generator; if None is given one is generated based on a seed drawn from `rng` :type n_ins: int :param n_ins: dimension of the input to the DBN :type hidden_layers_sizes: list of ints :param hidden_layers_sizes: intermediate layers size, must contain at least one value :type n_outs: int :param n_outs: dimension of the output of the network'
def __init__(self, numpy_rng, theano_rng=None, n_ins=784, hidden_layers_sizes=[500, 500], n_outs=10):
self.sigmoid_layers = []
self.rbm_layers = []
self.params = []
self.n_layers = len(hidden_layers_sizes)
assert self.n_layers > 0
if not theano_rng:
    theano_rng = MRG_RandomStreams(numpy_rng.randint(2 ** 30))
self.x = T.matrix('x')
self.y = T.ivector('y')
for i in range(self.n_layers):
    if i == 0:
        input_size = n_ins
    else:
        input_size = hidden_layers_sizes[i - 1]
    if i == 0:
        layer_input = self.x
    else:
        layer_input = self.sigmoid_layers[-1].output
    sigmoid_layer = HiddenLayer(rng=numpy_rng,
                                input=layer_input,
                                n_in=input_size,
                                n_out=hidden_layers_sizes[i],
                                activation=T.nnet.sigmoid)
    self.sigmoid_layers.append(sigmoid_layer)
    self.params.extend(sigmoid_layer.params)
    rbm_layer = RBM(numpy_rng=numpy_rng,
                    theano_rng=theano_rng,
                    input=layer_input,
                    n_visible=input_size,
                    n_hidden=hidden_layers_sizes[i],
                    W=sigmoid_layer.W,
                    hbias=sigmoid_layer.b)
    self.rbm_layers.append(rbm_layer)
self.logLayer = LogisticRegression(
    input=self.sigmoid_layers[-1].output,
    n_in=hidden_layers_sizes[-1],
    n_out=n_outs)
self.params.extend(self.logLayer.params)
self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)
self.errors = self.logLayer.errors(self.y)
'Generates a list of functions, for performing one step of gradient descent at a given layer. The function will require as input the minibatch index, and to train an RBM you just need to iterate, calling the corresponding function on all minibatch indexes. :type train_set_x: theano.tensor.TensorType :param train_set_x: Shared var. that contains all datapoints used for training the RBM :type batch_size: int :param batch_size: size of a [mini]batch :param k: number of Gibbs steps to do in CD-k / PCD-k'
def pretraining_functions(self, train_set_x, batch_size, k):
index = T.lscalar('index')
learning_rate = T.scalar('lr')
batch_begin = index * batch_size
batch_end = batch_begin + batch_size
pretrain_fns = []
for rbm in self.rbm_layers:
    cost, updates = rbm.get_cost_updates(learning_rate, persistent=None, k=k)
    fn = theano.function(
        inputs=[index, theano.In(learning_rate, value=0.1)],
        outputs=cost,
        updates=updates,
        givens={self.x: train_set_x[batch_begin:batch_end]})
    pretrain_fns.append(fn)
return pretrain_fns
'Generates a function `train` that implements one step of finetuning, a function `validate` that computes the error on a batch from the validation set, and a function `test` that computes the error on a batch from the testing set. :type datasets: list of pairs of theano.tensor.TensorType :param datasets: It is a list that contains all the datasets; it has to contain three pairs, `train`, `valid`, `test` in this order, where each pair is formed of two Theano variables, one for the datapoints, the other for the labels :type batch_size: int :param batch_size: size of a minibatch :type learning_rate: float :param learning_rate: learning rate used during finetune stage'
def build_finetune_functions(self, datasets, batch_size, learning_rate):
(train_set_x, train_set_y) = datasets[0]
(valid_set_x, valid_set_y) = datasets[1]
(test_set_x, test_set_y) = datasets[2]
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
n_valid_batches //= batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
n_test_batches //= batch_size
index = T.lscalar('index')
gparams = T.grad(self.finetune_cost, self.params)
updates = []
for param, gparam in zip(self.params, gparams):
    updates.append((param, param - gparam * learning_rate))
train_fn = theano.function(
    inputs=[index],
    outputs=self.finetune_cost,
    updates=updates,
    givens={
        self.x: train_set_x[index * batch_size:(index + 1) * batch_size],
        self.y: train_set_y[index * batch_size:(index + 1) * batch_size]})
test_score_i = theano.function(
    [index],
    self.errors,
    givens={
        self.x: test_set_x[index * batch_size:(index + 1) * batch_size],
        self.y: test_set_y[index * batch_size:(index + 1) * batch_size]})
valid_score_i = theano.function(
    [index],
    self.errors,
    givens={
        self.x: valid_set_x[index * batch_size:(index + 1) * batch_size],
        self.y: valid_set_y[index * batch_size:(index + 1) * batch_size]})

def valid_score():
    return [valid_score_i(i) for i in range(n_valid_batches)]

def test_score():
    return [test_score_i(i) for i in range(n_test_batches)]

return (train_fn, valid_score, test_score)
'nh :: dimension of the hidden layer nc :: number of classes ne :: number of word embeddings in the vocabulary de :: dimension of the word embeddings cs :: word window context size'
def __init__(self, nh, nc, ne, de, cs):
self.emb = theano.shared(name='embeddings',
                         value=0.2 * numpy.random.uniform(
                             -1.0, 1.0,
                             (ne + 1, de)).astype(theano.config.floatX))
self.wx = theano.shared(name='wx',
                        value=0.2 * numpy.random.uniform(
                            -1.0, 1.0,
                            (de * cs, nh)).astype(theano.config.floatX))
self.wh = theano.shared(name='wh',
                        value=0.2 * numpy.random.uniform(
                            -1.0, 1.0,
                            (nh, nh)).astype(theano.config.floatX))
self.w = theano.shared(name='w',
                       value=0.2 * numpy.random.uniform(
                           -1.0, 1.0,
                           (nh, nc)).astype(theano.config.floatX))
self.bh = theano.shared(name='bh',
                        value=numpy.zeros(nh, dtype=theano.config.floatX))
self.b = theano.shared(name='b',
                       value=numpy.zeros(nc, dtype=theano.config.floatX))
self.h0 = theano.shared(name='h0',
                        value=numpy.zeros(nh, dtype=theano.config.floatX))
self.params = [self.emb, self.wx, self.wh, self.w, self.bh, self.b, self.h0]
idxs = T.imatrix()
x = self.emb[idxs].reshape((idxs.shape[0], de * cs))
y_sentence = T.ivector('y_sentence')

def recurrence(x_t, h_tm1):
    h_t = T.nnet.sigmoid(T.dot(x_t, self.wx) +
                         T.dot(h_tm1, self.wh) + self.bh)
    s_t = T.nnet.softmax(T.dot(h_t, self.w) + self.b)
    return [h_t, s_t]

[h, s], _ = theano.scan(fn=recurrence,
                        sequences=x,
                        outputs_info=[self.h0, None],
                        n_steps=x.shape[0])
p_y_given_x_sentence = s[:, 0, :]
y_pred = T.argmax(p_y_given_x_sentence, axis=1)
lr = T.scalar('lr')
sentence_nll = -T.mean(T.log(p_y_given_x_sentence)
                       [T.arange(x.shape[0]), y_sentence])
sentence_gradients = T.grad(sentence_nll, self.params)
sentence_updates = OrderedDict((p, p - lr * g)
                               for p, g in
                               zip(self.params, sentence_gradients))
self.classify = theano.function(inputs=[idxs], outputs=y_pred)
self.sentence_train = theano.function(inputs=[idxs, y_sentence, lr],
                                      outputs=sentence_nll,
                                      updates=sentence_updates)
self.normalize = theano.function(
    inputs=[],
    updates={self.emb: self.emb /
             T.sqrt((self.emb ** 2).sum(axis=1)).dimshuffle(0, 'x')})
'Allocate a LeNetConvPoolLayer with shared variable internal parameters. :type rng: numpy.random.RandomState :param rng: a random number generator used to initialize weights :type input: theano.tensor.dtensor4 :param input: symbolic image tensor, of shape image_shape :type filter_shape: tuple or list of length 4 :param filter_shape: (number of filters, num input feature maps, filter height, filter width) :type image_shape: tuple or list of length 4 :param image_shape: (batch size, num input feature maps, image height, image width) :type poolsize: tuple or list of length 2 :param poolsize: the downsampling (pooling) factor (#rows, #cols)'
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
assert image_shape[1] == filter_shape[1]
self.input = input
fan_in = numpy.prod(filter_shape[1:])
fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) //
           numpy.prod(poolsize))
W_bound = numpy.sqrt(6.0 / (fan_in + fan_out))
self.W = theano.shared(
    numpy.asarray(rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                  dtype=theano.config.floatX),
    borrow=True)
b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
self.b = theano.shared(value=b_values, borrow=True)
conv_out = conv2d(input=input, filters=self.W,
                  filter_shape=filter_shape, input_shape=image_shape)
pooled_out = pool.pool_2d(input=conv_out, ds=poolsize, ignore_border=True)
self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
self.params = [self.W, self.b]
self.input = input
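# Worked example (not from the original source) of the fan-in / fan-out
# arithmetic above, for filter_shape = (20, 1, 5, 5) and poolsize = (2, 2).
import numpy

filter_shape = (20, 1, 5, 5)
poolsize = (2, 2)
fan_in = numpy.prod(filter_shape[1:])                      # 1 * 5 * 5 = 25
fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) //
           numpy.prod(poolsize))                           # 20 * 25 // 4 = 125
W_bound = numpy.sqrt(6.0 / (fan_in + fan_out))             # sqrt(6/150) = 0.2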
'Constructs and compiles Theano functions for training and sequence generation. n_hidden : integer Number of hidden units of the conditional RBMs. n_hidden_recurrent : integer Number of hidden units of the RNN. lr : float Learning rate r : (integer, integer) tuple Specifies the pitch range of the piano-roll in MIDI note numbers, including r[0] but not r[1], such that r[1]-r[0] is the number of visible units of the RBM at a given time step. The default (21, 109) corresponds to the full range of piano (88 notes). dt : float Sampling period when converting the MIDI files into piano-rolls, or equivalently the time difference between consecutive time steps.'
def __init__(self, n_hidden=150, n_hidden_recurrent=100, lr=0.001, r=(21, 109), dt=0.3):
self.r = r self.dt = dt (v, v_sample, cost, monitor, params, updates_train, v_t, updates_generate) = build_rnnrbm((r[1] - r[0]), n_hidden, n_hidden_recurrent) gradient = T.grad(cost, params, consider_constant=[v_sample]) updates_train.update(((p, (p - (lr * g))) for (p, g) in zip(params, gradient))) self.train_function = theano.function([v], monitor, updates=updates_train) self.generate_function = theano.function([], v_t, updates=updates_generate)
'Train the RNN-RBM via stochastic gradient descent (SGD) using MIDI files converted to piano-rolls. files : list of strings List of MIDI files that will be loaded as piano-rolls for training. batch_size : integer Training sequences will be split into subsequences of at most this size before applying the SGD updates. num_epochs : integer Number of epochs (passes over the training set) performed. The user can safely interrupt training with Ctrl+C at any time.'
def train(self, files, batch_size=100, num_epochs=200):
assert (len(files) > 0), 'Training set is empty! (did you download the data files?)' dataset = [midiread(f, self.r, self.dt).piano_roll.astype(theano.config.floatX) for f in files] try: for epoch in range(num_epochs): numpy.random.shuffle(dataset) costs = [] for (s, sequence) in enumerate(dataset): for i in range(0, len(sequence), batch_size): cost = self.train_function(sequence[i:(i + batch_size)]) costs.append(cost) print(('Epoch %i/%i' % ((epoch + 1), num_epochs))) print(numpy.mean(costs)) sys.stdout.flush() except KeyboardInterrupt: print('Interrupted by user.')
'Generate a sample sequence, plot the resulting piano-roll and save it as a MIDI file. filename : string A MIDI file will be created at this location. show : boolean If True, a piano-roll of the generated sequence will be shown.'
def generate(self, filename, show=True):
piano_roll = self.generate_function() midiwrite(filename, piano_roll, self.r, self.dt) if show: extent = ((0, (self.dt * len(piano_roll))) + self.r) pylab.figure() pylab.imshow(piano_roll.T, origin='lower', aspect='auto', interpolation='nearest', cmap=pylab.cm.gray_r, extent=extent) pylab.xlabel('time (s)') pylab.ylabel('MIDI note number') pylab.title('generated piano-roll')
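An illustrative driver tying the three methods above together; it assumes they belong to a class named RnnRbm and that MIDI training files exist at the (hypothetical) path used here.
import glob
model = RnnRbm(n_hidden=150, n_hidden_recurrent=100, lr=0.001)
files = glob.glob('data/Nottingham/train/*.mid')    # hypothetical dataset location
model.train(files, batch_size=100, num_epochs=200)  # interruptible with Ctrl+C
model.generate('sample1.mid')                       # writes a MIDI file and plots the piano-roll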
'Initialize the parameters of the logistic regression :type input: theano.tensor.TensorType :param input: symbolic variable that describes the input of the architecture ( one minibatch) :type n_in: int :param n_in: number of input units, the dimension of the space in which the datapoint lies :type n_out: int :param n_out: number of output units, the dimension of the space in which the target lies'
def __init__(self, input, n_in, n_out):
self.theta = theano.shared(value=numpy.zeros(((n_in * n_out) + n_out), dtype=theano.config.floatX), name='theta', borrow=True) self.W = self.theta[0:(n_in * n_out)].reshape((n_in, n_out)) self.b = self.theta[(n_in * n_out):((n_in * n_out) + n_out)] self.p_y_given_x = T.nnet.softmax((T.dot(input, self.W) + self.b)) self.y_pred = T.argmax(self.p_y_given_x, axis=1) self.input = input
'Return the negative log-likelihood of the prediction of this model under a given target distribution. .. math:: \frac{1}{|\mathcal{D}|}\mathcal{L}(\theta=\{W,b\}, \mathcal{D}) = \frac{1}{|\mathcal{D}|}\sum_{i=0}^{|\mathcal{D}|} \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\ \ell(\theta=\{W,b\}, \mathcal{D}) :type y: theano.tensor.TensorType :param y: corresponds to a vector that gives for each example the correct label'
def negative_log_likelihood(self, y):
return (- T.mean(T.log(self.p_y_given_x)[(T.arange(y.shape[0]), y)]))
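A small NumPy sketch of the advanced-indexing trick used above: log_p[arange(n), y] selects, for each example i, the log-probability of its correct label y[i]; the toy probabilities are illustrative.
import numpy
p = numpy.array([[0.7, 0.2, 0.1],
                 [0.1, 0.8, 0.1]])   # p_y_given_x for 2 examples and 3 classes
y = numpy.array([0, 1])              # correct labels
nll = -numpy.mean(numpy.log(p)[numpy.arange(y.shape[0]), y])
# nll == -(log 0.7 + log 0.8) / 2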
'Return a float representing the number of errors in the minibatch over the total number of examples of the minibatch :type y: theano.tensor.TensorType :param y: corresponds to a vector that gives for each example the correct label'
def errors(self, y):
if (y.ndim != self.y_pred.ndim): raise TypeError('y should have the same shape as self.y_pred', ('y', y.type, 'y_pred', self.y_pred.type)) if y.dtype.startswith('int'): return T.mean(T.neq(self.y_pred, y)) else: raise NotImplementedError()
'Typical hidden layer of an MLP: units are fully connected and have a sigmoidal activation function. Weight matrix W is of shape (n_in, n_out) and the bias vector b is of shape (n_out,). NOTE : The non-linearity used here is tanh. Hidden unit activation is given by: tanh(dot(input, W) + b) :type rng: numpy.random.RandomState :param rng: a random number generator used to initialize weights :type input: theano.tensor.dmatrix :param input: a symbolic tensor of shape (n_examples, n_in) :type n_in: int :param n_in: dimensionality of input :type n_out: int :param n_out: number of hidden units :type activation: theano.Op or function :param activation: Non-linearity to be applied in the hidden layer'
def __init__(self, rng, input, n_in, n_out, W=None, b=None, activation=T.tanh):
self.input = input if (W is None): W_values = numpy.asarray(rng.uniform(low=(- numpy.sqrt((6.0 / (n_in + n_out)))), high=numpy.sqrt((6.0 / (n_in + n_out))), size=(n_in, n_out)), dtype=theano.config.floatX) if (activation == theano.tensor.nnet.sigmoid): W_values *= 4 W = theano.shared(value=W_values, name='W', borrow=True) if (b is None): b_values = numpy.zeros((n_out,), dtype=theano.config.floatX) b = theano.shared(value=b_values, name='b', borrow=True) self.W = W self.b = b lin_output = (T.dot(input, self.W) + self.b) self.output = (lin_output if (activation is None) else activation(lin_output)) self.params = [self.W, self.b]
'Initialize the parameters for the multilayer perceptron :type rng: numpy.random.RandomState :param rng: a random number generator used to initialize weights :type input: theano.tensor.TensorType :param input: symbolic variable that describes the input of the architecture (one minibatch) :type n_in: int :param n_in: number of input units, the dimension of the space in which the datapoints lie :type n_hidden: int :param n_hidden: number of hidden units :type n_out: int :param n_out: number of output units, the dimension of the space in which the labels lie'
def __init__(self, rng, input, n_in, n_hidden, n_out):
self.hiddenLayer = HiddenLayer(rng=rng, input=input, n_in=n_in, n_out=n_hidden, activation=T.tanh) self.logRegressionLayer = LogisticRegression(input=self.hiddenLayer.output, n_in=n_hidden, n_out=n_out) self.L1 = (abs(self.hiddenLayer.W).sum() + abs(self.logRegressionLayer.W).sum()) self.L2_sqr = ((self.hiddenLayer.W ** 2).sum() + (self.logRegressionLayer.W ** 2).sum()) self.negative_log_likelihood = self.logRegressionLayer.negative_log_likelihood self.errors = self.logRegressionLayer.errors self.params = (self.hiddenLayer.params + self.logRegressionLayer.params) self.input = input
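A hedged sketch of how the MLP above is typically combined into a regularized training cost; the symbols x, y and the penalty weights are illustrative, not prescribed by the class.
import numpy
import theano.tensor as T
rng = numpy.random.RandomState(1234)
x = T.matrix('x')
y = T.ivector('y')
classifier = MLP(rng=rng, input=x, n_in=28 * 28, n_hidden=500, n_out=10)
cost = (classifier.negative_log_likelihood(y)
        + 0.00 * classifier.L1          # L1 penalty weight (illustrative)
        + 0.0001 * classifier.L2_sqr)   # L2 penalty weight (illustrative)
gparams = [T.grad(cost, param) for param in classifier.params]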
'This class is made to support a variable number of layers. :type numpy_rng: numpy.random.RandomState :param numpy_rng: numpy random number generator used to draw initial weights :type theano_rng: theano.tensor.shared_randomstreams.RandomStreams :param theano_rng: Theano random generator; if None is given one is generated based on a seed drawn from `rng` :type n_ins: int :param n_ins: dimension of the input to the sdA :type hidden_layers_sizes: list of ints :param hidden_layers_sizes: intermediate layers size, must contain at least one value :type n_outs: int :param n_outs: dimension of the output of the network :type corruption_levels: list of float :param corruption_levels: amount of corruption to use for each layer'
def __init__(self, numpy_rng, theano_rng=None, n_ins=784, hidden_layers_sizes=[500, 500], n_outs=10, corruption_levels=[0.1, 0.1]):
self.sigmoid_layers = [] self.dA_layers = [] self.params = [] self.n_layers = len(hidden_layers_sizes) assert (self.n_layers > 0) if (not theano_rng): theano_rng = RandomStreams(numpy_rng.randint((2 ** 30))) self.x = T.matrix('x') self.y = T.ivector('y') for i in range(self.n_layers): if (i == 0): input_size = n_ins else: input_size = hidden_layers_sizes[(i - 1)] if (i == 0): layer_input = self.x else: layer_input = self.sigmoid_layers[(-1)].output sigmoid_layer = HiddenLayer(rng=numpy_rng, input=layer_input, n_in=input_size, n_out=hidden_layers_sizes[i], activation=T.nnet.sigmoid) self.sigmoid_layers.append(sigmoid_layer) self.params.extend(sigmoid_layer.params) dA_layer = dA(numpy_rng=numpy_rng, theano_rng=theano_rng, input=layer_input, n_visible=input_size, n_hidden=hidden_layers_sizes[i], W=sigmoid_layer.W, bhid=sigmoid_layer.b) self.dA_layers.append(dA_layer) self.logLayer = LogisticRegression(input=self.sigmoid_layers[(-1)].output, n_in=hidden_layers_sizes[(-1)], n_out=n_outs) self.params.extend(self.logLayer.params) self.finetune_cost = self.logLayer.negative_log_likelihood(self.y) self.errors = self.logLayer.errors(self.y)
'Generates a list of functions, each of them implementing one step in training the dA corresponding to the layer with the same index. The function will require as input the minibatch index, and to train a dA you just need to iterate, calling the corresponding function on all minibatch indexes. :type train_set_x: theano.tensor.TensorType :param train_set_x: Shared variable that contains all datapoints used for training the dA :type batch_size: int :param batch_size: size of a [mini]batch :type learning_rate: float :param learning_rate: learning rate used during training for any of the dA layers'
def pretraining_functions(self, train_set_x, batch_size):
index = T.lscalar('index') corruption_level = T.scalar('corruption') learning_rate = T.scalar('lr') batch_begin = (index * batch_size) batch_end = (batch_begin + batch_size) pretrain_fns = [] for dA in self.dA_layers: (cost, updates) = dA.get_cost_updates(corruption_level, learning_rate) fn = theano.function(inputs=[index, theano.In(corruption_level, value=0.2), theano.In(learning_rate, value=0.1)], outputs=cost, updates=updates, givens={self.x: train_set_x[batch_begin:batch_end]}) pretrain_fns.append(fn) return pretrain_fns
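A usage sketch for the returned pre-training functions; sda, train_set_x, n_train_batches, the corruption levels and the learning rate are placeholders for whatever the surrounding script defines.
pretraining_fns = sda.pretraining_functions(train_set_x=train_set_x, batch_size=1)
corruption_levels = [0.1, 0.2, 0.3]               # one value per dA layer (illustrative)
for i in range(sda.n_layers):
    for epoch in range(15):                       # pre-training epochs per layer (illustrative)
        costs = [pretraining_fns[i](index=batch_index,
                                    corruption=corruption_levels[i],
                                    lr=0.001)
                 for batch_index in range(n_train_batches)]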
'Generates a function `train` that implements one step of finetuning, a function `validate` that computes the error on a batch from the validation set, and a function `test` that computes the error on a batch from the testing set :type datasets: list of pairs of theano.tensor.TensorType :param datasets: It is a list that contains all the datasets; it has to contain three pairs, `train`, `valid`, `test`, in this order, where each pair is formed of two Theano variables, one for the datapoints, the other for the labels :type batch_size: int :param batch_size: size of a minibatch :type learning_rate: float :param learning_rate: learning rate used during finetune stage'
def build_finetune_functions(self, datasets, batch_size, learning_rate):
(train_set_x, train_set_y) = datasets[0] (valid_set_x, valid_set_y) = datasets[1] (test_set_x, test_set_y) = datasets[2] n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] n_valid_batches //= batch_size n_test_batches = test_set_x.get_value(borrow=True).shape[0] n_test_batches //= batch_size index = T.lscalar('index') gparams = T.grad(self.finetune_cost, self.params) updates = [(param, (param - (gparam * learning_rate))) for (param, gparam) in zip(self.params, gparams)] train_fn = theano.function(inputs=[index], outputs=self.finetune_cost, updates=updates, givens={self.x: train_set_x[(index * batch_size):((index + 1) * batch_size)], self.y: train_set_y[(index * batch_size):((index + 1) * batch_size)]}, name='train') test_score_i = theano.function([index], self.errors, givens={self.x: test_set_x[(index * batch_size):((index + 1) * batch_size)], self.y: test_set_y[(index * batch_size):((index + 1) * batch_size)]}, name='test') valid_score_i = theano.function([index], self.errors, givens={self.x: valid_set_x[(index * batch_size):((index + 1) * batch_size)], self.y: valid_set_y[(index * batch_size):((index + 1) * batch_size)]}, name='valid') def valid_score(): return [valid_score_i(i) for i in range(n_valid_batches)] def test_score(): return [test_score_i(i) for i in range(n_test_batches)] return (train_fn, valid_score, test_score)
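A usage sketch of the three functions returned above; datasets, batch size and learning rate are placeholders, and the early-stopping logic of the full training script is omitted.
import numpy
train_fn, validate_model, test_model = sda.build_finetune_functions(
    datasets=datasets, batch_size=1, learning_rate=0.1)
for epoch in range(10):                              # fine-tuning epochs (illustrative)
    for minibatch_index in range(n_train_batches):
        train_fn(minibatch_index)                    # one SGD step on one minibatch
    validation_loss = numpy.mean(validate_model())   # mean error over all validation batches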
'Initialize the parameters of the logistic regression :type input: theano.tensor.TensorType :param input: symbolic variable that describes the input of the architecture (one minibatch) :type n_in: int :param n_in: number of input units, the dimension of the space in which the datapoints lie :type n_out: int :param n_out: number of output units, the dimension of the space in which the labels lie'
def __init__(self, input, n_in, n_out):
self.W = theano.shared(value=numpy.zeros((n_in, n_out), dtype=theano.config.floatX), name='W', borrow=True) self.b = theano.shared(value=numpy.zeros((n_out,), dtype=theano.config.floatX), name='b', borrow=True) self.p_y_given_x = T.nnet.softmax((T.dot(input, self.W) + self.b)) self.y_pred = T.argmax(self.p_y_given_x, axis=1) self.params = [self.W, self.b] self.input = input
'Return the mean of the negative log-likelihood of the prediction of this model under a given target distribution. .. math:: \frac{1}{|\mathcal{D}|} \mathcal{L}(\theta=\{W,b\}, \mathcal{D}) = \frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|} \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\ \ell(\theta=\{W,b\}, \mathcal{D}) :type y: theano.tensor.TensorType :param y: corresponds to a vector that gives for each example the correct label Note: we use the mean instead of the sum so that the learning rate is less dependent on the batch size'
def negative_log_likelihood(self, y):
return (- T.mean(T.log(self.p_y_given_x)[(T.arange(y.shape[0]), y)]))
'Return a float representing the number of errors in the minibatch over the total number of examples of the minibatch; zero-one loss over the size of the minibatch :type y: theano.tensor.TensorType :param y: corresponds to a vector that gives for each example the correct label'
def errors(self, y):
if (y.ndim != self.y_pred.ndim): raise TypeError('y should have the same shape as self.y_pred', ('y', y.type, 'y_pred', self.y_pred.type)) if y.dtype.startswith('int'): return T.mean(T.neq(self.y_pred, y)) else: raise NotImplementedError()
'Construct a DataSet. Arguments: width, height - image size max_steps - the number of time steps to store phi_length - number of images to concatenate into a state rng - initialized numpy random number generator, used to choose random minibatches'
def __init__(self, width, height, rng, max_steps=1000, phi_length=4):
self.width = width self.height = height self.max_steps = max_steps self.phi_length = phi_length self.rng = rng self.imgs = np.zeros((max_steps, height, width), dtype='uint8') self.actions = np.zeros(max_steps, dtype='int32') self.rewards = np.zeros(max_steps, dtype=floatX) self.terminal = np.zeros(max_steps, dtype='bool') self.bottom = 0 self.top = 0 self.size = 0
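A hedged sketch (not necessarily this class's actual method) of how such a circular replay buffer is typically advanced when a new sample arrives, using the bottom/top/size fields initialized above.
def add_sample(ds, img, action, reward, terminal):
    # Write the new (image, action, reward, terminal) sample at `top`,
    # then advance the ring-buffer indices.
    ds.imgs[ds.top] = img
    ds.actions[ds.top] = action
    ds.rewards[ds.top] = reward
    ds.terminal[ds.top] = terminal
    if ds.size == ds.max_steps:
        ds.bottom = (ds.bottom + 1) % ds.max_steps   # buffer full: drop the oldest entry
    else:
        ds.size += 1
    ds.top = (ds.top + 1) % ds.max_steps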