Columns: desc (string, 3 to 26.7k chars), decl (string, 11 to 7.89k chars), bodies (string, 8 to 553k chars)
'.. todo:: WRITEME'
def lmul(self, x):
    reshaped = x.reshape((self.input_groups,
                          x.shape[0] / self.input_groups,
                          x.shape[1], x.shape[2], x.shape[3]))
    out = LocalDot.rmul(self, reshaped)
    return out.reshape((out.shape[0] * out.shape[1],
                        out.shape[2], out.shape[3], out.shape[4]))
'.. todo:: WRITEME'
def get_params(self):
    return [self._filters]
'Set up a test image and filter to re-use'
def setUp(self):
    skip_if_no_gpu()
    self.image = numpy.random.rand(16, 3, 3, 1).astype(theano.config.floatX)
    self.image_tensor = tensor.tensor4()
    self.filters_values = numpy.ones((2, 2, 16, 2, 2, 1, 16),
                                     dtype=theano.config.floatX)
    self.filters = sharedX(self.filters_values, name='filters')
    self.local = Local(self.filters, (3, 3), 1)
'Check whether the local receptive field has stored the correct filters'
def test_get_params(self):
    assert self.local.get_params() == [self.filters]
'Make sure the shape of the output is correct'
def test_lmul(self):
    f = theano.function([self.image_tensor],
                        self.local.lmul(self.image_tensor))
    assert f(self.image).shape == (16, 2, 2, 1)
'Create random local receptive fields and check whether they can be applied and give a sensible output shape'
def test_make_random_local(self):
    local = make_random_local(1, 16, ('c', 0, 1, 'b'), 1,
                              (3, 3), 16, ('c', 0, 1, 'b'), (2, 2))
    f = theano.function([self.image_tensor], local.lmul(self.image_tensor))
    assert f(self.image).shape == (16, 2, 2, 1)
'Set up a test image and filter to re-use.'
def setUp(self):
    skip_if_no_gpu()
    if not dnn_available():
        raise SkipTest('Skipping tests because cuDNN is not available')
    self.orig_floatX = theano.config.floatX
    theano.config.floatX = 'float32'
    self.image = np.random.rand(1, 1, 3, 3).astype(theano.config.floatX)
    self.image_tensor = tensor.tensor4()
    self.input_space = Conv2DSpace((3, 3), 1, axes=('b', 'c', 0, 1))
    self.filters_values = np.ones((1, 1, 2, 2), dtype=theano.config.floatX)
    self.filters = sharedX(self.filters_values, name='filters')
    self.batch_size = 1
    self.cudnn2d = Cudnn2D(self.filters, self.batch_size, self.input_space)
'After test clean up.'
def tearDown(self):
    theano.config.floatX = self.orig_floatX
'Check correct errors are raised when bad input is given.'
def test_value_errors(self):
    with self.assertRaises(AssertionError):
        Cudnn2D(filters=self.filters, batch_size=-1,
                input_space=self.input_space)
'Check whether the cudnn has stored the correct filters.'
def test_get_params(self):
    self.assertEqual(self.cudnn2d.get_params(), [self.filters])
'Check whether the weights are returned in the correct topological format.'
def test_get_weights_topo(self):
    self.assertTrue(np.all(
        self.cudnn2d.get_weights_topo(borrow=True) ==
        np.transpose(self.filters.get_value(borrow=True), (0, 2, 3, 1))))
'Use conv2D to check whether the convolution worked correctly.'
def test_lmul(self):
    conv2d = Conv2D(self.filters, self.batch_size, self.input_space,
                    output_axes=('b', 'c', 0, 1))
    f_co = theano.function([self.image_tensor],
                           conv2d.lmul(self.image_tensor))
    f_cu = theano.function([self.image_tensor],
                           self.cudnn2d.lmul(self.image_tensor))
    self.assertTrue(np.allclose(f_co(self.image), f_cu(self.image)))
'Make sure that setting the batch size actually changes the property.'
def test_set_batch_size(self):
    img_shape = self.cudnn2d._img_shape
    self.cudnn2d.set_batch_size(self.batch_size + 10)
    np.testing.assert_equal(self.cudnn2d._img_shape[0], self.batch_size + 10)
    np.testing.assert_equal(self.cudnn2d._img_shape[1:], img_shape[1:])
'Test different output axes. Use different output axes and see whether the output is what we expect.'
def test_axes(self):
    default_axes = ('b', 'c', 0, 1)
    axes = (0, 'b', 1, 'c')
    another_axes = (0, 1, 'c', 'b')
    map_to_default = tuple(axes.index(axis) for axis in default_axes)
    map_to_another_axes = tuple(default_axes.index(axis)
                                for axis in another_axes)
    input_space = Conv2DSpace((3, 3), num_channels=1, axes=another_axes)
    cudnn2d = Cudnn2D(self.filters, 1, input_space, output_axes=axes)
    f = theano.function([self.image_tensor],
                        cudnn2d.lmul(self.image_tensor))
    f_def = theano.function([self.image_tensor],
                            self.cudnn2d.lmul(self.image_tensor))
    output = f(np.transpose(self.image, map_to_another_axes))
    output_def = np.array(f_def(self.image))
    output = np.transpose(output, map_to_default)
    np.testing.assert_allclose(output_def, output)
    np.testing.assert_equal(output_def.shape, output.shape)
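The index gymnastics in the test above reduce to computing the permutation that carries one axis tuple onto another. A minimal standalone check of that idiom, using the same tuples as the test:

default_axes = ('b', 'c', 0, 1)
axes = (0, 'b', 1, 'c')
map_to_default = tuple(axes.index(axis) for axis in default_axes)
# position of each default axis within `axes`
assert map_to_default == (1, 3, 0, 2)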
'Go from 2 to 3 channels and see whether the shape is correct.'
def test_channels(self):
    input_space = Conv2DSpace((3, 3), num_channels=3)
    filters_values = np.ones((2, 3, 2, 2), dtype=theano.config.floatX)
    filters = sharedX(filters_values)
    image = np.random.rand(1, 3, 3, 3).astype(theano.config.floatX)
    cudnn2d = Cudnn2D(filters, 1, input_space)
    f = theano.function([self.image_tensor], cudnn2d.lmul(self.image_tensor))
    assert f(image).shape == (1, 2, 2, 2)
'Test a random convolution. Create a random convolution and check whether the shape, axes and input space are all what we expect.'
def test_make_random_conv2D(self):
    output_space = Conv2DSpace((2, 2), 1)
    cudnn2d = make_random_conv2D(1, self.input_space, output_space, (2, 2), 1)
    f = theano.function([self.image_tensor], cudnn2d.lmul(self.image_tensor))
    assert f(self.image).shape == (1, 2, 2, 1)
    assert cudnn2d._input_space == self.input_space
    assert cudnn2d._output_axes == output_space.axes
'Set up a test image and filter to re-use'
def setUp(self):
    self.orig_floatX = theano.config.floatX
    theano.config.floatX = 'float32'
    theano.sandbox.cuda.use('gpu')
    self.image = numpy.random.rand(16, 3, 3, 1).astype(theano.config.floatX)
    self.image_tensor = tensor.tensor4()
    self.filters_values = numpy.random.rand(16, 2, 2, 32).astype(
        theano.config.floatX)
    self.filters = sharedX(self.filters_values, name='filters')
    self.conv2d = Conv2D(self.filters)
'Emulate c01b convolution with scipy'
def scipy_conv_c01b(self, images, filters):
    assert images.ndim == 4
    assert filters.ndim == 4
    in_chans, rows, cols, bs = images.shape
    in_chans_, rows_, cols_, out_chans = filters.shape
    assert in_chans_ == in_chans
    out_bc01 = [[sum(scipy.ndimage.filters.convolve(
                         images[c, :, :, b], filters[c, ::-1, ::-1, i])
                     for c in xrange(in_chans))
                 for i in xrange(out_chans)]
                for b in xrange(bs)]
    out_c01b = numpy.array(out_bc01).transpose(1, 2, 3, 0)
    return out_c01b
'Check whether the conv2d has stored the correct filters'
def test_get_params(self):
    assert self.conv2d.get_params() == [self.filters]
'Use SciPy\'s ndimage to check whether the convolution worked correctly'
def test_lmul(self):
    f = theano.function([self.image_tensor],
                        self.conv2d.lmul(self.image_tensor))
    if scipy_available:
        self.assertTrue(numpy.allclose(
            f(self.image),
            self.scipy_conv_c01b(self.image,
                                 self.filters_values)[:, :2, :2, :]))
'Check whether this function outputs the right shape'
def test_lmul_T(self):
    conv2d = self.conv2d.lmul(self.image_tensor)
    f = theano.function([self.image_tensor], self.conv2d.lmul_T(conv2d))
    assert f(self.image).shape == self.image.shape
'Use custom output axes and check whether it worked'
def test_axes(self):
    default_axes = ('c', 0, 1, 'b')
    axes = (0, 'b', 1, 'c')
    mapping = tuple(axes.index(axis) for axis in default_axes)
    conv2d = Conv2D(self.filters, output_axes=axes)
    f_axes = theano.function([self.image_tensor],
                             conv2d.lmul(self.image_tensor))
    f = theano.function([self.image_tensor],
                        self.conv2d.lmul(self.image_tensor))
    output_axes = f_axes(self.image)
    output = f(self.image)
    output_axes = numpy.transpose(output_axes, mapping)
    numpy.testing.assert_allclose(output, output_axes)
    assert output.shape == output_axes.shape
'Go from 32 to 16 channels and see whether that works without error'
def test_channels(self):
    filters_values = numpy.ones((32, 2, 2, 16), dtype=theano.config.floatX)
    filters = sharedX(filters_values)
    image = numpy.random.rand(32, 3, 3, 1).astype(theano.config.floatX)
    conv2d = Conv2D(filters)
    f = theano.function([self.image_tensor], conv2d.lmul(self.image_tensor))
    assert f(image).shape == (16, 2, 2, 1)
'Make random filters'
def test_make_random_conv2D(self):
    default_axes = ('c', 0, 1, 'b')
    conv2d = make_random_conv2D(1, 16, default_axes, default_axes, 16, (2, 2))
    f = theano.function([self.image_tensor], conv2d.lmul(self.image_tensor))
    assert f(self.image).shape == (16, 2, 2, 1)
    assert conv2d.output_axes == default_axes
'Make random sparse filters, count whether the number of non-zero elements is sensible'
def test_make_sparse_random_conv2D(self):
    axes = ('c', 0, 1, 'b')
    input_space = Conv2DSpace((3, 3), 16, axes=axes)
    output_space = Conv2DSpace((3, 3), 16, axes=axes)
    num_nonzero = 2
    kernel_shape = (2, 2)
    conv2d = make_sparse_random_conv2D(num_nonzero, input_space,
                                       output_space, kernel_shape)
    f = theano.function([self.image_tensor], conv2d.lmul(self.image_tensor))
    assert f(self.image).shape == (16, 2, 2, 1)
    assert conv2d.output_axes == axes
    assert numpy.count_nonzero(conv2d._filters.get_value()) >= 32
'Very basic test to see whether a detector layer can be set up without error. Not checking much for the actual output.'
def test_setup_detector_layer_c01b(self):
    axes = ('c', 0, 1, 'b')
    layer = MaxoutConvC01B(16, 2, (2, 2), (2, 2), (1, 1), 'maxout',
                           irange=1.0)
    input_space = Conv2DSpace((3, 3), 16, axes=axes)
    MLP(layers=[layer], input_space=input_space)
    layer.set_input_space(input_space)
    assert isinstance(layer.input_space, Conv2DSpace)
    input = theano.tensor.tensor4()
    f = theano.function([input], layer.fprop(input))
    f(numpy.random.rand(16, 3, 3, 1).astype(theano.config.floatX))
'Set up a test image and filter to re-use'
def setUp(self):
    self.image = numpy.random.rand(1, 3, 3, 1).astype(theano.config.floatX)
    self.image_tensor = tensor.tensor4()
    self.input_space = Conv2DSpace((3, 3), 1)
    self.filters_values = numpy.ones((1, 1, 2, 2), dtype=theano.config.floatX)
    self.filters = sharedX(self.filters_values, name='filters')
    self.conv2d = Conv2D(self.filters, 1, self.input_space)
'Check correct errors are raised when bad input is given'
def test_value_errors(self):
    bad_filters = sharedX(numpy.zeros((1, 3, 2)))
    self.assertRaises(ValueError, Conv2D, bad_filters, 1, self.input_space)
    self.assertRaises(AssertionError, Conv2D, self.filters, 0,
                      self.input_space)
'Check whether the conv2d has stored the correct filters'
def test_get_params(self):
    assert self.conv2d.get_params() == [self.filters]
'Use SciPy\'s ndimage to check whether the convolution worked correctly'
def test_lmul(self):
    f = theano.function([self.image_tensor],
                        self.conv2d.lmul(self.image_tensor))
    if scipy_available:
        # The original discarded the numpy.allclose result; assert it so
        # the comparison actually participates in the test.
        assert numpy.allclose(
            f(self.image).reshape((2, 2)),
            scipy.ndimage.filters.convolve(
                self.image.reshape((3, 3)),
                self.filters_values.reshape((2, 2)))[:2, :2])
'Check whether this function outputs the right shape'
def test_lmul_T(self):
    conv2d = self.conv2d.lmul(self.image_tensor)
    f = theano.function([self.image_tensor], self.conv2d.lmul_T(conv2d))
    assert f(self.image).shape == self.image.shape
'Check whether this function outputs the same values as when taking the square manually'
def test_lmul_sq_T(self):
    conv2d_sq = Conv2D(sharedX(numpy.square(self.filters_values)), 1,
                       self.input_space).lmul(self.image_tensor)
    conv2d = self.conv2d.lmul(self.image_tensor)
    f = theano.function([self.image_tensor], self.conv2d.lmul_T(conv2d_sq))
    f2 = theano.function([self.image_tensor], self.conv2d.lmul_sq_T(conv2d))
    numpy.testing.assert_allclose(f(self.image), f2(self.image))
'Make sure that setting the batch size actually changes the property'
def test_set_batch_size(self):
    cur_img_shape = self.conv2d._img_shape
    cur_batch_size = self.conv2d._img_shape[0]
    self.conv2d.set_batch_size(cur_batch_size + 10)
    assert self.conv2d._img_shape[0] == cur_batch_size + 10
    assert self.conv2d._img_shape[1:] == cur_img_shape[1:]
'Use different output axes and see whether the output is what we expect'
def test_axes(self):
    default_axes = ('b', 0, 1, 'c')
    axes = (0, 'b', 1, 'c')
    mapping = tuple(axes.index(axis) for axis in default_axes)
    input_space = Conv2DSpace((3, 3), num_channels=1, axes=axes)
    conv2d = Conv2D(self.filters, 1, input_space, output_axes=axes)
    f_axes = theano.function([self.image_tensor],
                             conv2d.lmul(self.image_tensor))
    f = theano.function([self.image_tensor],
                        self.conv2d.lmul(self.image_tensor))
    output_axes = f_axes(numpy.transpose(self.image, mapping))
    output = f(self.image)
    output_axes = numpy.transpose(output_axes, mapping)
    numpy.testing.assert_allclose(output, output_axes)
    assert output.shape == output_axes.shape
'Go from 2 to 3 channels and see whether the shape is correct'
def test_channels(self):
    input_space = Conv2DSpace((3, 3), num_channels=3)
    filters_values = numpy.ones((2, 3, 2, 2), dtype=theano.config.floatX)
    filters = sharedX(filters_values)
    image = numpy.random.rand(1, 3, 3, 3).astype(theano.config.floatX)
    conv2d = Conv2D(filters, 1, input_space)
    f = theano.function([self.image_tensor], conv2d.lmul(self.image_tensor))
    assert f(image).shape == (1, 2, 2, 2)
'Create a random convolution and check whether the shape, axes and input space are all what we expect'
def test_make_random_conv2D(self):
    output_space = Conv2DSpace((2, 2), 1)
    conv2d = make_random_conv2D(1, self.input_space, output_space, (2, 2), 1)
    f = theano.function([self.image_tensor], conv2d.lmul(self.image_tensor))
    assert f(self.image).shape == (1, 2, 2, 1)
    assert conv2d.input_space == self.input_space
    assert conv2d.output_axes == output_space.axes
'.. todo:: WRITEME'
@functools.wraps(P2LT.get_params)
def get_params(self):
    return [self._filters]
'.. todo:: WRITEME'
@functools.wraps(P2LT.get_weights_topo)
def get_weights_topo(self, borrow):
    return np.transpose(self._filters.get_value(borrow=borrow),
                        (0, 2, 3, 1))
'.. todo:: WRITEME properly dot(x, A) This method overrides the original Conv2D lmul to make it work with arbitrary axis orders'
def lmul(self, x):
    assert x.ndim == 4
    axes = self.input_space.axes
    assert len(axes) == 4
    op_axes = ('b', 'c', 0, 1)
    if tuple(axes) != op_axes:
        x = x.dimshuffle(axes.index('b'), axes.index('c'),
                         axes.index(0), axes.index(1))
    rval = conv2d(x, self._filters,
                  image_shape=self._img_shape,
                  filter_shape=self._filters_shape,
                  subsample=self._subsample,
                  border_mode=self._border_mode)
    axes = self.output_axes
    assert len(axes) == 4
    if tuple(axes) != op_axes:
        rval = rval.dimshuffle(op_axes.index(axes[0]), op_axes.index(axes[1]),
                               op_axes.index(axes[2]), op_axes.index(axes[3]))
    return rval
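The axis remapping above is just a permutation lookup. A minimal NumPy stand-in (shapes and axis order are hypothetical) showing the same index computation that dimshuffle receives:

import numpy as np

axes = ('c', 0, 1, 'b')        # assumed current axis order of x
op_axes = ('b', 'c', 0, 1)     # order the underlying conv op expects
x = np.zeros((3, 8, 8, 5))     # c=3, rows=8, cols=8, b=5

order = [axes.index(ax) for ax in op_axes]  # -> [3, 0, 1, 2]
x_bc01 = x.transpose(order)                 # same permutation as dimshuffle
assert x_bc01.shape == (5, 3, 8, 8)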
'.. todo:: WRITEME properly Override the original Conv2D lmul_T to make it work with pylearn format of topological data using dimshuffles'
def lmul_T(self, x):
    assert x.dtype == self._filters.dtype
    op_axes = ('b', 'c', 0, 1)
    axes = self.output_axes
    if tuple(axes) != op_axes:
        x = x.dimshuffle(axes.index('b'), axes.index('c'),
                         axes.index(0), axes.index(1))
    dummy_v = T.tensor4()
    dummy_v.name = 'dummy_v'
    if theano.config.compute_test_value == 'raise':
        dummy_v.tag.test_value = np.zeros((x.tag.test_value.shape[0],
                                           self.input_space.num_channels,
                                           self.input_space.shape[0],
                                           self.input_space.shape[1]),
                                          dtype=dummy_v.dtype)
    z_hs = conv2d(dummy_v, self._filters,
                  image_shape=self._img_shape,
                  filter_shape=self._filters_shape,
                  subsample=self._subsample,
                  border_mode=self._border_mode)
    rval, xdummy = z_hs.owner.op.grad((dummy_v, self._filters), (x,))
    axes = self.input_space.axes
    assert len(axes) == 4
    if tuple(axes) != op_axes:
        rval = rval.dimshuffle(op_axes.index(axes[0]), op_axes.index(axes[1]),
                               op_axes.index(axes[2]), op_axes.index(axes[3]))
    return rval
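The dummy-forward-then-grad construction above works because the gradient of a linear map is its transpose. For any linear operator A (here, the convolution),

    y = A x  \quad\Longrightarrow\quad  \frac{\partial \langle y, v \rangle}{\partial x} = A^{\top} v,

so asking the conv op's grad method for the gradient with respect to its dummy input, with x supplied as the output gradient, returns the transposed convolution applied to x.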
'.. todo:: WRITEME properly Kind of a stupid hacky method used to support convolutional score matching. Ought to find a way to make _filters symbolic rather than shared.'
def lmul_sq_T(self, x):
    assert x.dtype == self._filters.dtype
    op_axes = ('b', 'c', 0, 1)
    axes = self.output_axes
    if tuple(axes) != op_axes:
        x = x.dimshuffle(axes.index('b'), axes.index('c'),
                         axes.index(0), axes.index(1))
    dummy_v = T.tensor4()
    sqfilt = T.square(self._filters)
    z_hs = conv2d(dummy_v, sqfilt,
                  image_shape=self._img_shape,
                  filter_shape=self._filters_shape,
                  subsample=self._subsample,
                  border_mode=self._border_mode)
    rval, xdummy = z_hs.owner.op.grad((dummy_v, sqfilt), (x,))
    axes = self.input_space.axes
    assert len(axes) == 4
    if tuple(axes) != op_axes:
        rval = rval.dimshuffle(op_axes.index(axes[0]), op_axes.index(axes[1]),
                               op_axes.index(axes[2]), op_axes.index(axes[3]))
    return rval
'.. todo:: WRITEME'
def set_batch_size(self, batch_size):
    self._img_shape = tuple([batch_size] + list(self._img_shape[1:]))
'Return self._filters.'
@functools.wraps(P2LT.get_params)
def get_params(self):
    return [self._filters]
'Parameters borrow : TODO TODO'
@functools.wraps(P2LT.get_weights_topo)
def get_weights_topo(self, borrow):
    return np.transpose(self._filters.get_value(borrow=borrow),
                        (0, 2, 3, 1))
'.. todo:: WRITEME properly dot(x, A) This method overrides the original Conv2D lmul to make it work with arbitrary axis orders Parameters x : TODO TODO'
def lmul(self, x):
    assert x.ndim == 4
    axes = self._input_space.axes
    assert len(axes) == 4
    op_axes = ('b', 'c', 0, 1)
    if tuple(axes) != op_axes:
        x = x.dimshuffle(*[axes.index(ax) for ax in op_axes])
    img = gpu_contiguous(x)
    kerns = gpu_contiguous(self._filters)
    shape = GpuDnnConv.get_out_shape(img.shape, kerns.shape,
                                     self._border_mode, self._subsample)
    rval = gpu_alloc_empty(*shape)
    desc = self._desc(img.shape, kerns.shape)
    rval = self._conv_op(img, kerns, rval, desc)
    axes = self._output_axes
    assert len(axes) == 4
    if tuple(self._output_axes) != op_axes:
        rval = rval.dimshuffle(*[op_axes.index(ax)
                                 for ax in self._output_axes])
    return rval
'.. todo:: WRITEME Parameters batch_size : TODO TODO'
def set_batch_size(self, batch_size):
    self._img_shape = tuple([batch_size] + list(self._img_shape[1:]))
'.. todo:: WRITEME'
@functools.wraps(LinearTransform.get_params)
def get_params(self):
    return [self._filters]
'.. todo:: WRITEME'
@functools.wraps(LinearTransform.get_weights_topo)
def get_weights_topo(self, borrow=False):
    inp, rows, cols, outp = range(4)
    raw = self._filters.get_value(borrow=borrow)
    return np.transpose(raw, (outp, rows, cols, inp))
'.. todo:: WRITEME properly dot(x, A) aka, do convolution with input image x'
def lmul(self, x):
    check_cuda(str(type(self)) + '.lmul')
    cpu = 'Cuda' not in str(type(x))
    if cpu:
        x = gpu_from_host(x)
    assert x.ndim == 4
    x_axes = self.input_axes
    assert len(x_axes) == 4
    op_axes = ('c', 0, 1, 'b')
    if tuple(x_axes) != op_axes:
        x = x.dimshuffle(*[x_axes.index(axis) for axis in op_axes])
    x = gpu_contiguous(x)
    if not hasattr(self, 'kernel_stride'):
        self.kernel_stride = (1, 1)
    rval = FilterActs(self.pad, self.partial_sum,
                      self.kernel_stride[0])(x, self._filters)
    rval_axes = self.output_axes
    assert len(rval_axes) == 4
    if cpu:
        rval = host_from_gpu(rval)
    if tuple(rval_axes) != op_axes:
        rval = rval.dimshuffle(*[op_axes.index(axis)
                                 for axis in rval_axes])
    return rval
'.. todo:: WRITEME'
def lmul_T(self, x):
    check_cuda(str(type(self)) + '.lmul_T')
    assert x.dtype == self._filters.dtype
    op_axes = ('c', 0, 1, 'b')
    axes = self.output_axes
    if tuple(axes) != op_axes:
        x = x.dimshuffle(*[axes.index(ax) for ax in op_axes])
    x = gpu_contiguous(x)
    rval = ImageActs(pad=self.pad, partial_sum=self.partial_sum,
                     stride=self.kernel_stride[0])(
        x, self._filters, output_shape=self.input_shape)
    axes = self.input_axes
    assert len(axes) == 4
    if tuple(axes) != op_axes:
        rval = rval.dimshuffle(op_axes.index(axes[0]), op_axes.index(axes[1]),
                               op_axes.index(axes[2]), op_axes.index(axes[3]))
    return rval
'.. todo:: WRITEME'
def lmul_sq_T(self, x):
    raise NotImplementedError('This method is not yet modified since '
                              'copy-pasting from pylearn2.linear.conv2d')
    # Everything below is unreachable dead code left over from the
    # copy-paste; the stranded docstring described it as a hacky method
    # used to support convolutional score matching, and noted one ought
    # to find a way to make _filters symbolic rather than shared.
    assert x.dtype == self._filters.dtype
    op_axes = ('b', 'c', 0, 1)
    axes = self.output_axes
    if tuple(axes) != op_axes:
        x = x.dimshuffle(axes.index('b'), axes.index('c'),
                         axes.index(0), axes.index(1))
    dummy_v = T.tensor4()
    sqfilt = T.square(self._filters)
    z_hs = 0.0
    rval, xdummy = z_hs.owner.op.grad((dummy_v, sqfilt), (x,))
    axes = self.input_space.axes
    assert len(axes) == 4
    if tuple(axes) != op_axes:
        rval = rval.dimshuffle(op_axes.index(axes[0]), op_axes.index(axes[1]),
                               op_axes.index(axes[2]), op_axes.index(axes[3]))
    return rval
'.. todo:: WRITEME'
def set_batch_size(self, batch_size):
    pass
'Return a list of parameters that govern the linear transformation'
def get_params(self):
    raise NotImplementedError()
'Return a batch of filters, formatted topologically. This only really makes sense if you are working with a topological space, such as for a convolution operator. If your transformation is defined on a VectorSpace then some other class like a ViewConverter will need to transform your vector into a topological space; you are not responsible for doing so here.'
def get_weights_topo(self):
    raise NotImplementedError()
'Some transformers such as Conv2D have a fixed batch size. Use this method to change the batch size. Parameters batch_size : int The size of the batch'
def set_batch_size(self, batch_size):
    pass
'Sets the initial values of the matrix'
def __init__(self, W):
    self._W = W
'.. todo:: WRITEME'
@functools.wraps(LinearTransform.get_params) def get_params(self):
    return [self._W]
'.. todo:: WRITEME Parameters x : ndarray, 1d or 2d The input data'
def lmul(self, x):
    return T.dot(x, self._W)
'.. todo:: WRITEME Parameters x : ndarray, 1d or 2d The input data'
def lmul_T(self, x):
    return T.dot(x, self._W.T)
'Reads a given configuration file, and updates the internal installed packages list. :param from_location: a path (string) to a directory containing an installed.lst file'
def read_installed_packages_list(self, from_location):
    try:
        installed_list_file = open(from_location + '/installed.lst')
    except IOError:
        pass
    else:
        for line in installed_list_file:
            l = line.rstrip().split(' ')
            if l:
                self.installed_packages_list[l[0]] = this_package = \
                    self.package_info(from_location, l[0], l[1], l[2],
                                      urllib.unquote(l[3]),
                                      urllib.unquote(l[4]))
            else:
                pass
'Looks up a dataset name and return its location, or None if it\'s unknown. :param dataset_name: a canonical dataset name, \'mnist\' for e.g. :returns: A path, if dataset is found, or None otherwise.'
def resolve_dataset(self, dataset_name):
    if dataset_name in self.installed_packages_list:
        return os.path.join(
            self.installed_packages_list[dataset_name].where,
            self.installed_packages_list[dataset_name].name)
    else:
        return None
'Scans possible locations to load installed.lst files. It first scans the root install, the user install, then the paths, from left to right, specified by the PYLEARN2_DATA_PATH environment variable.'
def __init__(self):
    paths = ['/etc/pylearn/',
             os.environ['HOME'] + '/.local/share/pylearn/']
    try:
        paths += re.split(':|;', os.environ['PYLEARN2_DATA_PATH'])
    except Exception:
        pass
    for path in paths:
        self.read_installed_packages_list(path)
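A minimal usage sketch of the three methods above. The enclosing class name PackageResolver is an assumption for illustration; only the methods shown in this excerpt are real.

# Hypothetical usage; `PackageResolver` is an assumed class name.
resolver = PackageResolver()              # scans /etc, ~/.local, $PYLEARN2_DATA_PATH
path = resolver.resolve_dataset('mnist')  # look up a canonical dataset name
if path is None:
    print('mnist is not installed')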
'which_set: A string specifying which portion of the dataset to load. Valid values are \'train\' or \'public_test\' base_path: The directory containing the .csv files from kaggle.com. This directory should be writable; if the .csv files haven\'t already been converted to npy, this class will convert them to save memory the next time they are loaded. fit_preprocessor: True if the preprocessor is allowed to fit the data. fit_test_preprocessor: If we construct a test set based on this dataset, should it be allowed to fit the test set?'
def __init__(self, which_set, base_path='${PYLEARN2_DATA_PATH}/icml_2013_black_box', start=None, stop=None, preprocessor=None, fit_preprocessor=False, fit_test_preprocessor=False):
    self.test_args = locals()
    self.test_args['which_set'] = 'public_test'
    self.test_args['fit_preprocessor'] = fit_test_preprocessor
    del self.test_args['start']
    del self.test_args['stop']
    del self.test_args['self']
    files = {'train': 'train.csv', 'public_test': 'test.csv'}
    sizes = {'train': 1000, 'public_test': 10000, 'extra': 135735}
    if which_set == 'extra':
        path = base_path + '/' + 'extra_unsupervised_data.npy'
        X = serial.load(path).T
        y = None
    else:
        try:
            filename = files[which_set]
        except KeyError:
            raise ValueError('Unrecognized dataset name: ' + which_set)
        path = base_path + '/' + filename
        path = preprocess(path)
        expect_labels = which_set == 'train'
        X, y = self._load_data(path, expect_labels)
    size = sizes[which_set]
    if X.shape[0] != size:
        raise ValueError('Expected ' + str(size) + ' examples, got ' +
                         str(X.shape[0]))
    if start is not None:
        assert which_set != 'test'
        assert isinstance(start, int)
        assert isinstance(stop, int)
        assert start >= 0
        assert start < stop
        if not (stop <= X.shape[0]):
            raise ValueError('stop must be less than the # of examples but ' +
                             'stop is ' + str(stop) + ' and there are ' +
                             str(X.shape[0]) + ' examples.')
        X = X[start:stop, :]
        if y is not None:
            y = y[start:stop, :]
    super(BlackBoxDataset, self).__init__(X=X, y=y, y_labels=9)
    if preprocessor:
        preprocessor.apply(self, can_fit=fit_preprocessor)
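A hypothetical construction following the signature above; the train split has 1000 examples per the size check, so a 900/100 train/valid split via start/stop is a natural usage.

# Hypothetical usage; ${PYLEARN2_DATA_PATH} must point at the kaggle files.
train = BlackBoxDataset(which_set='train', start=0, stop=900)
valid = BlackBoxDataset(which_set='train', start=900, stop=1000)
unsup = BlackBoxDataset(which_set='extra')  # 135735 unlabeled examples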
'which_set: A string specifying which portion of the dataset to load. Valid values are \'train\' or \'public_test\' base_path: The directory containing the .csv files from kaggle.com. This directory should be writable; if the .csv files haven\'t already been converted to npy, this class will convert them to save memory the next time they are loaded. fit_preprocessor: True if the preprocessor is allowed to fit the data. fit_test_preprocessor: If we construct a test set based on this dataset, should it be allowed to fit the test set?'
def __init__(self, which_set, base_path='${PYLEARN2_DATA_PATH}/icml_2013_emotions', start=None, stop=None, preprocessor=None, fit_preprocessor=False, axes=('b', 0, 1, 'c'), fit_test_preprocessor=False):
    self.test_args = locals()
    self.test_args['which_set'] = 'public_test'
    self.test_args['fit_preprocessor'] = fit_test_preprocessor
    del self.test_args['start']
    del self.test_args['stop']
    del self.test_args['self']
    del self.test_args['__class__']
    files = {'train': 'train.csv', 'public_test': 'test.csv'}
    try:
        filename = files[which_set]
    except KeyError:
        raise ValueError('Unrecognized dataset name: ' + which_set)
    path = base_path + '/' + filename
    path = preprocess(path)
    X, y = self._load_data(path, which_set == 'train')
    if start is not None:
        assert which_set != 'test'
        assert isinstance(start, int)
        assert isinstance(stop, int)
        assert start >= 0
        assert start < stop
        assert stop <= X.shape[0]
        X = X[start:stop, :]
        if y is not None:
            y = y[start:stop, :]
    view_converter = DefaultViewConverter(shape=[48, 48, 1], axes=axes)
    if y is None:
        y_labels = None
    else:
        y_labels = 7
    super(EmotionsDataset, self).__init__(X=X, y=y, y_labels=y_labels,
                                          view_converter=view_converter)
    if preprocessor:
        preprocessor.apply(self, can_fit=fit_preprocessor)
'f: the theano function whose work needs to be split nhid: we assume f takes a matrix of shape (m,nvis) and returns a matrix of shape (m,nhid)'
def __init__(self, f, nhid):
    self.f = f
    self.nhid = nhid
'dataset_maker: A callable that returns a Dataset num_examples: the number of examples we expect the dataset to have (just for error checking purposes)'
def __init__(self, dataset_maker, num_examples, pipeline_path):
    self.dataset_maker = dataset_maker
    self.num_examples = num_examples
    self.pipeline_path = pipeline_path
'batch_size: the number of images to process simultaneously; this does not affect the final result, it is just for performance (larger values allow more parallel processing but require more memory). kmeans_path: a path to a .pkl file containing a pylearn2.kmeans.KMeans instance. save_path: the base path to save to, should end in .npy. dataset_family: extract_features.stl10, extract_features.cifar10, etc. which_set: \'train\' or \'test\'. num_output_features: the number of randomly selected pooled features to extract per image. chunk_size: will build a design matrix of this many processed examples before serializing them; if you use a chunk size of 10,000 on a dataset with 50,000 examples, and a save_path of foo.npy, this will result in you obtaining foo_A.npy through foo_E.npy. Use a small chunk_size to avoid running out of memory. restrict: a tuple of (start, end) indices; restricts feature extraction to only these examples. The restrict option is used internally to implement the chunk_size option, so you may not specify both for the same FeatureExtractor. pool_mode: \'max\' or \'mean\''
def __init__(self, batch_size, kmeans_path, save_path, dataset_family, which_set, num_output_features, chunk_size=None, restrict=None, pool_mode='mean'):
    if chunk_size is not None and restrict is not None:
        raise NotImplementedError('Currently restrict is used internally to '
                                  'implement chunk_size, so a client may not '
                                  'specify both')
    self.batch_size = batch_size
    self.model_path = kmeans_path
    self.restrict = restrict
    self.pool_mode = pool_mode
    assert save_path is not None
    assert save_path.endswith('npy')
    assert pool_mode in ['mean', 'max']
    self.save_path = save_path
    self.which_set = which_set
    self.dataset_family = dataset_family
    self.chunk_size = chunk_size
    self.num_output_features = num_output_features
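A hypothetical configuration following the docstring above; the file names and feature count are made up for illustration, and dataset_family would normally be one of the extract_features modules.

# Per the docstring: 50,000 examples with chunk_size=10000 would yield
# foo_A.npy through foo_E.npy.
extractor = FeatureExtractor(batch_size=100,
                             kmeans_path='kmeans.pkl',  # pickled KMeans
                             save_path='foo.npy',
                             dataset_family=None,  # e.g. extract_features.cifar10
                             which_set='train',
                             num_output_features=1600,
                             chunk_size=10000,
                             pool_mode='mean')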
'.. todo:: WRITEME'
def __init__(self, encoder, dataset, path, batch_size=None, topo=False):
    self.encoder = encoder
    self.dataset = dataset
    self.path = path
    self.batch_size = batch_size
    self.topo = topo
'.. todo:: WRITEME Parameters **kwargs : dict, optional WRITEME'
def main_loop(self, **kwargs):
    if self.batch_size is None:
        if self.topo:
            data = self.dataset.get_topological_view()
        else:
            data = self.dataset.get_design_matrix()
        output = self.encoder.perform(data)
    else:
        myiterator = self.dataset.iterator(mode='sequential',
                                           batch_size=self.batch_size,
                                           topo=self.topo)
        chunks = []
        for data in myiterator:
            chunks.append(self.encoder.perform(data))
        output = np.concatenate(chunks)
    np.save(self.path, output)
'.. todo:: WRITEME'
def __call__(self, inputs):
    if self.input_space:
        self.input_space.validate(inputs)
    return self.theano_rng.binomial(p=inputs, size=inputs.shape,
                                    dtype=inputs.dtype)
'.. todo:: WRITEME'
def set_input_space(self, space):
    self.input_space = space
'.. todo:: WRITEME'
def get_input_space(self):
    if self.input_space is not None:
        return self.input_space
    raise ValueError('No input space was specified for this Block (%s). '
                     'You can call set_input_space to correct that.' %
                     str(self))
'.. todo:: WRITEME'
def get_output_space(self):
    return self.get_input_space()
'.. todo:: WRITEME'
def __call__(self, bc01):
    half = self.n // 2
    sq = T.sqr(bc01)
    b, ch, r, c = bc01.shape
    extra_channels = T.alloc(0.0, b, ch + 2 * half, r, c)
    sq = T.set_subtensor(extra_channels[:, half:half + ch, :, :], sq)
    scale = self.k
    for i in xrange(self.n):
        scale += self.alpha * sq[:, i:i + ch, :, :]
    scale = scale ** self.beta
    return bc01 / scale
'.. todo:: WRITEME'
def __call__(self, c01b):
    half = self.n // 2
    sq = T.sqr(c01b)
    ch, r, c, b = c01b.shape
    extra_channels = T.alloc(0.0, ch + 2 * half, r, c, b)
    sq = T.set_subtensor(extra_channels[half:half + ch, :, :, :], sq)
    scale = self.k
    for i in xrange(self.n):
        scale += self.alpha * sq[i:i + ch, :, :, :]
    scale = scale ** self.beta
    return c01b / scale
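Both variants above implement cross-channel local response normalization: each channel is divided by a power of a windowed sum of squares over n neighboring channels. A minimal NumPy sketch of the bc01 version for intuition; the default parameter values here are hypothetical, not taken from this excerpt.

import numpy as np

def lrn_bc01(x, n=5, k=2.0, alpha=1e-4, beta=0.75):
    # Pad the channel axis with `half` zero channels on each side, then
    # slide a window of n channels and accumulate alpha-weighted squares.
    half = n // 2
    b, ch, r, c = x.shape
    sq = np.zeros((b, ch + 2 * half, r, c), dtype=x.dtype)
    sq[:, half:half + ch] = np.square(x)
    scale = k * np.ones_like(x)
    for i in range(n):
        scale += alpha * sq[:, i:i + ch]
    return x / scale ** beta

x = np.random.rand(2, 8, 4, 4).astype('float32')
assert lrn_bc01(x).shape == x.shape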
'.. todo:: WRITEME properly NOTE: c01b must be CudaNdarrayType.'
def __call__(self, c01b):
    return self._op(c01b)[0]
'.. todo:: WRITEME'
def __call__(self, inputs):
    if self.input_space:
        self.input_space.validate(inputs)
    return inputs
'.. todo:: WRITEME'
def set_input_space(self, space):
    self.input_space = space
'.. todo:: WRITEME'
def get_input_space(self):
    if self.input_space is not None:
        return self.input_space
    raise ValueError('No input space was specified for this Block (%s). '
                     'You can call set_input_space to correct that.' %
                     str(self))
'.. todo:: WRITEME'
def get_output_space(self):
    return self.get_input_space()
'Sets the shape of the display (in pixels) Parameters shape : tuple The (rows, columns) of the display.'
def set_shape(self, shape):
    self.rows = shape[0]
    self.cols = shape[1]
'Sets the range of space that is plotted in the graph. Parameters xlim : tuple The range (xmin, xmax)'
def set_xlim(self, xlim):
    self.xmin = xlim[0]
    self.xmax = xlim[1]
    self.delta_x = (self.xmax - self.xmin) / float(self.cols - 1)
'Sets the y coordinate of the central pixel of the display. Parameters ycenter : float The desired coordinate.'
def set_ycenter(self, ycenter):
    self.delta_y = self.delta_x
    self.ymin = ycenter - (self.rows / 2) * self.delta_y
    self.ymax = self.ymin + (self.rows - 1) * self.delta_y
'Renders the graph. Returns output : ndarray An ndarray in (rows, cols, RGB) format.'
def render(self):
    rval = N.zeros((self.rows, self.cols, 3))
    for component in self.components:
        rval = component.render(prev_layer=rval, parent=self)
        assert rval is not None
    return rval
'Returns the coordinates of every pixel in column i of the graph. Parameters i : int Column index Returns coords : ndarray A vector containing the real-number coordinates of every pixel in column i of the graph.'
def get_coords_for_col(self, i):
    X = N.zeros((self.rows, 2), dtype=config.floatX)
    X[:, 0] = self.xmin + float(i) * self.delta_x
    X[:, 1] = (self.ymin +
               N.cast[config.floatX](
                   N.asarray(range(self.rows - 1, -1, -1))) * self.delta_y)
    return X
'Renders the heatmap. Parameters prev_layer : numpy ndarray An image that will be copied into the new output. The new image will be rendered on top of the first one, i.e., `prev_layer` will be visible through the new heatmap if the new heatmap is not rendered in fully opaque mode. parent : Graph2D A Graph2D object that defines the coordinate system of the heatmap. Returns img : The rendered heatmap'
def render(self, prev_layer, parent):
    my_img = prev_layer * 0.0
    for i in xrange(prev_layer.shape[1]):
        X = parent.get_coords_for_col(i)
        f = self.f(X)
        if len(f.shape) == 1:
            for j in xrange(3):
                my_img[:, i, j] = f
        else:
            my_img[:, i, :] = f
    if self.normalizer is not None:
        my_img = self.normalizer(my_img)
        assert my_img is not None
    if self.render_mode == 'r':
        my_img[:, :, 1:] = prev_layer[:, :, 1:]
    elif self.render_mode == 'o':
        pass
    else:
        raise NotImplementedError()
    return my_img
'.. todo:: WRITEME'
def clear(self):
    if self.is_color:
        for i in xrange(3):
            self.image[:, :, i] = self.background[i] * 0.5 + 0.5
    else:
        self.image[:] = self.background * 0.5 + 0.5
    self.cur_pos = (0, 0)
'Adds an image patch to the `PatchViewer`. Patches are added left to right, top to bottom. If this method is called when the `PatchViewer` is already full, it will clear the viewer and start adding patches at the upper left again. Parameters patch : ndarray If this `PatchViewer` is in color (controlled by the `is_color` parameter of the constructor) `patch` should be a 3D ndarray, with the first axis being the rows of the image, the second axis being the columns of the image, and the third being RGB color channels. If this `PatchViewer` is grayscale, `patch` should be either a 3D ndarray with the third axis having length 1, or a 2D ndarray. The values of the ndarray should be floating point. 0 is displayed as gray. Negative numbers are displayed as blacker. Positive numbers are displayed as whiter. See the `rescale` parameter for more detail. This color convention was chosen because it is useful for displaying weight matrices. rescale : bool If True, the maximum absolute value of a pixel in `patch` sets the scale, so that abs(patch).max() is absolute white and -abs(patch).max() is absolute black. If False, `patch` should lie in [-1, 1]. recenter : bool If True (default), if `patch` has smaller dimensions than were specified to the constructor\'s `patch_shape` argument, we will display the patch in the center of the area allocated to it in the display grid. If False, we will raise an exception if `patch` is not exactly the specified shape. activation : WRITEME WRITEME warn_blank_patch : WRITEME WRITEME'
def add_patch(self, patch, rescale=True, recenter=True, activation=None, warn_blank_patch=True):
    if (warn_blank_patch and patch.min() == patch.max() and
            (rescale or patch.min() == 0.0)):
        warnings.warn('displaying totally blank patch')
    if self.is_color:
        assert patch.ndim == 3
        if not (patch.shape[-1] == 3):
            raise ValueError('Expected color image to have shape[-1]=3, '
                             'but shape[-1] is ' + str(patch.shape[-1]))
    else:
        assert patch.ndim in [2, 3]
        if patch.ndim == 3:
            if patch.shape[-1] != 1:
                raise ValueError('Expected 2D patch or 3D patch with 1 '
                                 'channel, but got patch with shape ' +
                                 str(patch.shape))
    if recenter:
        assert patch.shape[0] <= self.patch_shape[0]
        if patch.shape[1] > self.patch_shape[1]:
            raise ValueError('Given patch of width %d but only patches up '
                             'to width %d fit' %
                             (patch.shape[1], self.patch_shape[1]))
        rs_pad = (self.patch_shape[0] - patch.shape[0]) // 2
        re_pad = self.patch_shape[0] - rs_pad - patch.shape[0]
        cs_pad = (self.patch_shape[1] - patch.shape[1]) // 2
        ce_pad = self.patch_shape[1] - cs_pad - patch.shape[1]
    else:
        if patch.shape[0:2] != self.patch_shape:
            raise ValueError('Expected patch with shape %s, got %s' %
                             (str(self.patch_shape), str(patch.shape)))
        rs_pad = 0
        re_pad = 0
        cs_pad = 0
        ce_pad = 0
    temp = patch.copy()
    assert isfinite(temp)
    if rescale:
        scale = np.abs(temp).max()
        if scale > 0:
            temp /= scale
    elif temp.min() < -1.0 or temp.max() > 1.0:
        raise ValueError('When rescale is set to False, pixel values must '
                         'lie in [-1,1]. Got [%f, %f].' %
                         (temp.min(), temp.max()))
    temp *= 0.5
    temp += 0.5
    assert temp.min() >= 0.0
    assert temp.max() <= 1.0
    if self.cur_pos == (0, 0):
        self.clear()
    rs = self.pad[0] + self.cur_pos[0] * (self.patch_shape[0] + self.pad[0])
    re = rs + self.patch_shape[0]
    assert self.cur_pos[1] <= self.grid_shape[1]
    cs = self.pad[1] + self.cur_pos[1] * (self.patch_shape[1] + self.pad[1])
    ce = cs + self.patch_shape[1]
    assert ce <= self.image.shape[1], (ce, self.image.shape[1])
    temp *= (temp > 0)
    if len(temp.shape) == 2:
        temp = temp[:, :, np.newaxis]
    assert ce - ce_pad <= self.image.shape[1]
    self.image[rs + rs_pad:re - re_pad, cs + cs_pad:ce - ce_pad, :] = temp
    if activation is not None:
        if (not isinstance(activation, tuple) and
                not isinstance(activation, list)):
            activation = (activation,)
        for shell, amt in enumerate(activation):
            assert 2 * shell + 2 < self.pad[0]
            assert 2 * shell + 2 < self.pad[1]
            if amt >= 0:
                act = amt * np.asarray(self.colors[shell])
                self.image[rs + rs_pad - shell - 1,
                           cs + cs_pad - shell - 1:ce - ce_pad + 1 + shell,
                           :] = act
                self.image[re - re_pad + shell,
                           cs + cs_pad - 1 - shell:ce - ce_pad + 1 + shell,
                           :] = act
                self.image[rs + rs_pad - 1 - shell:re - re_pad + 1 + shell,
                           cs + cs_pad - 1 - shell, :] = act
                self.image[rs + rs_pad - shell - 1:re - re_pad + shell + 1,
                           ce - ce_pad + shell, :] = act
    self.cur_pos = (self.cur_pos[0], self.cur_pos[1] + 1)
    if self.cur_pos[1] == self.grid_shape[1]:
        self.cur_pos = (self.cur_pos[0] + 1, 0)
    if self.cur_pos[0] == self.grid_shape[0]:
        self.cur_pos = (0, 0)
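A usage sketch. The constructor signature is an assumption inferred from the grid_shape, patch_shape, and is_color attributes used above; only add_patch, show, save, and get_img appear in this excerpt.

import numpy as np

# Hypothetical construction of a 4x4 grid of 8x8 grayscale patches.
viewer = PatchViewer(grid_shape=(4, 4), patch_shape=(8, 8), is_color=False)
for _ in range(16):
    # zero-mean patches: 0 renders gray, negative darker, positive lighter
    viewer.add_patch(np.random.randn(8, 8).astype('float32'))
viewer.show()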
'.. todo:: WRITEME'
def show(self):
    show(self.image)
'.. todo:: WRITEME'
def get_img(self):
    x = np.cast['uint8'](self.image * 255.0)
    if x.shape[2] == 1:
        x = x[:, :, 0]
    ensure_Image()
    img = Image.fromarray(x)
    return img
'.. todo:: WRITEME'
def save(self, path):
    self.get_img().save(path)
'.. todo:: WRITEME properly Returns a shape that fits n elements. If exact, fits exactly n elements'
def pick_shape(n, exact=False):
    if not isinstance(n, py_integer_types):
        raise TypeError('n must be an integer, but is ' + str(type(n)))
    if exact:
        best_r = -1
        best_c = -1
        best_ratio = 0
        for r in xrange(1, int(np.sqrt(n)) + 1):
            if n % r != 0:
                continue
            c = n // r
            ratio = min(float(r) / float(c), float(c) / float(r))
            if ratio > best_ratio:
                best_ratio = ratio
                best_r = r
                best_c = c
        return (best_r, best_c)
    sqrt = np.sqrt(n)
    r = c = int(np.floor(sqrt))
    while r * c < n:
        c += 1
    return (r, c)
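Two worked examples whose values follow directly from the code above:

assert pick_shape(12, exact=True) == (3, 4)  # most nearly square exact factorization
assert pick_shape(10) == (3, 4)              # smallest near-square grid holding >= 10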
'The partition function makes this intractable. Parameters model : DBM data : Batch in get_data_specs format'
def expr(self, model, data):
    self.get_data_specs(model)[0].validate(data)
    return None
'.. todo:: WRITEME'
def _get_toronto_neg(self, model, layer_to_chains):
    assert isinstance(model.visible_layer, BinaryVector)
    assert isinstance(model.hidden_layers[0], BinaryVectorMaxPool)
    assert model.hidden_layers[0].pool_size == 1
    assert isinstance(model.hidden_layers[1], BinaryVectorMaxPool)
    assert model.hidden_layers[1].pool_size == 1
    assert isinstance(model.hidden_layers[2], Softmax)
    assert len(model.hidden_layers) == 3
    params = list(model.get_params())
    V_samples = layer_to_chains[model.visible_layer]
    H1_samples, H2_samples, Y_samples = [layer_to_chains[layer]
                                         for layer in model.hidden_layers]
    H1_mf = model.hidden_layers[0].mf_update(
        state_below=model.visible_layer.upward_state(V_samples),
        state_above=model.hidden_layers[1].downward_state(H2_samples),
        layer_above=model.hidden_layers[1])
    Y_mf = model.hidden_layers[2].mf_update(
        state_below=model.hidden_layers[1].upward_state(H2_samples))
    H2_mf = model.hidden_layers[1].mf_update(
        state_below=model.hidden_layers[0].upward_state(H1_mf),
        state_above=model.hidden_layers[2].downward_state(Y_mf),
        layer_above=model.hidden_layers[2])
    expected_energy_p = model.energy(V_samples,
                                     [H1_mf, H2_mf, Y_samples]).mean()
    constants = flatten([V_samples, H1_mf, H2_mf, Y_samples])
    neg_phase_grads = OrderedDict(
        safe_zip(params, T.grad(-expected_energy_p, params,
                                consider_constant=constants)))
    return neg_phase_grads
'.. todo:: WRITEME TODO: reduce variance of negative phase by integrating out the even-numbered layers. Rao-Blackwellization can do this when the expected gradient equals the gradient of the expectation, but doing this in general is trickier.'
def _get_standard_neg(self, model, layer_to_chains):
    params = list(model.get_params())
    expected_energy_p = model.energy(
        layer_to_chains[model.visible_layer],
        [layer_to_chains[layer] for layer in model.hidden_layers]).mean()
    samples = flatten(layer_to_chains.values())
    for i, sample in enumerate(samples):
        if sample.name is None:
            sample.name = 'sample_' + str(i)
    neg_phase_grads = OrderedDict(
        safe_zip(params, T.grad(-expected_energy_p, params,
                                consider_constant=samples,
                                disconnected_inputs='ignore')))
    return neg_phase_grads
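The construction above is the standard negative-phase estimator: the gradient of the log partition function is an expectation of the energy gradient under the model distribution, approximated by the N persistent chains (which is why the samples are passed as consider_constant, so T.grad differentiates only the energy):

    \frac{\partial}{\partial \theta} \log Z
      = -\,\mathbb{E}_{P(v,h)}\!\left[\frac{\partial E(v,h)}{\partial \theta}\right]
      \approx -\,\frac{1}{N} \sum_{n=1}^{N}
        \frac{\partial E\!\left(v^{(n)}, h^{(n)}\right)}{\partial \theta}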
'.. todo:: WRITEME'
def _get_variational_pos(self, model, X, Y):
    if self.supervised:
        assert Y is not None
        assert isinstance(model.hidden_layers[-1], Softmax)
    q = model.mf(X, Y)
    """
    Use the non-negativity of the KL divergence to construct a lower
    bound on the log likelihood. We can drop all terms that are
    constant with respect to the model parameters:

    log P(v) = L(v, q) + KL(q || P(h|v))
    L(v, q) = log P(v) - KL(q || P(h|v))
    L(v, q) = log P(v) - sum_h q(h) log q(h) + q(h) log P(h | v)
    L(v, q) = log P(v) + sum_h q(h) log P(h | v) + const
    L(v, q) = log P(v) + sum_h q(h) log P(h, v)
              - sum_h q(h) log P(v) + const
    L(v, q) = sum_h q(h) log P(h, v) + const
    L(v, q) = sum_h q(h) -E(h, v) - log Z + const

    so the cost we want to minimize is
    expected_energy + log Z + const

    Note: for the RBM, this bound is exact, since the KL divergence
    goes to 0.
    """
    variational_params = flatten(q)
    expected_energy_q = model.expected_energy(X, q).mean()
    params = list(model.get_params())
    gradients = OrderedDict(
        safe_zip(params, T.grad(expected_energy_q, params,
                                consider_constant=variational_params,
                                disconnected_inputs='ignore')))
    return gradients
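Restating the bound from the embedded derivation in display form: with variational posterior q(h) and entropy H(q),

    \log P(v) = \mathcal{L}(v, q)
              + \mathrm{KL}\!\left(q(h) \,\|\, P(h \mid v)\right),
    \qquad
    \mathcal{L}(v, q) = \mathbb{E}_{q}\!\left[-E(v, h)\right] - \log Z + H(q),

so with q and hence H(q) held constant, minimizing the expected energy (the log Z term is handled by the negative phase) maximizes the bound.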
'.. todo:: WRITEME'
def _get_sampling_pos(self, model, X, Y):
    layer_to_clamp = OrderedDict([(model.visible_layer, True)])
    layer_to_pos_samples = OrderedDict([(model.visible_layer, X)])
    if self.supervised:
        assert isinstance(model.hidden_layers[-1], Softmax)
        layer_to_clamp[model.hidden_layers[-1]] = True
        layer_to_pos_samples[model.hidden_layers[-1]] = Y
        hid = model.hidden_layers[:-1]
    else:
        assert Y is None
        hid = model.hidden_layers
    for layer in hid:
        mf_state = layer.init_mf_state()

        def recurse_zeros(x):
            if isinstance(x, tuple):
                return tuple([recurse_zeros(e) for e in x])
            return x.zeros_like()

        layer_to_pos_samples[layer] = recurse_zeros(mf_state)
    layer_to_pos_samples = model.sampling_procedure.sample(
        layer_to_state=layer_to_pos_samples,
        layer_to_clamp=layer_to_clamp,
        num_steps=self.num_gibbs_steps,
        theano_rng=self.theano_rng)
    q = [layer_to_pos_samples[layer] for layer in model.hidden_layers]
    pos_samples = flatten(q)
    expected_energy_q = model.energy(X, q).mean()
    params = list(model.get_params())
    gradients = OrderedDict(
        safe_zip(params, T.grad(expected_energy_q, params,
                                consider_constant=pos_samples,
                                disconnected_inputs='ignore')))
    return gradients
'Computes the positive phase using Gibbs sampling. Returns gradients : OrderedDict A dictionary mapping parameters to positive phase gradients. updates : OrderedDict An empty dictionary'
def _get_positive_phase(self, model, X, Y=None):
    return (self._get_sampling_pos(model, X, Y), OrderedDict())
'.. todo:: WRITEME'
def _get_negative_phase(self, model, X, Y=None):
    layer_to_chains = model.make_layer_to_state(self.num_chains)

    def recurse_check(l):
        if isinstance(l, (list, tuple, collections.ValuesView)):
            for elem in l:
                recurse_check(elem)
        else:
            assert l.get_value().shape[0] == self.num_chains

    recurse_check(layer_to_chains.values())
    model.layer_to_chains = layer_to_chains
    updates, layer_to_chains = model.get_sampling_updates(
        layer_to_chains, self.theano_rng,
        num_steps=self.num_gibbs_steps,
        return_layer_to_updated=True)
    if self.toronto_neg:
        neg_phase_grads = self._get_toronto_neg(model, layer_to_chains)
    else:
        neg_phase_grads = self._get_standard_neg(model, layer_to_chains)
    return (neg_phase_grads, updates)
'The partition function makes this intractable. Parameters model : Model data : Minibatch in get_data_specs format Returns None : (Always returns None)'
def expr(self, model, data):
    self.get_data_specs(model)[0].validate(data)
    return None