'.. todo:: WRITEME'
def make_node(self, images, acts, denoms, dout):
    inputs = (images, acts, denoms, dout)
    names = ('images', 'acts', 'denoms', 'dout')
    for name, var in zip(names, inputs):
        if not isinstance(var.type, CudaNdarrayType):
            raise TypeError('CrossMapNormUndo: expected %s.type to be '
                            'CudaNdarrayType, got %s' % (name, str(var.type)))
    assert images.ndim == 4
    assert acts.ndim == 4
    assert denoms.ndim == 4
    assert dout.ndim == 4
    assert images.type.broadcastable == acts.type.broadcastable
    assert images.type.broadcastable == denoms.type.broadcastable
    assert images.type.broadcastable == dout.type.broadcastable
    targets_broadcastable = tuple(images.type.broadcastable)
    targets_type = CudaNdarrayType(broadcastable=targets_broadcastable)
    targets = targets_type()
    out_acts = targets_type()
    return Apply(self, [images, acts, denoms, dout], [targets, out_acts])
'.. todo:: WRITEME'
def c_code(self, node, name, inputs, outputs, sub):
    images, acts, denoms, dout = inputs
    targets, out_acts = outputs
    fail = sub['fail']
    num_braces = 0
    size_f = self._size_f
    add_scale = self._add_scale
    pow_scale = self._pow_scale
    blocked = 'true' if self._blocked else 'false'
    inplace = '1' if self._inplace else '0'
    scale_targets = int(self._scale_targets)
    scale_outputs = int(self._scale_outputs)
    class_name = self.__class__.__name__
    class_name_upper = class_name.upper()
    basic_setup = self._basic_setup
    scaling_setup = '\n float scaleTargets = %(scale_targets)s;\n float scaleOutput = %(scale_outputs)s;\n '
    setup_nv_images = (contiguity_check('images') +
                       dimension_check('images', 4) +
                       self._images_setup)
    num_braces += 2
    setup_acts = (contiguity_check('acts') +
                  dimension_check('acts', 4) +
                  '\n { //setup_nv_images brace 1\n const int * acts_dims = CudaNdarray_HOST_DIMS(%(acts)s);\n ' +
                  ensure_same_shape('acts', 'images') +
                  '\n { // setup_nv_acts brace 2\n ')
    num_braces += 2
    setup_nv_denoms = (contiguity_check('denoms') +
                       dimension_check('denoms', 4) +
                       '\n {\n const int *denoms_dims = images_dims;\n ' +
                       ensure_same_shape('denoms', 'images') +
                       nv_matrix_create('denoms'))
    num_braces += 2
    setup_nv_dout = (contiguity_check('dout') +
                     dimension_check('dout', 4) +
                     '\n { // setup_nv_dout brace\n const int *dout_dims = CudaNdarray_HOST_DIMS(%(dout)s);\n ' +
                     ensure_same_shape('dout', 'images') +
                     nv_matrix_create('dout'))
    num_braces += 2
    setup_nv_targets = output_same_shape('targets', 'images')
    num_braces += 1
    setup_nv_out_acts = ('\n const int *out_acts_dims = images_dims;\n\n #if %(inplace)s\n // XXX: is this right?\n Py_XDECREF(%(out_acts)s);\n %(out_acts)s = %(acts)s;\n Py_INCREF(%(out_acts)s);\n #else\n if (CudaNdarray_prep_output(& %(out_acts)s, 4, out_acts_dims)) {\n Py_DECREF(%(targets)s);\n %(fail)s;\n }\n if (CudaNdarray_CopyFromCudaNdarray(%(out_acts)s, %(acts)s)) {\n Py_DECREF(%(targets)s);\n Py_DECREF(%(out_acts)s);\n %(fail)s;\n }\n #endif\n ' +
                         nv_matrix_create('out_acts'))
    num_braces += 1
    undo_normalize = '\n convResponseNormCrossMapUndo(nv_dout, nv_denoms, nv_images,\n nv_out_acts, nv_targets, numFilters,\n sizeF, addScale, powScale, blocked,\n scaleTargets, scaleOutput);\n '
    rval = '\n'.join((basic_setup, scaling_setup, setup_nv_images, setup_acts,
                      setup_nv_denoms, setup_nv_dout, setup_nv_targets,
                      setup_nv_out_acts, undo_normalize, '}' * num_braces))
    return rval % locals()
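The c_code methods in this file assemble one large C template and fill the %(name)s placeholders with Python %-formatting against locals(), via `rval % locals()`. A minimal sketch of that templating idiom (the template and names here are illustrative, not taken from the Op):

    def render(template, **names):
        # '%(images)s'-style placeholders are looked up in the mapping,
        # just as `rval % locals()` does in the c_code methods here.
        return template % names

    print(render('NVMatrix nv_%(name)s(%(rows)d, %(cols)d);',
                 name='images', rows=16, cols=128))
    # prints: NVMatrix nv_images(16, 128);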
'.. todo:: WRITEME'
def grad(self, inputs, dout):
raise NotImplementedError()
'.. todo:: WRITEME'
@property
def inplace(self):
return self._inplace
'.. todo:: WRITEME'
def as_inplace(self):
    if self._inplace:
        raise ValueError("%s instance is already inplace, can't convert" %
                         self.__class__.__name__)
    return self.__class__(self._size_f, self._add_scale, self._pow_scale,
                          self._blocked, inplace=True)
'.. todo:: WRITEME'
def __str__(self):
return (self.__class__.__name__ + ('[size_f=%d,add_scale=%.2f,pow_scale=%.2f,blocked=%s,inplace=%s]' % (self._size_f, self._add_scale, self._pow_scale, self._blocked, self._inplace)))
'.. todo:: WRITEME'
def c_code_cache_version(self):
return (8,)
'.. todo:: WRITEME'
def __eq__(self, other):
return ((type(self) == type(other)) and (self.ds == other.ds) and (self.stride == other.stride) and (self.start == other.start))
'.. todo:: WRITEME'
def __hash__(self):
return (((hash(type(self)) ^ hash(self.ds)) ^ hash(self.stride)) ^ hash(self.start))
'.. todo:: WRITEME'
def c_header_dirs(self):
return ([this_dir, config.pthreads.inc_dir] if config.pthreads.inc_dir else [this_dir])
'.. todo:: WRITEME'
def c_headers(self):
return ['nvmatrix.cuh', 'conv_util.cuh']
'.. todo:: WRITEME'
def c_lib_dirs(self):
return ([cuda_convnet_loc, config.pthreads.lib_dir] if config.pthreads.lib_dir else [cuda_convnet_loc])
'.. todo:: WRITEME'
def c_libraries(self):
return (['cuda_convnet', config.pthreads.lib] if config.pthreads.lib else ['cuda_convnet'])
'.. todo:: WRITEME'
def c_code_cache_version(self):
return (1,)
'.. todo:: WRITEME'
def _argument_contiguity_check(self, arg_name):
return ('\n if (!CudaNdarray_is_c_contiguous(%%(%(arg_name)s)s))\n {\n if (!(%(class_name_caps)s_COPY_NON_CONTIGUOUS)) {\n PyErr_SetString(PyExc_ValueError,\n "%(class)s: %(arg_name)s must be C contiguous");\n %%(fail)s;\n }\n }\n ' % {'class': self.__class__.__name__, 'arg_name': arg_name, 'class_name_caps': self.__class__.__name__.upper()})
'.. todo:: WRITEME'
def make_node(self, images):
    images = as_cuda_ndarray_variable(images)
    assert images.ndim == 4
    channels_broadcastable = images.type.broadcastable[0]
    batch_broadcastable = images.type.broadcastable[3]
    rows_broadcastable = False
    cols_broadcastable = False
    targets_broadcastable = (channels_broadcastable, rows_broadcastable,
                             cols_broadcastable, batch_broadcastable)
    targets_type = CudaNdarrayType(broadcastable=targets_broadcastable)
    targets = targets_type()
    return Apply(self, [images], [targets])
'.. todo:: WRITEME'
def c_code(self, node, name, inputs, outputs, sub):
    images, = inputs
    targets, = outputs
    fail = sub['fail']
    num_braces = 0
    if self.copy_non_contiguous:
        raise UnimplementedError()
    else:
        basic_setup = '#define MAXPOOL_COPY_NON_CONTIGUOUS 0\n'
    setup_nv_images = (self._argument_contiguity_check('images') +
                       '\n if (%(images)s->nd != 4)\n {\n PyErr_Format(PyExc_ValueError,\n "images must have nd=4, got nd=%%i", %(images)s->nd);\n %(fail)s;\n }\n\n { //setup_nv_images brace 1\n\n const int * images_dims = CudaNdarray_HOST_DIMS(%(images)s);\n const int img_channels = images_dims[0];\n const int imgSizeY = images_dims[1];\n const int imgSizeX = images_dims[2];\n const int batch_size = images_dims[3];\n\n if(imgSizeY != imgSizeX){\n PyErr_Format(PyExc_ValueError,\n "images must be square(dims[1] == dims[2]). Shape (%%i,%%i,%%i,%%i)",\n img_channels, imgSizeY, imgSizeX, batch_size);\n %(fail)s;\n }\n if(%(ds)s > imgSizeY){\n PyErr_Format(PyExc_ValueError,\n "ds(%%d) must be <= imgSizeX(%%d) and imgSizeY(%%d).",\n %(ds)s, imgSizeX, imgSizeY);\n %(fail)s;\n }\n if(%(start)s >= imgSizeX){\n PyErr_Format(PyExc_ValueError,\n "start is %%d but must be smaller than the image size of %%d x %%d.",\n %(start)s, imgSizeX, imgSizeY);\n %(fail)s;\n }\n\n NVMatrix nv_images(%(images)s, img_channels * imgSizeY * imgSizeX, batch_size,\n "MaxPool:nv_images");\n ')
    num_braces += 1
    setup_nv_targets = '\n //int _outputsX = int(ceil((dic[\'imgSize\'] - dic[\'start\'] - dic[\'sizeX\']) / float(dic[\'stride\']))) + 1;\n int _outputsX = ((int)(ceil((imgSizeY - %(start)s - %(ds)s) / ((float)%(stride)s)))) + 1;\n\n int target_dims [] = {\n img_channels,\n _outputsX,\n _outputsX,\n batch_size };\n\n if (CudaNdarray_prep_output(& %(targets)s, 4, target_dims))\n {\n %(fail)s;\n }\n\n { // setup_nv_target brace # 1\n\n NVMatrix nv_targets(%(targets)s, target_dims[0] * target_dims[1] * target_dims[2],\n target_dims[3], "MaxPool:nv_targets");\n\n '
    num_braces += 1
    do_pool = '\n convLocalPool(nv_images, nv_targets, img_channels, %(ds)s,\n %(start)s, %(stride)s, _outputsX, MaxPooler());\n '
    braces = '}' * num_braces
    rval = basic_setup + setup_nv_images + setup_nv_targets + do_pool + braces
    start = self.start
    stride = self.stride
    ds = self.ds
    rval = rval % locals()
    return rval
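The pooled output size in the template above is computed as ceil((imgSize - start - ds) / stride) + 1. A small Python sketch of the same arithmetic (the helper name is illustrative):

    import math

    def pooled_output_size(img_size, ds, stride, start=0):
        # mirrors the C expression:
        # _outputsX = (int)ceil((imgSizeY - start - ds) / (float)stride) + 1
        return int(math.ceil((img_size - start - ds) / float(stride))) + 1

    # e.g. a 32x32 input with 3x3 pooling at stride 2 gives 16 outputs per axis
    assert pooled_output_size(32, ds=3, stride=2) == 16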
'.. todo:: WRITEME'
def R_op(self, inp, evals):
    x, = inp
    ev, = evals
    if ev is not None:
        ev = gpu_contiguous(ev)
        return [MaxPoolRop(self.ds, self.stride, self.start)(x, ev)]
    else:
        return [None]
'.. todo:: WRITEME'
def grad(self, inp, grads):
    x, = inp
    gz, = grads
    gz = gpu_contiguous(gz)
    maxout = self(x)
    return [MaxPoolGrad(self.ds, self.stride, self.start)(x, maxout, gz)]
'.. todo:: WRITEME'
def make_thunk(self, *args, **kwargs):
    if not convnet_available():
        raise RuntimeError('Could not compile cuda_convnet')
    return super(MaxPool, self).make_thunk(*args, **kwargs)
'.. todo:: WRITEME'
def __eq__(self, other):
return ((type(self) == type(other)) and (self.ds == other.ds) and (self.stride == other.stride) and (self.start == other.start))
'.. todo:: WRITEME'
def __hash__(self):
return (((hash(type(self)) ^ hash(self.ds)) ^ hash(self.stride)) ^ hash(self.start))
'.. todo:: WRITEME'
def c_header_dirs(self):
return [this_dir]
'.. todo:: WRITEME'
def c_headers(self):
return ['nvmatrix.cuh', 'conv_util.cuh', 'pool_rop.cuh']
'.. todo:: WRITEME'
def c_lib_dirs(self):
return [cuda_convnet_loc]
'.. todo:: WRITEME'
def c_libraries(self):
return ['cuda_convnet']
'.. todo:: WRITEME'
def c_code_cache_version(self):
return (1,)
'.. todo:: WRITEME'
def _argument_contiguity_check(self, arg_name):
return ('\n if (!CudaNdarray_is_c_contiguous(%%(%(arg_name)s)s))\n {\n if (!(%(class_name_caps)s_COPY_NON_CONTIGUOUS)) {\n PyErr_SetString(PyExc_ValueError,\n "%(class)s: %(arg_name)s must be C contiguous");\n %%(fail)s;\n }\n }\n ' % {'class': self.__class__.__name__, 'arg_name': arg_name, 'class_name_caps': self.__class__.__name__.upper()})
'.. todo:: WRITEME'
def make_node(self, images, evals):
    images = as_cuda_ndarray_variable(images)
    evals = as_cuda_ndarray_variable(evals)
    assert images.ndim == 4
    assert evals.ndim == 4
    channels_broadcastable = images.type.broadcastable[0]
    batch_broadcastable = images.type.broadcastable[3]
    rows_broadcastable = False
    cols_broadcastable = False
    targets_broadcastable = (channels_broadcastable, rows_broadcastable,
                             cols_broadcastable, batch_broadcastable)
    targets_type = CudaNdarrayType(broadcastable=targets_broadcastable)
    targets = targets_type()
    return Apply(self, [images, evals], [targets])
'.. todo:: WRITEME'
def c_code(self, node, name, inputs, outputs, sub):
    images, evals = inputs
    targets, = outputs
    fail = sub['fail']
    num_braces = 0
    if self.copy_non_contiguous:
        raise UnimplementedError()
    else:
        basic_setup = '#define MAXPOOLROP_COPY_NON_CONTIGUOUS 0\n'
    setup_nv_images = (self._argument_contiguity_check('images') +
                       '\n if (%(images)s->nd != 4)\n {\n PyErr_Format(PyExc_ValueError,\n "images must have nd=4, got nd=%%i", %(images)s->nd);\n %(fail)s;\n }\n\n { //setup_nv_images brace 1\n\n const int * images_dims = CudaNdarray_HOST_DIMS(%(images)s);\n const int img_channels = images_dims[0];\n const int imgSizeY = images_dims[1];\n const int imgSizeX = images_dims[2];\n const int batch_size = images_dims[3];\n\n if(imgSizeY != imgSizeX){\n PyErr_Format(PyExc_ValueError,\n "images must be square(dims[1] == dims[2]). Shape (%%i,%%i,%%i,%%i)",\n img_channels, imgSizeY, imgSizeX, batch_size);\n %(fail)s;\n }\n if(%(ds)s > imgSizeY){\n PyErr_Format(PyExc_ValueError,\n "ds(%%d) must be <= imgSizeX(%%d) and imgSizeY(%%d).",\n %(ds)s, imgSizeX, imgSizeY);\n %(fail)s;\n }\n if(%(start)s >= imgSizeX){\n PyErr_Format(PyExc_ValueError,\n "start is %%d but must be smaller than the image size of %%d x %%d.",\n %(start)s, imgSizeX, imgSizeY);\n %(fail)s;\n }\n\n NVMatrix nv_images(%(images)s, img_channels * imgSizeY * imgSizeX, batch_size,\n "MaxPoolRop:nv_images");\n NVMatrix nv_evals(%(evals)s, img_channels * imgSizeY * imgSizeX,\n batch_size, "MaxPoolRop:nv_evals");\n ')
    num_braces += 1
    setup_nv_targets = '\n //int _outputsX = int(ceil((dic[\'imgSize\'] - dic[\'start\'] - dic[\'sizeX\']) / float(dic[\'stride\']))) + 1;\n int _outputsX = ((int)(ceil((imgSizeY - %(start)s - %(ds)s) / ((float)%(stride)s)))) + 1;\n\n int target_dims [] = {\n img_channels,\n _outputsX,\n _outputsX,\n batch_size };\n\n if (CudaNdarray_prep_output(& %(targets)s, 4, target_dims))\n {\n %(fail)s;\n }\n\n { // setup_nv_target brace # 1\n\n NVMatrix nv_targets(%(targets)s, target_dims[0] * target_dims[1] * target_dims[2],\n target_dims[3], "MaxPoolRop:nv_targets");\n\n '
    num_braces += 1
    do_pool = '\n convLocalPoolR(nv_images, nv_evals, nv_targets, img_channels, %(ds)s,\n %(start)s, %(stride)s, _outputsX, MaxPoolerR());\n '
    braces = '}' * num_braces
    rval = basic_setup + setup_nv_images + setup_nv_targets + do_pool + braces
    start = self.start
    stride = self.stride
    ds = self.ds
    rval = rval % locals()
    return rval
'.. todo:: WRITEME'
def make_thunk(self, node, storage_map, compute_map, no_recycling):
    if not convnet_available():
        raise RuntimeError('Could not compile cuda_convnet')
    return super(MaxPoolRop, self).make_thunk(node, storage_map, compute_map,
                                              no_recycling)
'.. todo:: WRITEME'
def __eq__(self, other):
return ((type(self) == type(other)) and (self.ds == other.ds) and (self.stride == other.stride) and (self.start == other.start))
'.. todo:: WRITEME'
def __hash__(self):
return (((hash(type(self)) ^ hash(self.ds)) ^ hash(self.stride)) ^ hash(self.start))
'.. todo:: WRITEME'
def c_header_dirs(self):
return ([this_dir, config.pthreads.inc_dir] if config.pthreads.inc_dir else [this_dir])
'.. todo:: WRITEME'
def c_headers(self):
return ['nvmatrix.cuh', 'conv_util.cuh']
'.. todo:: WRITEME'
def c_lib_dirs(self):
return ([cuda_convnet_loc, config.pthreads.lib_dir] if config.pthreads.lib_dir else [cuda_convnet_loc])
'.. todo:: WRITEME'
def c_libraries(self):
return (['cuda_convnet', config.pthreads.lib] if config.pthreads.lib else ['cuda_convnet'])
'.. todo:: WRITEME'
def c_code_cache_version(self):
return (1,)
'.. todo:: WRITEME'
def make_node(self, images, maxout, gz):
    images = as_cuda_ndarray_variable(images)
    maxout = as_cuda_ndarray_variable(maxout)
    gz = as_cuda_ndarray_variable(gz)
    assert images.ndim == 4
    assert maxout.ndim == 4
    assert gz.ndim == 4
    try:
        nb_channel = int(get_scalar_constant_value(images.shape[0]))
        assert nb_channel % 16 == 0
    except NotScalarConstantError:
        pass
    return Apply(self, [images, maxout, gz], [images.type()])
'.. todo:: WRITEME'
def c_code(self, node, name, inputs, outputs, sub):
    images, maxout, gz = inputs
    targets, = outputs
    fail = sub['fail']
    num_braces = 0
    if self.copy_non_contiguous:
        raise UnimplementedError()
    else:
        basic_setup = '#define MAXPOOLGRAD_COPY_NON_CONTIGUOUS 0\n'
    setup_nv_images = (self._argument_contiguity_check('images') +
                       '\n if (%(images)s->nd != 4)\n {\n PyErr_Format(PyExc_ValueError,\n "images must have nd=4, got nd=%%i", %(images)s->nd);\n %(fail)s;\n }\n\n { //setup_nv_images brace 1\n\n const int * images_dims = CudaNdarray_HOST_DIMS(%(images)s);\n const int img_channels = images_dims[0];\n const int imgSizeY = images_dims[1];\n const int imgSizeX = images_dims[2];\n const int batch_size = images_dims[3];\n\n if(imgSizeY != imgSizeX){\n PyErr_Format(PyExc_ValueError,\n "images must be square(dims[1] == dims[2]). Shape (%%i,%%i,%%i,%%i)",\n img_channels, imgSizeY, imgSizeX, batch_size);\n %(fail)s;\n }\n if(%(ds)s > imgSizeY){\n PyErr_Format(PyExc_ValueError,\n "ds(%%d) must be <= imgSizeX(%%d) and imgSizeY(%%d).",\n %(ds)s, imgSizeX, imgSizeY);\n %(fail)s;\n }\n\n NVMatrix nv_images(%(images)s, img_channels * imgSizeY * imgSizeX, batch_size,\n "MaxPool:nv_images");\n ')
    num_braces += 1
    setup_nv_maxout = (self._argument_contiguity_check('maxout') +
                       '\n if (%(maxout)s->nd != 4)\n {\n PyErr_Format(PyExc_ValueError,\n "maxout must have nd=4, got nd=%%i", %(maxout)s->nd);\n %(fail)s;\n }\n\n { //setup_nv_maxout brace 1\n\n const int * maxout_dims = CudaNdarray_HOST_DIMS(%(maxout)s);\n const int maxout_channels = maxout_dims[0];\n const int maxoutSizeY = maxout_dims[1];\n const int maxoutSizeX = maxout_dims[2];\n\n if(maxoutSizeY != maxoutSizeX){\n PyErr_Format(PyExc_ValueError,\n "maxout must be square(dims[1] == dims[2])."\n " Shape (%%i,%%i,%%i,%%i)",\n maxout_channels, maxoutSizeY, maxoutSizeX, batch_size);\n %(fail)s;\n }\n if(img_channels != maxout_channels){\n PyErr_Format(PyExc_ValueError,\n "img_channels(%%d) should be equal to maxout_channels(%%d).",\n img_channels, maxout_channels);\n %(fail)s;\n }\n if(maxout_dims[3] != batch_size){\n PyErr_Format(PyExc_ValueError,\n "batch_size(%%d) should be equal to maxout_dims[3](%%d)",\n batch_size, maxout_dims[3]);\n %(fail)s;\n }\n\n NVMatrix nv_maxout(%(maxout)s, img_channels * maxoutSizeY * maxoutSizeX,\n batch_size, "MaxPool:nv_maxout");\n ')
    num_braces += 1
    setup_nv_gz = (self._argument_contiguity_check('gz') +
                   '\n if (%(gz)s->nd != 4)\n {\n PyErr_Format(PyExc_ValueError,\n "gz must have nd=4, got nd=%%i", %(gz)s->nd);\n %(fail)s;\n }\n if (CudaNdarray_HOST_DIMS(%(gz)s)[0] %% 16 != 0)\n {\n PyErr_Format(PyExc_ValueError,\n "gz must have a number of channels that is a multiple of 16. Got %%d",\n CudaNdarray_HOST_DIMS(%(gz)s)[0]);\n %(fail)s;\n }\n\n { //setup_nv_gz brace 1\n\n const int * gz_dims = CudaNdarray_HOST_DIMS(%(gz)s);\n const int gz_channels = gz_dims[0];\n const int gzSizeY = gz_dims[1];\n const int gzSizeX = gz_dims[2];\n\n if(maxout_dims[0] != gz_dims[0] ||\n maxout_dims[1] != gz_dims[1] ||\n maxout_dims[2] != gz_dims[2] ||\n maxout_dims[3] != gz_dims[3]){\n PyErr_Format(PyExc_ValueError,\n "gz shape(%%d, %%d, %%d, %%d) must be the same"\n " as maxout(%%d, %%d, %%d, %%d)",\n maxout_dims[0], maxout_dims[1], maxout_dims[2], maxout_dims[3],\n gz_dims[0], gz_dims[1], gz_dims[2], gz_dims[3]);\n %(fail)s;\n }\n\n NVMatrix nv_gz(%(gz)s, img_channels * maxoutSizeY * maxoutSizeX,\n batch_size, "MaxPool:nv_gz");\n ')
    num_braces += 1
    setup_nv_targets = '\n //int _outputsX = int(ceil((dic[\'imgSize\'] - dic[\'start\'] - dic[\'sizeX\']) / float(dic[\'stride\']))) + 1;\n int _outputsX = ((int)(ceil((imgSizeY - %(start)s - %(ds)s) / ((float)%(stride)s)))) + 1;\n\n int target_dims [] = {\n img_channels,\n imgSizeX,\n imgSizeY,\n batch_size };\n\n if (CudaNdarray_prep_output(& %(targets)s, 4, target_dims))\n {\n %(fail)s;\n }\n\n { // setup_nv_target brace # 1\n\n NVMatrix nv_targets(%(targets)s,\n target_dims[0] * target_dims[1] * target_dims[2],\n target_dims[3], "MaxPool:nv_targets");\n\n '
    num_braces += 1
    undo_pool = '\n convLocalMaxUndo(nv_images, nv_gz, nv_maxout, nv_targets,\n %(ds)s, %(start)s, %(stride)s, _outputsX, 0, 1);\n '
    braces = '}' * num_braces
    rval = (basic_setup + setup_nv_images + setup_nv_maxout + setup_nv_gz +
            setup_nv_targets + undo_pool + braces)
    start = self.start
    stride = self.stride
    ds = self.ds
    rval = rval % locals()
    return rval
'.. todo:: WRITEME'
def make_thunk(self, node, storage_map, compute_map, no_recycling):
    if not convnet_available():
        raise RuntimeError('Could not compile cuda_convnet')
    return super(MaxPoolGrad, self).make_thunk(node, storage_map, compute_map,
                                               no_recycling)
'.. todo:: WRITEME

Parameters
----------
hid_acts : WRITEME
filters : WRITEME
output_shape : 2-element TensorVariable, optional
    The spatial shape of the image'
def make_node(self, hid_acts, filters, output_shape=None):
    if not isinstance(hid_acts.type, CudaNdarrayType):
        raise TypeError('ImageActs: expected hid_acts.type to be '
                        'CudaNdarrayType, got ' + str(hid_acts.type))
    if not isinstance(filters.type, CudaNdarrayType):
        raise TypeError('ImageActs: expected filters.type to be '
                        'CudaNdarrayType, got ' + str(filters.type))
    if output_shape is None:
        if self.stride != 1:
            raise ValueError('You must specify an output_shape for ImageActs '
                             'if the stride is not 1.')
        hid_shape = hid_acts.shape[1:3]
        kernel_shape = filters.shape[1:3]
        output_shape = hid_shape + kernel_shape - 2 * self.pad - 1
    assert hid_acts.ndim == 4
    assert filters.ndim == 4
    channels_broadcastable = filters.type.broadcastable[3]
    batch_broadcastable = hid_acts.type.broadcastable[3]
    rows_broadcastable = False
    cols_broadcastable = False
    targets_broadcastable = (channels_broadcastable, rows_broadcastable,
                             cols_broadcastable, batch_broadcastable)
    targets_type = CudaNdarrayType(broadcastable=targets_broadcastable)
    targets = targets_type()
    return Apply(self, [hid_acts, filters, output_shape], [targets])
'Useful with the hack in profilemode to print the MFlops'
def flops(self, inputs, outputs):
    hid_acts, filters, output_shape = inputs
    out, = outputs
    assert hid_acts[0] == filters[3]
    flops = (hid_acts[3] * filters[0] * hid_acts[0] *
             filters[1] * filters[2] * hid_acts[1] * hid_acts[2] * 2)
    return flops
'.. todo:: WRITEME'
def connection_pattern(self, node):
return [[1], [1], [0]]
'.. todo:: WRITEME'
def grad(self, inputs, g_outputs):
    hid_acts, filters, output_shape = inputs
    g_images, = g_outputs
    g_images = as_cuda_ndarray_variable(g_images)
    assert not isinstance(g_images, list)
    global FilterActs
    global WeightActs
    if FilterActs is None:
        from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
        from pylearn2.sandbox.cuda_convnet.weight_acts import WeightActs
    g_filters = WeightActs(stride=self.stride,
                           partial_sum=self.partial_sum,
                           pad=self.pad)(g_images, hid_acts,
                                         filters.shape[1:3])[0]
    assert not isinstance(g_filters, list)
    g_hid_acts = FilterActs(stride=self.stride, pad=self.pad,
                            partial_sum=self.partial_sum)(g_images, filters)
    return [g_hid_acts, g_filters, DisconnectedType()()]
'.. todo:: WRITEME'
def c_code(self, node, name, inputs, outputs, sub):
    hid_acts, filters, output_shape = inputs
    targets, = outputs
    fail = sub['fail']
    basic_setup = '\n #define scaleTargets 0\n #define scaleOutput 1\n '
    if self.dense_connectivity:
        basic_setup += '\n #define numGroups 1\n '
    basic_setup += '\n #define paddingStart (-%d)\n ' % self.pad
    basic_setup += '\n #define moduleStride %d\n ' % self.stride
    if self.copy_non_contiguous:
        raise UnimplementedError()
    else:
        basic_setup += '#define IMAGEACTS_COPY_NON_CONTIGUOUS 0\n'
    num_braces = 0
    setup_nv_hid_acts = (self._argument_contiguity_check('hid_acts') +
                         '\n if (%(hid_acts)s->nd != 4)\n {\n PyErr_Format(PyExc_ValueError,\n "hid_acts must have nd=4, got nd=%%i", %(hid_acts)s->nd);\n %(fail)s;\n }\n\n { //setup_nv_hid_acts brace 1\n const int *hid_act_dims = CudaNdarray_HOST_DIMS(%(hid_acts)s);\n const int numFilters = hid_act_dims[0];\n const int hidActsSizeY = hid_act_dims[1];\n const int hidActsSizeX = hid_act_dims[2];\n //printf("hidActs shape: %%d %%d\\n", hidActsSizeY, hidActsSizeX);\n const int batch_size = hid_act_dims[3];\n NVMatrix nv_hid_acts(%(hid_acts)s, numFilters * hidActsSizeY *\n hidActsSizeX, batch_size, "image_acts:nv_hid_acts");\n int img_channels = -1;\n ')
    num_braces += 1
    setup_nv_filters = (self._argument_contiguity_check('filters') +
                        '\n if (%(filters)s->nd != 4)\n {\n PyErr_Format(PyExc_ValueError,\n "filters must have nd=4, got nd=%%i", %(filters)s->nd);\n %(fail)s;\n }\n\n { // setup_nv_filters brace 1\n const int * filters_dims = CudaNdarray_HOST_DIMS(%(filters)s);\n const int filter_channels = filters_dims[0];\n const int filter_rows = filters_dims[1];\n const int filter_cols = filters_dims[2];\n const int num_filters = filters_dims[3];\n\n if ((num_filters %% (numGroups * 16)) != 0)\n {\n PyErr_Format(PyExc_ValueError,\n "Each group must have a multiple of 16 channels, but num_filters %%%% (numGroups * 16) = %%d %%%% ( %%d * 16) = %%d.",\n num_filters, numGroups, num_filters %% (numGroups * 16));\n %(fail)s;\n }\n\n if (filter_rows != filter_cols)\n {\n PyErr_Format(PyExc_ValueError,\n "filter must be square, but have shape (%%d, %%d).",\n filter_rows, filter_cols);\n %(fail)s;\n }\n else if (moduleStride > filter_rows) {\n PyErr_Format(PyExc_ValueError,\n "stride %%d greater than filter size (%%d, %%d)",\n moduleStride, filter_rows, filter_cols);\n %(fail)s;\n }\n\n { // setup_nv_filters brace 2\n\n\n NVMatrix nv_filters(%(filters)s, filter_channels * filter_rows *\n filter_cols, num_filters, "img_acts:nv_filters");\n ')
    num_braces += 2
    setup_nv_targets = '\n\n #define numModulesY hid_act_dims[1]\n #define numModulesX hid_act_dims[2]\n npy_intp *shape_dims = PyArray_DIMS(%(output_shape)s);\n npy_intp target_rows, target_cols;\n PyArrayObject *casted_shape;\n PyArray_Descr *intp_dtype;\n if (PyArray_NDIM(%(output_shape)s) != 1) {\n PyErr_Format(PyExc_ValueError,\n "output shape must be a vector, got %%d-tensor",\n PyArray_NDIM(%(output_shape)s));\n %(fail)s;\n }\n else if (shape_dims[0] != 2)\n {\n PyErr_Format(PyExc_ValueError,\n "output shape must be length 2, got %%d",\n (int)shape_dims[0]);\n %(fail)s;\n }\n else if ((PyArray_DESCR(%(output_shape)s))->kind != \'i\' &&\n (PyArray_DESCR(%(output_shape)s))->kind != \'u\')\n {\n PyErr_SetString(PyExc_TypeError,\n "output shape must have integer or uint dtype");\n %(fail)s;\n }\n intp_dtype = PyArray_DescrFromType(NPY_INTP);\n casted_shape = (PyArrayObject *)PyArray_CastToType(%(output_shape)s,\n intp_dtype, 0);\n target_rows = *((npy_intp *)PyArray_GETPTR1(casted_shape, 0));\n target_cols = *((npy_intp *)PyArray_GETPTR1(casted_shape, 1));\n {\n int target_dims [] = {\n filter_channels,\n target_rows,\n target_cols,\n batch_size };\n #define filterSize filter_rows\n #define MAX_ROWS (paddingStart + (numModulesY-1) * moduleStride + filterSize)\n if ((target_rows > MAX_ROWS)\n || (paddingStart + (numModulesX-1) * moduleStride + filterSize < target_cols))\n {\n PyErr_Format(PyExc_ValueError, "pylearn2.sandbox.cuda_convnet.image_acts.ImageActs: incompatible target image size (%%d, %%d), maximum (%%d, %%d)",\n (int)target_rows, (int)target_cols,\n (int)MAX_ROWS,\n (int)(paddingStart + (numModulesX-1) * moduleStride + filterSize));\n %(fail)s;\n }\n if (CudaNdarray_prep_output(& %(targets)s, 4, target_dims))\n {\n %(fail)s;\n }\n\n { // setup_nv_filters brace # 1\n const int imgSizeY = (int)target_rows;\n const int imgSizeX = (int)target_cols;\n\n NVMatrix nv_targets(%(targets)s, target_dims[0] * target_dims[1]\n * target_dims[2], target_dims[3], "image_acts: nv_targets");\n\n '
    num_braces += 2
    do_convolution = '\n convImgActs(nv_hid_acts, nv_filters, nv_targets,\n imgSizeY, imgSizeX, numModulesY,\n paddingStart, moduleStride, filter_channels,\n numGroups);\n '
    braces = '}' * num_braces
    rval = (basic_setup + setup_nv_hid_acts + setup_nv_filters +
            setup_nv_targets + do_convolution + braces)
    rval = rval % locals()
    return rval
'.. todo:: WRITEME'
def c_code_cache_version(self):
return (9,)
'.. todo:: WRITEME'
def __eq__(self, other):
return ((type(self) == type(other)) and (self.ds == other.ds) and (self.stride == other.stride) and (self.start == other.start))
'.. todo:: WRITEME'
def __hash__(self):
return (((hash(type(self)) ^ hash(self.ds)) ^ hash(self.stride)) ^ hash(self.start))
'.. todo:: WRITEME'
def c_header_dirs(self):
return [this_dir]
'.. todo:: WRITEME'
def c_headers(self):
return ['nvmatrix.cuh', 'conv_util.cuh']
'.. todo:: WRITEME'
def c_lib_dirs(self):
return [cuda_convnet_loc]
'.. todo:: WRITEME'
def c_libraries(self):
return ['cuda_convnet']
'.. todo:: WRITEME'
def c_code_cache_version(self):
return (1,)
'.. todo:: WRITEME'
def _argument_contiguity_check(self, arg_name):
return ('\n if (!CudaNdarray_is_c_contiguous(%%(%(arg_name)s)s))\n {\n if (!(%(class_name_caps)s_COPY_NON_CONTIGUOUS)) {\n PyErr_SetString(PyExc_ValueError,\n "%(class)s: %(arg_name)s must be C contiguous");\n %%(fail)s;\n }\n }\n ' % {'class': self.__class__.__name__, 'arg_name': arg_name, 'class_name_caps': self.__class__.__name__.upper()})
'.. todo:: WRITEME'
def make_node(self, images):
    images = as_cuda_ndarray_variable(images)
    assert images.ndim == 4
    channels_broadcastable = images.type.broadcastable[0]
    batch_broadcastable = images.type.broadcastable[3]
    rows_broadcastable = False
    cols_broadcastable = False
    targets_broadcastable = (channels_broadcastable, rows_broadcastable,
                             cols_broadcastable, batch_broadcastable)
    targets_type = CudaNdarrayType(broadcastable=targets_broadcastable)
    targets = targets_type()
    seed = self.seed_state
    seed = as_cuda_ndarray_variable(seed)
    return Apply(self, [images, seed], [targets])
'.. todo:: WRITEME'
def c_code(self, node, name, inputs, outputs, sub):
    images, seed = inputs
    targets, = outputs
    fail = sub['fail']
    num_braces = 0
    if self.copy_non_contiguous:
        raise UnimplementedError()
    else:
        basic_setup = '#define STOCHASTICMAXPOOL_COPY_NON_CONTIGUOUS 0\n'
    setup_nv_images = (self._argument_contiguity_check('images') +
                       '\n if (%(images)s->nd != 4)\n {\n PyErr_Format(PyExc_ValueError,\n "images must have nd=4, got nd=%%i", %(images)s->nd);\n %(fail)s;\n }\n\n { //setup_nv_images brace 1\n\n const int * images_dims = CudaNdarray_HOST_DIMS(%(images)s);\n const int img_channels = images_dims[0];\n const int imgSizeY = images_dims[1];\n const int imgSizeX = images_dims[2];\n const int batch_size = images_dims[3];\n\n if(imgSizeY != imgSizeX){\n PyErr_Format(PyExc_ValueError,\n "images must be square(dims[1] == dims[2]). Shape (%%i,%%i,%%i,%%i)",\n img_channels, imgSizeY, imgSizeX, batch_size);\n %(fail)s;\n }\n if(%(ds)s > imgSizeY){\n PyErr_Format(PyExc_ValueError,\n "ds(%%d) must be <= imgSizeX(%%d) and imgSizeY(%%d).",\n %(ds)s, imgSizeX, imgSizeY);\n %(fail)s;\n }\n if(%(start)s >= imgSizeX){\n PyErr_Format(PyExc_ValueError,\n "start is %%d but must be smaller than the image size of %%d x %%d.",\n %(start)s, imgSizeX, imgSizeY);\n %(fail)s;\n }\n\n NVMatrix nv_images(%(images)s, img_channels * imgSizeY * imgSizeX, batch_size,\n "MaxPool:nv_images");\n\n //int * seed = CudaNdarray_HOST_DIMS%(seed)s;\n float * seed = CudaNdarray_DEV_DATA(%(seed)s);\n //int * seed = %(seed)s;\n ')
    num_braces += 1
    setup_nv_targets = '\n //int _outputsX = int(ceil((dic[\'imgSize\'] - dic[\'start\'] - dic[\'sizeX\']) / float(dic[\'stride\']))) + 1;\n int _outputsX = ((int)(ceil((imgSizeY - %(start)s - %(ds)s) / ((float)%(stride)s)))) + 1;\n\n int target_dims [] = {\n img_channels,\n _outputsX,\n _outputsX,\n batch_size };\n\n if (CudaNdarray_prep_output(& %(targets)s, 4, target_dims))\n {\n %(fail)s;\n }\n\n { // setup_nv_target brace # 1\n\n NVMatrix nv_targets(%(targets)s, target_dims[0] * target_dims[1] * target_dims[2],\n target_dims[3], "MaxPool:nv_targets");\n\n '
    num_braces += 1
    do_pool = '\n convLocalStochasticMaxPool(nv_images, nv_targets, img_channels, %(ds)s,\n %(start)s, %(stride)s, _outputsX, MaxPooler(), seed);\n '
    braces = '}' * num_braces
    rval = basic_setup + setup_nv_images + setup_nv_targets + do_pool + braces
    start = self.start
    stride = self.stride
    ds = self.ds
    rval = rval % locals()
    return rval
'.. todo:: WRITEME'
def grad(self, inp, grads):
    x, seed = inp
    gz, = grads
    gz = gpu_contiguous(gz)
    maxout = self(x)
    return [MaxPoolGrad(self.ds, self.stride, self.start)(x, maxout, gz),
            zeros_like(seed)]
'.. todo:: WRITEME'
def make_thunk(self, *args, **kwargs):
    if not convnet_available():
        raise RuntimeError('Could not compile cuda_convnet')
    return super(StochasticMaxPool, self).make_thunk(*args, **kwargs)
'.. todo:: WRITEME'
def __eq__(self, other):
return ((type(self) == type(other)) and (self.ds == other.ds) and (self.stride == other.stride) and (self.start == other.start))
'.. todo:: WRITEME'
def __hash__(self):
return (((hash(type(self)) ^ hash(self.ds)) ^ hash(self.stride)) ^ hash(self.start))
'.. todo:: WRITEME'
def c_header_dirs(self):
return [this_dir]
'.. todo:: WRITEME'
def c_headers(self):
return ['nvmatrix.cuh', 'conv_util.cuh']
'.. todo:: WRITEME'
def c_lib_dirs(self):
return [cuda_convnet_loc]
'.. todo:: WRITEME'
def c_libraries(self):
return ['cuda_convnet']
'.. todo:: WRITEME'
def c_code_cache_version(self):
return (1,)
'.. todo:: WRITEME'
def _argument_contiguity_check(self, arg_name):
return ('\n if (!CudaNdarray_is_c_contiguous(%%(%(arg_name)s)s))\n {\n if (!(%(class_name_caps)s_COPY_NON_CONTIGUOUS)) {\n PyErr_SetString(PyExc_ValueError,\n "%(class)s: %(arg_name)s must be C contiguous");\n %%(fail)s;\n }\n }\n ' % {'class': self.__class__.__name__, 'arg_name': arg_name, 'class_name_caps': self.__class__.__name__.upper()})
'.. todo:: WRITEME'
def make_node(self, images):
    images = as_cuda_ndarray_variable(images)
    assert images.ndim == 4
    channels_broadcastable = images.type.broadcastable[0]
    batch_broadcastable = images.type.broadcastable[3]
    rows_broadcastable = False
    cols_broadcastable = False
    targets_broadcastable = (channels_broadcastable, rows_broadcastable,
                             cols_broadcastable, batch_broadcastable)
    targets_type = CudaNdarrayType(broadcastable=targets_broadcastable)
    targets = targets_type()
    return Apply(self, [images], [targets])
'.. todo:: WRITEME'
def c_code(self, node, name, inputs, outputs, sub):
    images, = inputs
    targets, = outputs
    fail = sub['fail']
    num_braces = 0
    if self.copy_non_contiguous:
        raise UnimplementedError()
    else:
        basic_setup = '#define WEIGHTEDMAXPOOL_COPY_NON_CONTIGUOUS 0\n'
    setup_nv_images = (self._argument_contiguity_check('images') +
                       '\n if (%(images)s->nd != 4)\n {\n PyErr_Format(PyExc_ValueError,\n "images must have nd=4, got nd=%%i", %(images)s->nd);\n %(fail)s;\n }\n\n { //setup_nv_images brace 1\n\n const int * images_dims = CudaNdarray_HOST_DIMS(%(images)s);\n const int img_channels = images_dims[0];\n const int imgSizeY = images_dims[1];\n const int imgSizeX = images_dims[2];\n const int batch_size = images_dims[3];\n\n if(imgSizeY != imgSizeX){\n PyErr_Format(PyExc_ValueError,\n "images must be square(dims[1] == dims[2]). Shape (%%i,%%i,%%i,%%i)",\n img_channels, imgSizeY, imgSizeX, batch_size);\n %(fail)s;\n }\n if(%(ds)s > imgSizeY){\n PyErr_Format(PyExc_ValueError,\n "ds(%%d) must be <= imgSizeX(%%d) and imgSizeY(%%d).",\n %(ds)s, imgSizeX, imgSizeY);\n %(fail)s;\n }\n if(%(start)s >= imgSizeX){\n PyErr_Format(PyExc_ValueError,\n "start is %%d but must be smaller than the image size of %%d x %%d.",\n %(start)s, imgSizeX, imgSizeY);\n %(fail)s;\n }\n\n NVMatrix nv_images(%(images)s, img_channels * imgSizeY * imgSizeX, batch_size,\n "MaxPool:nv_images");\n ')
    num_braces += 1
    setup_nv_targets = '\n //int _outputsX = int(ceil((dic[\'imgSize\'] - dic[\'start\'] - dic[\'sizeX\']) / float(dic[\'stride\']))) + 1;\n int _outputsX = ((int)(ceil((imgSizeY - %(start)s - %(ds)s) / ((float)%(stride)s)))) + 1;\n\n int target_dims [] = {\n img_channels,\n _outputsX,\n _outputsX,\n batch_size };\n\n if (CudaNdarray_prep_output(& %(targets)s, 4, target_dims))\n {\n %(fail)s;\n }\n\n { // setup_nv_target brace # 1\n\n NVMatrix nv_targets(%(targets)s, target_dims[0] * target_dims[1] * target_dims[2],\n target_dims[3], "MaxPool:nv_targets");\n\n '
    num_braces += 1
    do_pool = '\n convLocalWeightedPool(nv_images, nv_targets, img_channels, %(ds)s,\n %(start)s, %(stride)s, _outputsX, MaxPooler());\n '
    braces = '}' * num_braces
    rval = basic_setup + setup_nv_images + setup_nv_targets + do_pool + braces
    start = self.start
    stride = self.stride
    ds = self.ds
    rval = rval % locals()
    return rval
'.. todo:: WRITEME'
def grad(self, inp, grads):
raise NotImplementedError()
'.. todo:: WRITEME'
def make_thunk(self, node, storage_map, compute_map, no_recycling):
    if not convnet_available():
        raise RuntimeError('Could not compile cuda_convnet')
    return super(WeightedMaxPool, self).make_thunk(node, storage_map,
                                                   compute_map, no_recycling)
'.. todo:: WRITEME'
def make_node(self, images, hid_grads, output_shape):
    if not isinstance(images.type, CudaNdarrayType):
        raise TypeError('WeightActs: expected images.type to be '
                        'CudaNdarrayType, got ' + str(images.type))
    if not isinstance(hid_grads.type, CudaNdarrayType):
        raise TypeError('WeightActs: expected hid_grads.type to be '
                        'CudaNdarrayType, got ' + str(hid_grads.type))
    assert images.ndim == 4
    assert hid_grads.ndim == 4
    input_channels_broadcastable = images.type.broadcastable[0]
    filter_rows_broadcastable = False
    filter_cols_broadcastable = False
    output_channels_broadcastable = hid_grads.type.broadcastable[0]
    weights_grads_type = CudaNdarrayType((input_channels_broadcastable,
                                          filter_rows_broadcastable,
                                          filter_cols_broadcastable,
                                          output_channels_broadcastable))
    partial_sums_type = CudaNdarrayType((False,) * 5)
    weights_grads = weights_grads_type()
    partial_sums = partial_sums_type()
    return Apply(self, [images, hid_grads, output_shape],
                 [weights_grads, partial_sums])
'Useful with the hack in profilemode to print the MFlops'
def flops(self, inputs, outputs):
    images, kerns, output_shape = inputs
    out, partial = outputs
    assert images[3] == kerns[3]
    flops = kerns[1] * kerns[2] * 2
    flops *= out[1] * out[2]
    flops *= images[3] * kerns[0] * images[0]
    return flops
'.. todo:: WRITEME'
def c_headers(self):
    headers = super(WeightActs, self).c_headers()
    headers.append('weight_acts.cuh')
    return headers
'.. todo:: WRITEME'
def c_code(self, node, name, inputs, outputs, sub):
    partial_sum = self.partial_sum if self.partial_sum is not None else 0
    images, hid_grads, output_shape = inputs
    weights_grads, partialsum_storage = outputs
    fail = sub['fail']
    pad = self.pad
    basic_setup = '\n #define scaleTargets 0\n #define scaleOutput 1\n '
    if self.dense_connectivity:
        basic_setup += '\n #define numGroups 1\n '
    basic_setup += '\n #define paddingStart (-%(pad)d)\n const int *hid_grads_dims = CudaNdarray_HOST_DIMS(%(hid_grads)s);\n const int hidGradsSizeY = hid_grads_dims[1];\n const int hidGradsSizeX = hid_grads_dims[2];\n const int numModules = hidGradsSizeX * hidGradsSizeY;\n int partialSum = %(partial_sum)d > 0 ? %(partial_sum)d : numModules;\n\n // using this expression instead of numModules %% partialSum\n // because nvcc+msvc9 yield a strange behaviour when using %%\n if ( numModules - (numModules / partialSum) * partialSum != 0) {\n PyErr_Format(PyExc_ValueError,\n "partialSum must divide numModules, but partialSum=%%d and "\n "numModules=%%d", partialSum, numModules);\n %(fail)s;\n }\n '
    basic_setup += '\n #define moduleStride %d\n ' % self.stride
    if self.copy_non_contiguous:
        raise UnimplementedError()
    else:
        basic_setup += '#define WEIGHTACTS_COPY_NON_CONTIGUOUS 0\n'
    num_braces = 0
    setup_nv_images = (self._argument_contiguity_check('images') +
                       '\n if (%(images)s->nd != 4)\n {\n PyErr_Format(PyExc_ValueError,\n "images must have nd=4, got nd=%%i", %(images)s->nd);\n %(fail)s;\n }\n { //setup_nv_images brace 1\n const int * images_dims = CudaNdarray_HOST_DIMS(%(images)s);\n const int img_channels = images_dims[0];\n if (img_channels > 3 && img_channels %% 4 != 0)\n {\n PyErr_Format(PyExc_ValueError,\n "images must have 3 or fewer channels, or have a multiple of 4 channels, got %%i",\n img_channels);\n %(fail)s;\n }\n\n { //setup_nv_images brace 2\n const int * hid_grads_dims = CudaNdarray_HOST_DIMS(%(hid_grads)s);\n const int imgSizeY = images_dims[1];\n const int imgSizeX = images_dims[2];\n const int batch_size = images_dims[3];\n NVMatrix nv_images(%(images)s, img_channels * imgSizeY * imgSizeX, batch_size, "weight_acts: nv_images");\n ')
    num_braces += 2
    setup_nv_hid_grads = (self._argument_contiguity_check('hid_grads') +
                          '\n if (%(hid_grads)s->nd != 4)\n {\n PyErr_Format(PyExc_ValueError,\n "hid_grads must have nd=4, got nd=%%i", %(hid_grads)s->nd);\n %(fail)s;\n }\n\n { //setup_nv_hid_grads brace 1\n const int numFilters = hid_grads_dims[0];\n const int batch_size = hid_grads_dims[3];\n NVMatrix nv_hid_grads(%(hid_grads)s, numFilters * hidGradsSizeY *\n hidGradsSizeX, batch_size, "weight_acts:nv_hid_grads");\n ')
    num_braces += 1
    setup_nv_weights_grads = '\n int filters_dims[4];\n // filters: (input channels, filter rows, filter cols, output channels)\n\n npy_intp *shape_dims = PyArray_DIMS(%(output_shape)s);\n npy_intp target_rows, target_cols;\n PyArrayObject *casted_shape;\n PyArray_Descr *intp_dtype;\n if (PyArray_NDIM(%(output_shape)s) != 1) {\n PyErr_Format(PyExc_ValueError,\n "output shape must be a vector, got %%d-tensor",\n PyArray_NDIM(%(output_shape)s));\n %(fail)s;\n }\n else if (shape_dims[0] != 2)\n {\n PyErr_Format(PyExc_ValueError,\n "output shape must be length 2, got %%d",\n (int)shape_dims[0]);\n %(fail)s;\n }\n else if ((PyArray_DESCR(%(output_shape)s))->kind != \'i\' &&\n (PyArray_DESCR(%(output_shape)s))->kind != \'u\')\n {\n PyErr_SetString(PyExc_TypeError,\n "output shape must have integer or uint dtype");\n %(fail)s;\n }\n intp_dtype = PyArray_DescrFromType(NPY_INTP);\n casted_shape = (PyArrayObject *)PyArray_CastToType(%(output_shape)s,\n intp_dtype, 0);\n target_rows = *((npy_intp *)PyArray_GETPTR1(casted_shape, 0));\n target_cols = *((npy_intp *)PyArray_GETPTR1(casted_shape, 1));\n filters_dims[0] = img_channels;\n filters_dims[1] = target_rows;\n filters_dims[2] = target_cols;\n if (filters_dims[1] != filters_dims[2])\n {\n PyErr_Format(PyExc_ValueError,\n "filter must be square, but have shape (%%d, %%d).",\n filters_dims[1], filters_dims[2]);\n %(fail)s;\n }\n else if (moduleStride > filters_dims[1]) {\n PyErr_Format(PyExc_ValueError,\n "stride %%d greater than filter size (%%d, %%d)",\n moduleStride, filters_dims[1], filters_dims[2]);\n %(fail)s;\n }\n filters_dims[3] = numFilters;\n const int filterSize = filters_dims[1];\n int partialsum_storage_dims[5];\n for (int i = 1; i < 5; i++)\n {\n partialsum_storage_dims[i] = filters_dims[i - 1];\n }\n partialsum_storage_dims[0] = numModules / partialSum;\n if (partialSum != numModules &&\n CudaNdarray_prep_output(&%(partialsum_storage)s, 5,\n partialsum_storage_dims))\n {\n %(fail)s;\n }\n\n for (int i = 0; i < 4; i++)\n {\n if (filters_dims[i] <= 0)\n {\n printf("filters_dims[%%d] = %%d\\n", i, filters_dims[i]);\n assert(false);\n }\n }\n if (CudaNdarray_prep_output(& %(weights_grads)s, 4, filters_dims))\n {\n %(fail)s;\n }\n\n { // setup_nv_weights_grad brace # 1\n\n NVMatrix nv_weights_grads(%(weights_grads)s, filters_dims[0] * filterSize\n * filterSize, numFilters,\n "weight_acts:nv_weights_grads");\n\n '
    num_braces += 1
    run_kernel = '\n\n if (partialSum == numModules)\n _weightActs(nv_images, nv_hid_grads, nv_weights_grads,\n imgSizeY, hidGradsSizeY, hidGradsSizeX, filterSize,\n paddingStart, moduleStride, img_channels, numGroups,\n partialSum, 0, 1);\n else {\n NVMatrix nv_partialsum(%(partialsum_storage)s, (numModules / partialSum) *\n filters_dims[0] * filterSize * filterSize, numFilters,\n "weight_acts: nv_partialsum");\n _weightActs(nv_images, nv_hid_grads, nv_partialsum,\n imgSizeY, hidGradsSizeY, hidGradsSizeX, filterSize,\n paddingStart, moduleStride, img_channels, numGroups,\n partialSum, 0, 1);\n nv_partialsum.reshape((numModules / partialSum), filters_dims[0] * filterSize * filterSize * numFilters);\n\n // sum out axis 0 of nv_partialsum\n #define AXIS 0\n // scale the contents of nv_weights_grads by 0\n // i.e., clear out its pre-existing content\n #define SCALE_THIS 0\n // scale the new sum by 1, i.e., don\'t do any scaling\n #define SCALE_SUM 1\n nv_weights_grads.addSum(nv_partialsum, AXIS, SCALE_THIS, SCALE_SUM);\n }\n '
    braces = '}' * num_braces
    rval = (basic_setup + setup_nv_images + setup_nv_hid_grads +
            setup_nv_weights_grads + run_kernel + braces)
    rval = render_string(rval, locals())
    return rval
'.. todo:: WRITEME'
def c_code_cache_version(self):
return (7,)
'If a layer receives a SequenceSpace it should receive a tuple of (data, mask). For layers that cannot deal with this we do the following: - Unpack (data, mask) and perform the fprop with the data only - Add the mask back just before returning, so that the next layer receives a tuple again Besides the mask, we also need to take care of reshaping the data. This reshaping needs to happen even if we receive SequenceDataSpace data instead of SequenceSpace data. The format is (time, batch, data, ..., data), which needs to be reshaped to (time * batch, data, ..., data) before calling the original fprop, after which we need to reshape it back. Parameters fprop : method The fprop method to be wrapped'
@classmethod
def fprop_wrapper(cls, name, fprop):
    @functools.wraps(fprop)
    def outer(self, state_below, return_all=False):
        if self._requires_reshape:
            if self._requires_unmask:
                state_below, mask = state_below
            if isinstance(state_below, tuple):
                ndim = state_below[0].ndim
                reshape_size = state_below[0].shape
            else:
                ndim = state_below.ndim
                reshape_size = state_below.shape
            if ndim > 2:
                if isinstance(state_below, tuple):
                    inp_shape = [[state_below[j].shape[0] *
                                  state_below[j].shape[1]] +
                                 [state_below[j].shape[i]
                                  for i in xrange(2, state_below[j].ndim)]
                                 for j in xrange(len(state_below))]
                    reshaped_below = ()
                    for i in xrange(len(state_below)):
                        reshaped_below += (state_below[i].reshape(inp_shape[i]),)
                else:
                    inp_shape = ([state_below.shape[0] * state_below.shape[1]] +
                                 [state_below.shape[i]
                                  for i in xrange(2, state_below.ndim)])
                    reshaped_below = state_below.reshape(inp_shape)
                reshaped = fprop(self, reshaped_below)
                if isinstance(reshaped, tuple):
                    output_shape = [[reshape_size[0], reshape_size[1]] +
                                    [reshaped[j].shape[i]
                                     for i in xrange(1, reshaped[j].ndim)]
                                    for j in xrange(len(reshaped))]
                    state = ()
                    for i in xrange(len(reshaped)):
                        state += (reshaped[i].reshape(output_shape[i]),)
                else:
                    output_shape = ([reshape_size[0], reshape_size[1]] +
                                    [reshaped.shape[i]
                                     for i in xrange(1, reshaped.ndim)])
                    state = reshaped.reshape(output_shape)
            else:
                state = fprop(self, state_below)
            if self._requires_unmask:
                return (state, mask)
            else:
                return state
        elif return_all:
            return fprop(self, state_below, return_all)
        else:
            return fprop(self, state_below)
    return outer
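A minimal numpy sketch of the reshape round-trip the wrapper performs (shapes are illustrative): sequence data of shape (time, batch, dim) is merged to (time * batch, dim) for the wrapped fprop, then restored afterwards.

    import numpy as np

    time, batch, dim = 5, 3, 4
    x = np.random.randn(time, batch, dim)

    # merge the time and batch axes before calling the wrapped fprop
    flat = x.reshape((time * batch, dim))
    y = flat * 2.0                    # stand-in for the wrapped fprop
    # restore the sequence layout afterwards
    out = y.reshape((time, batch, dim))
    assert out.shape == (5, 3, 4)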
'Reshapes and unmasks the data before retrieving the monitoring channels Parameters get_layer_monitoring_channels : method The get_layer_monitoring_channels method to be wrapped'
@classmethod
def get_layer_monitoring_channels_wrapper(cls, name, get_layer_monitoring_channels):
    @functools.wraps(get_layer_monitoring_channels)
    def outer(self, state_below=None, state=None, targets=None):
        if self._requires_reshape and self.__class__.__name__ == name:
            if self._requires_unmask:
                if state_below is not None:
                    state_below, state_below_mask = state_below
                if state is not None:
                    state, state_mask = state
                if targets is not None:
                    targets, targets_mask = targets
            if state_below is not None:
                state_below_shape = ([state_below.shape[0] *
                                      state_below.shape[1]] +
                                     [state_below.shape[i]
                                      for i in xrange(2, state_below.ndim)])
                state_below = state_below.reshape(state_below_shape)
                if self._requires_unmask:
                    state_below = state_below[
                        state_below_mask.flatten().nonzero()]
            if state is not None:
                state_shape = ([state.shape[0] * state.shape[1]] +
                               [state.shape[i]
                                for i in xrange(2, state.ndim)])
                state = state.reshape(state_shape)
                if self._requires_unmask:
                    state = state[state_mask.flatten().nonzero()]
            if targets is not None:
                targets_shape = ([targets.shape[0] * targets.shape[1]] +
                                 [targets.shape[i]
                                  for i in xrange(2, targets.ndim)])
                targets = targets.reshape(targets_shape)
                if self._requires_unmask:
                    targets = targets[targets_mask.flatten().nonzero()]
            return get_layer_monitoring_channels(self, state_below, state,
                                                 targets)
        else:
            return get_layer_monitoring_channels(self, state_below, state,
                                                 targets)
    return outer
'This wraps cost methods by reshaping the tensor (merging the time and batch axes) and then taking out all the masked values before applying the cost method.'
@classmethod
def cost_wrapper(cls, name, cost):
    @functools.wraps(cost)
    def outer(self, Y, Y_hat):
        if self._requires_reshape:
            if self._requires_unmask:
                try:
                    Y, Y_mask = Y
                    Y_hat, Y_hat_mask = Y_hat
                except:
                    log.warning("Lost the mask when wrapping cost. This can "
                                "happen if this function is called from "
                                "within another wrapped function. Most "
                                "likely this won't cause any problem")
                    return cost(self, Y, Y_hat)
            input_shape = ([Y.shape[0] * Y.shape[1]] +
                           [Y.shape[i] for i in xrange(2, Y.ndim)])
            reshaped_Y = Y.reshape(input_shape)
            if isinstance(Y_hat, tuple):
                input_shape = [[Y_hat[j].shape[0] * Y_hat[j].shape[1]] +
                               [Y_hat[j].shape[i]
                                for i in xrange(2, Y_hat[j].ndim)]
                               for j in xrange(len(Y_hat))]
                reshaped_Y_hat = []
                for i in xrange(len(Y_hat)):
                    reshaped_Y_hat.append(Y_hat[i].reshape(input_shape[i]))
                reshaped_Y_hat = tuple(reshaped_Y_hat)
            else:
                input_shape = ([Y_hat.shape[0] * Y_hat.shape[1]] +
                               [Y_hat.shape[i]
                                for i in xrange(2, Y_hat.ndim)])
                reshaped_Y_hat = Y_hat.reshape(input_shape)
            if self._requires_unmask:
                return cost(self, reshaped_Y[Y_mask.flatten().nonzero()],
                            reshaped_Y_hat[Y_mask.flatten().nonzero()])
            return cost(self, reshaped_Y, reshaped_Y_hat)
        else:
            return cost(self, Y, Y_hat)
    return outer
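A minimal numpy sketch of the unmasking step above: flatten the (time, batch) axes, then keep only the rows whose mask entry is nonzero before computing the cost (the arrays here are illustrative).

    import numpy as np

    Y = np.arange(12.0).reshape((3, 2, 2))      # (time, batch, dim)
    mask = np.array([[1, 1], [1, 0], [0, 0]])   # (time, batch)

    flat_Y = Y.reshape((3 * 2, 2))
    valid = flat_Y[mask.flatten().nonzero()]    # keep unmasked timesteps only
    assert valid.shape == (3, 2)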
'If the cost_matrix is called from within a cost function, everything is fine, since things were reshaped and unpacked. In any other case we raise a warning (after which it most likely crashes).'
@classmethod
def cost_matrix_wrapper(cls, name, cost_matrix):
    @functools.wraps(cost_matrix)
    def outer(self, Y, Y_hat):
        if self._requires_reshape and inspect.stack()[1][3] != 'cost':
            log.warning('You are using the `cost_matrix` method on a layer '
                        'which has been wrapped to accept sequence input, '
                        'might or might not be problematic.')
        return cost_matrix(self, Y, Y_hat)
    return outer
'If the cost_from_cost_matrix is called from within a cost function, everything is fine, since things were reshaped and unpacked. In any other case we raise a warning (after which it most likely crashes).'
@classmethod
def cost_from_cost_matrix_wrapper(cls, name, cost_from_cost_matrix):
    @functools.wraps(cost_from_cost_matrix)
    def outer(self, cost_matrix):
        if self._requires_reshape and inspect.stack()[1][3] != 'cost':
            log.warning('You are using the `cost_from_cost_matrix` method on '
                        'a layer which has been wrapped to accept sequence '
                        'input, might or might not be problematic.')
        return cost_from_cost_matrix(self, cost_matrix)
    return outer
'If this layer is not RNN-adapted, we intercept the call to the set_input_space method and set the space to a non-sequence space. This transformation is only applied to whitelisted layers. Parameters set_input_space : method The set_input_space method to be wrapped'
@classmethod
def set_input_space_wrapper(cls, name, set_input_space):
    @functools.wraps(set_input_space)
    def outer(self, input_space):
        if not self.rnn_friendly and name != 'MLP':
            def find_sequence_space(input_space):
                """
                Recursive helper function that searches the (possibly
                nested) input space to see if it contains SequenceSpace
                """
                if isinstance(input_space, CompositeSpace):
                    return any(find_sequence_space(component)
                               for component in input_space.components)
                if isinstance(input_space, SequenceDataSpace):
                    return True
                return False
            if find_sequence_space(input_space):
                if name in BLACKLIST:
                    raise ValueError('%s received a SequenceSpace input, but '
                                     'is unable to deal with it. Please use '
                                     'an RNN-friendly alternative from the '
                                     'sandbox instead' % self)
                elif name not in WHITELIST:
                    log.warning('%s received a SequenceSpace but is not able '
                                'to deal with it. We will try to change to '
                                'non-sequence spaces and reshape the data, '
                                'but this is not guaranteed to work! It '
                                'normally works if your input and output '
                                'space are not nested and you are not '
                                'calling other fprop methods from within '
                                'your fprop.' % self)
            if isinstance(input_space, SequenceSpace):
                self._requires_unmask = True
                self._requires_reshape = True
                input_space = input_space.space
            elif isinstance(input_space, SequenceDataSpace):
                self._requires_reshape = True
                input_space = input_space.space
        return set_input_space(self, input_space)
    return outer
'Same thing as set_input_space_wrapper. Parameters get_output_space : method The get_output_space method to be wrapped'
@classmethod
def get_output_space_wrapper(cls, name, get_output_space):
    @functools.wraps(get_output_space)
    def outer(self):
        if (not self.rnn_friendly and self._requires_reshape and
                not isinstance(get_output_space(self), SequenceSpace) and
                not isinstance(get_output_space(self), SequenceDataSpace)):
            if isinstance(self.mlp.input_space, SequenceSpace):
                return SequenceSpace(get_output_space(self))
            elif isinstance(self.mlp.input_space, SequenceDataSpace):
                return SequenceDataSpace(get_output_space(self))
        else:
            return get_output_space(self)
    return outer
'Same thing as set_input_space_wrapper. Parameters get_target_space : method The get_target_space method to be wrapped'
@classmethod
def get_target_space_wrapper(cls, name, get_target_space):
    @functools.wraps(get_target_space)
    def outer(self):
        if (not self.rnn_friendly and self._requires_reshape and
                not isinstance(get_target_space(self), SequenceSpace) and
                not isinstance(get_target_space(self), SequenceDataSpace)):
            if isinstance(self.mlp.input_space, SequenceSpace):
                return SequenceSpace(get_target_space(self))
            elif isinstance(self.mlp.input_space, SequenceDataSpace):
                return SequenceDataSpace(get_target_space(self))
        else:
            return get_target_space(self)
    return outer
'Skip all tests.'
def setUp(self):
raise SkipTest('Sandbox RNNs are disabled.')
'Use an RNN without non-linearity to create the Mersenne numbers (2 ** n - 1) to check whether fprop works correctly.'
def test_fprop(self):
    rnn = RNN(input_space=SequenceSpace(VectorSpace(dim=1)),
              layers=[Recurrent(dim=1, layer_name='recurrent', irange=0.1,
                                indices=[-1], nonlinearity=lambda x: x)])
    W, U, b = rnn.layers[0].get_params()
    W.set_value([[1]])
    U.set_value([[2]])
    X_data, X_mask = rnn.get_input_space().make_theano_batch()
    y_hat = rnn.fprop((X_data, X_mask))
    seq_len = 20
    X_data_vals = np.ones((seq_len, seq_len, 1))
    X_mask_vals = np.triu(np.ones((seq_len, seq_len)))
    f = function([X_data, X_mask], y_hat, allow_input_downcast=True)
    np.testing.assert_allclose(2 ** np.arange(1, seq_len + 1) - 1,
                               f(X_data_vals, X_mask_vals).flatten())
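The expected values follow from the linear recurrence the test wires up: with W = 1 and U = 2 the hidden state obeys h_t = x_t + 2 * h_{t-1}, and with x_t = 1 this yields the Mersenne numbers 2 ** t - 1. A pure-Python check of that recurrence:

    h = 0
    for t in range(1, 21):
        h = 1 + 2 * h          # x_t + U * h_{t-1} with x_t = 1, U = 2
        assert h == 2 ** t - 1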
'Use an RNN to calculate Mersenne number sequences of different lengths and check whether the costs make sense.'
def test_cost(self):
    rnn = RNN(input_space=SequenceSpace(VectorSpace(dim=1)),
              layers=[Recurrent(dim=1, layer_name='recurrent', irange=0,
                                nonlinearity=lambda x: x),
                      Linear(dim=1, layer_name='linear', irange=0)])
    W, U, b = rnn.layers[0].get_params()
    W.set_value([[1]])
    U.set_value([[2]])
    W, b = rnn.layers[1].get_params()
    W.set_value([[1]])
    X_data, X_mask = rnn.get_input_space().make_theano_batch()
    y_data, y_mask = rnn.get_output_space().make_theano_batch()
    y_data_hat, y_mask_hat = rnn.fprop((X_data, X_mask))
    seq_len = 20
    X_data_vals = np.ones((seq_len, seq_len, 1))
    X_mask_vals = np.triu(np.ones((seq_len, seq_len)))
    y_data_vals = np.tile(2 ** np.arange(1, seq_len + 1) - 1,
                          (seq_len, 1)).T[:, :, np.newaxis]
    y_mask_vals = np.triu(np.ones((seq_len, seq_len)))
    f = function([X_data, X_mask, y_data, y_mask],
                 rnn.cost((y_data, y_mask), (y_data_hat, y_mask_hat)),
                 allow_input_downcast=True)
    assert f(X_data_vals, X_mask_vals, y_data_vals, y_mask_vals) == 0
    assert f(X_data_vals + 1, X_mask_vals, y_data_vals, y_mask_vals) != 0
    assert f(X_data_vals, X_mask_vals, y_data_vals + 1, y_mask_vals) == 1
    X_data_vals_plus = X_data_vals + (1 - X_mask_vals[:, :, None])
    assert f(X_data_vals_plus, X_mask_vals, y_data_vals, y_mask_vals) == 0
    y_data_vals_plus = y_data_vals + (1 - y_mask_vals[:, :, None])
    assert f(X_data_vals, X_mask_vals, y_data_vals_plus, y_mask_vals) == 0
'Testing to see whether the gradient can be calculated when using a 1-dimensional hidden state.'
def test_1d_gradient(self):
    rnn = RNN(input_space=SequenceSpace(VectorSpace(dim=1)),
              layers=[Recurrent(dim=1, layer_name='recurrent', irange=0,
                                nonlinearity=lambda x: x),
                      Linear(dim=1, layer_name='linear', irange=0)])
    X_data, X_mask = rnn.get_input_space().make_theano_batch()
    y_data, y_mask = rnn.get_output_space().make_theano_batch()
    default_cost = Default()
    cost = default_cost.expr(rnn, ((X_data, X_mask), (y_data, y_mask)))
    tensor.grad(cost, rnn.get_params(), disconnected_inputs='ignore')
'Testing to see whether the gradient can be calculated.'
def test_gradient(self):
    rnn = RNN(input_space=SequenceSpace(VectorSpace(dim=1)),
              layers=[Recurrent(dim=2, layer_name='recurrent', irange=0,
                                nonlinearity=lambda x: x),
                      Linear(dim=1, layer_name='linear', irange=0)])
    X_data, X_mask = rnn.get_input_space().make_theano_batch()
    y_data, y_mask = rnn.get_output_space().make_theano_batch()
    default_cost = Default()
    cost = default_cost.expr(rnn, ((X_data, X_mask), (y_data, y_mask)))
    tensor.grad(cost, rnn.get_params(), disconnected_inputs='ignore')
'This is a recursive helper function to go through the nested spaces and tuples Parameters space : Space source : string'
@classmethod
def add_mask_source(cls, space, source):
    if isinstance(space, CompositeSpace):
        if not isinstance(space, SequenceSpace):
            source = tuple(cls.add_mask_source(component, source)
                           for component, source in zip(space.components,
                                                        source))
        else:
            assert isinstance(source, six.string_types)
            source = (source, source + '_mask')
    return source
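For illustration, a SequenceSpace whose source is named 'features' is paired with a mask source derived by suffixing, while nested CompositeSpaces are handled by the recursive branch above:

    # add_mask_source(sequence_space, 'features')
    #     -> ('features', 'features_mask')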
'Block monitoring channels if not necessary Parameters : todo'
@wraps(Layer.get_layer_monitoring_channels)
def get_layer_monitoring_channels(self, state_below=None, state=None, targets=None):
    rval = OrderedDict()
    if self.use_monitoring_channels:
        state = state_below
        x = state
        state_conc = None
        for layer in self.layers:
            state_below = state
            if (self.x_shortcut and layer is not self.layers[0] and
                    layer is not self.layers[-1]):
                state = self.create_shortcut_batch(state, x, 2, 1)
            if self.y_shortcut and layer is self.layers[-1]:
                state = layer.fprop(state_conc)
            else:
                state = layer.fprop(state)
            if self.y_shortcut and layer is not self.layers[-1]:
                if layer is self.layers[0]:
                    state_conc = state
                else:
                    state_conc = self.create_shortcut_batch(state_conc,
                                                            state, 2)
            args = [state_below, state]
            if layer is self.layers[-1] and targets is not None:
                args.append(targets)
            ch = layer.get_layer_monitoring_channels(*args)
            if not isinstance(ch, OrderedDict):
                raise TypeError(str((type(ch), layer.layer_name)))
            for key in ch:
                value = ch[key]
                doc = get_monitor_doc(value)
                if doc is None:
                    doc = (str(type(layer)) +
                           '.get_monitoring_channels_from_state did' +
                           ' not provide any further documentation for' +
                           ' this channel.')
                doc = ('This channel came from a layer called "' +
                       layer.layer_name + '" of an MLP.\n' + doc)
                value.__doc__ = doc
                rval[layer.layer_name + '_' + key] = value
    return rval
'A function that adds additive Gaussian noise Parameters param : sharedX model parameter to be regularized Returns param : sharedX model parameter with additive noise'
def add_noise(self, param):
    param += self.mlp.theano_rng.normal(size=param.shape, avg=0.0,
                                        std=self._std_dev, dtype=param.dtype)
    return param
'Scan function for case using masks Parameters : todo state_below : TheanoTensor'
def fprop_step_mask(self, state_below, mask, state_before, U):
    z = self.nonlinearity(state_below + tensor.dot(state_before, U))
    z = mask[:, None] * z + (1 - mask[:, None]) * state_before
    return z
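A minimal numpy sketch of the masking trick used here and in the LSTM/GRU steps below: where the mask is 1 the new state is kept, and where it is 0 the previous state is carried forward unchanged (arrays are illustrative).

    import numpy as np

    mask = np.array([1.0, 0.0])              # (batch,)
    state_before = np.array([[5.0], [7.0]])  # (batch, dim)
    z_new = np.array([[9.0], [9.0]])

    z = mask[:, None] * z_new + (1 - mask[:, None]) * state_before
    assert (z == np.array([[9.0], [7.0]])).all()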
'Scan function for case without masks Parameters : todo state_below : TheanoTensor'
def fprop_step(self, state_below, state_before, U):
    z = self.nonlinearity(state_below + tensor.dot(state_before, U))
    return z
'Scan function for case using masks Parameters : todo state_below : TheanoTensor'
def fprop_step_mask(self, state_below, mask, state_before, U):
    g_on = state_below + tensor.dot(state_before[:, :self.dim], U)
    i_on = tensor.nnet.sigmoid(g_on[:, :self.dim])
    f_on = tensor.nnet.sigmoid(g_on[:, self.dim:2 * self.dim])
    o_on = tensor.nnet.sigmoid(g_on[:, 2 * self.dim:3 * self.dim])
    z = tensor.set_subtensor(state_before[:, self.dim:],
                             f_on * state_before[:, self.dim:] +
                             i_on * tensor.tanh(g_on[:, 3 * self.dim:]))
    z = tensor.set_subtensor(z[:, :self.dim],
                             o_on * tensor.tanh(z[:, self.dim:]))
    z = mask[:, None] * z + (1 - mask[:, None]) * state_before
    return z
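Note the packed state layout this step assumes: state_before carries the hidden state h in columns [:dim] and the cell state c in columns [dim:], so a single tensor can be threaded through scan. Sketch of the layout (purely illustrative):

    # state_before columns, for dim = 2:
    #   [:, 0:2] -> h  (hidden state; recomputed as o * tanh(c))
    #   [:, 2:4] -> c  (cell state; updated as c = f * c + i * tanh(g))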
'Scan function for case without masks Parameters : todo state_below : TheanoTensor'
def fprop_step(self, state_below, z, U):
    g_on = state_below + tensor.dot(z[:, :self.dim], U)
    i_on = tensor.nnet.sigmoid(g_on[:, :self.dim])
    f_on = tensor.nnet.sigmoid(g_on[:, self.dim:2 * self.dim])
    o_on = tensor.nnet.sigmoid(g_on[:, 2 * self.dim:3 * self.dim])
    z = tensor.set_subtensor(z[:, self.dim:],
                             f_on * z[:, self.dim:] +
                             i_on * tensor.tanh(g_on[:, 3 * self.dim:]))
    z = tensor.set_subtensor(z[:, :self.dim],
                             o_on * tensor.tanh(z[:, self.dim:]))
    return z
'Scan function for case using masks Parameters : todo state_below : TheanoTensor'
def fprop_step_mask(self, state_below, mask, state_before, U):
    g_on = tensor.inc_subtensor(state_below[:, self.dim:],
                                tensor.dot(state_before, U[:, self.dim:]))
    r_on = tensor.nnet.sigmoid(g_on[:, self.dim:2 * self.dim])
    u_on = tensor.nnet.sigmoid(g_on[:, 2 * self.dim:])
    z_t = tensor.tanh(g_on[:, :self.dim] +
                      tensor.dot(r_on * state_before, U[:, :self.dim]))
    z_t = u_on * state_before + (1. - u_on) * z_t
    z_t = mask[:, None] * z_t + (1 - mask[:, None]) * state_before
    return z_t
'Scan function for case without masks Parameters : todo state_below : TheanoTensor'
def fprop_step(self, state_below, state_before, U):
    g_on = tensor.inc_subtensor(state_below[:, self.dim:],
                                tensor.dot(state_before, U[:, self.dim:]))
    r_on = tensor.nnet.sigmoid(g_on[:, self.dim:2 * self.dim])
    u_on = tensor.nnet.sigmoid(g_on[:, 2 * self.dim:])
    z_t = tensor.tanh(g_on[:, :self.dim] +
                      tensor.dot(r_on * state_before, U[:, :self.dim]))
    z_t = u_on * state_before + (1. - u_on) * z_t
    return z_t
'Called by self._format_as(space), to check whether self and space have compatible sizes. Throws a ValueError if they don\'t.'
@wraps(space.Space._check_sizes)
def _check_sizes(self, space):
    my_dimension = self.get_total_dimension()
    other_dimension = space.get_total_dimension()
    if my_dimension != other_dimension:
        if isinstance(space, Conv2DSpace):
            if my_dimension * space.shape[0] != other_dimension:
                raise ValueError(str(self) + ' with total dimension ' +
                                 str(my_dimension) +
                                 " can't format a batch into " + str(space) +
                                 ' because its total dimension is ' +
                                 str(other_dimension))
'Create a known gradient and check whether it is being clipped correctly'
def test_gradient_clipping(self):
    mlp = MLP(layers=[Linear(dim=1, irange=0, layer_name='linear')], nvis=1)
    W, b = mlp.layers[0].get_params()
    W.set_value([[10]])
    X = mlp.get_input_space().make_theano_batch()
    y = mlp.get_output_space().make_theano_batch()
    cost = Default()
    gradients, _ = cost.get_gradients(mlp, (X, y))
    clipped_cost = GradientClipping(20, Default())
    clipped_gradients, _ = clipped_cost.get_gradients(mlp, (X, y))
    f = function([X, y],
                 [gradients[W].sum(), clipped_gradients[W].sum()],
                 allow_input_downcast=True)
    np.testing.assert_allclose(f([[1]], [[0]]), [20, 20 / np.sqrt(2)])
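The expected values follow the usual norm-rescaling rule: when the joint gradient norm exceeds the threshold c, every gradient is scaled by c / norm. Here both parameter gradients equal 20, so the norm is 20 * sqrt(2) and W's clipped gradient becomes 20 / sqrt(2). A numpy sketch of that rule (the helper name is illustrative, and we assume the threshold is GradientClipping's first constructor argument, as the test suggests):

    import numpy as np

    def clip_gradients(grads, threshold):
        # rescale all gradients when their joint L2 norm exceeds the threshold
        norm = np.sqrt(sum((g ** 2).sum() for g in grads))
        if norm > threshold:
            grads = [g * threshold / norm for g in grads]
        return grads

    g_W, g_b = clip_gradients([np.array([20.0]), np.array([20.0])], 20)
    np.testing.assert_allclose(g_W, 20 / np.sqrt(2))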