'.. todo:: WRITEME'
def print_status(self):
raise NotImplementedError('TODO fix broken method')
'.. todo:: WRITEME'
def rmul(self, x):
assert x.ndim == 5
return self._filter_acts(x, self._filters)
'.. todo:: WRITEME'
def rmul_T(self, x):
return self._img_acts(self._filters, x, self._irows, self._icols)
'.. todo:: WRITEME'
def col_shape(self):
ishape = self.row_shape() + (-99,)
fshape = self._filters_shape
(hshape,) = self._filter_acts.infer_shape(None, (ishape, fshape))
assert hshape[-1] == -99
return hshape[:-1]
'.. todo:: WRITEME'
def row_shape(self):
fshape = self._filters_shape
(fmodulesR, fmodulesC, fcolors, frows, fcols) = fshape[:-2]
(fgroups, filters_per_group) = fshape[-2:]
return (fgroups, fcolors, self._irows, self._icols)
'.. todo:: WRITEME'
def print_status(self):
raise NotImplementedError('TODO: fix dependence on non-existent '
                          'ndarray_status function')
# print ndarray_status(
#     self._filters.get_value(borrow=True),
#     msg='%s{%s}' % (self.__class__.__name__,
#                     self._message))
'.. todo:: WRITEME'
def imshow_gray(self):
filters = self._filters.get_value()
(modR, modC, colors, rows, cols, grps, fs_per_grp) = filters.shape
logger.info(filters.shape)
rval = np.zeros((modR * (rows + 1) - 1, modC * (cols + 1) - 1))
for rr, modr in enumerate(xrange(0, rval.shape[0], rows + 1)):
    for cc, modc in enumerate(xrange(0, rval.shape[1], cols + 1)):
        rval[modr:modr + rows, modc:modc + cols] = \
            filters[rr, cc, 0, :, :, 0, 0]
plt.imshow(rval, cmap='gray')
return rval
'.. todo:: WRITEME'
def _attributes(self):
return (self.module_stride, self.partial_sum)
'.. todo:: WRITEME'
def __eq__(self, other):
return ((type(self) == type(other)) and (self._attributes() == other._attributes()))
'.. todo:: WRITEME'
def __hash__(self):
return hash((type(self), self._attributes()))
'.. todo:: WRITEME'
def __str__(self):
return ('%s{module_stride=%i,partial_sum=%i}' % (self.__class__.__name__, self.module_stride, self.partial_sum))
'.. todo:: WRITEME'
def make_node(self, images, filters):
ibcast = images.broadcastable
fbcast = filters.broadcastable
(igroups, icolors_per_group, irows, icols, icount) = ibcast
(fmodulesR, fmodulesC, fcolors, frows, fcols) = fbcast[:-2]
(fgroups, filters_per_group) = fbcast[-2:]
hbcast = (fgroups, filters_per_group, fmodulesR, fmodulesC, icount)
if not isinstance(images.type, CudaNdarrayType):
    raise TypeError('gpu_filter_acts requires CudaNdarray images', images)
if not isinstance(filters.type, CudaNdarrayType):
    raise TypeError('gpu_filter_acts requires CudaNdarray filters', filters)
htype = CudaNdarrayType(broadcastable=hbcast)
return theano.gof.Apply(self, [images, filters], [htype()])
'.. todo:: WRITEME'
def c_support_code(self):
cufile = open(os.path.join(_this_dir, 'filter_acts.cu'))
return cufile.read()
'.. todo:: WRITEME'
def c_code_cache_version(self):
return ()
'.. todo:: WRITEME'
def c_code(self, node, nodename, inputs, outputs, sub):
(images, filters) = inputs (responses,) = outputs fail = sub['fail'] moduleStride = str(self.module_stride) sio = StringIO.StringIO() print('\n\n //XXX: actually the rightmost images dimension can be strided\n if (!CudaNdarray_is_c_contiguous(%(images)s))\n {\n PyErr_Format(PyExc_NotImplementedError,\n "images not c contiguous");\n %(fail)s;\n }\n\n if (!CudaNdarray_is_c_contiguous(%(filters)s))\n {\n PyErr_Format(PyExc_NotImplementedError,\n "filters not c contiguous");\n %(fail)s;\n }\n\n if (%(images)s->nd != 5)\n {\n PyErr_Format(PyExc_TypeError,\n "images ndim (%%i) must be 5",\n %(images)s->nd);\n %(fail)s;\n }\n\n if (%(filters)s->nd != 7)\n {\n PyErr_Format(PyExc_TypeError,\n "filters ndim (%%i) must be 7",\n %(filters)s->nd);\n %(fail)s;\n }\n //fprintf(stderr, "really running on GPU\\n");\n\n { // new scope, new vars\n\n int igroups = CudaNdarray_HOST_DIMS(%(images)s)[0];\n int icolors_per_group = CudaNdarray_HOST_DIMS(%(images)s)[1];\n int irows = CudaNdarray_HOST_DIMS(%(images)s)[2];\n int icols = CudaNdarray_HOST_DIMS(%(images)s)[3];\n int icount = CudaNdarray_HOST_DIMS(%(images)s)[4];\n\n int fmodulesR = CudaNdarray_HOST_DIMS(%(filters)s)[0];\n int fmodulesC = CudaNdarray_HOST_DIMS(%(filters)s)[1];\n int fcolors = CudaNdarray_HOST_DIMS(%(filters)s)[2];\n int frows = CudaNdarray_HOST_DIMS(%(filters)s)[3];\n int fcols = CudaNdarray_HOST_DIMS(%(filters)s)[4];\n int fgroups = CudaNdarray_HOST_DIMS(%(filters)s)[5];\n int filters_per_group = CudaNdarray_HOST_DIMS(%(filters)s)[6];\n\n // XXX: use this parameter properly\n int paddingStart = 0;\n int imgStride = icount;\n float scaleTargets = 0.0;\n float scaleOutput = 1.0;\n bool conv = false;\n\n if (igroups != fgroups)\n {\n PyErr_Format(PyExc_ValueError,\n "igroups != fgroups (%%i != %%i)",\n igroups, fgroups);\n %(fail)s;\n }\n\n if (icolors_per_group != fcolors)\n {\n PyErr_Format(PyExc_ValueError,\n "icolors_per_group != fcolors (%%i != %%i)",\n icolors_per_group,\n fcolors);\n %(fail)s;\n }\n\n if (!%(responses)s)\n {\n Py_XDECREF(%(responses)s);\n int dims[5];\n dims[0] = fgroups;\n dims[1] = filters_per_group;\n dims[2] = fmodulesR;\n dims[3] = fmodulesC;\n dims[4] = icount;\n %(responses)s = (CudaNdarray*)CudaNdarray_NewDims(5, dims);\n if (!%(responses)s)\n {\n %(fail)s;\n }\n }\n\n assert(CudaNdarray_is_c_contiguous(%(responses)s));\n\n if (_filterActs(\n igroups,\n icolors_per_group,\n irows,\n icols,\n icount,\n fmodulesR,\n fmodulesC,\n frows,\n fcols,\n filters_per_group,\n CudaNdarray_DEV_DATA(%(images)s),\n CudaNdarray_DEV_DATA(%(filters)s),\n CudaNdarray_DEV_DATA(%(responses)s),\n paddingStart,\n %(moduleStride)s,\n imgStride,\n scaleTargets,\n scaleOutput,\n conv))\n {\n %(fail)s;\n }\n } // end bogus scope used for vars\n\n ', file=sio) return (sio.getvalue() % locals())
'.. todo:: WRITEME'
def make_node(self, images, hidacts, frows, fcols):
if self.partial_sum != 1:
    raise NotImplementedError('partial sum')
frows = theano.tensor.as_tensor_variable(frows)
fcols = theano.tensor.as_tensor_variable(fcols)
if frows.dtype[:3] not in ('int', 'uin'):
    raise TypeError(frows)
if fcols.dtype[:3] not in ('int', 'uin'):
    raise TypeError(fcols)
if frows.ndim:
    raise TypeError('frows should be scalar', frows)
if fcols.ndim:
    raise TypeError('fcols should be scalar', fcols)
(igroups, icolors, irows, icols, icount) = images.type.broadcastable
(hgroups, hcolors, hrows, hcols, hcount) = hidacts.type.broadcastable
otype = theano.sandbox.cuda.CudaNdarrayType(
    broadcastable=(hrows, hcols, icolors, False, False, hgroups, hcolors))
return theano.Apply(self, [images, hidacts, frows, fcols], [otype()])
'.. todo:: WRITEME'
def c_support_code(self):
cufile = open(os.path.join(_this_dir, 'weight_acts.cu'))
return cufile.read()
'.. todo:: WRITEME'
def c_code_cache_version(self):
return ()
'.. todo:: WRITEME'
def c_code(self, node, nodename, inames, onames, sub):
(images, hidacts, frows, fcols) = inames (dweights,) = onames fail = sub['fail'] moduleStride = str(self.module_stride) sio = StringIO.StringIO() print('\n\n if (!CudaNdarray_is_c_contiguous(%(images)s))\n {\n //XXX: Alex\'s code actually supports the rightmost images\n // dimension strided\n PyErr_Format(PyExc_NotImplementedError,\n "images not c contiguous");\n %(fail)s;\n }\n\n if (!CudaNdarray_is_c_contiguous(%(hidacts)s))\n {\n PyErr_Format(PyExc_NotImplementedError,\n "hidacts not c contiguous");\n %(fail)s;\n }\n\n if (%(images)s->nd != 5)\n {\n PyErr_Format(PyExc_TypeError,\n "images ndim (%%i) must be 5",\n %(images)s->nd);\n %(fail)s;\n }\n\n if (%(hidacts)s->nd != 5)\n {\n PyErr_Format(PyExc_TypeError,\n "hidacts ndim (%%i) must be 5",\n %(images)s->nd);\n %(fail)s;\n }\n\n if (PyArray_NDIM(%(frows)s) != 0)\n {\n PyErr_Format(PyExc_TypeError,\n "frows ndim (%%i) must be 0",\n PyArray_NDIM(%(frows)s));\n %(fail)s;\n }\n\n if (PyArray_NDIM(%(fcols)s) != 0)\n {\n PyErr_Format(PyExc_TypeError,\n "fcols ndim (%%i) must be 0",\n PyArray_NDIM(%(fcols)s));\n %(fail)s;\n }\n\n { // new scope, new vars\n\n int igroups = CudaNdarray_HOST_DIMS(%(images)s)[0];\n int icolors_per_group = CudaNdarray_HOST_DIMS(%(images)s)[1];\n int irows = CudaNdarray_HOST_DIMS(%(images)s)[2];\n int icols = CudaNdarray_HOST_DIMS(%(images)s)[3];\n int icount = CudaNdarray_HOST_DIMS(%(images)s)[4];\n\n int hgroups = CudaNdarray_HOST_DIMS(%(hidacts)s)[0];\n int hcolors_per_group = CudaNdarray_HOST_DIMS(%(hidacts)s)[1];\n int hrows = CudaNdarray_HOST_DIMS(%(hidacts)s)[2];\n int hcols = CudaNdarray_HOST_DIMS(%(hidacts)s)[3];\n int hcount = CudaNdarray_HOST_DIMS(%(hidacts)s)[4];\n\n int fmodulesR = hrows;\n int fmodulesC = hcols;\n int fcolors = icolors_per_group;\n int frows = ((dtype_%(frows)s *) PyArray_DATA(%(frows)s))[0];\n int fcols = ((dtype_%(fcols)s *) PyArray_DATA(%(fcols)s))[0];\n int fgroups = hgroups;\n int filters_per_group = hcolors_per_group;\n\n // XXX: use this parameter properly\n int paddingStart = 0;\n int imgStride = icount;\n float scaleTargets = 0.0;\n float scaleOutput = 1.0;\n int moduleStride = %(moduleStride)s;\n int partialSum = 1; // set to 0 for convolution.\n\n if (igroups != hgroups)\n {\n PyErr_Format(PyExc_ValueError,\n "igroups != hgroups (%%i != %%i)",\n igroups, hgroups);\n %(fail)s;\n }\n\n if (icolors_per_group != fcolors)\n {\n PyErr_Format(PyExc_ValueError,\n "icolors_per_group != fcolors (%%i != %%i)",\n icolors_per_group,\n fcolors);\n %(fail)s;\n }\n\n if (icount != hcount)\n {\n PyErr_Format(PyExc_ValueError,\n "icount != hcount (%%i != %%i)",\n icount,\n hcount);\n %(fail)s;\n }\n\n // XXX: CHECK SHAPE IS CORRECT\n if (!%(dweights)s)\n {\n Py_XDECREF(%(dweights)s);\n int dims[7];\n dims[0] = fmodulesR;\n dims[1] = fmodulesC;\n dims[2] = fcolors;\n dims[3] = frows;\n dims[4] = fcols;\n dims[5] = fgroups;\n dims[6] = filters_per_group;\n\n %(dweights)s = (CudaNdarray*)CudaNdarray_NewDims(7, dims);\n if (!%(dweights)s)\n {\n %(fail)s;\n }\n }\n\n assert(CudaNdarray_is_c_contiguous(%(dweights)s));\n\n if (_weightActs(\n igroups,\n icolors_per_group,\n irows,\n icols,\n icount,\n fmodulesR,\n fmodulesC,\n frows,\n fcols,\n filters_per_group,\n CudaNdarray_DEV_DATA(%(images)s),\n CudaNdarray_DEV_DATA(%(hidacts)s),\n CudaNdarray_DEV_DATA(%(dweights)s),\n paddingStart,\n moduleStride,\n imgStride,\n scaleTargets,\n scaleOutput,\n partialSum))\n {\n %(fail)s;\n }\n } // end bogus scope used for vars\n\n ', file=sio) return (sio.getvalue() % locals())
'.. todo:: WRITEME'
def make_node(self, filters, hidacts, irows, icols):
irows = theano.tensor.as_tensor_variable(irows)
icols = theano.tensor.as_tensor_variable(icols)
if irows.dtype[:3] not in ('int', 'uin'):
    raise TypeError(irows)
if icols.dtype[:3] not in ('int', 'uin'):
    raise TypeError(icols)
if irows.ndim:
    raise TypeError('irows should be scalar', irows)
if icols.ndim:
    raise TypeError('icols should be scalar', icols)
return theano.gof.Apply(self, [filters, hidacts, irows, icols],
                        [hidacts.type()])
'.. todo:: WRITEME'
def c_support_code(self):
cufile = open(os.path.join(_this_dir, 'raw_img_acts.cu'))
return cufile.read()
'.. todo:: WRITEME'
def c_code_cache_version(self):
return ()
'.. todo:: WRITEME'
def c_code(self, node, nodename, inames, onames, sub):
(filters, hidacts, irows, icols) = inames (dimages,) = onames fail = sub['fail'] moduleStride = str(self.module_stride) sio = StringIO.StringIO() print('\n\n if (!CudaNdarray_is_c_contiguous(%(filters)s))\n {\n //XXX: Alex\'s code actually supports the rightmost images\n // dimension strided\n PyErr_Format(PyExc_NotImplementedError,\n "images not c contiguous");\n %(fail)s;\n }\n\n if (!CudaNdarray_is_c_contiguous(%(hidacts)s))\n {\n PyErr_Format(PyExc_NotImplementedError,\n "hidacts not c contiguous");\n %(fail)s;\n }\n\n if (%(filters)s->nd != 7)\n {\n PyErr_Format(PyExc_TypeError,\n "images ndim (%%i) must be 7",\n %(filters)s->nd);\n %(fail)s;\n }\n\n if (%(hidacts)s->nd != 5)\n {\n PyErr_Format(PyExc_TypeError,\n "hidacts ndim (%%i) must be 5",\n %(hidacts)s->nd);\n %(fail)s;\n }\n\n if (PyArray_NDIM(%(irows)s) != 0)\n {\n PyErr_Format(PyExc_TypeError,\n "frows ndim (%%i) must be 0",\n PyArray_NDIM(%(irows)s));\n %(fail)s;\n }\n\n if (PyArray_NDIM(%(icols)s) != 0)\n {\n PyErr_Format(PyExc_TypeError,\n "fcols ndim (%%i) must be 0",\n PyArray_NDIM(%(icols)s));\n %(fail)s;\n }\n\n { // new scope, new vars\n\n int fmodulesR = CudaNdarray_HOST_DIMS(%(filters)s)[0];\n int fmodulesC = CudaNdarray_HOST_DIMS(%(filters)s)[1];\n int fcolors = CudaNdarray_HOST_DIMS(%(filters)s)[2];\n int frows = CudaNdarray_HOST_DIMS(%(filters)s)[3];\n int fcols = CudaNdarray_HOST_DIMS(%(filters)s)[4];\n int fgroups = CudaNdarray_HOST_DIMS(%(filters)s)[5];\n int filters_per_group = CudaNdarray_HOST_DIMS(%(filters)s)[6];\n\n int hgroups = CudaNdarray_HOST_DIMS(%(hidacts)s)[0];\n int hcolors_per_group = CudaNdarray_HOST_DIMS(%(hidacts)s)[1];\n int hrows = CudaNdarray_HOST_DIMS(%(hidacts)s)[2];\n int hcols = CudaNdarray_HOST_DIMS(%(hidacts)s)[3];\n int hcount = CudaNdarray_HOST_DIMS(%(hidacts)s)[4];\n\n int igroups = fgroups;\n int icolors_per_group = fcolors;\n int irows = ((dtype_%(irows)s *) PyArray_DATA(%(irows)s))[0];\n int icols = ((dtype_%(icols)s *) PyArray_DATA(%(icols)s))[0];\n int icount = hcount;\n\n\n // TODO: use this parameter properly\n int paddingStart = 0;\n float scaleTargets = 0.0;\n float scaleOutput = 1.0;\n int moduleStride = %(moduleStride)s;\n bool conv = 0;\n\n if (hgroups != fgroups)\n {\n PyErr_Format(PyExc_ValueError,\n "hgroups != fgroups (%%i != %%i)",\n hgroups, fgroups);\n %(fail)s;\n }\n\n if (hcolors_per_group != filters_per_group)\n {\n PyErr_Format(PyExc_ValueError,\n "hcolors_per_group != filters_per_group (%%i != %%i)",\n hcolors_per_group,\n filters_per_group);\n %(fail)s;\n }\n\n // XXX: CHECK SHAPE IS CORRECT\n if (!%(dimages)s)\n {\n Py_XDECREF(%(dimages)s);\n int dims[5];\n dims[0] = igroups;\n dims[1] = icolors_per_group;\n dims[2] = irows;\n dims[3] = icols;\n dims[4] = icount;\n\n %(dimages)s = (CudaNdarray*)CudaNdarray_NewDims(5, dims);\n if (!%(dimages)s)\n {\n %(fail)s;\n }\n }\n\n assert(CudaNdarray_is_c_contiguous(%(dimages)s));\n\n if (paddingStart + (fmodulesR - 1) * moduleStride + frows < irows)\n {\n PyErr_Format(PyExc_ValueError,\n "uhoh123: %%i %%i %%i %%i %%i",\n paddingStart,\n fmodulesR,\n moduleStride,\n frows,\n irows);\n %(fail)s;\n }\n\n if (_imgActs(\n fgroups,\n filters_per_group,\n fcolors,\n hcount,\n fmodulesR,\n fmodulesC,\n frows,\n fcols,\n irows,\n icols,\n CudaNdarray_DEV_DATA(%(filters)s),\n CudaNdarray_DEV_DATA(%(hidacts)s),\n CudaNdarray_DEV_DATA(%(dimages)s),\n paddingStart,\n moduleStride,\n scaleTargets,\n scaleOutput,\n conv))\n {\n %(fail)s;\n }\n } // end bogus scope used for vars\n\n ', file=sio) return (sio.getvalue() % locals())
'.. todo:: WRITEME'
@staticmethod
def row_col_channel(row, col, channel, n_rows, n_cols, n_channels):
return ((((row * n_cols) * n_channels) + (col * n_channels)) + channel)
'.. todo:: WRITEME'
@staticmethod
def channel_row_col(row, col, channel, n_rows, n_cols, n_channels):
return ((((channel * n_rows) * n_cols) + (row * n_cols)) + col)
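# Worked example (illustrative, not from the original source): with a 2x3
# image and 4 channels, the two orderings place pixel (row=1, col=2,
# channel=1) at different flat offsets.
n_rows, n_cols, n_channels = 2, 3, 4
row, col, channel = 1, 2, 1
assert row * n_cols * n_channels + col * n_channels + channel == 21
assert channel * n_rows * n_cols + row * n_cols + col == 11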
'.. todo:: WRITEME'
def make_node(self, x):
return gof.Apply(self, [x], [x.type()])
'.. todo:: WRITEME'
def perform(self, node, xs, zs):
x = xs[0]
z = zs[0]
if x.format != 'csc':
    raise TypeError('Remove0 only works on csc matrices')
(M, N) = x.shape
data = x.data
indices = x.indices
indptr = x.indptr
new_data = []
new_indices = []
new_indptr = [0]
for j in xrange(0, N):
    for i_idx in xrange(indptr[j], indptr[j + 1]):
        if data[i_idx] != 0:
            new_data.append(data[i_idx])
            new_indices.append(indices[i_idx])
    new_indptr.append(len(new_indices))
z[0] = sparse.csc_matrix((new_data, new_indices, new_indptr), (M, N))
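# Minimal sketch (assumes scipy is available) of the behaviour Remove0
# implements: explicitly stored zeros are dropped from a csc matrix while
# the dense contents stay the same. scipy's eliminate_zeros() does the
# same thing that Remove0.perform rebuilds by hand.
import numpy as np
import scipy.sparse as sp

data = np.array([0., 3.])
indices = np.array([0, 1])
indptr = np.array([0, 1, 2])
x = sp.csc_matrix((data, indices, indptr), shape=(2, 2))
assert x.nnz == 2          # the stored zero still counts as an entry
x.eliminate_zeros()
assert x.nnz == 1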
'.. todo:: WRITEME'
def grad(self, x, gz):
return [gz[0]]
'.. todo:: WRITEME'
def make_node(self, x):
return gof.Apply(self, [x], [x.type()])
'.. todo:: WRITEME'
def perform(self, node, xs, zs):
zs[0][0] = xs[0].ensure_sorted_indices(inplace=self.inplace)
'.. todo:: WRITEME'
def grad(self, xs, gz):
return [gz[0]]
'.. todo:: WRITEME'
@staticmethod
def sparse_eval(inshp, kshp, nkern, offset=(1, 1), mode='valid'):
return convolution_indices.evaluate(inshp, kshp, offset, nkern, mode=mode, ws=False)
'.. todo:: WRITEME'
@staticmethod
def conv_eval(IR, IC, KR, KC, C, subsample=(1, 1), mode='valid'):
raise NotImplementedError('TODO: fix broken method')
'Build a sparse matrix which can be used for performing... * convolution: in this case, the dot product of this matrix with the input images will generate a stack of images patches. Convolution is then a tensordot operation of the filters and the patch stack. * sparse local connections: in this case, the sparse matrix allows us to operate the weight matrix as if it were fully-connected. The structured-dot with the input image gives the output for the following layer. Parameters ker_shape : tuple Shape of kernel to apply (smaller than image) img_shape: tuple Shape of input images mode : str \'valid\' generates output only when kernel and image overlap. \'full\' full convolution obtained by zero-padding the input ws : bool True if weight sharing, False otherwise offset : tuple of int Offset parameter. In the case of no weight sharing, gives the pixel offset between two receptive fields. With weight sharing gives the offset between the top-left pixels of the generated patches Returns rval : tuple(indices, indptr, logical_shape, sp_type, out_img_shp) The structure of a sparse matrix, and the logical dimensions of the image which will be the result of filtering.'
@staticmethod
def evaluate(imshp, kshp, offset=(1, 1), nkern=1, mode='valid', ws=True):
N = numpy (dx, dy) = offset if (N.size(imshp) == 2): inshp = ((1,) + imshp) inshp = N.array(imshp) kshp = N.array(kshp) ksize = N.prod(kshp) kern = ((ksize - 1) - N.arange(ksize)) fulloutshp = ((inshp[1:] + kshp) - 1) s = ((-1) if (mode == 'valid') else 1) outshp = N.int64(N.ceil((((inshp[1:] + (s * kshp)) - (s * 1)) / N.array([dy, dx], dtype='float')))) if any((outshp <= 0)): err = ('Invalid kernel', kshp, 'and/or step size', (dx, dy), 'for given input shape', inshp) raise ValueError(err) outsize = N.prod(outshp) insize = N.prod(inshp) lbound = (N.array([(kshp[0] - 1), (kshp[1] - 1)]) if (mode == 'valid') else N.zeros(2)) ubound = ((lbound + ((inshp[1:] - kshp) + 1)) if (mode == 'valid') else fulloutshp) topleft = N.array([(kshp[0] - 1), (kshp[1] - 1)]) botright = (topleft + inshp[1:]) spmatshp = ((((outsize * N.prod(kshp)) * inshp[0]), insize) if ws else ((nkern * outsize), insize)) spmat = scipy_sparse.lil_matrix(spmatshp) (z, zz) = (0, 0) (tapi, ntaps) = (0, 0) for fmapi in range(inshp[0]): for n in range(nkern): for oy in N.arange(lbound[0], ubound[0], dy): for ox in N.arange(lbound[1], ubound[1], dx): l = 0 for ky in (oy + N.arange(kshp[0])): for kx in (ox + N.arange(kshp[1])): if (all(((ky, kx) >= topleft)) and all(((ky, kx) < botright))): (iy, ix) = (N.array((ky, kx)) - topleft) col = (((iy * inshp[2]) + ix) + (fmapi * N.prod(inshp[1:]))) (y, x) = ((oy, ox) if (mode == 'full') else ((oy, ox) - topleft)) (y, x) = (N.array([y, x]) / (dy, dx)) row = (((((((y * outshp[1]) + x) * inshp[0]) * ksize) + l) + (fmapi * ksize)) if ws else ((y * outshp[1]) + x)) spmat[((row + (n * outsize)), col)] = (tapi + 1) ntaps += 1 tapi += 1 l += 1 if (spmat.format != 'csc'): spmat = spmat.tocsc().ensure_sorted_indices() else: spmat = spmat.ensure_sorted_indices() if ws: kmap = None else: kmap = N.zeros(ntaps, dtype='int') k = 0 for j in xrange(spmat.shape[1]): for i_idx in xrange(spmat.indptr[j], spmat.indptr[(j + 1)]): if (spmat.data[i_idx] != 0): kmap[k] = (spmat.data[i_idx] - 1) k += 1 assert (spmat.format == 'csc') sptype = 'csc' use_csr_type = 0 if (use_csr_type and (mode == 'valid')): spmat = spmat.tocsr() rval = (spmat.indices[:spmat.size], spmat.indptr, spmatshp, sptype, outshp) rval += ((kmap,) if (kmap != None) else ()) return rval
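# Hypothetical usage sketch (argument values are illustrative; the unpacking
# assumes ws=False, which appends the kernel map to the result): build the
# connectivity pattern for a 5x5 single-channel image, a 3x3 kernel and two
# kernels, then materialise it as a scipy CSC matrix.
import scipy.sparse

(indices, indptr, spmatshp, sptype, outshp, kmap) = \
    convolution_indices.evaluate((1, 5, 5), (3, 3),
                                 nkern=2, mode='valid', ws=False)
assert sptype == 'csc'
connectivity = scipy.sparse.csc_matrix(
    (numpy.ones(len(indices), dtype='float32'), indices, indptr),
    shape=spmatshp)
# a structured dot of this matrix with a flattened image gathers the input
# pixels feeding each output unit; outshp gives the output's spatial shape.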
'.. todo:: WRITEME'
def perform(self, node, shape, out):
(inshp, kshp) = shape
(out_indices, out_indptr, spmat_shape) = out
(indices, indptr, spmatshp, outshp) = self.evaluate(inshp, kshp)
out_indices[0] = indices
out_indptr[0] = indptr
spmat_shape[0] = numpy.asarray(spmatshp)
'Computes a nested data_specs for input and all channels. Also computes the mapping to flatten it. This function is called from redo_theano.'
def _build_data_specs(self):
(m_space, m_source) = self.model.get_monitoring_data_specs()
input_spaces = [m_space]
input_sources = [m_source]
for channel in self.channels.values():
    space = channel.data_specs[0]
    assert isinstance(space, Space)
    input_spaces.append(space)
    input_sources.append(channel.data_specs[1])
nested_space = CompositeSpace(input_spaces)
nested_source = tuple(input_sources)
self._nested_data_specs = (nested_space, nested_source)
self._data_specs_mapping = DataSpecsMapping(self._nested_data_specs)
flat_space = self._data_specs_mapping.flatten(nested_space,
                                              return_tuple=True)
flat_source = self._data_specs_mapping.flatten(nested_source,
                                               return_tuple=True)
self._flat_data_specs = (CompositeSpace(flat_space), flat_source)
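# Minimal sketch (assumes pylearn2's space utilities; names are illustrative)
# of the flatten/nest round trip that _build_data_specs relies on: identical
# (space, source) pairs may be merged in the flat representation, and nest()
# recovers the original nesting.
from pylearn2.space import CompositeSpace, VectorSpace
from pylearn2.utils.data_specs import DataSpecsMapping

nested_space = CompositeSpace([VectorSpace(dim=10), VectorSpace(dim=10)])
nested_source = ('features', 'features')
mapping = DataSpecsMapping((nested_space, nested_source))
flat_space = mapping.flatten(nested_space, return_tuple=True)
flat_source = mapping.flatten(nested_source, return_tuple=True)
flat_specs = (CompositeSpace(flat_space), flat_source)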
'.. todo:: WRITEME Parameters mode : theano.compile.Mode Theano functions for the monitoring channels will be compiled and run using this mode.'
def set_theano_function_mode(self, mode):
if self.theano_function_mode != mode:
    self._dirty = True
    self.theano_function_mode = mode
'Determines the data used to calculate the values of each channel. Parameters dataset : object A `pylearn2.datasets.Dataset` object. mode : str or object, optional Iteration mode; see the docstring of the `iterator` method on `pylearn2.datasets.Dataset` for details. batch_size : int, optional The size of an individual batch. Optional if `mode` is \'sequential\' and `num_batches` is specified (batch size will be calculated based on full dataset size). num_batches : int, optional The total number of batches. Unnecessary if `mode` is \'sequential\' and `batch_size` is specified (number of batches will be calculated based on full dataset size). seed : int, optional The seed to be used for random iteration modes.'
def add_dataset(self, dataset, mode='sequential', batch_size=None, num_batches=None, seed=None):
if (not isinstance(dataset, list)): dataset = [dataset] if (not isinstance(mode, list)): mode = [mode] if (not isinstance(batch_size, list)): batch_size = [batch_size] if (not isinstance(num_batches, list)): num_batches = [num_batches] if (seed is None): seed = ([None] * len(dataset)) if (not isinstance(seed, list)): seed = [seed] if (len(mode) != len(dataset)): raise ValueError((((('Received ' + str(len(dataset))) + ' dataset but ') + str(len(mode))) + ' modes.')) if any([(len(l) != len(dataset)) for l in [batch_size, seed]]): raise ValueError(('make sure each dataset has its iteration ' + 'batch size and number of batches.')) for (d, m, b, n, sd) in safe_izip(dataset, mode, batch_size, num_batches, seed): try: it = d.iterator(mode=m, batch_size=b, num_batches=n, data_specs=self._flat_data_specs, return_tuple=True, rng=sd) except ValueError as exc: reraise_as(ValueError((('invalid iteration parameters in ' + 'Monitor.add_dataset: ') + str(exc)))) if it.stochastic: if (sd is None): raise TypeError(('Monitor requires a seed when using ' + 'stochastic iteration modes.')) if (not isinstance(sd, (list, tuple, int))): raise TypeError((('Monitor requires a seed (not a random ' + 'number generator) when using ') + 'stochastic iteration modes.')) else: assert (sd is None) if (d not in self._datasets): self._datasets.append(d) self._iteration_mode.append(m) self._batch_size.append(b) self._num_batches.append(n) self._rng_seed.append(sd)
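# Hypothetical usage (dataset and variable names are illustrative): register
# two monitoring datasets, each with its own iteration parameters, before the
# channels are compiled.
monitor = Monitor.get_monitor(model)
monitor.add_dataset(dataset=[train_set, valid_set],
                    mode=['sequential', 'sequential'],
                    batch_size=[100, 100],
                    num_batches=[None, None])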
'Runs the model on the monitoring dataset in order to add one data point to each of the channels.'
def __call__(self):
if self._dirty: self.redo_theano() datasets = self._datasets self.begin_record_entry() for (d, i, b, n, a, sd, ne) in safe_izip(datasets, self._iteration_mode, self._batch_size, self._num_batches, self.accum, self._rng_seed, self.num_examples): if isinstance(d, six.string_types): d = yaml_parse.load(d) raise NotImplementedError() myiterator = d.iterator(mode=i, batch_size=b, num_batches=n, data_specs=self._flat_data_specs, return_tuple=True, rng=sd) if (len(self._flat_data_specs[1]) == 0): X = () self.run_prereqs(X, d) a(*X) else: actual_ne = 0 for X in myiterator: self.run_prereqs(X, d) a(*X) actual_ne += self._flat_data_specs[0].np_batch_size(X) if (actual_ne != ne): raise RuntimeError(('At compile time, your iterator said it had %d examples total, but at runtime it gave us %d.' % (ne, actual_ne))) log.info('Monitoring step:') log.info((' DCTB Epochs seen: %d' % self._epochs_seen)) log.info((' DCTB Batches seen: %d' % self._num_batches_seen)) log.info((' DCTB Examples seen: %d' % self._examples_seen)) t = (time.time() - self.t0) for channel_name in sorted(self.channels.keys(), key=number_aware_alphabetical_key): channel = self.channels[channel_name] channel.time_record.append(t) channel.batch_record.append(self._num_batches_seen) channel.example_record.append(self._examples_seen) channel.epoch_record.append(self._epochs_seen) val = channel.val_shared.get_value() channel.val_record.append(val) if (abs(val) < 10000.0): val_str = str(val) else: val_str = ('%.3e' % val) log.info((' DCTB %s: %s' % (channel_name, val_str)))
'Runs all "prerequistie functions" on a batch of data. Always called right before computing the monitoring channels on that batch. Parameters data : tuple or Variable a member of the Space used as input to the monitoring functions dataset : Dataset the Dataset the data was drawn from'
def run_prereqs(self, data, dataset):
if dataset not in self.prereqs:
    return
for prereq in self.prereqs[dataset]:
    prereq(*data)
'Returns the number of batches the model has learned on (assuming that the learning code has been calling Monitor.report_batch correctly).'
def get_batches_seen(self):
return self._num_batches_seen
'.. todo:: WRITEME Returns epochs_seen : int The number of epochs the model has been trained on. One "epoch" is one pass through Dataset.iterator.'
def get_epochs_seen(self):
return self._epochs_seen
'.. todo:: WRITEME Returns examples_seen : int The number of examples the model has learned on (assuming that the learning code has been calling Monitor.report_batch correctly)'
def get_examples_seen(self):
return self._examples_seen
'Call this whenever the model has learned on another batch of examples. Report how many examples were learned on. Parameters num_examples : int The number of examples learned on in this minibatch.'
def report_batch(self, num_examples):
self._examples_seen += num_examples
self._num_batches_seen += 1
'Call this whenever the model has completed another "epoch" of learning. We regard one pass through Dataset.iterator as one epoch.'
def report_epoch(self):
self._epochs_seen += 1
'Recompiles Theano functions used by this monitor. This is called any time we need to evaluate the channels and the channel definitions have changed since last we called it, or if the theano functions are unavailable for any other reason (first time they are needed after construction or deserialization, etc.) All channels are compiled as part of the same theano function so that the theano optimizations can eliminate subexpressions that are shared between multiple channels.'
def redo_theano(self):
self._dirty = False self._build_data_specs() init_names = dir(self) self.prereqs = OrderedDict() for channel in self.channels.values(): if (channel.prereqs is not None): dataset = channel.dataset if (dataset not in self.prereqs): self.prereqs[dataset] = [] prereqs = self.prereqs[dataset] for prereq in channel.prereqs: if (prereq not in prereqs): prereqs.append(prereq) updates = OrderedDict() for channel in self.channels.values(): updates[channel.val_shared] = np.cast[config.floatX](0.0) with log_timing(log, 'compiling begin_record_entry'): self.begin_record_entry = function(inputs=[], updates=updates, mode=self.theano_function_mode, name='Monitor.begin_record_entry') updates = OrderedDict() givens = OrderedDict() batch_names = [('monitoring_%s' % s) for s in self._flat_data_specs[1]] theano_args = self._flat_data_specs[0].make_theano_batch(batch_names) batch_size = self._flat_data_specs[0].batch_size(theano_args) nested_theano_args = self._data_specs_mapping.nest(theano_args) if (not isinstance(nested_theano_args, tuple)): nested_theano_args = (nested_theano_args,) assert (len(nested_theano_args) == (len(self.channels) + 1)) log.info('Monitored channels: ') for key in sorted(self.channels.keys()): mode = self.theano_function_mode if ((mode is not None) and hasattr(mode, 'record')): mode.record.handle_line(((('compiling monitor including ' + 'channel ') + key) + '\n')) log.info((' DCTB %s' % key)) it = [] for (d, i, n, b) in safe_izip(self._datasets, self._iteration_mode, self._num_batches, self._batch_size): it.append(d.iterator(mode=i, num_batches=n, batch_size=b, data_specs=self._flat_data_specs, return_tuple=True)) self.num_examples = [i.num_examples for i in it] givens = [OrderedDict() for d in self._datasets] updates = [OrderedDict() for d in self._datasets] for (i, channel) in enumerate(self.channels.values()): index = self._datasets.index(channel.dataset) d = self._datasets[index] g = givens[index] cur_num_examples = self.num_examples[index] u = updates[index] c_mapping = DataSpecsMapping(channel.data_specs) channel_inputs = c_mapping.flatten(channel.graph_input, return_tuple=True) inputs = c_mapping.flatten(nested_theano_args[(i + 1)], return_tuple=True) for (channel_X, X) in safe_izip(channel_inputs, inputs): assert ((channel_X not in g) or (g[channel_X] is X)) assert (channel_X.type == X.type), (channel_X.type, X.type) g[channel_X] = X if (batch_size == 0): assert (len(self._flat_data_specs[1]) == 0) val = channel.val else: if (n == 0): raise ValueError(('Iterating over 0 examples results in ' + 'divide by 0')) val = T.cast(((channel.val * T.cast(batch_size, 'float64')) / cur_num_examples), config.floatX) u[channel.val_shared] = (channel.val_shared + val) with log_timing(log, 'Compiling accum'): for up in updates: for key in up: if (key.dtype != up[key].dtype): raise TypeError((((((('Monitoring channel shared variable ' + key.name) + ' has dtype ') + key.dtype) + ' but is driven by an expression ') + 'with type ') + up[key].dtype)) self.accum = [] for (idx, packed) in enumerate(safe_izip(givens, updates)): (g, u) = packed mode = self.theano_function_mode if ((mode is not None) and hasattr(mode, 'record')): for elem in g: mode.record.handle_line((('g key ' + var_descriptor(elem)) + '\n')) mode.record.handle_line((('g val ' + var_descriptor(g[elem])) + '\n')) for elem in u: mode.record.handle_line((('u key ' + var_descriptor(elem)) + '\n')) mode.record.handle_line((('u val ' + var_descriptor(u[elem])) + '\n')) function_name = ('Monitor.accum[%d]' % idx) if ((mode is not None) and 
hasattr(mode, 'record')): mode.record.handle_line('compiling supervised accum\n') self.accum.append(function(theano_args, givens=g, updates=u, mode=self.theano_function_mode, name=function_name)) for a in self.accum: if ((mode is not None) and hasattr(mode, 'record')): for elem in a.maker.fgraph.outputs: mode.record.handle_line((('accum output ' + var_descriptor(elem)) + '\n')) log.info(('graph size: %d' % len(a.maker.fgraph.toposort()))) final_names = dir(self) self.register_names_to_del([name for name in final_names if (name not in init_names)])
'Register names of fields that should be deleted before pickling. Parameters names : list A list of attribute names as strings.'
def register_names_to_del(self, names):
for name in names:
    if name not in self.names_to_del:
        self.names_to_del.append(name)
'In order to avoid pickling a copy of the dataset whenever a monitor is saved, the __getstate__ method replaces the dataset field with the dataset\'s yaml source. This is not a perfect solution because it won\'t work with job resuming, which would require saving the state of the dataset\'s random number generator. Like in the Model class, we also need to avoid saving any Theano functions, so we delete everything that can be regenerated with `redo_theano` by deleting the fields in `self.names_to_del`'
def __getstate__(self):
if not hasattr(self, '_datasets'):
    self._datasets = [self._dataset]
    del self._dataset
temp = self._datasets
if self._datasets:
    self._datasets = []
    for dataset in temp:
        if isinstance(dataset, six.string_types):
            self._datasets.append(dataset)
        else:
            try:
                self._datasets.append(dataset.yaml_src)
            except AttributeError:
                warnings.warn('Trained model saved without ' +
                              'indicating yaml_src')
d = copy.copy(self.__dict__)
self._datasets = temp
for name in self.names_to_del:
    if name in d:
        del d[name]
return d
'Sets the object to have the state described by `d`. Parameters d : dict A dictionary mapping string names of fields to values for these fields.'
def __setstate__(self, d):
if '_dataset' in d:
    d['_datasets'] = [d['_dataset']]
    del d['_dataset']
self.__dict__.update(d)
'Asks the monitor to start tracking a new value. Can be called even after the monitor is already in use. Parameters name : str The display name in the monitor. ipt : tensor_like The symbolic tensor which should be clamped to the data. (or a list/tuple containing symbolic tensors, following the data_specs) val : tensor_like The value (function of `ipt`) to be tracked. dataset : pylearn2.datasets.Dataset Which dataset to compute this channel on prereqs : list of callables that take a list of numpy tensors Each prereq must be called exactly once per each new batch of data drawn *from dataset* before the channel value is computed if two channels provide a prereq with exactly the same id, that prereq will only be called once data_specs : (space, source) pair Identifies the order, format and semantics of ipt'
def add_channel(self, name, ipt, val, dataset=None, prereqs=None, data_specs=None):
if six.PY3: numeric = (float, int) else: numeric = (float, int, long) if isinstance(val, numeric): val = np.cast[theano.config.floatX](val) val = T.as_tensor_variable(val) if (data_specs is None): warnings.warn(("parameter 'data_specs' should be provided when " + 'calling add_channel. We will build a default one.'), stacklevel=2) if isinstance(ipt, list): ipt = tuple(ipt) if ((ipt is not None) and (not isinstance(ipt, tuple))): ipt = (ipt,) if (ipt is None): data_specs = (NullSpace(), '') elif (len(ipt) == 0): data_specs = (CompositeSpace([]), ()) elif hasattr(dataset, 'get_data_specs'): (dataset_space, dataset_source) = dataset.get_data_specs() if ((len(ipt) == 1) and (dataset_source is not None) and ((not isinstance(dataset_source, tuple)) or (len(dataset_source) == 1)) and ('features' in dataset_source)): data_specs = (dataset_space, dataset_source) elif ((len(ipt) == 2) and (dataset_source == ('features', 'targets'))): data_specs = (dataset_space, dataset_source) else: raise ValueError((('Cannot infer default data_specs for ' + 'the following input points and ') + ('dataset: ipt = %s, dataset = %s' % (ipt, dataset)))) data_specs[0].validate(ipt) mapping = DataSpecsMapping(data_specs) flat_ipt = mapping.flatten(ipt) if (not isinstance(flat_ipt, tuple)): flat_ipt = (flat_ipt,) inputs = theano.gof.graph.inputs([val]) for elem in inputs: if ((not hasattr(elem, 'get_value')) and (not isinstance(elem, theano.gof.graph.Constant))): if (elem not in flat_ipt): raise ValueError((((((('Unspecified input: ' + str(elem)) + '. This may be due to an incorrect ') + "implementation of a cost's ") + 'get_data_specs() method, or of a ') + "model's get_monitoring_data_specs() ") + 'method.')) mode = self.theano_function_mode if ((mode is not None) and hasattr(mode, 'record')): mode.record.handle_line((('Adding monitor channel ' + name) + '\n')) assert isinstance(flat_ipt, tuple) if (len(flat_ipt) != 1): for elem in flat_ipt: mode.record.handle_line((('Includes input var ' + var_descriptor(elem)) + '\n')) else: mode.record.handle_line((((name + ' input var is ') + var_descriptor(flat_ipt[0])) + '\n')) mode.record.handle_line((((('channel ' + name) + ' is ') + var_descriptor(val)) + '\n')) if (dataset is None): if (len(self._datasets) == 1): dataset = self._datasets[0] elif (len(self._datasets) == 0): raise ValueError(_err_no_data) else: raise ValueError(_err_ambig_data) try: self._datasets.index(dataset) except ValueError: reraise_as(ValueError(('The dataset specified is not one of the ' + "monitor's datasets"))) if (self.on_channel_conflict not in ('error', 'copy_history', 'overwrite')): raise ValueError(("on_channel_conflict should be either 'error'" + "'copy_history', or 'overwrite'")) if ((name in self.channels) and (self.on_channel_conflict == 'error')): raise ValueError(('Tried to create the same channel twice (%s)' % name)) elif ((name in self.channels) and (self.on_channel_conflict == 'copy_history')): self.channels[name] = MonitorChannel(ipt, val, name, data_specs, dataset, prereqs, self.channels[name]) elif ((name not in self.channels) or (self.on_channel_conflict == 'overwrite')): self.channels[name] = MonitorChannel(ipt, val, name, data_specs, dataset, prereqs) self._dirty = True
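# Hypothetical usage (channel name, parameter choice and dataset are
# illustrative): track a quantity that depends only on shared variables, so
# no input batch is needed and a NullSpace data_specs is passed explicitly.
# Assumes valid_set was previously registered with add_dataset.
W = model.get_params()[0]
monitor = Monitor.get_monitor(model)
monitor.add_channel(name='valid_W_norm',
                    ipt=None,
                    val=T.sqr(W).mean(),
                    dataset=valid_set,
                    data_specs=(NullSpace(), ''))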
'Sometimes we serialize models and then load them somewhere else but still try to use their Monitor, and the Monitor is in a mangled state. I\'ve added some calls to _sanity_check to try to catch when that happens. Not sure what to do for a long term fix. I think it requires making theano graphs serializable first.'
def _sanity_check(self):
for name in self.channels:
    channel = self.channels[name]
    assert hasattr(channel, 'prereqs')
'Returns a model\'s monitor. If the model doesn\'t have a monitor yet, installs one and returns that. Parameters model : object An object that implements the `Model` interface specified in `pylearn2.models`.'
@classmethod
def get_monitor(cls, model):
if hasattr(model, 'monitor'):
    rval = model.monitor
    rval._sanity_check()
else:
    rval = Monitor(model)
    model.monitor = rval
return rval
'.. todo:: WRITEME Returns batch_size : int The size of the batches used for monitoring'
@property
def batch_size(self):
return self._batch_size
'.. todo:: WRITEME Returns num_batches : int The number of batches used for monitoring'
@property
def num_batches(self):
return self._num_batches
'Sets up the monitor for a cost minimization problem. Adds channels defined by both the model and the cost for the specified dataset(s), as well as a channel called \'objective\' defined by the costs\' __call__ method. Parameters dataset : pylearn2.datasets.Dataset Dataset or dictionary mapping string names to Datasets. If string names are used, then for every dataset, each channel defined by the model or cost will be replicated with that dataset\'s name followed by an underscore as the prefix. For example, if your cost defines a channel called \'misclass\', and datasets is {\'train\' : train_dataset, \'valid\' : valid_dataset}, you will get channels called \'train_misclass\' and \'valid_misclass\'. cost : pylearn2.costs.Cost The cost being optimized by training. The value of the cost will appear as the `objective` channel. Its `get_monitoring_channels` method will also be used to supply other channels. extra_costs : OrderedDict, optional A dictionary mapping channel names to Cost objects. Their value will appear as the specified channel name. They will also provide more monitoring channels via their `get_monitoring_channels` method. obj_prereqs : None, or list of functions Functions to pass as prerequisites to the `objective` channel. cost_monitoring_args : dict Dictionary of kwargs that will be passed to `cost.get_monitoring_channels()` (but not for the extra_costs).'
def setup(self, dataset, cost, batch_size, num_batches=None, extra_costs=None, mode='sequential', obj_prereqs=None, cost_monitoring_args=None):
if (dataset is None): return if isinstance(dataset, Dataset): dataset = {'': dataset} else: assert isinstance(dataset, dict) assert all((isinstance(key, str) for key in dataset)) assert all((isinstance(dataset[key], Dataset) for key in dataset)) if (extra_costs is None): costs = {} else: assert isinstance(extra_costs, (OrderedDict, dict)) costs = extra_costs assert ('' not in costs) costs[''] = cost if (cost_monitoring_args is None): cost_monitoring_args = {} model = self.model cost_names = sorted(costs.keys()) spaces = [] sources = [] for c in cost_names: (c_space, c_source) = costs[c].get_data_specs(model) spaces.append(c_space) sources.append(c_source) (m_space, m_source) = model.get_monitoring_data_specs() spaces.append(m_space) sources.append(m_source) nested_space = CompositeSpace(spaces) nested_sources = tuple(sources) mapping = DataSpecsMapping((nested_space, nested_sources)) space_tuple = mapping.flatten(nested_space, return_tuple=True) source_tuple = mapping.flatten(nested_sources, return_tuple=True) ipt = tuple((space.make_theano_batch(name=('monitor_%s' % source), batch_size=None) for (space, source) in safe_zip(space_tuple, source_tuple))) nested_ipt = mapping.nest(ipt) custom_channels = {} for (i, cost_name) in enumerate(cost_names): if (cost_name == ''): prefix = '' else: prefix = (cost_name + '_') cost = costs[cost_name] cost_ipt = nested_ipt[i] raw_channels = cost.get_monitoring_channels(model, cost_ipt) channels = {} for name in raw_channels: channels[(prefix + name)] = (raw_channels[name], cost_ipt, (spaces[i], sources[i])) custom_channels.update(channels) model_channels = model.get_monitoring_channels(nested_ipt[(-1)]) channels = {} for name in model_channels: channels[name] = (model_channels[name], nested_ipt[(-1)], (spaces[(-1)], sources[(-1)])) custom_channels.update(channels) if is_stochastic(mode): seed = [[2013, 2, 22]] else: seed = None for dataset_name in dataset: cur_dataset = dataset[dataset_name] self.add_dataset(dataset=cur_dataset, mode=mode, batch_size=batch_size, num_batches=num_batches, seed=seed) if (dataset_name == ''): dprefix = '' else: dprefix = (dataset_name + '_') for (i, cost_name) in enumerate(cost_names): cost = costs[cost_name] cost_ipt = nested_ipt[i] cost_value = cost.expr(model, cost_ipt) if (cost_value is not None): if (cost_name == ''): name = (dprefix + 'objective') prereqs = obj_prereqs else: name = (dprefix + cost_name) prereqs = None cost.get_data_specs(model)[0].validate(cost_ipt) self.add_channel(name=name, ipt=cost_ipt, val=cost_value, data_specs=cost.get_data_specs(model), dataset=cur_dataset, prereqs=prereqs) for key in custom_channels: (val, ipt, data_specs) = custom_channels[key] data_specs[0].validate(ipt) self.add_channel(name=(dprefix + key), ipt=ipt, val=val, data_specs=data_specs, dataset=cur_dataset)
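# Hypothetical usage (dataset names are illustrative): set up monitoring of
# the objective plus all cost- and model-defined channels on two datasets,
# yielding channels such as 'train_objective' and 'valid_objective'.
monitor = Monitor.get_monitor(model)
monitor.setup(dataset={'train': train_set, 'valid': valid_set},
              cost=cost,
              batch_size=100)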
'.. todo:: WRITEME Returns s : str A reasonably human-readable string representation of the object.'
def __str__(self):
try:
    graph_input_str = str(self.graph_input)
except Exception:
    graph_input_str = '<bad graph input>'
try:
    val_str = str(self.val)
except Exception:
    val_str = '<bad val>'
try:
    name_str = str(self.name)
except Exception:
    name_str = '<bad name>'
try:
    prereqs_str = str(self.prereqs)
except Exception:
    prereqs_str = '<bad prereqs>'
return 'MonitorChannel(%s,%s,%s,%s)' % (graph_input_str, val_str,
                                        name_str, prereqs_str)
'.. todo:: WRITEME Returns d : dict A dictionary mapping the string names of the fields of the class to values appropriate for pickling.'
def __getstate__(self):
if hasattr(self, 'val'):
    doc = get_monitor_doc(self.val)
elif hasattr(self, 'doc'):
    doc = self.doc
else:
    doc = None
return {'doc': doc,
        'example_record': self.example_record,
        'batch_record': self.batch_record,
        'time_record': self.time_record,
        'epoch_record': self.epoch_record,
        'val_record': self.val_record}
'Sets the object to have the state described by `d`. Parameters d : dict A dictionary mapping string names of fields to values for these fields.'
def __setstate__(self, d):
self.__dict__.update(d)
if 'batch_record' not in d:
    self.batch_record = [None] * len(self.val_record)
if 'epoch_record' not in d:
    self.epoch_record = range(len(self.val_record))
if 'time_record' not in d:
    self.time_record = [None] * len(self.val_record)
'Set the inverse temperature parameters of the AIS procedure. Parameters betas : numpy.ndarray, optional Vector of temperatures specifying interpolating distributions key_betas : numpy.ndarray, optional If specified (not None), specifies specific temperatures at which we want to compute the AIS estimate. AIS.run will then return a vector, containing AIS at each key_beta temperature, including the nominal temperature.'
def set_betas(self, betas=None, key_betas=None):
self.key_betas = None if key_betas is None else numpy.sort(key_betas)
betas = (numpy.array(betas, dtype=config.floatX)
         if betas is not None else self.dflt_beta)
if key_betas is not None:
    betas = numpy.hstack((betas, key_betas))
    betas.sort()
self.betas = betas
'Performs the grunt-work, implementing .. math:: \log w^{(i)} += \mathcal{F}_{k-1}(v_{k-1}) - \mathcal{F}_{k}(v_{k-1}) recursively for all temperatures. Parameters n_steps : int, optional WRITEME'
def run(self, n_steps=1):
if not hasattr(self, 'betas'):
    self.set_betas()
self.std_ais_w = []
self.logz_beta = []
self.var_logz_beta = []
state = self.v_sample0
ki = 0
for i in range(len(self.betas) - 1):
    (bp, bp1) = (self.betas[i], self.betas[i + 1])
    self.log_ais_w += (self.free_energy_fn(bp, state) -
                       self.free_energy_fn(bp1, state))
    if (i + 1) % self.log_int == 0:
        m = numpy.max(self.log_ais_w)
        std_ais = (numpy.log(numpy.std(numpy.exp(self.log_ais_w - m))) +
                   m - numpy.log(self.n_runs) / 2)
        self.std_ais_w.append(std_ais)
    if (self.key_betas is not None and ki < len(self.key_betas) and
            bp1 == self.key_betas[ki]):
        (log_ais_w_bi, var_log_ais_w_bi) = \
            self.estimate_from_weights(self.log_ais_w)
        self.logz_beta.insert(0, log_ais_w_bi)
        self.var_logz_beta.insert(0, var_log_ais_w_bi)
        ki += 1
    state = self.sample_fn(bp1, state)
'Once run() method has been called, estimates the mean and variance of log(Zb/Za). Parameters log_ais_w : None or 1D numpy.ndarray optional override for log_ais_w. When None, estimates log(Zb/Za) using the log AIS weights computed by AIS.run() method. Returns f : float Estimated mean of log(Zb/Za), log-ratio of partition functions of model B and A. v : float Estimated variance of log(Zb/Za)'
def estimate_from_weights(self, log_ais_w=None):
log_ais_w = self.log_ais_w if log_ais_w is None else log_ais_w
dlogz = self.log_mean(log_ais_w)
m = numpy.max(log_ais_w)
var_dlogz = (log_ais_w.shape[0] *
             numpy.sum(numpy.exp(2 * (log_ais_w - m))) /
             numpy.sum(numpy.exp(log_ais_w - m)) ** 2 - 1.0)
return (dlogz, var_dlogz)
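# A numerically stable log-mean of the AIS importance weights; this is a
# sketch of what the log_mean helper used above is assumed to compute, not
# the original implementation.
def log_mean(log_w):
    m = numpy.max(log_w)
    return m + numpy.log(numpy.mean(numpy.exp(log_w - m)))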
'Looks whether the model performs better than earlier. If it\'s the case, records the model\'s parameters. Parameters model : pylearn2.models.model.Model Not used dataset : pylearn2.datasets.dataset.Dataset Not used algorithm : TrainingAlgorithm Not used'
def on_monitor(self, model, dataset, algorithm):
if self.supervised:
    it = self.dataset.iterator('sequential',
                               batch_size=self.batch_size,
                               targets=True)
    new_cost = numpy.mean([self.cost_function(minibatch, target)
                           for (minibatch, target) in it])
else:
    it = self.dataset.iterator('sequential',
                               batch_size=self.batch_size,
                               targets=False)
    new_cost = numpy.mean([self.cost_function(minibatch)
                           for minibatch in it])
if new_cost < self.best_cost:
    self.best_cost = new_cost
    self.best_params = self.model.get_param_values()
'Returns the best parameters up to now for the model.'
def get_best_params(self):
return self.best_params
'Sets some model tag entries. Parameters model : pylearn2.models.model.Model dataset : pylearn2.datasets.dataset.Dataset Not used algorithm : TrainingAlgorithm Not used'
def setup(self, model, dataset, algorithm):
if self._tag_key in model.tag:
    log.warning('Model tag key "%s" already found. This may indicate '
                'multiple instances of %s trying to use the same tag '
                'entry.', self._tag_key, self.__class__.__name__)
    log.warning('If this is the case, specify tag key manually in %s '
                'constructor.', self.__class__.__name__)
model.tag[self._tag_key]['channel_name'] = self.channel_name
if self.save_path is not None:
    model.tag[self._tag_key]['save_path'] = os.path.abspath(self.save_path)
model.tag[self._tag_key]['hostname'] = socket.gethostname()
self._update_tag(model)
'Looks whether the model performs better than earlier. If it\'s the case, saves the model. Parameters model : pylearn2.models.model.Model model.monitor must contain a channel with name given by self.channel_name dataset : pylearn2.datasets.dataset.Dataset Not used algorithm : TrainingAlgorithm Not used'
def on_monitor(self, model, dataset, algorithm):
monitor = model.monitor
channels = monitor.channels
channel = channels[self.channel_name]
val_record = channel.val_record
new_cost = val_record[-1]
if (self.coeff * new_cost < self.coeff * self.best_cost and
        monitor._epochs_seen >= self.start_epoch):
    self.best_cost = new_cost
    self._update_tag(model)
    if self.store_best_model:
        self.best_model = deepcopy(model)
    if self.save_path is not None:
        with log_timing(log, 'Saving to ' + self.save_path):
            serial.save(self.save_path, model, on_overwrite='backup')
'Update `model.tag` with information about the current best. Parameters model : pylearn2.models.model.Model The model to update.'
def _update_tag(self, model):
model.tag[self._tag_key]['best_cost'] = self.best_cost
'Method that instantiates a response message for a given request message. It is not necessary to implement this function on response messages.'
def get_response(self):
raise NotImplementedError('get_response is not implemented.')
''
def __init__(self, address='127.0.0.1', req_port=5555):
if not zmq_available:
    raise ImportError('zeromq needs to be installed to use this module.')
self.address = 'tcp://%s' % address
assert req_port > 0
self.req_port = req_port
self.context = zmq.Context()
self.req_sock = self.context.socket(zmq.REQ)
self.req_sock.connect(self.address + ':' + str(self.req_port))
self.channels = {}
'Returns a list of the channels being monitored.'
def list_channels(self):
self.req_sock.send_pyobj(ChannelListRequest())
return self.req_sock.recv_pyobj()
'Retrieves data for a specified set of channels and combines that data with any previously retrieved data. This assumes all the channels have the same number of values. It is unclear whether this is a reasonable assumption. If they do not have the same number of values then it may request too much or too little data, leading to duplicated data or holes in the data, respectively. This could be made more robust by making a call to retrieve all the data for all of the channels. Parameters channel_list : list A list of the channels for which data should be requested. start : int The starting epoch for which data should be requested. end : int The final epoch for which data should be requested. step : int The number of epochs to be skipped between data points.'
def update_channels(self, channel_list, start=(-1), end=(-1), step=1):
assert (start == -1 and end == -1) or end > start
if start == -1:
    start = 0
    if len(self.channels.keys()) > 0:
        channel_name = list(self.channels.keys())[0]
        start = len(self.channels[channel_name].epoch_record)
self.req_sock.send_pyobj(ChannelsRequest(channel_list, start=start,
                                         end=end, step=step))
rsp_msg = self.req_sock.recv_pyobj()
if isinstance(rsp_msg.data, Exception):
    raise rsp_msg.data
for channel in rsp_msg.data.keys():
    rsp_chan = rsp_msg.data[channel]
    if isinstance(rsp_chan, Exception):
        raise rsp_chan
    if channel not in self.channels.keys():
        self.channels[channel] = rsp_chan
    else:
        chan = self.channels[channel]
        chan.batch_record += rsp_chan.batch_record
        chan.epoch_record += rsp_chan.epoch_record
        chan.example_record += rsp_chan.example_record
        chan.time_record += rsp_chan.time_record
        chan.val_record += rsp_chan.val_record
'Tracks and plots a specified set of channels in real time. Parameters channel_list : list A list of the channels for which data has been requested.'
def follow_channels(self, channel_list):
if not pyplot_available:
    raise ImportError('pyplot needs to be installed for this functionality.')
plt.clf()
plt.ion()
while True:
    self.update_channels(channel_list)
    plt.clf()
    for channel_name in self.channels:
        plt.plot(self.channels[channel_name].epoch_record,
                 self.channels[channel_name].val_record,
                 label=channel_name)
    plt.legend()
    plt.ion()
    plt.draw()
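# Hypothetical usage (the client class defined above is called LiveMonitor
# here for illustration; address and channel names are made up): connect to a
# training process that serves monitoring data over ZeroMQ, list its channels
# and plot two of them as training progresses.
monitor = LiveMonitor(address='127.0.0.1', req_port=5555)
print(monitor.list_channels())
monitor.follow_channels(['train_objective', 'valid_objective'])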
'Add WMAPE Numerator channels for monitoring dataset(s) to model.monitor. Parameters model : object The model being trained. dataset : object Training dataset. algorithm : object Training algorithm.'
def setup(self, model, dataset, algorithm):
(m_space, m_source) = model.get_monitoring_data_specs()
(state, target) = m_space.make_theano_batch()
y = target[:, 0]
y_hat = model.fprop(state)[:, 0]
wmape_numerator = abs(y - y_hat).sum()
wmape_numerator = T.cast(wmape_numerator, config.floatX)
for (dataset_name, dataset) in algorithm.monitoring_dataset.items():
    if dataset_name:
        channel_name = '{0}_{1}'.format(dataset_name,
                                        self.channel_name_suffix)
    else:
        channel_name = self.channel_name_suffix
    model.monitor.add_channel(name=channel_name,
                              ipt=(state, target),
                              val=wmape_numerator,
                              data_specs=(m_space, m_source),
                              dataset=dataset)
'Add WMAPE Denominator channels for monitoring dataset(s) to model.monitor. Parameters model : object The model being trained. dataset : object Training dataset. algorithm : object Training algorithm.'
def setup(self, model, dataset, algorithm):
(m_space, m_source) = model.get_monitoring_data_specs()
(state, target) = m_space.make_theano_batch()
y = target[:, 0]
wmape_denominator = abs(y).sum()
wmape_denominator = T.cast(wmape_denominator, config.floatX)
for (dataset_name, dataset) in algorithm.monitoring_dataset.items():
    if dataset_name:
        channel_name = '{0}_{1}'.format(dataset_name,
                                        self.channel_name_suffix)
    else:
        channel_name = self.channel_name_suffix
    model.monitor.add_channel(name=channel_name,
                              ipt=(state, target),
                              val=wmape_denominator,
                              data_specs=(m_space, m_source),
                              dataset=dataset)
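# The two setup methods above register the numerator and denominator of the
# weighted mean absolute percentage error as separate channels; dividing
# their recorded values gives WMAPE = sum(|y - y_hat|) / sum(|y|).
# Toy check with made-up numbers:
import numpy as np
y = np.array([2.0, 4.0, 6.0])
y_hat = np.array([1.0, 5.0, 6.0])
wmape = np.abs(y - y_hat).sum() / np.abs(y).sum()   # 2.0 / 12.0 == 1/6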
'Calculate ROC AUC score. Parameters y_true : tensor_like Target class labels. y_score : tensor_like Predicted class labels or probabilities for positive class.'
def make_node(self, y_true, y_score):
y_true = T.as_tensor_variable(y_true)
y_score = T.as_tensor_variable(y_score)
output = [T.scalar(name=self.name, dtype=config.floatX)]
return gof.Apply(self, [y_true, y_score], output)
'Calculate ROC AUC score. Parameters node : Apply instance Symbolic inputs and outputs. inputs : list Sequence of inputs. output_storage : list List of mutable 1-element lists.'
def perform(self, node, inputs, output_storage):
if roc_auc_score is None:
    raise RuntimeError('Could not import from sklearn.')
(y_true, y_score) = inputs
try:
    roc_auc = roc_auc_score(y_true, y_score)
except ValueError:
    roc_auc = np.nan
output_storage[0][0] = theano._asarray(roc_auc, dtype=config.floatX)
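# Hypothetical usage (values are illustrative; assumes theano and sklearn are
# importable as in the code above): wrap the Op in a compiled function and
# evaluate it on a toy batch.
y_true = T.ivector('y_true')
y_score = T.vector('y_score')
auc = RocAucScoreOp('roc_auc')(y_true, y_score)
f = theano.function([y_true, y_score], auc)
print(f([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8]))   # prints 0.75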
'Add ROC AUC channels for monitoring dataset(s) to model.monitor. Parameters model : object The model being trained. dataset : object Training dataset. algorithm : object Training algorithm.'
def setup(self, model, dataset, algorithm):
(m_space, m_source) = model.get_monitoring_data_specs()
(state, target) = m_space.make_theano_batch()
y = T.argmax(target, axis=1)
y_hat = model.fprop(state)[:, self.positive_class_index]
if self.negative_class_index is None:
    y = T.eq(y, self.positive_class_index)
else:
    pos = T.eq(y, self.positive_class_index)
    neg = T.eq(y, self.negative_class_index)
    keep = T.add(pos, neg).nonzero()
    y = T.eq(y[keep], self.positive_class_index)
    y_hat = y_hat[keep]
roc_auc = RocAucScoreOp(self.channel_name_suffix)(y, y_hat)
roc_auc = T.cast(roc_auc, config.floatX)
for (dataset_name, dataset) in algorithm.monitoring_dataset.items():
    if dataset_name:
        channel_name = '{0}_{1}'.format(dataset_name,
                                        self.channel_name_suffix)
    else:
        channel_name = self.channel_name_suffix
    model.monitor.add_channel(name=channel_name,
                              ipt=(state, target),
                              val=roc_auc,
                              data_specs=(m_space, m_source),
                              dataset=dataset)
'.. todo:: WRITEME Notes `dataset` argument is ignored'
def setup(self, model, dataset, algorithm):
dataset = None
preprocessor = CentralWindow(self._window_shape)
for data in self._center:
    preprocessor.apply(data)
randomize_now = self._randomize + self._randomize_once
self._original = dict(
    (data, _zero_pad(data.get_topological_view().astype('float32'),
                     self._pad_randomized))
    for data in randomize_now)
self.randomize_datasets(randomize_now)
'Applies random translations and flips to the selected datasets. Parameters datasets : WRITEME'
def randomize_datasets(self, datasets):
for dataset in datasets:
    if tuple(dataset.view_converter.axes) == ('c', 0, 1, 'b'):
        wf_func = random_window_and_flip_c01b
    elif tuple(dataset.view_converter.axes) == ('b', 0, 1, 'c'):
        wf_func = random_window_and_flip_b01c
    else:
        raise ValueError('Axes of dataset is not supported: %s' %
                         str(dataset.view_converter.axes))
    arr = wf_func(self._original[dataset], self._window_shape,
                  rng=self._rng, flip=self._flip)
    dataset.set_topological_view(arr, axes=dataset.view_converter.axes)
'.. todo:: WRITEME Notes All arguments are ignored.'
def on_monitor(self, model, dataset, algorithm):
model = None
dataset = None
algorithm = None
self.randomize_datasets(self._randomize)
'Setup the plotters. Parameters model : pylearn2.models.Model The model trained dataset : pylearn2.datasets.Dataset The dataset on which the model is trained algorithm : pylearn2.training_algorithms.TrainingAlgorithm The algorithm the model is trained with'
def setup(self, model, dataset, algorithm):
raise NotImplementedError((str(type(self)) + ' does not implement setup.'))
'The method that draw and save the desired figure, which depend on the object and its attribute. This method is called by the PlotManager object as frequently as the `freq` attribute defines it.'
def plot(self):
raise NotImplementedError((str(type(self)) + ' does not implement plot.'))
'Make the produced files readable by everyone. Parameters public : bool If public is True, then the associated files are readable by everyone.'
def set_permissions(self, public):
if public:
    for filename in self.filenames:
        make_readable(filename)
'.. todo:: WRITEME'
def score(self, X):
assert X.dtype.find('int') == -1
X_name = 'X' if X.name is None else X.name
E = self.free_energy(X)
assert len(E.type.broadcastable) == 1
dummy = T.sum(E)
rval = T.grad(dummy, X)
rval.name = 'score(' + X_name + ')'
return rval
'.. todo:: WRITEME'
def free_energy(self, X):
raise NotImplementedError((str(type(self)) + ' has not implemented free_energy(self,X)'))
'.. todo:: WRITEME'
def energy(self, varlist):
raise NotImplementedError((str(type(self)) + ' has not implemented energy(self,varlist)'))
'.. todo:: WRITEME'
def __call__(self, varlist):
return self.energy(varlist)
'.. todo:: WRITEME'
def supports_vector_sigma(self):
raise NotImplementedError()
'.. todo:: WRITEME'
def log_P_H_given_V(self, H, V):
p_one = self.mean_H_given_V(V)
rval = T.log(H * p_one + (1.0 - H) * (1.0 - p_one)).sum(axis=1)
return rval
'.. todo:: WRITEME'
def mean_H_given_V(self, V):
raise NotImplementedError()
'.. todo:: WRITEME'
@classmethod
def supports_vector_sigma(cls):
return False
'.. todo:: WRITEME'
def energy(self, varlist):
(V, H) = varlist
return -(T.dot(V, self.bias_vis) +
         (self.transformer.lmul(V) * H).sum(axis=1) +
         T.dot(H, self.bias_hid) -
         0.5 * T.sqr(V).sum(axis=1)) / T.sqr(self.sigma)
'.. todo:: WRITEME'
def mean_H_given_V(self, V):
V_name = 'V'
if hasattr(V, 'name') and V.name is not None:
    V_name = V.name
rval = T.nnet.sigmoid((self.bias_hid + self.transformer.lmul(V)) /
                      T.sqr(self.sigma))
rval.name = 'mean_H_given_V( %s )' % V_name
return rval
'.. todo:: WRITEME'
def reconstruct(self, V):
H = self.mean_H_given_V(V)
R = self.mean_V_given_H(H)
return R
'.. todo:: WRITEME'
def mean_V_given_H(self, H):
H_name = 'H'
if hasattr(H, 'name') and H.name is not None:
    H_name = H.name
transpose = self.transformer.lmul_T(H)
transpose.name = 'transpose'
rval = self.bias_vis + transpose
rval.name = 'mean_V_given_H(%s)' % H_name
return rval
'.. todo:: WRITEME'
def free_energy(self, V):
V_name = 'V' if V.name is None else V.name
assert V.ndim == 2
bias_term = T.dot(V, self.bias_vis)
bias_term.name = 'bias_term'
assert len(bias_term.type.broadcastable) == 1
sq_term = 0.5 * T.sqr(V).sum(axis=1)
sq_term.name = 'sq_term'
assert len(sq_term.type.broadcastable) == 1
softplus_term = T.nnet.softplus((self.transformer.lmul(V) + self.bias_hid) /
                                T.sqr(self.sigma)).sum(axis=1)
assert len(softplus_term.type.broadcastable) == 1
softplus_term.name = 'softplus_term'
return (sq_term - bias_term) / T.sqr(self.sigma) - softplus_term
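# Written out, the value returned above is the Gaussian-visible RBM free
# energy that the surrounding methods assume (a transcription of the code,
# with scalar sigma):
#   F(v) = (0.5 * ||v||^2 - bias_vis^T v) / sigma^2
#          - sum_j softplus((W v + bias_hid)_j / sigma^2)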
'.. todo:: WRITEME'
def score(self, V):
rval = -(V - self.reconstruct(V)) / T.sqr(self.sigma)
rval.name = 'score'
return rval
'Returns True if training should continue for this model, False otherwise Parameters model : a Model instance Returns bool True or False as described above'
def continue_learning(self, model):
raise NotImplementedError(((str(type(self)) + ' does not implement ') + 'continue_learning.'))
'The optimization should stop if the model has run for N epochs without sufficient improvement. Parameters model : Model The model used in the experiment and from which the monitor used in the termination criterion will be extracted. Returns bool True if training should continue'
def continue_learning(self, model):
monitor = model.monitor
if self._channel_name is None:
    v = monitor.channels['objective'].val_record
else:
    v = monitor.channels[self._channel_name].val_record
if v[-1] < (1.0 - self.prop_decrease) * self.best_value:
    self.countdown = self.N
else:
    self.countdown = self.countdown - 1
if v[-1] < self.best_value:
    self.best_value = v[-1]
return self.countdown > 0
'Calls setup on all extensions.'
def setup_extensions(self):
for ext in self.extensions:
    ext.setup(self.model, self.dataset, self.algorithm)
'.. todo:: WRITEME'
def exceeded_time_budget(self, t0, time_budget):
dt = total_seconds(datetime.now() - t0)
if time_budget is not None and dt >= time_budget:
    log.warning('Time budget exceeded (%.3f/%d seconds).',
                dt, time_budget)
    self.model.monitor.time_budget_exceeded = True
    return True
else:
    return False