<SYSTEM_TASK:> Return the path of the revrand data dir. <END_TASK>
<USER_TASK:>
Description:
def get_data_home(data_home=None):
    """
    Return the path of the revrand data dir.

    This folder is used by some large dataset loaders to avoid downloading
    the data several times.

    By default the data dir is set to a folder named 'revrand_data' in the
    user home folder. Alternatively, it can be set by the 'REVRAND_DATA'
    environment variable or programmatically by giving an explicit folder
    path. The '~' symbol is expanded to the user home folder.

    If the folder does not already exist, it is automatically created.
    """
    data_home_default = Path(__file__).ancestor(3).child('demos',
                                                         '_revrand_data')

    if data_home is None:
        data_home = os.environ.get('REVRAND_DATA', data_home_default)

    if not os.path.exists(data_home):
        os.makedirs(data_home)

    return data_home
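A minimal usage sketch (hypothetical paths; the environment variable takes precedence over the packaged default directory):

    >>> import os
    >>> os.environ['REVRAND_DATA'] = '/tmp/revrand_data'
    >>> get_data_home()  # the directory is created on first use
    '/tmp/revrand_data'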
<SYSTEM_TASK:> Fetch the SARCOS dataset from the internet and parse appropriately into <END_TASK>
<USER_TASK:>
Description:
def fetch_gpml_sarcos_data(transpose_data=True, data_home=None):
    """
    Fetch the SARCOS dataset from the internet and parse appropriately into
    python arrays

    >>> gpml_sarcos = fetch_gpml_sarcos_data()

    >>> gpml_sarcos.train.data.shape
    (44484, 21)

    >>> gpml_sarcos.train.targets.shape
    (44484,)

    >>> gpml_sarcos.train.targets.round(2)  # doctest: +ELLIPSIS
    array([ 50.29,  44.1 ,  37.35, ...,  22.7 ,  17.13,   6.52])

    >>> gpml_sarcos.test.data.shape
    (4449, 21)

    >>> gpml_sarcos.test.targets.shape
    (4449,)
    """
    train_src_url = "http://www.gaussianprocess.org/gpml/data/sarcos_inv.mat"
    test_src_url = ("http://www.gaussianprocess.org/gpml/data/sarcos_inv_test"
                    ".mat")

    data_home = get_data_home(data_home=data_home)

    train_filename = os.path.join(data_home, 'sarcos_inv.mat')
    test_filename = os.path.join(data_home, 'sarcos_inv_test.mat')

    if not os.path.exists(train_filename):
        urllib.request.urlretrieve(train_src_url, train_filename)

    if not os.path.exists(test_filename):
        urllib.request.urlretrieve(test_src_url, test_filename)

    train_data = loadmat(train_filename).get('sarcos_inv')
    test_data = loadmat(test_filename).get('sarcos_inv_test')

    train_bunch = Bunch(data=train_data[:, :21],
                        targets=train_data[:, 21])

    test_bunch = Bunch(data=test_data[:, :21],
                       targets=test_data[:, 21])

    return Bunch(train=train_bunch, test=test_bunch)
<SYSTEM_TASK:> Fetch the USPS handwritten digits dataset from the internet and parse <END_TASK>
<USER_TASK:>
Description:
def fetch_gpml_usps_resampled_data(transpose_data=True, data_home=None):
    """
    Fetch the USPS handwritten digits dataset from the internet and parse
    appropriately into python arrays

    >>> usps_resampled = fetch_gpml_usps_resampled_data()

    >>> usps_resampled.train.targets.shape
    (4649,)

    >>> usps_resampled.train.targets  # doctest: +ELLIPSIS
    array([6, 0, 1, ..., 9, 2, 7])

    >>> usps_resampled.train.data.shape
    (4649, 256)

    >>> np.all(-1 <= usps_resampled.train.data)
    True

    >>> np.all(usps_resampled.train.data < 1)
    True

    >>> usps_resampled.test.targets.shape
    (4649,)

    >>> usps_resampled.test.data.shape
    (4649, 256)

    >>> usps_resampled = fetch_gpml_usps_resampled_data(transpose_data=False)
    >>> usps_resampled.train.data.shape
    (256, 4649)
    """
    data_home = get_data_home(data_home=data_home)
    data_filename = os.path.join(data_home,
                                 'usps_resampled/usps_resampled.mat')

    if not os.path.exists(data_filename):
        r = requests.get('http://www.gaussianprocess.org/gpml/data/'
                         'usps_resampled.tar.bz2')

        with tarfile.open(fileobj=BytesIO(r.content)) as tar_infile:
            tar_infile.extract('usps_resampled/usps_resampled.mat',
                               path=data_home)

    matlab_dict = loadmat(data_filename)

    train_data = matlab_dict['train_patterns']
    test_data = matlab_dict['test_patterns']

    if transpose_data:
        train_data = train_data.T
        test_data = test_data.T

    train_targets = matlab_dict['train_labels'].T
    train_targets = np.argwhere(train_targets == 1)[:, 1]

    test_targets = matlab_dict['test_labels'].T
    test_targets = np.argwhere(test_targets == 1)[:, 1]

    train_bunch = Bunch(data=train_data,
                        targets=train_targets)

    test_bunch = Bunch(data=test_data,
                       targets=test_targets)

    return Bunch(train=train_bunch, test=test_bunch)
<SYSTEM_TASK:> Parse a DJANGO_COLORS environment variable to produce the system palette <END_TASK>
<USER_TASK:>
Description:
def parse_color_setting(config_string):
    """Parse a DJANGO_COLORS environment variable to produce the system palette

    The general form of a palette definition is:

        "palette;role=fg;role=fg/bg;role=fg,option,option;role=fg/bg,option,option"

    where:
        palette is a named palette; one of 'light', 'dark', or 'nocolor'.
        role is a named style used by Django
        fg is a foreground color.
        bg is a background color.
        option is a display option.

    Specifying a named palette is the same as manually specifying the
    individual definitions for each role. Any individual definitions
    following the palette definition will augment the base palette
    definition.

    Valid roles:
        'error', 'notice', 'sql_field', 'sql_coltype', 'sql_keyword',
        'sql_table', 'http_info', 'http_success', 'http_redirect',
        'http_bad_request', 'http_not_found', 'http_server_error'

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold', 'underscore', 'blink', 'reverse', 'conceal'
    """
    if not config_string:
        return PALETTES[DEFAULT_PALETTE]

    # Split the color configuration into parts
    parts = config_string.lower().split(';')

    palette = PALETTES[NOCOLOR_PALETTE].copy()
    for part in parts:
        if part in PALETTES:
            # A default palette has been specified
            palette.update(PALETTES[part])
        elif '=' in part:
            # Process a palette defining string
            definition = {}

            # Break the definition into the role,
            # plus the list of specific instructions.
            # The role must be in upper case
            role, instructions = part.split('=')
            role = role.upper()

            styles = instructions.split(',')
            styles.reverse()

            # The first instruction can contain a slash
            # to break apart fg/bg.
            colors = styles.pop().split('/')
            colors.reverse()
            fg = colors.pop()
            if fg in color_names:
                definition['fg'] = fg
            if colors and colors[-1] in color_names:
                definition['bg'] = colors[-1]

            # All remaining instructions are options
            opts = tuple(s for s in styles if s in opt_dict.keys())
            if opts:
                definition['opts'] = opts

            # The nocolor palette has all available roles.
            # Use that palette as the basis for determining
            # if the role is valid.
            if role in PALETTES[NOCOLOR_PALETTE] and definition:
                palette[role] = definition

    # If there are no colors specified, return the empty palette.
    if palette == PALETTES[NOCOLOR_PALETTE]:
        return None
    return palette
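A hedged usage sketch, assuming the PALETTES, color_names and opt_dict constants from django.utils.termcolors are in scope:

    >>> palette = parse_color_setting('light;error=yellow/blue,blink')
    >>> palette['ERROR']
    {'fg': 'yellow', 'bg': 'blue', 'opts': ('blink',)}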
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def couple(f, g): r""" Compose a function thate returns two arguments. Given a pair of functions that take the same arguments, return a single function that returns a pair consisting of the return values of each function. Notes ----- Equivalent to:: lambda f, g: lambda *args, **kwargs: (f(*args, **kwargs), g(*args, **kwargs)) Examples -------- >>> f = lambda x: 2*x**3 >>> df = lambda x: 6*x**2 >>> f_new = couple(f, df) >>> f_new(5) (250, 150) """
def coupled(*args, **kwargs): return f(*args, **kwargs), g(*args, **kwargs) return coupled
<SYSTEM_TASK:> Inverse operation of couple. <END_TASK>
<USER_TASK:>
Description:
def decouple(fn):
    """
    Inverse operation of couple.

    Create two functions, each returning a single value, from a function
    that returns a pair of values.

    Examples
    --------
    >>> h = lambda x: (2*x**3, 6*x**2)
    >>> f, g = decouple(h)

    >>> f(5)
    250

    >>> g(5)
    150
    """
    def fst(*args, **kwargs):
        return fn(*args, **kwargs)[0]

    def snd(*args, **kwargs):
        return fn(*args, **kwargs)[1]

    return fst, snd
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def nwise(iterable, n): r""" Sliding window iterator. Iterator that acts like a sliding window of size `n`; slides over some iterable `n` items at a time. If iterable has `m` elements, this function will return an iterator over `m-n+1` tuples. Parameters ---------- iterable : iterable An iterable object. n : int Window size. Returns ------- iterator of tuples. Iterator of size `n` tuples Notes ----- First `n` iterators are created:: iters = tee(iterable, n) Next, iterator `i` is advanced `i` times:: for i, it in enumerate(iters): for _ in range(i): next(it, None) Finally, the iterators are zipped back up again:: return zip(*iters) Examples -------- >>> a = [2, 5, 7, 4, 2, 8, 6] >>> list(nwise(a, n=3)) [(2, 5, 7), (5, 7, 4), (7, 4, 2), (4, 2, 8), (2, 8, 6)] >>> pairwise = partial(nwise, n=2) >>> list(pairwise(a)) [(2, 5), (5, 7), (7, 4), (4, 2), (2, 8), (8, 6)] >>> list(nwise(a, n=1)) [(2,), (5,), (7,), (4,), (2,), (8,), (6,)] >>> list(nwise(a, n=7)) [(2, 5, 7, 4, 2, 8, 6)] .. todo:: These should probably raise `ValueError`... >>> list(nwise(a, 8)) [] >>> list(nwise(a, 9)) [] A sliding window of size `n` over a list of `m` elements gives `m-n+1` windows >>> len(a) - len(list(nwise(a, 2))) == 1 True >>> len(a) - len(list(nwise(a, 3))) == 2 True >>> len(a) - len(list(nwise(a, 7))) == 6 True """
iters = tee(iterable, n) for i, it in enumerate(iters): for _ in range(i): next(it, None) return zip(*iters)
<SYSTEM_TASK:> Reshape, but also return scalars or empty lists. <END_TASK>
<USER_TASK:>
Description:
def scalar_reshape(a, newshape, order='C'):
    """
    Reshape, but also return scalars or empty lists.

    Identical to `numpy.reshape` except in the case where `newshape` is
    the empty tuple, in which case we return a scalar instead of a
    0-dimensional array.

    Examples
    --------
    >>> a = np.arange(6)
    >>> np.array_equal(np.reshape(a, (3, 2)), scalar_reshape(a, (3, 2)))
    True

    >>> scalar_reshape(np.array([3.14]), newshape=())
    3.14

    >>> scalar_reshape(np.array([2.71]), newshape=(1,))
    array([ 2.71])

    >>> scalar_reshape(np.array([]), newshape=(0,))
    []
    """
    if newshape == ():
        return np.asscalar(a)

    if newshape == (0,):
        return []

    return np.reshape(a, newshape, order)
<SYSTEM_TASK:> Flatten a potentially recursive list of multidimensional objects. <END_TASK>
<USER_TASK:>
Description:
def flatten(arys, returns_shapes=True, hstack=np.hstack, ravel=np.ravel,
            shape=np.shape):
    """
    Flatten a potentially recursive list of multidimensional objects.

    .. note::

       Not to be confused with `np.ndarray.flatten()` (a more befitting
       name might be `chain` or `stack`, or maybe something else entirely,
       since this function is more than either `concatenate` or
       `np.flatten` itself). Rather, it is the composition of the former
       with the latter.

    Parameters
    ----------
    arys : list of objects
        One or more input arrays of possibly heterogeneous shapes and
        sizes.
    returns_shapes : bool, optional
        Default is `True`. If `True`, the tuple `(flattened, shapes)` is
        returned, otherwise only `flattened` is returned.
    hstack : callable, optional
        a function that implements horizontal stacking
    ravel : callable, optional
        a function that flattens the object
    shape : callable, optional
        a function that returns the shape of the object

    Returns
    -------
    flattened, [shapes] : {1d object, list of tuples}
        Return the flat (1d) object resulting from the concatenation of
        flattened multidimensional objects. When `returns_shapes` is
        `True`, return a list of tuples containing also the shapes of each
        array as the second element.

    See Also
    --------
    revrand.utils.unflatten : its inverse

    Examples
    --------
    >>> a = 9
    >>> b = np.array([4, 7, 4, 5, 2])
    >>> c = np.array([[7, 3, 1],
    ...               [2, 6, 6]])
    >>> d = np.array([[[6, 5, 5],
    ...                [1, 6, 9]],
    ...               [[3, 9, 1],
    ...                [9, 4, 1]]])

    >>> flatten([a, b, c, d])  # doctest: +NORMALIZE_WHITESPACE
    (array([9, 4, 7, 4, 5, 2, 7, 3, 1, 2, 6, 6, 6, 5, 5, 1, 6, 9, 3, 9,
            1, 9, 4, 1]), [(), (5,), (2, 3), (2, 2, 3)])

    Note that scalars and 0-dimensional arrays are treated differently
    from 1-dimensional singleton arrays.

    >>> flatten([3.14, np.array(2.71), np.array([1.61])])
    ... # doctest: +NORMALIZE_WHITESPACE
    (array([ 3.14,  2.71,  1.61]), [(), (), (1,)])

    >>> flatten([a, b, c, d], returns_shapes=False)
    ... # doctest: +NORMALIZE_WHITESPACE
    array([9, 4, 7, 4, 5, 2, 7, 3, 1, 2, 6, 6, 6, 5, 5, 1, 6, 9, 3, 9,
           1, 9, 4, 1])

    >>> w, x, y, z = unflatten(*flatten([a, b, c, d]))

    >>> w == a
    True

    >>> np.array_equal(x, b)
    True

    >>> np.array_equal(y, c)
    True

    >>> np.array_equal(z, d)
    True

    >>> flatten([3.14, [np.array(2.71), np.array([1.61])]])
    ... # doctest: +NORMALIZE_WHITESPACE
    (array([ 3.14,  2.71,  1.61]), [(), [(), (1,)]])
    """
    if issequence(arys) and len(arys) > 0:

        flat = partial(flatten,
                       returns_shapes=True,
                       hstack=hstack,
                       ravel=ravel,
                       shape=shape
                       )

        flat_arys, shapes = zip(*map(flat, arys))
        flat_ary = hstack(flat_arys)
        shapes = list(shapes)

    else:

        flat_ary = ravel(arys)
        shapes = shape(arys)

    return (flat_ary, shapes) if returns_shapes else flat_ary
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def unflatten(ary, shapes, reshape=scalar_reshape): r""" Inverse opertation of flatten. Given a flat (1d) array, and a list of shapes (represented as tuples), return a list of ndarrays with the specified shapes. Parameters ---------- ary : a 1d array A flat (1d) array. shapes : list of tuples A list of ndarray shapes (tuple of array dimensions) Returns ------- list of ndarrays A list of ndarrays with the specified shapes. See Also -------- revrand.utils.flatten : its inverse Notes ----- Equivalent to:: lambda ary, shapes, order='C': \ map(partial(custom_reshape, order=order), np.hsplit(ary, np.cumsum(map(partial(np.prod, dtype=int), shapes))), shapes) Examples -------- >>> a = np.array([7, 4, 5, 8, 9, 1, 4, 2, 5, 3, 4, 3]) >>> list(unflatten(a, [(1,), (1,), (4,), (2, 3)])) ... # doctest: +NORMALIZE_WHITESPACE [array([7]), array([4]), array([5, 8, 9, 1]), array([[4, 2, 5], [3, 4, 3]])] >>> list(unflatten(a, [(), (1,), (4,), (2, 3)])) ... # doctest: +NORMALIZE_WHITESPACE [7, array([4]), array([5, 8, 9, 1]), array([[4, 2, 5], [3, 4, 3]])] >>> list(unflatten(a, [(), (1,), (3,), (2, 3)])) ... # doctest: +NORMALIZE_WHITESPACE [7, array([4]), array([5, 8, 9]), array([[1, 4, 2], [5, 3, 4]])] >>> list(unflatten(a, [(), (1,), (5,), (2, 3)])) ... # doctest: +NORMALIZE_WHITESPACE +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError: total size of new array must be unchanged >>> flatten(list(unflatten(a, [(), (1,), (4,), (2, 3)]))) ... # doctest: +NORMALIZE_WHITESPACE (array([7, 4, 5, 8, 9, 1, 4, 2, 5, 3, 4, 3]), [(), (1,), (4,), (2, 3)]) >>> list(unflatten(a, [[(1,), (1,)], (4,), (2, 3)])) ... # doctest: +NORMALIZE_WHITESPACE [[array([7]), array([4])], array([5, 8, 9, 1]), array([[4, 2, 5], [3, 4, 3]])] >>> flatten(list(unflatten(a, [(), (1,), [(4,), (2, 3)]]))) ... # doctest: +NORMALIZE_WHITESPACE (array([7, 4, 5, 8, 9, 1, 4, 2, 5, 3, 4, 3]), [(), (1,), [(4,), (2, 3)]]) """
if isinstance(shapes, list): sizes = list(map(sumprod, shapes)) ends = np.cumsum(sizes) begs = np.concatenate(([0], ends[:-1])) struct_arys = [unflatten(ary[b:e], s, reshape=reshape) for b, e, s in zip(begs, ends, shapes)] return struct_arys else: struct_ary = reshape(ary, shapes) return struct_ary
<SYSTEM_TASK:> Product of tuple, or sum of products of lists of tuples. <END_TASK>
<USER_TASK:>
Description:
def sumprod(seq):
    """
    Product of tuple, or sum of products of lists of tuples.

    Parameters
    ----------
    seq : tuple or list

    Returns
    -------
    int :
        the product of input tuples, or the sum of products of lists of
        tuples, recursively.

    Examples
    --------
    >>> tup = (1, 2, 3)
    >>> sumprod(tup)
    6

    >>> lis = [(1, 2, 3), (2, 2)]
    >>> sumprod(lis)
    10

    >>> lis = [(1, 2, 3), [(2, 1), (3,)]]
    >>> sumprod(lis)
    11
    """
    if isinstance(seq, tuple):
        # important to make sure dtype is int
        # since prod on empty tuple is a float (1.0)
        return np.prod(seq, dtype=int)
    else:
        return np.sum((sumprod(s) for s in seq), dtype=int)
<SYSTEM_TASK:> Apply a function to a potentially nested list of lists. <END_TASK>
<USER_TASK:>
Description:
def map_recursive(fn, iterable, output_type=None):
    """
    Apply a function to a potentially nested list of lists.

    Parameters
    ----------
    fn : callable
        The function to apply to each element (and sub elements) in
        iterable
    iterable : iterable
        An iterable, sequence, sequence of sequences etc. :code:`fn` will
        be applied to each element in each list.
    output_type : callable, optional
        if None, a map with sub-maps in the same structure as
        :code:`iterable` will be returned, otherwise the callable will be
        applied to each sequence (i.e. :code:`list` will return lists of
        lists etc).

    Returns
    -------
    map or iterable type :
        if :code:`output_type` is None, a map with sub-maps in the same
        structure as :code:`iterable` will be returned, otherwise the
        callable will be applied to each sequence (i.e. :code:`list` will
        return lists of lists etc).

    Examples
    --------
    >>> seq = [1, 2, [3, 4, [5, 6]], 7]

    >>> map_recursive(lambda x: x > 4, seq, output_type=list)
    [False, False, [False, False, [True, True]], True]

    >>> map_recursive(lambda x: 2 * x, seq, output_type=tuple)
    (2, 4, (6, 8, (10, 12)), 14)
    """
    def applyormap(it):
        if issequence(it):
            return map_recursive(fn, it, output_type)
        else:
            return fn(it)

    applied = map(applyormap, iterable)
    return output_type(applied) if output_type else applied
<SYSTEM_TASK:> Decorator for adding partial application functionality to a basis object. <END_TASK>
<USER_TASK:>
Description:
def slice_init(func):
    """
    Decorator for adding partial application functionality to a basis
    object.

    This will add an "apply_ind" argument to a basis object initialiser
    that can be used to apply the basis function to only the dimensions
    specified in apply_ind. E.g.,

    >>> X = np.ones((100, 20))
    >>> base = LinearBasis(onescol=False, apply_ind=slice(0, 10))
    >>> base.transform(X).shape
    (100, 10)
    """
    @wraps(func)
    def new_init(self, *args, **kwargs):

        apply_ind = kwargs.pop('apply_ind', None)
        if np.isscalar(apply_ind):
            apply_ind = [apply_ind]

        func(self, *args, **kwargs)
        self.apply_ind = apply_ind

    return new_init
<SYSTEM_TASK:> Decorator for implementing partial application. <END_TASK>
<USER_TASK:>
Description:
def slice_transform(func, self, X, *vargs, **kwargs):
    """
    Decorator for implementing partial application.

    This must decorate the ``transform`` and ``grad`` methods of basis
    objects if the ``slice_init`` decorator was used.
    """
    X = X if self.apply_ind is None else X[:, self.apply_ind]
    return func(self, X, *vargs, **kwargs)
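A minimal sketch of how the two decorators cooperate on a hypothetical ToyBasis class (this assumes slice_transform is wrapped with decorator.decorator, as in revrand, so the plain @ syntax works):

    import numpy as np

    class ToyBasis(object):

        @slice_init
        def __init__(self):
            pass

        @slice_transform
        def transform(self, X):
            return X  # identity basis, purely for illustration

    X = np.ones((100, 20))
    base = ToyBasis(apply_ind=slice(0, 10))
    base.transform(X).shape  # (100, 10)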
<SYSTEM_TASK:> Apply a function that takes a gradient matrix to a sequence of 2 or 3 <END_TASK>
<USER_TASK:>
Description:
def apply_grad(fun, grad):
    """
    Apply a function that takes a gradient matrix to a sequence of 2 or 3
    dimensional gradients.

    This is particularly useful when the gradient of a basis concatenation
    object is quite complex, e.g.

    >>> X = np.random.randn(100, 3)
    >>> y = np.random.randn(100)
    >>> N, d = X.shape
    >>> base = RandomRBF(Xdim=d, nbases=5) + RandomRBF(Xdim=d, nbases=5,
    ...     lenscale=Parameter(np.ones(d), Positive()))
    >>> Phi = base.transform(X, 1., np.ones(d))
    >>> dffun = lambda dPhi: y.dot(Phi).dot(dPhi.T).dot(y)
    >>> df = apply_grad(dffun, base.grad(X, 1., np.ones(d)))
    >>> np.isscalar(df[0])
    True
    >>> df[1].shape
    (3,)

    Parameters
    ----------
    fun: callable
        the function to apply to the (2d) gradient.
    grad: ndarray or generator
        the gradient of the basis function (output of base.grad).

    Returns
    -------
    scalar, ndarray or sequence:
        the result of applying fun(grad) for a structured grad.
    """
    if issequence(grad):
        fgrad = [apply_grad(fun, g) for g in grad]
        return fgrad if len(fgrad) != 1 else fgrad[0]
    elif len(grad) == 0:
        return []
    elif (grad.ndim == 1) or (grad.ndim == 2):
        return fun(grad)
    elif grad.ndim == 3:
        return np.array([fun(grad[:, :, i]) for i in range(grad.shape[2])])
    else:
        raise ValueError("Only up to 3d gradients allowed!")
<SYSTEM_TASK:> Get the output dimensionality of this basis. <END_TASK>
<USER_TASK:>
Description:
def get_dim(self, X):
    """
    Get the output dimensionality of this basis.

    This makes a cheap call to transform with the initial parameter values
    to ascertain the dimensionality of the output features.

    Parameters
    ----------
    X : ndarray
        (N, d) array of observations where N is the number of samples, and
        d is the dimensionality of X.

    Returns
    -------
    int :
        The dimensionality of the basis.
    """
    # Cache
    if not hasattr(self, '_D'):
        self._D = self.transform(X[[0]], *self.params_values()).shape[1]
    return self._D
<SYSTEM_TASK:> Get a list of the ``Parameter`` values if they have a value. <END_TASK>
<USER_TASK:>
Description:
def params_values(self):
    """
    Get a list of the ``Parameter`` values if they have a value.

    This does not include the basis regularizer.
    """
    return [p.value for p in atleast_list(self.params) if p.has_value]
<SYSTEM_TASK:> Apply the RBF to X. <END_TASK>
<USER_TASK:>
Description:
def transform(self, X, lenscale=None):
    """
    Apply the RBF to X.

    Parameters
    ----------
    X: ndarray
        (N, d) array of observations where N is the number of samples, and
        d is the dimensionality of X.
    lenscale: scalar or ndarray, optional
        scalar or array of shape (d,) length scales (one for each
        dimension of X). If not input, this uses the value of the initial
        length scale.

    Returns
    -------
    ndarray:
        of shape (N, D) where D is number of RBF centres.
    """
    N, d = X.shape
    lenscale = self._check_dim(d, lenscale)

    den = (2 * lenscale**2)
    return np.exp(- cdist(X / den, self.C / den, 'sqeuclidean'))
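A hedged usage sketch (the constructor call is an assumption; in revrand the centres are supplied when the basis is built):

    >>> import numpy as np
    >>> base = RadialBasis(centres=np.random.randn(10, 2))
    >>> Phi = base.transform(np.random.randn(50, 2), lenscale=1.5)
    >>> Phi.shape
    (50, 10)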
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def transform(self, X, lenscale=None): r""" Apply the sigmoid basis function to X. Parameters ---------- X: ndarray (N, d) array of observations where N is the number of samples, and d is the dimensionality of X. lenscale: float the length scale (scalar) of the RBFs to apply to X. If not input, this uses the value of the initial length scale. Returns ------- ndarray: of shape (N, D) where D is number of centres. """
N, d = X.shape lenscale = self._check_dim(d, lenscale) return expit(cdist(X / lenscale, self.C / lenscale, 'euclidean'))
<SYSTEM_TASK:> Apply the random basis to X. <END_TASK>
<USER_TASK:>
Description:
def transform(self, X, lenscale=None):
    """
    Apply the random basis to X.

    Parameters
    ----------
    X: ndarray
        (N, d) array of observations where N is the number of samples, and
        d is the dimensionality of X.
    lenscale: scalar or ndarray, optional
        scalar or array of shape (d,) length scales (one for each
        dimension of X). If not input, this uses the value of the initial
        length scale.

    Returns
    -------
    ndarray:
        of shape (N, 2*nbases) where nbases is number of random bases to
        use, given in the constructor.
    """
    N, D = X.shape
    lenscale = self._check_dim(D, lenscale)[:, np.newaxis]

    WX = np.dot(X, self.W / lenscale)

    return np.hstack((np.cos(WX), np.sin(WX))) / np.sqrt(self.n)
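A small sketch, reusing the constructor arguments shown in the apply_grad doctest above:

    >>> import numpy as np
    >>> base = RandomRBF(Xdim=3, nbases=5)
    >>> Phi = base.transform(np.random.randn(100, 3), lenscale=1.)
    >>> Phi.shape  # cos and sin blocks stacked side by side
    (100, 10)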
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def grad(self, X, lenscale=None): r""" Get the gradients of this basis w.r.t.\ the length scales. Parameters ---------- X: ndarray (N, d) array of observations where N is the number of samples, and d is the dimensionality of X. lenscale: scalar or ndarray, optional scalar or array of shape (d,) length scales (one for each dimension of X). If not input, this uses the value of the initial length scale. Returns ------- ndarray: of shape (N, 2*nbases[, d]) where d is number of lenscales (if not ARD, i.e. scalar lenscale, this is just a 2D array). This is :math:`\partial \Phi(\mathbf{X}) / \partial \mathbf{l}` """
N, D = X.shape lenscale = self._check_dim(D, lenscale)[:, np.newaxis] WX = np.dot(X, self.W / lenscale) sinWX = - np.sin(WX) cosWX = np.cos(WX) dPhi = [] for i, l in enumerate(lenscale): dWX = np.outer(X[:, i], - self.W[i, :] / l**2) dPhi.append(np.hstack((dWX * sinWX, dWX * cosWX)) / np.sqrt(self.n)) return np.dstack(dPhi) if len(lenscale) != 1 else dPhi[0]
<SYSTEM_TASK:> Apply the Fast Food RBF basis to X. <END_TASK>
<USER_TASK:>
Description:
def transform(self, X, lenscale=None):
    """
    Apply the Fast Food RBF basis to X.

    Parameters
    ----------
    X: ndarray
        (N, d) array of observations where N is the number of samples, and
        d is the dimensionality of X.
    lenscale: scalar or ndarray, optional
        scalar or array of shape (d,) length scales (one for each
        dimension of X). If not input, this uses the value of the initial
        length scale.

    Returns
    -------
    ndarray:
        of shape (N, 2*nbases) where nbases is number of random bases to
        use, given in the constructor (to nearest larger two power).
    """
    lenscale = self._check_dim(X.shape[1], lenscale)

    VX = self._makeVX(X / lenscale)
    Phi = np.hstack((np.cos(VX), np.sin(VX))) / np.sqrt(self.n)
    return Phi
<SYSTEM_TASK:> Apply the spectral mixture component basis to X. <END_TASK>
<USER_TASK:>
Description:
def transform(self, X, mean=None, lenscale=None):
    """
    Apply the spectral mixture component basis to X.

    Parameters
    ----------
    X: ndarray
        (N, d) array of observations where N is the number of samples, and
        d is the dimensionality of X.
    mean: ndarray, optional
        array of shape (d,) frequency means (one for each dimension of X).
        If not input, this uses the value of the initial mean.
    lenscale: ndarray, optional
        array of shape (d,) length scales (one for each dimension of X).
        If not input, this uses the value of the initial length scale.

    Returns
    -------
    ndarray:
        of shape (N, 4*nbases) where nbases is number of random bases to
        use, given in the constructor (to nearest larger two power).
    """
    mean = self._check_dim(X.shape[1], mean, paramind=0)
    lenscale = self._check_dim(X.shape[1], lenscale, paramind=1)

    VX = self._makeVX(X / lenscale)
    mX = X.dot(mean)[:, np.newaxis]
    Phi = np.hstack((np.cos(VX + mX), np.sin(VX + mX),
                     np.cos(VX - mX), np.sin(VX - mX))) / \
        np.sqrt(2 * self.n)

    return Phi
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def grad(self, X, mean=None, lenscale=None): r""" Get the gradients of this basis w.r.t.\ the mean and length scales. Parameters ---------- x: ndarray (n, d) array of observations where n is the number of samples, and d is the dimensionality of x. mean: ndarray, optional array of shape (d,) frequency means (one for each dimension of X). If not input, this uses the value of the initial mean. lenscale: ndarray, optional array of shape (d,) length scales (one for each dimension of X). If not input, this uses the value of the initial length scale. Returns ------- ndarray: shape (n, 4*nbases) where nbases is number of random rbf bases, again to the nearest larger two power. This is :math:`\partial \phi(\mathbf{x}) / \partial \boldsymbol\mu` ndarray: shape (n, 4*nbases) where nbases is number of random rbf bases, again to the nearest larger two power. This is :math:`\partial \phi(\mathbf{x}) / \partial \mathbf{l}` """
d = X.shape[1] mean = self._check_dim(d, mean, paramind=0) lenscale = self._check_dim(d, lenscale, paramind=1) VX = self._makeVX(X / lenscale) mX = X.dot(mean)[:, np.newaxis] sinVXpmX = - np.sin(VX + mX) sinVXmmX = - np.sin(VX - mX) cosVXpmX = np.cos(VX + mX) cosVXmmX = np.cos(VX - mX) dPhi_len = [] dPhi_mean = [] for i, l in enumerate(lenscale): # Means dmX = X[:, [i]] dPhi_mean.append(np.hstack((dmX * sinVXpmX, dmX * cosVXpmX, -dmX * sinVXmmX, -dmX * cosVXmmX)) / np.sqrt(2 * self.n)) # Lenscales indlen = np.zeros(d) indlen[i] = 1. / l**2 dVX = - self._makeVX(X * indlen) # FIXME make this more efficient? dPhi_len.append(np.hstack((dVX * sinVXpmX, dVX * cosVXpmX, dVX * sinVXmmX, dVX * cosVXmmX)) / np.sqrt(2 * self.n)) dPhi_mean = np.dstack(dPhi_mean) if d != 1 else dPhi_mean[0] dPhi_len = np.dstack(dPhi_len) if d != 1 else dPhi_len[0] return dPhi_mean, dPhi_len
<SYSTEM_TASK:> Return the basis function applied to X. <END_TASK>
<USER_TASK:>
Description:
def transform(self, X, *params):
    """
    Return the basis function applied to X.

    I.e. Phi(X, params), where params can also optionally be used and
    learned.

    Parameters
    ----------
    X : ndarray
        (N, d) array of observations where N is the number of samples, and
        d is the dimensionality of X.
    *params : optional
        parameter arguments, these are the parameters of the concatenated
        bases `in the order` they were concatenated.

    Returns
    -------
    ndarray :
        of shape (N, D) where D is the number of basis functions.
    """
    Phi = []
    args = list(params)

    for base in self.bases:
        phi, args = base._transform_popargs(X, *args)
        Phi.append(phi)

    return np.hstack(Phi)
<SYSTEM_TASK:> Return the gradient of the basis function for each parameter. <END_TASK>
<USER_TASK:>
Description:
def grad(self, X, *params):
    """
    Return the gradient of the basis function for each parameter.

    Parameters
    ----------
    X : ndarray
        (N, d) array of observations where N is the number of samples, and
        d is the dimensionality of X.
    *params : optional
        parameter arguments, these are the parameters of the concatenated
        bases `in the order` they were concatenated.

    Returns
    -------
    list or ndarray :
        this will be a list of ndarrays if there are multiple parameters,
        or just an ndarray if there is a single parameter. The ndarrays
        can have more than two dimensions (i.e. tensors of rank > 2),
        depending on the dimensions of the basis function parameters. If
        there are *no* parameters, ``[]`` is returned.
    """
    # Establish a few dimensions
    N = X.shape[0]
    D = self.get_dim(X)
    endinds = self.__base_locations(X)  # for the Padding indices
    args = list(params)

    # Generate structured gradients with appropriate zero padding
    def make_dPhi(i, g):

        # Pad the gradient with respect to the total basis dimensionality
        dPhi_dim = (N, D) if g.ndim < 3 else (N, D, g.shape[2])
        dPhi = np.zeros(dPhi_dim)
        dPhi[:, endinds[i]:endinds[i + 1]] = g

        return dPhi

    # Get gradients from each basis
    for i, base in enumerate(self.bases):

        # evaluate gradient and deal with multiple parameter gradients by
        # keeping track of the basis index
        g, args, sargs = base._grad_popargs(X, *args)

        for gg in atleast_tuple(g):
            if len(gg) == 0:
                continue
            yield make_dPhi(i, gg)
<SYSTEM_TASK:> Return a list of all of the ``Parameter`` objects. <END_TASK>
<USER_TASK:>
Description:
def params(self):
    """
    Return a list of all of the ``Parameter`` objects.

    Or just a single ``Parameter`` if there is only one, and a single
    empty ``Parameter`` if there are no parameters.
    """
    paramlist = [b.params for b in self.bases if b.params.has_value]

    if len(paramlist) == 0:
        return Parameter()
    else:
        return paramlist if len(paramlist) > 1 else paramlist[0]
<SYSTEM_TASK:> Returns unified diff between given ``filenode_old`` and ``filenode_new``. <END_TASK>
<USER_TASK:>
Description:
def get_udiff(filenode_old, filenode_new, show_whitespace=True):
    """
    Returns unified diff between given ``filenode_old`` and
    ``filenode_new``.
    """
    try:
        filenode_old_date = filenode_old.changeset.date
    except NodeError:
        filenode_old_date = None

    try:
        filenode_new_date = filenode_new.changeset.date
    except NodeError:
        filenode_new_date = None

    for filenode in (filenode_old, filenode_new):
        if not isinstance(filenode, FileNode):
            raise VCSError("Given object should be FileNode object, not %s"
                           % filenode.__class__)

    if filenode_old_date and filenode_new_date:
        if not filenode_old_date < filenode_new_date:
            logging.debug("Generating udiff for filenodes with "
                          "non-increasing dates")

    vcs_udiff = unified_diff(filenode_old.content.splitlines(True),
                             filenode_new.content.splitlines(True),
                             filenode_old.name,
                             filenode_new.name,
                             filenode_old_date,
                             filenode_new_date)
    return vcs_udiff
<SYSTEM_TASK:> Returns git style diff between given ``filenode_old`` and ``filenode_new``. <END_TASK>
<USER_TASK:>
Description:
def get_gitdiff(filenode_old, filenode_new, ignore_whitespace=True):
    """
    Returns git style diff between given ``filenode_old`` and
    ``filenode_new``.

    :param ignore_whitespace: ignore whitespaces in diff
    """
    for filenode in (filenode_old, filenode_new):
        if not isinstance(filenode, FileNode):
            raise VCSError("Given object should be FileNode object, not %s"
                           % filenode.__class__)

    old_raw_id = getattr(filenode_old.changeset, 'raw_id', '0' * 40)
    new_raw_id = getattr(filenode_new.changeset, 'raw_id', '0' * 40)

    repo = filenode_new.changeset.repository
    vcs_gitdiff = repo.get_diff(old_raw_id, new_raw_id, filenode_new.path,
                                ignore_whitespace)
    return vcs_gitdiff
<SYSTEM_TASK:> make a fresh copy of generator, we should not iterate thru <END_TASK>
<USER_TASK:>
Description:
def copy_iterator(self):
    """
    Make a fresh copy of the generator; we should not iterate through
    the original, as it is needed for repeated operations on this
    instance of DiffProcessor.
    """
    self.__udiff, iterator_copy = itertools.tee(self.__udiff)
    return iterator_copy
<SYSTEM_TASK:> Extract the filename and revision hint from a line. <END_TASK>
<USER_TASK:>
Description:
def _extract_rev(self, line1, line2):
    """
    Extract the filename and revision hint from a line.
    """
    try:
        if line1.startswith('--- ') and line2.startswith('+++ '):
            l1 = line1[4:].split(None, 1)
            # note: str.lstrip strips a *set* of characters, so this also
            # eats any leading 'a'/'/' characters of the filename itself
            old_filename = l1[0].lstrip('a/') if len(l1) >= 1 else None
            old_rev = l1[1] if len(l1) == 2 else 'old'

            l2 = line2[4:].split(None, 1)
            new_filename = l2[0].lstrip('b/') if len(l2) >= 1 else None
            new_rev = l2[1] if len(l2) == 2 else 'new'

            filename = (old_filename
                        if (old_filename != 'dev/null') else new_filename)

            # return in the (filename, old_rev, new_rev) order expected by
            # the unpacking in _parse_udiff
            return filename, old_rev, new_rev
    except (ValueError, IndexError):
        pass

    return None, None, None
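A hypothetical call on a unified-diff header pair (dp is a DiffProcessor instance):

    >>> dp._extract_rev('--- a/setup.py old', '+++ b/setup.py new')
    ('setup.py', 'old', 'new')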
<SYSTEM_TASK:> Parse the diff and return data for the template. <END_TASK>
<USER_TASK:>
Description:
def _parse_udiff(self):
    """
    Parse the diff and return data for the template.
    """
    lineiter = self.lines
    files = []
    try:
        line = next(lineiter)
        # skip first context
        skipfirst = True
        while 1:
            # continue until we found the old file
            if not line.startswith('--- '):
                line = next(lineiter)
                continue

            chunks = []
            filename, old_rev, new_rev = \
                self._extract_rev(line, next(lineiter))
            files.append({
                'filename': filename,
                'old_revision': old_rev,
                'new_revision': new_rev,
                'chunks': chunks
            })

            line = next(lineiter)
            while line:
                match = self._chunk_re.match(line)
                if not match:
                    break

                lines = []
                chunks.append(lines)

                old_line, old_end, new_line, new_end = \
                    [int(x or 1) for x in match.groups()[:-1]]
                old_line -= 1
                new_line -= 1
                context = len(match.groups()) == 5
                old_end += old_line
                new_end += new_line

                if context:
                    if not skipfirst:
                        lines.append({
                            'old_lineno': '...',
                            'new_lineno': '...',
                            'action': 'context',
                            'line': line,
                        })
                    else:
                        skipfirst = False

                line = next(lineiter)
                while old_line < old_end or new_line < new_end:
                    if line:
                        command, line = line[0], line[1:]
                    else:
                        command = ' '
                    affects_old = affects_new = False

                    # ignore those if we don't expect them
                    if command in '#@':
                        continue
                    elif command == '+':
                        affects_new = True
                        action = 'add'
                    elif command == '-':
                        affects_old = True
                        action = 'del'
                    else:
                        affects_old = affects_new = True
                        action = 'unmod'

                    old_line += affects_old
                    new_line += affects_new
                    lines.append({
                        'old_lineno': affects_old and old_line or '',
                        'new_lineno': affects_new and new_line or '',
                        'action': action,
                        'line': line
                    })
                    line = next(lineiter)

    except StopIteration:
        pass

    # highlight inline changes, per file rather than on the leftover
    # `chunks` binding from the parsing loop above
    for file in files:
        for chunk in file['chunks']:
            lineiter = iter(chunk)
            # first = True
            try:
                while 1:
                    line = next(lineiter)
                    if line['action'] != 'unmod':
                        nextline = next(lineiter)
                        if nextline['action'] == 'unmod' or \
                           nextline['action'] == line['action']:
                            continue
                        self.differ(line, nextline)
            except StopIteration:
                pass

    return files
<SYSTEM_TASK:> Make a string safe for including in an id attribute. <END_TASK>
<USER_TASK:>
Description:
def _safe_id(self, idstring):
    """Make a string safe for including in an id attribute.

    The HTML spec says that id attributes 'must begin with a letter
    ([A-Za-z]) and may be followed by any number of letters, digits
    ([0-9]), hyphens ("-"), underscores ("_"), colons (":"), and periods
    (".")'. These regexps are slightly over-zealous, in that they remove
    colons and periods unnecessarily.

    Whitespace is transformed into underscores, and then anything which
    is not a hyphen or a character that matches \w (alphanumerics and
    underscore) is removed.
    """
    # Transform all whitespace to underscore
    idstring = re.sub(r'\s', "_", '%s' % idstring)

    # Remove everything that is not a hyphen or a member of \w
    idstring = re.sub(r'(?!-)\W', "", idstring).lower()
    return idstring
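For example, the behaviour follows directly from the two substitutions above:

    >>> dp._safe_id('My File: v2.0')
    'my_file_v20'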
<SYSTEM_TASK:> Returns raw string as udiff <END_TASK>
<USER_TASK:>
Description:
def raw_diff(self):
    """
    Returns raw string as udiff
    """
    udiff_copy = self.copy_iterator()

    if self.__format == 'gitdiff':
        udiff_copy = self._parse_gitdiff(udiff_copy)

    return u''.join(udiff_copy)
<SYSTEM_TASK:> Log-sum-exp trick for matrix X for summation along a specified axis. <END_TASK>
<USER_TASK:>
Description:
def logsumexp(X, axis=0):
    """
    Log-sum-exp trick for matrix X for summation along a specified axis.

    This performs the following operation in a stable fashion,

    .. math::

        \log \sum^K_{k=1} \exp\{x_k\}

    Parameters
    ----------
    X: ndarray
        2D array of shape (N, D) to apply the log-sum-exp trick.
    axis: int, optional
        Axis to apply the summation along (works the same as axis in
        numpy.sum).

    Returns
    -------
    lseX: ndarray
        results of applying the log-sum-exp trick, this will be shape
        (D,) if :code:`axis=0` or shape (N,) if :code:`axis=1`.
    """
    mx = X.max(axis=axis)
    if (X.ndim > 1):
        mx = np.atleast_2d(mx).T if axis == 1 else np.atleast_2d(mx)

    return np.log(np.exp(X - mx).sum(axis=axis)) + np.ravel(mx)
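A quick numeric check, with values chosen so the answer is exact:

    >>> X = np.log(np.array([[1., 2.], [3., 4.]]))
    >>> logsumexp(X, axis=0)  # log of the column sums, log([4., 6.])
    array([ 1.38629436,  1.79175947])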
<SYSTEM_TASK:> Pass X through a softmax function in a numerically stable way using the <END_TASK>
<USER_TASK:>
Description:
def softmax(X, axis=0):
    """
    Pass X through a softmax function in a numerically stable way using
    the log-sum-exp trick.

    This transformation is:

    .. math::

        \\frac{\exp\{X_k\}}{\sum^K_{j=1} \exp\{X_j\}}

    and is applied to each row/column, `k`, of X.

    Parameters
    ----------
    X: ndarray
        2D array of shape (N, D) to apply the log-sum-exp trick.
    axis: int, optional
        Axis to apply the summation along (works the same as axis in
        numpy.sum).

    Returns
    -------
    smX: ndarray
        results of applying the log-sum-exp trick, this will be shape
        (N, D), and each row will sum to 1 if :code:`axis=1` or each
        column will sum to 1 if :code:`axis=0`.
    """
    if axis == 1:
        return np.exp(X - logsumexp(X, axis=1)[:, np.newaxis])
    elif axis == 0:
        return np.exp(X - logsumexp(X, axis=0))
    else:
        raise ValueError("This only works on 2D arrays for now.")
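And a sanity check that rows are normalised when axis=1:

    >>> S = softmax(np.array([[1., 2., 3.], [1., 1., 1.]]), axis=1)
    >>> S.sum(axis=1)
    array([ 1.,  1.])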
<SYSTEM_TASK:> Checks if credit card number fits the visa format. <END_TASK>
<USER_TASK:>
Description:
def is_visa(n):
    """Checks if credit card number fits the visa format."""
    n, length = str(n), len(str(n))

    if length >= 13 and length <= 16:
        if n[0] == '4':
            return True
    return False
<SYSTEM_TASK:> Checks if credit card number fits the visa electron format. <END_TASK>
<USER_TASK:>
Description:
def is_visa_electron(n):
    """Checks if credit card number fits the visa electron format."""
    n, length = str(n), len(str(n))
    form = ['026', '508', '844', '913', '917']

    if length == 16:
        if n[0] == '4':
            if ''.join(n[1:4]) in form or ''.join(n[1:6]) == '17500':
                return True
    return False
<SYSTEM_TASK:> Checks if credit card number fits the mastercard format. <END_TASK>
<USER_TASK:>
Description:
def is_mastercard(n):
    """Checks if credit card number fits the mastercard format."""
    n, length = str(n), len(str(n))

    if length >= 16 and length <= 19:
        if ''.join(n[:2]) in strings_between(51, 56):
            return True
    return False
<SYSTEM_TASK:> Checks if credit card number fits the American Express format. <END_TASK>
<USER_TASK:>
Description:
def is_amex(n):
    """Checks if credit card number fits the American Express format."""
    n, length = str(n), len(str(n))

    if length == 15:
        if n[0] == '3' and (n[1] == '4' or n[1] == '7'):
            return True
    return False
<SYSTEM_TASK:> Checks if credit card number fits the discover card format. <END_TASK>
<USER_TASK:>
Description:
def is_discover(n):
    """Checks if credit card number fits the discover card format."""
    n, length = str(n), len(str(n))

    if length == 16:
        if n[0] == '6':
            if ''.join(n[1:4]) == '011' or n[1] == '5':
                return True
            elif n[1] == '4' and n[2] in strings_between(4, 10):
                return True
            elif ''.join(n[1:6]) in strings_between(22126, 22926):
                return True
    return False
<SYSTEM_TASK:> Gets a list of the formats a credit card number fits. <END_TASK>
<USER_TASK:>
Description:
def get_format(n):
    """Gets a list of the formats a credit card number fits."""
    formats = []

    if is_visa(n):
        formats.append('visa')
    if is_visa_electron(n):
        formats.append('visa electron')
    if is_mastercard(n):
        formats.append('mastercard')
    if is_amex(n):
        formats.append('amex')
    if is_maestro(n):
        formats.append('maestro')
    if is_discover(n):
        formats.append('discover')

    return formats
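For instance (assuming is_maestro, which is not shown here, rejects this number):

    >>> get_format(4111111111111111)
    ['visa']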
<SYSTEM_TASK:> Return full name of member <END_TASK>
<USER_TASK:>
Description:
def full_name(self):
    """Return full name of member"""
    if self.prefix is not None:
        return '.'.join([self.prefix, self.member])
    return self.member
<SYSTEM_TASK:> Parse signature declaration string <END_TASK>
<USER_TASK:>
Description:
def parse_signature(cls, signature):
    """Parse signature declaration string

    Uses :py:attr:`signature_pattern` to parse out pieces of constraint
    signatures. Pattern should provide the following named groups:

        prefix
            Object prefix, such as a namespace

        member
            Object member name

        arguments
            Declaration arguments, if this is a callable constraint

    :param signature: construct signature
    :type signature: string
    """
    assert cls.signature_pattern is not None
    pattern = re.compile(cls.signature_pattern, re.VERBOSE)
    match = pattern.match(signature)
    if match:
        groups = match.groupdict()
        arguments = None
        if 'arguments' in groups and groups['arguments'] is not None:
            arguments = re.split(r'\,\s+', groups['arguments'])
        return DotNetSignature(
            prefix=groups.get('prefix', None),
            member=groups.get('member', None),
            arguments=arguments
        )
    raise ValueError('Could not parse signature: {0}'.format(signature))
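A hedged example: assuming a subclass (SomeDotNetDirective is a hypothetical name) defines signature_pattern with the named groups described above, parsing a callable signature might look like:

    >>> sig = SomeDotNetDirective.parse_signature('Foo.Bar.method(arg1, arg2)')
    >>> (sig.prefix, sig.member, sig.arguments)
    ('Foo.Bar', 'method', ['arg1', 'arg2'])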
<SYSTEM_TASK:> Parses out pieces from construct signatures <END_TASK>
<USER_TASK:>
Description:
def handle_signature(self, sig, signode):
    """Parses out pieces from construct signatures

    Parses out prefix and argument list from construct definition. This
    is assuming that the .NET languages this will support will be in a
    common format, such as::

        Namespace.Class.method(argument, argument, ...)

    The namespace and class will be determined by the nesting of rST
    directives.

    Returns
        Altered :py:data:`signode` with attributes corrected for rST
        nesting/etc
    """
    try:
        sig = self.parse_signature(sig.strip())
    except ValueError:
        self.env.warn(self.env.docname,
                      'Parsing signature failed: "{}"'.format(sig),
                      self.lineno)
        raise

    prefix = self.env.ref_context.get('dn:prefix', None)

    if prefix is not None:
        sig.prefix = prefix

    signode['object'] = sig.member
    signode['prefix'] = sig.prefix
    signode['fullname'] = sig.full_name()

    # Prefix modifiers
    if self.display_prefix:
        signode += addnodes.desc_annotation(self.display_prefix,
                                            self.display_prefix)
    for prefix in ['public', 'protected', 'static']:
        if prefix in self.options:
            signode += addnodes.desc_annotation(prefix + ' ',
                                                prefix + ' ')

    # Show prefix only on shorter declarations
    if sig.prefix is not None and not self.has_arguments:
        signode += addnodes.desc_addname(sig.prefix + '.',
                                         sig.prefix + '.')

    signode += addnodes.desc_name(sig.member, sig.member)
    if self.has_arguments:
        if not sig.arguments:
            signode += addnodes.desc_parameterlist()
        else:
            # TODO replace this
            _pseudo_parse_arglist(signode, ', '.join(sig.arguments))

    if isinstance(self, DotNetObjectNested):
        return sig.full_name(), sig.full_name()
    return sig.full_name(), sig.prefix
<SYSTEM_TASK:> Add objects to the domain list of objects <END_TASK>
<USER_TASK:>
Description:
def add_target_and_index(self, name, sig, signode):
    """Add objects to the domain list of objects

    This uses the directive short name along with the full object name to
    create objects and nodes that are type and name unique.
    """
    full_name = name[0]
    target_name = '{0}-{1}'.format(self.short_name, full_name)
    if target_name not in self.state.document.ids:
        signode['names'].append(target_name)
        signode['ids'].append(target_name)
        signode['first'] = not self.names
        self.state.document.note_explicit_target(signode)

        # Update domain objects
        objects = self.env.domaindata['dn']['objects']
        try:
            found_obj = objects[full_name]
            (found_doc, found_type) = found_obj
            self.state_machine.reporter.warning(
                ('duplicate object definition of {obj_type} {obj_name}, '
                 'other instance in {path}'
                 .format(obj_type=found_type, obj_name=full_name,
                         path=self.env.doc2path(found_doc))),
                line=self.lineno)
        except KeyError:
            pass
        finally:
            objects[full_name] = (self.env.docname, self.objtype)

    index_text = self.get_index_text(None, name)
    if index_text:
        entry = ('single', index_text, full_name, '')
        if SPHINX_VERSION_14:
            entry = ('single', index_text, full_name, '', None)
        self.indexnode['entries'].append(entry)
<SYSTEM_TASK:> Produce index text by directive attributes <END_TASK>
<USER_TASK:>
Description:
def get_index_text(self, prefix, name_obj):
    """Produce index text by directive attributes"""
    (name, _) = name_obj
    msg = '{name} ({obj_type})'
    parts = {
        'name': name,
        'prefix': prefix,
        'obj_type': self.long_name,
    }

    try:
        (obj_ns, obj_name) = name.rsplit('.', 1)
        parts['name'] = obj_name
        parts['namespace'] = obj_ns
        msg = '{name} ({namespace} {obj_type})'
    except ValueError:
        pass

    return msg.format(**parts)
<SYSTEM_TASK:> If element is considered hidden, drop the desc_signature node <END_TASK>
<USER_TASK:>
Description:
def run(self):
    """If element is considered hidden, drop the desc_signature node

    The default handling of signatures by :py:cls:`ObjectDescription`
    returns a list of nodes with the signature nodes. We are going to
    remove them if this is a hidden declaration.
    """
    nodes = super(DotNetObjectNested, self).run()
    if 'hidden' in self.options:
        for node in nodes:
            if isinstance(node, addnodes.desc):
                for (m, child) in enumerate(node.children):
                    if isinstance(child, addnodes.desc_signature):
                        node.children.pop(m)
    return nodes
<SYSTEM_TASK:> Build up prefix history for nested elements <END_TASK>
<USER_TASK:>
Description:
def before_content(self):
    """Build up prefix history for nested elements

    The following keys are used in :py:attr:`self.env.ref_context`:

        dn:prefixes
            Stores the prefix history. With each nested element, we add
            the prefix to a list of prefixes. When we exit that object's
            nesting level, :py:meth:`after_content` is triggered and the
            prefix is removed from the end of the list.

        dn:prefix
            Current prefix. This should reflect the last element in the
            prefix history
    """
    super(DotNetObjectNested, self).before_content()
    if self.names:
        (_, prefix) = self.names.pop()
        try:
            self.env.ref_context['dn:prefixes'].append(prefix)
        except (AttributeError, KeyError):
            self.env.ref_context['dn:prefixes'] = [prefix]
        finally:
            self.env.ref_context['dn:prefix'] = prefix
<SYSTEM_TASK:> This handles some special cases for reference links in .NET <END_TASK>
<USER_TASK:>
Description:
def process_link(self, env, refnode, has_explicit_title, title, target):
    """This handles some special cases for reference links in .NET

    First, the standard Sphinx reference syntax of ``:ref:`Title<Link>```,
    where a reference to ``Link`` is created with title ``Title``, causes
    problems for the generic .NET syntax of ``:dn:cls:`FooBar<T>```. So,
    here we assume that ``<T>`` was the generic declaration, and fix the
    reference.

    This also uses :py:cls:`AnyXRefRole` to add `ref_context` onto the
    refnode. Add data there that you need it on refnodes.

    This method also resolves special reference operators ``~`` and ``.``
    """
    result = super(DotNetXRefRole, self).process_link(
        env, refnode, has_explicit_title, title, target)
    (title, target) = result
    if not has_explicit_title:
        # If the first character is a tilde, don't display the parent name
        title = title.lstrip('.')
        target = target.lstrip('~')
        if title[0:1] == '~':
            title = title[1:]
            dot = title.rfind('.')
            if dot != -1:
                title = title[dot + 1:]
    else:
        if title != target:
            target = title = '{title}<{target}>'.format(title=title,
                                                        target=target)
    return title, target
<SYSTEM_TASK:> Find object reference <END_TASK>
<USER_TASK:>
Description:
def find_obj(self, env, prefix, name, obj_type, searchorder=0):
    """Find object reference

    :param env: Build environment
    :param prefix: Object prefix
    :param name: Object name
    :param obj_type: Object type
    :param searchorder: Search for exact match
    """
    # Skip parens
    if name[-2:] == '()':
        name = name[:-2]

    if not name:
        return []

    object_types = list(self.object_types)
    if obj_type is not None:
        object_types = self.objtypes_for_role(obj_type)

    objects = self.data['objects']
    newname = None
    fullname = name
    if prefix is not None:
        fullname = '.'.join([prefix, name])

    if searchorder == 1:
        if prefix and fullname in objects and \
                objects[fullname][1] in object_types:
            newname = fullname
        elif name in objects and objects[name][1] in object_types:
            newname = name
        else:
            try:
                matches = [obj_name for obj_name in objects
                           if obj_name.endswith('.' + name)]
                newname = matches.pop()
            except IndexError:
                pass
    else:
        if name in objects:
            newname = name
        elif prefix and fullname in objects:
            newname = fullname

    if newname is None:
        return None
    return newname, objects.get(newname, (None, None))
<SYSTEM_TASK:> Look for any references, without object type <END_TASK>
<USER_TASK:>
Description:
def resolve_any_xref(self, env, fromdocname, builder, target, node,
                     contnode):
    """Look for any references, without object type

    This always searches in "refspecific" mode
    """
    prefix = node.get('dn:prefix')
    results = []

    match = self.find_obj(env, prefix, target, None, 1)
    if match is not None:
        (name, obj) = match
        results.append(('dn:' + self.role_for_objtype(obj[1]),
                        make_refnode(builder, fromdocname, obj[0], name,
                                     contnode, name)))
    return results
<SYSTEM_TASK:> Create an image of this type. <END_TASK>
<USER_TASK:>
Description:
def create(self, width, height):
    """Create an image of this type.

    Parameters
    ----------
    width: `int`
        Image width.
    height: `int`
        Image height.

    Returns
    -------
    `PIL.Image.Image`
    """
    return Image.new(self.mode, (width, height))
<SYSTEM_TASK:> Merge image channels. <END_TASK>
<USER_TASK:>
Description:
def merge(self, imgs):
    """Merge image channels.

    Parameters
    ----------
    imgs : `list` of `PIL.Image.Image`

    Returns
    -------
    `PIL.Image.Image`

    Raises
    ------
    ValueError
        If image channel list is empty.
    """
    if not imgs:
        raise ValueError('empty channel list')
    if len(imgs) == 1:
        return imgs[0]
    return Image.merge(self.mode, imgs)
<SYSTEM_TASK:> a helper to inspect a link and see if we should keep the link boundary <END_TASK>
<USER_TASK:>
Description:
def detect_keep_boundary(start, end, namespaces):
    """a helper to inspect a link and see if we should keep the link
    boundary
    """
    result_start, result_end = False, False
    parent_start = start.getparent()
    parent_end = end.getparent()

    if parent_start.tag == "{%s}p" % namespaces['text']:
        # more than one child in the containing paragraph ?
        # we keep the boundary
        result_start = len(parent_start.getchildren()) > 1

    if parent_end.tag == "{%s}p" % namespaces['text']:
        # more than one child in the containing paragraph ?
        # we keep the boundary
        result_end = len(parent_end.getchildren()) > 1

    return result_start, result_end
<SYSTEM_TASK:> create proper namespaces for our document <END_TASK>
<USER_TASK:>
Description:
def __prepare_namespaces(self):
    """create proper namespaces for our document
    """
    # create needed namespaces
    self.namespaces = dict(
        text="urn:text",
        draw="urn:draw",
        table="urn:table",
        office="urn:office",
        xlink="urn:xlink",
        svg="urn:svg",
        manifest="urn:manifest",
    )

    # copy namespaces from original docs
    for tree_root in self.tree_roots:
        self.namespaces.update(tree_root.nsmap)

    # remove any "root" namespace as lxml.xpath do not support them
    self.namespaces.pop(None, None)

    # declare the genshi namespace
    self.namespaces['py'] = GENSHI_URI

    # declare our own namespace
    self.namespaces['py3o'] = PY3O_URI
<SYSTEM_TASK:> Public method to help report engines find all instructions <END_TASK>
<USER_TASK:>
Description:
def get_user_instructions(self):
    """ Public method to help report engines find all instructions
    """
    res = []
    # TODO: Check if instructions can be stored in other content_trees
    for e in get_instructions(self.content_trees[0], self.namespaces):
        childs = e.getchildren()
        if childs:
            res.extend([c.text for c in childs])
        else:
            res.append(e.text)
    return res
<SYSTEM_TASK:> Public method to get the mapping of all <END_TASK>
<USER_TASK:>
Description:
def get_user_instructions_mapping(self):
    """ Public method to get the mapping of all
    variables defined in the template
    """
    instructions = self.get_user_instructions()
    user_variables = self.get_user_variables()

    # For now we just want for loops
    instructions = [i for i in instructions
                    if i.startswith('for') or i == '/for']

    # Now we call the decoder to get variable mapping from instructions
    d = Decoder()

    res = []
    for_insts = {}
    tmp = res

    # Create a hierarchy with for loops
    for i in instructions:
        if i == '/for':
            tmp = tmp.parent
        else:
            # Decode the instruction:
            # inst.values() -> forloop variable
            # inst.keys() -> forloop iterable
            var, it = d.decode_py3o_instruction(i)

            # we keep all inst in a dict
            for_insts[var] = it

            # get the variable defined inside the for loop
            for_vars = [v for v in user_variables
                        if v.split('.')[0] == var]

            # create a new ForList for the forloop and add it to the
            # children or list
            new_list = ForList(it, var)
            if isinstance(tmp, list):
                # We have a root for loop
                res.append(new_list)
                tmp = res[-1]
                tmp.parent = res
            else:
                tmp.add_child(new_list)
                tmp = new_list

            # Add the attributes to our new child
            for v in for_vars:
                tmp.add_attr(v)

    # Insert global variable in a second list
    user_vars = [v for v in user_variables
                 if not v.split('.')[0] in for_insts.keys()]

    return res, user_vars
<SYSTEM_TASK:> transform a py3o link into a proper Genshi statement <END_TASK>
<USER_TASK:>
Description:
def handle_link(self, link, py3o_base, closing_link):
    """transform a py3o link into a proper Genshi statement

    rebase a py3o link at a proper place in the tree
    to be ready for Genshi replacement
    """
    # OLD open office version
    if link.text is not None and link.text.strip():
        if not link.text == py3o_base:
            msg = "url and text do not match in '%s'" % link.text
            raise TemplateException(msg)

    # new open office version
    elif len(link):
        if not link[0].text == py3o_base:
            msg = "url and text do not match in '%s'" % link.text
            raise TemplateException(msg)

    else:
        raise TemplateException("Link text not found")

    # find out if the instruction is inside a table
    parent = link.getparent()
    keep_start_boundary = False
    keep_end_boundary = False

    if parent.getparent() is not None and parent.getparent().tag == (
            "{%s}table-cell" % self.namespaces['table']
    ):
        # we are in a table
        opening_paragraph = parent
        opening_cell = opening_paragraph.getparent()

        # same for closing
        closing_paragraph = closing_link.getparent()
        closing_cell = closing_paragraph.getparent()

        if opening_cell == closing_cell:
            # block is fully in a single cell
            opening_row = opening_paragraph
            closing_row = closing_paragraph
        else:
            opening_row = opening_cell.getparent()
            closing_row = closing_cell.getparent()

    elif parent.tag == "{%s}p" % self.namespaces['text']:
        # if we are using text we want to keep start/end nodes
        keep_start_boundary, keep_end_boundary = detect_keep_boundary(
            link, closing_link, self.namespaces
        )
        # we are in a text paragraph
        opening_row = parent
        closing_row = closing_link.getparent()

    else:
        raise NotImplementedError(
            "We handle urls in tables or text paragraph only"
        )

    # max split is one
    instruction, instruction_value = py3o_base.split("=", 1)
    instruction_value = instruction_value.strip('"')

    attribs = dict()
    attribs['{%s}strip' % GENSHI_URI] = 'True'
    attribs['{%s}%s' % (GENSHI_URI, instruction)] = instruction_value

    genshi_node = lxml.etree.Element(
        'span',
        attrib=attribs,
        nsmap={'py': GENSHI_URI},
    )

    link.getparent().remove(link)
    closing_link.getparent().remove(closing_link)

    try:
        move_siblings(
            opening_row, closing_row, genshi_node,
            keep_start_boundary=keep_start_boundary,
            keep_end_boundary=keep_end_boundary,
        )
    except ValueError as e:
        log.exception(e)
        raise TemplateException("Could not move siblings for '%s'" %
                                py3o_base)
<SYSTEM_TASK:> a public method to help report engines to introspect <END_TASK>
<USER_TASK:>
Description:
def get_user_variables(self):
    """a public method to help report engines to introspect
    a template and find what data it needs and how it will be used

    returns a list of user variable names without starting 'py3o.'"""
    # TODO: Check if some user fields are stored in other content_trees
    return [
        e.get('{%s}name' % e.nsmap.get('text'))[5:]
        for e in get_user_fields(self.content_trees[0], self.namespaces)
    ]
<SYSTEM_TASK:> Replace user-type text fields that start with "py3o." with genshi <END_TASK>
<USER_TASK:>
Description:
def __prepare_usertexts(self):
    """Replace user-type text fields that start with "py3o." with genshi
    instructions.
    """
    field_expr = "//text:user-field-get[starts-with(@text:name, 'py3o.')]"

    for content_tree in self.content_trees:

        for userfield in content_tree.xpath(
                field_expr,
                namespaces=self.namespaces
        ):
            parent = userfield.getparent()
            value = userfield.attrib[
                '{%s}name' % self.namespaces['text']
            ][5:]
            value_type = self.field_info[value]['value_type']

            # we try to override global var type with local settings
            value_type_attr = '{%s}value-type' % self.namespaces['office']
            rec = 0
            parent_node = parent

            # special case for float which has a value info on top level
            # overriding local value
            found_node = False
            while rec <= 5:
                if parent_node is None:
                    break

                # find an ancestor with an office:value-type attribute
                # this is the case when you are inside a table
                if value_type_attr in parent_node.attrib:
                    value_type = parent_node.attrib[value_type_attr]
                    found_node = True
                    break

                rec += 1
                parent_node = parent_node.getparent()

            if value_type == 'float':
                value_attr = '{%s}value' % self.namespaces['office']
                rec = 0

                if found_node:
                    parent_node.attrib[value_attr] = "${%s}" % value
                else:
                    parent_node = userfield
                    while rec <= 7:
                        if parent_node is None:
                            break

                        if value_attr in parent_node.attrib:
                            parent_node.attrib[value_attr] = (
                                "${%s}" % value
                            )
                            break

                        rec += 1
                        parent_node = parent_node.getparent()

                value = "format_float(%s)" % value

            if value_type == 'percentage':
                del parent_node.attrib[value_attr]
                value = "format_percentage(%s)" % value
                parent_node.attrib[value_type_attr] = "string"

            attribs = dict()
            attribs['{%s}strip' % GENSHI_URI] = 'True'
            attribs['{%s}content' % GENSHI_URI] = value

            genshi_node = lxml.etree.Element(
                'span',
                attrib=attribs,
                nsmap={'py': GENSHI_URI}
            )

            if userfield.tail:
                genshi_node.tail = userfield.tail

            parent.replace(userfield, genshi_node)
<SYSTEM_TASK:> Add entries for py3o images into the manifest file. <END_TASK> <USER_TASK:> Description: def __add_images_to_manifest(self): """Add entries for py3o images into the manifest file."""
xpath_expr = "//manifest:manifest[1]" for content_tree in self.content_trees: # Find manifest:manifest tags. manifest_e = content_tree.xpath( xpath_expr, namespaces=self.namespaces ) if not manifest_e: continue for identifier in self.images.keys(): # Add a manifest:file-entry tag. lxml.etree.SubElement( manifest_e[0], '{%s}file-entry' % self.namespaces['manifest'], attrib={ '{%s}full-path' % self.namespaces['manifest']: ( PY3O_IMAGE_PREFIX + identifier ), '{%s}media-type' % self.namespaces['manifest']: '', } )
<SYSTEM_TASK:> prepare the flows without saving to file <END_TASK> <USER_TASK:> Description: def render_tree(self, data): """prepare the flows without saving to file this method has been decoupled from render_flow to allow better unit testing """
# TODO: find a way to make this localization aware...
# because ATM it formats texts using French style numbers...
# best way would be to let the user inject its own vars...
# but this would not work on fusion servers...
# so we must find a way to localize this a bit... or remove it and
# consider our caller must pre-render its variables to the desired
# locale...?
new_data = dict(
    decimal=decimal,
    format_float=(
        lambda val: (
            isinstance(
                val, decimal.Decimal
            ) or isinstance(
                val, float
            )
        ) and str(val).replace('.', ',') or val
    ),
    format_percentage=(
        lambda val: ("%0.2f %%" % val).replace('.', ',')
    )
)

# Soft page breaks are hints for applications for rendering a page
# break. Soft page breaks in for loops may compromise the paragraph
# formatting especially the margins. Open-/LibreOffice will regenerate
# the page breaks when displaying the document. Therefore it is safe to
# remove them.
self.remove_soft_breaks()

# first we need to transform the py3o template into a valid
# Genshi template.
starting_tags, closing_tags = self.handle_instructions(
    self.content_trees,
    self.namespaces
)
parents = [tag[0].getparent() for tag in starting_tags]
linknum = len(parents)
parentnum = len(set(parents))
if linknum != parentnum:
    raise TemplateException(
        "Every py3o link instruction should be on its own line"
    )

for link, py3o_base in starting_tags:
    self.handle_link(
        link,
        py3o_base,
        closing_tags[id(link)]
    )

self.__prepare_userfield_decl()
self.__prepare_usertexts()

self.__replace_image_links()
self.__add_images_to_manifest()

for fnum, content_tree in enumerate(self.content_trees):
    content = lxml.etree.tostring(content_tree.getroot())
    if self.ignore_undefined_variables:
        template = MarkupTemplate(content, lookup='lenient')
    else:
        template = MarkupTemplate(content)

    # then we need to render the genshi template itself by
    # providing the data to genshi
    template_dict = {}
    template_dict.update(data.items())
    template_dict.update(new_data.items())

    self.output_streams.append(
        (
            self.templated_files[fnum],
            template.generate(**template_dict)
        )
    )
<SYSTEM_TASK:> render the OpenDocument with the user data <END_TASK> <USER_TASK:> Description: def render_flow(self, data): """render the OpenDocument with the user data @param data: the input stream of user data. This should be a dictionary mapping, keys being the values accessible to your report. @type data: dictionary """
self.render_tree(data) # then reconstruct a new ODT document with the generated content for status in self.__save_output(): yield status
<SYSTEM_TASK:> Set data for an image mentioned in the template. <END_TASK> <USER_TASK:> Description: def set_image_path(self, identifier, path): """Set data for an image mentioned in the template. @param identifier: Identifier of the image; refer to the image in the template by setting "py3o.[identifier]" as the name of that image. @type identifier: string @param path: Image path on the file system @type path: string """
with open(path, 'rb') as f:
    self.set_image_data(identifier, f.read())
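For context, typical end-to-end usage of this template class looks like the sketch below; the Template entry point is py3o.template's documented one, but the file names and the data payload here are made up for illustration:

from py3o.template import Template

# a sketch only: file names and data are illustrative
t = Template('invoice_template.odt', 'invoice_out.odt')
# the template refers to this image as "py3o.logo"
t.set_image_path('logo', 'company_logo.png')
t.render({'invoice': {'number': 42, 'total': '1 337,00'}})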
<SYSTEM_TASK:> Saves the output into a native OOo document format. <END_TASK> <USER_TASK:> Description: def __save_output(self): """Saves the output into a native OOo document format. """
out = zipfile.ZipFile(self.outputfilename, 'w') for info_zip in self.infile.infolist(): if info_zip.filename in self.templated_files: # Template file - we have edited these. # get a temp file streamout = open(get_secure_filename(), "w+b") fname, output_stream = self.output_streams[ self.templated_files.index(info_zip.filename) ] transformer = get_list_transformer(self.namespaces) remapped_stream = output_stream | transformer # write the whole stream to it for chunk in remapped_stream.serialize(): streamout.write(chunk.encode('utf-8')) yield True # close the temp file to flush all data and make sure we get # it back when writing to the zip archive. streamout.close() # write the full file to archive out.write(streamout.name, fname) # remove temp file os.unlink(streamout.name) else: # Copy other files straight from the source archive. out.writestr(info_zip, self.infile.read(info_zip.filename)) # Save images in the "Pictures" sub-directory of the archive. for identifier, data in self.images.items(): out.writestr(PY3O_IMAGE_PREFIX + identifier, data) # close the zipfile before leaving out.close() yield True
<SYSTEM_TASK:> returns the check digit of the card number. <END_TASK> <USER_TASK:> Description: def get_check_digit(unchecked): """returns the check digit of the card number."""
checksum = sum(even_digits(unchecked)) + sum(
    sum(digits_of(2 * d)) for d in odd_digits(unchecked))
return 9 * checksum % 10
<SYSTEM_TASK:> determines whether the card number is valid. <END_TASK> <USER_TASK:> Description: def is_valid(number): """determines whether the card number is valid."""
n = str(number) if not n.isdigit(): return False return int(n[-1]) == get_check_digit(n[:-1])
<SYSTEM_TASK:>
Generates a random, valid card number, which is returned as a string.
<END_TASK>
<USER_TASK:>
Description:
def generate(length):
    """Generates a random, valid card number, which is returned as a
    string."""
if not isinstance(length, int) or length < 2:
    raise TypeError('length must be a positive integer greater than 1.')

# first digit cannot be 0
digits = [random.randint(1, 9)]

for _ in range(length - 2):
    digits.append(random.randint(0, 9))

digits.append(get_check_digit(''.join(map(str, digits))))

return ''.join(map(str, digits))
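A small self-contained check of how these three Luhn functions fit together. The helpers digits_of, even_digits and odd_digits are not shown in this excerpt, so the definitions below are assumptions consistent with the Luhn algorithm (positions counted 1-indexed from the right):

def digits_of(number):
    # split a number or numeric string into its digits
    return [int(d) for d in str(number)]

def even_digits(number):
    # digits at even positions, counting from the right starting at 1
    return digits_of(number)[-2::-2]

def odd_digits(number):
    # digits at odd positions, counting from the right starting at 1
    return digits_of(number)[-1::-2]

# "7992739871" is the classic Luhn worked example; its check digit is 3
assert get_check_digit("7992739871") == 3
assert is_valid("79927398713")

number = generate(16)
assert is_valid(number) and len(number) == 16
print(number)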
<SYSTEM_TASK:> Reversed spiral generator. <END_TASK> <USER_TASK:> Description: def _rspiral(width, height): """Reversed spiral generator. Parameters ---------- width : `int` Spiral width. height : `int` Spiral height. Returns ------- `generator` of (`int`, `int`) Points. """
x0 = 0 y0 = 0 x1 = width - 1 y1 = height - 1 while x0 < x1 and y0 < y1: for x in range(x0, x1): yield x, y0 for y in range(y0, y1): yield x1, y for x in range(x1, x0, -1): yield x, y1 for y in range(y1, y0, -1): yield x0, y x0 += 1 y0 += 1 x1 -= 1 y1 -= 1 if x0 == x1: for y in range(y0, y1 + 1): yield x0, y elif y0 == y1: for x in range(x0, x1 + 1): yield x, y0
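To visualize the order in which _rspiral walks a grid, a short sketch (assuming the generator is in scope):

width, height = 4, 3
grid = [[0] * width for _ in range(height)]
for step, (x, y) in enumerate(_rspiral(width, height)):
    grid[y][x] = step
for row in grid:
    print(' '.join('%2d' % v for v in row))
# prints the clockwise outer ring first, then the middle row:
#  0  1  2  3
#  9 10 11  4
#  8  7  6  5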
<SYSTEM_TASK:> Spiral generator. <END_TASK> <USER_TASK:> Description: def _spiral(width, height): """Spiral generator. Parameters ---------- width : `int` Spiral width. height : `int` Spiral height. Returns ------- `generator` of (`int`, `int`) Points. """
if width == 1: for y in range(height - 1, -1, -1): yield 0, y return if height == 1: for x in range(width - 1, -1, -1): yield x, 0 return if width <= height: x0 = width // 2 if width % 2: for y in range(height - 1 - x0, x0 - 1, -1): yield x0, y x0 -= 1 y0 = x0 else: y0 = height // 2 if height % 2: for x in range(width - 1 - y0, y0 - 1, -1): yield x, y0 y0 -= 1 x0 = y0 while x0 >= 0: x1 = width - x0 - 1 y1 = height - y0 - 1 for y in range(y0 + 1, y1): yield x0, y for x in range(x0, x1): yield x, y1 for y in range(y1, y0, -1): yield x1, y for x in range(x1, x0 - 1, -1): yield x, y0 x0 -= 1 y0 -= 1
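The forward spiral starts at the center and is exactly the reversed spiral walked backwards, which the following sketch verifies (again assuming both generators are in scope):

width, height = 4, 3
fwd = list(_spiral(width, height))
rev = list(_rspiral(width, height))
assert fwd == rev[::-1]                   # one is the other, reversed
assert len(set(fwd)) == width * height    # every cell visited exactly once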
<SYSTEM_TASK:>
Get point coordinates in next block.
<END_TASK>
<USER_TASK:>
Description:
def get_point_in_block(cls, x, y, block_idx, block_size):
    """Get point coordinates in next block.

    Parameters
    ----------
    x : `int`
        X coordinate in current block.
    y : `int`
        Y coordinate in current block.
    block_idx : `int`
        Current block index in next block.
    block_size : `int`
        Current block size.

    Raises
    ------
    IndexError
        If block index is out of range.

    Returns
    -------
    (`int`, `int`)
        Point coordinates.
    """
if block_idx == 0: return y, x if block_idx == 1: return x, y + block_size if block_idx == 2: return x + block_size, y + block_size if block_idx == 3: x, y = block_size - 1 - y, block_size - 1 - x return x + block_size, y raise IndexError('block index out of range: %d' % block_idx)
<SYSTEM_TASK:> Get curve point coordinates by index. <END_TASK> <USER_TASK:> Description: def get_point(cls, idx, size): """Get curve point coordinates by index. Parameters ---------- idx : `int` Point index. size : `int` Curve size. Returns ------- (`int`, `int`) Point coordinates. """
x, y = cls.POSITION[idx % 4] idx //= 4 block_size = 2 while block_size < size: block_idx = idx % 4 x, y = cls.get_point_in_block(x, y, block_idx, block_size) idx //= 4 block_size *= 2 return x, y
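To see the curve these two classmethods trace, here is a standalone sketch. The class's POSITION table is not shown in this excerpt, so the usual first-order Hilbert cell is assumed below; hilbert_point simply inlines get_point and get_point_in_block:

POSITION = ((0, 0), (0, 1), (1, 1), (1, 0))  # assumed base cell

def hilbert_point(idx, size):
    # mirrors get_point + get_point_in_block above
    x, y = POSITION[idx % 4]
    idx //= 4
    block_size = 2
    while block_size < size:
        block_idx = idx % 4
        if block_idx == 0:
            x, y = y, x
        elif block_idx == 1:
            y += block_size
        elif block_idx == 2:
            x += block_size
            y += block_size
        else:
            x, y = block_size - 1 - y, block_size - 1 - x
            x += block_size
        idx //= 4
        block_size *= 2
    return x, y

size = 8  # must be a power of two
grid = [[0] * size for _ in range(size)]
for i in range(size * size):
    px, py = hilbert_point(i, size)
    grid[py][px] = i
for row in grid:
    print(' '.join('%2d' % v for v in row))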
<SYSTEM_TASK:> Recursive function that fills up the dictionary <END_TASK> <USER_TASK:> Description: def __recur_to_dict(forlist, data_dict, res): """Recursive function that fills up the dictionary """
# First we go through all attrs from the ForList and add respective
# keys on the dict.
for a in forlist.attrs:
    a_list = a.split('.')
    if len(a_list) == 1:
        res = data_dict[a_list[0]]
        return res
    if a_list[0] in data_dict:
        tmp = res
        for i in a_list[1:-1]:
            if i not in tmp:
                tmp[i] = {}
            tmp = tmp[i]
        # len(a_list) >= 2 is guaranteed by the early return above
        tmp[a_list[-1]] = reduce(
            getattr, a_list[1:], data_dict[a_list[0]]
        )

# Then create a list for all children,
# modify the datadict to fit the new child
# and call myself
for c in forlist.childs:
    it = c.name.split('.')
    res[it[-1]] = []
    for i, val in enumerate(
        reduce(getattr, it[1:], data_dict[it[0]])
    ):
        new_data_dict = {c.var_from: val}
        if len(res[it[-1]]) <= i:
            res[it[-1]].append({})
        res[it[-1]][i] = ForList.__recur_to_dict(
            c, new_data_dict, res[it[-1]][i]
        )
return res
<SYSTEM_TASK:>
Construct a dict object from a list of ForList objects
<END_TASK>
<USER_TASK:>
Description:
def to_dict(for_lists, global_vars, data_dict):
    """ Construct a dict object from a list of ForList objects

    :param for_lists: list of for_list
    :param global_vars: list of global vars to add
    :param data_dict: data from an orm-like object (with dot notation)
    :return: a dict representation of the ForList objects
    """
res = {}
# The first level is a little bit special

# Manage global variables
for a in global_vars:
    a_list = a.split('.')
    tmp = res
    for i in a_list[:-1]:
        if i not in tmp:
            tmp[i] = {}
        tmp = tmp[i]
    tmp[a_list[-1]] = reduce(getattr, a_list[1:], data_dict[a_list[0]])

# Then manage for lists recursively
for for_list in for_lists:
    it = for_list.name.split('.')
    tmp = res
    for i in it[:-1]:
        if i not in tmp:
            tmp[i] = {}
        tmp = tmp[i]
    if it[-1] not in tmp:
        tmp[it[-1]] = []
    tmp = tmp[it[-1]]
    if it[0] not in data_dict:
        continue
    if len(it) == 1:
        loop = enumerate(data_dict[it[0]])
    else:
        loop = enumerate(reduce(getattr, it[-1:], data_dict[it[0]]))
    for i, val in loop:
        new_data_dict = {for_list.var_from: val}
        # We append a new dict only if we need
        if len(tmp) <= i:
            tmp.append({})
        # Call myself with new context, and get result
        tmp[i] = ForList.__recur_to_dict(
            for_list, new_data_dict, tmp[i]
        )

return res
<SYSTEM_TASK:> Nothing hilariously hidden here, logs a user out. Strip this out if your <END_TASK> <USER_TASK:> Description: def logout(request, redirect_url=settings.LOGOUT_REDIRECT_URL): """ Nothing hilariously hidden here, logs a user out. Strip this out if your application already has hooks to handle this. """
django_logout(request) return HttpResponseRedirect(request.build_absolute_uri(redirect_url))
<SYSTEM_TASK:> The view function that initiates the entire handshake. <END_TASK> <USER_TASK:> Description: def begin_auth(request): """The view function that initiates the entire handshake. For the most part, this is 100% drag and drop. """
# Instantiate Twython with the first leg of our trip.
twitter = Twython(settings.TWITTER_KEY, settings.TWITTER_SECRET)

# Request an authorization url to send the user to...
callback_url = request.build_absolute_uri(
    reverse('twython_django_oauth.views.thanks')
)
auth_props = twitter.get_authentication_tokens(callback_url)

# Then send them over there, durh.
request.session['request_token'] = auth_props
request.session['next_url'] = request.GET.get('next', None)
return HttpResponseRedirect(auth_props['auth_url'])
<SYSTEM_TASK:> A user gets redirected here after hitting Twitter and authorizing your app to use their data. <END_TASK> <USER_TASK:> Description: def thanks(request, redirect_url=settings.LOGIN_REDIRECT_URL): """A user gets redirected here after hitting Twitter and authorizing your app to use their data. This is the view that stores the tokens you want for querying data. Pay attention to this. """
# Now that we've got the magic tokens back from Twitter, we need to
# exchange them for permanent ones and store them...
oauth_token = request.session['request_token']['oauth_token']
oauth_token_secret = request.session['request_token']['oauth_token_secret']
twitter = Twython(settings.TWITTER_KEY, settings.TWITTER_SECRET,
                  oauth_token, oauth_token_secret)

# Retrieve the tokens we want...
authorized_tokens = twitter.get_authorized_tokens(
    request.GET['oauth_verifier']
)

# If they already exist, grab them, login and redirect to a page
# displaying stuff.
try:
    user = User.objects.get(username=authorized_tokens['screen_name'])
except User.DoesNotExist:
    # We mock a creation here; no email, password is just the token, etc.
    user = User.objects.create_user(authorized_tokens['screen_name'],
                                    "[email protected]",
                                    authorized_tokens['oauth_token_secret'])
    profile = TwitterProfile()
    profile.user = user
    profile.oauth_token = authorized_tokens['oauth_token']
    profile.oauth_secret = authorized_tokens['oauth_token_secret']
    profile.save()

user = authenticate(
    username=authorized_tokens['screen_name'],
    password=authorized_tokens['oauth_token_secret']
)
login(request, user)
redirect_url = request.session.get('next_url', redirect_url)
return HttpResponseRedirect(redirect_url)
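For orientation, a URLconf wiring these three views together might look like the sketch below; the pattern strings are assumptions, and the dotted-path reverse() call in begin_auth implies an older Django URL-resolution style:

# urls.py -- illustrative sketch only
from django.conf.urls import url

from twython_django_oauth import views

urlpatterns = [
    url(r'^login/$', views.begin_auth),
    url(r'^logout/$', views.logout),
    url(r'^thanks/$', views.thanks),
]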
<SYSTEM_TASK:> Generate image pixels. <END_TASK> <USER_TASK:> Description: def _imgdata(self, width, height, state_size=None, start='', dataset=''): """Generate image pixels. Parameters ---------- width : `int` Image width. height : `int` Image height. state_size : `int` or `None`, optional State size to use for generation (default: `None`). start : `str`, optional Initial state (default: ''). dataset : `str`, optional Dataset key prefix (default: ''). Raises ------ RuntimeError If generator is empty. Returns ------- `generator` of `int` Pixel generator. """
size = width * height if size > 0 and start: yield state_to_pixel(start) size -= 1 while size > 0: prev_size = size pixels = self.generate(state_size, start, dataset) pixels = islice(pixels, 0, size) for pixel in pixels: yield state_to_pixel(pixel) size -= 1 if prev_size == size: if start: yield from repeat(state_to_pixel(start), size) else: raise RuntimeError('empty generator')
<SYSTEM_TASK:> Generate a channel. <END_TASK> <USER_TASK:> Description: def _channel(self, width, height, state_sizes, start_level, start_image, dataset): """Generate a channel. Parameters ---------- width : `int` Image width. height : `int` Image height. state_sizes : `list` of (`int` or `None`) Level state sizes. start_level : `int` Initial level. start_image : `PIL.Image` or `None` Initial level image. dataset : `str` Dataset key prefix. Returns ------- `PIL.Image` Generated image. """
ret = start_image for level, state_size in enumerate(state_sizes, start_level + 1): key = dataset + level_dataset(level) if start_image is not None: scale = self.scanner.level_scale[level - 1] width *= scale height *= scale ret = self.imgtype.create_channel(width, height) if start_image is None: tr = self.scanner.traversal[0](width, height, ends=False) data = self._imgdata(width, height, state_size, '', key) self._write_imgdata(ret, data, tr) else: tr = self.scanner.traversal[0]( start_image.size[0], start_image.size[1], ends=False ) for xy in tr: start = pixel_to_state(start_image.getpixel(xy)) data = self._imgdata(scale, scale, state_size, start, key) blk = self.scanner.traversal[level](scale, scale, False) x, y = xy x *= scale y *= scale self._write_imgdata(ret, data, blk, x, y) start_image = ret return ret
<SYSTEM_TASK:>
Flatten a ``Parameter``.
<END_TASK>
<USER_TASK:>
Description:
def ravel(parameter, random_state=None):
    """
    Flatten a ``Parameter``.

    Parameters
    ----------
    parameter: Parameter
        A ``Parameter`` object
    random_state: None, int or RandomState, optional
        random seed, used when drawing the initial value from the
        parameter's sampling distribution

    Returns
    -------
    flatvalue: ndarray
        a flattened array of shape ``(prod(parameter.shape),)``
    flatbounds: list
        a list of bound tuples of length ``prod(parameter.shape)``
    """
flatvalue = np.ravel(parameter.rvs(random_state=random_state)) flatbounds = [parameter.bounds for _ in range(np.prod(parameter.shape, dtype=int))] return flatvalue, flatbounds
<SYSTEM_TASK:> Horizontally stack a sequence of value bounds pairs. <END_TASK> <USER_TASK:> Description: def hstack(tup): """ Horizontally stack a sequence of value bounds pairs. Parameters ---------- tup: sequence a sequence of value, ``Bound`` pairs Returns ------- value: ndarray a horizontally concatenated array1d bounds: a list of Bounds """
vals, bounds = zip(*tup) stackvalue = np.hstack(vals) stackbounds = list(chain(*bounds)) return stackvalue, stackbounds
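A minimal sketch of hstack at work; plain tuples stand in here for the library's Bound objects, since hstack only concatenates whatever bound entries it is given:

import numpy as np

pairs = [
    (np.array([.5, .1]), [(0., 1.), (0., 1.)]),
    (np.array([2.]), [(None, None)]),
]
values, bounds = hstack(pairs)
print(values)   # [0.5 0.1 2. ]
print(bounds)   # [(0.0, 1.0), (0.0, 1.0), (None, None)]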
<SYSTEM_TASK:> Check a value falls within a bound. <END_TASK> <USER_TASK:> Description: def check(self, value): """ Check a value falls within a bound. Parameters ---------- value : scalar or ndarray value to test Returns ------- bool: If all values fall within bounds Example ------- >>> bnd = Bound(1, 2) >>> bnd.check(1.5) True >>> bnd.check(3) False >>> bnd.check(np.ones(10)) True >>> bnd.check(np.array([1, 3, 1.5])) False """
if self.lower is not None:
    if np.any(value < self.lower):
        return False

if self.upper is not None:
    if np.any(value > self.upper):
        return False

return True
<SYSTEM_TASK:>
Clip a value to a bound.
<END_TASK>
<USER_TASK:>
Description:
def clip(self, value):
    """
    Clip a value to a bound.

    Parameters
    ----------
    value : scalar or ndarray
        value to clip

    Returns
    -------
    scalar or ndarray :
        of the same shape as value, but with each element clipped to fall
        within the specified bounds

    Example
    -------
    >>> bnd = Bound(1, 2)
    >>> bnd.clip(1.5)
    1.5
    >>> bnd.clip(3)
    2
    >>> bnd.clip(np.array([1, 3, 1.5]))
    array([ 1. , 2. , 1.5])
    >>> bnd = Bound(None, None)
    >>> bnd.clip(np.array([1, 3, 1.5]))
    array([ 1. , 3. , 1.5])
    """
if self.lower is None and self.upper is None:
    return value

return np.clip(value, self.lower, self.upper)
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def rvs(self, random_state=None): r""" Draw a random value from this Parameter's distribution. If ``value`` was not initialised with a ``scipy.stats`` object, then the scalar/ndarray value is returned. Parameters ---------- random_state : None, int or RandomState, optional random seed Returns ------- ndarray : of size ``self.shape``, a random draw from the distribution, or ``self.value`` if not initialised with a ``scipy.stats`` object. Note ---- Random draws are *clipped* to the bounds, and so it is up to the user to input a sensible sampling distribution! """
# No sampling distribution
if self.dist is None:
    return self.value

# Unconstrained samples
rs = check_random_state(random_state)
samples = self.dist.rvs(size=self.shape, random_state=rs)

# Bound the samples
samples = self.bounds.clip(samples)

return samples
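Based on the Parameter/Bound usage shown in the doctests elsewhere in this document, drawing a clipped random sample looks like this sketch (assuming both types are importable from the library's btypes module):

import numpy as np
from scipy.stats import norm

w = Parameter(norm(), Bound(-.5, .5), shape=(3,))
sample = w.rvs(random_state=42)
print(sample)  # three Gaussian draws, clipped into [-0.5, 0.5]
assert np.all((sample >= -.5) & (sample <= .5))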
<SYSTEM_TASK:> Creates missing ``Tree`` objects for the given path. <END_TASK> <USER_TASK:> Description: def _get_missing_trees(self, path, root_tree): """ Creates missing ``Tree`` objects for the given path. :param path: path given as a string. It may be a path to a file node (i.e. ``foo/bar/baz.txt``) or directory path - in that case it must end with slash (i.e. ``foo/bar/``). :param root_tree: ``dulwich.objects.Tree`` object from which we start traversing (should be commit's root tree) """
dirpath = posixpath.split(path)[0]
dirs = dirpath.split('/')
if not dirs or dirs == ['']:
    return []

def get_tree_for_dir(tree, dirname):
    for name, mode, id in tree.iteritems():
        if name == dirname:
            obj = self.repository._repo[id]
            if isinstance(obj, objects.Tree):
                return obj
            else:
                raise RepositoryError("Cannot create directory %s "
                    "at tree %s as path is occupied and is not a "
                    "Tree" % (dirname, tree))
    return None

trees = []
parent = root_tree
for dirname in dirs:
    tree = get_tree_for_dir(parent, dirname)
    if tree is None:
        tree = objects.Tree()
        dirmode = 0o40000  # directory entry mode
        parent.add(dirmode, dirname, tree.id)
    parent = tree
    # Always append tree
    trees.append(tree)
return trees
<SYSTEM_TASK:> Convert a value to a list. <END_TASK> <USER_TASK:> Description: def to_list(x): """Convert a value to a list. Parameters ---------- x Value. Returns ------- `list` Examples -------- >>> to_list(0) [0] >>> to_list({'x': 0}) [{'x': 0}] >>> to_list(x ** 2 for x in range(3)) [0, 1, 4] >>> x = [1, 2, 3] >>> to_list(x) [1, 2, 3] >>> _ is x True """
if isinstance(x, list): return x if not isinstance(x, dict): try: return list(x) except TypeError: pass return [x]
<SYSTEM_TASK:> Convert a value to a list of specified length. <END_TASK> <USER_TASK:> Description: def fill(xs, length, copy=False): """Convert a value to a list of specified length. If the input is too short, fill it with its last element. Parameters ---------- xs Input list or value. length : `int` Output list length. copy : `bool`, optional Deep copy the last element to fill the list (default: False). Returns ------- `list` Raises ------ ValueError If `xs` is empty and `length` > 0 Examples -------- >>> fill(0, 3) [0, 0, 0] >>> fill((x ** 2 for x in range(3)), 1) [0] >>> x = [{'x': 0}, {'x': 1}] >>> y = fill(x, 4) >>> y [{'x': 0}, {'x': 1}, {'x': 1}, {'x': 1}] >>> y[2] is y[1] True >>> y[3] is y[2] True >>> y = fill(x, 4, True) >>> y [{'x': 0}, {'x': 1}, {'x': 1}, {'x': 1}] >>> y[2] is y[1] False >>> y[3] is y[2] False """
if isinstance(xs, list) and len(xs) == length: return xs if length <= 0: return [] try: xs = list(islice(xs, 0, length)) if not xs: raise ValueError('empty input') except TypeError: xs = [xs] if len(xs) < length: if copy: last = xs[-1] xs.extend(deepcopy(last) for _ in range(length - len(xs))) else: xs.extend(islice(repeat(xs[-1]), 0, length - len(xs))) return xs
<SYSTEM_TASK:> Get int enum value. <END_TASK> <USER_TASK:> Description: def int_enum(cls, val): """Get int enum value. Parameters ---------- cls : `type` Int enum class. val : `int` or `str` Name or value. Returns ------- `IntEnum` Raises ------ ValueError """
if isinstance(val, str): val = val.upper() try: return getattr(cls, val) except AttributeError: raise ValueError('{0}.{1}'.format(cls, val)) return cls(val)
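A quick self-contained demonstration of int_enum, assuming the function is in scope:

from enum import IntEnum

class Color(IntEnum):
    RED = 0
    GREEN = 1

print(int_enum(Color, 'red'))   # Color.RED -- case-insensitive name lookup
print(int_enum(Color, 1))       # Color.GREEN -- value lookup
try:
    int_enum(Color, 'BLUE')
except ValueError as err:
    print(err)                  # unknown names raise ValueError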
<SYSTEM_TASK:> Create or load an object if necessary. <END_TASK> <USER_TASK:> Description: def load(obj, cls, default_factory): """Create or load an object if necessary. Parameters ---------- obj : `object` or `dict` or `None` cls : `type` default_factory : `function` Returns ------- `object` """
if obj is None: return default_factory() if isinstance(obj, dict): return cls.load(obj) return obj
<SYSTEM_TASK:> Add classes to the group. <END_TASK> <USER_TASK:> Description: def add_class(cls, *args): """Add classes to the group. Parameters ---------- *args : `type` Classes to add. """
for cls2 in args: cls.classes[cls2.__name__] = cls2
<SYSTEM_TASK:> Remove classes from the group. <END_TASK> <USER_TASK:> Description: def remove_class(cls, *args): """Remove classes from the group. Parameters ---------- *args : `type` Classes to remove. """
for cls2 in args: try: del cls.classes[cls2.__name__] except KeyError: pass
<SYSTEM_TASK:> Create an object from JSON data. <END_TASK> <USER_TASK:> Description: def load(cls, data): """Create an object from JSON data. Parameters ---------- data : `dict` JSON data. Returns ---------- `object` Created object. Raises ------ KeyError If `data` does not have the '__class__' key or the necessary class is not in the class group. """
ret = cls.classes[data['__class__']] data_cls = data['__class__'] del data['__class__'] try: ret = ret(**data) finally: data['__class__'] = data_cls return ret
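Together, these three classmethods implement a small registry for round-tripping objects through JSON-style dicts. A self-contained sketch of the idea, with ObjectGroup standing in for the class-group base that is not shown in this excerpt:

class ObjectGroup:
    # minimal stand-in for the class-group base in this excerpt
    classes = {}

    @classmethod
    def add_class(cls, *args):
        for cls2 in args:
            cls.classes[cls2.__name__] = cls2

    @classmethod
    def load(cls, data):
        data = dict(data)
        target = cls.classes[data.pop('__class__')]
        return target(**data)


class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y


ObjectGroup.add_class(Point)
p = ObjectGroup.load({'__class__': 'Point', 'x': 1, 'y': 2})
print(p.x, p.y)  # 1 2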
<SYSTEM_TASK:> Sets the oauth_client attribute <END_TASK> <USER_TASK:> Description: def set_oauth_client(self, consumer_key, consumer_secret): """Sets the oauth_client attribute """
self.oauth_client = oauth1.Client(consumer_key, consumer_secret)
<SYSTEM_TASK:>
Prepare the request body and headers
<END_TASK>
<USER_TASK:>
Description:
def prepare_request(self, method, url, body=''):
    """Prepare the request body and headers

    :returns: a dict with the signed headers and the encoded body
        of the request
    """
headers = { 'Content-type': 'application/json', } # Note: we don't pass body to sign() since it's only for bodies that # are form-urlencoded. Similarly, we don't care about the body that # sign() returns. uri, signed_headers, signed_body = self.oauth_client.sign( url, http_method=method, headers=headers) if body: if method == 'GET': body = urllib.urlencode(body) else: body = json.dumps(body) headers.update(signed_headers) return {"headers": headers, "data": body}
<SYSTEM_TASK:> Extract error reason from the response. It might be either <END_TASK> <USER_TASK:> Description: def _get_error_reason(response): """Extract error reason from the response. It might be either the 'reason' or the entire response """
try: body = response.json() if body and 'reason' in body: return body['reason'] except ValueError: pass return response.content
<SYSTEM_TASK:>
Prepare the headers, encode the data, call the API and return the data it provides
<END_TASK>
<USER_TASK:>
Description:
def fetch(self, method, url, data=None, expected_status_code=None):
    """Prepare the headers, encode the data, call the API and return
    the data it provides
    """
kwargs = self.prepare_request(method, url, data) log.debug(json.dumps(kwargs)) response = getattr(requests, method.lower())(url, **kwargs) log.debug(json.dumps(response.content)) if response.status_code >= 400: response.raise_for_status() if (expected_status_code and response.status_code != expected_status_code): raise NotExpectedStatusCode(self._get_error_reason(response)) return response
<SYSTEM_TASK:> Return json decoded data from fetch <END_TASK> <USER_TASK:> Description: def fetch_json(self, method, url, data=None, expected_status_code=None): """Return json decoded data from fetch """
return self.fetch(method, url, data, expected_status_code).json()
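Typical usage of this client, sketched below; ApiClient stands in for whatever class defines set_oauth_client/fetch/fetch_json above, and the endpoint URL, credentials and payload are made up for illustration:

client = ApiClient()
client.set_oauth_client('consumer-key', 'consumer-secret')
created = client.fetch_json(
    'POST',
    'https://api.example.com/v1/widgets',
    data={'name': 'sprocket'},
    expected_status_code=201,
)
print(created)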
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def structured_minimizer(minimizer): r""" Allow an optimizer to accept nested sequences of Parameters to optimize. This decorator can intepret the :code:`Parameter` objects in `btypes.py`, and can accept nested sequences of *any* structure of these objects to optimise! It can also optionally evaluate *random starts* (i.e. random starting candidates) if the parameter objects have been initialised with distributions. For this, two additional parameters are exposed in the :code:`minimizer` interface. Parameters ---------- fun : callable objective function that takes in arbitrary ndarrays, floats or nested sequences of these. parameters : (nested) sequences of Parameter objects Initial guess of the parameters of the objective function nstarts : int, optional The number random starting candidates for optimisation to evaluate. This will only happen for :code:`nstarts > 0` and if at least one :code:`Parameter` object is random. random_state : None, int or RandomState, optional random seed Examples -------- >>> from scipy.optimize import minimize as sp_min >>> from ..btypes import Parameter, Bound Define a cost function that returns a pair. The first element is the cost value and the second element is the gradient represented by a tuple. Even if the cost is a function of a single variable, the gradient must be a tuple containing one element. >>> def cost(w, lambda_): ... sq_norm = w.T.dot(w) ... return .5 * lambda_ * sq_norm, (lambda_ * w, .5 * sq_norm) Augment the Scipy optimizer to take structured inputs >>> new_min = structured_minimizer(sp_min) Constant Initial values >>> w_0 = Parameter(np.array([.5, .1, .2]), Bound()) >>> lambda_0 = Parameter(.25, Bound()) >>> res = new_min(cost, (w_0, lambda_0), method='L-BFGS-B', jac=True) >>> res_w, res_lambda = res.x Random Initial values >>> from scipy.stats import norm, gamma >>> w_0 = Parameter(norm(), Bound(), shape=(3,)) >>> lambda_0 = Parameter(gamma(a=1), Bound()) >>> res = new_min(cost, (w_0, lambda_0), method='L-BFGS-B', jac=True, ... nstarts=100, random_state=None) >>> res_w, res_lambda = res.x """
@wraps(minimizer) def new_minimizer(fun, parameters, jac=True, args=(), nstarts=0, random_state=None, **minimizer_kwargs): (array1d, fbounds), shapes = flatten( parameters, hstack=bt.hstack, shape=bt.shape, ravel=partial(bt.ravel, random_state=random_state) ) # Find best random starting candidate if we are doing random starts if nstarts > 0: array1d = _random_starts( fun=fun, parameters=parameters, jac=jac, args=args, nstarts=nstarts, random_state=random_state ) # Wrap function calls to work with wrapped minimizer flatten_args_dec = flatten_args(shapes) new_fun = flatten_args_dec(fun) # Wrap gradient calls to work with wrapped minimizer if callable(jac): new_jac = flatten_args_dec(jac) else: new_jac = jac if bool(jac): new_fun = flatten_func_grad(new_fun) result = minimizer(new_fun, array1d, jac=new_jac, args=args, bounds=fbounds, **minimizer_kwargs) result['x'] = tuple(unflatten(result['x'], shapes)) if bool(jac): result['jac'] = tuple(unflatten(result['jac'], shapes)) return result return new_minimizer
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def structured_sgd(sgd): r""" Allow an SGD to accept nested sequences of Parameters to optimize. This decorator can intepret the :code:`Parameter` objects in `btypes.py`, and can accept nested sequences of *any* structure of these objects to optimise! It can also optionally evaluate *random starts* (i.e. random starting candidates) if the parameter objects have been initialised with distributions. For this, an additional parameter is exposed in the :code:`minimizer` interface. Parameters ---------- fun : callable objective function that takes in arbitrary ndarrays, floats or nested sequences of these. parameters : (nested) sequences of Parameter objects Initial guess of the parameters of the objective function nstarts : int, optional The number random starting candidates for optimisation to evaluate. This will only happen for :code:`nstarts > 0` and if at least one :code:`Parameter` object is random. Examples -------- >>> from ..optimize import sgd >>> from ..btypes import Parameter, Bound Define a cost function that returns a pair. The first element is the cost value and the second element is the gradient represented by a sequence. Even if the cost is a function of a single variable, the gradient must be a sequence containing one element. >>> def cost(w, lambda_, data): ... N = len(data) ... y, X = data[:, 0], data[:, 1:] ... y_est = X.dot(w) ... ww = w.T.dot(w) ... obj = (y - y_est).sum() / N + lambda_ * ww ... gradw = - 2 * X.T.dot(y - y_est) / N + 2 * lambda_ * w ... gradl = ww ... return obj, [gradw, gradl] Augment the SGD optimizer to take structured inputs >>> new_sgd = structured_sgd(sgd) Data >>> y = np.linspace(1, 10, 100) + np.random.randn(100) + 1 >>> X = np.array([np.ones(100), np.linspace(1, 100, 100)]).T >>> data = np.hstack((y[:, np.newaxis], X)) Constant Initial values >>> w_0 = Parameter(np.array([1., 1.]), Bound()) >>> lambda_0 = Parameter(.25, Bound()) >>> res = new_sgd(cost, [w_0, lambda_0], data, batch_size=10, ... eval_obj=True) >>> res_w, res_lambda = res.x Random Initial values >>> from scipy.stats import norm, gamma >>> w_0 = Parameter(norm(), Bound(), shape=(2,)) >>> lambda_0 = Parameter(gamma(1.), Bound()) >>> res = new_sgd(cost, [w_0, lambda_0], data, batch_size=10, ... eval_obj=True, nstarts=100) >>> res_w, res_lambda = res.x """
@wraps(sgd) def new_sgd(fun, parameters, data, eval_obj=False, batch_size=10, args=(), random_state=None, nstarts=100, **sgd_kwargs): (array1d, fbounds), shapes = flatten(parameters, hstack=bt.hstack, shape=bt.shape, ravel=bt.ravel ) flatten_args_dec = flatten_args(shapes) new_fun = flatten_args_dec(fun) # Find best random starting candidate if we are doing random starts if eval_obj and nstarts > 0: data_gen = gen_batch(data, batch_size, random_state=random_state) array1d = _random_starts( fun=fun, parameters=parameters, jac=True, args=args, data_gen=data_gen, nstarts=nstarts, random_state=random_state ) if bool(eval_obj): new_fun = flatten_func_grad(new_fun) else: new_fun = flatten_grad(new_fun) result = sgd(new_fun, array1d, data=data, bounds=fbounds, args=args, eval_obj=eval_obj, random_state=random_state, **sgd_kwargs) result['x'] = tuple(unflatten(result['x'], shapes)) return result return new_sgd