Dataset columns (type and value-length range as reported by the viewer):
repository_name: stringlengths, 5 to 67
func_path_in_repository: stringlengths, 4 to 234
func_name: stringlengths, 0 to 314
whole_func_string: stringlengths, 52 to 3.87M
language: stringclasses, 6 values
func_code_string: stringlengths, 52 to 3.87M
func_documentation_string: stringlengths, 1 to 47.2k
func_code_url: stringlengths, 85 to 339
oseledets/ttpy
tt/core/tools.py
delta
def delta(n, d=None, center=0):
    """ Create TT-vector for delta-function :math:`\\delta(x - x_0)`. """
    if isinstance(n, six.integer_types):
        n = [n]
    if d is None:
        n0 = _np.asanyarray(n, dtype=_np.int32)
    else:
        n0 = _np.array(n * d, dtype=_np.int32)
    d = n0.size

    if center < 0:
        cind = [0] * d
    else:
        cind = []
        for i in xrange(d):
            cind.append(center % n0[i])
            center //= n0[i]
        if center > 0:
            cind = [0] * d

    cr = []
    for i in xrange(d):
        cur_core = _np.zeros((1, n0[i], 1))
        cur_core[0, cind[i], 0] = 1
        cr.append(cur_core)
    return _vector.vector.from_list(cr)
python
def delta(n, d=None, center=0):
    """ Create TT-vector for delta-function :math:`\\delta(x - x_0)`. """
    if isinstance(n, six.integer_types):
        n = [n]
    if d is None:
        n0 = _np.asanyarray(n, dtype=_np.int32)
    else:
        n0 = _np.array(n * d, dtype=_np.int32)
    d = n0.size

    if center < 0:
        cind = [0] * d
    else:
        cind = []
        for i in xrange(d):
            cind.append(center % n0[i])
            center //= n0[i]
        if center > 0:
            cind = [0] * d

    cr = []
    for i in xrange(d):
        cur_core = _np.zeros((1, n0[i], 1))
        cur_core[0, cind[i], 0] = 1
        cr.append(cur_core)
    return _vector.vector.from_list(cr)
Create TT-vector for delta-function :math:`\\delta(x - x_0)`.
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L712-L736
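A minimal usage sketch for delta (not taken from the repository), assuming the function is importable from the tt/core/tools.py path given in the record and that the returned tt.vector exposes .full() as the other records here do:

    import numpy as np
    from tt.core.tools import delta   # path taken from the record above

    # Spike at linear index 5 on a 2x2x2x2 grid; the center is decomposed
    # digit-by-digit (little-endian) into the TT cores.
    x = delta(2, d=4, center=5)
    full = x.full().flatten(order='F')
    print(np.nonzero(full)[0])        # expected: [5]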
oseledets/ttpy
tt/core/tools.py
stepfun
def stepfun(n, d=None, center=1, direction=1): """ Create TT-vector for Heaviside step function :math:`\chi(x - x_0)`. Heaviside step function is defined as .. math:: \chi(x) = \\left\{ \\begin{array}{l} 1 \mbox{ when } x \ge 0, \\\\ 0 \mbox{ when } x < 0. \\end{array} \\right. For negative value of ``direction`` :math:`\chi(x_0 - x)` is approximated. """ if isinstance(n, six.integer_types): n = [n] if d is None: n0 = _np.asanyarray(n, dtype=_np.int32) else: n0 = _np.array(n * d, dtype=_np.int32) d = n0.size N = _np.prod(n0) if center >= N and direction < 0 or center <= 0 and direction > 0: return ones(n0) if center <= 0 and direction < 0 or center >= N and direction > 0: raise ValueError( "Heaviside function with specified center and direction gives zero tensor!") if direction > 0: center = N - center cind = [] for i in xrange(d): cind.append(center % n0[i]) center //= n0[i] def gen_notx(currcind, currn): return [0.0] * (currn - currcind) + [1.0] * currcind def gen_notx_rev(currcind, currn): return [1.0] * currcind + [0.0] * (currn - currcind) def gen_x(currcind, currn): result = [0.0] * currn result[currn - currcind - 1] = 1.0 return result def gen_x_rev(currcind, currn): result = [0.0] * currn result[currcind] = 1.0 return result if direction > 0: x = gen_x notx = gen_notx else: x = gen_x_rev notx = gen_notx_rev crs = [] prevrank = 1 for i in range(d)[::-1]: break_further = max([0] + cind[:i]) nextrank = 2 if break_further else 1 one = [1] * n0[i] cr = _np.zeros([nextrank, n0[i], prevrank], dtype=_np.float) tempx = x(cind[i], n0[i]) tempnotx = notx(cind[i], n0[i]) # high-conditional magic if not break_further: if cind[i]: if prevrank > 1: cr[0, :, 0] = one cr[0, :, 1] = tempnotx else: cr[0, :, 0] = tempnotx else: cr[0, :, 0] = one else: if prevrank > 1: cr[0, :, 0] = one if cind[i]: cr[0, :, 1] = tempnotx cr[1, :, 1] = tempx else: cr[1, :, 1] = tempx else: if cind[i]: cr[0, :, 0] = tempnotx cr[1, :, 0] = tempx else: nextrank = 1 cr = cr[:1, :, :] cr[0, :, 0] = tempx prevrank = nextrank crs.append(cr) return _vector.vector.from_list(crs[::-1])
python
def stepfun(n, d=None, center=1, direction=1): """ Create TT-vector for Heaviside step function :math:`\chi(x - x_0)`. Heaviside step function is defined as .. math:: \chi(x) = \\left\{ \\begin{array}{l} 1 \mbox{ when } x \ge 0, \\\\ 0 \mbox{ when } x < 0. \\end{array} \\right. For negative value of ``direction`` :math:`\chi(x_0 - x)` is approximated. """ if isinstance(n, six.integer_types): n = [n] if d is None: n0 = _np.asanyarray(n, dtype=_np.int32) else: n0 = _np.array(n * d, dtype=_np.int32) d = n0.size N = _np.prod(n0) if center >= N and direction < 0 or center <= 0 and direction > 0: return ones(n0) if center <= 0 and direction < 0 or center >= N and direction > 0: raise ValueError( "Heaviside function with specified center and direction gives zero tensor!") if direction > 0: center = N - center cind = [] for i in xrange(d): cind.append(center % n0[i]) center //= n0[i] def gen_notx(currcind, currn): return [0.0] * (currn - currcind) + [1.0] * currcind def gen_notx_rev(currcind, currn): return [1.0] * currcind + [0.0] * (currn - currcind) def gen_x(currcind, currn): result = [0.0] * currn result[currn - currcind - 1] = 1.0 return result def gen_x_rev(currcind, currn): result = [0.0] * currn result[currcind] = 1.0 return result if direction > 0: x = gen_x notx = gen_notx else: x = gen_x_rev notx = gen_notx_rev crs = [] prevrank = 1 for i in range(d)[::-1]: break_further = max([0] + cind[:i]) nextrank = 2 if break_further else 1 one = [1] * n0[i] cr = _np.zeros([nextrank, n0[i], prevrank], dtype=_np.float) tempx = x(cind[i], n0[i]) tempnotx = notx(cind[i], n0[i]) # high-conditional magic if not break_further: if cind[i]: if prevrank > 1: cr[0, :, 0] = one cr[0, :, 1] = tempnotx else: cr[0, :, 0] = tempnotx else: cr[0, :, 0] = one else: if prevrank > 1: cr[0, :, 0] = one if cind[i]: cr[0, :, 1] = tempnotx cr[1, :, 1] = tempx else: cr[1, :, 1] = tempx else: if cind[i]: cr[0, :, 0] = tempnotx cr[1, :, 0] = tempx else: nextrank = 1 cr = cr[:1, :, :] cr[0, :, 0] = tempx prevrank = nextrank crs.append(cr) return _vector.vector.from_list(crs[::-1])
Create TT-vector for Heaviside step function :math:`\chi(x - x_0)`.

Heaviside step function is defined as

.. math::

    \chi(x) = \left\{ \begin{array}{l}
        1 \mbox{ when } x \ge 0, \\
        0 \mbox{ when } x < 0.
    \end{array} \right.

For negative value of ``direction`` :math:`\chi(x_0 - x)` is approximated.
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L739-L831
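A hedged usage sketch for stepfun under the same import assumption; the commented expectation follows the record's docstring rather than a verified run:

    import numpy as np
    from tt.core.tools import stepfun   # assumed importable, as in the record above

    h = stepfun(2, d=6, center=20, direction=1)   # chi(x - x0) on 64 grid points
    full = h.full().flatten(order='F')
    # Per the docstring: zeros for linear indices below `center`,
    # ones from `center` upwards.
    print(full[18:23])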
oseledets/ttpy
tt/core/tools.py
unit
def unit(n, d=None, j=None, tt_instance=True):
    ''' Generates e_j _vector in tt.vector format
    ---------
    Parameters:
        n - modes (either integer or array)
        d - dimensionality (integer)
        j - position of 1 in full-format e_j (integer)
        tt_instance - if True, returns tt.vector;
                      if False, returns tt cores as a list
    '''
    if isinstance(n, int):
        if d is None:
            d = 1
        n = n * _np.ones(d, dtype=_np.int32)
    else:
        d = len(n)
    if j is None:
        j = 0
    rv = []

    j = _ind2sub(n, j)

    for k in xrange(d):
        rv.append(_np.zeros((1, n[k], 1)))
        rv[-1][0, j[k], 0] = 1
    if tt_instance:
        rv = _vector.vector.from_list(rv)
    return rv
python
def unit(n, d=None, j=None, tt_instance=True):
    ''' Generates e_j _vector in tt.vector format
    ---------
    Parameters:
        n - modes (either integer or array)
        d - dimensionality (integer)
        j - position of 1 in full-format e_j (integer)
        tt_instance - if True, returns tt.vector;
                      if False, returns tt cores as a list
    '''
    if isinstance(n, int):
        if d is None:
            d = 1
        n = n * _np.ones(d, dtype=_np.int32)
    else:
        d = len(n)
    if j is None:
        j = 0
    rv = []

    j = _ind2sub(n, j)

    for k in xrange(d):
        rv.append(_np.zeros((1, n[k], 1)))
        rv[-1][0, j[k], 0] = 1
    if tt_instance:
        rv = _vector.vector.from_list(rv)
    return rv
Generates e_j _vector in tt.vector format
---------
Parameters:
    n - modes (either integer or array)
    d - dimensionality (integer)
    j - position of 1 in full-format e_j (integer)
    tt_instance - if True, returns tt.vector; if False, returns tt cores as a list
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L843-L870
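A short sketch for unit, assuming the same import path; the check only asserts that the result contains a single 1, so it does not depend on the index convention used by _ind2sub:

    import numpy as np
    from tt.core.tools import unit   # assumed importable from the path above

    e = unit(3, d=4, j=7)                      # e_j on a 3x3x3x3 grid
    full = e.full().flatten(order='F')
    print(np.count_nonzero(full), full.sum())  # expected: 1 and 1.0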
oseledets/ttpy
tt/core/tools.py
IpaS
def IpaS(d, a, tt_instance=True):
    '''A special bidiagonal _matrix in the QTT-format
    M = IPAS(D, A)
    Generates I+a*S_{-1} _matrix in the QTT-format:
        1 0 0 0
        a 1 0 0
        0 a 1 0
        0 0 a 1
    Convenient for Crank-Nicolson and time gradient matrices
    '''
    if d == 1:
        M = _np.array([[1, 0], [a, 1]]).reshape((1, 2, 2, 1), order='F')
    else:
        M = [None] * d
        M[0] = _np.zeros((1, 2, 2, 2))
        M[0][0, :, :, 0] = _np.array([[1, 0], [a, 1]])
        M[0][0, :, :, 1] = _np.array([[0, a], [0, 0]])
        for i in xrange(1, d - 1):
            M[i] = _np.zeros((2, 2, 2, 2))
            M[i][:, :, 0, 0] = _np.eye(2)
            M[i][:, :, 1, 0] = _np.array([[0, 0], [1, 0]])
            M[i][:, :, 1, 1] = _np.array([[0, 1], [0, 0]])
        M[d - 1] = _np.zeros((2, 2, 2, 1))
        M[d - 1][:, :, 0, 0] = _np.eye(2)
        M[d - 1][:, :, 1, 0] = _np.array([[0, 0], [1, 0]])
    if tt_instance:
        M = _matrix.matrix.from_list(M)
    return M
python
def IpaS(d, a, tt_instance=True):
    '''A special bidiagonal _matrix in the QTT-format
    M = IPAS(D, A)
    Generates I+a*S_{-1} _matrix in the QTT-format:
        1 0 0 0
        a 1 0 0
        0 a 1 0
        0 0 a 1
    Convenient for Crank-Nicolson and time gradient matrices
    '''
    if d == 1:
        M = _np.array([[1, 0], [a, 1]]).reshape((1, 2, 2, 1), order='F')
    else:
        M = [None] * d
        M[0] = _np.zeros((1, 2, 2, 2))
        M[0][0, :, :, 0] = _np.array([[1, 0], [a, 1]])
        M[0][0, :, :, 1] = _np.array([[0, a], [0, 0]])
        for i in xrange(1, d - 1):
            M[i] = _np.zeros((2, 2, 2, 2))
            M[i][:, :, 0, 0] = _np.eye(2)
            M[i][:, :, 1, 0] = _np.array([[0, 0], [1, 0]])
            M[i][:, :, 1, 1] = _np.array([[0, 1], [0, 0]])
        M[d - 1] = _np.zeros((2, 2, 2, 1))
        M[d - 1][:, :, 0, 0] = _np.eye(2)
        M[d - 1][:, :, 1, 0] = _np.array([[0, 0], [1, 0]])
    if tt_instance:
        M = _matrix.matrix.from_list(M)
    return M
A special bidiagonal _matrix in the QTT-format
M = IPAS(D, A)
Generates I+a*S_{-1} _matrix in the QTT-format:
    1 0 0 0
    a 1 0 0
    0 a 1 0
    0 0 a 1
Convenient for Crank-Nicolson and time gradient matrices
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L873-L901
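An illustrative sketch for IpaS; it assumes tt.matrix objects expose .full() returning the dense 2^d-by-2^d matrix, which is not shown in this record:

    import numpy as np
    from tt.core.tools import IpaS   # assumed importable from the path above

    d, a = 3, -1.0
    M = IpaS(d, a)                   # QTT form of I + a*S_{-1}
    dense = M.full()                 # assumption: tt.matrix has .full()
    print(np.allclose(np.diag(dense), 1.0))      # unit diagonal
    print(np.allclose(np.diag(dense, -1), a))    # constant subdiagonal a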
oseledets/ttpy
tt/core/tools.py
reshape
def reshape(tt_array, shape, eps=1e-14, rl=1, rr=1): ''' Reshape of the TT-vector [TT1]=TT_RESHAPE(TT,SZ) reshapes TT-vector or TT-matrix into another with mode sizes SZ, accuracy 1e-14 [TT1]=TT_RESHAPE(TT,SZ,EPS) reshapes TT-vector/matrix into another with mode sizes SZ and accuracy EPS [TT1]=TT_RESHAPE(TT,SZ,EPS, RL) reshapes TT-vector/matrix into another with mode size SZ and left tail rank RL [TT1]=TT_RESHAPE(TT,SZ,EPS, RL, RR) reshapes TT-vector/matrix into another with mode size SZ and tail ranks RL*RR Reshapes TT-vector/matrix into a new one, with dimensions specified by SZ. If the i_nput is TT-matrix, SZ must have the sizes for both modes, so it is a _matrix if sizes d2-by-2. If the i_nput is TT-vector, SZ may be either a column or a row _vector. ''' tt1 = _cp.deepcopy(tt_array) sz = _cp.deepcopy(shape) ismatrix = False if isinstance(tt1, _matrix.matrix): d1 = tt1.tt.d d2 = sz.shape[0] ismatrix = True # The size should be [n,m] in R^{d x 2} restn2_n = sz[:, 0] restn2_m = sz[:, 1] sz_n = _cp.copy(sz[:, 0]) sz_m = _cp.copy(sz[:, 1]) n1_n = tt1.n n1_m = tt1.m # We will split/convolve using the _vector form anyway sz = _np.prod(sz, axis=1) tt1 = tt1.tt else: d1 = tt1.d d2 = len(sz) # Recompute sz to include r0,rd, # and the items of tt1 sz[0] = sz[0] * rl sz[d2 - 1] = sz[d2 - 1] * rr tt1.n[0] = tt1.n[0] * tt1.r[0] tt1.n[d1 - 1] = tt1.n[d1 - 1] * tt1.r[d1] if ismatrix: # in _matrix: 1st tail rank goes to the n-mode, last to the m-mode restn2_n[0] = restn2_n[0] * rl restn2_m[d2 - 1] = restn2_m[d2 - 1] * rr n1_n[0] = n1_n[0] * tt1.r[0] n1_m[d1 - 1] = n1_m[d1 - 1] * tt1.r[d1] tt1.r[0] = 1 tt1.r[d1] = 1 n1 = tt1.n assert _np.prod(n1) == _np.prod(sz), 'Reshape: incorrect sizes' needQRs = False if d2 > d1: needQRs = True if d2 <= d1: i2 = 0 n2 = _cp.deepcopy(sz) for i1 in range(d1): if n2[i2] == 1: i2 = i2 + 1 if i2 > d2: break if n2[i2] % n1[i1] == 0: n2[i2] = n2[i2] // n1[i1] else: needQRs = True break r1 = tt1.r tt1 = tt1.to_list(tt1) if needQRs: # We have to split some cores -> perform QRs for i in range(d1 - 1, 0, -1): cr = tt1[i] cr = _np.reshape(cr, (r1[i], n1[i] * r1[i + 1]), order='F') [cr, rv] = _np.linalg.qr(cr.T) # Size n*r2, r1new - r1nwe,r1 cr0 = tt1[i - 1] cr0 = _np.reshape(cr0, (r1[i - 1] * n1[i - 1], r1[i]), order='F') cr0 = _np.dot(cr0, rv.T) # r0*n0, r1new r1[i] = cr.shape[1] cr0 = _np.reshape(cr0, (r1[i - 1], n1[i - 1], r1[i]), order='F') cr = _np.reshape(cr.T, (r1[i], n1[i], r1[i + 1]), order='F') tt1[i] = cr tt1[i - 1] = cr0 r2 = _np.ones(d2 + 1, dtype=_np.int32) i1 = 0 # Working index in tt1 i2 = 0 # Working index in tt2 core2 = _np.zeros((0)) curcr2 = 1 restn2 = sz n2 = _np.ones(d2, dtype=_np.int32) if ismatrix: n2_n = _np.ones(d2, dtype=_np.int32) n2_m = _np.ones(d2, dtype=_np.int32) while i1 < d1: curcr1 = tt1[i1] if _gcd(restn2[i2], n1[i1]) == n1[i1]: # The whole core1 fits to core2. 
Convolve it if (i1 < d1 - 1) and (needQRs): # QR to the next core - for safety curcr1 = _np.reshape( curcr1, (r1[i1] * n1[i1], r1[i1 + 1]), order='F') [curcr1, rv] = _np.linalg.qr(curcr1) curcr12 = tt1[i1 + 1] curcr12 = _np.reshape( curcr12, (r1[i1 + 1], n1[i1 + 1] * r1[i1 + 2]), order='F') curcr12 = _np.dot(rv, curcr12) r1[i1 + 1] = curcr12.shape[0] tt1[i1 + 1] = _np.reshape(curcr12, (r1[i1 + 1], n1[i1 + 1], r1[i1 + 2]), order='F') # Actually merge is here curcr1 = _np.reshape( curcr1, (r1[i1], n1[i1] * r1[i1 + 1]), order='F') curcr2 = _np.dot(curcr2, curcr1) # size r21*nold, dn*r22 if ismatrix: # Permute if we are working with tt_matrix curcr2 = _np.reshape(curcr2, (r2[i2], n2_n[i2], n2_m[i2], n1_n[ i1], n1_m[i1], r1[i1 + 1]), order='F') curcr2 = _np.transpose(curcr2, [0, 1, 3, 2, 4, 5]) # Update the "matrix" sizes n2_n[i2] = n2_n[i2] * n1_n[i1] n2_m[i2] = n2_m[i2] * n1_m[i1] restn2_n[i2] = restn2_n[i2] // n1_n[i1] restn2_m[i2] = restn2_m[i2] // n1_m[i1] r2[i2 + 1] = r1[i1 + 1] # Update the sizes of tt2 n2[i2] = n2[i2] * n1[i1] restn2[i2] = restn2[i2] // n1[i1] curcr2 = _np.reshape( curcr2, (r2[i2] * n2[i2], r2[i2 + 1]), order='F') i1 = i1 + 1 # current core1 is over else: if (_gcd(restn2[i2], n1[i1]) != 1) or (restn2[i2] == 1): # There exists a nontrivial divisor, or a singleton requested # Split it and convolve n12 = _gcd(restn2[i2], n1[i1]) if ismatrix: # Permute before the truncation # _matrix sizes we are able to split n12_n = _gcd(restn2_n[i2], n1_n[i1]) n12_m = _gcd(restn2_m[i2], n1_m[i1]) curcr1 = _np.reshape(curcr1, (r1[i1], n12_n, n1_n[i1] // n12_n, n12_m, n1_m[i1] // n12_m, r1[i1 + 1]), order='F') curcr1 = _np.transpose(curcr1, [0, 1, 3, 2, 4, 5]) # Update the _matrix sizes of tt2 and tt1 n2_n[i2] = n2_n[i2] * n12_n n2_m[i2] = n2_m[i2] * n12_m restn2_n[i2] = restn2_n[i2] // n12_n restn2_m[i2] = restn2_m[i2] // n12_m n1_n[i1] = n1_n[i1] // n12_n n1_m[i1] = n1_m[i1] // n12_m curcr1 = _np.reshape( curcr1, (r1[i1] * n12, (n1[i1] // n12) * r1[i1 + 1]), order='F') [u, s, v] = _np.linalg.svd(curcr1, full_matrices=False) r = _my_chop2(s, eps * _np.linalg.norm(s) / (d2 - 1) ** 0.5) u = u[:, :r] v = v.T v = v[:, :r] * s[:r] u = _np.reshape(u, (r1[i1], n12 * r), order='F') # u is our admissible chunk, merge it to core2 curcr2 = _np.dot(curcr2, u) # size r21*nold, dn*r22 r2[i2 + 1] = r # Update the sizes of tt2 n2[i2] = n2[i2] * n12 restn2[i2] = restn2[i2] // n12 curcr2 = _np.reshape( curcr2, (r2[i2] * n2[i2], r2[i2 + 1]), order='F') r1[i1] = r # and tt1 n1[i1] = n1[i1] // n12 # keep v in tt1 for next operations curcr1 = _np.reshape( v.T, (r1[i1], n1[i1], r1[i1 + 1]), order='F') tt1[i1] = curcr1 else: # Bad case. 
We have to merge cores of tt1 until a common # divisor appears i1new = i1 + 1 curcr1 = _np.reshape( curcr1, (r1[i1] * n1[i1], r1[i1 + 1]), order='F') while (_gcd(restn2[i2], n1[i1]) == 1) and (i1new < d1): cr1new = tt1[i1new] cr1new = _np.reshape( cr1new, (r1[i1new], n1[i1new] * r1[i1new + 1]), order='F') # size r1(i1)*n1(i1), n1new*r1new curcr1 = _np.dot(curcr1, cr1new) if ismatrix: # Permutes and _matrix size updates curcr1 = _np.reshape(curcr1, (r1[i1], n1_n[i1], n1_m[i1], n1_n[ i1new], n1_m[i1new], r1[i1new + 1]), order='F') curcr1 = _np.transpose(curcr1, [0, 1, 3, 2, 4, 5]) n1_n[i1] = n1_n[i1] * n1_n[i1new] n1_m[i1] = n1_m[i1] * n1_m[i1new] n1[i1] = n1[i1] * n1[i1new] curcr1 = _np.reshape( curcr1, (r1[i1] * n1[i1], r1[i1new + 1]), order='F') i1new = i1new + 1 # Inner cores merged => squeeze tt1 data n1 = _np.concatenate((n1[:i1], n1[i1new:])) r1 = _np.concatenate((r1[:i1], r1[i1new:])) tt1[i] = _np.reshape( curcr1, (r1[i1], n1[i1], r1[i1new]), order='F') tt1 = tt1[:i1] + tt1[i1new:] d1 = len(n1) if (restn2[i2] == 1) and ((i1 >= d1) or ((i1 < d1) and (n1[i1] != 1))): # The core of tt2 is finished # The second condition prevents core2 from finishing until we # squeeze all tailing singletons in tt1. curcr2 = curcr2.flatten(order='F') core2 = _np.concatenate((core2, curcr2)) i2 = i2 + 1 # Start new core2 curcr2 = 1 # If we have been asked for singletons - just add them while (i2 < d2): core2 = _np.concatenate((core2, _np.ones(1))) r2[i2] = 1 i2 = i2 + 1 tt2 = ones(2, 1) # dummy tensor tt2.d = d2 tt2.n = n2 tt2.r = r2 tt2.core = core2 tt2.ps = _np.int32(_np.cumsum(_np.concatenate((_np.ones(1), r2[:-1] * n2 * r2[1:])))) tt2.n[0] = tt2.n[0] // rl tt2.n[d2 - 1] = tt2.n[d2 - 1] // rr tt2.r[0] = rl tt2.r[d2] = rr if ismatrix: ttt = eye(1, 1) # dummy tt _matrix ttt.n = sz_n ttt.m = sz_m ttt.tt = tt2 return ttt else: return tt2
python
def reshape(tt_array, shape, eps=1e-14, rl=1, rr=1): ''' Reshape of the TT-vector [TT1]=TT_RESHAPE(TT,SZ) reshapes TT-vector or TT-matrix into another with mode sizes SZ, accuracy 1e-14 [TT1]=TT_RESHAPE(TT,SZ,EPS) reshapes TT-vector/matrix into another with mode sizes SZ and accuracy EPS [TT1]=TT_RESHAPE(TT,SZ,EPS, RL) reshapes TT-vector/matrix into another with mode size SZ and left tail rank RL [TT1]=TT_RESHAPE(TT,SZ,EPS, RL, RR) reshapes TT-vector/matrix into another with mode size SZ and tail ranks RL*RR Reshapes TT-vector/matrix into a new one, with dimensions specified by SZ. If the i_nput is TT-matrix, SZ must have the sizes for both modes, so it is a _matrix if sizes d2-by-2. If the i_nput is TT-vector, SZ may be either a column or a row _vector. ''' tt1 = _cp.deepcopy(tt_array) sz = _cp.deepcopy(shape) ismatrix = False if isinstance(tt1, _matrix.matrix): d1 = tt1.tt.d d2 = sz.shape[0] ismatrix = True # The size should be [n,m] in R^{d x 2} restn2_n = sz[:, 0] restn2_m = sz[:, 1] sz_n = _cp.copy(sz[:, 0]) sz_m = _cp.copy(sz[:, 1]) n1_n = tt1.n n1_m = tt1.m # We will split/convolve using the _vector form anyway sz = _np.prod(sz, axis=1) tt1 = tt1.tt else: d1 = tt1.d d2 = len(sz) # Recompute sz to include r0,rd, # and the items of tt1 sz[0] = sz[0] * rl sz[d2 - 1] = sz[d2 - 1] * rr tt1.n[0] = tt1.n[0] * tt1.r[0] tt1.n[d1 - 1] = tt1.n[d1 - 1] * tt1.r[d1] if ismatrix: # in _matrix: 1st tail rank goes to the n-mode, last to the m-mode restn2_n[0] = restn2_n[0] * rl restn2_m[d2 - 1] = restn2_m[d2 - 1] * rr n1_n[0] = n1_n[0] * tt1.r[0] n1_m[d1 - 1] = n1_m[d1 - 1] * tt1.r[d1] tt1.r[0] = 1 tt1.r[d1] = 1 n1 = tt1.n assert _np.prod(n1) == _np.prod(sz), 'Reshape: incorrect sizes' needQRs = False if d2 > d1: needQRs = True if d2 <= d1: i2 = 0 n2 = _cp.deepcopy(sz) for i1 in range(d1): if n2[i2] == 1: i2 = i2 + 1 if i2 > d2: break if n2[i2] % n1[i1] == 0: n2[i2] = n2[i2] // n1[i1] else: needQRs = True break r1 = tt1.r tt1 = tt1.to_list(tt1) if needQRs: # We have to split some cores -> perform QRs for i in range(d1 - 1, 0, -1): cr = tt1[i] cr = _np.reshape(cr, (r1[i], n1[i] * r1[i + 1]), order='F') [cr, rv] = _np.linalg.qr(cr.T) # Size n*r2, r1new - r1nwe,r1 cr0 = tt1[i - 1] cr0 = _np.reshape(cr0, (r1[i - 1] * n1[i - 1], r1[i]), order='F') cr0 = _np.dot(cr0, rv.T) # r0*n0, r1new r1[i] = cr.shape[1] cr0 = _np.reshape(cr0, (r1[i - 1], n1[i - 1], r1[i]), order='F') cr = _np.reshape(cr.T, (r1[i], n1[i], r1[i + 1]), order='F') tt1[i] = cr tt1[i - 1] = cr0 r2 = _np.ones(d2 + 1, dtype=_np.int32) i1 = 0 # Working index in tt1 i2 = 0 # Working index in tt2 core2 = _np.zeros((0)) curcr2 = 1 restn2 = sz n2 = _np.ones(d2, dtype=_np.int32) if ismatrix: n2_n = _np.ones(d2, dtype=_np.int32) n2_m = _np.ones(d2, dtype=_np.int32) while i1 < d1: curcr1 = tt1[i1] if _gcd(restn2[i2], n1[i1]) == n1[i1]: # The whole core1 fits to core2. 
Convolve it if (i1 < d1 - 1) and (needQRs): # QR to the next core - for safety curcr1 = _np.reshape( curcr1, (r1[i1] * n1[i1], r1[i1 + 1]), order='F') [curcr1, rv] = _np.linalg.qr(curcr1) curcr12 = tt1[i1 + 1] curcr12 = _np.reshape( curcr12, (r1[i1 + 1], n1[i1 + 1] * r1[i1 + 2]), order='F') curcr12 = _np.dot(rv, curcr12) r1[i1 + 1] = curcr12.shape[0] tt1[i1 + 1] = _np.reshape(curcr12, (r1[i1 + 1], n1[i1 + 1], r1[i1 + 2]), order='F') # Actually merge is here curcr1 = _np.reshape( curcr1, (r1[i1], n1[i1] * r1[i1 + 1]), order='F') curcr2 = _np.dot(curcr2, curcr1) # size r21*nold, dn*r22 if ismatrix: # Permute if we are working with tt_matrix curcr2 = _np.reshape(curcr2, (r2[i2], n2_n[i2], n2_m[i2], n1_n[ i1], n1_m[i1], r1[i1 + 1]), order='F') curcr2 = _np.transpose(curcr2, [0, 1, 3, 2, 4, 5]) # Update the "matrix" sizes n2_n[i2] = n2_n[i2] * n1_n[i1] n2_m[i2] = n2_m[i2] * n1_m[i1] restn2_n[i2] = restn2_n[i2] // n1_n[i1] restn2_m[i2] = restn2_m[i2] // n1_m[i1] r2[i2 + 1] = r1[i1 + 1] # Update the sizes of tt2 n2[i2] = n2[i2] * n1[i1] restn2[i2] = restn2[i2] // n1[i1] curcr2 = _np.reshape( curcr2, (r2[i2] * n2[i2], r2[i2 + 1]), order='F') i1 = i1 + 1 # current core1 is over else: if (_gcd(restn2[i2], n1[i1]) != 1) or (restn2[i2] == 1): # There exists a nontrivial divisor, or a singleton requested # Split it and convolve n12 = _gcd(restn2[i2], n1[i1]) if ismatrix: # Permute before the truncation # _matrix sizes we are able to split n12_n = _gcd(restn2_n[i2], n1_n[i1]) n12_m = _gcd(restn2_m[i2], n1_m[i1]) curcr1 = _np.reshape(curcr1, (r1[i1], n12_n, n1_n[i1] // n12_n, n12_m, n1_m[i1] // n12_m, r1[i1 + 1]), order='F') curcr1 = _np.transpose(curcr1, [0, 1, 3, 2, 4, 5]) # Update the _matrix sizes of tt2 and tt1 n2_n[i2] = n2_n[i2] * n12_n n2_m[i2] = n2_m[i2] * n12_m restn2_n[i2] = restn2_n[i2] // n12_n restn2_m[i2] = restn2_m[i2] // n12_m n1_n[i1] = n1_n[i1] // n12_n n1_m[i1] = n1_m[i1] // n12_m curcr1 = _np.reshape( curcr1, (r1[i1] * n12, (n1[i1] // n12) * r1[i1 + 1]), order='F') [u, s, v] = _np.linalg.svd(curcr1, full_matrices=False) r = _my_chop2(s, eps * _np.linalg.norm(s) / (d2 - 1) ** 0.5) u = u[:, :r] v = v.T v = v[:, :r] * s[:r] u = _np.reshape(u, (r1[i1], n12 * r), order='F') # u is our admissible chunk, merge it to core2 curcr2 = _np.dot(curcr2, u) # size r21*nold, dn*r22 r2[i2 + 1] = r # Update the sizes of tt2 n2[i2] = n2[i2] * n12 restn2[i2] = restn2[i2] // n12 curcr2 = _np.reshape( curcr2, (r2[i2] * n2[i2], r2[i2 + 1]), order='F') r1[i1] = r # and tt1 n1[i1] = n1[i1] // n12 # keep v in tt1 for next operations curcr1 = _np.reshape( v.T, (r1[i1], n1[i1], r1[i1 + 1]), order='F') tt1[i1] = curcr1 else: # Bad case. 
We have to merge cores of tt1 until a common # divisor appears i1new = i1 + 1 curcr1 = _np.reshape( curcr1, (r1[i1] * n1[i1], r1[i1 + 1]), order='F') while (_gcd(restn2[i2], n1[i1]) == 1) and (i1new < d1): cr1new = tt1[i1new] cr1new = _np.reshape( cr1new, (r1[i1new], n1[i1new] * r1[i1new + 1]), order='F') # size r1(i1)*n1(i1), n1new*r1new curcr1 = _np.dot(curcr1, cr1new) if ismatrix: # Permutes and _matrix size updates curcr1 = _np.reshape(curcr1, (r1[i1], n1_n[i1], n1_m[i1], n1_n[ i1new], n1_m[i1new], r1[i1new + 1]), order='F') curcr1 = _np.transpose(curcr1, [0, 1, 3, 2, 4, 5]) n1_n[i1] = n1_n[i1] * n1_n[i1new] n1_m[i1] = n1_m[i1] * n1_m[i1new] n1[i1] = n1[i1] * n1[i1new] curcr1 = _np.reshape( curcr1, (r1[i1] * n1[i1], r1[i1new + 1]), order='F') i1new = i1new + 1 # Inner cores merged => squeeze tt1 data n1 = _np.concatenate((n1[:i1], n1[i1new:])) r1 = _np.concatenate((r1[:i1], r1[i1new:])) tt1[i] = _np.reshape( curcr1, (r1[i1], n1[i1], r1[i1new]), order='F') tt1 = tt1[:i1] + tt1[i1new:] d1 = len(n1) if (restn2[i2] == 1) and ((i1 >= d1) or ((i1 < d1) and (n1[i1] != 1))): # The core of tt2 is finished # The second condition prevents core2 from finishing until we # squeeze all tailing singletons in tt1. curcr2 = curcr2.flatten(order='F') core2 = _np.concatenate((core2, curcr2)) i2 = i2 + 1 # Start new core2 curcr2 = 1 # If we have been asked for singletons - just add them while (i2 < d2): core2 = _np.concatenate((core2, _np.ones(1))) r2[i2] = 1 i2 = i2 + 1 tt2 = ones(2, 1) # dummy tensor tt2.d = d2 tt2.n = n2 tt2.r = r2 tt2.core = core2 tt2.ps = _np.int32(_np.cumsum(_np.concatenate((_np.ones(1), r2[:-1] * n2 * r2[1:])))) tt2.n[0] = tt2.n[0] // rl tt2.n[d2 - 1] = tt2.n[d2 - 1] // rr tt2.r[0] = rl tt2.r[d2] = rr if ismatrix: ttt = eye(1, 1) # dummy tt _matrix ttt.n = sz_n ttt.m = sz_m ttt.tt = tt2 return ttt else: return tt2
Reshape of the TT-vector

[TT1]=TT_RESHAPE(TT,SZ) reshapes TT-vector or TT-matrix into another
    with mode sizes SZ, accuracy 1e-14
[TT1]=TT_RESHAPE(TT,SZ,EPS) reshapes TT-vector/matrix into another
    with mode sizes SZ and accuracy EPS
[TT1]=TT_RESHAPE(TT,SZ,EPS, RL) reshapes TT-vector/matrix into another
    with mode size SZ and left tail rank RL
[TT1]=TT_RESHAPE(TT,SZ,EPS, RL, RR) reshapes TT-vector/matrix into another
    with mode size SZ and tail ranks RL*RR

Reshapes TT-vector/matrix into a new one, with dimensions specified by SZ.
If the input is a TT-matrix, SZ must have the sizes for both modes,
so it is a _matrix of size d2-by-2.
If the input is a TT-vector, SZ may be either a column or a row _vector.
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L904-L1164
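A usage sketch for reshape, assuming it and ones (which the record's own code calls) are importable from tt/core/tools.py; it regroups a 2^12 QTT vector of ones into four modes of size 8:

    import numpy as np
    from tt.core.tools import reshape as tt_reshape, ones   # assumed imports

    x = ones(2, 12)                                          # 2x2x...x2 vector of ones
    y = tt_reshape(x, np.array([8, 8, 8, 8], dtype=np.int32))
    print(y.n)                                               # expected: [8 8 8 8]
    print(np.allclose(y.full().flatten(order='F'),
                      x.full().flatten(order='F')))          # same entries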
oseledets/ttpy
tt/core/tools.py
permute
def permute(x, order, eps=None, return_cores=False): ''' Permute dimensions (python translation of original matlab code) Y = permute(X, ORDER, EPS) permutes the dimensions of the TT-tensor X according to ORDER, delivering a result at relative accuracy EPS. This function is equivalent to Y = tt_tensor(permute(reshape(full(X), X.n'),ORDER), EPS) but avoids the conversion to the full format. Simon Etter, Summer 2015 Seminar of Applied Mathematics, ETH Zurich TT-Toolbox 2.2, 2009-2012 This is TT Toolbox, written by Ivan Oseledets et al. Institute of Numerical Mathematics, Moscow, Russia webpage: http://spring.inm.ras.ru/osel For all questions, bugs and suggestions please mail [email protected] --------------------------- This code basically performs insertion sort on the TT dimensions: for k = 2:d Bubble the kth dimension to the right (according to ORDER) position in the first 1:k dimensions. The current code could be optimised at the following places: - Instead of initially orthogonalising with respect to the first two vertices, orthogonalise directly with respect to the first inversion. - When performing the SVD, check on which side of the current position the next swap will occur and directly establish the appropriate orthogonality (current implementation always assumes we move left). Both changes reduce the number of QRs by at most O(d) and are therefore likely to produce negligible speedup while rendering the code more complicated. ''' def _reshape(tensor, shape): return _np.reshape(tensor, shape, order='F') # Parse input if eps is None: eps = _np.spacing(1) cores = _vector.vector.to_list(x) d = _cp.deepcopy(x.d) n = _cp.deepcopy(x.n) r = _cp.deepcopy(x.r) idx = _np.empty(len(order)) idx[order] = _np.arange(len(order)) eps /= d ** 1.5 # ^Numerical evidence suggests that eps = eps/d may be sufficient, however I can only prove correctness # for this choice of global-to-local conversion factor. 
assert len(order) > d - 1, 'ORDER must have at least D elements for a D-dimensional tensor' # RL-orthogonalise x for kk in xrange(d - 1, 1, -1): ########################################## new_shape = [r[kk], n[kk] * r[kk + 1]] Q, R = _np.linalg.qr(_reshape(cores[kk], new_shape).T) tr = min(new_shape) cores[kk] = _reshape(Q.T, [tr, n[kk], r[kk + 1]]) tmp = _reshape(cores[kk - 1], [r[kk - 1] * n[kk - 1], r[kk]]) tmp = _np.dot(tmp, R.T) cores[kk - 1] = _reshape(tmp, [r[kk - 1], n[kk - 1], tr]) r[kk] = tr k = 0 while (True): # Find next inversion nk = k while (nk < d - 1) and (idx[nk] < idx[nk + 1]): nk += 1 if (nk == d - 1): break # Move orthogonal centre there for kk in xrange(k, nk - 1): ############# new_shape = [r[kk] * n[kk], r[kk + 1]] Q, R = _np.linalg.qr(_reshape(cores[kk], new_shape)) tr = min(new_shape) new_shape = [r[kk], n[kk], tr] cores[kk] = _reshape(Q, new_shape) tmp = _reshape(cores[kk + 1], [r[kk + 1], n[kk + 1] * r[kk + 2]]) tmp = _np.dot(R, tmp) cores[kk + 1] = _reshape(tmp, [tr, n[kk + 1], r[kk + 2]]) r[kk + 1] = tr k = nk # Swap dimensions tmp = _reshape(cores[k], [r[k] * n[k], r[k + 1]]) tmp = _np.dot(tmp, _reshape(cores[k + 1], [r[k + 1], n[k + 1] * r[k + 2]])) c = _reshape(tmp, [r[k], n[k], n[k + 1], r[k + 2]]) c = _np.transpose(c, [0, 2, 1, 3]) tmp = _reshape(c, [r[k] * n[k + 1], n[k] * r[k + 2]]) U, S, Vt = _np.linalg.svd(tmp, full_matrices=False) r[k + 1] = max(_my_chop2(S, _np.linalg.norm(S) * eps), 1) lenS = len(S) tmp = U[:, :lenS] * S # multiplication by diagonal matrix cores[k] = _reshape(tmp[:, :r[k + 1]], [r[k], n[k + 1], r[k + 1]]) cores[k + 1] = _reshape(Vt[:r[k + 1], :], [r[k + 1], n[k], r[k + 2]]) idx[[k, k + 1]] = idx[[k + 1, k]] n[[k, k + 1]] = n[[k + 1, k]] k = max(k - 1, 0) ################## # Parse output if return_cores: return cores return _vector.vector.from_list(cores)
python
def permute(x, order, eps=None, return_cores=False): ''' Permute dimensions (python translation of original matlab code) Y = permute(X, ORDER, EPS) permutes the dimensions of the TT-tensor X according to ORDER, delivering a result at relative accuracy EPS. This function is equivalent to Y = tt_tensor(permute(reshape(full(X), X.n'),ORDER), EPS) but avoids the conversion to the full format. Simon Etter, Summer 2015 Seminar of Applied Mathematics, ETH Zurich TT-Toolbox 2.2, 2009-2012 This is TT Toolbox, written by Ivan Oseledets et al. Institute of Numerical Mathematics, Moscow, Russia webpage: http://spring.inm.ras.ru/osel For all questions, bugs and suggestions please mail [email protected] --------------------------- This code basically performs insertion sort on the TT dimensions: for k = 2:d Bubble the kth dimension to the right (according to ORDER) position in the first 1:k dimensions. The current code could be optimised at the following places: - Instead of initially orthogonalising with respect to the first two vertices, orthogonalise directly with respect to the first inversion. - When performing the SVD, check on which side of the current position the next swap will occur and directly establish the appropriate orthogonality (current implementation always assumes we move left). Both changes reduce the number of QRs by at most O(d) and are therefore likely to produce negligible speedup while rendering the code more complicated. ''' def _reshape(tensor, shape): return _np.reshape(tensor, shape, order='F') # Parse input if eps is None: eps = _np.spacing(1) cores = _vector.vector.to_list(x) d = _cp.deepcopy(x.d) n = _cp.deepcopy(x.n) r = _cp.deepcopy(x.r) idx = _np.empty(len(order)) idx[order] = _np.arange(len(order)) eps /= d ** 1.5 # ^Numerical evidence suggests that eps = eps/d may be sufficient, however I can only prove correctness # for this choice of global-to-local conversion factor. 
assert len(order) > d - 1, 'ORDER must have at least D elements for a D-dimensional tensor' # RL-orthogonalise x for kk in xrange(d - 1, 1, -1): ########################################## new_shape = [r[kk], n[kk] * r[kk + 1]] Q, R = _np.linalg.qr(_reshape(cores[kk], new_shape).T) tr = min(new_shape) cores[kk] = _reshape(Q.T, [tr, n[kk], r[kk + 1]]) tmp = _reshape(cores[kk - 1], [r[kk - 1] * n[kk - 1], r[kk]]) tmp = _np.dot(tmp, R.T) cores[kk - 1] = _reshape(tmp, [r[kk - 1], n[kk - 1], tr]) r[kk] = tr k = 0 while (True): # Find next inversion nk = k while (nk < d - 1) and (idx[nk] < idx[nk + 1]): nk += 1 if (nk == d - 1): break # Move orthogonal centre there for kk in xrange(k, nk - 1): ############# new_shape = [r[kk] * n[kk], r[kk + 1]] Q, R = _np.linalg.qr(_reshape(cores[kk], new_shape)) tr = min(new_shape) new_shape = [r[kk], n[kk], tr] cores[kk] = _reshape(Q, new_shape) tmp = _reshape(cores[kk + 1], [r[kk + 1], n[kk + 1] * r[kk + 2]]) tmp = _np.dot(R, tmp) cores[kk + 1] = _reshape(tmp, [tr, n[kk + 1], r[kk + 2]]) r[kk + 1] = tr k = nk # Swap dimensions tmp = _reshape(cores[k], [r[k] * n[k], r[k + 1]]) tmp = _np.dot(tmp, _reshape(cores[k + 1], [r[k + 1], n[k + 1] * r[k + 2]])) c = _reshape(tmp, [r[k], n[k], n[k + 1], r[k + 2]]) c = _np.transpose(c, [0, 2, 1, 3]) tmp = _reshape(c, [r[k] * n[k + 1], n[k] * r[k + 2]]) U, S, Vt = _np.linalg.svd(tmp, full_matrices=False) r[k + 1] = max(_my_chop2(S, _np.linalg.norm(S) * eps), 1) lenS = len(S) tmp = U[:, :lenS] * S # multiplication by diagonal matrix cores[k] = _reshape(tmp[:, :r[k + 1]], [r[k], n[k + 1], r[k + 1]]) cores[k + 1] = _reshape(Vt[:r[k + 1], :], [r[k + 1], n[k], r[k + 2]]) idx[[k, k + 1]] = idx[[k + 1, k]] n[[k, k + 1]] = n[[k + 1, k]] k = max(k - 1, 0) ################## # Parse output if return_cores: return cores return _vector.vector.from_list(cores)
Permute dimensions (python translation of original matlab code)

Y = permute(X, ORDER, EPS) permutes the dimensions of the TT-tensor X
according to ORDER, delivering a result at relative accuracy EPS. This
function is equivalent to
    Y = tt_tensor(permute(reshape(full(X), X.n'),ORDER), EPS)
but avoids the conversion to the full format.

Simon Etter, Summer 2015
Seminar of Applied Mathematics, ETH Zurich

TT-Toolbox 2.2, 2009-2012
This is TT Toolbox, written by Ivan Oseledets et al.
Institute of Numerical Mathematics, Moscow, Russia
webpage: http://spring.inm.ras.ru/osel
For all questions, bugs and suggestions please mail [email protected]

---------------------------
This code basically performs insertion sort on the TT dimensions:
    for k = 2:d
        Bubble the kth dimension to the right (according to ORDER)
        position in the first 1:k dimensions.

The current code could be optimised at the following places:
- Instead of initially orthogonalising with respect to the first two
  vertices, orthogonalise directly with respect to the first inversion.
- When performing the SVD, check on which side of the current position
  the next swap will occur and directly establish the appropriate
  orthogonality (current implementation always assumes we move left).
Both changes reduce the number of QRs by at most O(d) and are therefore
likely to produce negligible speedup while rendering the code more
complicated.
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/tools.py#L1167-L1273
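A sketch for permute, assuming tt.vector.from_list (the constructor used inside these records) is available at the package level; the chosen order is self-inverse, so the numpy comparison does not depend on which permutation convention is meant:

    import numpy as np
    import tt
    from tt.core.tools import permute   # assumed importable from the path above

    np.random.seed(0)
    n, r = [2, 3, 4, 5], [1, 2, 2, 2, 1]
    cores = [np.random.randn(r[k], n[k], r[k + 1]) for k in range(4)]
    x = tt.vector.from_list(cores)
    order = [1, 0, 3, 2]                 # swap modes pairwise
    y = permute(x, order, eps=1e-10)
    print(np.allclose(y.full(), np.transpose(x.full(), order)))   # expected: True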
oseledets/ttpy
tt/optimize/tt_min.py
min_func
def min_func(fun, bounds_min, bounds_max, d=None, rmax=10, n0=64, nswp=10, verb=True, smooth_fun=None): """Find (approximate) minimal value of the function on a d-dimensional grid.""" if d is None: d = len(bounds_min) a = np.asanyarray(bounds_min).copy() b = np.asanyarray(bounds_max).copy() else: a = np.ones(d) * bounds_min b = np.ones(d) * bounds_max if smooth_fun is None: smooth_fun = lambda p, lam: (math.pi / 2 - np.arctan(p - lam)) #smooth_fun = lambda p, lam: np.exp(-10*(p - lam)) # We do not need to store the cores, only the interfaces! Rx = [[]] * (d + 1) # Python list for the interfaces Rx[0] = np.ones((1, 1)) Rx[d] = np.ones((1, 1)) Jy = [np.empty(0, dtype=np.int)] * (d + 1) ry = rmax * np.ones(d + 1, dtype=np.int) ry[0] = 1 ry[d] = 1 n = n0 * np.ones(d, dtype=np.int) fun_evals = 0 grid = [np.reshape(np.linspace(a[i], b[i], n[i]), (n[i], 1)) for i in xrange(d)] for i in xrange(d - 1): #cr1 = y[i] ry[i + 1] = min(ry[i + 1], n[i] * ry[i]) cr1 = np.random.randn(ry[i], n[i], ry[i + 1]) cr1 = reshape(cr1, (ry[i] * n[i], ry[i + 1])) q, r = np.linalg.qr(cr1) ind = maxvol(q) w1 = mkron(np.ones((n[i], 1), dtype=np.int), Jy[i]) w2 = mkron(grid[i], np.ones((ry[i], 1), dtype=np.int)) Jy[i + 1] = np.hstack((w1, w2)) Jy[i + 1] = reshape(Jy[i + 1], (ry[i] * n[i], -1)) Jy[i + 1] = Jy[i + 1][ind, :] # Jy{i+1} = [kron(ones(n(i),1), Jy{i}), kron((1:n(i))', ones(ry(i),1))]; # Jy{i+1} = Jy{i+1}(ind,:); swp = 0 dirn = -1 i = d - 1 lm = float('Inf') while swp < nswp: # Right-to-left sweep # The idea: compute the current core; compute the function of it; # Shift locally or globally? Local shift would be the first try # Compute the current core if np.size(Jy[i]) == 0: w1 = np.zeros((ry[i] * n[i] * ry[i + 1], 0), dtype=np.int) else: w1 = mkron(np.ones((n[i] * ry[i + 1], 1), dtype=np.int), Jy[i]) w2 = mkron(mkron(np.ones((ry[i + 1], 1), dtype=np.int), grid[i]), np.ones((ry[i], 1), dtype=np.int)) if np.size(Jy[i + 1]) == 0: w3 = np.zeros((ry[i] * n[i] * ry[i + 1], 0), dtype=np.int) else: w3 = mkron(Jy[i + 1], np.ones((ry[i] * n[i], 1), dtype=np.int)) J = np.hstack((w1, w2, w3)) # Just add some random indices to J, which is rnr x d, need to make rn (r + r0) x add, # i.e., just generate random r, random n and random multiindex cry = fun(J) fun_evals += cry.size cry = reshape(cry, (ry[i], n[i], ry[i + 1])) min_cur = np.min(cry.flatten("F")) ind_cur = np.argmin(cry.flatten("F")) if lm > min_cur: lm = min_cur x_full = J[ind_cur, :] val = fun(x_full) if verb: print('New record:', val, 'Point:', x_full, 'fevals:', fun_evals) cry = smooth_fun(cry, lm) if (dirn < 0 and i > 0): cry = reshape(cry, (ry[i], n[i] * ry[i + 1])) cry = cry.T #q, r = np.linalg.qr(cry) u, s, v = mysvd(cry, full_matrices=False) ry[i] = min(ry[i], rmax) q = u[:, :ry[i]] ind = rect_maxvol(q)[0] # maxvol(q) ry[i] = ind.size w1 = mkron(np.ones((ry[i + 1], 1), dtype=np.int), grid[i]) if np.size(Jy[i + 1]) == 0: w2 = np.zeros((n[i] * ry[i + 1], 0), dtype=np.int) else: w2 = mkron(Jy[i + 1], np.ones((n[i], 1), dtype=np.int)) Jy[i] = np.hstack((w1, w2)) Jy[i] = reshape(Jy[i], (n[i] * ry[i + 1], -1)) Jy[i] = Jy[i][ind, :] if (dirn > 0 and i < d - 1): cry = reshape(cry, (ry[i] * n[i], ry[i + 1])) q, r = np.linalg.qr(cry) #ind = maxvol(q) ind = rect_maxvol(q)[0] ry[i + 1] = ind.size w1 = mkron(np.ones((n[i], 1), dtype=np.int), Jy[i]) w2 = mkron(grid[i], np.ones((ry[i], 1), dtype=np.int)) Jy[i + 1] = np.hstack((w1, w2)) Jy[i + 1] = reshape(Jy[i + 1], (ry[i] * n[i], -1)) Jy[i + 1] = Jy[i + 1][ind, :] i += dirn if i == d or i == -1: dirn = -dirn i += dirn swp 
= swp + 1 return val, x_full
python
def min_func(fun, bounds_min, bounds_max, d=None, rmax=10, n0=64, nswp=10, verb=True, smooth_fun=None): """Find (approximate) minimal value of the function on a d-dimensional grid.""" if d is None: d = len(bounds_min) a = np.asanyarray(bounds_min).copy() b = np.asanyarray(bounds_max).copy() else: a = np.ones(d) * bounds_min b = np.ones(d) * bounds_max if smooth_fun is None: smooth_fun = lambda p, lam: (math.pi / 2 - np.arctan(p - lam)) #smooth_fun = lambda p, lam: np.exp(-10*(p - lam)) # We do not need to store the cores, only the interfaces! Rx = [[]] * (d + 1) # Python list for the interfaces Rx[0] = np.ones((1, 1)) Rx[d] = np.ones((1, 1)) Jy = [np.empty(0, dtype=np.int)] * (d + 1) ry = rmax * np.ones(d + 1, dtype=np.int) ry[0] = 1 ry[d] = 1 n = n0 * np.ones(d, dtype=np.int) fun_evals = 0 grid = [np.reshape(np.linspace(a[i], b[i], n[i]), (n[i], 1)) for i in xrange(d)] for i in xrange(d - 1): #cr1 = y[i] ry[i + 1] = min(ry[i + 1], n[i] * ry[i]) cr1 = np.random.randn(ry[i], n[i], ry[i + 1]) cr1 = reshape(cr1, (ry[i] * n[i], ry[i + 1])) q, r = np.linalg.qr(cr1) ind = maxvol(q) w1 = mkron(np.ones((n[i], 1), dtype=np.int), Jy[i]) w2 = mkron(grid[i], np.ones((ry[i], 1), dtype=np.int)) Jy[i + 1] = np.hstack((w1, w2)) Jy[i + 1] = reshape(Jy[i + 1], (ry[i] * n[i], -1)) Jy[i + 1] = Jy[i + 1][ind, :] # Jy{i+1} = [kron(ones(n(i),1), Jy{i}), kron((1:n(i))', ones(ry(i),1))]; # Jy{i+1} = Jy{i+1}(ind,:); swp = 0 dirn = -1 i = d - 1 lm = float('Inf') while swp < nswp: # Right-to-left sweep # The idea: compute the current core; compute the function of it; # Shift locally or globally? Local shift would be the first try # Compute the current core if np.size(Jy[i]) == 0: w1 = np.zeros((ry[i] * n[i] * ry[i + 1], 0), dtype=np.int) else: w1 = mkron(np.ones((n[i] * ry[i + 1], 1), dtype=np.int), Jy[i]) w2 = mkron(mkron(np.ones((ry[i + 1], 1), dtype=np.int), grid[i]), np.ones((ry[i], 1), dtype=np.int)) if np.size(Jy[i + 1]) == 0: w3 = np.zeros((ry[i] * n[i] * ry[i + 1], 0), dtype=np.int) else: w3 = mkron(Jy[i + 1], np.ones((ry[i] * n[i], 1), dtype=np.int)) J = np.hstack((w1, w2, w3)) # Just add some random indices to J, which is rnr x d, need to make rn (r + r0) x add, # i.e., just generate random r, random n and random multiindex cry = fun(J) fun_evals += cry.size cry = reshape(cry, (ry[i], n[i], ry[i + 1])) min_cur = np.min(cry.flatten("F")) ind_cur = np.argmin(cry.flatten("F")) if lm > min_cur: lm = min_cur x_full = J[ind_cur, :] val = fun(x_full) if verb: print('New record:', val, 'Point:', x_full, 'fevals:', fun_evals) cry = smooth_fun(cry, lm) if (dirn < 0 and i > 0): cry = reshape(cry, (ry[i], n[i] * ry[i + 1])) cry = cry.T #q, r = np.linalg.qr(cry) u, s, v = mysvd(cry, full_matrices=False) ry[i] = min(ry[i], rmax) q = u[:, :ry[i]] ind = rect_maxvol(q)[0] # maxvol(q) ry[i] = ind.size w1 = mkron(np.ones((ry[i + 1], 1), dtype=np.int), grid[i]) if np.size(Jy[i + 1]) == 0: w2 = np.zeros((n[i] * ry[i + 1], 0), dtype=np.int) else: w2 = mkron(Jy[i + 1], np.ones((n[i], 1), dtype=np.int)) Jy[i] = np.hstack((w1, w2)) Jy[i] = reshape(Jy[i], (n[i] * ry[i + 1], -1)) Jy[i] = Jy[i][ind, :] if (dirn > 0 and i < d - 1): cry = reshape(cry, (ry[i] * n[i], ry[i + 1])) q, r = np.linalg.qr(cry) #ind = maxvol(q) ind = rect_maxvol(q)[0] ry[i + 1] = ind.size w1 = mkron(np.ones((n[i], 1), dtype=np.int), Jy[i]) w2 = mkron(grid[i], np.ones((ry[i], 1), dtype=np.int)) Jy[i + 1] = np.hstack((w1, w2)) Jy[i + 1] = reshape(Jy[i + 1], (ry[i] * n[i], -1)) Jy[i + 1] = Jy[i + 1][ind, :] i += dirn if i == d or i == -1: dirn = -dirn i += dirn swp 
= swp + 1 return val, x_full
Find (approximate) minimal value of the function on a d-dimensional grid.
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/optimize/tt_min.py#L29-L144
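A sketch for min_func; the objective must accept a 2D array of grid points (one row per point) and return one value per row, and axis=-1 keeps it valid for the single-point call made when a new record is found:

    import numpy as np
    from tt.optimize.tt_min import min_func   # assumed importable from the path above

    fun = lambda x: np.sum((np.asarray(x) - 0.3) ** 2, axis=-1)
    val, point = min_func(fun, -2.0, 2.0, d=4, n0=32, nswp=4, verb=False)
    print(val, point)   # expected: val near 0, point near 0.3 in every coordinate
                        # (up to the 32-point grid resolution)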
oseledets/ttpy
tt/optimize/tt_min.py
min_tens
def min_tens(tens, rmax=10, nswp=10, verb=True, smooth_fun=None): """Find (approximate) minimal element in a TT-tensor.""" if smooth_fun is None: smooth_fun = lambda p, lam: (math.pi / 2 - np.arctan(p - lam)) d = tens.d Rx = [[]] * (d + 1) # Python list for the interfaces Rx[0] = np.ones((1, 1)) Rx[d] = np.ones((1, 1)) Jy = [np.empty(0, dtype=np.int)] * (d + 1) ry = rmax * np.ones(d + 1, dtype=np.int) ry[0] = 1 ry[d] = 1 n = tens.n elements_seen = 0 phi_left = [np.empty(0)] * (d + 1) phi_left[0] = np.array([1]) phi_right = [np.empty(0)] * (d + 1) phi_right[d] = np.array([1]) cores = tt.tensor.to_list(tens) # Fill initial multiindex J randomly. grid = [np.reshape(range(n[i]), (n[i], 1)) for i in xrange(d)] for i in xrange(d - 1): ry[i + 1] = min(ry[i + 1], n[i] * ry[i]) ind = sorted(np.random.permutation(ry[i] * n[i])[0:ry[i + 1]]) w1 = mkron(np.ones((n[i], 1), dtype=np.int), Jy[i]) w2 = mkron(grid[i], np.ones((ry[i], 1), dtype=np.int)) Jy[i + 1] = np.hstack((w1, w2)) Jy[i + 1] = reshape(Jy[i + 1], (ry[i] * n[i], -1)) Jy[i + 1] = Jy[i + 1][ind, :] phi_left[i + 1] = np.tensordot(phi_left[i], cores[i], 1) phi_left[i + 1] = reshape(phi_left[i + 1], (ry[i] * n[i], -1)) phi_left[i + 1] = phi_left[i + 1][ind, :] swp = 0 dirn = -1 i = d - 1 lm = float('Inf') while swp < nswp: # Right-to-left sweep # The idea: compute the current core; compute the function of it; # Shift locally or globally? Local shift would be the first try # Compute the current core if np.size(Jy[i]) == 0: w1 = np.zeros((ry[i] * n[i] * ry[i + 1], 0), dtype=np.int) else: w1 = mkron(np.ones((n[i] * ry[i + 1], 1), dtype=np.int), Jy[i]) w2 = mkron(mkron(np.ones((ry[i + 1], 1), dtype=np.int), grid[i]), np.ones((ry[i], 1), dtype=np.int)) if np.size(Jy[i + 1]) == 0: w3 = np.zeros((ry[i] * n[i] * ry[i + 1], 0), dtype=np.int) else: w3 = mkron(Jy[i + 1], np.ones((ry[i] * n[i], 1), dtype=np.int)) J = np.hstack((w1, w2, w3)) phi_right[i] = np.tensordot(cores[i], phi_right[i + 1], 1) phi_right[i] = reshape(phi_right[i], (-1, n[i] * ry[i + 1])) cry = np.tensordot( phi_left[i], np.tensordot( cores[i], phi_right[ i + 1], 1), 1) elements_seen += cry.size cry = reshape(cry, (ry[i], n[i], ry[i + 1])) min_cur = np.min(cry.flatten("F")) ind_cur = np.argmin(cry.flatten("F")) if lm > min_cur: lm = min_cur x_full = J[ind_cur, :] val = tens[x_full] if verb: print('New record:', val, 'Point:', x_full, 'elements seen:', elements_seen) cry = smooth_fun(cry, lm) if dirn < 0 and i > 0: cry = reshape(cry, (ry[i], n[i] * ry[i + 1])) cry = cry.T #q, r = np.linalg.qr(cry) u, s, v = mysvd(cry, full_matrices=False) ry[i] = min(ry[i], rmax) q = u[:, :ry[i]] ind = rect_maxvol(q)[0] # maxvol(q) ry[i] = ind.size w1 = mkron(np.ones((ry[i + 1], 1), dtype=np.int), grid[i]) if np.size(Jy[i + 1]) == 0: w2 = np.zeros((n[i] * ry[i + 1], 0), dtype=np.int) else: w2 = mkron(Jy[i + 1], np.ones((n[i], 1), dtype=np.int)) Jy[i] = np.hstack((w1, w2)) Jy[i] = reshape(Jy[i], (n[i] * ry[i + 1], -1)) Jy[i] = Jy[i][ind, :] phi_right[i] = np.tensordot(cores[i], phi_right[i + 1], 1) phi_right[i] = reshape(phi_right[i], (-1, n[i] * ry[i + 1])) phi_right[i] = phi_right[i][:, ind] if dirn > 0 and i < d - 1: cry = reshape(cry, (ry[i] * n[i], ry[i + 1])) q, r = np.linalg.qr(cry) #ind = maxvol(q) ind = rect_maxvol(q)[0] ry[i + 1] = ind.size phi_left[i + 1] = np.tensordot(phi_left[i], cores[i], 1) phi_left[i + 1] = reshape(phi_left[i + 1], (ry[i] * n[i], -1)) phi_left[i + 1] = phi_left[i + 1][ind, :] w1 = mkron(np.ones((n[i], 1), dtype=np.int), Jy[i]) w2 = mkron(grid[i], np.ones((ry[i], 1), 
dtype=np.int)) Jy[i + 1] = np.hstack((w1, w2)) Jy[i + 1] = reshape(Jy[i + 1], (ry[i] * n[i], -1)) Jy[i + 1] = Jy[i + 1][ind, :] i += dirn if i == d or i == -1: dirn = -dirn i += dirn swp = swp + 1 return val, x_full
python
def min_tens(tens, rmax=10, nswp=10, verb=True, smooth_fun=None): """Find (approximate) minimal element in a TT-tensor.""" if smooth_fun is None: smooth_fun = lambda p, lam: (math.pi / 2 - np.arctan(p - lam)) d = tens.d Rx = [[]] * (d + 1) # Python list for the interfaces Rx[0] = np.ones((1, 1)) Rx[d] = np.ones((1, 1)) Jy = [np.empty(0, dtype=np.int)] * (d + 1) ry = rmax * np.ones(d + 1, dtype=np.int) ry[0] = 1 ry[d] = 1 n = tens.n elements_seen = 0 phi_left = [np.empty(0)] * (d + 1) phi_left[0] = np.array([1]) phi_right = [np.empty(0)] * (d + 1) phi_right[d] = np.array([1]) cores = tt.tensor.to_list(tens) # Fill initial multiindex J randomly. grid = [np.reshape(range(n[i]), (n[i], 1)) for i in xrange(d)] for i in xrange(d - 1): ry[i + 1] = min(ry[i + 1], n[i] * ry[i]) ind = sorted(np.random.permutation(ry[i] * n[i])[0:ry[i + 1]]) w1 = mkron(np.ones((n[i], 1), dtype=np.int), Jy[i]) w2 = mkron(grid[i], np.ones((ry[i], 1), dtype=np.int)) Jy[i + 1] = np.hstack((w1, w2)) Jy[i + 1] = reshape(Jy[i + 1], (ry[i] * n[i], -1)) Jy[i + 1] = Jy[i + 1][ind, :] phi_left[i + 1] = np.tensordot(phi_left[i], cores[i], 1) phi_left[i + 1] = reshape(phi_left[i + 1], (ry[i] * n[i], -1)) phi_left[i + 1] = phi_left[i + 1][ind, :] swp = 0 dirn = -1 i = d - 1 lm = float('Inf') while swp < nswp: # Right-to-left sweep # The idea: compute the current core; compute the function of it; # Shift locally or globally? Local shift would be the first try # Compute the current core if np.size(Jy[i]) == 0: w1 = np.zeros((ry[i] * n[i] * ry[i + 1], 0), dtype=np.int) else: w1 = mkron(np.ones((n[i] * ry[i + 1], 1), dtype=np.int), Jy[i]) w2 = mkron(mkron(np.ones((ry[i + 1], 1), dtype=np.int), grid[i]), np.ones((ry[i], 1), dtype=np.int)) if np.size(Jy[i + 1]) == 0: w3 = np.zeros((ry[i] * n[i] * ry[i + 1], 0), dtype=np.int) else: w3 = mkron(Jy[i + 1], np.ones((ry[i] * n[i], 1), dtype=np.int)) J = np.hstack((w1, w2, w3)) phi_right[i] = np.tensordot(cores[i], phi_right[i + 1], 1) phi_right[i] = reshape(phi_right[i], (-1, n[i] * ry[i + 1])) cry = np.tensordot( phi_left[i], np.tensordot( cores[i], phi_right[ i + 1], 1), 1) elements_seen += cry.size cry = reshape(cry, (ry[i], n[i], ry[i + 1])) min_cur = np.min(cry.flatten("F")) ind_cur = np.argmin(cry.flatten("F")) if lm > min_cur: lm = min_cur x_full = J[ind_cur, :] val = tens[x_full] if verb: print('New record:', val, 'Point:', x_full, 'elements seen:', elements_seen) cry = smooth_fun(cry, lm) if dirn < 0 and i > 0: cry = reshape(cry, (ry[i], n[i] * ry[i + 1])) cry = cry.T #q, r = np.linalg.qr(cry) u, s, v = mysvd(cry, full_matrices=False) ry[i] = min(ry[i], rmax) q = u[:, :ry[i]] ind = rect_maxvol(q)[0] # maxvol(q) ry[i] = ind.size w1 = mkron(np.ones((ry[i + 1], 1), dtype=np.int), grid[i]) if np.size(Jy[i + 1]) == 0: w2 = np.zeros((n[i] * ry[i + 1], 0), dtype=np.int) else: w2 = mkron(Jy[i + 1], np.ones((n[i], 1), dtype=np.int)) Jy[i] = np.hstack((w1, w2)) Jy[i] = reshape(Jy[i], (n[i] * ry[i + 1], -1)) Jy[i] = Jy[i][ind, :] phi_right[i] = np.tensordot(cores[i], phi_right[i + 1], 1) phi_right[i] = reshape(phi_right[i], (-1, n[i] * ry[i + 1])) phi_right[i] = phi_right[i][:, ind] if dirn > 0 and i < d - 1: cry = reshape(cry, (ry[i] * n[i], ry[i + 1])) q, r = np.linalg.qr(cry) #ind = maxvol(q) ind = rect_maxvol(q)[0] ry[i + 1] = ind.size phi_left[i + 1] = np.tensordot(phi_left[i], cores[i], 1) phi_left[i + 1] = reshape(phi_left[i + 1], (ry[i] * n[i], -1)) phi_left[i + 1] = phi_left[i + 1][ind, :] w1 = mkron(np.ones((n[i], 1), dtype=np.int), Jy[i]) w2 = mkron(grid[i], np.ones((ry[i], 1), 
dtype=np.int)) Jy[i + 1] = np.hstack((w1, w2)) Jy[i + 1] = reshape(Jy[i + 1], (ry[i] * n[i], -1)) Jy[i + 1] = Jy[i + 1][ind, :] i += dirn if i == d or i == -1: dirn = -dirn i += dirn swp = swp + 1 return val, x_full
Find (approximate) minimal element in a TT-tensor.
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/optimize/tt_min.py#L147-L261
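A sketch for min_tens on a tensor whose minimum is known by construction; tt.vector.from_list is used to build the input, and the exact minimum is only expected here because the tensor is rank 1:

    import numpy as np
    import tt
    from tt.optimize.tt_min import min_tens   # assumed importable from the path above

    n = [4, 4, 4, 4]
    # Rank-1 tensor A[i1,...,i4] = prod_k (i_k + 1); minimum 1.0 at (0, 0, 0, 0).
    cores = [np.arange(1.0, nk + 1.0).reshape(1, nk, 1) for nk in n]
    a = tt.vector.from_list(cores)
    val, idx = min_tens(a, rmax=4, nswp=4, verb=False)
    print(val, idx)   # expected: 1.0 at index (0, 0, 0, 0)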
oseledets/ttpy
tt/riemannian/riemannian.py
cores_orthogonalization_step
def cores_orthogonalization_step(coresX, dim, left_to_right=True):
    """TT-Tensor X orthogonalization step.

    The function can change the shape of some cores.
    """
    cc = coresX[dim]
    r1, n, r2 = cc.shape
    if left_to_right:
        # Left to right orthogonalization step.
        assert(0 <= dim < len(coresX) - 1)
        cc, rr = np.linalg.qr(reshape(cc, (-1, r2)))
        r2 = cc.shape[1]
        coresX[dim] = reshape(cc, (r1, n, r2))
        coresX[dim+1] = np.tensordot(rr, coresX[dim+1], 1)
    else:
        # Right to left orthogonalization step.
        assert(0 < dim < len(coresX))
        cc, rr = np.linalg.qr(reshape(cc, (r1, -1)).T)
        r1 = cc.shape[1]
        coresX[dim] = reshape(cc.T, (r1, n, r2))
        coresX[dim-1] = np.tensordot(coresX[dim-1], rr.T, 1)
    return coresX
python
def cores_orthogonalization_step(coresX, dim, left_to_right=True):
    """TT-Tensor X orthogonalization step.

    The function can change the shape of some cores.
    """
    cc = coresX[dim]
    r1, n, r2 = cc.shape
    if left_to_right:
        # Left to right orthogonalization step.
        assert(0 <= dim < len(coresX) - 1)
        cc, rr = np.linalg.qr(reshape(cc, (-1, r2)))
        r2 = cc.shape[1]
        coresX[dim] = reshape(cc, (r1, n, r2))
        coresX[dim+1] = np.tensordot(rr, coresX[dim+1], 1)
    else:
        # Right to left orthogonalization step.
        assert(0 < dim < len(coresX))
        cc, rr = np.linalg.qr(reshape(cc, (r1, -1)).T)
        r1 = cc.shape[1]
        coresX[dim] = reshape(cc.T, (r1, n, r2))
        coresX[dim-1] = np.tensordot(coresX[dim-1], rr.T, 1)
    return coresX
TT-Tensor X orthogonalization step. The function can change the shape of some cores.
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/riemannian/riemannian.py#L16-L37
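A sketch for cores_orthogonalization_step, assuming tt/riemannian/riemannian.py and its dependencies import; after a left-to-right step on core 0 the unfolded core has orthonormal columns and the represented tensor is unchanged:

    import numpy as np
    from tt.riemannian.riemannian import cores_orthogonalization_step  # assumed import

    np.random.seed(0)
    cores = [np.random.randn(1, 4, 3), np.random.randn(3, 4, 3), np.random.randn(3, 4, 1)]
    before = np.einsum('aib,bjc,ckd->ijk', *cores)      # dense 4x4x4 tensor
    cores = cores_orthogonalization_step(cores, 0, left_to_right=True)
    q = cores[0].reshape(-1, cores[0].shape[2], order='F')
    print(np.allclose(q.T.dot(q), np.eye(q.shape[1])))  # orthonormal columns
    after = np.einsum('aib,bjc,ckd->ijk', *cores)
    print(np.allclose(before, after))                   # same tensor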
oseledets/ttpy
tt/riemannian/riemannian.py
left
def left(X, i):
    """Compute the orthogonal matrix Q_{\leq i} as defined in [1]."""
    if i < 0:
        return np.ones([1, 1])
    answ = np.ones([1, 1])
    cores = tt.tensor.to_list(X)
    for dim in xrange(i+1):
        answ = np.tensordot(answ, cores[dim], 1)
    answ = reshape(answ, (-1, X.r[i+1]))
    return answ
python
def left(X, i):
    """Compute the orthogonal matrix Q_{\leq i} as defined in [1]."""
    if i < 0:
        return np.ones([1, 1])
    answ = np.ones([1, 1])
    cores = tt.tensor.to_list(X)
    for dim in xrange(i+1):
        answ = np.tensordot(answ, cores[dim], 1)
    answ = reshape(answ, (-1, X.r[i+1]))
    return answ
Compute the orthogonal matrix Q_{\leq i} as defined in [1].
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/riemannian/riemannian.py#L42-L51
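A shape check for left under the same import assumptions; Q_{<= i} stacks the first i+1 cores into a (n_0*...*n_i) x r_{i+1} matrix:

    import numpy as np
    import tt
    from tt.riemannian.riemannian import left   # assumed import

    np.random.seed(0)
    cores = [np.random.randn(1, 3, 2), np.random.randn(2, 3, 2), np.random.randn(2, 3, 1)]
    X = tt.vector.from_list(cores)
    print(left(X, 1).shape)   # expected: (9, 2), i.e. (3*3, r_2)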
oseledets/ttpy
tt/riemannian/riemannian.py
right
def right(X, i):
    """Compute the orthogonal matrix Q_{\geq i} as defined in [1]."""
    if i > X.d-1:
        return np.ones([1, 1])
    answ = np.ones([1, 1])
    cores = tt.tensor.to_list(X)
    for dim in xrange(X.d-1, i-1, -1):
        answ = np.tensordot(cores[dim], answ, 1)
    answ = reshape(answ, (X.r[i], -1))
    return answ.T
python
def right(X, i):
    """Compute the orthogonal matrix Q_{\geq i} as defined in [1]."""
    if i > X.d-1:
        return np.ones([1, 1])
    answ = np.ones([1, 1])
    cores = tt.tensor.to_list(X)
    for dim in xrange(X.d-1, i-1, -1):
        answ = np.tensordot(cores[dim], answ, 1)
    answ = reshape(answ, (X.r[i], -1))
    return answ.T
Compute the orthogonal matrix Q_{\geq i} as defined in [1].
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/riemannian/riemannian.py#L54-L63
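The matching shape check for right; the returned transpose is (n_i*...*n_{d-1}) x r_i:

    import numpy as np
    import tt
    from tt.riemannian.riemannian import right   # assumed import

    np.random.seed(0)
    cores = [np.random.randn(1, 3, 2), np.random.randn(2, 3, 2), np.random.randn(2, 3, 1)]
    X = tt.vector.from_list(cores)
    print(right(X, 1).shape)   # expected: (9, 2), i.e. (3*3, r_1)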
oseledets/ttpy
tt/riemannian/riemannian.py
unfolding
def unfolding(tens, i):
    """Compute the i-th unfolding of a tensor."""
    return reshape(tens.full(), (np.prod(tens.n[0:(i+1)]), -1))
python
def unfolding(tens, i):
    """Compute the i-th unfolding of a tensor."""
    return reshape(tens.full(), (np.prod(tens.n[0:(i+1)]), -1))
Compute the i-th unfolding of a tensor.
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/riemannian/riemannian.py#L66-L68
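A one-line check for unfolding under the same assumptions; modes 0..i become rows, the remaining modes become columns:

    import numpy as np
    import tt
    from tt.riemannian.riemannian import unfolding   # assumed import

    np.random.seed(0)
    cores = [np.random.randn(1, 2, 3), np.random.randn(3, 4, 2), np.random.randn(2, 5, 1)]
    X = tt.vector.from_list(cores)
    print(unfolding(X, 1).shape)   # expected: (8, 5), since 2*4 = 8 rows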
oseledets/ttpy
tt/riemannian/riemannian.py
_update_lhs
def _update_lhs(lhs, xCore, zCore, new_lhs):
    """ Function to be called from the project()"""
    # TODO: Use intermediate variable to use 5 nested loops instead of 6.
    r_old_x, n, r_x = xCore.shape
    num_obj, r_old_z, n, r_z = zCore.shape
    for idx in range(num_obj):
        for val in range(n):
            for alpha_old_z in range(r_old_z):
                for alpha_z in range(r_z):
                    for alpha_old_x in range(r_old_x):
                        for alpha_x in range(r_x):
                            curr_value = lhs[idx, alpha_old_x, alpha_old_z]
                            curr_value *= xCore[alpha_old_x, val, alpha_x]
                            curr_value *= zCore[idx, alpha_old_z, val, alpha_z]
                            new_lhs[idx, alpha_x, alpha_z] += curr_value
python
def _update_lhs(lhs, xCore, zCore, new_lhs):
    """ Function to be called from the project()"""
    # TODO: Use intermediate variable to use 5 nested loops instead of 6.
    r_old_x, n, r_x = xCore.shape
    num_obj, r_old_z, n, r_z = zCore.shape
    for idx in range(num_obj):
        for val in range(n):
            for alpha_old_z in range(r_old_z):
                for alpha_z in range(r_z):
                    for alpha_old_x in range(r_old_x):
                        for alpha_x in range(r_x):
                            curr_value = lhs[idx, alpha_old_x, alpha_old_z]
                            curr_value *= xCore[alpha_old_x, val, alpha_x]
                            curr_value *= zCore[idx, alpha_old_z, val, alpha_z]
                            new_lhs[idx, alpha_x, alpha_z] += curr_value
Function to be called from the project()
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/riemannian/riemannian.py#L75-L89
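The six nested loops above are a three-way contraction; the following numpy-only sketch (not repository code) states the same update as an einsum, which makes the index convention explicit:

    import numpy as np

    num_obj, r_old_x, r_old_z, r_x, r_z, n = 2, 3, 4, 5, 6, 7
    lhs = np.random.randn(num_obj, r_old_x, r_old_z)
    xCore = np.random.randn(r_old_x, n, r_x)
    zCore = np.random.randn(num_obj, r_old_z, n, r_z)

    # new_lhs[i, x, z] += sum_{a, b, v} lhs[i, a, b] * xCore[a, v, x] * zCore[i, b, v, z]
    new_lhs = np.einsum('iab,avx,ibvz->ixz', lhs, xCore, zCore)
    print(new_lhs.shape)   # (num_obj, r_x, r_z)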
oseledets/ttpy
tt/riemannian/riemannian.py
_update_rhs
def _update_rhs(curr_rhs, xCore, zCore, new_rhs):
    """ Function to be called from the project()"""
    # TODO: Use intermediate variable to use 5 nested loops instead of 6.
    r_x, n, r_old_x = xCore.shape
    num_obj, r_z, n, r_old_z = zCore.shape
    for idx in range(num_obj):
        for val in range(n):
            for alpha_old_z in range(r_old_z):
                for alpha_z in range(r_z):
                    for alpha_old_x in range(r_old_x):
                        for alpha_x in range(r_x):
                            curr_value = curr_rhs[idx, alpha_old_z, alpha_old_x]
                            curr_value *= xCore[alpha_x, val, alpha_old_x]
                            curr_value *= zCore[idx, alpha_z, val, alpha_old_z]
                            new_rhs[idx, alpha_z, alpha_x] += curr_value
python
def _update_rhs(curr_rhs, xCore, zCore, new_rhs):
    """ Function to be called from the project()"""
    # TODO: Use intermediate variable to use 5 nested loops instead of 6.
    r_x, n, r_old_x = xCore.shape
    num_obj, r_z, n, r_old_z = zCore.shape
    for idx in range(num_obj):
        for val in range(n):
            for alpha_old_z in range(r_old_z):
                for alpha_z in range(r_z):
                    for alpha_old_x in range(r_old_x):
                        for alpha_x in range(r_x):
                            curr_value = curr_rhs[idx, alpha_old_z, alpha_old_x]
                            curr_value *= xCore[alpha_x, val, alpha_old_x]
                            curr_value *= zCore[idx, alpha_z, val, alpha_old_z]
                            new_rhs[idx, alpha_z, alpha_x] += curr_value
Function to be called from the project()
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/riemannian/riemannian.py#L93-L107
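The mirrored contraction for _update_rhs, again as a numpy-only einsum sketch:

    import numpy as np

    num_obj, r_x, r_old_x, r_z, r_old_z, n = 2, 3, 4, 5, 6, 7
    curr_rhs = np.random.randn(num_obj, r_old_z, r_old_x)
    xCore = np.random.randn(r_x, n, r_old_x)
    zCore = np.random.randn(num_obj, r_z, n, r_old_z)

    # new_rhs[i, z, x] += sum_{a, b, v} curr_rhs[i, b, a] * xCore[x, v, a] * zCore[i, z, v, b]
    new_rhs = np.einsum('iba,xva,izvb->izx', curr_rhs, xCore, zCore)
    print(new_rhs.shape)   # (num_obj, r_z, r_x)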
oseledets/ttpy
tt/riemannian/riemannian.py
project
def project(X, Z, use_jit=False, debug=False): """ Project tensor Z on the tangent space of tensor X. X is a tensor in the TT format. Z can be a tensor in the TT format or a list of tensors (in this case the function computes projection of the sum off all tensors in the list: project(X, Z) = P_X(\sum_i Z_i) ). This function implements an algorithm from the paper [1], theorem 3.1. The jit version of the code is much faster when projecting a lot of tensors simultaneously (in other words Z is a list with many tensors). Returns a tensor in the TT format with the TT-ranks equal 2 * rank(Z). """ zArr = None if isinstance(Z, tt.vector): zArr = [Z] else: zArr = Z # Get rid of redundant ranks (they cause technical difficulties). X = X.round(eps=0) numDims, modeSize = X.d, X.n coresX = tt.tensor.to_list(X) coresZ = [None] * len(zArr) for idx in xrange(len(zArr)): assert(modeSize == zArr[idx].n).all() coresZ[idx] = tt.tensor.to_list(zArr[idx]) if not use_jit and len(zArr) > 10: print('Consider using use_jit=True option to speed up the projection ' 'process.') if use_jit: for dim in xrange(numDims): r1, n, r2 = coresZ[0][dim].shape for idx in xrange(len(zArr)): if (r1, n, r2) != coresZ[idx][dim].shape: print('Warning: cannot use the jit version when not all ' 'the ranks in the Z array are equal each other. ' 'Switching to the non-jit version.') use_jit = False if use_jit: zCoresDim = [None] * numDims for dim in xrange(numDims): r1, n, r2 = coresZ[0][dim].shape zCoresDim[dim] = np.zeros([len(zArr), r1, n, r2]) for idx in xrange(len(zArr)): if (r1, n, r2) != coresZ[idx][dim].shape: print('Warning: cannot use the jit version when not all ' 'the ranks in the Z array are equal each other. ' 'Switching to the non-jit version.') use_jit = False zCoresDim[dim][idx, :, :, :] = coresZ[idx][dim] # Initialize the cores of the projection_X(sum z[i]). coresP = [] for dim in xrange(numDims): r1 = 2 * X.r[dim] r2 = 2 * X.r[dim+1] if dim == 0: r1 = 1 if dim == numDims - 1: r2 = 1 coresP.append(np.zeros((r1, modeSize[dim], r2))) # rhs[dim] is a len(zArr) x zArr[idx] x X.rank_dim.rank_dim ndarray. # Right to left orthogonalization of X and preparation of the rhs vectors. for dim in xrange(numDims-1, 0, -1): # Right to left orthogonalization of the X cores. coresX = cores_orthogonalization_step(coresX, dim, left_to_right=False) r1, n, r2 = coresX[dim].shape # Fill the right orthogonal part of the projection. for value in xrange(modeSize[dim]): coresP[dim][0:r1, value, 0:r2] = coresX[dim][:, value, :] rhs = [None] * (numDims+1) for dim in xrange(numDims): rhs[dim] = np.zeros([len(zArr), zArr[idx].r[dim], coresX[dim].shape[0]]) rhs[numDims] = np.ones([len(zArr), 1, 1]) for dim in xrange(numDims-1, 0, -1): _update_rhs(rhs[dim+1], coresX[dim], zCoresDim[dim], rhs[dim]) if debug: assert(np.allclose(X.full(), tt.tensor.from_list(coresX).full())) # lsh is a len(zArr) x X.rank_dim x zArr[idx].rank_dim ndarray. lhs = np.ones([len(zArr), 1, 1]) # Left to right sweep. for dim in xrange(numDims): cc = coresX[dim].copy() r1, n, r2 = cc.shape if dim < numDims-1: # Left to right orthogonalization. cc = reshape(cc, (-1, r2)) cc, rr = np.linalg.qr(cc) r2 = cc.shape[1] # Warning: since ranks can change here, do not use X.r! # Use coresX[dim].shape instead. if debug: # Need to do it before the move non orthogonal part rr to # the coresX[dim+1]. 
rightQ = right(tt.tensor.from_list(coresX), dim+1) coresX[dim] = reshape(cc, (r1, n, r2)).copy() coresX[dim+1] = np.tensordot(rr, coresX[dim+1], 1) new_lhs = np.zeros([len(zArr), r2, zArr[idx].r[dim+1]]) _update_lhs(lhs, coresX[dim], zCoresDim[dim], new_lhs) # See the correspondic section in the non-jit version of this # code for a less confusing implementation of # the transformation below. currPCore = np.einsum('ijk,iklm->ijlm', lhs, zCoresDim[dim]) currPCore = reshape(currPCore, (len(zArr), r1*n, -1)) currPCore -= np.einsum('ij,kjl->kil', cc, new_lhs) currPCore = np.einsum('ijk,ikl', currPCore, rhs[dim+1]) currPCore = reshape(currPCore, (r1, modeSize[dim], r2)) if dim == 0: coresP[dim][0:r1, :, 0:r2] += currPCore else: coresP[dim][r1:, :, 0:r2] += currPCore if debug: explicit_sum = np.zeros((r1, modeSize[dim], r2)) for idx in xrange(len(zArr)): leftQm1 = left(tt.tensor.from_list(coresX), dim-1) leftQ = left(tt.tensor.from_list(coresX), dim) first = np.tensordot(leftQm1.T, unfolding(zArr[idx], dim-1), 1) second = reshape(first, (-1, np.prod(modeSize[dim+1:]))) if dim < numDims-1: explicit = second.dot(rightQ) orth_cc = reshape(coresX[dim], (-1, coresX[dim].shape[2])) explicit -= orth_cc.dot(leftQ.T.dot(unfolding(zArr[idx], dim)).dot(rightQ)) else: explicit = second explicit_sum += reshape(explicit, currPCore.shape) assert(np.allclose(explicit_sum, currPCore)) lhs = new_lhs if dim == 0: coresP[dim][0:r1, :, r2:] = coresX[dim] else: coresP[dim][r1:, :, r2:] = coresX[dim] if dim == numDims-1: coresP[dim][r1:, :, 0:r2] += np.einsum('ijk,iklm->jlm', lhs, zCoresDim[dim]) if debug: assert(np.allclose(X.full(), tt.tensor.from_list(coresX).full())) return tt.tensor.from_list(coresP) else: # Non-jit version of the code. # Initialize the cores of the projection_X(sum z[i]). coresP = [] for dim in xrange(numDims): r1 = 2 * X.r[dim] r2 = 2 * X.r[dim+1] if dim == 0: r1 = 1 if dim == numDims - 1: r2 = 1 coresP.append(np.zeros((r1, modeSize[dim], r2))) # rhs[idx][dim] is an (Z.rank_dim * X.rank_dim) x 1 vector rhs = [[0] * (numDims+1) for _ in xrange(len(zArr))] for idx in xrange(len(zArr)): rhs[idx][numDims] = np.ones([1, 1]) # Right to left sweep to orthogonalize the cores and prepare rhs. for dim in xrange(numDims-1, 0, -1): # Right to left orthogonalization of the X cores. coresX = cores_orthogonalization_step(coresX, dim, left_to_right=False) r1, n, r2 = coresX[dim].shape # Fill the right orthogonal part of the projection. coresP[dim][0:r1, :, 0:r2] = coresX[dim] # Compute rhs. for idx in xrange(len(zArr)): coreProd = np.tensordot(coresZ[idx][dim], coresX[dim], axes=(1, 1)) coreProd = np.transpose(coreProd, (0, 2, 1, 3)) coreProd = reshape(coreProd, (zArr[idx].r[dim]*r1, zArr[idx].r[dim+1]*r2)) rhs[idx][dim] = np.dot(coreProd, rhs[idx][dim+1]) if debug: assert(np.allclose(X.full(), tt.tensor.from_list(coresX).full())) # lsh[idx] is an X.rank_dim x zArr[idx].rank_dim matrix. lhs = [np.ones([1, 1]) for _ in xrange(len(zArr))] # Left to right sweep. for dim in xrange(numDims - 1): if debug: rightQ = right(tt.tensor.from_list(coresX), dim+1) # Left to right orthogonalization of the X cores. coresX = cores_orthogonalization_step(coresX, dim, left_to_right=True) r1, n, r2 = coresX[dim].shape cc = reshape(coresX[dim], (-1, r2)) for idx in xrange(len(zArr)): currZCore = reshape(coresZ[idx][dim], (zArr[idx].r[dim], -1)) currPCore = np.dot(lhs[idx], currZCore) # TODO: consider using np.einsum. 
coreProd = np.tensordot(coresX[dim], coresZ[idx][dim], axes=(1, 1)) coreProd = np.transpose(coreProd, (0, 2, 1, 3)) coreProd = reshape(coreProd, (r1*zArr[idx].r[dim], r2*zArr[idx].r[dim+1])) lhs[idx] = reshape(lhs[idx], (1, -1)) lhs[idx] = np.dot(lhs[idx], coreProd) lhs[idx] = reshape(lhs[idx], (r2, zArr[idx].r[dim+1])) currPCore = reshape(currPCore, (-1, zArr[idx].r[dim+1])) currPCore -= np.dot(cc, lhs[idx]) rhs[idx][dim+1] = reshape(rhs[idx][dim+1], (zArr[idx].r[dim+1], r2)) currPCore = np.dot(currPCore, rhs[idx][dim+1]) currPCore = reshape(currPCore, (r1, modeSize[dim], r2)) if dim == 0: coresP[dim][0:r1, :, 0:r2] += currPCore else: coresP[dim][r1:, :, 0:r2] += currPCore if debug: leftQm1 = left(tt.tensor.from_list(coresX), dim-1) leftQ = left(tt.tensor.from_list(coresX), dim) first = np.tensordot(leftQm1.T, unfolding(zArr[idx], dim-1), 1) second = reshape(first, (-1, np.prod(modeSize[dim+1:]))) if dim < numDims-1: explicit = second.dot(rightQ) orth_cc = reshape(coresX[dim], (-1, coresX[dim].shape[2])) explicit -= orth_cc.dot(leftQ.T.dot(unfolding(zArr[idx], dim)).dot(rightQ)) else: explicit = second explicit = reshape(explicit, currPCore.shape) assert(np.allclose(explicit, currPCore)) if dim == 0: coresP[dim][0:r1, :, r2:] = coresX[dim] else: coresP[dim][r1:, :, r2:] = coresX[dim] for idx in xrange(len(zArr)): r1, n, r2 = coresX[numDims-1].shape currZCore = reshape(coresZ[idx][numDims-1], (zArr[idx].r[numDims-1], -1)) currPCore = np.dot(lhs[idx], currZCore) currPCore = reshape(currPCore, (r1, n, r2)) coresP[numDims-1][r1:, :, 0:r2] += currPCore if debug: assert(np.allclose(X.full(), tt.tensor.from_list(coresX).full())) return tt.tensor.from_list(coresP)
python
def project(X, Z, use_jit=False, debug=False): """ Project tensor Z on the tangent space of tensor X. X is a tensor in the TT format. Z can be a tensor in the TT format or a list of tensors (in this case the function computes projection of the sum off all tensors in the list: project(X, Z) = P_X(\sum_i Z_i) ). This function implements an algorithm from the paper [1], theorem 3.1. The jit version of the code is much faster when projecting a lot of tensors simultaneously (in other words Z is a list with many tensors). Returns a tensor in the TT format with the TT-ranks equal 2 * rank(Z). """ zArr = None if isinstance(Z, tt.vector): zArr = [Z] else: zArr = Z # Get rid of redundant ranks (they cause technical difficulties). X = X.round(eps=0) numDims, modeSize = X.d, X.n coresX = tt.tensor.to_list(X) coresZ = [None] * len(zArr) for idx in xrange(len(zArr)): assert(modeSize == zArr[idx].n).all() coresZ[idx] = tt.tensor.to_list(zArr[idx]) if not use_jit and len(zArr) > 10: print('Consider using use_jit=True option to speed up the projection ' 'process.') if use_jit: for dim in xrange(numDims): r1, n, r2 = coresZ[0][dim].shape for idx in xrange(len(zArr)): if (r1, n, r2) != coresZ[idx][dim].shape: print('Warning: cannot use the jit version when not all ' 'the ranks in the Z array are equal each other. ' 'Switching to the non-jit version.') use_jit = False if use_jit: zCoresDim = [None] * numDims for dim in xrange(numDims): r1, n, r2 = coresZ[0][dim].shape zCoresDim[dim] = np.zeros([len(zArr), r1, n, r2]) for idx in xrange(len(zArr)): if (r1, n, r2) != coresZ[idx][dim].shape: print('Warning: cannot use the jit version when not all ' 'the ranks in the Z array are equal each other. ' 'Switching to the non-jit version.') use_jit = False zCoresDim[dim][idx, :, :, :] = coresZ[idx][dim] # Initialize the cores of the projection_X(sum z[i]). coresP = [] for dim in xrange(numDims): r1 = 2 * X.r[dim] r2 = 2 * X.r[dim+1] if dim == 0: r1 = 1 if dim == numDims - 1: r2 = 1 coresP.append(np.zeros((r1, modeSize[dim], r2))) # rhs[dim] is a len(zArr) x zArr[idx] x X.rank_dim.rank_dim ndarray. # Right to left orthogonalization of X and preparation of the rhs vectors. for dim in xrange(numDims-1, 0, -1): # Right to left orthogonalization of the X cores. coresX = cores_orthogonalization_step(coresX, dim, left_to_right=False) r1, n, r2 = coresX[dim].shape # Fill the right orthogonal part of the projection. for value in xrange(modeSize[dim]): coresP[dim][0:r1, value, 0:r2] = coresX[dim][:, value, :] rhs = [None] * (numDims+1) for dim in xrange(numDims): rhs[dim] = np.zeros([len(zArr), zArr[idx].r[dim], coresX[dim].shape[0]]) rhs[numDims] = np.ones([len(zArr), 1, 1]) for dim in xrange(numDims-1, 0, -1): _update_rhs(rhs[dim+1], coresX[dim], zCoresDim[dim], rhs[dim]) if debug: assert(np.allclose(X.full(), tt.tensor.from_list(coresX).full())) # lsh is a len(zArr) x X.rank_dim x zArr[idx].rank_dim ndarray. lhs = np.ones([len(zArr), 1, 1]) # Left to right sweep. for dim in xrange(numDims): cc = coresX[dim].copy() r1, n, r2 = cc.shape if dim < numDims-1: # Left to right orthogonalization. cc = reshape(cc, (-1, r2)) cc, rr = np.linalg.qr(cc) r2 = cc.shape[1] # Warning: since ranks can change here, do not use X.r! # Use coresX[dim].shape instead. if debug: # Need to do it before the move non orthogonal part rr to # the coresX[dim+1]. 
rightQ = right(tt.tensor.from_list(coresX), dim+1) coresX[dim] = reshape(cc, (r1, n, r2)).copy() coresX[dim+1] = np.tensordot(rr, coresX[dim+1], 1) new_lhs = np.zeros([len(zArr), r2, zArr[idx].r[dim+1]]) _update_lhs(lhs, coresX[dim], zCoresDim[dim], new_lhs) # See the correspondic section in the non-jit version of this # code for a less confusing implementation of # the transformation below. currPCore = np.einsum('ijk,iklm->ijlm', lhs, zCoresDim[dim]) currPCore = reshape(currPCore, (len(zArr), r1*n, -1)) currPCore -= np.einsum('ij,kjl->kil', cc, new_lhs) currPCore = np.einsum('ijk,ikl', currPCore, rhs[dim+1]) currPCore = reshape(currPCore, (r1, modeSize[dim], r2)) if dim == 0: coresP[dim][0:r1, :, 0:r2] += currPCore else: coresP[dim][r1:, :, 0:r2] += currPCore if debug: explicit_sum = np.zeros((r1, modeSize[dim], r2)) for idx in xrange(len(zArr)): leftQm1 = left(tt.tensor.from_list(coresX), dim-1) leftQ = left(tt.tensor.from_list(coresX), dim) first = np.tensordot(leftQm1.T, unfolding(zArr[idx], dim-1), 1) second = reshape(first, (-1, np.prod(modeSize[dim+1:]))) if dim < numDims-1: explicit = second.dot(rightQ) orth_cc = reshape(coresX[dim], (-1, coresX[dim].shape[2])) explicit -= orth_cc.dot(leftQ.T.dot(unfolding(zArr[idx], dim)).dot(rightQ)) else: explicit = second explicit_sum += reshape(explicit, currPCore.shape) assert(np.allclose(explicit_sum, currPCore)) lhs = new_lhs if dim == 0: coresP[dim][0:r1, :, r2:] = coresX[dim] else: coresP[dim][r1:, :, r2:] = coresX[dim] if dim == numDims-1: coresP[dim][r1:, :, 0:r2] += np.einsum('ijk,iklm->jlm', lhs, zCoresDim[dim]) if debug: assert(np.allclose(X.full(), tt.tensor.from_list(coresX).full())) return tt.tensor.from_list(coresP) else: # Non-jit version of the code. # Initialize the cores of the projection_X(sum z[i]). coresP = [] for dim in xrange(numDims): r1 = 2 * X.r[dim] r2 = 2 * X.r[dim+1] if dim == 0: r1 = 1 if dim == numDims - 1: r2 = 1 coresP.append(np.zeros((r1, modeSize[dim], r2))) # rhs[idx][dim] is an (Z.rank_dim * X.rank_dim) x 1 vector rhs = [[0] * (numDims+1) for _ in xrange(len(zArr))] for idx in xrange(len(zArr)): rhs[idx][numDims] = np.ones([1, 1]) # Right to left sweep to orthogonalize the cores and prepare rhs. for dim in xrange(numDims-1, 0, -1): # Right to left orthogonalization of the X cores. coresX = cores_orthogonalization_step(coresX, dim, left_to_right=False) r1, n, r2 = coresX[dim].shape # Fill the right orthogonal part of the projection. coresP[dim][0:r1, :, 0:r2] = coresX[dim] # Compute rhs. for idx in xrange(len(zArr)): coreProd = np.tensordot(coresZ[idx][dim], coresX[dim], axes=(1, 1)) coreProd = np.transpose(coreProd, (0, 2, 1, 3)) coreProd = reshape(coreProd, (zArr[idx].r[dim]*r1, zArr[idx].r[dim+1]*r2)) rhs[idx][dim] = np.dot(coreProd, rhs[idx][dim+1]) if debug: assert(np.allclose(X.full(), tt.tensor.from_list(coresX).full())) # lsh[idx] is an X.rank_dim x zArr[idx].rank_dim matrix. lhs = [np.ones([1, 1]) for _ in xrange(len(zArr))] # Left to right sweep. for dim in xrange(numDims - 1): if debug: rightQ = right(tt.tensor.from_list(coresX), dim+1) # Left to right orthogonalization of the X cores. coresX = cores_orthogonalization_step(coresX, dim, left_to_right=True) r1, n, r2 = coresX[dim].shape cc = reshape(coresX[dim], (-1, r2)) for idx in xrange(len(zArr)): currZCore = reshape(coresZ[idx][dim], (zArr[idx].r[dim], -1)) currPCore = np.dot(lhs[idx], currZCore) # TODO: consider using np.einsum. 
coreProd = np.tensordot(coresX[dim], coresZ[idx][dim], axes=(1, 1)) coreProd = np.transpose(coreProd, (0, 2, 1, 3)) coreProd = reshape(coreProd, (r1*zArr[idx].r[dim], r2*zArr[idx].r[dim+1])) lhs[idx] = reshape(lhs[idx], (1, -1)) lhs[idx] = np.dot(lhs[idx], coreProd) lhs[idx] = reshape(lhs[idx], (r2, zArr[idx].r[dim+1])) currPCore = reshape(currPCore, (-1, zArr[idx].r[dim+1])) currPCore -= np.dot(cc, lhs[idx]) rhs[idx][dim+1] = reshape(rhs[idx][dim+1], (zArr[idx].r[dim+1], r2)) currPCore = np.dot(currPCore, rhs[idx][dim+1]) currPCore = reshape(currPCore, (r1, modeSize[dim], r2)) if dim == 0: coresP[dim][0:r1, :, 0:r2] += currPCore else: coresP[dim][r1:, :, 0:r2] += currPCore if debug: leftQm1 = left(tt.tensor.from_list(coresX), dim-1) leftQ = left(tt.tensor.from_list(coresX), dim) first = np.tensordot(leftQm1.T, unfolding(zArr[idx], dim-1), 1) second = reshape(first, (-1, np.prod(modeSize[dim+1:]))) if dim < numDims-1: explicit = second.dot(rightQ) orth_cc = reshape(coresX[dim], (-1, coresX[dim].shape[2])) explicit -= orth_cc.dot(leftQ.T.dot(unfolding(zArr[idx], dim)).dot(rightQ)) else: explicit = second explicit = reshape(explicit, currPCore.shape) assert(np.allclose(explicit, currPCore)) if dim == 0: coresP[dim][0:r1, :, r2:] = coresX[dim] else: coresP[dim][r1:, :, r2:] = coresX[dim] for idx in xrange(len(zArr)): r1, n, r2 = coresX[numDims-1].shape currZCore = reshape(coresZ[idx][numDims-1], (zArr[idx].r[numDims-1], -1)) currPCore = np.dot(lhs[idx], currZCore) currPCore = reshape(currPCore, (r1, n, r2)) coresP[numDims-1][r1:, :, 0:r2] += currPCore if debug: assert(np.allclose(X.full(), tt.tensor.from_list(coresX).full())) return tt.tensor.from_list(coresP)
Project tensor Z onto the tangent space of tensor X. X is a tensor in the TT format. Z can be a tensor in the TT format or a list of tensors (in this case the function computes the projection of the sum of all tensors in the list: project(X, Z) = P_X(\sum_i Z_i) ). This function implements an algorithm from the paper [1], theorem 3.1. The jit version of the code is much faster when projecting many tensors simultaneously (in other words, when Z is a list with many tensors). Returns a tensor in the TT format with the TT-ranks equal to 2 * rank(Z).
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/riemannian/riemannian.py#L110-L356
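Usage sketch (added for illustration, not part of the dataset entry): a minimal call to project(), assuming the function is importable from tt.riemannian.riemannian as the file path above suggests; the final check only illustrates that the projection is linear in Z.

import numpy as np
import tt
from tt.riemannian import riemannian  # import path assumed from the entry above

X = tt.rand(4, 4, 3)   # random 4 x 4 x 4 x 4 TT tensor with TT-ranks 3
Z = tt.rand(4, 4, 2)

PZ = riemannian.project(X, Z)           # P_X(Z)
PZ2 = riemannian.project(X, Z * 2.0)    # projection is linear in Z
print((PZ2 - 2.0 * PZ).norm())          # should be ~0 up to round-off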
oseledets/ttpy
tt/riemannian/riemannian.py
projector_splitting_add
def projector_splitting_add(Y, delta, debug=False): """Compute Y + delta via the projector splitting scheme. This function implements the projector splitting scheme (section 4.2 of [1]). The result is a TT-tensor with the TT-ranks equal to the TT-ranks of Y.""" # Get rid of redundant ranks (they cause technical difficulties). delta = delta.round(eps=0) numDims = delta.d assert(numDims == Y.d) modeSize = delta.n assert(modeSize == Y.n).all() coresDelta = tt.tensor.to_list(delta) coresY = tt.tensor.to_list(Y) # rhs[dim] is an (delta.rank_dim * Y.rank_dim) x 1 vector rhs = [None] * (numDims+1) rhs[numDims] = np.ones([1, 1]) # Right to left sweep to orthogonalize the cores and prepare the rhs. for dim in xrange(numDims-1, 0, -1): # Right to left orthogonalization of the Y cores. coresY = cores_orthogonalization_step(coresY, dim, left_to_right=False) r1, n, r2 = coresY[dim].shape # rhs computation. coreProd = np.tensordot(coresDelta[dim], coresY[dim], axes=(1, 1)) coreProd = np.transpose(coreProd, (0, 2, 1, 3)) coreProd = reshape(coreProd, (delta.r[dim]*r1, delta.r[dim+1]*r2)) rhs[dim] = np.dot(coreProd, rhs[dim+1]) if debug: assert(np.allclose(Y.full(), tt.tensor.from_list(coresY).full())) # lsh is an Y.rank_dim x delta.rank_dim matrix. lhs = np.ones([1, 1]) # s is an Y.rank_dim x Y.rank_dim matrix. s = np.ones([1, 1]) # Left to right projector splitting sweep. for dim in xrange(numDims): # Y^+ (formula 4.10) cc = coresDelta[dim].copy() r1, n, r2 = coresY[dim].shape cc = np.tensordot(lhs, cc, 1) rhs[dim+1] = reshape(rhs[dim+1], (delta.r[dim+1], r2)) cc = reshape(cc, (-1, delta.r[dim+1])) cc = np.dot(cc, rhs[dim+1]) if debug: first = np.kron(np.eye(modeSize[dim]), left(tt.tensor.from_list(coresY), dim-1).T) second = np.dot(first, unfolding(delta, dim)) explicit = np.dot(second, right(tt.tensor.from_list(coresY), dim+1)) assert(np.allclose(explicit, cc)) cc += reshape(np.tensordot(s, coresY[dim], 1), (-1, Y.r[dim+1])) if dim < numDims-1: cc, rr = np.linalg.qr(cc) # TODO: do we need to use r1 = cc.shape[1] here???? cc = reshape(cc, coresY[dim].shape) coresY[dim] = cc.copy() if dim < numDims-1: coreProd = np.tensordot(coresY[dim], coresDelta[dim], axes=(1, 1)) coreProd = np.transpose(coreProd, (0, 2, 1, 3)) coreProd = reshape(coreProd, (r1*delta.r[dim], r2*delta.r[dim+1])) lhs = reshape(lhs, (1, -1)) lhs = np.dot(lhs, coreProd) lhs = reshape(lhs, (r2, delta.r[dim+1])) if dim < numDims-1: # Y^- (formula 4.7) s = rr - np.dot(lhs, rhs[dim+1]) if debug: first = left(tt.tensor.from_list(coresY), dim).T second = np.dot(first, unfolding(delta, dim)) explicit = np.dot(second, right(tt.tensor.from_list(coresY), dim+1)) assert(np.allclose(explicit, np.dot(lhs, rhs[dim+1]))) return tt.tensor.from_list(coresY)
python
def projector_splitting_add(Y, delta, debug=False): """Compute Y + delta via the projector splitting scheme. This function implements the projector splitting scheme (section 4.2 of [1]). The result is a TT-tensor with the TT-ranks equal to the TT-ranks of Y.""" # Get rid of redundant ranks (they cause technical difficulties). delta = delta.round(eps=0) numDims = delta.d assert(numDims == Y.d) modeSize = delta.n assert(modeSize == Y.n).all() coresDelta = tt.tensor.to_list(delta) coresY = tt.tensor.to_list(Y) # rhs[dim] is an (delta.rank_dim * Y.rank_dim) x 1 vector rhs = [None] * (numDims+1) rhs[numDims] = np.ones([1, 1]) # Right to left sweep to orthogonalize the cores and prepare the rhs. for dim in xrange(numDims-1, 0, -1): # Right to left orthogonalization of the Y cores. coresY = cores_orthogonalization_step(coresY, dim, left_to_right=False) r1, n, r2 = coresY[dim].shape # rhs computation. coreProd = np.tensordot(coresDelta[dim], coresY[dim], axes=(1, 1)) coreProd = np.transpose(coreProd, (0, 2, 1, 3)) coreProd = reshape(coreProd, (delta.r[dim]*r1, delta.r[dim+1]*r2)) rhs[dim] = np.dot(coreProd, rhs[dim+1]) if debug: assert(np.allclose(Y.full(), tt.tensor.from_list(coresY).full())) # lsh is an Y.rank_dim x delta.rank_dim matrix. lhs = np.ones([1, 1]) # s is an Y.rank_dim x Y.rank_dim matrix. s = np.ones([1, 1]) # Left to right projector splitting sweep. for dim in xrange(numDims): # Y^+ (formula 4.10) cc = coresDelta[dim].copy() r1, n, r2 = coresY[dim].shape cc = np.tensordot(lhs, cc, 1) rhs[dim+1] = reshape(rhs[dim+1], (delta.r[dim+1], r2)) cc = reshape(cc, (-1, delta.r[dim+1])) cc = np.dot(cc, rhs[dim+1]) if debug: first = np.kron(np.eye(modeSize[dim]), left(tt.tensor.from_list(coresY), dim-1).T) second = np.dot(first, unfolding(delta, dim)) explicit = np.dot(second, right(tt.tensor.from_list(coresY), dim+1)) assert(np.allclose(explicit, cc)) cc += reshape(np.tensordot(s, coresY[dim], 1), (-1, Y.r[dim+1])) if dim < numDims-1: cc, rr = np.linalg.qr(cc) # TODO: do we need to use r1 = cc.shape[1] here???? cc = reshape(cc, coresY[dim].shape) coresY[dim] = cc.copy() if dim < numDims-1: coreProd = np.tensordot(coresY[dim], coresDelta[dim], axes=(1, 1)) coreProd = np.transpose(coreProd, (0, 2, 1, 3)) coreProd = reshape(coreProd, (r1*delta.r[dim], r2*delta.r[dim+1])) lhs = reshape(lhs, (1, -1)) lhs = np.dot(lhs, coreProd) lhs = reshape(lhs, (r2, delta.r[dim+1])) if dim < numDims-1: # Y^- (formula 4.7) s = rr - np.dot(lhs, rhs[dim+1]) if debug: first = left(tt.tensor.from_list(coresY), dim).T second = np.dot(first, unfolding(delta, dim)) explicit = np.dot(second, right(tt.tensor.from_list(coresY), dim+1)) assert(np.allclose(explicit, np.dot(lhs, rhs[dim+1]))) return tt.tensor.from_list(coresY)
Compute Y + delta via the projector splitting scheme. This function implements the projector splitting scheme (section 4.2 of [1]). The result is a TT-tensor with the TT-ranks equal to the TT-ranks of Y.
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/riemannian/riemannian.py#L359-L432
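Usage sketch (added, not from the source): retracting a small update delta back to the TT-ranks of Y, assuming the same import path as above.

import tt
from tt.riemannian import riemannian  # assumed import path

Y = tt.rand(4, 4, 5)
delta = tt.rand(4, 4, 2) * 1e-3        # a small correction

Y_new = riemannian.projector_splitting_add(Y, delta)
# Y_new keeps the TT-ranks of Y while approximating Y + delta.
print((Y_new - (Y + delta)).norm() / (Y + delta).norm())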
oseledets/ttpy
tt/riemannian/riemannian.py
tt_qr
def tt_qr(X, left_to_right=True):
    """ Orthogonalizes a TT tensor from left to right or from right to left.

    :param: X - tensor to orthogonalise
    :param: direction - direction. May be 'lr/LR' or 'rl/RL' for left/right orthogonalization
    :return: X_orth, R - orthogonal tensor and right (left) upper (lower) triangular matrix

    >>> import tt, numpy as np
    >>> x = tt.rand(np.array([2, 3, 4, 5]), d=4)
    >>> x_q, r = tt_qr(x, left_to_right=True)
    >>> np.allclose((r[0][0] * x_q).norm(), x.norm())
    True
    >>> x_u, l = tt_qr(x, left_to_right=False)
    >>> np.allclose((l[0][0] * x_u).norm(), x.norm())
    True
    """
    # Get rid of redundant ranks (they cause technical difficulties).
    X = X.round(eps=0)
    numDims = X.d
    coresX = tt.tensor.to_list(X)
    if left_to_right:
        # Left to right orthogonalization of the X cores.
        for dim in xrange(0, numDims - 1):
            coresX = cores_orthogonalization_step(
                coresX, dim, left_to_right=left_to_right)
        last_core = coresX[numDims - 1]
        r1, n, r2 = last_core.shape
        last_core, rr = np.linalg.qr(reshape(last_core, (-1, r2)))
        coresX[numDims - 1] = reshape(last_core, (r1, n, -1))
    else:
        # Right to left orthogonalization of the X cores.
        for dim in xrange(numDims - 1, 0, -1):
            coresX = cores_orthogonalization_step(
                coresX, dim, left_to_right=left_to_right)
        last_core = coresX[0]
        r1, n, r2 = last_core.shape
        last_core, rr = np.linalg.qr(
            np.transpose(reshape(last_core, (r1, -1))))
        coresX[0] = reshape(np.transpose(last_core), (-1, n, r2))
        rr = np.transpose(rr)
    return tt.tensor.from_list(coresX), rr
python
def tt_qr(X, left_to_right=True):
    """ Orthogonalizes a TT tensor from left to right or from right to left.

    :param: X - tensor to orthogonalise
    :param: direction - direction. May be 'lr/LR' or 'rl/RL' for left/right orthogonalization
    :return: X_orth, R - orthogonal tensor and right (left) upper (lower) triangular matrix

    >>> import tt, numpy as np
    >>> x = tt.rand(np.array([2, 3, 4, 5]), d=4)
    >>> x_q, r = tt_qr(x, left_to_right=True)
    >>> np.allclose((r[0][0] * x_q).norm(), x.norm())
    True
    >>> x_u, l = tt_qr(x, left_to_right=False)
    >>> np.allclose((l[0][0] * x_u).norm(), x.norm())
    True
    """
    # Get rid of redundant ranks (they cause technical difficulties).
    X = X.round(eps=0)
    numDims = X.d
    coresX = tt.tensor.to_list(X)
    if left_to_right:
        # Left to right orthogonalization of the X cores.
        for dim in xrange(0, numDims - 1):
            coresX = cores_orthogonalization_step(
                coresX, dim, left_to_right=left_to_right)
        last_core = coresX[numDims - 1]
        r1, n, r2 = last_core.shape
        last_core, rr = np.linalg.qr(reshape(last_core, (-1, r2)))
        coresX[numDims - 1] = reshape(last_core, (r1, n, -1))
    else:
        # Right to left orthogonalization of the X cores.
        for dim in xrange(numDims - 1, 0, -1):
            coresX = cores_orthogonalization_step(
                coresX, dim, left_to_right=left_to_right)
        last_core = coresX[0]
        r1, n, r2 = last_core.shape
        last_core, rr = np.linalg.qr(
            np.transpose(reshape(last_core, (r1, -1))))
        coresX[0] = reshape(np.transpose(last_core), (-1, n, r2))
        rr = np.transpose(rr)
    return tt.tensor.from_list(coresX), rr
Orthogonalizes a TT tensor from left to right or from right to left. :param: X - tensor to orthogonalise :param: direction - direction. May be 'lr/LR' or 'rl/RL' for left/right orthogonalization :return: X_orth, R - orthogonal tensor and right (left) upper (lower) triangular matrix >>> import tt, numpy as np >>> x = tt.rand(np.array([2, 3, 4, 5]), d=4) >>> x_q, r = tt_qr(x, left_to_right=True) >>> np.allclose((r[0][0]*x_q).norm(), x.norm()) True >>> x_u, l = tt_qr(x, left_to_right=False) >>> np.allclose((l[0][0]*x_u).norm(), x.norm()) True
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/riemannian/riemannian.py#L435-L483
oseledets/ttpy
tt/core/utils.py
ind2sub
def ind2sub(siz, idx):
    '''
    Translates a full-format (linear) index into the corresponding tt.vector multi-index.
    ----------
    Parameters:
        siz - tt.vector mode sizes
        idx - full-vector index
    Note: not vectorized.
    '''
    n = len(siz)
    subs = _np.empty((n))
    k = _np.cumprod(siz[:-1])
    k = _np.concatenate((_np.ones(1), k))
    for i in xrange(n - 1, -1, -1):
        subs[i] = _np.floor(idx / k[i])
        idx = idx % k[i]
    return subs.astype(_np.int32)
python
def ind2sub(siz, idx):
    '''
    Translates a full-format (linear) index into the corresponding tt.vector multi-index.
    ----------
    Parameters:
        siz - tt.vector mode sizes
        idx - full-vector index
    Note: not vectorized.
    '''
    n = len(siz)
    subs = _np.empty((n))
    k = _np.cumprod(siz[:-1])
    k = _np.concatenate((_np.ones(1), k))
    for i in xrange(n - 1, -1, -1):
        subs[i] = _np.floor(idx / k[i])
        idx = idx % k[i]
    return subs.astype(_np.int32)
Translates a full-format (linear) index into the corresponding tt.vector multi-index. ---------- Parameters: siz - tt.vector mode sizes idx - full-vector index Note: not vectorized.
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/utils.py#L9-L25
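The helper uses the Fortran-style convention (first index varies fastest), so it should agree with numpy.unravel_index with order='F'. A hypothetical check, assuming the import path shown in the entry above:

import numpy as np
from tt.core.utils import ind2sub  # assumed import path

siz = np.array([2, 3, 4])
for idx in range(int(np.prod(siz))):
    subs = ind2sub(siz, idx)
    ref = np.array(np.unravel_index(idx, siz, order='F'))
    assert (subs == ref).all()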
oseledets/ttpy
tt/core/utils.py
gcd
def gcd(a, b):
    '''Greatest common divisor (element-wise over NumPy arrays).'''
    f = _np.frompyfunc(_fractions.gcd, 2, 1)
    return f(a, b)
python
def gcd(a, b):
    '''Greatest common divisor (element-wise over NumPy arrays).'''
    f = _np.frompyfunc(_fractions.gcd, 2, 1)
    return f(a, b)
Greatest common divisor (element-wise over NumPy arrays).
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/utils.py#L27-L30
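Because the helper wraps fractions.gcd with numpy.frompyfunc, it broadcasts element-wise over arrays. Note that fractions.gcd was removed in Python 3.9, so this sketch assumes an interpreter where it still exists; the import path is taken from the entry above.

import numpy as np
from tt.core.utils import gcd  # assumed import path

print(gcd(12, 18))                 # 6
print(gcd(np.array([12, 15]), 9))  # [3 3] (object-dtype array from frompyfunc)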
oseledets/ttpy
tt/ksl/ksl.py
ksl
def ksl(A, y0, tau, verb=1, scheme='symm', space=8, rmax=2000, use_normest=1): """ Dynamical tensor-train approximation based on projector splitting This function performs one step of dynamical tensor-train approximation for the equation .. math :: \\frac{dy}{dt} = A y, \\quad y(0) = y_0 and outputs approximation for :math:`y(\\tau)` :References: 1. Christian Lubich, Ivan Oseledets, and Bart Vandereycken. Time integration of tensor trains. arXiv preprint 1407.2042, 2014. http://arxiv.org/abs/1407.2042 2. Christian Lubich and Ivan V. Oseledets. A projector-splitting integrator for dynamical low-rank approximation. BIT, 54(1):171-188, 2014. http://dx.doi.org/10.1007/s10543-013-0454-0 :param A: Matrix in the TT-format :type A: matrix :param y0: Initial condition in the TT-format, :type y0: tensor :param tau: Timestep :type tau: float :param scheme: The integration scheme, possible values: 'symm' -- second order, 'first' -- first order :type scheme: str :param space: Maximal dimension of the Krylov space for the local EXPOKIT solver. :type space: int :param use_normest: Use matrix norm estimation instead of the true 1-norm in KSL procedure. 0 -use true norm, 1 - Higham norm estimator, 2 - fixed norm=1.0 (for testing purposes only) :type use_normest: int, default: 1 :rtype: tensor :Example: >>> import tt >>> import tt.ksl >>> import numpy as np >>> d = 8 >>> a = tt.qlaplace_dd([d, d, d]) >>> y0, ev = tt.eigb.eigb(a, tt.rand(2 , 24, 2), 1e-6, verb=0) Solving a block eigenvalue problem Looking for 1 eigenvalues with accuracy 1E-06 swp: 1 er = 1.1408 rmax:2 swp: 2 er = 190.01 rmax:2 swp: 3 er = 2.72582E-08 rmax:2 Total number of matvecs: 0 >>> y1 = tt.ksl.ksl(a, y0, 1e-2) Solving a real-valued dynamical problem with tau=1E-02 >>> print tt.dot(y1, y0) / (y1.norm() * y0.norm()) - 1 #Eigenvectors should not change 0.0 """ y0 = y0.round(1e-14) # This will fix ranks # to be no more than maximal reasonable. # Fortran part doesn't handle excessive ranks ry = y0.r.copy() if scheme is 'symm': tp = 2 else: tp = 1 usenrm = int(use_normest) # Check for dtype y = tt.vector() if np.iscomplex(A.tt.core).any() or np.iscomplex(y0.core).any(): dyn_tt.dyn_tt.ztt_ksl( y0.d, A.n, A.m, A.tt.r, A.tt.core + 0j, y0.core + 0j, ry, tau, rmax, 0, 10, verb, tp, space, usenrm ) y.core = dyn_tt.dyn_tt.zresult_core.copy() else: A.tt.core = np.real(A.tt.core) y0.core = np.real(y0.core) dyn_tt.dyn_tt.tt_ksl( y0.d, A.n, A.m, A.tt.r, A.tt.core, y0.core, ry, tau, rmax, 0, 10, verb, tp, space, usenrm ) y.core = dyn_tt.dyn_tt.dresult_core.copy() dyn_tt.dyn_tt.deallocate_result() y.d = y0.d y.n = A.n.copy() y.r = ry y.get_ps() return y
python
def ksl(A, y0, tau, verb=1, scheme='symm', space=8, rmax=2000, use_normest=1): """ Dynamical tensor-train approximation based on projector splitting This function performs one step of dynamical tensor-train approximation for the equation .. math :: \\frac{dy}{dt} = A y, \\quad y(0) = y_0 and outputs approximation for :math:`y(\\tau)` :References: 1. Christian Lubich, Ivan Oseledets, and Bart Vandereycken. Time integration of tensor trains. arXiv preprint 1407.2042, 2014. http://arxiv.org/abs/1407.2042 2. Christian Lubich and Ivan V. Oseledets. A projector-splitting integrator for dynamical low-rank approximation. BIT, 54(1):171-188, 2014. http://dx.doi.org/10.1007/s10543-013-0454-0 :param A: Matrix in the TT-format :type A: matrix :param y0: Initial condition in the TT-format, :type y0: tensor :param tau: Timestep :type tau: float :param scheme: The integration scheme, possible values: 'symm' -- second order, 'first' -- first order :type scheme: str :param space: Maximal dimension of the Krylov space for the local EXPOKIT solver. :type space: int :param use_normest: Use matrix norm estimation instead of the true 1-norm in KSL procedure. 0 -use true norm, 1 - Higham norm estimator, 2 - fixed norm=1.0 (for testing purposes only) :type use_normest: int, default: 1 :rtype: tensor :Example: >>> import tt >>> import tt.ksl >>> import numpy as np >>> d = 8 >>> a = tt.qlaplace_dd([d, d, d]) >>> y0, ev = tt.eigb.eigb(a, tt.rand(2 , 24, 2), 1e-6, verb=0) Solving a block eigenvalue problem Looking for 1 eigenvalues with accuracy 1E-06 swp: 1 er = 1.1408 rmax:2 swp: 2 er = 190.01 rmax:2 swp: 3 er = 2.72582E-08 rmax:2 Total number of matvecs: 0 >>> y1 = tt.ksl.ksl(a, y0, 1e-2) Solving a real-valued dynamical problem with tau=1E-02 >>> print tt.dot(y1, y0) / (y1.norm() * y0.norm()) - 1 #Eigenvectors should not change 0.0 """ y0 = y0.round(1e-14) # This will fix ranks # to be no more than maximal reasonable. # Fortran part doesn't handle excessive ranks ry = y0.r.copy() if scheme is 'symm': tp = 2 else: tp = 1 usenrm = int(use_normest) # Check for dtype y = tt.vector() if np.iscomplex(A.tt.core).any() or np.iscomplex(y0.core).any(): dyn_tt.dyn_tt.ztt_ksl( y0.d, A.n, A.m, A.tt.r, A.tt.core + 0j, y0.core + 0j, ry, tau, rmax, 0, 10, verb, tp, space, usenrm ) y.core = dyn_tt.dyn_tt.zresult_core.copy() else: A.tt.core = np.real(A.tt.core) y0.core = np.real(y0.core) dyn_tt.dyn_tt.tt_ksl( y0.d, A.n, A.m, A.tt.r, A.tt.core, y0.core, ry, tau, rmax, 0, 10, verb, tp, space, usenrm ) y.core = dyn_tt.dyn_tt.dresult_core.copy() dyn_tt.dyn_tt.deallocate_result() y.d = y0.d y.n = A.n.copy() y.r = ry y.get_ps() return y
Dynamical tensor-train approximation based on projector splitting This function performs one step of dynamical tensor-train approximation for the equation .. math :: \\frac{dy}{dt} = A y, \\quad y(0) = y_0 and outputs approximation for :math:`y(\\tau)` :References: 1. Christian Lubich, Ivan Oseledets, and Bart Vandereycken. Time integration of tensor trains. arXiv preprint 1407.2042, 2014. http://arxiv.org/abs/1407.2042 2. Christian Lubich and Ivan V. Oseledets. A projector-splitting integrator for dynamical low-rank approximation. BIT, 54(1):171-188, 2014. http://dx.doi.org/10.1007/s10543-013-0454-0 :param A: Matrix in the TT-format :type A: matrix :param y0: Initial condition in the TT-format, :type y0: tensor :param tau: Timestep :type tau: float :param scheme: The integration scheme, possible values: 'symm' -- second order, 'first' -- first order :type scheme: str :param space: Maximal dimension of the Krylov space for the local EXPOKIT solver. :type space: int :param use_normest: Use matrix norm estimation instead of the true 1-norm in KSL procedure. 0 -use true norm, 1 - Higham norm estimator, 2 - fixed norm=1.0 (for testing purposes only) :type use_normest: int, default: 1 :rtype: tensor :Example: >>> import tt >>> import tt.ksl >>> import numpy as np >>> d = 8 >>> a = tt.qlaplace_dd([d, d, d]) >>> y0, ev = tt.eigb.eigb(a, tt.rand(2 , 24, 2), 1e-6, verb=0) Solving a block eigenvalue problem Looking for 1 eigenvalues with accuracy 1E-06 swp: 1 er = 1.1408 rmax:2 swp: 2 er = 190.01 rmax:2 swp: 3 er = 2.72582E-08 rmax:2 Total number of matvecs: 0 >>> y1 = tt.ksl.ksl(a, y0, 1e-2) Solving a real-valued dynamical problem with tau=1E-02 >>> print tt.dot(y1, y0) / (y1.norm() * y0.norm()) - 1 #Eigenvectors should not change 0.0
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/ksl/ksl.py#L7-L123
oseledets/ttpy
tt/ksl/ksl.py
diag_ksl
def diag_ksl(A, y0, tau, verb=1, scheme='symm', space=8, rmax=2000): """ Dynamical tensor-train approximation based on projector splitting This function performs one step of dynamical tensor-train approximation with diagonal matrix, i.e. it solves the equation for the equation .. math :: \\frac{dy}{dt} = V y, \\quad y(0) = y_0 and outputs approximation for :math:`y(\\tau)` :References: 1. Christian Lubich, Ivan Oseledets, and Bart Vandereycken. Time integration of tensor trains. arXiv preprint 1407.2042, 2014. http://arxiv.org/abs/1407.2042 2. Christian Lubich and Ivan V. Oseledets. A projector-splitting integrator for dynamical low-rank approximation. BIT, 54(1):171-188, 2014. http://dx.doi.org/10.1007/s10543-013-0454-0 :param A: Matrix in the TT-format :type A: matrix :param y0: Initial condition in the TT-format, :type y0: tensor :param tau: Timestep :type tau: float :param scheme: The integration scheme, possible values: 'symm' -- second order, 'first' -- first order :type scheme: str :param space: Maximal dimension of the Krylov space for the local EXPOKIT solver. :type space: int :rtype: tensor :Example: >>> import tt >>> import tt.ksl >>> import numpy as np >>> d = 8 >>> a = tt.qlaplace_dd([d, d, d]) >>> y0, ev = tt.eigb.eigb(a, tt.rand(2 , 24, 2), 1e-6, verb=0) Solving a block eigenvalue problem Looking for 1 eigenvalues with accuracy 1E-06 swp: 1 er = 1.1408 rmax:2 swp: 2 er = 190.01 rmax:2 swp: 3 er = 2.72582E-08 rmax:2 Total number of matvecs: 0 >>> y1 = tt.ksl.ksl(a, y0, 1e-2) Solving a real-valued dynamical problem with tau=1E-02 >>> print tt.dot(y1, y0) / (y1.norm() * y0.norm()) - 1 #Eigenvectors should not change 0.0 """ y0 = y0.round(1e-14) # This will fix ranks # to be no more than maximal reasonable. # Fortran part doesn't handle excessive ranks ry = y0.r.copy() if scheme is 'symm': tp = 2 else: tp = 1 # Check for dtype y = tt.vector() if np.iscomplex(A.core).any() or np.iscomplex(y0.core).any(): dyn_tt.dyn_diag_tt.ztt_diag_ksl( y0.d, A.n, A.r, A.core + 0j, y0.core + 0j, ry, tau, rmax, 0, 10, verb, tp, space) y.core = dyn_tt.dyn_diag_tt.zresult_core.copy() else: A.core = np.real(A.core) y0.core = np.real(y0.core) dyn_tt.dyn_diag_tt.dtt_diag_ksl( y0.d, A.n, A.r, A.core, y0.core, ry, tau, rmax, 0, 10, verb, tp, space) y.core = dyn_tt.dyn_diag_tt.dresult_core.copy() dyn_tt.dyn_diag_tt.deallocate_result() y.d = y0.d y.n = A.n.copy() y.r = ry y.get_ps() return y
python
def diag_ksl(A, y0, tau, verb=1, scheme='symm', space=8, rmax=2000): """ Dynamical tensor-train approximation based on projector splitting This function performs one step of dynamical tensor-train approximation with diagonal matrix, i.e. it solves the equation for the equation .. math :: \\frac{dy}{dt} = V y, \\quad y(0) = y_0 and outputs approximation for :math:`y(\\tau)` :References: 1. Christian Lubich, Ivan Oseledets, and Bart Vandereycken. Time integration of tensor trains. arXiv preprint 1407.2042, 2014. http://arxiv.org/abs/1407.2042 2. Christian Lubich and Ivan V. Oseledets. A projector-splitting integrator for dynamical low-rank approximation. BIT, 54(1):171-188, 2014. http://dx.doi.org/10.1007/s10543-013-0454-0 :param A: Matrix in the TT-format :type A: matrix :param y0: Initial condition in the TT-format, :type y0: tensor :param tau: Timestep :type tau: float :param scheme: The integration scheme, possible values: 'symm' -- second order, 'first' -- first order :type scheme: str :param space: Maximal dimension of the Krylov space for the local EXPOKIT solver. :type space: int :rtype: tensor :Example: >>> import tt >>> import tt.ksl >>> import numpy as np >>> d = 8 >>> a = tt.qlaplace_dd([d, d, d]) >>> y0, ev = tt.eigb.eigb(a, tt.rand(2 , 24, 2), 1e-6, verb=0) Solving a block eigenvalue problem Looking for 1 eigenvalues with accuracy 1E-06 swp: 1 er = 1.1408 rmax:2 swp: 2 er = 190.01 rmax:2 swp: 3 er = 2.72582E-08 rmax:2 Total number of matvecs: 0 >>> y1 = tt.ksl.ksl(a, y0, 1e-2) Solving a real-valued dynamical problem with tau=1E-02 >>> print tt.dot(y1, y0) / (y1.norm() * y0.norm()) - 1 #Eigenvectors should not change 0.0 """ y0 = y0.round(1e-14) # This will fix ranks # to be no more than maximal reasonable. # Fortran part doesn't handle excessive ranks ry = y0.r.copy() if scheme is 'symm': tp = 2 else: tp = 1 # Check for dtype y = tt.vector() if np.iscomplex(A.core).any() or np.iscomplex(y0.core).any(): dyn_tt.dyn_diag_tt.ztt_diag_ksl( y0.d, A.n, A.r, A.core + 0j, y0.core + 0j, ry, tau, rmax, 0, 10, verb, tp, space) y.core = dyn_tt.dyn_diag_tt.zresult_core.copy() else: A.core = np.real(A.core) y0.core = np.real(y0.core) dyn_tt.dyn_diag_tt.dtt_diag_ksl( y0.d, A.n, A.r, A.core, y0.core, ry, tau, rmax, 0, 10, verb, tp, space) y.core = dyn_tt.dyn_diag_tt.dresult_core.copy() dyn_tt.dyn_diag_tt.deallocate_result() y.d = y0.d y.n = A.n.copy() y.r = ry y.get_ps() return y
Dynamical tensor-train approximation based on projector splitting This function performs one step of dynamical tensor-train approximation with diagonal matrix, i.e. it solves the equation for the equation .. math :: \\frac{dy}{dt} = V y, \\quad y(0) = y_0 and outputs approximation for :math:`y(\\tau)` :References: 1. Christian Lubich, Ivan Oseledets, and Bart Vandereycken. Time integration of tensor trains. arXiv preprint 1407.2042, 2014. http://arxiv.org/abs/1407.2042 2. Christian Lubich and Ivan V. Oseledets. A projector-splitting integrator for dynamical low-rank approximation. BIT, 54(1):171-188, 2014. http://dx.doi.org/10.1007/s10543-013-0454-0 :param A: Matrix in the TT-format :type A: matrix :param y0: Initial condition in the TT-format, :type y0: tensor :param tau: Timestep :type tau: float :param scheme: The integration scheme, possible values: 'symm' -- second order, 'first' -- first order :type scheme: str :param space: Maximal dimension of the Krylov space for the local EXPOKIT solver. :type space: int :rtype: tensor :Example: >>> import tt >>> import tt.ksl >>> import numpy as np >>> d = 8 >>> a = tt.qlaplace_dd([d, d, d]) >>> y0, ev = tt.eigb.eigb(a, tt.rand(2 , 24, 2), 1e-6, verb=0) Solving a block eigenvalue problem Looking for 1 eigenvalues with accuracy 1E-06 swp: 1 er = 1.1408 rmax:2 swp: 2 er = 190.01 rmax:2 swp: 3 er = 2.72582E-08 rmax:2 Total number of matvecs: 0 >>> y1 = tt.ksl.ksl(a, y0, 1e-2) Solving a real-valued dynamical problem with tau=1E-02 >>> print tt.dot(y1, y0) / (y1.norm() * y0.norm()) - 1 #Eigenvectors should not change 0.0
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/ksl/ksl.py#L126-L231
oseledets/ttpy
tt/amen/__init__.py
amen_solve
def amen_solve(A, f, x0, eps, kickrank=4, nswp=20, local_prec='n', local_iters=2, local_restart=40, trunc_norm=1, max_full_size=50, verb=1): """ Approximate linear system solution in the tensor-train (TT) format using Alternating minimal energy (AMEN approach) :References: Sergey Dolgov, Dmitry. Savostyanov Paper 1: http://arxiv.org/abs/1301.6068 Paper 2: http://arxiv.org/abs/1304.1222 :param A: Matrix in the TT-format :type A: matrix :param f: Right-hand side in the TT-format :type f: tensor :param x0: TT-tensor of initial guess. :type x0: tensor :param eps: Accuracy. :type eps: float :param kickrank: compression rank of the residual Z, i.e. enrichment size [4] :param nswp: maximal number of sweeps [50] :param local_prec: local preconditioner: '' (no prec.), 'ljacobi', 'cjacobi', 'rjacobi' [''] :param local_iters: dimension of local gmres [40] :param local_restart: dimension of local gmres [40] :param trunc_norm: truncate in either Frob. ('fro'), or residual norm ('residual') ['residual'] :param max_full_size: maximal size of the local matrix for the full solver [50] :param verb: 0 -- no info output, 1 -- print info output :Example: >>> import tt >>> import tt.amen #Needed, not imported automatically >>> a = tt.qlaplace_dd([8, 8, 8]) #3D-Laplacian >>> rhs = tt.ones(2, 3 * 8) #Right-hand side of all ones >>> x = tt.amen.amen_solve(a, rhs, rhs, 1e-8) amen_solve: swp=1, max_dx= 9.766E-01, max_res= 3.269E+00, max_rank=5 amen_solve: swp=2, max_dx= 4.293E-01, max_res= 8.335E+00, max_rank=9 amen_solve: swp=3, max_dx= 1.135E-01, max_res= 5.341E+00, max_rank=13 amen_solve: swp=4, max_dx= 9.032E-03, max_res= 5.908E-01, max_rank=17 amen_solve: swp=5, max_dx= 9.500E-04, max_res= 7.636E-02, max_rank=21 amen_solve: swp=6, max_dx= 4.002E-05, max_res= 5.573E-03, max_rank=25 amen_solve: swp=7, max_dx= 4.949E-06, max_res= 8.418E-04, max_rank=29 amen_solve: swp=8, max_dx= 9.618E-07, max_res= 2.599E-04, max_rank=33 amen_solve: swp=9, max_dx= 2.792E-07, max_res= 6.336E-05, max_rank=37 amen_solve: swp=10, max_dx= 4.730E-08, max_res= 1.663E-05, max_rank=41 amen_solve: swp=11, max_dx= 1.508E-08, max_res= 5.463E-06, max_rank=45 amen_solve: swp=12, max_dx= 3.771E-09, max_res= 1.847E-06, max_rank=49 amen_solve: swp=13, max_dx= 7.797E-10, max_res= 6.203E-07, max_rank=53 amen_solve: swp=14, max_dx= 1.747E-10, max_res= 2.058E-07, max_rank=57 amen_solve: swp=15, max_dx= 8.150E-11, max_res= 8.555E-08, max_rank=61 amen_solve: swp=16, max_dx= 2.399E-11, max_res= 4.215E-08, max_rank=65 amen_solve: swp=17, max_dx= 7.871E-12, max_res= 1.341E-08, max_rank=69 amen_solve: swp=18, max_dx= 3.053E-12, max_res= 6.982E-09, max_rank=73 >>> print (tt.matvec(a, x) - rhs).norm() / rhs.norm() 5.5152374305127345e-09 """ m = A.m.copy() rx0 = x0.r.copy() psx0 = x0.ps.copy() if A.is_complex or f.is_complex: amen_f90.amen_f90.ztt_amen_wrapper(f.d, A.n, m, A.tt.r, A.tt.ps, A.tt.core, f.r, f.ps, f.core, rx0, psx0, x0.core, eps, kickrank, nswp, local_iters, local_restart, trunc_norm, max_full_size, verb, local_prec) else: if x0.is_complex: x0 = x0.real() rx0 = x0.r.copy() psx0 = x0.ps.copy() amen_f90.amen_f90.dtt_amen_wrapper(f.d, A.n, m, A.tt.r, A.tt.ps, A.tt.core, f.r, f.ps, f.core, rx0, psx0, x0.core, eps, kickrank, nswp, local_iters, local_restart, trunc_norm, max_full_size, verb, local_prec) x = tt.tensor() x.d = f.d x.n = m.copy() x.r = rx0 if A.is_complex or f.is_complex: x.core = amen_f90.amen_f90.zcore.copy() else: x.core = amen_f90.amen_f90.core.copy() amen_f90.amen_f90.deallocate_result() x.get_ps() return 
tt.vector.from_list(tt.tensor.to_list(x))
python
def amen_solve(A, f, x0, eps, kickrank=4, nswp=20, local_prec='n', local_iters=2, local_restart=40, trunc_norm=1, max_full_size=50, verb=1): """ Approximate linear system solution in the tensor-train (TT) format using Alternating minimal energy (AMEN approach) :References: Sergey Dolgov, Dmitry. Savostyanov Paper 1: http://arxiv.org/abs/1301.6068 Paper 2: http://arxiv.org/abs/1304.1222 :param A: Matrix in the TT-format :type A: matrix :param f: Right-hand side in the TT-format :type f: tensor :param x0: TT-tensor of initial guess. :type x0: tensor :param eps: Accuracy. :type eps: float :param kickrank: compression rank of the residual Z, i.e. enrichment size [4] :param nswp: maximal number of sweeps [50] :param local_prec: local preconditioner: '' (no prec.), 'ljacobi', 'cjacobi', 'rjacobi' [''] :param local_iters: dimension of local gmres [40] :param local_restart: dimension of local gmres [40] :param trunc_norm: truncate in either Frob. ('fro'), or residual norm ('residual') ['residual'] :param max_full_size: maximal size of the local matrix for the full solver [50] :param verb: 0 -- no info output, 1 -- print info output :Example: >>> import tt >>> import tt.amen #Needed, not imported automatically >>> a = tt.qlaplace_dd([8, 8, 8]) #3D-Laplacian >>> rhs = tt.ones(2, 3 * 8) #Right-hand side of all ones >>> x = tt.amen.amen_solve(a, rhs, rhs, 1e-8) amen_solve: swp=1, max_dx= 9.766E-01, max_res= 3.269E+00, max_rank=5 amen_solve: swp=2, max_dx= 4.293E-01, max_res= 8.335E+00, max_rank=9 amen_solve: swp=3, max_dx= 1.135E-01, max_res= 5.341E+00, max_rank=13 amen_solve: swp=4, max_dx= 9.032E-03, max_res= 5.908E-01, max_rank=17 amen_solve: swp=5, max_dx= 9.500E-04, max_res= 7.636E-02, max_rank=21 amen_solve: swp=6, max_dx= 4.002E-05, max_res= 5.573E-03, max_rank=25 amen_solve: swp=7, max_dx= 4.949E-06, max_res= 8.418E-04, max_rank=29 amen_solve: swp=8, max_dx= 9.618E-07, max_res= 2.599E-04, max_rank=33 amen_solve: swp=9, max_dx= 2.792E-07, max_res= 6.336E-05, max_rank=37 amen_solve: swp=10, max_dx= 4.730E-08, max_res= 1.663E-05, max_rank=41 amen_solve: swp=11, max_dx= 1.508E-08, max_res= 5.463E-06, max_rank=45 amen_solve: swp=12, max_dx= 3.771E-09, max_res= 1.847E-06, max_rank=49 amen_solve: swp=13, max_dx= 7.797E-10, max_res= 6.203E-07, max_rank=53 amen_solve: swp=14, max_dx= 1.747E-10, max_res= 2.058E-07, max_rank=57 amen_solve: swp=15, max_dx= 8.150E-11, max_res= 8.555E-08, max_rank=61 amen_solve: swp=16, max_dx= 2.399E-11, max_res= 4.215E-08, max_rank=65 amen_solve: swp=17, max_dx= 7.871E-12, max_res= 1.341E-08, max_rank=69 amen_solve: swp=18, max_dx= 3.053E-12, max_res= 6.982E-09, max_rank=73 >>> print (tt.matvec(a, x) - rhs).norm() / rhs.norm() 5.5152374305127345e-09 """ m = A.m.copy() rx0 = x0.r.copy() psx0 = x0.ps.copy() if A.is_complex or f.is_complex: amen_f90.amen_f90.ztt_amen_wrapper(f.d, A.n, m, A.tt.r, A.tt.ps, A.tt.core, f.r, f.ps, f.core, rx0, psx0, x0.core, eps, kickrank, nswp, local_iters, local_restart, trunc_norm, max_full_size, verb, local_prec) else: if x0.is_complex: x0 = x0.real() rx0 = x0.r.copy() psx0 = x0.ps.copy() amen_f90.amen_f90.dtt_amen_wrapper(f.d, A.n, m, A.tt.r, A.tt.ps, A.tt.core, f.r, f.ps, f.core, rx0, psx0, x0.core, eps, kickrank, nswp, local_iters, local_restart, trunc_norm, max_full_size, verb, local_prec) x = tt.tensor() x.d = f.d x.n = m.copy() x.r = rx0 if A.is_complex or f.is_complex: x.core = amen_f90.amen_f90.zcore.copy() else: x.core = amen_f90.amen_f90.core.copy() amen_f90.amen_f90.deallocate_result() x.get_ps() return 
tt.vector.from_list(tt.tensor.to_list(x))
Approximate linear system solution in the tensor-train (TT) format using Alternating minimal energy (AMEN approach) :References: Sergey Dolgov, Dmitry. Savostyanov Paper 1: http://arxiv.org/abs/1301.6068 Paper 2: http://arxiv.org/abs/1304.1222 :param A: Matrix in the TT-format :type A: matrix :param f: Right-hand side in the TT-format :type f: tensor :param x0: TT-tensor of initial guess. :type x0: tensor :param eps: Accuracy. :type eps: float :param kickrank: compression rank of the residual Z, i.e. enrichment size [4] :param nswp: maximal number of sweeps [50] :param local_prec: local preconditioner: '' (no prec.), 'ljacobi', 'cjacobi', 'rjacobi' [''] :param local_iters: dimension of local gmres [40] :param local_restart: dimension of local gmres [40] :param trunc_norm: truncate in either Frob. ('fro'), or residual norm ('residual') ['residual'] :param max_full_size: maximal size of the local matrix for the full solver [50] :param verb: 0 -- no info output, 1 -- print info output :Example: >>> import tt >>> import tt.amen #Needed, not imported automatically >>> a = tt.qlaplace_dd([8, 8, 8]) #3D-Laplacian >>> rhs = tt.ones(2, 3 * 8) #Right-hand side of all ones >>> x = tt.amen.amen_solve(a, rhs, rhs, 1e-8) amen_solve: swp=1, max_dx= 9.766E-01, max_res= 3.269E+00, max_rank=5 amen_solve: swp=2, max_dx= 4.293E-01, max_res= 8.335E+00, max_rank=9 amen_solve: swp=3, max_dx= 1.135E-01, max_res= 5.341E+00, max_rank=13 amen_solve: swp=4, max_dx= 9.032E-03, max_res= 5.908E-01, max_rank=17 amen_solve: swp=5, max_dx= 9.500E-04, max_res= 7.636E-02, max_rank=21 amen_solve: swp=6, max_dx= 4.002E-05, max_res= 5.573E-03, max_rank=25 amen_solve: swp=7, max_dx= 4.949E-06, max_res= 8.418E-04, max_rank=29 amen_solve: swp=8, max_dx= 9.618E-07, max_res= 2.599E-04, max_rank=33 amen_solve: swp=9, max_dx= 2.792E-07, max_res= 6.336E-05, max_rank=37 amen_solve: swp=10, max_dx= 4.730E-08, max_res= 1.663E-05, max_rank=41 amen_solve: swp=11, max_dx= 1.508E-08, max_res= 5.463E-06, max_rank=45 amen_solve: swp=12, max_dx= 3.771E-09, max_res= 1.847E-06, max_rank=49 amen_solve: swp=13, max_dx= 7.797E-10, max_res= 6.203E-07, max_rank=53 amen_solve: swp=14, max_dx= 1.747E-10, max_res= 2.058E-07, max_rank=57 amen_solve: swp=15, max_dx= 8.150E-11, max_res= 8.555E-08, max_rank=61 amen_solve: swp=16, max_dx= 2.399E-11, max_res= 4.215E-08, max_rank=65 amen_solve: swp=17, max_dx= 7.871E-12, max_res= 1.341E-08, max_rank=69 amen_solve: swp=18, max_dx= 3.053E-12, max_res= 6.982E-09, max_rank=73 >>> print (tt.matvec(a, x) - rhs).norm() / rhs.norm() 5.5152374305127345e-09
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/amen/__init__.py#L7-L92
oseledets/ttpy
tt/core/matrix.py
matrix.T
def T(self):
    """Transposed TT-matrix"""
    mycrs = matrix.to_list(self)
    trans_crs = []
    for cr in mycrs:
        trans_crs.append(_np.transpose(cr, [0, 2, 1, 3]))
    return matrix.from_list(trans_crs)
python
def T(self):
    """Transposed TT-matrix"""
    mycrs = matrix.to_list(self)
    trans_crs = []
    for cr in mycrs:
        trans_crs.append(_np.transpose(cr, [0, 2, 1, 3]))
    return matrix.from_list(trans_crs)
Transposed TT-matrix
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/matrix.py#L126-L132
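A quick consistency check one might run (added for illustration): transposing in the TT format should match transposing the densified matrix. Building a random TT-matrix via tt.matrix(a=vector, n=..., m=...) is an assumption here, mirroring how c2r constructs matrices further below.

import numpy as np
import tt

# 8 x 8 random TT-matrix built from a random TT-vector with mode sizes 4 = 2 * 2.
a = tt.matrix(a=tt.rand(4, 3, 2), n=[2, 2, 2], m=[2, 2, 2])
print(np.allclose(a.T.full(), a.full().T))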
oseledets/ttpy
tt/core/matrix.py
matrix.real
def real(self):
    """Return real part of a matrix."""
    return matrix(self.tt.real(), n=self.n, m=self.m)
python
def real(self):
    """Return real part of a matrix."""
    return matrix(self.tt.real(), n=self.n, m=self.m)
Return real part of a matrix.
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/matrix.py#L134-L136
oseledets/ttpy
tt/core/matrix.py
matrix.imag
def imag(self):
    """Return imaginary part of a matrix."""
    return matrix(self.tt.imag(), n=self.n, m=self.m)
python
def imag(self):
    """Return imaginary part of a matrix."""
    return matrix(self.tt.imag(), n=self.n, m=self.m)
Return imaginary part of a matrix.
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/matrix.py#L138-L140
oseledets/ttpy
tt/core/matrix.py
matrix.c2r
def c2r(self):
    """Get real matrix from a complex one, suitable for solving a complex linear system with a real solver.

    For matrix :math:`M(i_1,j_1,\\ldots,i_d,j_d) = \\Re M + i\\Im M` returns the (d+1)-dimensional matrix
    :math:`\\tilde{M}(i_1,j_1,\\ldots,i_d,j_d,i_{d+1},j_{d+1})` of the form
    :math:`\\begin{bmatrix}\\Re M & -\\Im M \\\\ \\Im M & \\Re M \\end{bmatrix}`.

    This function is useful for solving the complex linear system :math:`\\mathcal{A}X = B` with a real solver
    by transforming it into

    .. math::
       \\begin{bmatrix}\\Re\\mathcal{A} & -\\Im\\mathcal{A} \\\\
                       \\Im\\mathcal{A} &  \\Re\\mathcal{A} \\end{bmatrix}
       \\begin{bmatrix}\\Re X \\\\ \\Im X\\end{bmatrix} =
       \\begin{bmatrix}\\Re B \\\\ \\Im B\\end{bmatrix}.
    """
    return matrix(a=self.tt.__complex_op('M'), n=_np.concatenate((self.n, [2])),
                  m=_np.concatenate((self.m, [2])))
python
def c2r(self):
    """Get real matrix from a complex one, suitable for solving a complex linear system with a real solver.

    For matrix :math:`M(i_1,j_1,\\ldots,i_d,j_d) = \\Re M + i\\Im M` returns the (d+1)-dimensional matrix
    :math:`\\tilde{M}(i_1,j_1,\\ldots,i_d,j_d,i_{d+1},j_{d+1})` of the form
    :math:`\\begin{bmatrix}\\Re M & -\\Im M \\\\ \\Im M & \\Re M \\end{bmatrix}`.

    This function is useful for solving the complex linear system :math:`\\mathcal{A}X = B` with a real solver
    by transforming it into

    .. math::
       \\begin{bmatrix}\\Re\\mathcal{A} & -\\Im\\mathcal{A} \\\\
                       \\Im\\mathcal{A} &  \\Re\\mathcal{A} \\end{bmatrix}
       \\begin{bmatrix}\\Re X \\\\ \\Im X\\end{bmatrix} =
       \\begin{bmatrix}\\Re B \\\\ \\Im B\\end{bmatrix}.
    """
    return matrix(a=self.tt.__complex_op('M'), n=_np.concatenate((self.n, [2])),
                  m=_np.concatenate((self.m, [2])))
Get real matrix from complex one suitable for solving complex linear system with real solver. For matrix :math:`M(i_1,j_1,\\ldots,i_d,j_d) = \\Re M + i\\Im M` returns (d+1)-dimensional matrix :math:`\\tilde{M}(i_1,j_1,\\ldots,i_d,j_d,i_{d+1},j_{d+1})` of form :math:`\\begin{bmatrix}\\Re M & -\\Im M \\\\ \\Im M & \\Re M \\end{bmatrix}`. This function is useful for solving complex linear system :math:`\\mathcal{A}X = B` with real solver by transforming it into .. math:: \\begin{bmatrix}\\Re\\mathcal{A} & -\\Im\\mathcal{A} \\\\ \\Im\\mathcal{A} & \\Re\\mathcal{A} \\end{bmatrix} \\begin{bmatrix}\\Re X \\\\ \\Im X\\end{bmatrix} = \\begin{bmatrix}\\Re B \\\\ \\Im B\\end{bmatrix}.
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/matrix.py#L142-L159
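The block identity behind c2r can be checked in plain NumPy, independently of the TT machinery (illustration only, not the library API):

import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(4, 4) + 1j * rng.randn(4, 4)
b = rng.randn(4) + 1j * rng.randn(4)
x = np.linalg.solve(A, b)

# Real-equivalent system [[Re A, -Im A], [Im A, Re A]] [Re x; Im x] = [Re b; Im b].
A_real = np.block([[A.real, -A.imag], [A.imag, A.real]])
b_real = np.concatenate([b.real, b.imag])
x_real = np.linalg.solve(A_real, b_real)
print(np.allclose(x_real[:4] + 1j * x_real[4:], x))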
oseledets/ttpy
tt/core/matrix.py
matrix.round
def round(self, eps=1e-14, rmax=100000):
    """ Computes an approximation to a TT-matrix with accuracy EPS """
    c = matrix()
    c.tt = self.tt.round(eps, rmax)
    c.n = self.n.copy()
    c.m = self.m.copy()
    return c
python
def round(self, eps=1e-14, rmax=100000):
    """ Computes an approximation to a TT-matrix with accuracy EPS """
    c = matrix()
    c.tt = self.tt.round(eps, rmax)
    c.n = self.n.copy()
    c.m = self.m.copy()
    return c
Computes an approximation to a TT-matrix with accuracy EPS
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/matrix.py#L317-L325
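Rounding is typically used to recompress TT-ranks after arithmetic that inflates them. A short hypothetical example; tt.qlaplace_dd is used as in the amen_solve example above, and addition of TT-matrices is assumed to behave as for TT-vectors.

import tt

a = tt.qlaplace_dd([5])      # QTT Laplacian, d = 5
b = a + a                    # TT-ranks roughly double
c = b.round(1e-12)           # compress back
print(max(b.tt.r), max(c.tt.r))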
oseledets/ttpy
tt/core/matrix.py
matrix.copy
def copy(self):
    """ Creates a copy of the TT-matrix """
    c = matrix()
    c.tt = self.tt.copy()
    c.n = self.n.copy()
    c.m = self.m.copy()
    return c
python
def copy(self):
    """ Creates a copy of the TT-matrix """
    c = matrix()
    c.tt = self.tt.copy()
    c.n = self.n.copy()
    c.m = self.m.copy()
    return c
Creates a copy of the TT-matrix
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/matrix.py#L327-L333
oseledets/ttpy
tt/core/matrix.py
matrix.full
def full(self):
    """ Transforms a TT-matrix into a full matrix """
    N = self.n.prod()
    M = self.m.prod()
    a = self.tt.full()
    d = self.tt.d
    sz = _np.vstack((self.n, self.m)).flatten('F')
    a = a.reshape(sz, order='F')
    # Design a permutation
    prm = _np.arange(2 * d)
    prm = prm.reshape((d, 2), order='F')
    prm = prm.transpose()
    prm = prm.flatten('F')
    # Get the inverse permutation
    iprm = [0] * (2 * d)
    for i in xrange(2 * d):
        iprm[prm[i]] = i
    a = a.transpose(iprm).reshape(N, M, order='F')
    a = a.reshape(N, M)
    return a
python
def full(self):
    """ Transforms a TT-matrix into a full matrix """
    N = self.n.prod()
    M = self.m.prod()
    a = self.tt.full()
    d = self.tt.d
    sz = _np.vstack((self.n, self.m)).flatten('F')
    a = a.reshape(sz, order='F')
    # Design a permutation
    prm = _np.arange(2 * d)
    prm = prm.reshape((d, 2), order='F')
    prm = prm.transpose()
    prm = prm.flatten('F')
    # Get the inverse permutation
    iprm = [0] * (2 * d)
    for i in xrange(2 * d):
        iprm[prm[i]] = i
    a = a.transpose(iprm).reshape(N, M, order='F')
    a = a.reshape(N, M)
    return a
Transforms a TT-matrix into a full matrix
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/matrix.py#L355-L374
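A tiny sanity check (added): full() materializes the whole matrix, so it is only reasonable for small mode sizes. tt.qlaplace_dd is used as in the examples above.

import numpy as np
import tt

a = tt.qlaplace_dd([3])      # 1D QTT Laplacian on a 2**3 grid
A = a.full()
print(A.shape)               # expected (8, 8)
print(np.allclose(A, A.T))   # the discrete Laplacian is symmetric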
oseledets/ttpy
tt/amen/amen_mv.py
_svdgram
def _svdgram(A, tol=None, tol2=1e-7): ''' Highly experimental acceleration of SVD/QR using Gram matrix. Use with caution for m>>n only! function [u,s,r]=_svdgram(A,[tol]) u is the left singular factor of A, s is the singular values (vector!), r has the meaning of diag(s)*v'. if tol is given, performs the truncation with Fro-threshold. ''' R2 = _np.dot(_tconj(A), A) [u, s, vt] = _np.linalg.svd(R2, full_matrices=False) u = _np.dot(A, _tconj(vt)) s = (u**2).sum(axis=0) s = s ** 0.5 if tol is not None: p = _my_chop2(s, _np.linalg.norm(s) * tol) u = u[:, :p] s = s[:p] vt = vt[:p, :] tmp = _spdiags(1. / s, 0, len(s), len(s)).todense() tmp = _np.array(tmp) u = _np.dot(u, tmp) r = _np.dot(_np.diag(s), vt) # Run chol for reortogonalization. # It will stop if the matrix will be singular. # Fortunately, it means rank truncation with eps in our business. if (s[0] / s[-1] > 1. / tol2): p = 1 while (p > 0): R2 = _np.dot(_tconj(u), u) #[u_r2, s_r2, vt_r2] = _np.linalg.svd(R2) # in matlab [R, p] = chol(a) - here is a *dirty* patch #p = s_r2[s_r2 > eps].size #R2 = R2[:p, :p] R = _cholesky(R2, lower=True) if (p > 0): u = u[:, :p] s = s[:p] r = r[:p, :] iR = _np.linalg.inv(R) u = _np.dot(u, iR) r = _np.dot(R, r) return u, s, r
python
def _svdgram(A, tol=None, tol2=1e-7): ''' Highly experimental acceleration of SVD/QR using Gram matrix. Use with caution for m>>n only! function [u,s,r]=_svdgram(A,[tol]) u is the left singular factor of A, s is the singular values (vector!), r has the meaning of diag(s)*v'. if tol is given, performs the truncation with Fro-threshold. ''' R2 = _np.dot(_tconj(A), A) [u, s, vt] = _np.linalg.svd(R2, full_matrices=False) u = _np.dot(A, _tconj(vt)) s = (u**2).sum(axis=0) s = s ** 0.5 if tol is not None: p = _my_chop2(s, _np.linalg.norm(s) * tol) u = u[:, :p] s = s[:p] vt = vt[:p, :] tmp = _spdiags(1. / s, 0, len(s), len(s)).todense() tmp = _np.array(tmp) u = _np.dot(u, tmp) r = _np.dot(_np.diag(s), vt) # Run chol for reortogonalization. # It will stop if the matrix will be singular. # Fortunately, it means rank truncation with eps in our business. if (s[0] / s[-1] > 1. / tol2): p = 1 while (p > 0): R2 = _np.dot(_tconj(u), u) #[u_r2, s_r2, vt_r2] = _np.linalg.svd(R2) # in matlab [R, p] = chol(a) - here is a *dirty* patch #p = s_r2[s_r2 > eps].size #R2 = R2[:p, :p] R = _cholesky(R2, lower=True) if (p > 0): u = u[:, :p] s = s[:p] r = r[:p, :] iR = _np.linalg.inv(R) u = _np.dot(u, iR) r = _np.dot(R, r) return u, s, r
Highly experimental acceleration of SVD/QR using Gram matrix. Use with caution for m>>n only! function [u,s,r]=_svdgram(A,[tol]) u is the left singular factor of A, s is the singular values (vector!), r has the meaning of diag(s)*v'. if tol is given, performs the truncation with Fro-threshold.
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/amen/amen_mv.py#L29-L74
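Because _svdgram is a private helper of tt/amen/amen_mv.py, the sketch below restates the underlying trick in plain NumPy instead of importing it: for a tall A (m >> n) the right singular vectors come from the small n-by-n Gram matrix A^H A, and the left factor follows from one extra matrix product. All names here are local to the example.

import numpy as np

def gram_svd_sketch(A):
    # Left factor of A from the n x n Gram matrix; no m x m decomposition is formed.
    G = A.conj().T.dot(A)                      # Gram matrix, n x n
    _, _, vt = np.linalg.svd(G)                # rows of vt = right singular vectors of A
    U = A.dot(vt.conj().T)                     # unnormalized left factor, columns ~ u_i * s_i
    s = np.sqrt((np.abs(U) ** 2).sum(axis=0))  # singular values of A
    U = U / s                                  # normalize columns
    r = np.dot(np.diag(s), vt)                 # r = diag(s) * v', as in the docstring above
    return U, s, r

A = np.random.rand(2000, 10)
U, s, r = gram_svd_sketch(A)
print(np.linalg.norm(A - U.dot(r)) / np.linalg.norm(A))   # ~1e-15 for a well-conditioned A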
oseledets/ttpy
tt/amen/amen_mv.py
amen_mv
def amen_mv(A, x, tol, y=None, z=None, nswp=20, kickrank=4, kickrank2=0, verb=True, init_qr=True, renorm='direct', fkick=False): ''' Approximate the matrix-by-vector via the AMEn iteration [y,z]=amen_mv(A, x, tol, varargin) Attempts to approximate the y = A*x with accuracy TOL using the AMEn+ALS iteration. Matrix A has to be given in the TT-format, right-hand side x should be given in the TT-format also. Options are provided in form 'PropertyName1',PropertyValue1,'PropertyName2',PropertyValue2 and so on. The parameters are set to default (in brackets in the following) The list of option names and default values are: o y0 - initial approximation to Ax [rand rank-2] o nswp - maximal number of sweeps [20] o verb - verbosity level, 0-silent, 1-sweep info, 2-block info [1] o kickrank - compression rank of the error, i.e. enrichment size [3] o init_qr - perform QR of the input (save some time in ts, etc) [true] o renorm - Orthog. and truncation methods: direct (svd,qr) or gram (apply svd to the gram matrix, faster for m>>n) [direct] o fkick - Perform solution enrichment during forward sweeps [false] (rather questionable yet; false makes error higher, but "better structured": it does not explode in e.g. subsequent matvecs) o z0 - initial approximation to the error Ax-y [rand rank-kickrank] ******** For description of adaptive ALS please see Sergey V. Dolgov, Dmitry V. Savostyanov, Alternating minimal energy methods for linear systems in higher dimensions. Part I: SPD systems, http://arxiv.org/abs/1301.6068, Part II: Faster algorithm and application to nonsymmetric systems, http://arxiv.org/abs/1304.1222 Use {sergey.v.dolgov, dmitry.savostyanov}@gmail.com for feedback ******** ''' if renorm is 'gram': print("Not implemented yet. Renorm is switched to 'direct'") renorm = 'direct' if isinstance(x, _tt.vector): d = x.d m = x.n rx = x.r x = _tt.vector.to_list(x) vectype = 1 # tt_tensor elif isinstance(x, list): d = len(x) m = _np.zeros(d) rx = _np.ones(d + 1, dtype=_np.int32) for i in xrange(d): [_, m[i], rx[i + 1]] = x[i].shape vectype = 0 # cell else: raise Exception('x: use tt.tensor or list of cores as numpy.arrays') if isinstance(A, _tt.matrix): n = A.n ra = A.tt.r A = _tt.matrix.to_list(A) # prepare A for fast ALS-mv for i in xrange(d): A[i] = _reshape(A[i], (ra[i] * n[i], m[i] * ra[i + 1])) atype = 1 # tt_matrix # Alternative: A is a cell of cell: sparse canonical format elif isinstance(A, list): n = _np.zeros(d) for i in xrange(d): n[i] = A[i][0].shape[0] ra = len(A[0]) atype = 0 # cell else: raise Exception('A: use tt.matrix or list of cores as numpy.arrays') if y is None: y = _tt.rand(n, d, 2) y = _tt.vector.to_list(y) else: if isinstance(y, _tt.vector): y = _tt.vector.to_list(y) ry = _np.ones(d + 1, dtype=_np.int32) for i in range(d): ry[i + 1] = y[i].shape[2] if (kickrank + kickrank2 > 0): if z is None: z = _tt.rand(n, d, kickrank + kickrank2) rz = z.r z = _tt.vector.to_list(z) else: if isinstance(z, _tt.vector): z = _tt.vector.to_list(z) rz = _np.ones(d + 1, dtype=_np.int32) for i in range(d): rz[i + 1] = z[i].shape[2] phizax = [None] * (d + 1) # cell(d+1,1); if (atype == 1): phizax[0] = _np.ones((1, 1, 1)) # 1 phizax[d] = _np.ones((1, 1, 1)) # 1 else: phizax[0] = _np.ones((1, ra)) # 33 phizax[d] = _np.ones((1, ra)) phizy = [None] * (d + 1) phizy[0] = _np.ones((1)) # , 1)) phizy[d] = _np.ones((1)) # , 1)) phiyax = [None] * (d + 1) if (atype == 1): phiyax[0] = _np.ones((1, 1, 1)) # 1 phiyax[d] = _np.ones((1, 1, 1)) # 1 else: phiyax[0] = _np.ones((1, ra)) # 3 phiyax[d] = _np.ones((1, ra)) 
nrms = _np.ones(d) # Initial ort for i in range(d - 1): if init_qr: cr = _reshape(y[i], (ry[i] * n[i], ry[i + 1])) if (renorm is 'gram') and (ry[i] * n[i] > 5 * ry[i + 1]): [cr, s, R] = _svdgram(cr) else: [cr, R] = _np.linalg.qr(cr) nrmr = _np.linalg.norm(R) # , 'fro') if (nrmr > 0): R = R / nrmr cr2 = _reshape(y[i + 1], (ry[i + 1], n[i + 1] * ry[i + 2])) cr2 = _np.dot(R, cr2) ry[i + 1] = cr.shape[1] y[i] = _reshape(cr, (ry[i], n[i], ry[i + 1])) y[i + 1] = _reshape(cr2, (ry[i + 1], n[i + 1], ry[i + 2])) [phiyax[i + 1], nrms[i] ] = _compute_next_Phi(phiyax[i], y[i], x[i], 'lr', A[i]) if (kickrank + kickrank2 > 0): cr = _reshape(z[i], (rz[i] * n[i], rz[i + 1])) if (renorm == 'gram') and (rz[i] * n[i] > 5 * rz[i + 1]): [cr, s, R] = _svdgram(cr) else: [cr, R] = _np.linalg.qr(cr) nrmr = _np.linalg.norm(R) # , 'fro') if (nrmr > 0): R = R / nrmr cr2 = _reshape(z[i + 1], (rz[i + 1], n[i + 1] * rz[i + 2])) cr2 = _np.dot(R, cr2) rz[i + 1] = cr.shape[1] z[i] = _reshape(cr, (rz[i], n[i], rz[i + 1])) z[i + 1] = _reshape(cr2, (rz[i + 1], n[i + 1], rz[i + 2])) phizax[ i + 1] = _compute_next_Phi( phizax[i], z[i], x[i], 'lr', A[i], nrms[i], return_norm=False) phizy[ i + 1] = _compute_next_Phi( phizy[i], z[i], y[i], 'lr', return_norm=False) i = d - 1 direct = -1 swp = 1 max_dx = 0 while swp <= nswp: # Project the MatVec generating vector crx = _reshape(x[i], (rx[i] * m[i] * rx[i + 1], 1)) cry = _bfun3(phiyax[i], A[i], phiyax[i + 1], crx) nrms[i] = _np.linalg.norm(cry) # , 'fro') # The main goal is to keep y[i] of norm 1 if (nrms[i] > 0): cry = cry / nrms[i] else: nrms[i] = 1 y[i] = _reshape(y[i], (ry[i] * n[i] * ry[i + 1], 1)) dx = _np.linalg.norm(cry - y[i]) max_dx = max(max_dx, dx) # Truncation and enrichment if ((direct > 0) and (i < d - 1)): # ?? i<d cry = _reshape(cry, (ry[i] * n[i], ry[i + 1])) if (renorm == 'gram'): [u, s, v] = _svdgram(cry, tol / d**0.5) v = v.T r = u.shape[1] else: [u, s, vt] = _np.linalg.svd(cry, full_matrices=False) #s = diag(s) r = _my_chop2(s, tol * _np.linalg.norm(s) / d**0.5) u = u[:, :r] # ????? 
s - matrix or vector v = _np.dot(_tconj(vt[:r, :]), _np.diag(s[:r])) # Prepare enrichment, if needed if (kickrank + kickrank2 > 0): cry = _np.dot(u, v.T) cry = _reshape(cry, (ry[i] * n[i], ry[i + 1])) # For updating z crz = _bfun3(phizax[i], A[i], phizax[i + 1], crx) crz = _reshape(crz, (rz[i] * n[i], rz[i + 1])) ys = _np.dot(cry, phizy[i + 1]) yz = _reshape(ys, (ry[i], n[i] * rz[i + 1])) yz = _np.dot(phizy[i], yz) yz = _reshape(yz, (rz[i] * n[i], rz[i + 1])) crz = crz / nrms[i] - yz nrmz = _np.linalg.norm(crz) # , 'fro') if (kickrank2 > 0): [crz, _, _] = _np.linalg.svd(crz, full_matrices=False) crz = crz[:, : min(crz.shape[1], kickrank)] crz = _np.hstack( (crz, _np.random.randn( rz[i] * n[i], kickrank2))) # For adding into solution if fkick: crs = _bfun3(phiyax[i], A[i], phizax[i + 1], crx) crs = _reshape(crs, (ry[i] * n[i], rz[i + 1])) crs = crs / nrms[i] - ys u = _np.hstack((u, crs)) if (renorm == 'gram') and ( ry[i] * n[i] > 5 * (ry[i + 1] + rz[i + 1])): [u, s, R] = _svdgram(u) else: [u, R] = _np.linalg.qr(u) v = _np.hstack((v, _np.zeros((ry[i + 1], rz[i + 1])))) v = _np.dot(v, R.T) r = u.shape[1] y[i] = _reshape(u, (ry[i], n[i], r)) cr2 = _reshape(y[i + 1], (ry[i + 1], n[i + 1] * ry[i + 2])) v = _reshape(v, (ry[i + 1], r)) cr2 = _np.dot(v.T, cr2) y[i + 1] = _reshape(cr2, (r, n[i + 1], ry[i + 2])) ry[i + 1] = r [phiyax[i + 1], nrms[i] ] = _compute_next_Phi(phiyax[i], y[i], x[i], 'lr', A[i]) if (kickrank + kickrank2 > 0): if (renorm == 'gram') and (rz[i] * n[i] > 5 * rz[i + 1]): [crz, s, R] = _svdgram(crz) else: [crz, R] = _np.linalg.qr(crz) rz[i + 1] = crz.shape[1] z[i] = _reshape(crz, (rz[i], n[i], rz[i + 1])) # z[i+1] will be recomputed from scratch in the next step phizax[ i + 1] = _compute_next_Phi( phizax[i], z[i], x[i], 'lr', A[i], nrms[i], return_norm=False) phizy[ i + 1] = _compute_next_Phi( phizy[i], z[i], y[i], 'lr', return_norm=False) elif ((direct < 0) and (i > 0)): cry = _reshape(cry, (ry[i], n[i] * ry[i + 1])) if (renorm == 'gram'): [v, s, u] = _svdgram(cry.T, tol / d**0.5) u = u.T r = v.shape[1] else: #[v, s, u] = _np.linalg.svd(cry.T, full_matrices=False) [u, s, vt] = _np.linalg.svd(cry, full_matrices=False) #s = diag(s); r = _my_chop2(s, tol * _np.linalg.norm(s) / d**0.5) v = _tconj(vt[:r, :]) #v = vt[:r, :] #v = _np.dot(v[:, :r], _np.diag(s[:r])) u = _np.dot(u[:, :r], _np.diag(s[:r])) # ?????????????????? 
# Prepare enrichment, if needed if (kickrank + kickrank2 > 0): cry = _np.dot(u, v.T) # .T) cry = _reshape(cry, (ry[i], n[i] * ry[i + 1])) # For updating z crz = _bfun3(phizax[i], A[i], phizax[i + 1], crx) crz = _reshape(crz, (rz[i], n[i] * rz[i + 1])) ys = _np.dot(phizy[i], cry) yz = _reshape(ys, (rz[i] * n[i], ry[i + 1])) yz = _np.dot(yz, phizy[i + 1]) yz = _reshape(yz, (rz[i], n[i] * rz[i + 1])) crz = crz / nrms[i] - yz nrmz = _np.linalg.norm(crz) # , 'fro') if (kickrank2 > 0): [_, _, crz] = _np.linalg.svd(crz, full_matrices=False) crz = crz[:, : min(crz.shape[1], kickrank)] crz = _tconj(crz) crz = _np.vstack( (crz, _np.random.randn(kickrank2, n[i] * rz[i + 1]))) # For adding into solution crs = _bfun3(phizax[i], A[i], phiyax[i + 1], crx) crs = _reshape(crs, (rz[i], n[i] * ry[i + 1])) crs = crs / nrms[i] - ys v = _np.hstack((v, crs.T)) # .T #v = v.T if (renorm == 'gram') and ( n[i] * ry[i + 1] > 5 * (ry[i] + rz[i])): [v, s, R] = _svdgram(v) else: [v, R] = _np.linalg.qr(v) u = _np.hstack((u, _np.zeros((ry[i], rz[i])))) u = _np.dot(u, R.T) r = v.shape[1] cr2 = _reshape(y[i - 1], (ry[i - 1] * n[i - 1], ry[i])) cr2 = _np.dot(cr2, u) y[i - 1] = _reshape(cr2, (ry[i - 1], n[i - 1], r)) y[i] = _reshape(v.T, (r, n[i], ry[i + 1])) ry[i] = r [phiyax[i], nrms[i]] = _compute_next_Phi( phiyax[i + 1], y[i], x[i], 'rl', A[i]) if (kickrank + kickrank2 > 0): if (renorm == 'gram') and (n[i] * rz[i + 1] > 5 * rz[i]): [crz, s, R] = _svdgram(crz.T) else: [crz, R] = _np.linalg.qr(crz.T) rz[i] = crz.shape[1] z[i] = _reshape(crz.T, (rz[i], n[i], rz[i + 1])) # don't update z[i-1], it will be recomputed from scratch phizax[i] = _compute_next_Phi( phizax[ i + 1], z[i], x[i], 'rl', A[i], nrms[i], return_norm=False) phizy[i] = _compute_next_Phi( phizy[i + 1], z[i], y[i], 'rl', return_norm=False) if (verb > 1): print('amen-mv: swp=[%d,%d], dx=%.3e, r=%d, |y|=%.3e, |z|=%.3e' % (swp, i, dx, r, _np.linalg.norm(cry), nrmz)) # Stopping or reversing if ((direct > 0) and (i == d - 1)) or ((direct < 0) and (i == 0)): if (verb > 0): print('amen-mv: swp=%d{%d}, max_dx=%.3e, max_r=%d' % (swp, (1 - direct) // 2, max_dx, max(ry))) if ((max_dx < tol) or (swp == nswp)) and (direct > 0): break else: # We are at the terminal block y[i] = _reshape(cry, (ry[i], n[i], ry[i + 1])) if (direct > 0): swp = swp + 1 max_dx = 0 direct = -direct else: i = i + direct # if (direct>0) y[d - 1] = _reshape(cry, (ry[d - 1], n[d - 1], ry[d])) # else # y{1} = reshape(cry, ry(1), n(1), ry(2)); # end; # Distribute norms equally... nrms = _np.exp(sum(_np.log(nrms)) / d) # ... and plug them into y for i in xrange(d): y[i] = _np.dot(y[i], nrms) if (vectype == 1): y = _tt.vector.from_list(y) if kickrank == 0: z = None else: z = _tt.vector.from_list(z) return y, z
python
def amen_mv(A, x, tol, y=None, z=None, nswp=20, kickrank=4, kickrank2=0, verb=True, init_qr=True, renorm='direct', fkick=False): ''' Approximate the matrix-by-vector via the AMEn iteration [y,z]=amen_mv(A, x, tol, varargin) Attempts to approximate the y = A*x with accuracy TOL using the AMEn+ALS iteration. Matrix A has to be given in the TT-format, right-hand side x should be given in the TT-format also. Options are provided in form 'PropertyName1',PropertyValue1,'PropertyName2',PropertyValue2 and so on. The parameters are set to default (in brackets in the following) The list of option names and default values are: o y0 - initial approximation to Ax [rand rank-2] o nswp - maximal number of sweeps [20] o verb - verbosity level, 0-silent, 1-sweep info, 2-block info [1] o kickrank - compression rank of the error, i.e. enrichment size [3] o init_qr - perform QR of the input (save some time in ts, etc) [true] o renorm - Orthog. and truncation methods: direct (svd,qr) or gram (apply svd to the gram matrix, faster for m>>n) [direct] o fkick - Perform solution enrichment during forward sweeps [false] (rather questionable yet; false makes error higher, but "better structured": it does not explode in e.g. subsequent matvecs) o z0 - initial approximation to the error Ax-y [rand rank-kickrank] ******** For description of adaptive ALS please see Sergey V. Dolgov, Dmitry V. Savostyanov, Alternating minimal energy methods for linear systems in higher dimensions. Part I: SPD systems, http://arxiv.org/abs/1301.6068, Part II: Faster algorithm and application to nonsymmetric systems, http://arxiv.org/abs/1304.1222 Use {sergey.v.dolgov, dmitry.savostyanov}@gmail.com for feedback ******** ''' if renorm is 'gram': print("Not implemented yet. Renorm is switched to 'direct'") renorm = 'direct' if isinstance(x, _tt.vector): d = x.d m = x.n rx = x.r x = _tt.vector.to_list(x) vectype = 1 # tt_tensor elif isinstance(x, list): d = len(x) m = _np.zeros(d) rx = _np.ones(d + 1, dtype=_np.int32) for i in xrange(d): [_, m[i], rx[i + 1]] = x[i].shape vectype = 0 # cell else: raise Exception('x: use tt.tensor or list of cores as numpy.arrays') if isinstance(A, _tt.matrix): n = A.n ra = A.tt.r A = _tt.matrix.to_list(A) # prepare A for fast ALS-mv for i in xrange(d): A[i] = _reshape(A[i], (ra[i] * n[i], m[i] * ra[i + 1])) atype = 1 # tt_matrix # Alternative: A is a cell of cell: sparse canonical format elif isinstance(A, list): n = _np.zeros(d) for i in xrange(d): n[i] = A[i][0].shape[0] ra = len(A[0]) atype = 0 # cell else: raise Exception('A: use tt.matrix or list of cores as numpy.arrays') if y is None: y = _tt.rand(n, d, 2) y = _tt.vector.to_list(y) else: if isinstance(y, _tt.vector): y = _tt.vector.to_list(y) ry = _np.ones(d + 1, dtype=_np.int32) for i in range(d): ry[i + 1] = y[i].shape[2] if (kickrank + kickrank2 > 0): if z is None: z = _tt.rand(n, d, kickrank + kickrank2) rz = z.r z = _tt.vector.to_list(z) else: if isinstance(z, _tt.vector): z = _tt.vector.to_list(z) rz = _np.ones(d + 1, dtype=_np.int32) for i in range(d): rz[i + 1] = z[i].shape[2] phizax = [None] * (d + 1) # cell(d+1,1); if (atype == 1): phizax[0] = _np.ones((1, 1, 1)) # 1 phizax[d] = _np.ones((1, 1, 1)) # 1 else: phizax[0] = _np.ones((1, ra)) # 33 phizax[d] = _np.ones((1, ra)) phizy = [None] * (d + 1) phizy[0] = _np.ones((1)) # , 1)) phizy[d] = _np.ones((1)) # , 1)) phiyax = [None] * (d + 1) if (atype == 1): phiyax[0] = _np.ones((1, 1, 1)) # 1 phiyax[d] = _np.ones((1, 1, 1)) # 1 else: phiyax[0] = _np.ones((1, ra)) # 3 phiyax[d] = _np.ones((1, ra)) 
nrms = _np.ones(d) # Initial ort for i in range(d - 1): if init_qr: cr = _reshape(y[i], (ry[i] * n[i], ry[i + 1])) if (renorm is 'gram') and (ry[i] * n[i] > 5 * ry[i + 1]): [cr, s, R] = _svdgram(cr) else: [cr, R] = _np.linalg.qr(cr) nrmr = _np.linalg.norm(R) # , 'fro') if (nrmr > 0): R = R / nrmr cr2 = _reshape(y[i + 1], (ry[i + 1], n[i + 1] * ry[i + 2])) cr2 = _np.dot(R, cr2) ry[i + 1] = cr.shape[1] y[i] = _reshape(cr, (ry[i], n[i], ry[i + 1])) y[i + 1] = _reshape(cr2, (ry[i + 1], n[i + 1], ry[i + 2])) [phiyax[i + 1], nrms[i] ] = _compute_next_Phi(phiyax[i], y[i], x[i], 'lr', A[i]) if (kickrank + kickrank2 > 0): cr = _reshape(z[i], (rz[i] * n[i], rz[i + 1])) if (renorm == 'gram') and (rz[i] * n[i] > 5 * rz[i + 1]): [cr, s, R] = _svdgram(cr) else: [cr, R] = _np.linalg.qr(cr) nrmr = _np.linalg.norm(R) # , 'fro') if (nrmr > 0): R = R / nrmr cr2 = _reshape(z[i + 1], (rz[i + 1], n[i + 1] * rz[i + 2])) cr2 = _np.dot(R, cr2) rz[i + 1] = cr.shape[1] z[i] = _reshape(cr, (rz[i], n[i], rz[i + 1])) z[i + 1] = _reshape(cr2, (rz[i + 1], n[i + 1], rz[i + 2])) phizax[ i + 1] = _compute_next_Phi( phizax[i], z[i], x[i], 'lr', A[i], nrms[i], return_norm=False) phizy[ i + 1] = _compute_next_Phi( phizy[i], z[i], y[i], 'lr', return_norm=False) i = d - 1 direct = -1 swp = 1 max_dx = 0 while swp <= nswp: # Project the MatVec generating vector crx = _reshape(x[i], (rx[i] * m[i] * rx[i + 1], 1)) cry = _bfun3(phiyax[i], A[i], phiyax[i + 1], crx) nrms[i] = _np.linalg.norm(cry) # , 'fro') # The main goal is to keep y[i] of norm 1 if (nrms[i] > 0): cry = cry / nrms[i] else: nrms[i] = 1 y[i] = _reshape(y[i], (ry[i] * n[i] * ry[i + 1], 1)) dx = _np.linalg.norm(cry - y[i]) max_dx = max(max_dx, dx) # Truncation and enrichment if ((direct > 0) and (i < d - 1)): # ?? i<d cry = _reshape(cry, (ry[i] * n[i], ry[i + 1])) if (renorm == 'gram'): [u, s, v] = _svdgram(cry, tol / d**0.5) v = v.T r = u.shape[1] else: [u, s, vt] = _np.linalg.svd(cry, full_matrices=False) #s = diag(s) r = _my_chop2(s, tol * _np.linalg.norm(s) / d**0.5) u = u[:, :r] # ????? 
s - matrix or vector v = _np.dot(_tconj(vt[:r, :]), _np.diag(s[:r])) # Prepare enrichment, if needed if (kickrank + kickrank2 > 0): cry = _np.dot(u, v.T) cry = _reshape(cry, (ry[i] * n[i], ry[i + 1])) # For updating z crz = _bfun3(phizax[i], A[i], phizax[i + 1], crx) crz = _reshape(crz, (rz[i] * n[i], rz[i + 1])) ys = _np.dot(cry, phizy[i + 1]) yz = _reshape(ys, (ry[i], n[i] * rz[i + 1])) yz = _np.dot(phizy[i], yz) yz = _reshape(yz, (rz[i] * n[i], rz[i + 1])) crz = crz / nrms[i] - yz nrmz = _np.linalg.norm(crz) # , 'fro') if (kickrank2 > 0): [crz, _, _] = _np.linalg.svd(crz, full_matrices=False) crz = crz[:, : min(crz.shape[1], kickrank)] crz = _np.hstack( (crz, _np.random.randn( rz[i] * n[i], kickrank2))) # For adding into solution if fkick: crs = _bfun3(phiyax[i], A[i], phizax[i + 1], crx) crs = _reshape(crs, (ry[i] * n[i], rz[i + 1])) crs = crs / nrms[i] - ys u = _np.hstack((u, crs)) if (renorm == 'gram') and ( ry[i] * n[i] > 5 * (ry[i + 1] + rz[i + 1])): [u, s, R] = _svdgram(u) else: [u, R] = _np.linalg.qr(u) v = _np.hstack((v, _np.zeros((ry[i + 1], rz[i + 1])))) v = _np.dot(v, R.T) r = u.shape[1] y[i] = _reshape(u, (ry[i], n[i], r)) cr2 = _reshape(y[i + 1], (ry[i + 1], n[i + 1] * ry[i + 2])) v = _reshape(v, (ry[i + 1], r)) cr2 = _np.dot(v.T, cr2) y[i + 1] = _reshape(cr2, (r, n[i + 1], ry[i + 2])) ry[i + 1] = r [phiyax[i + 1], nrms[i] ] = _compute_next_Phi(phiyax[i], y[i], x[i], 'lr', A[i]) if (kickrank + kickrank2 > 0): if (renorm == 'gram') and (rz[i] * n[i] > 5 * rz[i + 1]): [crz, s, R] = _svdgram(crz) else: [crz, R] = _np.linalg.qr(crz) rz[i + 1] = crz.shape[1] z[i] = _reshape(crz, (rz[i], n[i], rz[i + 1])) # z[i+1] will be recomputed from scratch in the next step phizax[ i + 1] = _compute_next_Phi( phizax[i], z[i], x[i], 'lr', A[i], nrms[i], return_norm=False) phizy[ i + 1] = _compute_next_Phi( phizy[i], z[i], y[i], 'lr', return_norm=False) elif ((direct < 0) and (i > 0)): cry = _reshape(cry, (ry[i], n[i] * ry[i + 1])) if (renorm == 'gram'): [v, s, u] = _svdgram(cry.T, tol / d**0.5) u = u.T r = v.shape[1] else: #[v, s, u] = _np.linalg.svd(cry.T, full_matrices=False) [u, s, vt] = _np.linalg.svd(cry, full_matrices=False) #s = diag(s); r = _my_chop2(s, tol * _np.linalg.norm(s) / d**0.5) v = _tconj(vt[:r, :]) #v = vt[:r, :] #v = _np.dot(v[:, :r], _np.diag(s[:r])) u = _np.dot(u[:, :r], _np.diag(s[:r])) # ?????????????????? 
# Prepare enrichment, if needed if (kickrank + kickrank2 > 0): cry = _np.dot(u, v.T) # .T) cry = _reshape(cry, (ry[i], n[i] * ry[i + 1])) # For updating z crz = _bfun3(phizax[i], A[i], phizax[i + 1], crx) crz = _reshape(crz, (rz[i], n[i] * rz[i + 1])) ys = _np.dot(phizy[i], cry) yz = _reshape(ys, (rz[i] * n[i], ry[i + 1])) yz = _np.dot(yz, phizy[i + 1]) yz = _reshape(yz, (rz[i], n[i] * rz[i + 1])) crz = crz / nrms[i] - yz nrmz = _np.linalg.norm(crz) # , 'fro') if (kickrank2 > 0): [_, _, crz] = _np.linalg.svd(crz, full_matrices=False) crz = crz[:, : min(crz.shape[1], kickrank)] crz = _tconj(crz) crz = _np.vstack( (crz, _np.random.randn(kickrank2, n[i] * rz[i + 1]))) # For adding into solution crs = _bfun3(phizax[i], A[i], phiyax[i + 1], crx) crs = _reshape(crs, (rz[i], n[i] * ry[i + 1])) crs = crs / nrms[i] - ys v = _np.hstack((v, crs.T)) # .T #v = v.T if (renorm == 'gram') and ( n[i] * ry[i + 1] > 5 * (ry[i] + rz[i])): [v, s, R] = _svdgram(v) else: [v, R] = _np.linalg.qr(v) u = _np.hstack((u, _np.zeros((ry[i], rz[i])))) u = _np.dot(u, R.T) r = v.shape[1] cr2 = _reshape(y[i - 1], (ry[i - 1] * n[i - 1], ry[i])) cr2 = _np.dot(cr2, u) y[i - 1] = _reshape(cr2, (ry[i - 1], n[i - 1], r)) y[i] = _reshape(v.T, (r, n[i], ry[i + 1])) ry[i] = r [phiyax[i], nrms[i]] = _compute_next_Phi( phiyax[i + 1], y[i], x[i], 'rl', A[i]) if (kickrank + kickrank2 > 0): if (renorm == 'gram') and (n[i] * rz[i + 1] > 5 * rz[i]): [crz, s, R] = _svdgram(crz.T) else: [crz, R] = _np.linalg.qr(crz.T) rz[i] = crz.shape[1] z[i] = _reshape(crz.T, (rz[i], n[i], rz[i + 1])) # don't update z[i-1], it will be recomputed from scratch phizax[i] = _compute_next_Phi( phizax[ i + 1], z[i], x[i], 'rl', A[i], nrms[i], return_norm=False) phizy[i] = _compute_next_Phi( phizy[i + 1], z[i], y[i], 'rl', return_norm=False) if (verb > 1): print('amen-mv: swp=[%d,%d], dx=%.3e, r=%d, |y|=%.3e, |z|=%.3e' % (swp, i, dx, r, _np.linalg.norm(cry), nrmz)) # Stopping or reversing if ((direct > 0) and (i == d - 1)) or ((direct < 0) and (i == 0)): if (verb > 0): print('amen-mv: swp=%d{%d}, max_dx=%.3e, max_r=%d' % (swp, (1 - direct) // 2, max_dx, max(ry))) if ((max_dx < tol) or (swp == nswp)) and (direct > 0): break else: # We are at the terminal block y[i] = _reshape(cry, (ry[i], n[i], ry[i + 1])) if (direct > 0): swp = swp + 1 max_dx = 0 direct = -direct else: i = i + direct # if (direct>0) y[d - 1] = _reshape(cry, (ry[d - 1], n[d - 1], ry[d])) # else # y{1} = reshape(cry, ry(1), n(1), ry(2)); # end; # Distribute norms equally... nrms = _np.exp(sum(_np.log(nrms)) / d) # ... and plug them into y for i in xrange(d): y[i] = _np.dot(y[i], nrms) if (vectype == 1): y = _tt.vector.from_list(y) if kickrank == 0: z = None else: z = _tt.vector.from_list(z) return y, z
Approximate the matrix-by-vector via the AMEn iteration [y,z]=amen_mv(A, x, tol, varargin) Attempts to approximate the y = A*x with accuracy TOL using the AMEn+ALS iteration. Matrix A has to be given in the TT-format, right-hand side x should be given in the TT-format also. Options are provided in form 'PropertyName1',PropertyValue1,'PropertyName2',PropertyValue2 and so on. The parameters are set to default (in brackets in the following) The list of option names and default values are: o y0 - initial approximation to Ax [rand rank-2] o nswp - maximal number of sweeps [20] o verb - verbosity level, 0-silent, 1-sweep info, 2-block info [1] o kickrank - compression rank of the error, i.e. enrichment size [3] o init_qr - perform QR of the input (save some time in ts, etc) [true] o renorm - Orthog. and truncation methods: direct (svd,qr) or gram (apply svd to the gram matrix, faster for m>>n) [direct] o fkick - Perform solution enrichment during forward sweeps [false] (rather questionable yet; false makes error higher, but "better structured": it does not explode in e.g. subsequent matvecs) o z0 - initial approximation to the error Ax-y [rand rank-kickrank] ******** For description of adaptive ALS please see Sergey V. Dolgov, Dmitry V. Savostyanov, Alternating minimal energy methods for linear systems in higher dimensions. Part I: SPD systems, http://arxiv.org/abs/1301.6068, Part II: Faster algorithm and application to nonsymmetric systems, http://arxiv.org/abs/1304.1222 Use {sergey.v.dolgov, dmitry.savostyanov}@gmail.com for feedback ********
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/amen/amen_mv.py#L77-L477
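A hedged end-to-end check for amen_mv: applying an identity TT-matrix should reproduce x itself. The import path simply mirrors the module path listed above (tt/amen/amen_mv.py), and tt.eye is assumed to be available; if the package re-exports amen_mv elsewhere, adjust the import accordingly.

import tt
from tt.amen.amen_mv import amen_mv   # import location assumed from the file path above

d = 6
A = tt.eye(2, d)                      # identity operator, so A x = x
x = tt.rand(2, d, 3)
y, z = amen_mv(A, x, 1e-10, verb=False)
print((y - x).norm() / x.norm())      # should be at the 1e-10 level or below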
oseledets/ttpy
tt/amen/amen_mv.py
_compute_next_Phi
def _compute_next_Phi(Phi_prev, x, y, direction, A=None, extnrm=None, return_norm=True): ''' Performs the recurrent Phi (or Psi) matrix computation Phi = Phi_prev * (x'Ay). If direction is 'lr', computes Psi if direction is 'rl', computes Phi A can be empty, then only x'y is computed. Phi1: rx1, ry1, ra1, or {rx1, ry1}_ra, or rx1, ry1 Phi2: ry2, ra2, rx2, or {ry2, rx2}_ra, or ry2, rx2 ''' [rx1, n, rx2] = x.shape [ry1, m, ry2] = y.shape if A is not None: if isinstance(A, list): # ????????????????????????????????? # A is a canonical block ra = len(A) else: # Just full format [ra1, ra2] = A.shape ra1 = ra1 // n ra2 = ra2 // m # ????????????????????????????????????? else: [ra1, ra2] = [1, 1] if isinstance(Phi_prev, list): Phi = [None] * ra if return_norm: nrm = 0 if (direction == 'lr'): # lr: Phi1 x = _reshape(x, (rx1, n * rx2)) y = _reshape(y, (ry1 * m, ry2)) for i in xrange(ra): Phi[i] = _np.dot(_tconj(x), Phi_prev[i]) Phi[i] = _reshape(Phi[i], (n, rx2 * ry1)) Phi[i] = Phi[i].T Phi[i] = _np.dot(Phi[i], A[i]) Phi[i] = _reshape(Phi[i], (rx2, ry1 * m)) Phi[i] = _np.dot(Phi[i], y) if return_norm: nrm = max(nrm, _np.linalg.norm(Phi[i])) # , 'fro')) else: # rl: Phi2 y = _reshape(y, (ry1, m * ry2)) x = _reshape(x, (rx1 * n, rx2)) for i in xrange(ra): Phi[i] = _np.dot(Phi_prev[i], x.T) Phi[i] = _reshape(Phi[i], (ry2 * rx1, n)) Phi[i] = _np.dot(Phi[i], A[i]) Phi[i] = Phi[i].T Phi[i] = _reshape(Phi[i], (m * ry2, rx1)) Phi[i] = _np.dot(y, Phi[i]) if return_norm: nrm = max(nrm, _np.linalg.norm(Phi[i])) # , 'fro')) if return_norm: # Extract the scale to prevent overload if (nrm > 0): for i in xrange(ra): Phi[i] = Phi[i] / nrm else: nrm = 1 elif extnrm is not None: # Override the normalization for i in xrange(ra): Phi[i] = Phi[i] / extnrm else: if (direction == 'lr'): # lr: Phi1 x = _reshape(x, (rx1, n * rx2)) Phi = _reshape(Phi_prev, (rx1, ry1 * ra1)) Phi = _np.dot(_tconj(x), Phi) if A is not None: Phi = _reshape(Phi, (n * rx2 * ry1, ra1)) Phi = Phi.T Phi = _reshape(Phi, (ra1 * n, rx2 * ry1)) Phi = _np.dot(A.T, Phi) Phi = _reshape(Phi, (m, ra2 * rx2 * ry1)) else: Phi = _reshape(Phi, (n, rx2 * ry1)) Phi = Phi.T Phi = _reshape(Phi, (ra2 * rx2, ry1 * m)) y = _reshape(y, (ry1 * m, ry2)) Phi = _np.dot(Phi, y) if A is not None: Phi = _reshape(Phi, (ra2, rx2 * ry2)) Phi = Phi.T Phi = _reshape(Phi, (rx2, ry2, ra2)) else: Phi = _reshape(Phi, (rx2, ry2)) else: # rl: Phi2 y = _reshape(y, (ry1 * m, ry2)) Phi = _reshape(Phi_prev, (ry2, ra2 * rx2)) Phi = _np.dot(y, Phi) if A is not None: Phi = _reshape(Phi, (ry1, m * ra2 * rx2)) Phi = Phi.T Phi = _reshape(Phi, (m * ra2, rx2 * ry1)) Phi = _np.dot(A, Phi) Phi = _reshape(Phi, (ra1 * n * rx2, ry1)) Phi = Phi.T Phi = _reshape(Phi, (ry1 * ra1, n * rx2)) x = _reshape(x, (rx1, n * rx2)) Phi = _np.dot(Phi, _tconj(x)) if A is not None: Phi = _reshape(Phi, (ry1, ra1, rx1)) else: Phi = _reshape(Phi, (ry1, rx1)) if return_norm: # Extract the scale to prevent overload nrm = _np.linalg.norm(Phi) # , 'fro') if (nrm > 0): Phi = Phi / nrm else: nrm = 1 elif extnrm is not None: # Override the normalization by the external one Phi = Phi / extnrm if return_norm: return Phi, nrm else: return Phi
python
def _compute_next_Phi(Phi_prev, x, y, direction, A=None, extnrm=None, return_norm=True): ''' Performs the recurrent Phi (or Psi) matrix computation Phi = Phi_prev * (x'Ay). If direction is 'lr', computes Psi if direction is 'rl', computes Phi A can be empty, then only x'y is computed. Phi1: rx1, ry1, ra1, or {rx1, ry1}_ra, or rx1, ry1 Phi2: ry2, ra2, rx2, or {ry2, rx2}_ra, or ry2, rx2 ''' [rx1, n, rx2] = x.shape [ry1, m, ry2] = y.shape if A is not None: if isinstance(A, list): # ????????????????????????????????? # A is a canonical block ra = len(A) else: # Just full format [ra1, ra2] = A.shape ra1 = ra1 // n ra2 = ra2 // m # ????????????????????????????????????? else: [ra1, ra2] = [1, 1] if isinstance(Phi_prev, list): Phi = [None] * ra if return_norm: nrm = 0 if (direction == 'lr'): # lr: Phi1 x = _reshape(x, (rx1, n * rx2)) y = _reshape(y, (ry1 * m, ry2)) for i in xrange(ra): Phi[i] = _np.dot(_tconj(x), Phi_prev[i]) Phi[i] = _reshape(Phi[i], (n, rx2 * ry1)) Phi[i] = Phi[i].T Phi[i] = _np.dot(Phi[i], A[i]) Phi[i] = _reshape(Phi[i], (rx2, ry1 * m)) Phi[i] = _np.dot(Phi[i], y) if return_norm: nrm = max(nrm, _np.linalg.norm(Phi[i])) # , 'fro')) else: # rl: Phi2 y = _reshape(y, (ry1, m * ry2)) x = _reshape(x, (rx1 * n, rx2)) for i in xrange(ra): Phi[i] = _np.dot(Phi_prev[i], x.T) Phi[i] = _reshape(Phi[i], (ry2 * rx1, n)) Phi[i] = _np.dot(Phi[i], A[i]) Phi[i] = Phi[i].T Phi[i] = _reshape(Phi[i], (m * ry2, rx1)) Phi[i] = _np.dot(y, Phi[i]) if return_norm: nrm = max(nrm, _np.linalg.norm(Phi[i])) # , 'fro')) if return_norm: # Extract the scale to prevent overload if (nrm > 0): for i in xrange(ra): Phi[i] = Phi[i] / nrm else: nrm = 1 elif extnrm is not None: # Override the normalization for i in xrange(ra): Phi[i] = Phi[i] / extnrm else: if (direction == 'lr'): # lr: Phi1 x = _reshape(x, (rx1, n * rx2)) Phi = _reshape(Phi_prev, (rx1, ry1 * ra1)) Phi = _np.dot(_tconj(x), Phi) if A is not None: Phi = _reshape(Phi, (n * rx2 * ry1, ra1)) Phi = Phi.T Phi = _reshape(Phi, (ra1 * n, rx2 * ry1)) Phi = _np.dot(A.T, Phi) Phi = _reshape(Phi, (m, ra2 * rx2 * ry1)) else: Phi = _reshape(Phi, (n, rx2 * ry1)) Phi = Phi.T Phi = _reshape(Phi, (ra2 * rx2, ry1 * m)) y = _reshape(y, (ry1 * m, ry2)) Phi = _np.dot(Phi, y) if A is not None: Phi = _reshape(Phi, (ra2, rx2 * ry2)) Phi = Phi.T Phi = _reshape(Phi, (rx2, ry2, ra2)) else: Phi = _reshape(Phi, (rx2, ry2)) else: # rl: Phi2 y = _reshape(y, (ry1 * m, ry2)) Phi = _reshape(Phi_prev, (ry2, ra2 * rx2)) Phi = _np.dot(y, Phi) if A is not None: Phi = _reshape(Phi, (ry1, m * ra2 * rx2)) Phi = Phi.T Phi = _reshape(Phi, (m * ra2, rx2 * ry1)) Phi = _np.dot(A, Phi) Phi = _reshape(Phi, (ra1 * n * rx2, ry1)) Phi = Phi.T Phi = _reshape(Phi, (ry1 * ra1, n * rx2)) x = _reshape(x, (rx1, n * rx2)) Phi = _np.dot(Phi, _tconj(x)) if A is not None: Phi = _reshape(Phi, (ry1, ra1, rx1)) else: Phi = _reshape(Phi, (ry1, rx1)) if return_norm: # Extract the scale to prevent overload nrm = _np.linalg.norm(Phi) # , 'fro') if (nrm > 0): Phi = Phi / nrm else: nrm = 1 elif extnrm is not None: # Override the normalization by the external one Phi = Phi / extnrm if return_norm: return Phi, nrm else: return Phi
Performs the recurrent Phi (or Psi) matrix computation Phi = Phi_prev * (x'Ay). If direction is 'lr', computes Psi if direction is 'rl', computes Phi A can be empty, then only x'y is computed. Phi1: rx1, ry1, ra1, or {rx1, ry1}_ra, or rx1, ry1 Phi2: ry2, ra2, rx2, or {ry2, rx2}_ra, or ry2, rx2
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/amen/amen_mv.py#L480-L608
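The recurrence is easiest to see in the A = None, 'lr' case: the next interface is the partial inner product of x and y, obtained by contracting the previous interface with one core of each. The NumPy sketch below uses local names only and is not the library API.

import numpy as np

def next_phi_lr(phi_prev, xcore, ycore):
    # phi_prev: (rx1, ry1), xcore: (rx1, n, rx2), ycore: (ry1, n, ry2) -> (rx2, ry2)
    return np.einsum('anc,ab,bnd->cd', np.conj(xcore), phi_prev, ycore)

# Sweeping the recurrence over all cores yields the full inner product <x, y>.
n, d = 3, 4
xs = [np.random.rand(1 if i == 0 else 2, n, 1 if i == d - 1 else 2) for i in range(d)]
ys = [np.random.rand(1 if i == 0 else 2, n, 1 if i == d - 1 else 2) for i in range(d)]
phi = np.ones((1, 1))
for xc, yc in zip(xs, ys):
    phi = next_phi_lr(phi, xc, yc)
print(phi.shape)   # (1, 1): the scalar <x, y>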
oseledets/ttpy
tt/multifuncrs.py
multifuncrs
def multifuncrs(X, funs, eps=1E-6, nswp=10, kickrank=5, y0=None, rmax=999999, # TODO:infinity \ kicktype='amr-two', \ pcatype='svd', \ trunctype='fro', \ d2=1, \ do_qr=False, \ verb=1): """Cross approximation of a (vector-)function of several TT-tensors. :param X: tuple of TT-tensors :param funs: multivariate function :param eps: accuracy """ dtype = np.float64 if len([x for x in X if x.is_complex]) > 0: dtype = np.complex128 y = y0 wasrand = False nx = len(X) d = X[0].d n = X[0].n rx = np.transpose(np.array([ttx.r for ttx in X])) #crx = [tt.tensor.to_list(ttx) for x in X] #crx = zip(*crx) crx = np.transpose(np.array([tt.tensor.to_list(ttx) for ttx in X], dtype=np.object)) crx = np.empty((nx, d), dtype=np.object) i = 0 for ttx in X: v = tt.tensor.to_list(ttx) j = 0 for w in v: crx[i, j] = w j = j + 1 i = i + 1 crx = crx.T if y is None: ry = d2 * np.ones((d + 1,), dtype=np.int32) ry[0] = 1 y = tt.rand(n, d, ry) wasrand = True ry = y.r cry = tt.tensor.to_list(y) Ry = np.zeros((d + 1, ), dtype=np.object) Ry[0] = np.array([[1.0]], dtype=dtype) Ry[d] = np.array([[1.0]], dtype=dtype) Rx = np.zeros((d + 1, nx), dtype=np.object) Rx[0, :] = np.ones(nx, dtype=dtype) Rx[d, :] = np.ones(nx, dtype=dtype) block_order = [+d, -d] # orth for i in range(0, d - 1): cr = cry[i] cr = reshape(cr, (ry[i] * n[i], ry[i + 1])) cr, rv = np.linalg.qr(cr) cr2 = cry[i + 1] cr2 = reshape(cr2, (ry[i + 1], n[i + 1] * ry[i + 2])) cr2 = np.dot(rv, cr2) # matrix multiplication ry[i + 1] = cr.shape[1] cr = reshape(cr, (ry[i], n[i], ry[i + 1])) cry[i + 1] = reshape(cr2, (ry[i + 1], n[i + 1], ry[i + 2])) cry[i] = cr Ry[i + 1] = np.dot(Ry[i], reshape(cr, (ry[i], n[i] * ry[i + 1]))) Ry[i + 1] = reshape(Ry[i + 1], (ry[i] * n[i], ry[i + 1])) curind = [] if wasrand: # EVERY DAY I'M SHUFFLIN' curind = np.random.permutation(n[i] * ry[i])[:ry[i + 1]] else: curind = maxvol(Ry[i + 1]) Ry[i + 1] = Ry[i + 1][curind, :] for j in range(0, nx): try: Rx[i + 1, j] = reshape(crx[i, j], (rx[i, j], n[i] * rx[i + 1, j])) except: pass Rx[i + 1, j] = np.dot(Rx[i, j], Rx[i + 1, j]) Rx[i + 1, j] = reshape(Rx[i + 1, j], (ry[i] * n[i], rx[i + 1, j])) Rx[i + 1, j] = Rx[i + 1, j][curind, :] d2 = ry[d] ry[d] = 1 cry[d - 1] = np.transpose(cry[d - 1], [2, 0, 1]) # permute last_sweep = False swp = 1 dy = np.zeros((d, )) max_dy = 0 cur_order = copy.copy(block_order) order_index = 1 i = d - 1 # can't use 'dir' identifier in python dirn = int(math.copysign(1, cur_order[order_index])) # DMRG sweeps while swp <= nswp or dirn > 0: oldy = reshape(cry[i], (d2 * ry[i] * n[i] * ry[i + 1],)) if not last_sweep: # compute the X superblocks curbl = np.zeros((ry[i] * n[i] * ry[i + 1], nx), dtype=dtype) for j in range(0, nx): cr = reshape(crx[i, j], (rx[i, j], n[i] * rx[i + 1, j])) cr = np.dot(Rx[i, j], cr) cr = reshape(cr, (ry[i] * n[i], rx[i + 1, j])) cr = np.dot(cr, Rx[i + 1, j]) curbl[:, j] = cr.flatten('F') # call the function newy = funs(curbl) # multiply with inverted Ry newy = reshape(newy, (ry[i], n[i] * ry[i + 1] * d2)) newy = np.linalg.solve(Ry[i], newy) # y = R \ y newy = reshape(newy, (ry[i] * n[i] * ry[i + 1], d2)) newy = reshape(np.transpose(newy), (d2 * ry[i] * n[i], ry[i + 1])) newy = np.transpose(np.linalg.solve( np.transpose(Ry[i + 1]), np.transpose(newy))) # y=y/R newy = reshape(newy, (d2 * ry[i] * n[i] * ry[i + 1],)) else: newy = oldy dy[i] = np.linalg.norm(newy - oldy) / np.linalg.norm(newy) max_dy = max(max_dy, dy[i]) # truncation if dirn > 0: # left-to-right newy = reshape(newy, (d2, ry[i] * n[i] * ry[i + 1])) newy = reshape(np.transpose(newy), (ry[i] 
* n[i], ry[i + 1] * d2)) else: newy = reshape(newy, (d2 * ry[i], n[i] * ry[i + 1])) r = 0 # defines a variable in global scope if kickrank >= 0: u, s, v = np.linalg.svd(newy, full_matrices=False) v = np.conj(np.transpose(v)) if trunctype == "fro" or last_sweep: r = my_chop2(s, eps / math.sqrt(d) * np.linalg.norm(s)) else: # truncate taking into account the (r+1) overhead in the cross # (T.S.: what?) cums = abs(s * np.arange(2, len(s) + 2)) ** 2 cums = np.cumsum(cums[::-1])[::-1] cums = cums / cums[0] ff = [i for i in range(len(cums)) if cums[i] < eps ** 2 / d] if len(ff) == 0: r = len(s) else: r = np.amin(ff) r = min(r, rmax, len(s)) else: if dirn > 0: u, v = np.linalg.qr(newy) v = np.conj(np.transpose(v)) r = u.shape[1] s = np.ones((r, )) else: v, u = np.linalg.qr(np.transpose(newy)) v = np.conj(v) u = np.transpose(u) r = u.shape[1] s = np.ones((r, )) if verb > 1: print('=multifuncrs= block %d{%d}, dy: %3.3e, r: %d' % (i, dirn, dy[i], r)) # kicks and interfaces if dirn > 0 and i < d - 1: u = u[:, :r] v = np.dot(v[:, :r], np.diag(s[:r])) # kick radd = 0 rv = 1 if not last_sweep and kickrank > 0: uk = None if kicktype == 'amr-two': # AMR(two)-like kick. # compute the X superblocks ind2 = np.unique(np.random.randint( 0, ry[i + 2] * n[i + 1], ry[i + 1])) #ind2 = np.unique(np.floor(np.random.rand(ry[i + 1]) * (ry[i + 2] * n[i + 1]))) rkick = len(ind2) curbl = np.zeros((ry[i] * n[i] * rkick, nx), dtype=dtype) for j in range(nx): cr1 = reshape( crx[i, j], (rx[i, j], n[i] * rx[i + 1, j])) cr1 = np.dot(Rx[i, j], cr1) cr1 = reshape(cr1, (ry[i] * n[i], rx[i + 1, j])) cr2 = reshape( crx[i + 1, j], (rx[i + 1, j] * n[i + 1], rx[i + 2, j])) cr2 = np.dot(cr2, Rx[i + 2, j]) cr2 = reshape( cr2, (rx[i + 1, j], n[i + 1] * ry[i + 2])) cr2 = cr2[:, ind2] curbl[:, j] = reshape( np.dot(cr1, cr2), (ry[i] * n[i] * rkick,)) # call the function uk = funs(curbl) uk = reshape(uk, (ry[i], n[i] * rkick * d2)) uk = np.linalg.solve(Ry[i], uk) uk = reshape(uk, (ry[i] * n[i], rkick * d2)) if pcatype == 'svd': uk, sk, vk = np.linalg.svd(uk, full_matrices=False) vk = np.conj(np.transpose(vk)) uk = uk[:, :min(kickrank, uk.shape[1])] else: # uk = uchol(np.transpose(uk), kickrank + 1) # TODO uk = uk[:, :max(uk.shape[1] - kickrank + 1, 1):-1] else: uk = np.random.rand(ry[i] * n[i], kickrank) u, rv = np.linalg.qr(np.concatenate((u, uk), axis=1)) radd = uk.shape[1] v = np.concatenate( (v, np.zeros((ry[i + 1] * d2, radd), dtype=dtype)), axis=1) v = np.dot(rv, np.conj(np.transpose(v))) r = u.shape[1] cr2 = cry[i + 1] cr2 = reshape(cr2, (ry[i + 1], n[i + 1] * ry[i + 2])) v = reshape(v, (r * ry[i + 1], d2)) v = reshape(np.transpose(v), (d2 * r, ry[i + 1])) v = np.dot(v, cr2) ry[i + 1] = r u = reshape(u, (ry[i], n[i], r)) v = reshape(v, (d2, r, n[i + 1], ry[i + 2])) cry[i] = u cry[i + 1] = v Ry[i + 1] = np.dot(Ry[i], reshape(u, (ry[i], n[i] * ry[i + 1]))) Ry[i + 1] = reshape(Ry[i + 1], (ry[i] * n[i], ry[i + 1])) curind = maxvol(Ry[i + 1]) Ry[i + 1] = Ry[i + 1][curind, :] for j in range(nx): Rx[i + 1, j] = reshape(crx[i, j], (rx[i, j], n[i] * rx[i + 1, j])) Rx[i + 1, j] = np.dot(Rx[i, j], Rx[i + 1, j]) Rx[i + 1, j] = reshape(Rx[i + 1, j], (ry[i] * n[i], rx[i + 1, j])) Rx[i + 1, j] = Rx[i + 1, j][curind, :] elif dirn < 0 and i > 0: u = np.dot(u[:, :r], np.diag(s[:r])) v = np.conj(v[:, :r]) radd = 0 rv = 1 if not last_sweep and kickrank > 0: if kicktype == 'amr-two': # compute the X superblocks ind2 = np.unique(np.random.randint( 0, ry[i - 1] * n[i - 1], ry[i])) rkick = len(ind2) curbl = np.zeros( (rkick * n[i] * ry[i + 1], nx), 
dtype=dtype) for j in range(nx): cr1 = reshape( crx[i, j], (rx[i, j] * n[i], rx[i + 1, j])) cr1 = np.dot(cr1, Rx[i + 1, j]) cr1 = reshape(cr1, (rx[i, j], n[i] * ry[i + 1])) cr2 = reshape( crx[i - 1, j], (rx[i - 1, j], n[i - 1] * rx[i, j])) cr2 = np.dot(Rx[i - 1, j], cr2) cr2 = reshape(cr2, (ry[i - 1] * n[i - 1], rx[i, j])) cr2 = cr2[ind2, :] curbl[:, j] = reshape( np.dot(cr2, cr1), (rkick * n[i] * ry[i + 1],)) # calling the function uk = funs(curbl) uk = reshape(uk, (rkick * n[i] * ry[i + 1], d2)) uk = reshape(np.transpose( uk), (d2 * rkick * n[i], ry[i + 1])) uk = np.transpose(np.linalg.solve( np.transpose(Ry[i + 1]), np.transpose(uk))) uk = reshape(uk, (d2 * rkick, n[i] * ry[i + 1])) if pcatype == 'svd': vk, sk, uk = np.linalg.svd(uk, full_matrices=False) uk = np.conj(np.transpose(uk)) # TODO: refactor uk = uk[:, :min(kickrank, uk.shape[1])] else: # uk = uchol(uk, kickrank + 1) # TODO uk = uk[:, :max(uk.shape[1] - kickrank + 1, 1):-1] else: uk = np.random.rand(n[i] * ry[i + 1], kickrank) v, rv = np.linalg.qr(np.concatenate((v, uk), axis=1)) radd = uk.shape[1] u = np.concatenate( (u, np.zeros((d2 * ry[i], radd), dtype=dtype)), axis=1) u = np.dot(u, np.transpose(rv)) r = v.shape[1] cr2 = cry[i - 1] cr2 = reshape(cr2, (ry[i - 1] * n[i - 1], ry[i])) u = reshape(u, (d2, ry[i] * r)) u = reshape(np.transpose(u), (ry[i], r * d2)) u = np.dot(cr2, u) u = reshape(u, (ry[i - 1] * n[i - 1] * r, d2)) u = reshape(np.transpose(u), (d2, ry[i - 1], n[i - 1], r)) v = reshape(np.transpose(v), (r, n[i], ry[i + 1])) ry[i] = r cry[i - 1] = u cry[i] = v Ry[i] = np.dot(reshape(v, (ry[i] * n[i], ry[i + 1])), Ry[i + 1]) Ry[i] = reshape(Ry[i], (ry[i], n[i] * ry[i + 1])) curind = maxvol(np.transpose(Ry[i])) Ry[i] = Ry[i][:, curind] for j in range(nx): Rx[i, j] = reshape(crx[i, j], (rx[i, j] * n[i], rx[i + 1, j])) Rx[i, j] = np.dot(Rx[i, j], Rx[i + 1, j]) Rx[i, j] = reshape(Rx[i, j], (rx[i, j], n[i] * ry[i + 1])) Rx[i, j] = Rx[i, j][:, curind] elif dirn > 0 and i == d - 1: newy = np.dot(np.dot(u[:, :r], np.diag(s[:r])), np.conj(np.transpose(v[:, :r]))) newy = reshape(newy, (ry[i] * n[i] * ry[i + 1], d2)) cry[i] = reshape(np.transpose(newy), (d2, ry[i], n[i], ry[i + 1])) elif dirn < 0 and i == 0: newy = np.dot(np.dot(u[:, :r], np.diag(s[:r])), np.conj(np.transpose(v[:, :r]))) newy = reshape(newy, (d2, ry[i], n[i], ry[i + 1])) cry[i] = newy i = i + dirn cur_order[order_index] = cur_order[order_index] - dirn if cur_order[order_index] == 0: order_index = order_index + 1 if verb > 0: print('=multifuncrs= sweep %d{%d}, max_dy: %3.3e, erank: %g' % (swp, order_index, max_dy, math.sqrt(np.dot(ry[:d], n * ry[1:]) / np.sum(n)))) if last_sweep: break if max_dy < eps and dirn < 0: last_sweep = True kickrank = 0 if order_index >= len(cur_order): cur_order = copy.copy(block_order) order_index = 0 if last_sweep: cur_order = [d - 1] max_dy = 0 swp = swp + 1 dirn = int(math.copysign(1, cur_order[order_index])) i = i + dirn cry[d - 1] = np.transpose(cry[d - 1][:, :, :, 0], [1, 2, 0]) y = tt.tensor.from_list(cry) return y
python
def multifuncrs(X, funs, eps=1E-6, nswp=10, kickrank=5, y0=None, rmax=999999, # TODO:infinity \ kicktype='amr-two', \ pcatype='svd', \ trunctype='fro', \ d2=1, \ do_qr=False, \ verb=1): """Cross approximation of a (vector-)function of several TT-tensors. :param X: tuple of TT-tensors :param funs: multivariate function :param eps: accuracy """ dtype = np.float64 if len([x for x in X if x.is_complex]) > 0: dtype = np.complex128 y = y0 wasrand = False nx = len(X) d = X[0].d n = X[0].n rx = np.transpose(np.array([ttx.r for ttx in X])) #crx = [tt.tensor.to_list(ttx) for x in X] #crx = zip(*crx) crx = np.transpose(np.array([tt.tensor.to_list(ttx) for ttx in X], dtype=np.object)) crx = np.empty((nx, d), dtype=np.object) i = 0 for ttx in X: v = tt.tensor.to_list(ttx) j = 0 for w in v: crx[i, j] = w j = j + 1 i = i + 1 crx = crx.T if y is None: ry = d2 * np.ones((d + 1,), dtype=np.int32) ry[0] = 1 y = tt.rand(n, d, ry) wasrand = True ry = y.r cry = tt.tensor.to_list(y) Ry = np.zeros((d + 1, ), dtype=np.object) Ry[0] = np.array([[1.0]], dtype=dtype) Ry[d] = np.array([[1.0]], dtype=dtype) Rx = np.zeros((d + 1, nx), dtype=np.object) Rx[0, :] = np.ones(nx, dtype=dtype) Rx[d, :] = np.ones(nx, dtype=dtype) block_order = [+d, -d] # orth for i in range(0, d - 1): cr = cry[i] cr = reshape(cr, (ry[i] * n[i], ry[i + 1])) cr, rv = np.linalg.qr(cr) cr2 = cry[i + 1] cr2 = reshape(cr2, (ry[i + 1], n[i + 1] * ry[i + 2])) cr2 = np.dot(rv, cr2) # matrix multiplication ry[i + 1] = cr.shape[1] cr = reshape(cr, (ry[i], n[i], ry[i + 1])) cry[i + 1] = reshape(cr2, (ry[i + 1], n[i + 1], ry[i + 2])) cry[i] = cr Ry[i + 1] = np.dot(Ry[i], reshape(cr, (ry[i], n[i] * ry[i + 1]))) Ry[i + 1] = reshape(Ry[i + 1], (ry[i] * n[i], ry[i + 1])) curind = [] if wasrand: # EVERY DAY I'M SHUFFLIN' curind = np.random.permutation(n[i] * ry[i])[:ry[i + 1]] else: curind = maxvol(Ry[i + 1]) Ry[i + 1] = Ry[i + 1][curind, :] for j in range(0, nx): try: Rx[i + 1, j] = reshape(crx[i, j], (rx[i, j], n[i] * rx[i + 1, j])) except: pass Rx[i + 1, j] = np.dot(Rx[i, j], Rx[i + 1, j]) Rx[i + 1, j] = reshape(Rx[i + 1, j], (ry[i] * n[i], rx[i + 1, j])) Rx[i + 1, j] = Rx[i + 1, j][curind, :] d2 = ry[d] ry[d] = 1 cry[d - 1] = np.transpose(cry[d - 1], [2, 0, 1]) # permute last_sweep = False swp = 1 dy = np.zeros((d, )) max_dy = 0 cur_order = copy.copy(block_order) order_index = 1 i = d - 1 # can't use 'dir' identifier in python dirn = int(math.copysign(1, cur_order[order_index])) # DMRG sweeps while swp <= nswp or dirn > 0: oldy = reshape(cry[i], (d2 * ry[i] * n[i] * ry[i + 1],)) if not last_sweep: # compute the X superblocks curbl = np.zeros((ry[i] * n[i] * ry[i + 1], nx), dtype=dtype) for j in range(0, nx): cr = reshape(crx[i, j], (rx[i, j], n[i] * rx[i + 1, j])) cr = np.dot(Rx[i, j], cr) cr = reshape(cr, (ry[i] * n[i], rx[i + 1, j])) cr = np.dot(cr, Rx[i + 1, j]) curbl[:, j] = cr.flatten('F') # call the function newy = funs(curbl) # multiply with inverted Ry newy = reshape(newy, (ry[i], n[i] * ry[i + 1] * d2)) newy = np.linalg.solve(Ry[i], newy) # y = R \ y newy = reshape(newy, (ry[i] * n[i] * ry[i + 1], d2)) newy = reshape(np.transpose(newy), (d2 * ry[i] * n[i], ry[i + 1])) newy = np.transpose(np.linalg.solve( np.transpose(Ry[i + 1]), np.transpose(newy))) # y=y/R newy = reshape(newy, (d2 * ry[i] * n[i] * ry[i + 1],)) else: newy = oldy dy[i] = np.linalg.norm(newy - oldy) / np.linalg.norm(newy) max_dy = max(max_dy, dy[i]) # truncation if dirn > 0: # left-to-right newy = reshape(newy, (d2, ry[i] * n[i] * ry[i + 1])) newy = reshape(np.transpose(newy), (ry[i] 
* n[i], ry[i + 1] * d2)) else: newy = reshape(newy, (d2 * ry[i], n[i] * ry[i + 1])) r = 0 # defines a variable in global scope if kickrank >= 0: u, s, v = np.linalg.svd(newy, full_matrices=False) v = np.conj(np.transpose(v)) if trunctype == "fro" or last_sweep: r = my_chop2(s, eps / math.sqrt(d) * np.linalg.norm(s)) else: # truncate taking into account the (r+1) overhead in the cross # (T.S.: what?) cums = abs(s * np.arange(2, len(s) + 2)) ** 2 cums = np.cumsum(cums[::-1])[::-1] cums = cums / cums[0] ff = [i for i in range(len(cums)) if cums[i] < eps ** 2 / d] if len(ff) == 0: r = len(s) else: r = np.amin(ff) r = min(r, rmax, len(s)) else: if dirn > 0: u, v = np.linalg.qr(newy) v = np.conj(np.transpose(v)) r = u.shape[1] s = np.ones((r, )) else: v, u = np.linalg.qr(np.transpose(newy)) v = np.conj(v) u = np.transpose(u) r = u.shape[1] s = np.ones((r, )) if verb > 1: print('=multifuncrs= block %d{%d}, dy: %3.3e, r: %d' % (i, dirn, dy[i], r)) # kicks and interfaces if dirn > 0 and i < d - 1: u = u[:, :r] v = np.dot(v[:, :r], np.diag(s[:r])) # kick radd = 0 rv = 1 if not last_sweep and kickrank > 0: uk = None if kicktype == 'amr-two': # AMR(two)-like kick. # compute the X superblocks ind2 = np.unique(np.random.randint( 0, ry[i + 2] * n[i + 1], ry[i + 1])) #ind2 = np.unique(np.floor(np.random.rand(ry[i + 1]) * (ry[i + 2] * n[i + 1]))) rkick = len(ind2) curbl = np.zeros((ry[i] * n[i] * rkick, nx), dtype=dtype) for j in range(nx): cr1 = reshape( crx[i, j], (rx[i, j], n[i] * rx[i + 1, j])) cr1 = np.dot(Rx[i, j], cr1) cr1 = reshape(cr1, (ry[i] * n[i], rx[i + 1, j])) cr2 = reshape( crx[i + 1, j], (rx[i + 1, j] * n[i + 1], rx[i + 2, j])) cr2 = np.dot(cr2, Rx[i + 2, j]) cr2 = reshape( cr2, (rx[i + 1, j], n[i + 1] * ry[i + 2])) cr2 = cr2[:, ind2] curbl[:, j] = reshape( np.dot(cr1, cr2), (ry[i] * n[i] * rkick,)) # call the function uk = funs(curbl) uk = reshape(uk, (ry[i], n[i] * rkick * d2)) uk = np.linalg.solve(Ry[i], uk) uk = reshape(uk, (ry[i] * n[i], rkick * d2)) if pcatype == 'svd': uk, sk, vk = np.linalg.svd(uk, full_matrices=False) vk = np.conj(np.transpose(vk)) uk = uk[:, :min(kickrank, uk.shape[1])] else: # uk = uchol(np.transpose(uk), kickrank + 1) # TODO uk = uk[:, :max(uk.shape[1] - kickrank + 1, 1):-1] else: uk = np.random.rand(ry[i] * n[i], kickrank) u, rv = np.linalg.qr(np.concatenate((u, uk), axis=1)) radd = uk.shape[1] v = np.concatenate( (v, np.zeros((ry[i + 1] * d2, radd), dtype=dtype)), axis=1) v = np.dot(rv, np.conj(np.transpose(v))) r = u.shape[1] cr2 = cry[i + 1] cr2 = reshape(cr2, (ry[i + 1], n[i + 1] * ry[i + 2])) v = reshape(v, (r * ry[i + 1], d2)) v = reshape(np.transpose(v), (d2 * r, ry[i + 1])) v = np.dot(v, cr2) ry[i + 1] = r u = reshape(u, (ry[i], n[i], r)) v = reshape(v, (d2, r, n[i + 1], ry[i + 2])) cry[i] = u cry[i + 1] = v Ry[i + 1] = np.dot(Ry[i], reshape(u, (ry[i], n[i] * ry[i + 1]))) Ry[i + 1] = reshape(Ry[i + 1], (ry[i] * n[i], ry[i + 1])) curind = maxvol(Ry[i + 1]) Ry[i + 1] = Ry[i + 1][curind, :] for j in range(nx): Rx[i + 1, j] = reshape(crx[i, j], (rx[i, j], n[i] * rx[i + 1, j])) Rx[i + 1, j] = np.dot(Rx[i, j], Rx[i + 1, j]) Rx[i + 1, j] = reshape(Rx[i + 1, j], (ry[i] * n[i], rx[i + 1, j])) Rx[i + 1, j] = Rx[i + 1, j][curind, :] elif dirn < 0 and i > 0: u = np.dot(u[:, :r], np.diag(s[:r])) v = np.conj(v[:, :r]) radd = 0 rv = 1 if not last_sweep and kickrank > 0: if kicktype == 'amr-two': # compute the X superblocks ind2 = np.unique(np.random.randint( 0, ry[i - 1] * n[i - 1], ry[i])) rkick = len(ind2) curbl = np.zeros( (rkick * n[i] * ry[i + 1], nx), 
dtype=dtype) for j in range(nx): cr1 = reshape( crx[i, j], (rx[i, j] * n[i], rx[i + 1, j])) cr1 = np.dot(cr1, Rx[i + 1, j]) cr1 = reshape(cr1, (rx[i, j], n[i] * ry[i + 1])) cr2 = reshape( crx[i - 1, j], (rx[i - 1, j], n[i - 1] * rx[i, j])) cr2 = np.dot(Rx[i - 1, j], cr2) cr2 = reshape(cr2, (ry[i - 1] * n[i - 1], rx[i, j])) cr2 = cr2[ind2, :] curbl[:, j] = reshape( np.dot(cr2, cr1), (rkick * n[i] * ry[i + 1],)) # calling the function uk = funs(curbl) uk = reshape(uk, (rkick * n[i] * ry[i + 1], d2)) uk = reshape(np.transpose( uk), (d2 * rkick * n[i], ry[i + 1])) uk = np.transpose(np.linalg.solve( np.transpose(Ry[i + 1]), np.transpose(uk))) uk = reshape(uk, (d2 * rkick, n[i] * ry[i + 1])) if pcatype == 'svd': vk, sk, uk = np.linalg.svd(uk, full_matrices=False) uk = np.conj(np.transpose(uk)) # TODO: refactor uk = uk[:, :min(kickrank, uk.shape[1])] else: # uk = uchol(uk, kickrank + 1) # TODO uk = uk[:, :max(uk.shape[1] - kickrank + 1, 1):-1] else: uk = np.random.rand(n[i] * ry[i + 1], kickrank) v, rv = np.linalg.qr(np.concatenate((v, uk), axis=1)) radd = uk.shape[1] u = np.concatenate( (u, np.zeros((d2 * ry[i], radd), dtype=dtype)), axis=1) u = np.dot(u, np.transpose(rv)) r = v.shape[1] cr2 = cry[i - 1] cr2 = reshape(cr2, (ry[i - 1] * n[i - 1], ry[i])) u = reshape(u, (d2, ry[i] * r)) u = reshape(np.transpose(u), (ry[i], r * d2)) u = np.dot(cr2, u) u = reshape(u, (ry[i - 1] * n[i - 1] * r, d2)) u = reshape(np.transpose(u), (d2, ry[i - 1], n[i - 1], r)) v = reshape(np.transpose(v), (r, n[i], ry[i + 1])) ry[i] = r cry[i - 1] = u cry[i] = v Ry[i] = np.dot(reshape(v, (ry[i] * n[i], ry[i + 1])), Ry[i + 1]) Ry[i] = reshape(Ry[i], (ry[i], n[i] * ry[i + 1])) curind = maxvol(np.transpose(Ry[i])) Ry[i] = Ry[i][:, curind] for j in range(nx): Rx[i, j] = reshape(crx[i, j], (rx[i, j] * n[i], rx[i + 1, j])) Rx[i, j] = np.dot(Rx[i, j], Rx[i + 1, j]) Rx[i, j] = reshape(Rx[i, j], (rx[i, j], n[i] * ry[i + 1])) Rx[i, j] = Rx[i, j][:, curind] elif dirn > 0 and i == d - 1: newy = np.dot(np.dot(u[:, :r], np.diag(s[:r])), np.conj(np.transpose(v[:, :r]))) newy = reshape(newy, (ry[i] * n[i] * ry[i + 1], d2)) cry[i] = reshape(np.transpose(newy), (d2, ry[i], n[i], ry[i + 1])) elif dirn < 0 and i == 0: newy = np.dot(np.dot(u[:, :r], np.diag(s[:r])), np.conj(np.transpose(v[:, :r]))) newy = reshape(newy, (d2, ry[i], n[i], ry[i + 1])) cry[i] = newy i = i + dirn cur_order[order_index] = cur_order[order_index] - dirn if cur_order[order_index] == 0: order_index = order_index + 1 if verb > 0: print('=multifuncrs= sweep %d{%d}, max_dy: %3.3e, erank: %g' % (swp, order_index, max_dy, math.sqrt(np.dot(ry[:d], n * ry[1:]) / np.sum(n)))) if last_sweep: break if max_dy < eps and dirn < 0: last_sweep = True kickrank = 0 if order_index >= len(cur_order): cur_order = copy.copy(block_order) order_index = 0 if last_sweep: cur_order = [d - 1] max_dy = 0 swp = swp + 1 dirn = int(math.copysign(1, cur_order[order_index])) i = i + dirn cry[d - 1] = np.transpose(cry[d - 1][:, :, :, 0], [1, 2, 0]) y = tt.tensor.from_list(cry) return y
Cross approximation of a (vector-)function of several TT-tensors. :param X: tuple of TT-tensors :param funs: multivariate function :param eps: accuracy
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/multifuncrs.py#L54-L425
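A sketch of the cross approximation above, with several assumptions flagged: the callback receives a 2-D array with one column per input tensor and must return one value per row; the import follows the module path tt/multifuncrs.py (the top-level package may re-export it); and the elementwise TT product a * b is used only as a reference value.

import tt
from tt.multifuncrs import multifuncrs   # import location assumed from the file path above

d = 8
a = tt.rand(2, d, 3)
b = tt.rand(2, d, 3)

# Elementwise product via cross approximation of f(x1, x2) = x1 * x2.
c = multifuncrs([a, b], lambda v: v[:, 0] * v[:, 1], eps=1e-10, verb=0)
print((c - a * b).norm() / (a * b).norm())   # small relative error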
oseledets/ttpy
tt/core/vector.py
vector.from_list
def from_list(a, order='F'):
    """Generate TT-vector object from given TT cores.

    :param a: List of TT cores.
    :type a: list
    :returns: vector -- TT-vector constructed from the given cores.
    """
    d = len(a)  # Number of cores
    res = vector()
    n = _np.zeros(d, dtype=_np.int32)
    r = _np.zeros(d+1, dtype=_np.int32)
    cr = _np.array([])
    for i in xrange(d):
        cr = _np.concatenate((cr, a[i].flatten(order)))
        r[i] = a[i].shape[0]
        r[i+1] = a[i].shape[2]
        n[i] = a[i].shape[1]
    res.d = d
    res.n = n
    res.r = r
    res.core = cr
    res.get_ps()
    return res
python
def from_list(a, order='F'):
    """Generate TT-vector object from given TT cores.

    :param a: List of TT cores.
    :type a: list
    :returns: vector -- TT-vector constructed from the given cores.
    """
    d = len(a)  # Number of cores
    res = vector()
    n = _np.zeros(d, dtype=_np.int32)
    r = _np.zeros(d+1, dtype=_np.int32)
    cr = _np.array([])
    for i in xrange(d):
        cr = _np.concatenate((cr, a[i].flatten(order)))
        r[i] = a[i].shape[0]
        r[i+1] = a[i].shape[2]
        n[i] = a[i].shape[1]
    res.d = d
    res.n = n
    res.r = r
    res.core = cr
    res.get_ps()
    return res
Generate TT-vector object from given TT cores. :param a: List of TT cores. :type a: list :returns: vector -- TT-vector constructed from the given cores.
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/vector.py#L82-L105
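A minimal round trip for the core-list interface: split a TT-vector into its cores with to_list, rebuild it with from_list, and check that nothing changed.

import tt

a = tt.rand(3, 5, 2)                 # 5-dimensional TT-vector, mode size 3, inner ranks 2
cores = tt.vector.to_list(a)         # list of d arrays of shape (r[i], n[i], r[i+1])
b = tt.vector.from_list(cores)       # reassemble
print((a - b).norm() / a.norm())     # ~0
print([c.shape for c in cores])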
oseledets/ttpy
tt/core/vector.py
vector.erank
def erank(self):
    """ Effective rank of the TT-vector """
    r = self.r
    n = self.n
    d = self.d
    if d <= 1:
        er = 0e0
    else:
        sz = _np.dot(n * r[0:d], r[1:])
        if sz == 0:
            er = 0e0
        else:
            b = r[0] * n[0] + n[d - 1] * r[d]
            if d == 2:
                er = sz * 1.0 / b
            else:
                a = _np.sum(n[1:d - 1])
                er = (_np.sqrt(b * b + 4 * a * sz) - b) / (2 * a)
    return er
python
def erank(self):
    """ Effective rank of the TT-vector """
    r = self.r
    n = self.n
    d = self.d
    if d <= 1:
        er = 0e0
    else:
        sz = _np.dot(n * r[0:d], r[1:])
        if sz == 0:
            er = 0e0
        else:
            b = r[0] * n[0] + n[d - 1] * r[d]
            if d == 2:
                er = sz * 1.0 / b
            else:
                a = _np.sum(n[1:d - 1])
                er = (_np.sqrt(b * b + 4 * a * sz) - b) / (2 * a)
    return er
Effective rank of the TT-vector
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/vector.py#L127-L145
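The effective rank is the uniform rank that a TT-vector with the same mode sizes would need to occupy the same memory; for a tensor built with a constant inner rank it reproduces that rank. Attribute access below assumes erank is exposed as a property (call it as a method otherwise).

import tt

a = tt.rand(2, 10, 4)      # all inner ranks equal to 4
print(a.r)                 # [1 4 4 4 4 4 4 4 4 4 1]
print(a.erank)             # 4.0 for this uniform-rank example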
oseledets/ttpy
tt/core/vector.py
vector.r2c
def r2c(self):
    """Get complex vector from real one made by ``tensor.c2r()``.

    For tensor :math:`\\tilde{X}(i_1,\\ldots,i_d,i_{d+1})` returns complex tensor

    .. math::
        X(i_1,\\ldots,i_d) = \\tilde{X}(i_1,\\ldots,i_d,0) + i\\tilde{X}(i_1,\\ldots,i_d,1).

    >>> a = tt.rand(2,10,5) + 1j * tt.rand(2,10,5)
    >>> (a.c2r().r2c() - a).norm() / a.norm()
    7.310562016615692e-16
    """
    tmp = self.copy()
    newcore = _np.array(tmp.core, dtype=_np.complex)
    cr = newcore[tmp.ps[-2] - 1:tmp.ps[-1] - 1]
    cr = cr.reshape((tmp.r[-2], tmp.n[-1], tmp.r[-1]), order='F')
    cr[:, 1, :] *= 1j
    newcore[tmp.ps[-2] - 1:tmp.ps[-1] - 1] = cr.flatten('F')
    tmp.core = newcore
    return sum(tmp, axis=tmp.d - 1)
python
def r2c(self):
    """Get complex vector from real one made by ``tensor.c2r()``.

    For tensor :math:`\\tilde{X}(i_1,\\ldots,i_d,i_{d+1})` returns complex tensor

    .. math::
        X(i_1,\\ldots,i_d) = \\tilde{X}(i_1,\\ldots,i_d,0) + i\\tilde{X}(i_1,\\ldots,i_d,1).

    >>> a = tt.rand(2,10,5) + 1j * tt.rand(2,10,5)
    >>> (a.c2r().r2c() - a).norm() / a.norm()
    7.310562016615692e-16
    """
    tmp = self.copy()
    newcore = _np.array(tmp.core, dtype=_np.complex)
    cr = newcore[tmp.ps[-2] - 1:tmp.ps[-1] - 1]
    cr = cr.reshape((tmp.r[-2], tmp.n[-1], tmp.r[-1]), order='F')
    cr[:, 1, :] *= 1j
    newcore[tmp.ps[-2] - 1:tmp.ps[-1] - 1] = cr.flatten('F')
    tmp.core = newcore
    return sum(tmp, axis=tmp.d - 1)
Get complex vector from real one made by ``tensor.c2r()``. For tensor :math:`\\tilde{X}(i_1,\\ldots,i_d,i_{d+1})` returns complex tensor .. math:: X(i_1,\\ldots,i_d) = \\tilde{X}(i_1,\\ldots,i_d,0) + i\\tilde{X}(i_1,\\ldots,i_d,1). >>> a = tt.rand(2,10,5) + 1j * tt.rand(2,10,5) >>> (a.c2r().r2c() - a).norm() / a.norm() 7.310562016615692e-16
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/vector.py#L282-L302
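The docstring already shows the intended round trip; spelled out as a runnable snippet:

import tt

a = tt.rand(2, 10, 5) + 1j * tt.rand(2, 10, 5)   # complex TT-vector
b = a.c2r()            # real TT-vector with one extra mode of size 2
c = b.r2c()            # back to the complex representation
print((c - a).norm() / a.norm())                 # ~1e-16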
oseledets/ttpy
tt/core/vector.py
vector.full
def full(self, asvector=False):
    """Returns full array (uncompressed).

    .. warning::
        TT compression allows one to keep in memory tensors much larger than one's PC can
        handle in raw format. Therefore this function is quite unsafe; use it at your own risk.

    :returns: numpy.ndarray -- full tensor.
    """
    # Generate correct size vector
    sz = self.n.copy()
    if self.r[0] > 1:
        sz = _np.concatenate(([self.r[0]], sz))
    if self.r[self.d] > 1:
        sz = _np.concatenate(([self.r[self.d]], sz))
    if (_np.iscomplex(self.core).any()):
        a = _tt_f90.tt_f90.ztt_to_full(
            self.n, self.r, self.ps, self.core, _np.prod(sz))
    else:
        a = _tt_f90.tt_f90.dtt_to_full(
            self.n, self.r, self.ps, _np.real(self.core), _np.prod(sz))
    a = a.reshape(sz, order='F')
    if asvector:
        a = a.flatten(order='F')
    return a
python
def full(self, asvector=False):
    """Returns full array (uncompressed).

    .. warning::
        TT compression allows one to keep in memory tensors much larger than one's PC can
        handle in raw format. Therefore this function is quite unsafe; use it at your own risk.

    :returns: numpy.ndarray -- full tensor.
    """
    # Generate correct size vector
    sz = self.n.copy()
    if self.r[0] > 1:
        sz = _np.concatenate(([self.r[0]], sz))
    if self.r[self.d] > 1:
        sz = _np.concatenate(([self.r[self.d]], sz))
    if (_np.iscomplex(self.core).any()):
        a = _tt_f90.tt_f90.ztt_to_full(
            self.n, self.r, self.ps, self.core, _np.prod(sz))
    else:
        a = _tt_f90.tt_f90.dtt_to_full(
            self.n, self.r, self.ps, _np.real(self.core), _np.prod(sz))
    a = a.reshape(sz, order='F')
    if asvector:
        a = a.flatten(order='F')
    return a
Returns full array (uncompressed). .. warning:: TT compression allows one to keep in memory tensors much larger than one's PC can handle in raw format. Therefore this function is quite unsafe; use it at your own risk. :returns: numpy.ndarray -- full tensor.
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/vector.py#L326-L352
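A small sanity check for full: expand a low-dimensional TT-vector and inspect the shapes; asvector=True flattens in Fortran order, as the code shows. Keep the mode sizes tiny, as the warning above stresses.

import numpy as np
import tt

a = tt.ones(2, 4)              # 2 x 2 x 2 x 2 tensor of ones
A = a.full()
print(A.shape)                 # (2, 2, 2, 2)
v = a.full(asvector=True)
print(v.shape, np.allclose(v, 1.0))   # (16,) True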
oseledets/ttpy
tt/core/vector.py
vector.round
def round(self, eps=1e-14, rmax=1000000):
    """Applies TT rounding procedure to the TT-vector and **returns rounded tensor**.

    :param eps: Rounding accuracy.
    :type eps: float
    :param rmax: Maximal rank
    :type rmax: int
    :returns: tensor -- rounded TT-vector.

    Usage example:

    >>> a = tt.ones(2, 10)
    >>> b = a + a
    >>> print b.r
    array([1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1], dtype=int32)
    >>> b = b.round(1E-14)
    >>> print b.r
    array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
    """
    c = vector()
    c.n = _np.copy(self.n)
    c.d = self.d
    c.r = _np.copy(self.r)
    c.ps = _np.copy(self.ps)
    if (_np.iscomplex(self.core).any()):
        _tt_f90.tt_f90.ztt_compr2(c.n, c.r, c.ps, self.core, eps, rmax)
        c.core = _tt_f90.tt_f90.zcore.copy()
    else:
        _tt_f90.tt_f90.dtt_compr2(c.n, c.r, c.ps, self.core, eps, rmax)
        c.core = _tt_f90.tt_f90.core.copy()
    _tt_f90.tt_f90.tt_dealloc()
    return c
python
def round(self, eps=1e-14, rmax=1000000):
    """Applies TT rounding procedure to the TT-vector and **returns rounded tensor**.

    :param eps: Rounding accuracy.
    :type eps: float
    :param rmax: Maximal rank
    :type rmax: int
    :returns: tensor -- rounded TT-vector.

    Usage example:

    >>> a = tt.ones(2, 10)
    >>> b = a + a
    >>> print b.r
    array([1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1], dtype=int32)
    >>> b = b.round(1E-14)
    >>> print b.r
    array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
    """
    c = vector()
    c.n = _np.copy(self.n)
    c.d = self.d
    c.r = _np.copy(self.r)
    c.ps = _np.copy(self.ps)
    if (_np.iscomplex(self.core).any()):
        _tt_f90.tt_f90.ztt_compr2(c.n, c.r, c.ps, self.core, eps, rmax)
        c.core = _tt_f90.tt_f90.zcore.copy()
    else:
        _tt_f90.tt_f90.dtt_compr2(c.n, c.r, c.ps, self.core, eps, rmax)
        c.core = _tt_f90.tt_f90.core.copy()
    _tt_f90.tt_f90.tt_dealloc()
    return c
Applies TT rounding procedure to the TT-vector and **returns rounded tensor**. :param eps: Rounding accuracy. :type eps: float :param rmax: Maximal rank :type rmax: int :returns: tensor -- rounded TT-vector. Usage example: >>> a = tt.ones(2, 10) >>> b = a + a >>> print b.r array([1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1], dtype=int32) >>> b = b.round(1E-14) >>> print b.r array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/vector.py#L381-L413
oseledets/ttpy
tt/core/vector.py
vector.rmean
def rmean(self):
    """ Calculates the mean rank of a TT-vector."""
    if not _np.all(self.n):
        return 0
    # Solving quadratic equation ar^2 + br + c = 0;
    a = _np.sum(self.n[1:-1])
    b = self.n[0] + self.n[-1]
    c = - _np.sum(self.n * self.r[1:] * self.r[:-1])
    D = b ** 2 - 4 * a * c
    r = 0.5 * (-b + _np.sqrt(D)) / a
    return r
python
def rmean(self):
    """ Calculates the mean rank of a TT-vector."""
    if not _np.all(self.n):
        return 0
    # Solving quadratic equation ar^2 + br + c = 0;
    a = _np.sum(self.n[1:-1])
    b = self.n[0] + self.n[-1]
    c = - _np.sum(self.n * self.r[1:] * self.r[:-1])
    D = b ** 2 - 4 * a * c
    r = 0.5 * (-b + _np.sqrt(D)) / a
    return r
Calculates the mean rank of a TT-vector.
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/vector.py#L568-L578
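The quadratic solved here appears to equate the actual TT parameter count, sum_k n_k r_{k-1} r_k, with the count of a train having boundary ranks 1 and a uniform internal rank r, which yields a*r^2 + b*r + c = 0 with the coefficients used in the code. A small check of that reading (assuming standard ``tt``/``numpy`` imports):

import numpy as np
import tt

x = tt.rand(4, 8, 3)                  # mode size 4, d = 8, internal ranks 3
n, r = x.n, x.r
storage = np.sum(n * r[:-1] * r[1:])  # actual number of TT parameters

re = x.rmean()                        # effective uniform rank
# Parameter count of a train with boundary ranks 1 and uniform rank `re`;
# the two counts agree by construction of the effective rank.
uniform = n[0] * re + n[-1] * re + re ** 2 * np.sum(n[1:-1])
print(storage, uniform)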
oseledets/ttpy
tt/core/vector.py
vector.qtt_fft1
def qtt_fft1(self,tol,inverse=False, bitReverse=True): """ Compute 1D (inverse) discrete Fourier Transform in the QTT format. :param tol: error tolerance. :type tol: float :param inverse: whether do an inverse FFT or not. :type inverse: Boolean :param bitReverse: whether do the bit reversion or not. If this function is used as a subroutine for multi-dimensional qtt-fft, this option need to be set False. :type bitReverse: Boolean. :returns: QTT-vector of FFT coefficients. This is a python translation of the Matlab function "qtt_fft1" in Ivan Oseledets' project TT-Toolbox(https://github.com/oseledets/TT-Toolbox) See S. Dolgov, B. Khoromskij, D. Savostyanov, Superfast Fourier transform using QTT approximation, J. Fourier Anal. Appl., 18(5), 2012. """ d = self.d r = self.r.copy() y = self.to_list(self) if inverse: twiddle =-1+1.22e-16j # exp(pi*1j) else: twiddle =-1-1.22e-16j # exp(-pi*1j) for i in range(d-1, 0, -1): r1= y[i].shape[0] # head r r2= y[i].shape[2] # tail r crd2 = _np.zeros((r1, 2, r2), order='F', dtype=complex) # last block +- crd2[:,0,:]= (y[i][:,0,:] + y[i][:,1,:])/_np.sqrt(2) crd2[:,1,:]= (y[i][:,0,:] - y[i][:,1,:])/_np.sqrt(2) # last block twiddles y[i]= _np.zeros((r1*2, 2, r2),order='F',dtype=complex) y[i][0:r1, 0, 0:r2]= crd2[:,0,:] y[i][r1:r1*2, 1, 0:r2]= crd2[:,1,:] #1..i-1 block twiddles and qr rv=1; for j in range(0, i): cr=y[j] r1= cr.shape[0] # head r r2= cr.shape[2] # tail r if j==0: r[j]=r1 r[j+1] = r2*2 y[j] = _np.zeros((r[j], 2, r[j+1]),order='F',dtype=complex) y[j][0:r1, :, 0:r2] = cr y[j][0:r1, 0, r2 :r[j+1]] = cr[:,0,:] y[j][0:r1, 1, r2 :r[j+1]] = twiddle**(1.0/(2**(i-j)))*cr[:,1,:] else: r[j]=r1*2 r[j+1] = r2*2 y[j] = _np.zeros((r[j], 2, r[j+1]),order='F',dtype=complex) y[j][0:r1, :, 0:r2] = cr y[j][r1:r[j], 0, r2 :r[j+1]] = cr[:,0,:] y[j][r1:r[j], 1, r2 :r[j+1]] = twiddle**(1.0/(2**(i-j)))*cr[:,1,:] y[j] = _np.reshape(y[j],( r[j], 2*r[j+1]),order='F') y[j] = _np.dot(rv,y[j]) r[j] = y[j].shape[0] y[j] = _np.reshape(y[j],( 2*r[j], r[j+1]),order='F') y[j], rv = _np.linalg.qr(y[j]) y[j] = _np.reshape(y[j], (r[j], 2, rv.shape[0]),order='F') y[i] = _np.reshape(y[i], (r[i], 2*r[i+1]),order='F') y[i] = _np.dot(rv,y[i]) r[i] = rv.shape[0] # backward svd for j in range(i, 0,-1): u,s,v = _np.linalg.svd(y[j], full_matrices=False) rnew = my_chop2(s, _np.linalg.norm(s)*tol/_np.sqrt(i)) u=_np.dot(u[:, 0:rnew], _np.diag(s[0:rnew])) v= v[0:rnew, :] y[j] = _np.reshape(v, (rnew, 2, r[j+1]),order='F' ) y[j-1] = _np.reshape(y[j-1], (r[j-1]*2,r[j] ),order='F' ) y[j-1] = _np.dot(y[j-1], u) r[j] = rnew y[j-1] = _np.reshape(y[j-1], (r[j-1],r[j]*2 ),order='F' ) y[0] = _np.reshape(y[0], (r[0],2, r[1]), order='F' ) # FFT on the first block y[0]=_np.transpose(y[0],(1,0,2)) y[0]=_np.reshape(y[0],(2, r[0]*r[1]),order='F') y[0]= _np.dot( _np.array([[1,1],[1,-1]]), y[0])/_np.sqrt(2) y[0]=_np.reshape(y[0],(2, r[0], r[1]),order='F') y[0]=_np.transpose(y[0],(1,0,2)) if bitReverse: # Reverse the train y2=[None]*d for i in range(d): y2[d-i-1]= _np.transpose(y[i],(2,1,0)) y=self.from_list(y2) else: # for multi-dimensional qtt_fft y=self.from_list(y) return y
python
def qtt_fft1(self,tol,inverse=False, bitReverse=True): """ Compute 1D (inverse) discrete Fourier Transform in the QTT format. :param tol: error tolerance. :type tol: float :param inverse: whether do an inverse FFT or not. :type inverse: Boolean :param bitReverse: whether do the bit reversion or not. If this function is used as a subroutine for multi-dimensional qtt-fft, this option need to be set False. :type bitReverse: Boolean. :returns: QTT-vector of FFT coefficients. This is a python translation of the Matlab function "qtt_fft1" in Ivan Oseledets' project TT-Toolbox(https://github.com/oseledets/TT-Toolbox) See S. Dolgov, B. Khoromskij, D. Savostyanov, Superfast Fourier transform using QTT approximation, J. Fourier Anal. Appl., 18(5), 2012. """ d = self.d r = self.r.copy() y = self.to_list(self) if inverse: twiddle =-1+1.22e-16j # exp(pi*1j) else: twiddle =-1-1.22e-16j # exp(-pi*1j) for i in range(d-1, 0, -1): r1= y[i].shape[0] # head r r2= y[i].shape[2] # tail r crd2 = _np.zeros((r1, 2, r2), order='F', dtype=complex) # last block +- crd2[:,0,:]= (y[i][:,0,:] + y[i][:,1,:])/_np.sqrt(2) crd2[:,1,:]= (y[i][:,0,:] - y[i][:,1,:])/_np.sqrt(2) # last block twiddles y[i]= _np.zeros((r1*2, 2, r2),order='F',dtype=complex) y[i][0:r1, 0, 0:r2]= crd2[:,0,:] y[i][r1:r1*2, 1, 0:r2]= crd2[:,1,:] #1..i-1 block twiddles and qr rv=1; for j in range(0, i): cr=y[j] r1= cr.shape[0] # head r r2= cr.shape[2] # tail r if j==0: r[j]=r1 r[j+1] = r2*2 y[j] = _np.zeros((r[j], 2, r[j+1]),order='F',dtype=complex) y[j][0:r1, :, 0:r2] = cr y[j][0:r1, 0, r2 :r[j+1]] = cr[:,0,:] y[j][0:r1, 1, r2 :r[j+1]] = twiddle**(1.0/(2**(i-j)))*cr[:,1,:] else: r[j]=r1*2 r[j+1] = r2*2 y[j] = _np.zeros((r[j], 2, r[j+1]),order='F',dtype=complex) y[j][0:r1, :, 0:r2] = cr y[j][r1:r[j], 0, r2 :r[j+1]] = cr[:,0,:] y[j][r1:r[j], 1, r2 :r[j+1]] = twiddle**(1.0/(2**(i-j)))*cr[:,1,:] y[j] = _np.reshape(y[j],( r[j], 2*r[j+1]),order='F') y[j] = _np.dot(rv,y[j]) r[j] = y[j].shape[0] y[j] = _np.reshape(y[j],( 2*r[j], r[j+1]),order='F') y[j], rv = _np.linalg.qr(y[j]) y[j] = _np.reshape(y[j], (r[j], 2, rv.shape[0]),order='F') y[i] = _np.reshape(y[i], (r[i], 2*r[i+1]),order='F') y[i] = _np.dot(rv,y[i]) r[i] = rv.shape[0] # backward svd for j in range(i, 0,-1): u,s,v = _np.linalg.svd(y[j], full_matrices=False) rnew = my_chop2(s, _np.linalg.norm(s)*tol/_np.sqrt(i)) u=_np.dot(u[:, 0:rnew], _np.diag(s[0:rnew])) v= v[0:rnew, :] y[j] = _np.reshape(v, (rnew, 2, r[j+1]),order='F' ) y[j-1] = _np.reshape(y[j-1], (r[j-1]*2,r[j] ),order='F' ) y[j-1] = _np.dot(y[j-1], u) r[j] = rnew y[j-1] = _np.reshape(y[j-1], (r[j-1],r[j]*2 ),order='F' ) y[0] = _np.reshape(y[0], (r[0],2, r[1]), order='F' ) # FFT on the first block y[0]=_np.transpose(y[0],(1,0,2)) y[0]=_np.reshape(y[0],(2, r[0]*r[1]),order='F') y[0]= _np.dot( _np.array([[1,1],[1,-1]]), y[0])/_np.sqrt(2) y[0]=_np.reshape(y[0],(2, r[0], r[1]),order='F') y[0]=_np.transpose(y[0],(1,0,2)) if bitReverse: # Reverse the train y2=[None]*d for i in range(d): y2[d-i-1]= _np.transpose(y[i],(2,1,0)) y=self.from_list(y2) else: # for multi-dimensional qtt_fft y=self.from_list(y) return y
Compute 1D (inverse) discrete Fourier Transform in the QTT format. :param tol: error tolerance. :type tol: float :param inverse: whether do an inverse FFT or not. :type inverse: Boolean :param bitReverse: whether do the bit reversion or not. If this function is used as a subroutine for multi-dimensional qtt-fft, this option need to be set False. :type bitReverse: Boolean. :returns: QTT-vector of FFT coefficients. This is a python translation of the Matlab function "qtt_fft1" in Ivan Oseledets' project TT-Toolbox(https://github.com/oseledets/TT-Toolbox) See S. Dolgov, B. Khoromskij, D. Savostyanov, Superfast Fourier transform using QTT approximation, J. Fourier Anal. Appl., 18(5), 2012.
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/core/vector.py#L580-L687
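A hypothetical end-to-end sketch of the call pattern. It assumes the ndarray constructor ``tt.vector(array, eps)`` (older releases expose the same class as ``tt.tensor``), and the comparison against NumPy's FFT is indicative only: the 1/sqrt(2) factors inside ``qtt_fft1`` suggest a unitary normalization, but ordering and sign conventions should be checked against the TT-Toolbox reference implementation.

import numpy as np
import tt

d = 10
N = 2 ** d
signal = np.random.rand(N)

# QTT layout: reshape the length-2**d signal into a 2 x 2 x ... x 2 array.
x = tt.vector(signal.reshape([2] * d, order='F'), 1e-12)

y = x.qtt_fft1(1e-10)          # superfast FFT in the QTT format

# Indicative check against the (unitarily scaled) dense FFT.
ref = np.fft.fft(signal) / np.sqrt(N)
err = np.linalg.norm(y.full(asvector=True) - ref) / np.linalg.norm(ref)
print(err)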
oseledets/ttpy
tt/eigb/eigb.py
eigb
def eigb(A, y0, eps, rmax=150, nswp=20, max_full_size=1000, verb=1): """ Approximate computation of minimal eigenvalues in tensor train format This function uses alternating least-squares algorithm for the computation of several minimal eigenvalues. If you want maximal eigenvalues, just send -A to the function. :Reference: S. V. Dolgov, B. N. Khoromskij, I. V. Oseledets, and D. V. Savostyanov. Computation of extreme eigenvalues in higher dimensions using block tensor train format. Computer Phys. Comm., 185(4):1207-1216, 2014. http://dx.doi.org/10.1016/j.cpc.2013.12.017 :param A: Matrix in the TT-format :type A: matrix :param y0: Initial guess in the block TT-format, r(d+1) is the number of eigenvalues sought :type y0: tensor :param eps: Accuracy required :type eps: float :param rmax: Maximal rank :type rmax: int :param kickrank: Addition rank, the larger the more robus the method, :type kickrank: int :rtype: A tuple (ev, tensor), where ev is a list of eigenvalues, tensor is an approximation to eigenvectors. :Example: >>> import tt >>> import tt.eigb >>> d = 8; f = 3 >>> r = [8] * (d * f + 1); r[d * f] = 8; r[0] = 1 >>> x = tt.rand(n, d * f, r) >>> a = tt.qlaplace_dd([8, 8, 8]) >>> sol, ev = tt.eigb.eigb(a, x, 1e-6, verb=0) Solving a block eigenvalue problem Looking for 8 eigenvalues with accuracy 1E-06 swp: 1 er = 35.93 rmax:19 swp: 2 er = 4.51015E-04 rmax:18 swp: 3 er = 1.87584E-12 rmax:17 Total number of matvecs: 0 >>> print ev [ 0.00044828 0.00089654 0.00089654 0.00089654 0.0013448 0.0013448 0.0013448 0.00164356] """ ry = y0.r.copy() lam = tt_eigb.tt_block_eig.tt_eigb(y0.d, A.n, A.m, A.tt.r, A.tt.core, y0.core, ry, eps, rmax, ry[y0.d], 0, nswp, max_full_size, verb) y = tensor() y.d = y0.d y.n = A.n.copy() y.r = ry y.core = tt_eigb.tt_block_eig.result_core.copy() tt_eigb.tt_block_eig.deallocate_result() y.get_ps() return y, lam
python
def eigb(A, y0, eps, rmax=150, nswp=20, max_full_size=1000, verb=1): """ Approximate computation of minimal eigenvalues in tensor train format This function uses alternating least-squares algorithm for the computation of several minimal eigenvalues. If you want maximal eigenvalues, just send -A to the function. :Reference: S. V. Dolgov, B. N. Khoromskij, I. V. Oseledets, and D. V. Savostyanov. Computation of extreme eigenvalues in higher dimensions using block tensor train format. Computer Phys. Comm., 185(4):1207-1216, 2014. http://dx.doi.org/10.1016/j.cpc.2013.12.017 :param A: Matrix in the TT-format :type A: matrix :param y0: Initial guess in the block TT-format, r(d+1) is the number of eigenvalues sought :type y0: tensor :param eps: Accuracy required :type eps: float :param rmax: Maximal rank :type rmax: int :param kickrank: Addition rank, the larger the more robus the method, :type kickrank: int :rtype: A tuple (ev, tensor), where ev is a list of eigenvalues, tensor is an approximation to eigenvectors. :Example: >>> import tt >>> import tt.eigb >>> d = 8; f = 3 >>> r = [8] * (d * f + 1); r[d * f] = 8; r[0] = 1 >>> x = tt.rand(n, d * f, r) >>> a = tt.qlaplace_dd([8, 8, 8]) >>> sol, ev = tt.eigb.eigb(a, x, 1e-6, verb=0) Solving a block eigenvalue problem Looking for 8 eigenvalues with accuracy 1E-06 swp: 1 er = 35.93 rmax:19 swp: 2 er = 4.51015E-04 rmax:18 swp: 3 er = 1.87584E-12 rmax:17 Total number of matvecs: 0 >>> print ev [ 0.00044828 0.00089654 0.00089654 0.00089654 0.0013448 0.0013448 0.0013448 0.00164356] """ ry = y0.r.copy() lam = tt_eigb.tt_block_eig.tt_eigb(y0.d, A.n, A.m, A.tt.r, A.tt.core, y0.core, ry, eps, rmax, ry[y0.d], 0, nswp, max_full_size, verb) y = tensor() y.d = y0.d y.n = A.n.copy() y.r = ry y.core = tt_eigb.tt_block_eig.result_core.copy() tt_eigb.tt_block_eig.deallocate_result() y.get_ps() return y, lam
Approximate computation of minimal eigenvalues in tensor train format This function uses alternating least-squares algorithm for the computation of several minimal eigenvalues. If you want maximal eigenvalues, just send -A to the function. :Reference: S. V. Dolgov, B. N. Khoromskij, I. V. Oseledets, and D. V. Savostyanov. Computation of extreme eigenvalues in higher dimensions using block tensor train format. Computer Phys. Comm., 185(4):1207-1216, 2014. http://dx.doi.org/10.1016/j.cpc.2013.12.017 :param A: Matrix in the TT-format :type A: matrix :param y0: Initial guess in the block TT-format, r(d+1) is the number of eigenvalues sought :type y0: tensor :param eps: Accuracy required :type eps: float :param rmax: Maximal rank :type rmax: int :param kickrank: Addition rank, the larger the more robus the method, :type kickrank: int :rtype: A tuple (ev, tensor), where ev is a list of eigenvalues, tensor is an approximation to eigenvectors. :Example: >>> import tt >>> import tt.eigb >>> d = 8; f = 3 >>> r = [8] * (d * f + 1); r[d * f] = 8; r[0] = 1 >>> x = tt.rand(n, d * f, r) >>> a = tt.qlaplace_dd([8, 8, 8]) >>> sol, ev = tt.eigb.eigb(a, x, 1e-6, verb=0) Solving a block eigenvalue problem Looking for 8 eigenvalues with accuracy 1E-06 swp: 1 er = 35.93 rmax:19 swp: 2 er = 4.51015E-04 rmax:18 swp: 3 er = 1.87584E-12 rmax:17 Total number of matvecs: 0 >>> print ev [ 0.00044828 0.00089654 0.00089654 0.00089654 0.0013448 0.0013448 0.0013448 0.00164356]
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/eigb/eigb.py#L7-L65
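Note that the code returns the block of eigenvectors first and the eigenvalues second (``return y, lam``), matching the ``sol, ev = ...`` unpacking in the example rather than the ``:rtype:`` sentence. The sketch below fills in the ``n`` that the docstring example leaves undefined; the sizes are illustrative and follow the QTT Laplacian convention of that example.

import tt
import tt.eigb

A = tt.qlaplace_dd([8, 8, 8])   # QTT Laplacian, 3 * 8 = 24 binary modes

B = 8                           # number of eigenpairs sought
n = A.n
r = [8] * (len(n) + 1)
r[0] = 1
r[-1] = B                       # the block size goes into the last TT-rank
x0 = tt.rand(n, len(n), r)

y, lam = tt.eigb.eigb(A, x0, 1e-6, verb=0)
print(lam)                      # B smallest eigenvalues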
oseledets/ttpy
tt/solvers.py
GMRES
def GMRES(A, u_0, b, eps=1e-6, maxit=100, m=20, _iteration=0, callback=None, verbose=0): """ Flexible TT GMRES :param A: matvec(x[, eps]) :param u_0: initial vector :param b: answer :param maxit: max number of iterations :param eps: required accuracy :param m: number of iteration without restart :param _iteration: iteration counter :param callback: :param verbose: to print debug info or not :return: answer, residual >>> from tt import GMRES >>> def matvec(x, eps): >>> return tt.matvec(S, x).round(eps) >>> answer, res = GMRES(matvec, u_0, b, eps=1e-8) """ maxitexceeded = False converged = False if verbose: print('GMRES(m=%d, _iteration=%d, maxit=%d)' % (m, _iteration, maxit)) v = np.ones((m + 1), dtype=object) * np.nan R = np.ones((m, m)) * np.nan g = np.zeros(m) s = np.ones(m) * np.nan c = np.ones(m) * np.nan v[0] = b - A(u_0, eps=eps) v[0] = v[0].round(eps) resnorm = v[0].norm() curr_beta = resnorm bnorm = b.norm() wlen = resnorm q = m for j in range(m): _iteration += 1 delta = eps / (curr_beta / resnorm) if verbose: print("it = %d delta = " % _iteration, delta) v[j] *= 1.0 / wlen v[j + 1] = A(v[j], eps=delta) for i in range(j + 1): R[i, j] = tt.dot(v[j + 1], v[i]) v[j + 1] = v[j + 1] - R[i, j] * v[i] v[j + 1] = v[j + 1].round(delta) wlen = v[j + 1].norm() for i in range(j): r1 = R[i, j] r2 = R[i + 1, j] R[i, j] = c[i] * r1 - s[i] * r2 R[i + 1, j] = c[i] * r2 + s[i] * r1 denom = np.hypot(wlen, R[j, j]) s[j] = wlen / denom c[j] = -R[j, j] / denom R[j, j] = -denom g[j] = c[j] * curr_beta curr_beta *= s[j] if verbose: print("it = {}, ||r|| = {}".format(_iteration, curr_beta / bnorm)) converged = (curr_beta / bnorm) < eps or (curr_beta / resnorm) < eps maxitexceeded = _iteration >= maxit if converged or maxitexceeded: q = j + 1 break y = la.solve_triangular(R[:q, :q], g[:q], check_finite=False) for idx in range(q): u_0 += v[idx] * y[idx] u_0 = u_0.round(eps) if callback is not None: callback(u_0) if converged or maxitexceeded: return u_0, resnorm / bnorm return GMRES(A, u_0, b, eps, maxit, m, _iteration, callback=callback, verbose=verbose)
python
def GMRES(A, u_0, b, eps=1e-6, maxit=100, m=20, _iteration=0, callback=None, verbose=0): """ Flexible TT GMRES :param A: matvec(x[, eps]) :param u_0: initial vector :param b: answer :param maxit: max number of iterations :param eps: required accuracy :param m: number of iteration without restart :param _iteration: iteration counter :param callback: :param verbose: to print debug info or not :return: answer, residual >>> from tt import GMRES >>> def matvec(x, eps): >>> return tt.matvec(S, x).round(eps) >>> answer, res = GMRES(matvec, u_0, b, eps=1e-8) """ maxitexceeded = False converged = False if verbose: print('GMRES(m=%d, _iteration=%d, maxit=%d)' % (m, _iteration, maxit)) v = np.ones((m + 1), dtype=object) * np.nan R = np.ones((m, m)) * np.nan g = np.zeros(m) s = np.ones(m) * np.nan c = np.ones(m) * np.nan v[0] = b - A(u_0, eps=eps) v[0] = v[0].round(eps) resnorm = v[0].norm() curr_beta = resnorm bnorm = b.norm() wlen = resnorm q = m for j in range(m): _iteration += 1 delta = eps / (curr_beta / resnorm) if verbose: print("it = %d delta = " % _iteration, delta) v[j] *= 1.0 / wlen v[j + 1] = A(v[j], eps=delta) for i in range(j + 1): R[i, j] = tt.dot(v[j + 1], v[i]) v[j + 1] = v[j + 1] - R[i, j] * v[i] v[j + 1] = v[j + 1].round(delta) wlen = v[j + 1].norm() for i in range(j): r1 = R[i, j] r2 = R[i + 1, j] R[i, j] = c[i] * r1 - s[i] * r2 R[i + 1, j] = c[i] * r2 + s[i] * r1 denom = np.hypot(wlen, R[j, j]) s[j] = wlen / denom c[j] = -R[j, j] / denom R[j, j] = -denom g[j] = c[j] * curr_beta curr_beta *= s[j] if verbose: print("it = {}, ||r|| = {}".format(_iteration, curr_beta / bnorm)) converged = (curr_beta / bnorm) < eps or (curr_beta / resnorm) < eps maxitexceeded = _iteration >= maxit if converged or maxitexceeded: q = j + 1 break y = la.solve_triangular(R[:q, :q], g[:q], check_finite=False) for idx in range(q): u_0 += v[idx] * y[idx] u_0 = u_0.round(eps) if callback is not None: callback(u_0) if converged or maxitexceeded: return u_0, resnorm / bnorm return GMRES(A, u_0, b, eps, maxit, m, _iteration, callback=callback, verbose=verbose)
Flexible TT GMRES :param A: matvec(x[, eps]) :param u_0: initial vector :param b: answer :param maxit: max number of iterations :param eps: required accuracy :param m: number of iteration without restart :param _iteration: iteration counter :param callback: :param verbose: to print debug info or not :return: answer, residual >>> from tt import GMRES >>> def matvec(x, eps): >>> return tt.matvec(S, x).round(eps) >>> answer, res = GMRES(matvec, u_0, b, eps=1e-8)
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/solvers.py#L10-L95
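A more complete usage sketch than the doctest fragment. The operator is a stand-in (a shifted 1-D QTT Laplacian); any TT-matrix of matching shape would do, and ``from tt import GMRES`` follows the import shown in the docstring.

import tt
from tt import GMRES

d = 8
S = tt.qlaplace_dd([d]) + tt.eye(2, d)   # stand-in operator on 2**8 unknowns
b = tt.ones(2, d)
u0 = tt.ones(2, d)

def matvec(x, eps):
    # GMRES expects a callable A(x, eps) returning S @ x rounded to eps.
    return tt.matvec(S, x).round(eps)

u, resid = GMRES(matvec, u0, b, eps=1e-8, maxit=100, m=20)
print(resid)   # relative residual estimate (depends on the operator's conditioning)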
oseledets/ttpy
tt/completion/als.py
getRow
def getRow(leftU, rightV, jVec):
    '''
    Compute X_{\geq \mu}^T \otimes X_{\leq \mu}

    X_{\geq \mu} = V_{\mu+1}(j_{\mu}) \ldots V_{d} (j_{d})   [left interface matrix]
    X_{\leq \mu} = U_{1} (j_{1}) \ldots U_{\mu-1}(j_{\mu-1})  [right interface matrix]

    Parameters:
        :list of numpy.arrays: leftU
            left-orthogonal cores from 1 to \mu-1
        :list of numpy.arrays: rightV
            right-orthogonal cores from \mu+1 to d
        :list, tuple, np.array: jVec
            indices for each dimension n[k]

    Returns:
        :numpy.array: result
            Kronecker product between left and right interface matrices.
            Left matrix is transposed.
    '''
    jLeft = None
    jRight = None
    if len(leftU) > 0:
        jLeft = jVec[:len(leftU)]
    if len(rightV) > 0:
        jRight = jVec[-len(rightV):]

    multU = np.ones([1, 1])
    for k in xrange(len(leftU)):
        multU = np.dot(multU, leftU[k][:, jLeft[k], :])

    multV = np.ones([1, 1])
    for k in xrange(len(rightV)-1, -1, -1):
        multV = np.dot(rightV[k][:, jRight[k], :], multV)

    result = np.kron(multV.T, multU)
    return result
python
def getRow(leftU, rightV, jVec):
    '''
    Compute X_{\geq \mu}^T \otimes X_{\leq \mu}

    X_{\geq \mu} = V_{\mu+1}(j_{\mu}) \ldots V_{d} (j_{d})   [left interface matrix]
    X_{\leq \mu} = U_{1} (j_{1}) \ldots U_{\mu-1}(j_{\mu-1})  [right interface matrix]

    Parameters:
        :list of numpy.arrays: leftU
            left-orthogonal cores from 1 to \mu-1
        :list of numpy.arrays: rightV
            right-orthogonal cores from \mu+1 to d
        :list, tuple, np.array: jVec
            indices for each dimension n[k]

    Returns:
        :numpy.array: result
            Kronecker product between left and right interface matrices.
            Left matrix is transposed.
    '''
    jLeft = None
    jRight = None
    if len(leftU) > 0:
        jLeft = jVec[:len(leftU)]
    if len(rightV) > 0:
        jRight = jVec[-len(rightV):]

    multU = np.ones([1, 1])
    for k in xrange(len(leftU)):
        multU = np.dot(multU, leftU[k][:, jLeft[k], :])

    multV = np.ones([1, 1])
    for k in xrange(len(rightV)-1, -1, -1):
        multV = np.dot(rightV[k][:, jRight[k], :], multV)

    result = np.kron(multV.T, multU)
    return result
Compute X_{\geq \mu}^T \otimes X_{\leq \mu} X_{\geq \mu} = V_{\mu+1}(j_{\mu}) \ldots V_{d} (j_{d}) [left interface matrix] X_{\leq \mu} = U_{1} (j_{1}) \ldots U_{\mu-1}(j_{\mu-1}) [right interface matrix] Parameters: :list of numpy.arrays: leftU left-orthogonal cores from 1 to \mu-1 :list of numpy.arrays: rightV right-orthogonal cores from \mu+1 to d :list, tuple, np.array: jVec indices for each dimension n[k] Returns: :numpy.array: result Kronecker product between left and right interface matrices. Left matrix is transposed.
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/completion/als.py#L23-L56
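A tiny worked example with hypothetical cores, showing the shape of the row produced for one known-entry multi-index. It assumes ``getRow`` is importable from ``tt.completion.als``; the module itself is written in Python-2 style (``xrange``), so the sketch assumes a matching environment.

import numpy as np
from tt.completion.als import getRow

# Hypothetical 4-dimensional TT: mode sizes 3, ranks (1, 2, 2, 2, 1);
# we update core mu = 2 (0-based), so leftU = cores[:2], rightV = cores[3:].
np.random.seed(0)
cores = [np.random.rand(1, 3, 2),
         np.random.rand(2, 3, 2),
         np.random.rand(2, 3, 2),
         np.random.rand(2, 3, 1)]
mu = 2
jVec = [0, 2, 1, 2]                      # multi-index of one known element

row = getRow(cores[:mu], cores[mu + 1:], jVec)
print(row.shape)                         # (1, 4) == (1, r_mu * r_{mu+1})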
oseledets/ttpy
tt/completion/als.py
orthLRFull
def orthLRFull(coreList, mu, splitResult = True): ''' Orthogonalize list of TT-cores. Parameters: :list: coreList list of TT-cores (stored as numpy arrays) :int: mu separating index for left and right orthogonalization. Output cores will be left-orthogonal for dimensions from 1 to \mu-1 and right-orthogonal for dimensions from \mu+1 to d :boolean: splitResult = True Controls whether outut should be splitted into left-, non-, right-orthogonal parts or not. Returns: :list: resultU left-orthogonal cores with indices from 1 to \mu-1 :np.array: W \mu-th core :list: reultV right-orthogonal cores with indices from \mu+1 to d OR :list: resultU + [W] + resultV concatenated list of cores ''' d = len(coreList) assert (mu >= 0) and (mu <= d) resultU = [] for k in xrange(mu): core = coreList[k].copy() if k > 0: core = np.einsum('ijk,li->ljk', core, R) [r1, n, r2] = core.shape if (k < mu-1): core = reshape(core, [r1*n, r2]) Q, R = np.linalg.qr(core) rnew = Q.shape[1] core = reshape(Q, [r1, n, rnew]) resultU = resultU + [core] if mu > 0: W = core.copy() resultV = [] for k in xrange(d-1, mu, -1): core = coreList[k].copy() if (k < d-1): core = np.einsum('ijk,lk->ijl', core, R) [r1, n, r2] = core.shape if (k > mu+1): core = reshape(core, [r1, n*r2]) Q, R = np.linalg.qr(core.T) rnew = Q.shape[1] core = reshape(Q.T, [rnew, n, r2]) resultV = [core] + resultV if mu < d-1: if mu > 0: W = np.einsum('ijk,lk->ijl', W, R) else: W = np.einsum('ijk,lk->ijl', coreList[0], R) if splitResult: return resultU, W, resultV return resultU + [W] + resultV
python
def orthLRFull(coreList, mu, splitResult = True): ''' Orthogonalize list of TT-cores. Parameters: :list: coreList list of TT-cores (stored as numpy arrays) :int: mu separating index for left and right orthogonalization. Output cores will be left-orthogonal for dimensions from 1 to \mu-1 and right-orthogonal for dimensions from \mu+1 to d :boolean: splitResult = True Controls whether outut should be splitted into left-, non-, right-orthogonal parts or not. Returns: :list: resultU left-orthogonal cores with indices from 1 to \mu-1 :np.array: W \mu-th core :list: reultV right-orthogonal cores with indices from \mu+1 to d OR :list: resultU + [W] + resultV concatenated list of cores ''' d = len(coreList) assert (mu >= 0) and (mu <= d) resultU = [] for k in xrange(mu): core = coreList[k].copy() if k > 0: core = np.einsum('ijk,li->ljk', core, R) [r1, n, r2] = core.shape if (k < mu-1): core = reshape(core, [r1*n, r2]) Q, R = np.linalg.qr(core) rnew = Q.shape[1] core = reshape(Q, [r1, n, rnew]) resultU = resultU + [core] if mu > 0: W = core.copy() resultV = [] for k in xrange(d-1, mu, -1): core = coreList[k].copy() if (k < d-1): core = np.einsum('ijk,lk->ijl', core, R) [r1, n, r2] = core.shape if (k > mu+1): core = reshape(core, [r1, n*r2]) Q, R = np.linalg.qr(core.T) rnew = Q.shape[1] core = reshape(Q.T, [rnew, n, r2]) resultV = [core] + resultV if mu < d-1: if mu > 0: W = np.einsum('ijk,lk->ijl', W, R) else: W = np.einsum('ijk,lk->ijl', coreList[0], R) if splitResult: return resultU, W, resultV return resultU + [W] + resultV
Orthogonalize list of TT-cores. Parameters: :list: coreList list of TT-cores (stored as numpy arrays) :int: mu separating index for left and right orthogonalization. Output cores will be left-orthogonal for dimensions from 1 to \mu-1 and right-orthogonal for dimensions from \mu+1 to d :boolean: splitResult = True Controls whether outut should be splitted into left-, non-, right-orthogonal parts or not. Returns: :list: resultU left-orthogonal cores with indices from 1 to \mu-1 :np.array: W \mu-th core :list: reultV right-orthogonal cores with indices from \mu+1 to d OR :list: resultU + [W] + resultV concatenated list of cores
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/completion/als.py#L58-L119
oseledets/ttpy
tt/completion/als.py
computeFunctional
def computeFunctional(x, cooP):
    '''
    Compute value of functional J(X) = ||PX - PA||^2_F,
    where P is projector into index subspace of known elements,
    X is our approximation, A is original tensor.

    Parameters:
        :tt.vector: x
            current approximation [X]
        :dict: cooP
            dictionary with two records
            - 'indices': numpy.array of P x d shape,
              contains index subspace of P known elements;
              each row is an index of one element.
            - 'values': numpy array of size P,
              contains P known values.

    Returns:
        :float: result
            value of functional
    '''
    indices = cooP['indices']
    values = cooP['values']
    [P, d] = indices.shape
    assert P == len(values)

    result = 0
    for p in xrange(P):
        index = tuple(indices[p, :])
        result += (x[index] - values[p])**2
    result *= 0.5
    return result
python
def computeFunctional(x, cooP):
    '''
    Compute value of functional J(X) = ||PX - PA||^2_F,
    where P is projector into index subspace of known elements,
    X is our approximation, A is original tensor.

    Parameters:
        :tt.vector: x
            current approximation [X]
        :dict: cooP
            dictionary with two records
            - 'indices': numpy.array of P x d shape,
              contains index subspace of P known elements;
              each row is an index of one element.
            - 'values': numpy array of size P,
              contains P known values.

    Returns:
        :float: result
            value of functional
    '''
    indices = cooP['indices']
    values = cooP['values']
    [P, d] = indices.shape
    assert P == len(values)

    result = 0
    for p in xrange(P):
        index = tuple(indices[p, :])
        result += (x[index] - values[p])**2
    result *= 0.5
    return result
Compute value of functional J(X) = ||PX - PA||^2_F, where P is projector into index subspace of known elements, X is our approximation, A is original tensor. Parameters: :tt.vector: x current approximation [X] :dict: cooP dictionary with two records - 'indices': numpy.array of P x d shape, contains index subspace of P known elements; each row is an index of one element. - 'values': numpy array of size P, contains P known values. Returns: :float: result value of functional
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/completion/als.py#L121-L154
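A minimal sketch of the ``cooP`` data structure this function (and ``ttSparseALS`` below) expects; the "known" values are taken from the tensor itself, so the functional is zero up to round-off. The import path and sizes are assumptions.

import numpy as np
import tt
from tt.completion.als import computeFunctional

shape = [3, 4, 3, 4]
x = tt.rand(shape, len(shape), 2)

indices = np.array([[0, 1, 2, 3],
                    [2, 0, 1, 1],
                    [1, 3, 0, 2],
                    [0, 0, 0, 0],
                    [2, 2, 2, 2]])           # P x d array, one index per row
values = np.array([x[tuple(i)] for i in indices])

cooP = {'indices': indices, 'values': values}
print(computeFunctional(x, cooP))            # ~0.0: x matches its own entries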
oseledets/ttpy
tt/completion/als.py
ttSparseALS
def ttSparseALS(cooP, shape, x0=None, ttRank=1, tol=1e-5, maxnsweeps=20, verbose=True, alpha=1e-2): ''' TT completion via Alternating Least Squares algorithm. Parameters: :dict: cooP dictionary with two records - 'indices': numpy.array of P x d shape, contains index subspace of P known elements; each string is an index of one element. - 'values': numpy array of size P, contains P known values. :list, numpy.array: shape full-format shape of tensor to be completed [dimensions] :tt.vector: x0 = None initial approximation of completed tensor If it is specified, parameters 'shape' and 'ttRank' will be ignored :int, numpy.array: ttRank = 1 assumed rank of completed tensor :float: tol = 1e-5 tolerance for functional value :int: maxnsweeps = 20 maximal number of sweeps [sequential optimization of all d cores in right or left direction] :boolean: verbose = True switcher of messages from function :float: alpha: = 1e-2 regularizer of least squares problem for each slice of current TT core. [rcond parameter for np.linalg.lstsq] Returns: :tt.vector: xNew completed TT vector :list: fit list of functional values at each sweep ''' indices = cooP['indices'] values = cooP['values'] [P, d] = indices.shape assert P == len(values) timeVal = time.clock() if x0 is None: x = tt.rand(shape, r = ttRank) x = x.round(0.) x = (1./x.norm())*x else: x = copy.deepcopy(x0) assert d == x.d # TODO: also check if cooP indices are aligned with shape normP = np.linalg.norm(values) values /= normP fitList = [] sweepTimeList = [] initTime = time.clock() - timeVal timeVal = time.clock() coreList = tt.vector.to_list(x) #coreList = orthLRFull(coreList, mu = d, splitResult = False) # orthTime = time.clock() - timeVal if verbose: print("Initialization time: %.3f seconds (proc.time)" % (initTime)) # print "Orthogonalizing time: %.3f seconds (proc.time)" % (orthTime) for sweep in xrange(maxnsweeps): sweepStart = time.clock() # list left + right [kStart, kEnd, kStep] = [0, d, 1] # select direction of sweep ''' if sweep % 2 == 0: # left to rigth [kStart, kEnd, kStep] = [0, d, 1] else: # right to left [kStart, kEnd, kStep] = [d-1, -1, -1] ''' # fix k-th core to update for k in xrange(kStart, kEnd, kStep): [r1, n, r2] = coreList[k].shape core = np.zeros([r1, n, r2]) leftU = [] rightV = [] if k > 0: leftU = coreList[:k] if k < d-1: rightV = coreList[k+1:] for i in xrange(n): thetaI = np.where(indices[:, k] == i)[0] if len(thetaI) > 0: A = np.zeros([len(thetaI), r1*r2]) for j in xrange(len(thetaI)): tmp = getRow(leftU, rightV, indices[thetaI[j], :]) A[j:j+1, :] += tmp # .flatten(order = 'F') vecCoreSlice, _, _, _ = np.linalg.lstsq(A, values[thetaI])#, rcond = alpha) # 0.5*np.linalg.norm(np.dot(A, vecCoreSlice) - values[thetaI])**2. 
core[:, i, :] += reshape(vecCoreSlice, [r1, r2]) #### ''' if k < (d-1): core = reshape(core, [r1*n, r2]) Q, R = np.linalg.qr(core) rnew = Q.shape[1] core = reshape(Q, [r1, n, rnew]) coreList[k+1] = np.einsum('ijk,li->ljk', coreList[k+1], R) ''' coreList[k] = core.copy() ''' else: if (k > 0): core = reshape(core, [r1, n*r2]) Q, R = np.linalg.qr(core.T) rnew = Q.shape[1] core = reshape(Q.T, [rnew, n, r2]) coreList[k-1] = np.einsum('ijk,lk->ijl', coreList[k-1], R) ''' xNew = tt.vector.from_list(coreList) fit = computeFunctional(xNew, cooP) fitList.append(fit) if fit < tol: break if sweep > 0: if abs(fit - fitList[-2]) < tol: break sweepTimeList.append(time.clock() - sweepStart) if verbose: print("sweep %d/%d\t fit value: %.5e\t time: %.3f seconds (proc.time)" % (sweep+1, maxnsweeps, fit, sweepTimeList[-1])) if verbose: print("Total sweep time: %.3f seconds (proc.time)\t Total time: %.3f seconds (proc.time)" % (sum(sweepTimeList), sum(sweepTimeList) + initTime))# + orthTime) info = {'fit': fitList, 'initTime': initTime, 'sweepTime': sweepTimeList} # 'orthTime': orthTime, xNew *= normP values *= normP return xNew, info
python
def ttSparseALS(cooP, shape, x0=None, ttRank=1, tol=1e-5, maxnsweeps=20, verbose=True, alpha=1e-2): ''' TT completion via Alternating Least Squares algorithm. Parameters: :dict: cooP dictionary with two records - 'indices': numpy.array of P x d shape, contains index subspace of P known elements; each string is an index of one element. - 'values': numpy array of size P, contains P known values. :list, numpy.array: shape full-format shape of tensor to be completed [dimensions] :tt.vector: x0 = None initial approximation of completed tensor If it is specified, parameters 'shape' and 'ttRank' will be ignored :int, numpy.array: ttRank = 1 assumed rank of completed tensor :float: tol = 1e-5 tolerance for functional value :int: maxnsweeps = 20 maximal number of sweeps [sequential optimization of all d cores in right or left direction] :boolean: verbose = True switcher of messages from function :float: alpha: = 1e-2 regularizer of least squares problem for each slice of current TT core. [rcond parameter for np.linalg.lstsq] Returns: :tt.vector: xNew completed TT vector :list: fit list of functional values at each sweep ''' indices = cooP['indices'] values = cooP['values'] [P, d] = indices.shape assert P == len(values) timeVal = time.clock() if x0 is None: x = tt.rand(shape, r = ttRank) x = x.round(0.) x = (1./x.norm())*x else: x = copy.deepcopy(x0) assert d == x.d # TODO: also check if cooP indices are aligned with shape normP = np.linalg.norm(values) values /= normP fitList = [] sweepTimeList = [] initTime = time.clock() - timeVal timeVal = time.clock() coreList = tt.vector.to_list(x) #coreList = orthLRFull(coreList, mu = d, splitResult = False) # orthTime = time.clock() - timeVal if verbose: print("Initialization time: %.3f seconds (proc.time)" % (initTime)) # print "Orthogonalizing time: %.3f seconds (proc.time)" % (orthTime) for sweep in xrange(maxnsweeps): sweepStart = time.clock() # list left + right [kStart, kEnd, kStep] = [0, d, 1] # select direction of sweep ''' if sweep % 2 == 0: # left to rigth [kStart, kEnd, kStep] = [0, d, 1] else: # right to left [kStart, kEnd, kStep] = [d-1, -1, -1] ''' # fix k-th core to update for k in xrange(kStart, kEnd, kStep): [r1, n, r2] = coreList[k].shape core = np.zeros([r1, n, r2]) leftU = [] rightV = [] if k > 0: leftU = coreList[:k] if k < d-1: rightV = coreList[k+1:] for i in xrange(n): thetaI = np.where(indices[:, k] == i)[0] if len(thetaI) > 0: A = np.zeros([len(thetaI), r1*r2]) for j in xrange(len(thetaI)): tmp = getRow(leftU, rightV, indices[thetaI[j], :]) A[j:j+1, :] += tmp # .flatten(order = 'F') vecCoreSlice, _, _, _ = np.linalg.lstsq(A, values[thetaI])#, rcond = alpha) # 0.5*np.linalg.norm(np.dot(A, vecCoreSlice) - values[thetaI])**2. 
core[:, i, :] += reshape(vecCoreSlice, [r1, r2]) #### ''' if k < (d-1): core = reshape(core, [r1*n, r2]) Q, R = np.linalg.qr(core) rnew = Q.shape[1] core = reshape(Q, [r1, n, rnew]) coreList[k+1] = np.einsum('ijk,li->ljk', coreList[k+1], R) ''' coreList[k] = core.copy() ''' else: if (k > 0): core = reshape(core, [r1, n*r2]) Q, R = np.linalg.qr(core.T) rnew = Q.shape[1] core = reshape(Q.T, [rnew, n, r2]) coreList[k-1] = np.einsum('ijk,lk->ijl', coreList[k-1], R) ''' xNew = tt.vector.from_list(coreList) fit = computeFunctional(xNew, cooP) fitList.append(fit) if fit < tol: break if sweep > 0: if abs(fit - fitList[-2]) < tol: break sweepTimeList.append(time.clock() - sweepStart) if verbose: print("sweep %d/%d\t fit value: %.5e\t time: %.3f seconds (proc.time)" % (sweep+1, maxnsweeps, fit, sweepTimeList[-1])) if verbose: print("Total sweep time: %.3f seconds (proc.time)\t Total time: %.3f seconds (proc.time)" % (sum(sweepTimeList), sum(sweepTimeList) + initTime))# + orthTime) info = {'fit': fitList, 'initTime': initTime, 'sweepTime': sweepTimeList} # 'orthTime': orthTime, xNew *= normP values *= normP return xNew, info
TT completion via Alternating Least Squares algorithm. Parameters: :dict: cooP dictionary with two records - 'indices': numpy.array of P x d shape, contains index subspace of P known elements; each string is an index of one element. - 'values': numpy array of size P, contains P known values. :list, numpy.array: shape full-format shape of tensor to be completed [dimensions] :tt.vector: x0 = None initial approximation of completed tensor If it is specified, parameters 'shape' and 'ttRank' will be ignored :int, numpy.array: ttRank = 1 assumed rank of completed tensor :float: tol = 1e-5 tolerance for functional value :int: maxnsweeps = 20 maximal number of sweeps [sequential optimization of all d cores in right or left direction] :boolean: verbose = True switcher of messages from function :float: alpha: = 1e-2 regularizer of least squares problem for each slice of current TT core. [rcond parameter for np.linalg.lstsq] Returns: :tt.vector: xNew completed TT vector :list: fit list of functional values at each sweep
https://github.com/oseledets/ttpy/blob/b440f6299a6338de4aea67f3d839d613f4ef1374/tt/completion/als.py#L157-L290
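A hypothetical completion run: sample entries of a known rank-2 tensor, then ask the ALS routine to recover it from those samples. Sizes, sample count, and seed are illustrative, and the module's Python-2 idioms (``xrange``, ``time.clock``) mean this sketch assumes a matching environment.

import numpy as np
import tt
from tt.completion.als import ttSparseALS

np.random.seed(2)
shape = [10, 10, 10, 10]
d = len(shape)
original = tt.rand(shape, d, 2)

P = 2000
indices = np.vstack([np.random.randint(0, s, P) for s in shape]).T   # P x d
values = np.array([original[tuple(i)] for i in indices])
cooP = {'indices': indices, 'values': values}

xNew, info = ttSparseALS(cooP, shape, ttRank=2, tol=1e-8,
                         maxnsweeps=10, verbose=False)
print(info['fit'][-1])        # final value of the functional J(X)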
Infinidat/infi.systray
src/infi/systray/traybar.py
SysTrayIcon.update
def update(self, icon=None, hover_text=None):
    """ update icon image and/or hover text """
    if icon:
        self._icon = icon
        self._load_icon()
    if hover_text:
        self._hover_text = hover_text
    self._refresh_icon()
python
def update(self, icon=None, hover_text=None):
    """ update icon image and/or hover text """
    if icon:
        self._icon = icon
        self._load_icon()
    if hover_text:
        self._hover_text = hover_text
    self._refresh_icon()
update icon image and/or hover text
https://github.com/Infinidat/infi.systray/blob/209a6f8e5313122b35fdd2774aeceeaf9ce7c16d/src/infi/systray/traybar.py#L125-L132
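A usage sketch following the project's README: create the tray icon (Windows only), start it, then swap the image and hover text in place. The ``.ico`` paths and the menu callback are placeholders.

from infi.systray import SysTrayIcon

def say_hello(systray):
    print("Hello")

menu_options = (("Say hello", None, say_hello),)
systray = SysTrayIcon("icon1.ico", "Initial hover text", menu_options)
systray.start()

# Later, update the icon image and/or the hover text without recreating it.
systray.update(icon="icon2.ico", hover_text="New hover text")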
Infinidat/infi.systray
src/infi/systray/win32_adapter.py
encode_for_locale
def encode_for_locale(s):
    """
    Encode text items for system locale. If encoding fails, fall back to ASCII.
    """
    try:
        return s.encode(LOCALE_ENCODING, 'ignore')
    except (AttributeError, UnicodeDecodeError):
        return s.decode('ascii', 'ignore').encode(LOCALE_ENCODING)
python
def encode_for_locale(s):
    """
    Encode text items for system locale. If encoding fails, fall back to ASCII.
    """
    try:
        return s.encode(LOCALE_ENCODING, 'ignore')
    except (AttributeError, UnicodeDecodeError):
        return s.decode('ascii', 'ignore').encode(LOCALE_ENCODING)
Encode text items for system locale. If encoding fails, fall back to ASCII.
https://github.com/Infinidat/infi.systray/blob/209a6f8e5313122b35fdd2774aeceeaf9ce7c16d/src/infi/systray/win32_adapter.py#L86-L93
pbrady/fastcache
fastcache/__init__.py
lru_cache
def lru_cache(maxsize=128, typed=False, state=None, unhashable='error'):
    """Least-recently-used cache decorator.

    If *maxsize* is set to None, the LRU features are disabled and the cache
    can grow without bound.

    If *typed* is True, arguments of different types will be cached separately.
    For example, f(3.0) and f(3) will be treated as distinct calls with
    distinct results.

    If *state* is a list or dict, the items will be incorporated into
    argument hash.

    The result of calling the cached function with unhashable (mutable)
    arguments depends on the value of *unhashable*:

        If *unhashable* is 'error', a TypeError will be raised.

        If *unhashable* is 'warning', a UserWarning will be raised, and the
        wrapped function will be called with the supplied arguments.  A miss
        will be recorded in the cache statistics.

        If *unhashable* is 'ignore', the wrapped function will be called with
        the supplied arguments.  A miss will be recorded in the cache
        statistics.

    View the cache statistics named tuple (hits, misses, maxsize, currsize)
    with f.cache_info().  Clear the cache and statistics with f.cache_clear().
    Access the underlying function with f.__wrapped__.

    See:  http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
    """
    def func_wrapper(func):
        _cached_func = clru_cache(maxsize, typed, state, unhashable)(func)

        def wrapper(*args, **kwargs):
            return _cached_func(*args, **kwargs)

        wrapper.__wrapped__ = func
        wrapper.cache_info = _cached_func.cache_info
        wrapper.cache_clear = _cached_func.cache_clear
        return update_wrapper(wrapper, func)

    return func_wrapper
python
def lru_cache(maxsize=128, typed=False, state=None, unhashable='error'):
    """Least-recently-used cache decorator.

    If *maxsize* is set to None, the LRU features are disabled and the cache
    can grow without bound.

    If *typed* is True, arguments of different types will be cached separately.
    For example, f(3.0) and f(3) will be treated as distinct calls with
    distinct results.

    If *state* is a list or dict, the items will be incorporated into
    argument hash.

    The result of calling the cached function with unhashable (mutable)
    arguments depends on the value of *unhashable*:

        If *unhashable* is 'error', a TypeError will be raised.

        If *unhashable* is 'warning', a UserWarning will be raised, and the
        wrapped function will be called with the supplied arguments.  A miss
        will be recorded in the cache statistics.

        If *unhashable* is 'ignore', the wrapped function will be called with
        the supplied arguments.  A miss will be recorded in the cache
        statistics.

    View the cache statistics named tuple (hits, misses, maxsize, currsize)
    with f.cache_info().  Clear the cache and statistics with f.cache_clear().
    Access the underlying function with f.__wrapped__.

    See:  http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
    """
    def func_wrapper(func):
        _cached_func = clru_cache(maxsize, typed, state, unhashable)(func)

        def wrapper(*args, **kwargs):
            return _cached_func(*args, **kwargs)

        wrapper.__wrapped__ = func
        wrapper.cache_info = _cached_func.cache_info
        wrapper.cache_clear = _cached_func.cache_clear
        return update_wrapper(wrapper, func)

    return func_wrapper
Least-recently-used cache decorator. If *maxsize* is set to None, the LRU features are disabled and the cache can grow without bound. If *typed* is True, arguments of different types will be cached separately. For example, f(3.0) and f(3) will be treated as distinct calls with distinct results. If *state* is a list or dict, the items will be incorporated into argument hash. The result of calling the cached function with unhashable (mutable) arguments depends on the value of *unhashable*: If *unhashable* is 'error', a TypeError will be raised. If *unhashable* is 'warning', a UserWarning will be raised, and the wrapped function will be called with the supplied arguments. A miss will be recorded in the cache statistics. If *unhashable* is 'ignore', the wrapped function will be called with the supplied arguments. A miss will be recorded in the cache statistics. View the cache statistics named tuple (hits, misses, maxsize, currsize) with f.cache_info(). Clear the cache and statistics with f.cache_clear(). Access the underlying function with f.__wrapped__. See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
https://github.com/pbrady/fastcache/blob/c216def5d29808585123562b56a9a083ea337cad/fastcache/__init__.py#L30-L75
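A standard decorator usage sketch; ``fastcache.lru_cache`` is the pure-Python wrapper around ``clru_cache`` defined above, so the familiar ``cache_info``/``cache_clear`` attributes are available on the wrapped function.

from fastcache import lru_cache

@lru_cache(maxsize=256)
def fib(n):
    """Naive recursive Fibonacci, made fast by memoization."""
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(80))              # instant thanks to the cache
print(fib.cache_info())     # CacheInfo(hits=..., misses=..., maxsize=256, currsize=...)
fib.cache_clear()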
pbrady/fastcache
scripts/threadsafety.py
fib
def fib(n):
    """Terrible Fibonacci number generator."""
    v = n.value
    return v if v < 2 else fib2(PythonInt(v-1)) + fib(PythonInt(v-2))
python
def fib(n):
    """Terrible Fibonacci number generator."""
    v = n.value
    return v if v < 2 else fib2(PythonInt(v-1)) + fib(PythonInt(v-2))
Terrible Fibonacci number generator.
https://github.com/pbrady/fastcache/blob/c216def5d29808585123562b56a9a083ea337cad/scripts/threadsafety.py#L43-L46
pbrady/fastcache
scripts/threadsafety.py
run_fib_with_clear
def run_fib_with_clear(r):
    """ Run Fibonacci generator r times. """
    for i in range(r):
        if randint(RAND_MIN, RAND_MAX) == RAND_MIN:
            fib.cache_clear()
            fib2.cache_clear()
        res = fib(PythonInt(FIB))
        if RESULT != res:
            raise ValueError("Expected %d, Got %d" % (RESULT, res))
python
def run_fib_with_clear(r):
    """ Run Fibonacci generator r times. """
    for i in range(r):
        if randint(RAND_MIN, RAND_MAX) == RAND_MIN:
            fib.cache_clear()
            fib2.cache_clear()
        res = fib(PythonInt(FIB))
        if RESULT != res:
            raise ValueError("Expected %d, Got %d" % (RESULT, res))
Run Fibonacci generator r times.
https://github.com/pbrady/fastcache/blob/c216def5d29808585123562b56a9a083ea337cad/scripts/threadsafety.py#L59-L67
pbrady/fastcache
scripts/threadsafety.py
run_fib_with_stats
def run_fib_with_stats(r):
    """ Run Fibonacci generator r times. """
    for i in range(r):
        res = fib(PythonInt(FIB))
        if RESULT != res:
            raise ValueError("Expected %d, Got %d" % (RESULT, res))
python
def run_fib_with_stats(r):
    """ Run Fibonacci generator r times. """
    for i in range(r):
        res = fib(PythonInt(FIB))
        if RESULT != res:
            raise ValueError("Expected %d, Got %d" % (RESULT, res))
Run Fibonacci generator r times.
https://github.com/pbrady/fastcache/blob/c216def5d29808585123562b56a9a083ea337cad/scripts/threadsafety.py#L69-L74
FraBle/python-sutime
sutime/sutime.py
SUTime.parse
def parse(self, input_str, reference_date=""):
    """Parses datetime information out of string input.

    It invokes the SUTimeWrapper.annotate() function in Java.

    Args:
        input_str: The input as string that has to be parsed.
        reference_date: Optional reference date for SUTime.

    Returns:
        A list of dicts with the result from the SUTimeWrapper.annotate() call.
    """
    if not jpype.isThreadAttachedToJVM():
        jpype.attachThreadToJVM()
    if reference_date:
        return json.loads(self._sutime.annotate(input_str, reference_date))
    return json.loads(self._sutime.annotate(input_str))
python
def parse(self, input_str, reference_date=""):
    """Parses datetime information out of string input.

    It invokes the SUTimeWrapper.annotate() function in Java.

    Args:
        input_str: The input as string that has to be parsed.
        reference_date: Optional reference date for SUTime.

    Returns:
        A list of dicts with the result from the SUTimeWrapper.annotate() call.
    """
    if not jpype.isThreadAttachedToJVM():
        jpype.attachThreadToJVM()
    if reference_date:
        return json.loads(self._sutime.annotate(input_str, reference_date))
    return json.loads(self._sutime.annotate(input_str))
Parses datetime information out of string input. It invokes the SUTimeWrapper.annotate() function in Java. Args: input_str: The input as string that has to be parsed. reference_date: Optional reference date for SUTime. Returns: A list of dicts with the result from the SUTimeWrapper.annotate() call.
https://github.com/FraBle/python-sutime/blob/2b36828cd1d215c472572d253cd6c56271d71169/sutime/sutime.py#L143-L160
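A usage sketch based on the project's README: the wrapper needs the path to the CoreNLP/SUTime jars, and ``reference_date`` (an ISO date string here) anchors relative expressions such as "next Tuesday". The jar path and constructor keywords are assumptions taken from the README, not from this record.

import json
from sutime import SUTime

jar_dir = './jars'   # directory populated with the SUTime/CoreNLP jars
sutime = SUTime(jars=jar_dir, mark_time_ranges=True)

result = sutime.parse("Let's meet next Tuesday at 3pm.",
                      reference_date="2019-05-01")
print(json.dumps(result, indent=2))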
aichaos/rivescript-python
rivescript/brain.py
Brain.format_message
def format_message(self, msg, botreply=False):
    """Format a user's message for safe processing.

    This runs substitutions on the message and strips out any remaining
    symbols (depending on UTF-8 mode).

    :param str msg: The user's message.
    :param bool botreply: Whether this formatting is being done for the
        bot's last reply (e.g. in a ``%Previous`` command).

    :return str: The formatted message.
    """

    # Make sure the string is Unicode for Python 2.
    if sys.version_info[0] < 3 and isinstance(msg, str):
        msg = msg.decode()

    # Lowercase it.
    msg = msg.lower()

    # Run substitutions on it.
    msg = self.substitute(msg, "sub")

    # In UTF-8 mode, only strip metacharacters and HTML brackets
    # (to protect from obvious XSS attacks).
    if self.utf8:
        msg = re.sub(RE.utf8_meta, '', msg)
        msg = re.sub(self.master.unicode_punctuation, '', msg)

        # For the bot's reply, also strip common punctuation.
        if botreply:
            msg = re.sub(RE.utf8_punct, '', msg)
    else:
        # For everything else, strip all non-alphanumerics.
        msg = utils.strip_nasties(msg)
    msg = msg.strip()  # Strip leading and trailing white space
    msg = RE.ws.sub(" ", msg)  # Replace the multiple whitespaces by single whitespace

    return msg
python
def format_message(self, msg, botreply=False):
    """Format a user's message for safe processing.

    This runs substitutions on the message and strips out any remaining
    symbols (depending on UTF-8 mode).

    :param str msg: The user's message.
    :param bool botreply: Whether this formatting is being done for the
        bot's last reply (e.g. in a ``%Previous`` command).

    :return str: The formatted message.
    """

    # Make sure the string is Unicode for Python 2.
    if sys.version_info[0] < 3 and isinstance(msg, str):
        msg = msg.decode()

    # Lowercase it.
    msg = msg.lower()

    # Run substitutions on it.
    msg = self.substitute(msg, "sub")

    # In UTF-8 mode, only strip metacharacters and HTML brackets
    # (to protect from obvious XSS attacks).
    if self.utf8:
        msg = re.sub(RE.utf8_meta, '', msg)
        msg = re.sub(self.master.unicode_punctuation, '', msg)

        # For the bot's reply, also strip common punctuation.
        if botreply:
            msg = re.sub(RE.utf8_punct, '', msg)
    else:
        # For everything else, strip all non-alphanumerics.
        msg = utils.strip_nasties(msg)
    msg = msg.strip()  # Strip leading and trailing white space
    msg = RE.ws.sub(" ", msg)  # Replace the multiple whitespaces by single whitespace

    return msg
Format a user's message for safe processing. This runs substitutions on the message and strips out any remaining symbols (depending on UTF-8 mode). :param str msg: The user's message. :param bool botreply: Whether this formatting is being done for the bot's last reply (e.g. in a ``%Previous`` command). :return str: The formatted message.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/brain.py#L104-L141
aichaos/rivescript-python
rivescript/brain.py
Brain._getreply
def _getreply(self, user, msg, context='normal', step=0, ignore_object_errors=True): """The internal reply getter function. DO NOT CALL THIS YOURSELF. :param str user: The user ID as passed to ``reply()``. :param str msg: The formatted user message. :param str context: The reply context, one of ``begin`` or ``normal``. :param int step: The recursion depth counter. :param bool ignore_object_errors: Whether to ignore errors from within Python object macros and not raise an ``ObjectError`` exception. :return str: The reply output. """ # Needed to sort replies? if 'topics' not in self.master._sorted: raise RepliesNotSortedError("You must call sort_replies() once you are done loading RiveScript documents") # Initialize the user's profile? topic = self.master.get_uservar(user, "topic") if topic in [None, "undefined"]: topic = "random" self.master.set_uservar(user, "topic", topic) # Collect data on the user. stars = [] thatstars = [] # For %Previous's. reply = '' # Avoid letting them fall into a missing topic. if topic not in self.master._topics: self.warn("User " + user + " was in an empty topic named '" + topic + "'") topic = "random" self.master.set_uservar(user, "topic", topic) # Avoid deep recursion. if step > self.master._depth: raise DeepRecursionError # Are we in the BEGIN statement? if context == 'begin': topic = '__begin__' # Initialize this user's history. history = self.master.get_uservar(user, "__history__") if type(history) is not dict or "input" not in history or "reply" not in history: history = self.default_history() self.master.set_uservar(user, "__history__", history) # More topic sanity checking. if topic not in self.master._topics: # This was handled before, which would mean topic=random and # it doesn't exist. Serious issue! raise NoDefaultRandomTopicError("no default topic 'random' was found") # Create a pointer for the matched data when we find it. matched = None matchedTrigger = None foundMatch = False # See if there were any %Previous's in this topic, or any topic related # to it. This should only be done the first time -- not during a # recursive redirection. This is because in a redirection, "lastreply" # is still gonna be the same as it was the first time, causing an # infinite loop! if step == 0: allTopics = [topic] if topic in self.master._includes or topic in self.master._lineage: # Get all the topics! allTopics = inherit_utils.get_topic_tree(self.master, topic) # Scan them all! for top in allTopics: self.say("Checking topic " + top + " for any %Previous's.") if top in self.master._sorted["thats"]: self.say("There is a %Previous in this topic!") # Do we have history yet? lastReply = history["reply"][0] # Format the bot's last reply the same way as the human's. lastReply = self.format_message(lastReply, botreply=True) self.say("lastReply: " + lastReply) # See if it's a match. for trig in self.master._sorted["thats"][top]: pattern = trig[1]["previous"] botside = self.reply_regexp(user, pattern) self.say("Try to match lastReply ({}) to {} ({})".format(lastReply, pattern, repr(botside))) # Match?? match = re.match(botside, lastReply) if match: # Huzzah! See if OUR message is right too. self.say("Bot side matched!") thatstars = match.groups() # Compare the triggers to the user's message. 
user_side = trig[1] subtrig = self.reply_regexp(user, user_side["trigger"]) self.say("Now try to match " + msg + " to " + user_side["trigger"]) match = re.match(subtrig, msg) if match: self.say("Found a match!") matched = trig[1] matchedTrigger = user_side["trigger"] foundMatch = True # Get the stars! stars = match.groups() break # Break if we found a match. if foundMatch: break # Break if we found a match. if foundMatch: break # Search their topic for a match to their trigger. if not foundMatch: for trig in self.master._sorted["topics"][topic]: pattern = trig[0] # Process the triggers. regexp = self.reply_regexp(user, pattern) self.say("Try to match %r against %r (%r)" % (msg, pattern, regexp.pattern)) # Python's regular expression engine is slow. Try a verbatim # match if this is an atomic trigger. isAtomic = utils.is_atomic(pattern) isMatch = False if isAtomic: # Only look for exact matches, no sense running atomic triggers # through the regexp engine. if msg == pattern: isMatch = True else: # Non-atomic triggers always need the regexp. match = re.match(regexp, msg) if match: # The regexp matched! isMatch = True # Collect the stars. stars = match.groups() if isMatch: self.say("Found a match!") matched = trig[1] foundMatch = True matchedTrigger = pattern break # Store what trigger they matched on. If their matched trigger is None, # this will be too, which is great. self.master.set_uservar(user, "__lastmatch__", matchedTrigger) if matched: for nil in [1]: # See if there are any hard redirects. if matched["redirect"]: self.say("Redirecting us to " + matched["redirect"]) redirect = self.process_tags(user, msg, matched["redirect"], stars, thatstars, step, ignore_object_errors) redirect = redirect.lower() self.say("Pretend user said: " + redirect) reply = self._getreply(user, redirect, step=(step + 1), ignore_object_errors=ignore_object_errors) break # Check the conditionals. for con in matched["condition"]: halves = re.split(RE.cond_split, con) if halves and len(halves) == 2: condition = re.match(RE.cond_parse, halves[0]) if condition: left = condition.group(1) eq = condition.group(2) right = condition.group(3) potreply = halves[1] self.say("Left: " + left + "; eq: " + eq + "; right: " + right + " => " + potreply) # Process tags all around. left = self.process_tags(user, msg, left, stars, thatstars, step, ignore_object_errors) right = self.process_tags(user, msg, right, stars, thatstars, step, ignore_object_errors) # Defaults? if len(left) == 0: left = 'undefined' if len(right) == 0: right = 'undefined' self.say("Check if " + left + " " + eq + " " + right) # Validate it. passed = False if eq == 'eq' or eq == '==': if left == right: passed = True elif eq == 'ne' or eq == '!=' or eq == '<>': if left != right: passed = True else: # Gasp, dealing with numbers here... try: left, right = int(left), int(right) if eq == '<': if left < right: passed = True elif eq == '<=': if left <= right: passed = True elif eq == '>': if left > right: passed = True elif eq == '>=': if left >= right: passed = True except: self.warn("Failed to evaluate numeric condition!") # How truthful? if passed: reply = potreply break # Have our reply yet? if len(reply) > 0: break # Process weights in the replies. bucket = [] for text in matched["reply"]: weight = 1 match = re.search(RE.weight, text) if match: weight = int(match.group(1)) if weight <= 0: self.warn("Can't have a weight <= 0!") weight = 1 for i in range(0, weight): bucket.append(text) # Get a random reply. 
reply = utils.random_choice(bucket) break # Still no reply? if not foundMatch: raise NoMatchError elif len(reply) == 0: raise NoReplyError self.say("Reply: " + reply) # Process tags for the BEGIN block. if context == "begin": # BEGIN blocks can only set topics and uservars. The rest happen # later! reTopic = re.findall(RE.topic_tag, reply) for match in reTopic: self.say("Setting user's topic to " + match) self.master.set_uservar(user, "topic", match) reply = reply.replace('{{topic={match}}}'.format(match=match), '') reSet = re.findall(RE.set_tag, reply) for match in reSet: self.say("Set uservar " + str(match[0]) + "=" + str(match[1])) self.master.set_uservar(user, match[0], match[1]) reply = reply.replace('<set {key}={value}>'.format(key=match[0], value=match[1]), '') else: # Process more tags if not in BEGIN. reply = self.process_tags(user, msg, reply, stars, thatstars, step, ignore_object_errors) return reply
python
def _getreply(self, user, msg, context='normal', step=0, ignore_object_errors=True): """The internal reply getter function. DO NOT CALL THIS YOURSELF. :param str user: The user ID as passed to ``reply()``. :param str msg: The formatted user message. :param str context: The reply context, one of ``begin`` or ``normal``. :param int step: The recursion depth counter. :param bool ignore_object_errors: Whether to ignore errors from within Python object macros and not raise an ``ObjectError`` exception. :return str: The reply output. """ # Needed to sort replies? if 'topics' not in self.master._sorted: raise RepliesNotSortedError("You must call sort_replies() once you are done loading RiveScript documents") # Initialize the user's profile? topic = self.master.get_uservar(user, "topic") if topic in [None, "undefined"]: topic = "random" self.master.set_uservar(user, "topic", topic) # Collect data on the user. stars = [] thatstars = [] # For %Previous's. reply = '' # Avoid letting them fall into a missing topic. if topic not in self.master._topics: self.warn("User " + user + " was in an empty topic named '" + topic + "'") topic = "random" self.master.set_uservar(user, "topic", topic) # Avoid deep recursion. if step > self.master._depth: raise DeepRecursionError # Are we in the BEGIN statement? if context == 'begin': topic = '__begin__' # Initialize this user's history. history = self.master.get_uservar(user, "__history__") if type(history) is not dict or "input" not in history or "reply" not in history: history = self.default_history() self.master.set_uservar(user, "__history__", history) # More topic sanity checking. if topic not in self.master._topics: # This was handled before, which would mean topic=random and # it doesn't exist. Serious issue! raise NoDefaultRandomTopicError("no default topic 'random' was found") # Create a pointer for the matched data when we find it. matched = None matchedTrigger = None foundMatch = False # See if there were any %Previous's in this topic, or any topic related # to it. This should only be done the first time -- not during a # recursive redirection. This is because in a redirection, "lastreply" # is still gonna be the same as it was the first time, causing an # infinite loop! if step == 0: allTopics = [topic] if topic in self.master._includes or topic in self.master._lineage: # Get all the topics! allTopics = inherit_utils.get_topic_tree(self.master, topic) # Scan them all! for top in allTopics: self.say("Checking topic " + top + " for any %Previous's.") if top in self.master._sorted["thats"]: self.say("There is a %Previous in this topic!") # Do we have history yet? lastReply = history["reply"][0] # Format the bot's last reply the same way as the human's. lastReply = self.format_message(lastReply, botreply=True) self.say("lastReply: " + lastReply) # See if it's a match. for trig in self.master._sorted["thats"][top]: pattern = trig[1]["previous"] botside = self.reply_regexp(user, pattern) self.say("Try to match lastReply ({}) to {} ({})".format(lastReply, pattern, repr(botside))) # Match?? match = re.match(botside, lastReply) if match: # Huzzah! See if OUR message is right too. self.say("Bot side matched!") thatstars = match.groups() # Compare the triggers to the user's message. 
user_side = trig[1] subtrig = self.reply_regexp(user, user_side["trigger"]) self.say("Now try to match " + msg + " to " + user_side["trigger"]) match = re.match(subtrig, msg) if match: self.say("Found a match!") matched = trig[1] matchedTrigger = user_side["trigger"] foundMatch = True # Get the stars! stars = match.groups() break # Break if we found a match. if foundMatch: break # Break if we found a match. if foundMatch: break # Search their topic for a match to their trigger. if not foundMatch: for trig in self.master._sorted["topics"][topic]: pattern = trig[0] # Process the triggers. regexp = self.reply_regexp(user, pattern) self.say("Try to match %r against %r (%r)" % (msg, pattern, regexp.pattern)) # Python's regular expression engine is slow. Try a verbatim # match if this is an atomic trigger. isAtomic = utils.is_atomic(pattern) isMatch = False if isAtomic: # Only look for exact matches, no sense running atomic triggers # through the regexp engine. if msg == pattern: isMatch = True else: # Non-atomic triggers always need the regexp. match = re.match(regexp, msg) if match: # The regexp matched! isMatch = True # Collect the stars. stars = match.groups() if isMatch: self.say("Found a match!") matched = trig[1] foundMatch = True matchedTrigger = pattern break # Store what trigger they matched on. If their matched trigger is None, # this will be too, which is great. self.master.set_uservar(user, "__lastmatch__", matchedTrigger) if matched: for nil in [1]: # See if there are any hard redirects. if matched["redirect"]: self.say("Redirecting us to " + matched["redirect"]) redirect = self.process_tags(user, msg, matched["redirect"], stars, thatstars, step, ignore_object_errors) redirect = redirect.lower() self.say("Pretend user said: " + redirect) reply = self._getreply(user, redirect, step=(step + 1), ignore_object_errors=ignore_object_errors) break # Check the conditionals. for con in matched["condition"]: halves = re.split(RE.cond_split, con) if halves and len(halves) == 2: condition = re.match(RE.cond_parse, halves[0]) if condition: left = condition.group(1) eq = condition.group(2) right = condition.group(3) potreply = halves[1] self.say("Left: " + left + "; eq: " + eq + "; right: " + right + " => " + potreply) # Process tags all around. left = self.process_tags(user, msg, left, stars, thatstars, step, ignore_object_errors) right = self.process_tags(user, msg, right, stars, thatstars, step, ignore_object_errors) # Defaults? if len(left) == 0: left = 'undefined' if len(right) == 0: right = 'undefined' self.say("Check if " + left + " " + eq + " " + right) # Validate it. passed = False if eq == 'eq' or eq == '==': if left == right: passed = True elif eq == 'ne' or eq == '!=' or eq == '<>': if left != right: passed = True else: # Gasp, dealing with numbers here... try: left, right = int(left), int(right) if eq == '<': if left < right: passed = True elif eq == '<=': if left <= right: passed = True elif eq == '>': if left > right: passed = True elif eq == '>=': if left >= right: passed = True except: self.warn("Failed to evaluate numeric condition!") # How truthful? if passed: reply = potreply break # Have our reply yet? if len(reply) > 0: break # Process weights in the replies. bucket = [] for text in matched["reply"]: weight = 1 match = re.search(RE.weight, text) if match: weight = int(match.group(1)) if weight <= 0: self.warn("Can't have a weight <= 0!") weight = 1 for i in range(0, weight): bucket.append(text) # Get a random reply. 
reply = utils.random_choice(bucket) break # Still no reply? if not foundMatch: raise NoMatchError elif len(reply) == 0: raise NoReplyError self.say("Reply: " + reply) # Process tags for the BEGIN block. if context == "begin": # BEGIN blocks can only set topics and uservars. The rest happen # later! reTopic = re.findall(RE.topic_tag, reply) for match in reTopic: self.say("Setting user's topic to " + match) self.master.set_uservar(user, "topic", match) reply = reply.replace('{{topic={match}}}'.format(match=match), '') reSet = re.findall(RE.set_tag, reply) for match in reSet: self.say("Set uservar " + str(match[0]) + "=" + str(match[1])) self.master.set_uservar(user, match[0], match[1]) reply = reply.replace('<set {key}={value}>'.format(key=match[0], value=match[1]), '') else: # Process more tags if not in BEGIN. reply = self.process_tags(user, msg, reply, stars, thatstars, step, ignore_object_errors) return reply
The internal reply getter function. DO NOT CALL THIS YOURSELF. :param str user: The user ID as passed to ``reply()``. :param str msg: The formatted user message. :param str context: The reply context, one of ``begin`` or ``normal``. :param int step: The recursion depth counter. :param bool ignore_object_errors: Whether to ignore errors from within Python object macros and not raise an ``ObjectError`` exception. :return str: The reply output.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/brain.py#L143-L419
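A minimal driver for the reply pipeline documented above, using only the public API (``_getreply()`` itself is internal and is reached through ``reply()``); the one-trigger brain and user ID below are made up for illustration.

from rivescript import RiveScript

bot = RiveScript()
bot.stream("""
+ hello bot
- Hello, human!
""")                   # load a trivial brain from a string
bot.sort_replies()     # required first, or _getreply() raises RepliesNotSortedError
print(bot.reply("localuser", "Hello bot"))   # -> "Hello, human!"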
aichaos/rivescript-python
rivescript/brain.py
Brain.reply_regexp
def reply_regexp(self, user, regexp):
        """Prepares a trigger for the regular expression engine.

        :param str user: The user ID invoking a reply.
        :param str regexp: The original trigger text to be turned into a regexp.

        :return regexp: The final regexp object."""

        if regexp in self.master._regexc["trigger"]:
            # Already compiled this one!
            return self.master._regexc["trigger"][regexp]

        # If the trigger is simply '*' then the * there needs to become (.*?)
        # to match the blank string too.
        regexp = re.sub(RE.zero_star, r'<zerowidthstar>', regexp)

        # Filter in arrays.
        arrays = re.findall(RE.array, regexp)
        for array in arrays:
            rep = ''
            if array in self.master._array:
                rep = r'(?:' + '|'.join(self.expand_array(array)) + ')'
            regexp = re.sub(r'\@' + re.escape(array) + r'\b', rep, regexp)

        # Simple replacements.
        regexp = regexp.replace('*', '(.+?)')   # Convert * into (.+?)
        regexp = regexp.replace('#', '(\d+?)')  # Convert # into (\d+?)
        regexp = regexp.replace('_', '(\w+?)')  # Convert _ into (\w+?)
        regexp = re.sub(RE.weight, '', regexp)  # Remove {weight} tags, allow spaces before the bracket
        regexp = regexp.replace('<zerowidthstar>', r'(.*?)')

        # Optionals.
        optionals = re.findall(RE.optionals, regexp)
        for match in optionals:
            parts = match.split("|")
            new = []
            for p in parts:
                p = r'(?:\\s|\\b)+{}(?:\\s|\\b)+'.format(p.strip())
                new.append(p)

            # If this optional had a star or anything in it, make it
            # non-matching.
            pipes = '|'.join(new)
            pipes = pipes.replace(r'(.+?)', r'(?:.+?)')
            pipes = pipes.replace(r'(\d+?)', r'(?:\d+?)')
            pipes = pipes.replace(r'([A-Za-z]+?)', r'(?:[A-Za-z]+?)')

            regexp = re.sub(r'\s*\[' + re.escape(match) + '\]\s*',
                            '(?:' + pipes + r'|(?:\\s|\\b))', regexp)

        # _ wildcards can't match numbers!
        regexp = re.sub(RE.literal_w, r'[^\\s\\d]', regexp)

        # Filter in bot variables.
        bvars = re.findall(RE.bot_tag, regexp)
        for var in bvars:
            rep = ''
            if var in self.master._var:
                rep = self.format_message(self.master._var[var])
            regexp = regexp.replace('<bot {var}>'.format(var=var), rep)

        # Filter in user variables.
        uvars = re.findall(RE.get_tag, regexp)
        for var in uvars:
            rep = ''
            value = self.master.get_uservar(user, var)
            if value not in [None, "undefined"]:
                rep = utils.strip_nasties(value)
            regexp = regexp.replace('<get {var}>'.format(var=var), rep)

        # Filter in <input> and <reply> tags. This is a slow process, so only
        # do it if we have to!
        if '<input' in regexp or '<reply' in regexp:
            history = self.master.get_uservar(user, "__history__")
            for type in ['input', 'reply']:
                tags = re.findall(r'<' + type + r'([0-9])>', regexp)
                for index in tags:
                    rep = self.format_message(history[type][int(index) - 1])
                    regexp = regexp.replace('<{type}{index}>'.format(type=type, index=index), rep)
                regexp = regexp.replace('<{type}>'.format(type=type),
                                        self.format_message(history[type][0]))

        # TODO: the Perl version doesn't do just <input>/<reply> in trigs!

        if self.utf8:
            return re.compile(r'^' + regexp.lower() + r'$', re.UNICODE)
        else:
            return re.compile(r'^' + regexp.lower() + r'$')
python
def reply_regexp(self, user, regexp):
        """Prepares a trigger for the regular expression engine.

        :param str user: The user ID invoking a reply.
        :param str regexp: The original trigger text to be turned into a regexp.

        :return regexp: The final regexp object."""

        if regexp in self.master._regexc["trigger"]:
            # Already compiled this one!
            return self.master._regexc["trigger"][regexp]

        # If the trigger is simply '*' then the * there needs to become (.*?)
        # to match the blank string too.
        regexp = re.sub(RE.zero_star, r'<zerowidthstar>', regexp)

        # Filter in arrays.
        arrays = re.findall(RE.array, regexp)
        for array in arrays:
            rep = ''
            if array in self.master._array:
                rep = r'(?:' + '|'.join(self.expand_array(array)) + ')'
            regexp = re.sub(r'\@' + re.escape(array) + r'\b', rep, regexp)

        # Simple replacements.
        regexp = regexp.replace('*', '(.+?)')   # Convert * into (.+?)
        regexp = regexp.replace('#', '(\d+?)')  # Convert # into (\d+?)
        regexp = regexp.replace('_', '(\w+?)')  # Convert _ into (\w+?)
        regexp = re.sub(RE.weight, '', regexp)  # Remove {weight} tags, allow spaces before the bracket
        regexp = regexp.replace('<zerowidthstar>', r'(.*?)')

        # Optionals.
        optionals = re.findall(RE.optionals, regexp)
        for match in optionals:
            parts = match.split("|")
            new = []
            for p in parts:
                p = r'(?:\\s|\\b)+{}(?:\\s|\\b)+'.format(p.strip())
                new.append(p)

            # If this optional had a star or anything in it, make it
            # non-matching.
            pipes = '|'.join(new)
            pipes = pipes.replace(r'(.+?)', r'(?:.+?)')
            pipes = pipes.replace(r'(\d+?)', r'(?:\d+?)')
            pipes = pipes.replace(r'([A-Za-z]+?)', r'(?:[A-Za-z]+?)')

            regexp = re.sub(r'\s*\[' + re.escape(match) + '\]\s*',
                            '(?:' + pipes + r'|(?:\\s|\\b))', regexp)

        # _ wildcards can't match numbers!
        regexp = re.sub(RE.literal_w, r'[^\\s\\d]', regexp)

        # Filter in bot variables.
        bvars = re.findall(RE.bot_tag, regexp)
        for var in bvars:
            rep = ''
            if var in self.master._var:
                rep = self.format_message(self.master._var[var])
            regexp = regexp.replace('<bot {var}>'.format(var=var), rep)

        # Filter in user variables.
        uvars = re.findall(RE.get_tag, regexp)
        for var in uvars:
            rep = ''
            value = self.master.get_uservar(user, var)
            if value not in [None, "undefined"]:
                rep = utils.strip_nasties(value)
            regexp = regexp.replace('<get {var}>'.format(var=var), rep)

        # Filter in <input> and <reply> tags. This is a slow process, so only
        # do it if we have to!
        if '<input' in regexp or '<reply' in regexp:
            history = self.master.get_uservar(user, "__history__")
            for type in ['input', 'reply']:
                tags = re.findall(r'<' + type + r'([0-9])>', regexp)
                for index in tags:
                    rep = self.format_message(history[type][int(index) - 1])
                    regexp = regexp.replace('<{type}{index}>'.format(type=type, index=index), rep)
                regexp = regexp.replace('<{type}>'.format(type=type),
                                        self.format_message(history[type][0]))

        # TODO: the Perl version doesn't do just <input>/<reply> in trigs!

        if self.utf8:
            return re.compile(r'^' + regexp.lower() + r'$', re.UNICODE)
        else:
            return re.compile(r'^' + regexp.lower() + r'$')
Prepares a trigger for the regular expression engine. :param str user: The user ID invoking a reply. :param str regexp: The original trigger text to be turned into a regexp. :return regexp: The final regexp object.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/brain.py#L421-L507
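A short sketch of how the wildcard and array conversions above behave end to end; the two-trigger brain below is an illustrative assumption, not part of the library.

from rivescript import RiveScript

bot = RiveScript()
bot.stream("""
! array colors = red green blue

+ i like (@colors)
- <star> is a nice color.

+ i am # years old
- A numeric wildcard matched: <star>.
""")
bot.sort_replies()
print(bot.reply("localuser", "I like blue"))        # @colors expands to (?:red|green|blue)
print(bot.reply("localuser", "I am 25 years old"))  # '#' is converted to (\d+?)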
aichaos/rivescript-python
rivescript/brain.py
Brain.do_expand_array
def do_expand_array(self, array_name, depth=0):
        """Do recurrent array expansion, returning a set of keywords.

        Exception is thrown when there are cyclical dependencies between
        arrays or if the ``@array`` name references an undefined array.

        :param str array_name: The name of the array to expand.
        :param int depth: The recursion depth counter.

        :return set: The final set of array entries.
        """
        if depth > self.master._depth:
            raise Exception("deep recursion detected")
        if not array_name in self.master._array:
            raise Exception("array '%s' not defined" % (array_name))
        ret = list(self.master._array[array_name])
        for array in self.master._array[array_name]:
            if array.startswith('@'):
                ret.remove(array)
                expanded = self.do_expand_array(array[1:], depth+1)
                ret.extend(expanded)

        return set(ret)
python
def do_expand_array(self, array_name, depth=0):
        """Do recurrent array expansion, returning a set of keywords.

        Exception is thrown when there are cyclical dependencies between
        arrays or if the ``@array`` name references an undefined array.

        :param str array_name: The name of the array to expand.
        :param int depth: The recursion depth counter.

        :return set: The final set of array entries.
        """
        if depth > self.master._depth:
            raise Exception("deep recursion detected")
        if not array_name in self.master._array:
            raise Exception("array '%s' not defined" % (array_name))
        ret = list(self.master._array[array_name])
        for array in self.master._array[array_name]:
            if array.startswith('@'):
                ret.remove(array)
                expanded = self.do_expand_array(array[1:], depth+1)
                ret.extend(expanded)

        return set(ret)
Do recurrent array expansion, returning a set of keywords. Exception is thrown when there are cyclical dependencies between arrays or if the ``@array`` name references an undefined array. :param str array_name: The name of the array to expand. :param int depth: The recursion depth counter. :return set: The final set of array entries.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/brain.py#L509-L531
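A sketch of the recursive ``@array``-in-array expansion this helper performs, driven through the public API with a made-up brain.

from rivescript import RiveScript

bot = RiveScript()
bot.stream("""
! array primary = red green blue
! array colors  = @primary black white

+ i like (@colors)
- <star> is one of the colors I know.
""")
bot.sort_replies()
# "red" only appears inside @primary, so a match here proves the nested
# expansion ran; a cyclic reference between the two arrays would instead
# trip the "deep recursion detected" guard shown above.
print(bot.reply("localuser", "I like red"))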
aichaos/rivescript-python
rivescript/brain.py
Brain.expand_array
def expand_array(self, array_name):
        """Expand variables and return a set of keywords.

        :param str array_name: The name of the array to expand.

        :return list: The final array contents.

        Warning is issued when exceptions occur."""
        ret = self.master._array[array_name] if array_name in self.master._array else []
        try:
            ret = self.do_expand_array(array_name)
        except Exception as e:
            self.warn("Error expanding array '%s': %s" % (array_name, str(e)))
        return ret
python
def expand_array(self, array_name):
        """Expand variables and return a set of keywords.

        :param str array_name: The name of the array to expand.

        :return list: The final array contents.

        Warning is issued when exceptions occur."""
        ret = self.master._array[array_name] if array_name in self.master._array else []
        try:
            ret = self.do_expand_array(array_name)
        except Exception as e:
            self.warn("Error expanding array '%s': %s" % (array_name, str(e)))
        return ret
Expand variables and return a set of keywords. :param str array_name: The name of the array to expand. :return list: The final array contents. Warning is issued when exceptions occur.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/brain.py#L533-L546
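A sketch of the forgiving wrapper behavior (warn and fall back instead of raising); calling it directly means reaching the internal ``Brain`` instance, and the ``_brain`` attribute name is an assumption about the current object layout.

from rivescript import RiveScript

bot = RiveScript()
bot.stream("! array greek = alpha beta @missing")
# The @missing reference makes do_expand_array() raise; expand_array()
# logs a warning and returns the raw, unexpanded list instead.
print(bot._brain.expand_array("greek"))   # ['alpha', 'beta', '@missing']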
aichaos/rivescript-python
rivescript/brain.py
Brain.process_tags
def process_tags(self, user, msg, reply, st=[], bst=[], depth=0, ignore_object_errors=True): """Post process tags in a message. :param str user: The user ID. :param str msg: The user's formatted message. :param str reply: The raw RiveScript reply for the message. :param []str st: The array of ``<star>`` matches from the trigger. :param []str bst: The array of ``<botstar>`` matches from a ``%Previous`` command. :param int depth: The recursion depth counter. :param bool ignore_object_errors: Whether to ignore errors in Python object macros instead of raising an ``ObjectError`` exception. :return str: The final reply after tags have been processed. """ stars = [''] stars.extend(st) botstars = [''] botstars.extend(bst) if len(stars) == 1: stars.append("undefined") if len(botstars) == 1: botstars.append("undefined") matcher = re.findall(RE.reply_array, reply) for match in matcher: name = match if name in self.master._array: result = "{random}" + "|".join(self.master._array[name]) + "{/random}" else: result = "\x00@" + name + "\x00" reply = reply.replace("(@"+name+")", result) reply = re.sub(RE.ph_array, r'(@\1)', reply) # Tag shortcuts. reply = reply.replace('<person>', '{person}<star>{/person}') reply = reply.replace('<@>', '{@<star>}') reply = reply.replace('<formal>', '{formal}<star>{/formal}') reply = reply.replace('<sentence>', '{sentence}<star>{/sentence}') reply = reply.replace('<uppercase>', '{uppercase}<star>{/uppercase}') reply = reply.replace('<lowercase>', '{lowercase}<star>{/lowercase}') # Weight and <star> tags. reply = re.sub(RE.weight, '', reply) # Leftover {weight}s if len(stars) > 0: reply = reply.replace('<star>', text_type(stars[1])) reStars = re.findall(RE.star_tags, reply) for match in reStars: if int(match) < len(stars): reply = reply.replace('<star{match}>'.format(match=match), text_type(stars[int(match)])) if len(botstars) > 0: reply = reply.replace('<botstar>', botstars[1]) reStars = re.findall(RE.botstars, reply) for match in reStars: if int(match) < len(botstars): reply = reply.replace('<botstar{match}>'.format(match=match), text_type(botstars[int(match)])) # <input> and <reply> history = self.master.get_uservar(user, "__history__") if type(history) is not dict: history = self.default_history() reply = reply.replace('<input>', history['input'][0]) reply = reply.replace('<reply>', history['reply'][0]) reInput = re.findall(RE.input_tags, reply) for match in reInput: reply = reply.replace('<input{match}>'.format(match=match), history['input'][int(match) - 1]) reReply = re.findall(RE.reply_tags, reply) for match in reReply: reply = reply.replace('<reply{match}>'.format(match=match), history['reply'][int(match) - 1]) # <id> and escape codes. reply = reply.replace('<id>', user) reply = reply.replace('\\s', ' ') reply = reply.replace('\\n', "\n") reply = reply.replace('\\#', '#') # Random bits. reRandom = re.findall(RE.random_tags, reply) for match in reRandom: output = '' if '|' in match: output = utils.random_choice(match.split('|')) else: output = utils.random_choice(match.split(' ')) reply = reply.replace('{{random}}{match}{{/random}}'.format(match=match), output, 1) # Replace 1st match # Person Substitutions and String Formatting. for item in ['person', 'formal', 'sentence', 'uppercase', 'lowercase']: matcher = re.findall(r'\{' + item + r'\}(.+?)\{/' + item + r'\}', reply) for match in matcher: output = None if item == 'person': # Person substitutions. 
output = self.substitute(match, "person") else: output = utils.string_format(match, item) reply = reply.replace('{{{item}}}{match}{{/{item}}}'.format(item=item, match=match), output) # Handle all variable-related tags with an iterative regex approach, # to allow for nesting of tags in arbitrary ways (think <set a=<get b>>) # Dummy out the <call> tags first, because we don't handle them right # here. reply = reply.replace("<call>", "{__call__}") reply = reply.replace("</call>", "{/__call__}") while True: # This regex will match a <tag> which contains no other tag inside # it, i.e. in the case of <set a=<get b>> it will match <get b> but # not the <set> tag, on the first pass. The second pass will get the # <set> tag, and so on. match = re.search(RE.tag_search, reply) if not match: break # No remaining tags! match = match.group(1) parts = match.split(" ", 1) tag = parts[0].lower() data = parts[1] if len(parts) > 1 else "" insert = "" # Result of the tag evaluation # Handle the tags. if tag == "bot" or tag == "env": # <bot> and <env> tags are similar. target = self.master._var if tag == "bot" else self.master._global if "=" in data: # Setting a bot/env variable. parts = data.split("=") self.say("Set " + tag + " variable " + text_type(parts[0]) + "=" + text_type(parts[1])) target[parts[0]] = parts[1] else: # Getting a bot/env variable. insert = target.get(data, "undefined") elif tag == "set": # <set> user vars. parts = data.split("=") self.say("Set uservar " + text_type(parts[0]) + "=" + text_type(parts[1])) self.master.set_uservar(user, parts[0], parts[1]) elif tag in ["add", "sub", "mult", "div"]: # Math operator tags. parts = data.split("=") var = parts[0] value = parts[1] curv = self.master.get_uservar(user, var) # Sanity check the value. try: value = int(value) if curv in [None, "undefined"]: # Initialize it. curv = 0 except: insert = "[ERR: Math can't '{}' non-numeric value '{}']".format(tag, value) # Attempt the operation. try: orig = int(curv) new = 0 if tag == "add": new = orig + value elif tag == "sub": new = orig - value elif tag == "mult": new = orig * value elif tag == "div": new = orig // value self.master.set_uservar(user, var, new) except: insert = "[ERR: Math couldn't '{}' to value '{}']".format(tag, curv) elif tag == "get": insert = self.master.get_uservar(user, data) else: # Unrecognized tag. insert = "\x00{}\x01".format(match) reply = reply.replace("<{}>".format(match), text_type(insert)) # Restore unrecognized tags. reply = reply.replace("\x00", "<").replace("\x01", ">") # Streaming code. DEPRECATED! if '{!' in reply: self._warn("Use of the {!...} tag is deprecated and not supported here.") # Topic setter. reTopic = re.findall(RE.topic_tag, reply) for match in reTopic: self.say("Setting user's topic to " + match) self.master.set_uservar(user, "topic", match) reply = reply.replace('{{topic={match}}}'.format(match=match), '') # Inline redirecter. reRedir = re.findall(RE.redir_tag, reply) for match in reRedir: self.say("Redirect to " + match) at = match.strip() subreply = self._getreply(user, at, step=(depth + 1)) reply = reply.replace('{{@{match}}}'.format(match=match), subreply) # Object caller. reply = reply.replace("{__call__}", "<call>") reply = reply.replace("{/__call__}", "</call>") reCall = re.findall(r'<call>(.+?)</call>', reply) for match in reCall: parts = re.split(RE.ws, match) output = '' obj = parts[0] args = [] if len(parts) > 1: args = parts[1:] # Do we know this object? if obj in self.master._objlangs: # We do, but do we have a handler for that language? 
lang = self.master._objlangs[obj] if lang in self.master._handlers: # We do. try: output = self.master._handlers[lang].call(self.master, obj, user, args) except python.PythonObjectError as e: self.warn(str(e)) if not ignore_object_errors: raise ObjectError(str(e)) output = RS_ERR_OBJECT else: if not ignore_object_errors: raise ObjectError(RS_ERR_OBJECT_HANDLER) output = RS_ERR_OBJECT_HANDLER else: if not ignore_object_errors: raise ObjectError(RS_ERR_OBJECT_MISSING) output = RS_ERR_OBJECT_MISSING reply = reply.replace('<call>{match}</call>'.format(match=match), output) return reply
python
def process_tags(self, user, msg, reply, st=[], bst=[], depth=0, ignore_object_errors=True): """Post process tags in a message. :param str user: The user ID. :param str msg: The user's formatted message. :param str reply: The raw RiveScript reply for the message. :param []str st: The array of ``<star>`` matches from the trigger. :param []str bst: The array of ``<botstar>`` matches from a ``%Previous`` command. :param int depth: The recursion depth counter. :param bool ignore_object_errors: Whether to ignore errors in Python object macros instead of raising an ``ObjectError`` exception. :return str: The final reply after tags have been processed. """ stars = [''] stars.extend(st) botstars = [''] botstars.extend(bst) if len(stars) == 1: stars.append("undefined") if len(botstars) == 1: botstars.append("undefined") matcher = re.findall(RE.reply_array, reply) for match in matcher: name = match if name in self.master._array: result = "{random}" + "|".join(self.master._array[name]) + "{/random}" else: result = "\x00@" + name + "\x00" reply = reply.replace("(@"+name+")", result) reply = re.sub(RE.ph_array, r'(@\1)', reply) # Tag shortcuts. reply = reply.replace('<person>', '{person}<star>{/person}') reply = reply.replace('<@>', '{@<star>}') reply = reply.replace('<formal>', '{formal}<star>{/formal}') reply = reply.replace('<sentence>', '{sentence}<star>{/sentence}') reply = reply.replace('<uppercase>', '{uppercase}<star>{/uppercase}') reply = reply.replace('<lowercase>', '{lowercase}<star>{/lowercase}') # Weight and <star> tags. reply = re.sub(RE.weight, '', reply) # Leftover {weight}s if len(stars) > 0: reply = reply.replace('<star>', text_type(stars[1])) reStars = re.findall(RE.star_tags, reply) for match in reStars: if int(match) < len(stars): reply = reply.replace('<star{match}>'.format(match=match), text_type(stars[int(match)])) if len(botstars) > 0: reply = reply.replace('<botstar>', botstars[1]) reStars = re.findall(RE.botstars, reply) for match in reStars: if int(match) < len(botstars): reply = reply.replace('<botstar{match}>'.format(match=match), text_type(botstars[int(match)])) # <input> and <reply> history = self.master.get_uservar(user, "__history__") if type(history) is not dict: history = self.default_history() reply = reply.replace('<input>', history['input'][0]) reply = reply.replace('<reply>', history['reply'][0]) reInput = re.findall(RE.input_tags, reply) for match in reInput: reply = reply.replace('<input{match}>'.format(match=match), history['input'][int(match) - 1]) reReply = re.findall(RE.reply_tags, reply) for match in reReply: reply = reply.replace('<reply{match}>'.format(match=match), history['reply'][int(match) - 1]) # <id> and escape codes. reply = reply.replace('<id>', user) reply = reply.replace('\\s', ' ') reply = reply.replace('\\n', "\n") reply = reply.replace('\\#', '#') # Random bits. reRandom = re.findall(RE.random_tags, reply) for match in reRandom: output = '' if '|' in match: output = utils.random_choice(match.split('|')) else: output = utils.random_choice(match.split(' ')) reply = reply.replace('{{random}}{match}{{/random}}'.format(match=match), output, 1) # Replace 1st match # Person Substitutions and String Formatting. for item in ['person', 'formal', 'sentence', 'uppercase', 'lowercase']: matcher = re.findall(r'\{' + item + r'\}(.+?)\{/' + item + r'\}', reply) for match in matcher: output = None if item == 'person': # Person substitutions. 
output = self.substitute(match, "person") else: output = utils.string_format(match, item) reply = reply.replace('{{{item}}}{match}{{/{item}}}'.format(item=item, match=match), output) # Handle all variable-related tags with an iterative regex approach, # to allow for nesting of tags in arbitrary ways (think <set a=<get b>>) # Dummy out the <call> tags first, because we don't handle them right # here. reply = reply.replace("<call>", "{__call__}") reply = reply.replace("</call>", "{/__call__}") while True: # This regex will match a <tag> which contains no other tag inside # it, i.e. in the case of <set a=<get b>> it will match <get b> but # not the <set> tag, on the first pass. The second pass will get the # <set> tag, and so on. match = re.search(RE.tag_search, reply) if not match: break # No remaining tags! match = match.group(1) parts = match.split(" ", 1) tag = parts[0].lower() data = parts[1] if len(parts) > 1 else "" insert = "" # Result of the tag evaluation # Handle the tags. if tag == "bot" or tag == "env": # <bot> and <env> tags are similar. target = self.master._var if tag == "bot" else self.master._global if "=" in data: # Setting a bot/env variable. parts = data.split("=") self.say("Set " + tag + " variable " + text_type(parts[0]) + "=" + text_type(parts[1])) target[parts[0]] = parts[1] else: # Getting a bot/env variable. insert = target.get(data, "undefined") elif tag == "set": # <set> user vars. parts = data.split("=") self.say("Set uservar " + text_type(parts[0]) + "=" + text_type(parts[1])) self.master.set_uservar(user, parts[0], parts[1]) elif tag in ["add", "sub", "mult", "div"]: # Math operator tags. parts = data.split("=") var = parts[0] value = parts[1] curv = self.master.get_uservar(user, var) # Sanity check the value. try: value = int(value) if curv in [None, "undefined"]: # Initialize it. curv = 0 except: insert = "[ERR: Math can't '{}' non-numeric value '{}']".format(tag, value) # Attempt the operation. try: orig = int(curv) new = 0 if tag == "add": new = orig + value elif tag == "sub": new = orig - value elif tag == "mult": new = orig * value elif tag == "div": new = orig // value self.master.set_uservar(user, var, new) except: insert = "[ERR: Math couldn't '{}' to value '{}']".format(tag, curv) elif tag == "get": insert = self.master.get_uservar(user, data) else: # Unrecognized tag. insert = "\x00{}\x01".format(match) reply = reply.replace("<{}>".format(match), text_type(insert)) # Restore unrecognized tags. reply = reply.replace("\x00", "<").replace("\x01", ">") # Streaming code. DEPRECATED! if '{!' in reply: self._warn("Use of the {!...} tag is deprecated and not supported here.") # Topic setter. reTopic = re.findall(RE.topic_tag, reply) for match in reTopic: self.say("Setting user's topic to " + match) self.master.set_uservar(user, "topic", match) reply = reply.replace('{{topic={match}}}'.format(match=match), '') # Inline redirecter. reRedir = re.findall(RE.redir_tag, reply) for match in reRedir: self.say("Redirect to " + match) at = match.strip() subreply = self._getreply(user, at, step=(depth + 1)) reply = reply.replace('{{@{match}}}'.format(match=match), subreply) # Object caller. reply = reply.replace("{__call__}", "<call>") reply = reply.replace("{/__call__}", "</call>") reCall = re.findall(r'<call>(.+?)</call>', reply) for match in reCall: parts = re.split(RE.ws, match) output = '' obj = parts[0] args = [] if len(parts) > 1: args = parts[1:] # Do we know this object? if obj in self.master._objlangs: # We do, but do we have a handler for that language? 
lang = self.master._objlangs[obj] if lang in self.master._handlers: # We do. try: output = self.master._handlers[lang].call(self.master, obj, user, args) except python.PythonObjectError as e: self.warn(str(e)) if not ignore_object_errors: raise ObjectError(str(e)) output = RS_ERR_OBJECT else: if not ignore_object_errors: raise ObjectError(RS_ERR_OBJECT_HANDLER) output = RS_ERR_OBJECT_HANDLER else: if not ignore_object_errors: raise ObjectError(RS_ERR_OBJECT_MISSING) output = RS_ERR_OBJECT_MISSING reply = reply.replace('<call>{match}</call>'.format(match=match), output) return reply
Post process tags in a message. :param str user: The user ID. :param str msg: The user's formatted message. :param str reply: The raw RiveScript reply for the message. :param []str st: The array of ``<star>`` matches from the trigger. :param []str bst: The array of ``<botstar>`` matches from a ``%Previous`` command. :param int depth: The recursion depth counter. :param bool ignore_object_errors: Whether to ignore errors in Python object macros instead of raising an ``ObjectError`` exception. :return str: The final reply after tags have been processed.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/brain.py#L548-L782
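A few of the tags this method handles (``<set>``, ``<get>``, ``<formal>``, ``<star>``), exercised through the public API with an illustrative brain.

from rivescript import RiveScript

bot = RiveScript()
bot.stream("""
+ my name is *
- <set name=<formal>>Nice to meet you, <get name>!

+ what is my name
- Your name is <get name>, right?
""")
bot.sort_replies()
print(bot.reply("alice", "My name is jane doe"))  # <formal> capitalizes, <set> stores it
print(bot.reply("alice", "What is my name"))
print(bot.get_uservar("alice", "name"))           # the value persists on the user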
aichaos/rivescript-python
rivescript/brain.py
Brain.substitute
def substitute(self, msg, kind):
        """Run a kind of substitution on a message.

        :param str msg: The message to run substitutions against.
        :param str kind: The kind of substitution to run,
            one of ``subs`` or ``person``.
        """

        # Safety checking.
        if 'lists' not in self.master._sorted:
            raise RepliesNotSortedError("You must call sort_replies() once you are done loading RiveScript documents")
        if kind not in self.master._sorted["lists"]:
            raise RepliesNotSortedError("You must call sort_replies() once you are done loading RiveScript documents")

        # Get the substitution map.
        subs = None
        if kind == 'sub':
            subs = self.master._sub
        else:
            subs = self.master._person

        # Make placeholders each time we substitute something.
        ph = []
        i = 0

        for pattern in self.master._sorted["lists"][kind]:
            result = subs[pattern]

            # Make a placeholder.
            ph.append(result)
            placeholder = "\x00%d\x00" % i
            i += 1

            cache = self.master._regexc[kind][pattern]
            msg = re.sub(cache["sub1"], placeholder, msg)
            msg = re.sub(cache["sub2"], placeholder + r'\1', msg)
            msg = re.sub(cache["sub3"], r'\1' + placeholder + r'\2', msg)
            msg = re.sub(cache["sub4"], r'\1' + placeholder, msg)

        placeholders = re.findall(RE.placeholder, msg)
        for match in placeholders:
            i = int(match)
            result = ph[i]
            msg = msg.replace('\x00' + match + '\x00', result)

        # Strip & return.
        return msg.strip()
python
def substitute(self, msg, kind):
        """Run a kind of substitution on a message.

        :param str msg: The message to run substitutions against.
        :param str kind: The kind of substitution to run,
            one of ``subs`` or ``person``.
        """

        # Safety checking.
        if 'lists' not in self.master._sorted:
            raise RepliesNotSortedError("You must call sort_replies() once you are done loading RiveScript documents")
        if kind not in self.master._sorted["lists"]:
            raise RepliesNotSortedError("You must call sort_replies() once you are done loading RiveScript documents")

        # Get the substitution map.
        subs = None
        if kind == 'sub':
            subs = self.master._sub
        else:
            subs = self.master._person

        # Make placeholders each time we substitute something.
        ph = []
        i = 0

        for pattern in self.master._sorted["lists"][kind]:
            result = subs[pattern]

            # Make a placeholder.
            ph.append(result)
            placeholder = "\x00%d\x00" % i
            i += 1

            cache = self.master._regexc[kind][pattern]
            msg = re.sub(cache["sub1"], placeholder, msg)
            msg = re.sub(cache["sub2"], placeholder + r'\1', msg)
            msg = re.sub(cache["sub3"], r'\1' + placeholder + r'\2', msg)
            msg = re.sub(cache["sub4"], r'\1' + placeholder, msg)

        placeholders = re.findall(RE.placeholder, msg)
        for match in placeholders:
            i = int(match)
            result = ph[i]
            msg = msg.replace('\x00' + match + '\x00', result)

        # Strip & return.
        return msg.strip()
Run a kind of substitution on a message. :param str msg: The message to run substitutions against. :param str kind: The kind of substitution to run, one of ``subs`` or ``person``.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/brain.py#L784-L830
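A sketch of person substitutions, which this method applies when a reply uses the ``<person>``/``{person}`` tags; the substitution pairs below are made up.

from rivescript import RiveScript

bot = RiveScript()
bot.stream("""
! person i am = you are
! person you are = I am

+ say *
- Umm... "<person>"
""")
bot.sort_replies()
print(bot.reply("localuser", "Say I am the greatest"))  # -> Umm... "you are the greatest"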
aichaos/rivescript-python
rivescript/python.py
PyRiveObjects.load
def load(self, name, code):
        """Prepare a Python code object given by the RiveScript interpreter.

        :param str name: The name of the Python object macro.
        :param []str code: The Python source code for the object macro.
        """
        # We need to make a dynamic Python method.
        source = "def RSOBJ(rs, args):\n"
        for line in code:
            source = source + "\t" + line + "\n"

        source += "self._objects[name] = RSOBJ\n"

        try:
            exec(source)
            # self._objects[name] = RSOBJ
        except Exception as e:
            print("Failed to load code from object", name)
            print("The error given was: ", e)
python
def load(self, name, code):
        """Prepare a Python code object given by the RiveScript interpreter.

        :param str name: The name of the Python object macro.
        :param []str code: The Python source code for the object macro.
        """
        # We need to make a dynamic Python method.
        source = "def RSOBJ(rs, args):\n"
        for line in code:
            source = source + "\t" + line + "\n"

        source += "self._objects[name] = RSOBJ\n"

        try:
            exec(source)
            # self._objects[name] = RSOBJ
        except Exception as e:
            print("Failed to load code from object", name)
            print("The error given was: ", e)
Prepare a Python code object given by the RiveScript interpreter. :param str name: The name of the Python object macro. :param []str code: The Python source code for the object macro.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/python.py#L41-L59
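An end-to-end sketch of what the loader above enables: a ``> object ... python`` block streamed in and invoked through ``<call>`` (the macro name and trigger are made up).

from rivescript import RiveScript

bot = RiveScript()
bot.stream("""
> object reverse python
    msg = " ".join(args)
    return msg[::-1]
< object

+ reverse *
- <call>reverse <star></call>
""")
bot.sort_replies()
print(bot.reply("localuser", "reverse hello world"))  # -> "dlrow olleh"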
aichaos/rivescript-python
rivescript/python.py
PyRiveObjects.call
def call(self, rs, name, user, fields):
        """Invoke a previously loaded object.

        :param RiveScript rs: the parent RiveScript instance.
        :param str name: The name of the object macro to be called.
        :param str user: The user ID invoking the object macro.
        :param []str fields: Array of words sent as the object's arguments.

        :return str: The output of the object macro.
        """
        # Call the dynamic method.
        if name not in self._objects:
            return '[ERR: Object Not Found]'
        func = self._objects[name]
        reply = ''
        try:
            reply = func(rs, fields)
            if reply is None:
                reply = ''
        except Exception as e:
            raise PythonObjectError("Error executing Python object: " + str(e))
        return text_type(reply)
python
def call(self, rs, name, user, fields):
        """Invoke a previously loaded object.

        :param RiveScript rs: the parent RiveScript instance.
        :param str name: The name of the object macro to be called.
        :param str user: The user ID invoking the object macro.
        :param []str fields: Array of words sent as the object's arguments.

        :return str: The output of the object macro.
        """
        # Call the dynamic method.
        if name not in self._objects:
            return '[ERR: Object Not Found]'
        func = self._objects[name]
        reply = ''
        try:
            reply = func(rs, fields)
            if reply is None:
                reply = ''
        except Exception as e:
            raise PythonObjectError("Error executing Python object: " + str(e))
        return text_type(reply)
Invoke a previously loaded object. :param RiveScript rs: the parent RiveScript instance. :param str name: The name of the object macro to be called. :param str user: The user ID invoking the object macro. :param []str fields: Array of words sent as the object's arguments. :return str: The output of the object macro.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/python.py#L61-L82
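A sketch of the error path: a macro that raises is wrapped in ``PythonObjectError`` here, and by default the brain turns that into an ``[ERR: ...]`` reply string rather than propagating it (the macro below is made up).

from rivescript import RiveScript

bot = RiveScript()
bot.stream("""
> object boom python
    return 1 / 0
< object

+ explode
- <call>boom</call>
""")
bot.sort_replies()
# With the default errors_as_replies=True the exception is swallowed and an
# error marker string comes back; passing False raises ObjectError instead.
print(bot.reply("localuser", "explode"))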
aichaos/rivescript-python
rivescript/inheritance.py
get_topic_triggers
def get_topic_triggers(rs, topic, thats, depth=0, inheritance=0, inherited=False): """Recursively scan a topic and return a list of all triggers. Arguments: rs (RiveScript): A reference to the parent RiveScript instance. topic (str): The original topic name. thats (bool): Are we getting triggers for 'previous' replies? depth (int): Recursion step counter. inheritance (int): The inheritance level counter, for topics that inherit other topics. inherited (bool): Whether the current topic is inherited by others. Returns: []str: List of all triggers found. """ # Break if we're in too deep. if depth > rs._depth: rs._warn("Deep recursion while scanning topic inheritance") # Keep in mind here that there is a difference between 'includes' and # 'inherits' -- topics that inherit other topics are able to OVERRIDE # triggers that appear in the inherited topic. This means that if the top # topic has a trigger of simply '*', then NO triggers are capable of # matching in ANY inherited topic, because even though * has the lowest # priority, it has an automatic priority over all inherited topics. # # The getTopicTriggers method takes this into account. All topics that # inherit other topics will have their triggers prefixed with a fictional # {inherits} tag, which would start at {inherits=0} and increment if this # topic has other inheriting topics. So we can use this tag to make sure # topics that inherit things will have their triggers always be on top of # the stack, from inherits=0 to inherits=n. # Important info about the depth vs inheritance params to this function: # depth increments by 1 each time this function recursively calls itrs. # inheritance increments by 1 only when this topic inherits another # topic. # # This way, '> topic alpha includes beta inherits gamma' will have this # effect: # alpha and beta's triggers are combined together into one matching # pool, and then those triggers have higher matching priority than # gamma's. # # The inherited option is True if this is a recursive call, from a topic # that inherits other topics. This forces the {inherits} tag to be added # to the triggers. This only applies when the top topic 'includes' # another topic. rs._say("\tCollecting trigger list for topic " + topic + "(depth=" + str(depth) + "; inheritance=" + str(inheritance) + "; " + "inherited=" + str(inherited) + ")") # topic: the name of the topic # depth: starts at 0 and ++'s with each recursion # Topic doesn't exist? if not topic in rs._topics: rs._warn("Inherited or included topic {} doesn't exist or has no triggers".format( topic )) return [] # Collect an array of triggers to return. triggers = [] # Get those that exist in this topic directly. inThisTopic = [] if not thats: # The non-that structure is {topic}->[array of triggers] if topic in rs._topics: for trigger in rs._topics[topic]: inThisTopic.append([ trigger["trigger"], trigger ]) else: # The 'that' structure is: {topic}->{cur trig}->{prev trig}->{trig info} if topic in rs._thats.keys(): for curtrig in rs._thats[topic].keys(): for previous, pointer in rs._thats[topic][curtrig].items(): inThisTopic.append([ pointer["trigger"], pointer ]) # Does this topic include others? if topic in rs._includes: # Check every included topic. for includes in rs._includes[topic]: rs._say("\t\tTopic " + topic + " includes " + includes) triggers.extend(get_topic_triggers(rs, includes, thats, (depth + 1), inheritance, True)) # Does this topic inherit others? if topic in rs._lineage: # Check every inherited topic. 
for inherits in rs._lineage[topic]: rs._say("\t\tTopic " + topic + " inherits " + inherits) triggers.extend(get_topic_triggers(rs, inherits, thats, (depth + 1), (inheritance + 1), False)) # Collect the triggers for *this* topic. If this topic inherits any # other topics, it means that this topic's triggers have higher # priority than those in any inherited topics. Enforce this with an # {inherits} tag. if topic in rs._lineage or inherited: for trigger in inThisTopic: rs._say("\t\tPrefixing trigger with {inherits=" + str(inheritance) + "}" + trigger[0]) triggers.append(["{inherits=" + str(inheritance) + "}" + trigger[0], trigger[1]]) else: triggers.extend(inThisTopic) return triggers
python
def get_topic_triggers(rs, topic, thats, depth=0, inheritance=0, inherited=False): """Recursively scan a topic and return a list of all triggers. Arguments: rs (RiveScript): A reference to the parent RiveScript instance. topic (str): The original topic name. thats (bool): Are we getting triggers for 'previous' replies? depth (int): Recursion step counter. inheritance (int): The inheritance level counter, for topics that inherit other topics. inherited (bool): Whether the current topic is inherited by others. Returns: []str: List of all triggers found. """ # Break if we're in too deep. if depth > rs._depth: rs._warn("Deep recursion while scanning topic inheritance") # Keep in mind here that there is a difference between 'includes' and # 'inherits' -- topics that inherit other topics are able to OVERRIDE # triggers that appear in the inherited topic. This means that if the top # topic has a trigger of simply '*', then NO triggers are capable of # matching in ANY inherited topic, because even though * has the lowest # priority, it has an automatic priority over all inherited topics. # # The getTopicTriggers method takes this into account. All topics that # inherit other topics will have their triggers prefixed with a fictional # {inherits} tag, which would start at {inherits=0} and increment if this # topic has other inheriting topics. So we can use this tag to make sure # topics that inherit things will have their triggers always be on top of # the stack, from inherits=0 to inherits=n. # Important info about the depth vs inheritance params to this function: # depth increments by 1 each time this function recursively calls itrs. # inheritance increments by 1 only when this topic inherits another # topic. # # This way, '> topic alpha includes beta inherits gamma' will have this # effect: # alpha and beta's triggers are combined together into one matching # pool, and then those triggers have higher matching priority than # gamma's. # # The inherited option is True if this is a recursive call, from a topic # that inherits other topics. This forces the {inherits} tag to be added # to the triggers. This only applies when the top topic 'includes' # another topic. rs._say("\tCollecting trigger list for topic " + topic + "(depth=" + str(depth) + "; inheritance=" + str(inheritance) + "; " + "inherited=" + str(inherited) + ")") # topic: the name of the topic # depth: starts at 0 and ++'s with each recursion # Topic doesn't exist? if not topic in rs._topics: rs._warn("Inherited or included topic {} doesn't exist or has no triggers".format( topic )) return [] # Collect an array of triggers to return. triggers = [] # Get those that exist in this topic directly. inThisTopic = [] if not thats: # The non-that structure is {topic}->[array of triggers] if topic in rs._topics: for trigger in rs._topics[topic]: inThisTopic.append([ trigger["trigger"], trigger ]) else: # The 'that' structure is: {topic}->{cur trig}->{prev trig}->{trig info} if topic in rs._thats.keys(): for curtrig in rs._thats[topic].keys(): for previous, pointer in rs._thats[topic][curtrig].items(): inThisTopic.append([ pointer["trigger"], pointer ]) # Does this topic include others? if topic in rs._includes: # Check every included topic. for includes in rs._includes[topic]: rs._say("\t\tTopic " + topic + " includes " + includes) triggers.extend(get_topic_triggers(rs, includes, thats, (depth + 1), inheritance, True)) # Does this topic inherit others? if topic in rs._lineage: # Check every inherited topic. 
for inherits in rs._lineage[topic]: rs._say("\t\tTopic " + topic + " inherits " + inherits) triggers.extend(get_topic_triggers(rs, inherits, thats, (depth + 1), (inheritance + 1), False)) # Collect the triggers for *this* topic. If this topic inherits any # other topics, it means that this topic's triggers have higher # priority than those in any inherited topics. Enforce this with an # {inherits} tag. if topic in rs._lineage or inherited: for trigger in inThisTopic: rs._say("\t\tPrefixing trigger with {inherits=" + str(inheritance) + "}" + trigger[0]) triggers.append(["{inherits=" + str(inheritance) + "}" + trigger[0], trigger[1]]) else: triggers.extend(inThisTopic) return triggers
Recursively scan a topic and return a list of all triggers. Arguments: rs (RiveScript): A reference to the parent RiveScript instance. topic (str): The original topic name. thats (bool): Are we getting triggers for 'previous' replies? depth (int): Recursion step counter. inheritance (int): The inheritance level counter, for topics that inherit other topics. inherited (bool): Whether the current topic is inherited by others. Returns: []str: List of all triggers found.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/inheritance.py#L8-L113
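A sketch of the includes-versus-inherits behavior this helper encodes, using an illustrative two-topic brain.

from rivescript import RiveScript

bot = RiveScript()
bot.stream("""
+ hello
- Hello from the random topic!

> topic sports inherits random
    + i like *
    - A sports-flavored reply about <star>.
< topic
""")
bot.sort_replies()
bot.set_uservar("localuser", "topic", "sports")
print(bot.reply("localuser", "I like hockey"))  # matched inside the sports topic
print(bot.reply("localuser", "hello"))          # falls through to the inherited random trigger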
aichaos/rivescript-python
rivescript/inheritance.py
get_topic_tree
def get_topic_tree(rs, topic, depth=0):
    """Given one topic, get the list of all included/inherited topics.

    :param str topic: The topic to start the search at.
    :param int depth: The recursion depth counter.

    :return []str: Array of topics.
    """

    # Break if we're in too deep.
    if depth > rs._depth:
        rs._warn("Deep recursion while scanning topic trees!")
        return []

    # Collect an array of all topics.
    topics = [topic]

    # Does this topic include others?
    if topic in rs._includes:
        # Try each of these.
        for includes in sorted(rs._includes[topic]):
            topics.extend(get_topic_tree(rs, includes, depth + 1))

    # Does this topic inherit others?
    if topic in rs._lineage:
        # Try each of these.
        for inherits in sorted(rs._lineage[topic]):
            topics.extend(get_topic_tree(rs, inherits, depth + 1))

    return topics
python
def get_topic_tree(rs, topic, depth=0):
    """Given one topic, get the list of all included/inherited topics.

    :param str topic: The topic to start the search at.
    :param int depth: The recursion depth counter.

    :return []str: Array of topics.
    """

    # Break if we're in too deep.
    if depth > rs._depth:
        rs._warn("Deep recursion while scanning topic trees!")
        return []

    # Collect an array of all topics.
    topics = [topic]

    # Does this topic include others?
    if topic in rs._includes:
        # Try each of these.
        for includes in sorted(rs._includes[topic]):
            topics.extend(get_topic_tree(rs, includes, depth + 1))

    # Does this topic inherit others?
    if topic in rs._lineage:
        # Try each of these.
        for inherits in sorted(rs._lineage[topic]):
            topics.extend(get_topic_tree(rs, inherits, depth + 1))

    return topics
Given one topic, get the list of all included/inherited topics. :param str topic: The topic to start the search at. :param int depth: The recursion depth counter. :return []str: Array of topics.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/inheritance.py#L115-L144
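This helper is module-level, so it can be inspected directly against a loaded bot (it is internal API, so treat the import path as subject to change); the topic names below are made up.

from rivescript import RiveScript
from rivescript.inheritance import get_topic_tree

bot = RiveScript()
bot.stream("""
> topic a includes b
    + apple
    - A.
< topic

> topic b inherits c
    + banana
    - B.
< topic

> topic c
    + cherry
    - C.
< topic
""")
# Sorting is not required just to walk the topic tree.
print(get_topic_tree(bot, "a"))   # e.g. ['a', 'b', 'c']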
aichaos/rivescript-python
rivescript/utils.py
word_count
def word_count(trigger, all=False):
    """Count the words that aren't wildcards or options in a trigger.

    :param str trigger: The trigger to count words for.
    :param bool all: Count purely based on whitespace separators, or
        consider wildcards not to be their own words.

    :return int: The word count."""
    words = []
    if all:
        words = re.split(RE.ws, trigger)
    else:
        words = re.split(RE.wilds_and_optionals, trigger)

    wc = 0  # Word count
    for word in words:
        if len(word) > 0:
            wc += 1

    return wc
python
def word_count(trigger, all=False):
    """Count the words that aren't wildcards or options in a trigger.

    :param str trigger: The trigger to count words for.
    :param bool all: Count purely based on whitespace separators, or
        consider wildcards not to be their own words.

    :return int: The word count."""
    words = []
    if all:
        words = re.split(RE.ws, trigger)
    else:
        words = re.split(RE.wilds_and_optionals, trigger)

    wc = 0  # Word count
    for word in words:
        if len(word) > 0:
            wc += 1

    return wc
Count the words that aren't wildcards or options in a trigger. :param str trigger: The trigger to count words for. :param bool all: Count purely based on whitespace separators, or consider wildcards not to be their own words. :return int: The word count.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/utils.py#L14-L33
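A quick check of the counting rules above, calling the helper directly (the trigger strings are made up).

from rivescript.utils import word_count

print(word_count("what is your name"))    # 4
print(word_count("what is *"))            # 2 -- the * wildcard is not counted
print(word_count("what is *", all=True))  # 3 -- split purely on whitespace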
aichaos/rivescript-python
rivescript/utils.py
string_format
def string_format(msg, method):
    """Format a string (upper, lower, formal, sentence).

    :param str msg: The user's message.
    :param str method: One of ``uppercase``, ``lowercase``,
        ``sentence`` or ``formal``.

    :return str: The reformatted string.
    """
    if method == "uppercase":
        return msg.upper()
    elif method == "lowercase":
        return msg.lower()
    elif method == "sentence":
        return msg.capitalize()
    elif method == "formal":
        return string.capwords(msg)
python
def string_format(msg, method):
    """Format a string (upper, lower, formal, sentence).

    :param str msg: The user's message.
    :param str method: One of ``uppercase``, ``lowercase``,
        ``sentence`` or ``formal``.

    :return str: The reformatted string.
    """
    if method == "uppercase":
        return msg.upper()
    elif method == "lowercase":
        return msg.lower()
    elif method == "sentence":
        return msg.capitalize()
    elif method == "formal":
        return string.capwords(msg)
Format a string (upper, lower, formal, sentence). :param str msg: The user's message. :param str method: One of ``uppercase``, ``lowercase``, ``sentence`` or ``formal``. :return str: The reformatted string.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/utils.py#L63-L79
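The four formatting modes, called directly; these back the ``{formal}``, ``{sentence}``, ``{uppercase}`` and ``{lowercase}`` reply tags.

from rivescript.utils import string_format

print(string_format("hello world", "uppercase"))  # HELLO WORLD
print(string_format("HELLO WORLD", "lowercase"))  # hello world
print(string_format("hello world", "sentence"))   # Hello world
print(string_format("hello world", "formal"))     # Hello World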
aichaos/rivescript-python
eg/perl-objects/perl.py
PerlObject.load
def load(self, name, code):
        """Prepare a Perl code object given by the RS interpreter."""
        source = "\n".join(code)
        self._objects[name] = source
python
def load(self, name, code):
        """Prepare a Perl code object given by the RS interpreter."""
        source = "\n".join(code)
        self._objects[name] = source
Prepare a Perl code object given by the RS interpreter.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/eg/perl-objects/perl.py#L13-L17
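Handlers with this two-method interface are attached with ``set_handler()``; the stand-in handler below is hypothetical and only illustrates the ``load``/``call`` contract without requiring a Perl toolchain.

from rivescript import RiveScript

class EchoHandler(object):
    """Toy language handler: stores object code verbatim and echoes it back."""
    def __init__(self):
        self._objects = {}

    def load(self, name, code):
        self._objects[name] = "\n".join(code)

    def call(self, rs, name, user, fields):
        return self._objects.get(name, "[ERR: Object Not Found]")

bot = RiveScript()
bot.set_handler("echo", EchoHandler())   # '> object foo echo' blocks now route here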
aichaos/rivescript-python
eg/twilio/app.py
hello_rivescript
def hello_rivescript():
    """Receive an inbound SMS and send a reply from RiveScript."""
    from_number = request.values.get("From", "unknown")
    message = request.values.get("Body")
    reply = "(Internal error)"

    # Get a reply from RiveScript.
    if message:
        reply = bot.reply(from_number, message)

    # Send the response.
    resp = twilio.twiml.Response()
    resp.message(reply)
    return str(resp)
python
def hello_rivescript():
    """Receive an inbound SMS and send a reply from RiveScript."""
    from_number = request.values.get("From", "unknown")
    message = request.values.get("Body")
    reply = "(Internal error)"

    # Get a reply from RiveScript.
    if message:
        reply = bot.reply(from_number, message)

    # Send the response.
    resp = twilio.twiml.Response()
    resp.message(reply)
    return str(resp)
Receive an inbound SMS and send a reply from RiveScript.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/eg/twilio/app.py#L28-L42
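The key pattern above is keying ``reply()`` on the sender's phone number so each texter keeps an independent session; below is a stripped-down sketch of the same idea without Flask or Twilio (the webhook function is hypothetical).

from rivescript import RiveScript

bot = RiveScript()
bot.stream("+ hello\n- Hi there!")
bot.sort_replies()

def incoming_sms(from_number, body):
    """Hypothetical webhook core: one independent RiveScript session per sender."""
    if not body:
        return "(Internal error)"
    return bot.reply(from_number, body)

print(incoming_sms("+15550001234", "hello"))   # -> "Hi there!"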
aichaos/rivescript-python
rivescript/interactive.py
interactive_mode
def interactive_mode():
    """The built-in RiveScript Interactive Mode.

    This feature of RiveScript allows you to test and debug a chatbot in your
    terminal window. There are two ways to invoke this mode::

        # By running the Python RiveScript module directly:
        python rivescript eg/brain

        # By running the shell.py in the source distribution:
        python shell.py eg/brain

    The only required command line parameter is a filesystem path to a directory
    containing RiveScript source files (with the ``*.rive`` file extension).

    Additionally, it accepts command line flags.

    Parameters:
        --utf8: Enable UTF-8 mode.
        --json: Use JSON to communicate with the bot instead of plain text.
            See the JSON Mode documentation below for advanced details.
        --debug: Enable verbose debug logging.
        --log (str): The path to a text file you want the debug logging to be
            written to. This is to be used in conjunction with ``--debug``, for
            the case where you don't want your terminal window to be flooded with
            debug messages.
        --depth (int): Override the recursion depth limit (default ``50``).
        --nostrict: Disable strict syntax checking when parsing the RiveScript
            files. By default a syntax error raises an exception and will
            terminate the interactive mode.
        --help: Show the documentation of command line flags.
        path (str): The path to a directory containing ``.rive`` files.

    **JSON Mode**

    By invoking the interactive mode with the ``--json`` (or ``-j``) flag,
    the interactive mode will communicate with you via JSON messages. This
    can be used as a "bridge" to enable the use of RiveScript from another
    programming language that doesn't have its own native RiveScript
    implementation.

    For example, a program could open a shell pipe to the RiveScript interactive
    mode and send/receive JSON payloads to communicate with the bot.

    In JSON mode, you send a message to the bot in the following format::

        {
            "username": "str username",
            "message": "str message",
            "vars": {
                "topic": "random",
                "name": "Alice"
            }
        }

    The ``username`` and ``message`` keys are required, and ``vars`` is a
    key/value object of all the variables about the user.

    After sending the JSON payload over standard input, you can either close the
    input file handle (send the EOF signal; or Ctrl-D in a terminal), or send the
    string ``__END__`` on a line of text by itself. This will cause the bot to
    parse your payload, get a reply for the message, and respond with a similar
    JSON payload::

        {
            "status": "ok",
            "reply": "str response",
            "vars": {
                "topic": "random",
                "name": "Alice"
            }
        }

    The ``vars`` structure in the response contains all of the key/value pairs
    the bot knows about the username you passed in. This will also contain a lot
    of internal data, such as the user's history and last matched trigger.

    To keep a stateful session, you should parse the ``vars`` returned by
    RiveScript and pass them in with your next request so that the bot can
    remember them for the next reply.

    If you closed the filehandle (Ctrl-D, EOF) after your input payload, the
    interactive mode will exit after giving the response.

    If, on the other hand, you sent the string ``__END__`` on a line by itself
    after your payload, the RiveScript interactive mode will do the same after
    its response is returned. This way, you can re-use the shell pipe to send
    and receive many messages over a single session.
    """
    parser = argparse.ArgumentParser(description="RiveScript interactive mode.")
    parser.add_argument("--debug", "-d",
        help="Enable debug logging within RiveScript.",
        action="store_true",
    )
    parser.add_argument("--json", "-j",
        help="Enable JSON mode. In this mode, you communicate with the bot by "
            "sending a JSON-encoded object with keys 'username', 'message' and "
            "'vars' (an object of user variables) over standard input, and "
            "then close the input (^D) or send the string '__END__' on a line "
            "by itself. The bot will respond with a similarly formatted JSON "
            "response over standard output, and then will either exit or send "
            "'__END__' depending on how you ended your input.",
        action="store_true",
    )
    parser.add_argument("--utf8", "-u",
        help="Enable UTF-8 mode (default is disabled)",
        action="store_true",
    )
    parser.add_argument("--log",
        help="The path to a log file to send debugging output to (when debug "
            "mode is enabled) instead of standard output.",
        type=text_type,
    )
    parser.add_argument("--nostrict",
        help="Disable strict mode (where syntax errors are fatal)",
        action="store_true",
    )
    parser.add_argument("--depth",
        help="Override the default recursion depth limit when fetching a reply "
            "(default 50)",
        type=int,
        default=50,
    )
    parser.add_argument("path",
        help="A directory containing RiveScript files (*.rive) to load.",
        type=text_type,
        # required=True,
    )
    args = parser.parse_args()

    # Make the bot.
    bot = RiveScript(
        debug=args.debug,
        strict=not args.nostrict,
        depth=args.depth,
        utf8=args.utf8,
        log=args.log
    )
    bot.load_directory(args.path)
    bot.sort_replies()

    # Interactive mode?
    if args.json:
        # Read from standard input.
        buffer = ""
        stateful = False
        while True:
            line = ""
            try:
                line = input()
            except EOFError:
                break

            # Look for the __END__ line.
            end = re.match(r'^__END__$', line)
            if end:
                # Process it.
                stateful = True  # This is a stateful session
                json_in(bot, buffer, stateful)
                buffer = ""
                continue
            else:
                buffer += line + "\n"

        # We got the EOF. If the session was stateful, just exit,
        # otherwise process what we just read.
        if stateful:
            quit()
        json_in(bot, buffer, stateful)
        quit()

    print(
        " . . \n"
        " .:...:: RiveScript Interpreter (Python)\n"
        " .:: ::. Library Version: v{version}\n"
        " ..:;;. ' .;;:.. \n"
        " . ''' . Type '/quit' to quit.\n"
        " :;,:,;: Type '/help' for more options.\n"
        " : : \n"
        "\n"
        "Using the RiveScript bot found in: {path}\n"
        "Type a message to the bot and press Return to send it.\n"
        .format(version=bot.VERSION(), path=args.path)
    )

    while True:
        msg = input("You> ")
        if PY2:
            # For Python 2 only: cast the message to Unicode.
            msg = msg.decode("utf-8")

        # Commands
        if msg == '/help':
            print("> Supported Commands:")
            print("> /help - Displays this message.")
            print("> /quit - Exit the program.")
        elif msg == '/quit':
            exit()
        else:
            reply = bot.reply("localuser", msg)
            print("Bot>", reply)
python
def interactive_mode():
    """The built-in RiveScript Interactive Mode.

    This feature of RiveScript allows you to test and debug a chatbot in your
    terminal window. There are two ways to invoke this mode::

        # By running the Python RiveScript module directly:
        python rivescript eg/brain

        # By running the shell.py in the source distribution:
        python shell.py eg/brain

    The only required command line parameter is a filesystem path to a directory
    containing RiveScript source files (with the ``*.rive`` file extension).

    Additionally, it accepts command line flags.

    Parameters:
        --utf8: Enable UTF-8 mode.
        --json: Use JSON to communicate with the bot instead of plain text.
            See the JSON Mode documentation below for advanced details.
        --debug: Enable verbose debug logging.
        --log (str): The path to a text file you want the debug logging to be
            written to. This is to be used in conjunction with ``--debug``, for
            the case where you don't want your terminal window to be flooded with
            debug messages.
        --depth (int): Override the recursion depth limit (default ``50``).
        --nostrict: Disable strict syntax checking when parsing the RiveScript
            files. By default a syntax error raises an exception and will
            terminate the interactive mode.
        --help: Show the documentation of command line flags.
        path (str): The path to a directory containing ``.rive`` files.

    **JSON Mode**

    By invoking the interactive mode with the ``--json`` (or ``-j``) flag,
    the interactive mode will communicate with you via JSON messages. This
    can be used as a "bridge" to enable the use of RiveScript from another
    programming language that doesn't have its own native RiveScript
    implementation.

    For example, a program could open a shell pipe to the RiveScript interactive
    mode and send/receive JSON payloads to communicate with the bot.

    In JSON mode, you send a message to the bot in the following format::

        {
            "username": "str username",
            "message": "str message",
            "vars": {
                "topic": "random",
                "name": "Alice"
            }
        }

    The ``username`` and ``message`` keys are required, and ``vars`` is a
    key/value object of all the variables about the user.

    After sending the JSON payload over standard input, you can either close the
    input file handle (send the EOF signal; or Ctrl-D in a terminal), or send the
    string ``__END__`` on a line of text by itself. This will cause the bot to
    parse your payload, get a reply for the message, and respond with a similar
    JSON payload::

        {
            "status": "ok",
            "reply": "str response",
            "vars": {
                "topic": "random",
                "name": "Alice"
            }
        }

    The ``vars`` structure in the response contains all of the key/value pairs
    the bot knows about the username you passed in. This will also contain a lot
    of internal data, such as the user's history and last matched trigger.

    To keep a stateful session, you should parse the ``vars`` returned by
    RiveScript and pass them in with your next request so that the bot can
    remember them for the next reply.

    If you closed the filehandle (Ctrl-D, EOF) after your input payload, the
    interactive mode will exit after giving the response.

    If, on the other hand, you sent the string ``__END__`` on a line by itself
    after your payload, the RiveScript interactive mode will do the same after
    its response is returned. This way, you can re-use the shell pipe to send
    and receive many messages over a single session.
    """
    parser = argparse.ArgumentParser(description="RiveScript interactive mode.")
    parser.add_argument("--debug", "-d",
        help="Enable debug logging within RiveScript.",
        action="store_true",
    )
    parser.add_argument("--json", "-j",
        help="Enable JSON mode. In this mode, you communicate with the bot by "
            "sending a JSON-encoded object with keys 'username', 'message' and "
            "'vars' (an object of user variables) over standard input, and "
            "then close the input (^D) or send the string '__END__' on a line "
            "by itself. The bot will respond with a similarly formatted JSON "
            "response over standard output, and then will either exit or send "
            "'__END__' depending on how you ended your input.",
        action="store_true",
    )
    parser.add_argument("--utf8", "-u",
        help="Enable UTF-8 mode (default is disabled)",
        action="store_true",
    )
    parser.add_argument("--log",
        help="The path to a log file to send debugging output to (when debug "
            "mode is enabled) instead of standard output.",
        type=text_type,
    )
    parser.add_argument("--nostrict",
        help="Disable strict mode (where syntax errors are fatal)",
        action="store_true",
    )
    parser.add_argument("--depth",
        help="Override the default recursion depth limit when fetching a reply "
            "(default 50)",
        type=int,
        default=50,
    )
    parser.add_argument("path",
        help="A directory containing RiveScript files (*.rive) to load.",
        type=text_type,
        # required=True,
    )
    args = parser.parse_args()

    # Make the bot.
    bot = RiveScript(
        debug=args.debug,
        strict=not args.nostrict,
        depth=args.depth,
        utf8=args.utf8,
        log=args.log
    )
    bot.load_directory(args.path)
    bot.sort_replies()

    # Interactive mode?
    if args.json:
        # Read from standard input.
        buffer = ""
        stateful = False
        while True:
            line = ""
            try:
                line = input()
            except EOFError:
                break

            # Look for the __END__ line.
            end = re.match(r'^__END__$', line)
            if end:
                # Process it.
                stateful = True  # This is a stateful session
                json_in(bot, buffer, stateful)
                buffer = ""
                continue
            else:
                buffer += line + "\n"

        # We got the EOF. If the session was stateful, just exit,
        # otherwise process what we just read.
        if stateful:
            quit()
        json_in(bot, buffer, stateful)
        quit()

    print(
        " . . \n"
        " .:...:: RiveScript Interpreter (Python)\n"
        " .:: ::. Library Version: v{version}\n"
        " ..:;;. ' .;;:.. \n"
        " . ''' . Type '/quit' to quit.\n"
        " :;,:,;: Type '/help' for more options.\n"
        " : : \n"
        "\n"
        "Using the RiveScript bot found in: {path}\n"
        "Type a message to the bot and press Return to send it.\n"
        .format(version=bot.VERSION(), path=args.path)
    )

    while True:
        msg = input("You> ")
        if PY2:
            # For Python 2 only: cast the message to Unicode.
            msg = msg.decode("utf-8")

        # Commands
        if msg == '/help':
            print("> Supported Commands:")
            print("> /help - Displays this message.")
            print("> /quit - Exit the program.")
        elif msg == '/quit':
            exit()
        else:
            reply = bot.reply("localuser", msg)
            print("Bot>", reply)
The built-in RiveScript Interactive Mode. This feature of RiveScript allows you to test and debug a chatbot in your terminal window. There are two ways to invoke this mode:: # By running the Python RiveScript module directly: python rivescript eg/brain # By running the shell.py in the source distribution: python shell.py eg/brain The only required command line parameter is a filesystem path to a directory containing RiveScript source files (with the ``*.rive`` file extension). Additionally, it accepts command line flags. Parameters: --utf8: Enable UTF-8 mode. --json: Use JSON to communicate with the bot instead of plain text. See the JSON Mode documentation below for advanced details. --debug: Enable verbose debug logging. --log (str): The path to a text file you want the debug logging to be written to. This is to be used in conjunction with ``--debug``, for the case where you don't want your terminal window to be flooded with debug messages. --depth (int): Override the recursion depth limit (default ``50``). --nostrict: Disable strict syntax checking when parsing the RiveScript files. By default a syntax error raises an exception and will terminate the interactive mode. --help: Show the documentation of command line flags. path (str): The path to a directory containing ``.rive`` files. **JSON Mode** By invoking the interactive mode with the ``--json`` (or ``-j``) flag, the interactive mode will communicate with you via JSON messages. This can be used as a "bridge" to enable the use of RiveScript from another programming language that doesn't have its own native RiveScript implementation. For example, a program could open a shell pipe to the RiveScript interactive mode and send/receive JSON payloads to communicate with the bot. In JSON mode, you send a message to the bot in the following format:: { "username": "str username", "message": "str message", "vars": { "topic": "random", "name": "Alice" } } The ``username`` and ``message`` keys are required, and ``vars`` is a key/value object of all the variables about the user. After sending the JSON payload over standard input, you can either close the input file handle (send the EOF signal; or Ctrl-D in a terminal), or send the string ``__END__`` on a line of text by itself. This will cause the bot to parse your payload, get a reply for the message, and respond with a similar JSON payload:: { "status": "ok", "reply": "str response", "vars": { "topic": "random", "name": "Alice" } } The ``vars`` structure in the response contains all of the key/value pairs the bot knows about the username you passed in. This will also contain a lot of internal data, such as the user's history and last matched trigger. To keep a stateful session, you should parse the ``vars`` returned by RiveScript and pass them in with your next request so that the bot can remember them for the next reply. If you closed the filehandle (Ctrl-D, EOF) after your input payload, the interactive mode will exit after giving the response. If, on the other hand, you sent the string ``__END__`` on a line by itself after your payload, the RiveScript interactive mode will do the same after its response is returned. This way, you can re-use the shell pipe to send and receive many messages over a single session.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/interactive.py#L62-L262
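The JSON mode described above is meant to be driven from another process over a shell pipe. Below is a minimal sketch of that pattern, assuming the command is run from the source distribution and that eg/brain is the brain directory (both are assumptions for illustration, not part of the function above):

import json
import subprocess

# Launch the interactive mode in JSON mode. "rivescript" and "eg/brain" are
# assumed paths; point them at your own checkout and brain folder.
proc = subprocess.Popen(
    ["python", "rivescript", "eg/brain", "--json"],
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
)

payload = {
    "username": "soandso",
    "message": "Hello bot",
    "vars": {"name": "Soandso"},
}

# Send one payload and close stdin (EOF), so the bot answers once and exits.
out, _ = proc.communicate(json.dumps(payload).encode("utf-8"))
response = json.loads(out.decode("utf-8"))
print(response["reply"])
# For a stateful session, send "__END__" on its own line instead of closing
# the pipe, and feed the returned "vars" back in with the next payload.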
aichaos/rivescript-python
eg/json-server/server.py
reply
def reply():
    """Fetch a reply from RiveScript.

    Parameters (JSON):
    * username
    * message
    * vars
    """
    params = request.json
    if not params:
        return jsonify({
            "status": "error",
            "error": "Request must be of the application/json type!",
        })

    username = params.get("username")
    message = params.get("message")
    uservars = params.get("vars", dict())

    # Make sure the required params are present.
    if username is None or message is None:
        return jsonify({
            "status": "error",
            "error": "username and message are required keys",
        })

    # Copy and user vars from the post into RiveScript.
    if type(uservars) is dict:
        for key, value in uservars.items():
            bot.set_uservar(username, key, value)

    # Get a reply from the bot.
    reply = bot.reply(username, message)

    # Get all the user's vars back out of the bot to include in the response.
    uservars = bot.get_uservars(username)

    # Send the response.
    return jsonify({
        "status": "ok",
        "reply": reply,
        "vars": uservars,
    })
python
def reply():
    """Fetch a reply from RiveScript.

    Parameters (JSON):
    * username
    * message
    * vars
    """
    params = request.json
    if not params:
        return jsonify({
            "status": "error",
            "error": "Request must be of the application/json type!",
        })

    username = params.get("username")
    message = params.get("message")
    uservars = params.get("vars", dict())

    # Make sure the required params are present.
    if username is None or message is None:
        return jsonify({
            "status": "error",
            "error": "username and message are required keys",
        })

    # Copy and user vars from the post into RiveScript.
    if type(uservars) is dict:
        for key, value in uservars.items():
            bot.set_uservar(username, key, value)

    # Get a reply from the bot.
    reply = bot.reply(username, message)

    # Get all the user's vars back out of the bot to include in the response.
    uservars = bot.get_uservars(username)

    # Send the response.
    return jsonify({
        "status": "ok",
        "reply": reply,
        "vars": uservars,
    })
Fetch a reply from RiveScript. Parameters (JSON): * username * message * vars
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/eg/json-server/server.py#L24-L66
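A hedged client-side sketch for this endpoint, assuming the Flask app is running on the default http://localhost:5000 (as the index route below suggests) and that the requests package is installed:

import requests

API = "http://localhost:5000/reply"  # default Flask dev-server address

session_vars = {"name": "Soandso"}
for message in ["Hello bot", "What is my name?"]:
    resp = requests.post(API, json={
        "username": "soandso",
        "message": message,
        "vars": session_vars,
    }).json()

    if resp["status"] == "ok":
        print("Bot>", resp["reply"])
        session_vars = resp["vars"]  # carry the bot's user vars forward
    else:
        print("Error:", resp["error"])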
aichaos/rivescript-python
eg/json-server/server.py
index
def index(path=None):
    """On all other routes, just return an example `curl` command."""
    payload = {
        "username": "soandso",
        "message": "Hello bot",
        "vars": {
            "name": "Soandso",
        }
    }
    return Response(r"""Usage: curl -i \
        -H "Content-Type: application/json" \
        -X POST -d '{}' \
        http://localhost:5000/reply""".format(json.dumps(payload)),
        mimetype="text/plain")
python
def index(path=None):
    """On all other routes, just return an example `curl` command."""
    payload = {
        "username": "soandso",
        "message": "Hello bot",
        "vars": {
            "name": "Soandso",
        }
    }
    return Response(r"""Usage: curl -i \
        -H "Content-Type: application/json" \
        -X POST -d '{}' \
        http://localhost:5000/reply""".format(json.dumps(payload)),
        mimetype="text/plain")
On all other routes, just return an example `curl` command.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/eg/json-server/server.py#L70-L83
aichaos/rivescript-python
contrib/redis/rivescript_redis.py
RedisSessionManager._key
def _key(self, username, frozen=False):
    """Translate a username into a key for Redis."""
    if frozen:
        return self.frozen + username
    return self.prefix + username
python
def _key(self, username, frozen=False):
    """Translate a username into a key for Redis."""
    if frozen:
        return self.frozen + username
    return self.prefix + username
Translate a username into a key for Redis.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/contrib/redis/rivescript_redis.py#L42-L46
aichaos/rivescript-python
contrib/redis/rivescript_redis.py
RedisSessionManager._get_user
def _get_user(self, username):
    """Custom helper method to retrieve a user's data from Redis."""
    data = self.client.get(self._key(username))
    if data is None:
        return None
    return json.loads(data.decode())
python
def _get_user(self, username):
    """Custom helper method to retrieve a user's data from Redis."""
    data = self.client.get(self._key(username))
    if data is None:
        return None
    return json.loads(data.decode())
Custom helper method to retrieve a user's data from Redis.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/contrib/redis/rivescript_redis.py#L48-L53
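A small standalone sketch of the key scheme these two helpers imply; the prefix values below are placeholders for illustration, not necessarily the session manager's real defaults:

import json

PREFIX = "rivescript/"         # assumed live-data namespace
FROZEN = "rivescript-frozen/"  # assumed frozen-snapshot namespace

def key(username, frozen=False):
    # Mirrors _key(): one namespace for live data, another for frozen copies.
    return (FROZEN if frozen else PREFIX) + username

print(key("alice"))        # rivescript/alice
print(key("alice", True))  # rivescript-frozen/alice

# User data lives under that key as a JSON blob, so reading it back is
# essentially what _get_user() does: json.loads(raw_bytes.decode())
blob = json.dumps({"topic": "random", "name": "Alice"}).encode("utf-8")
print(json.loads(blob.decode()))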
aichaos/rivescript-python
rivescript/sorting.py
sort_trigger_set
def sort_trigger_set(triggers, exclude_previous=True, say=None):
    """Sort a group of triggers in optimal sorting order.

    The optimal sorting order is, briefly:
    * Atomic triggers (containing nothing but plain words and alternation
      groups) are on top, with triggers containing the most words coming first.
      Triggers with equal word counts are sorted by length, and then
      alphabetically if they have the same length.
    * Triggers containing optionals are sorted next, by word count like
      atomic triggers.
    * Triggers containing wildcards are next, with ``_`` (alphabetic)
      wildcards on top, then ``#`` (numeric) and finally ``*``.
    * At the bottom of the sorted list are triggers consisting of only a
      single wildcard, in the order: ``_``, ``#``, ``*``.

    Triggers that have ``{weight}`` tags are grouped together by weight value
    and sorted amongst themselves. Higher weighted groups are then ordered
    before lower weighted groups regardless of the normal sorting algorithm.

    Triggers that come from topics which inherit other topics are also sorted
    with higher priority than triggers from the inherited topics.

    Arguments:
        triggers ([]str): Array of triggers to sort.
        exclude_previous (bool): Create a sort buffer for 'previous' triggers.
        say (function): A reference to ``RiveScript._say()`` or provide your
            own function.
    """
    if say is None:
        say = lambda x: x

    # KEEP IN MIND: the `triggers` array is composed of array elements of the form
    # ["trigger text", pointer to trigger data]
    # So this code will use e.g. `trig[0]` when referring to the trigger text.

    # Create a list of trigger objects map.
    trigger_object_list = []
    for index, trig in enumerate(triggers):
        if exclude_previous and trig[1]["previous"]:
            continue

        pattern = trig[0]  # Extract only the text of the trigger, with possible tag of inherit

        # See if it has a weight tag
        match, weight = re.search(RE.weight, trig[0]), 0
        if match:  # Value of math is not None if there is a match.
            weight = int(match.group(1))  # Get the weight from the tag ``{weight}``

        # See if it has an inherits tag.
        match = re.search(RE.inherit, pattern)
        if match:
            inherit = int(match.group(1))  # Get inherit value from the tag ``{inherit}``
            say("\t\t\tTrigger belongs to a topic which inherits other topics: level=" + str(inherit))
            triggers[index][0] = pattern = re.sub(RE.inherit, "", pattern)  # Remove the inherit tag if any
        else:
            inherit = sys.maxsize  # If not found any inherit, set it to the maximum value, to place it last in the sort

        trigger_object_list.append(TriggerObj(pattern, index, weight, inherit))

    # Priority order of sorting criteria:
    # weight, inherit, is_empty, star, pound, under, option, wordcount, len, alphabet
    sorted_list = sorted(trigger_object_list,
                         key=attrgetter('weight', 'inherit', 'is_empty', 'star', 'pound', 'under', 'option', 'wordcount', 'len', 'alphabet'))

    return [triggers[item.index] for item in sorted_list]
python
def sort_trigger_set(triggers, exclude_previous=True, say=None):
    """Sort a group of triggers in optimal sorting order.

    The optimal sorting order is, briefly:
    * Atomic triggers (containing nothing but plain words and alternation
      groups) are on top, with triggers containing the most words coming first.
      Triggers with equal word counts are sorted by length, and then
      alphabetically if they have the same length.
    * Triggers containing optionals are sorted next, by word count like
      atomic triggers.
    * Triggers containing wildcards are next, with ``_`` (alphabetic)
      wildcards on top, then ``#`` (numeric) and finally ``*``.
    * At the bottom of the sorted list are triggers consisting of only a
      single wildcard, in the order: ``_``, ``#``, ``*``.

    Triggers that have ``{weight}`` tags are grouped together by weight value
    and sorted amongst themselves. Higher weighted groups are then ordered
    before lower weighted groups regardless of the normal sorting algorithm.

    Triggers that come from topics which inherit other topics are also sorted
    with higher priority than triggers from the inherited topics.

    Arguments:
        triggers ([]str): Array of triggers to sort.
        exclude_previous (bool): Create a sort buffer for 'previous' triggers.
        say (function): A reference to ``RiveScript._say()`` or provide your
            own function.
    """
    if say is None:
        say = lambda x: x

    # KEEP IN MIND: the `triggers` array is composed of array elements of the form
    # ["trigger text", pointer to trigger data]
    # So this code will use e.g. `trig[0]` when referring to the trigger text.

    # Create a list of trigger objects map.
    trigger_object_list = []
    for index, trig in enumerate(triggers):
        if exclude_previous and trig[1]["previous"]:
            continue

        pattern = trig[0]  # Extract only the text of the trigger, with possible tag of inherit

        # See if it has a weight tag
        match, weight = re.search(RE.weight, trig[0]), 0
        if match:  # Value of math is not None if there is a match.
            weight = int(match.group(1))  # Get the weight from the tag ``{weight}``

        # See if it has an inherits tag.
        match = re.search(RE.inherit, pattern)
        if match:
            inherit = int(match.group(1))  # Get inherit value from the tag ``{inherit}``
            say("\t\t\tTrigger belongs to a topic which inherits other topics: level=" + str(inherit))
            triggers[index][0] = pattern = re.sub(RE.inherit, "", pattern)  # Remove the inherit tag if any
        else:
            inherit = sys.maxsize  # If not found any inherit, set it to the maximum value, to place it last in the sort

        trigger_object_list.append(TriggerObj(pattern, index, weight, inherit))

    # Priority order of sorting criteria:
    # weight, inherit, is_empty, star, pound, under, option, wordcount, len, alphabet
    sorted_list = sorted(trigger_object_list,
                         key=attrgetter('weight', 'inherit', 'is_empty', 'star', 'pound', 'under', 'option', 'wordcount', 'len', 'alphabet'))

    return [triggers[item.index] for item in sorted_list]
Sort a group of triggers in optimal sorting order. The optimal sorting order is, briefly: * Atomic triggers (containing nothing but plain words and alternation groups) are on top, with triggers containing the most words coming first. Triggers with equal word counts are sorted by length, and then alphabetically if they have the same length. * Triggers containing optionals are sorted next, by word count like atomic triggers. * Triggers containing wildcards are next, with ``_`` (alphabetic) wildcards on top, then ``#`` (numeric) and finally ``*``. * At the bottom of the sorted list are triggers consisting of only a single wildcard, in the order: ``_``, ``#``, ``*``. Triggers that have ``{weight}`` tags are grouped together by weight value and sorted amongst themselves. Higher weighted groups are then ordered before lower weighted groups regardless of the normal sorting algorithm. Triggers that come from topics which inherit other topics are also sorted with higher priority than triggers from the inherited topics. Arguments: triggers ([]str): Array of triggers to sort. exclude_previous (bool): Create a sort buffer for 'previous' triggers. say (function): A reference to ``RiveScript._say()`` or provide your own function.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/sorting.py#L52-L118
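A toy illustration (not the real TriggerObj class) of the multi-key attrgetter sort used above, reduced to just word count, length, and alphabetical order:

from operator import attrgetter

# Toy stand-in for TriggerObj: more words first, then longer text, then A-Z.
class Trig:
    def __init__(self, text):
        self.text = text
        self.wordcount = -len(text.split())  # negate so bigger counts sort first
        self.len = -len(text)
        self.alphabet = text

patterns = ["hello bot", "hello", "hello there bot", "hi"]
ordered = sorted((Trig(p) for p in patterns),
                 key=attrgetter("wordcount", "len", "alphabet"))
print([t.text for t in ordered])
# ['hello there bot', 'hello bot', 'hello', 'hi']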
aichaos/rivescript-python
rivescript/sorting.py
sort_list
def sort_list(items):
    """Sort a simple list by number of words and length."""

    # Track by number of words.
    track = {}

    def by_length(word1, word2):
        return len(word2) - len(word1)

    # Loop through each item.
    for item in items:
        # Count the words.
        cword = utils.word_count(item, all=True)
        if cword not in track:
            track[cword] = []
        track[cword].append(item)

    # Sort them.
    output = []
    for count in sorted(track.keys(), reverse=True):
        sort = sorted(track[count], key=len, reverse=True)
        output.extend(sort)

    return output
python
def sort_list(items):
    """Sort a simple list by number of words and length."""

    # Track by number of words.
    track = {}

    def by_length(word1, word2):
        return len(word2) - len(word1)

    # Loop through each item.
    for item in items:
        # Count the words.
        cword = utils.word_count(item, all=True)
        if cword not in track:
            track[cword] = []
        track[cword].append(item)

    # Sort them.
    output = []
    for count in sorted(track.keys(), reverse=True):
        sort = sorted(track[count], key=len, reverse=True)
        output.extend(sort)

    return output
Sort a simple list by number of words and length.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/sorting.py#L120-L143
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript.load_directory
def load_directory(self, directory, ext=None):
    """Load RiveScript documents from a directory.

    :param str directory: The directory of RiveScript documents to load
        replies from.
    :param []str ext: List of file extensions to consider as RiveScript
        documents. The default is ``[".rive", ".rs"]``.
    """
    self._say("Loading from directory: " + directory)

    if ext is None:
        # Use the default extensions - .rive is preferable.
        ext = ['.rive', '.rs']
    elif type(ext) == str:
        # Backwards compatibility for ext being a string value.
        ext = [ext]

    if not os.path.isdir(directory):
        self._warn("Error: " + directory + " is not a directory.")
        return

    for root, subdirs, files in os.walk(directory):
        for file in files:
            for extension in ext:
                if file.lower().endswith(extension):
                    # Load this file.
                    self.load_file(os.path.join(root, file))
                    break
python
def load_directory(self, directory, ext=None):
    """Load RiveScript documents from a directory.

    :param str directory: The directory of RiveScript documents to load
        replies from.
    :param []str ext: List of file extensions to consider as RiveScript
        documents. The default is ``[".rive", ".rs"]``.
    """
    self._say("Loading from directory: " + directory)

    if ext is None:
        # Use the default extensions - .rive is preferable.
        ext = ['.rive', '.rs']
    elif type(ext) == str:
        # Backwards compatibility for ext being a string value.
        ext = [ext]

    if not os.path.isdir(directory):
        self._warn("Error: " + directory + " is not a directory.")
        return

    for root, subdirs, files in os.walk(directory):
        for file in files:
            for extension in ext:
                if file.lower().endswith(extension):
                    # Load this file.
                    self.load_file(os.path.join(root, file))
                    break
Load RiveScript documents from a directory. :param str directory: The directory of RiveScript documents to load replies from. :param []str ext: List of file extensions to consider as RiveScript documents. The default is ``[".rive", ".rs"]``.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L165-L192
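A minimal usage sketch built from the calls shown elsewhere in this file (load_directory, sort_replies, reply); the eg/brain path is an assumption:

from rivescript import RiveScript

bot = RiveScript()              # utf8=True, debug=True, etc. are optional
bot.load_directory("eg/brain")  # assumed path to a folder of *.rive files
bot.sort_replies()              # required before asking for replies

print(bot.reply("localuser", "Hello, bot!"))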
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript.load_file
def load_file(self, filename):
    """Load and parse a RiveScript document.

    :param str filename: The path to a RiveScript file.
    """
    self._say("Loading file: " + filename)

    fh = codecs.open(filename, 'r', 'utf-8')
    lines = fh.readlines()
    fh.close()

    self._say("Parsing " + str(len(lines)) + " lines of code from " + filename)
    self._parse(filename, lines)
python
def load_file(self, filename):
    """Load and parse a RiveScript document.

    :param str filename: The path to a RiveScript file.
    """
    self._say("Loading file: " + filename)

    fh = codecs.open(filename, 'r', 'utf-8')
    lines = fh.readlines()
    fh.close()

    self._say("Parsing " + str(len(lines)) + " lines of code from " + filename)
    self._parse(filename, lines)
Load and parse a RiveScript document. :param str filename: The path to a RiveScript file.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L194-L206
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript.stream
def stream(self, code):
    """Stream in RiveScript source code dynamically.

    :param code: Either a string containing RiveScript code or an array of
        lines of RiveScript code.
    """
    self._say("Streaming code.")
    if type(code) in [str, text_type]:
        code = code.split("\n")
    self._parse("stream()", code)
python
def stream(self, code):
    """Stream in RiveScript source code dynamically.

    :param code: Either a string containing RiveScript code or an array of
        lines of RiveScript code.
    """
    self._say("Streaming code.")
    if type(code) in [str, text_type]:
        code = code.split("\n")
    self._parse("stream()", code)
Stream in RiveScript source code dynamically. :param code: Either a string containing RiveScript code or an array of lines of RiveScript code.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L208-L217
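A quick sketch of stream() with an inline snippet of RiveScript source; the trigger/reply pair is illustrative only:

from rivescript import RiveScript

bot = RiveScript()
bot.stream("""
+ hello bot
- Hello, human!
""")
bot.sort_replies()
print(bot.reply("localuser", "hello bot"))  # -> "Hello, human!"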
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript._parse
def _parse(self, fname, code): """Parse RiveScript code into memory. :param str fname: The arbitrary file name used for syntax reporting. :param []str code: Lines of RiveScript source code to parse. """ # Get the "abstract syntax tree" ast = self._parser.parse(fname, code) # Get all of the "begin" type variables: global, var, sub, person, ... for kind, data in ast["begin"].items(): internal = getattr(self, "_" + kind) # The internal name for this attribute for name, value in data.items(): if value == "<undef>": del internal[name] else: internal[name] = value # Precompile substitutions. if kind in ["sub", "person"]: self._precompile_substitution(kind, name) # Let the scripts set the debug mode and other special globals. if self._global.get("debug"): self._debug = str(self._global["debug"]).lower() == "true" if self._global.get("depth"): self._depth = int(self._global["depth"]) # Consume all the parsed triggers. for topic, data in ast["topics"].items(): # Keep a map of the topics that are included/inherited under this topic. if not topic in self._includes: self._includes[topic] = {} if not topic in self._lineage: self._lineage[topic] = {} self._includes[topic].update(data["includes"]) self._lineage[topic].update(data["inherits"]) # Consume the triggers. if not topic in self._topics: self._topics[topic] = [] for trigger in data["triggers"]: self._topics[topic].append(trigger) # Precompile the regexp for this trigger. self._precompile_regexp(trigger["trigger"]) # Does this trigger have a %Previous? If so, make a pointer to # this exact trigger in _thats. if trigger["previous"] is not None: if not topic in self._thats: self._thats[topic] = {} if not trigger["trigger"] in self._thats[topic]: self._thats[topic][trigger["trigger"]] = {} self._thats[topic][trigger["trigger"]][trigger["previous"]] = trigger # Load all the parsed objects. for obj in ast["objects"]: # Have a handler for it? if obj["language"] in self._handlers: self._objlangs[obj["name"]] = obj["language"] self._handlers[obj["language"]].load(obj["name"], obj["code"])
python
def _parse(self, fname, code): """Parse RiveScript code into memory. :param str fname: The arbitrary file name used for syntax reporting. :param []str code: Lines of RiveScript source code to parse. """ # Get the "abstract syntax tree" ast = self._parser.parse(fname, code) # Get all of the "begin" type variables: global, var, sub, person, ... for kind, data in ast["begin"].items(): internal = getattr(self, "_" + kind) # The internal name for this attribute for name, value in data.items(): if value == "<undef>": del internal[name] else: internal[name] = value # Precompile substitutions. if kind in ["sub", "person"]: self._precompile_substitution(kind, name) # Let the scripts set the debug mode and other special globals. if self._global.get("debug"): self._debug = str(self._global["debug"]).lower() == "true" if self._global.get("depth"): self._depth = int(self._global["depth"]) # Consume all the parsed triggers. for topic, data in ast["topics"].items(): # Keep a map of the topics that are included/inherited under this topic. if not topic in self._includes: self._includes[topic] = {} if not topic in self._lineage: self._lineage[topic] = {} self._includes[topic].update(data["includes"]) self._lineage[topic].update(data["inherits"]) # Consume the triggers. if not topic in self._topics: self._topics[topic] = [] for trigger in data["triggers"]: self._topics[topic].append(trigger) # Precompile the regexp for this trigger. self._precompile_regexp(trigger["trigger"]) # Does this trigger have a %Previous? If so, make a pointer to # this exact trigger in _thats. if trigger["previous"] is not None: if not topic in self._thats: self._thats[topic] = {} if not trigger["trigger"] in self._thats[topic]: self._thats[topic][trigger["trigger"]] = {} self._thats[topic][trigger["trigger"]][trigger["previous"]] = trigger # Load all the parsed objects. for obj in ast["objects"]: # Have a handler for it? if obj["language"] in self._handlers: self._objlangs[obj["name"]] = obj["language"] self._handlers[obj["language"]].load(obj["name"], obj["code"])
Parse RiveScript code into memory. :param str fname: The arbitrary file name used for syntax reporting. :param []str code: Lines of RiveScript source code to parse.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L219-L281
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript.deparse
def deparse(self): """Dump the in-memory RiveScript brain as a Python data structure. This would be useful, for example, to develop a user interface for editing RiveScript replies without having to edit the RiveScript source code directly. :return dict: JSON-serializable Python data structure containing the contents of all RiveScript replies currently loaded in memory. """ # Data to return. result = { "begin": { "global": {}, "var": {}, "sub": {}, "person": {}, "array": {}, "triggers": [], }, "topics": {}, } # Populate the config fields. if self._debug: result["begin"]["global"]["debug"] = self._debug if self._depth != 50: result["begin"]["global"]["depth"] = 50 # Definitions result["begin"]["var"] = self._var.copy() result["begin"]["sub"] = self._sub.copy() result["begin"]["person"] = self._person.copy() result["begin"]["array"] = self._array.copy() result["begin"]["global"].update(self._global.copy()) # Topic Triggers. for topic in self._topics: dest = None # Where to place the topic info if topic == "__begin__": # Begin block. dest = result["begin"] else: # Normal topic. if topic not in result["topics"]: result["topics"][topic] = { "triggers": [], "includes": {}, "inherits": {}, } dest = result["topics"][topic] # Copy the triggers. for trig in self._topics[topic]: dest["triggers"].append(copy.deepcopy(trig)) # Inherits/Includes. for label, mapping in {"inherits": self._lineage, "includes": self._includes}.items(): if topic in mapping and len(mapping[topic]): dest[label] = mapping[topic].copy() return result
python
def deparse(self): """Dump the in-memory RiveScript brain as a Python data structure. This would be useful, for example, to develop a user interface for editing RiveScript replies without having to edit the RiveScript source code directly. :return dict: JSON-serializable Python data structure containing the contents of all RiveScript replies currently loaded in memory. """ # Data to return. result = { "begin": { "global": {}, "var": {}, "sub": {}, "person": {}, "array": {}, "triggers": [], }, "topics": {}, } # Populate the config fields. if self._debug: result["begin"]["global"]["debug"] = self._debug if self._depth != 50: result["begin"]["global"]["depth"] = 50 # Definitions result["begin"]["var"] = self._var.copy() result["begin"]["sub"] = self._sub.copy() result["begin"]["person"] = self._person.copy() result["begin"]["array"] = self._array.copy() result["begin"]["global"].update(self._global.copy()) # Topic Triggers. for topic in self._topics: dest = None # Where to place the topic info if topic == "__begin__": # Begin block. dest = result["begin"] else: # Normal topic. if topic not in result["topics"]: result["topics"][topic] = { "triggers": [], "includes": {}, "inherits": {}, } dest = result["topics"][topic] # Copy the triggers. for trig in self._topics[topic]: dest["triggers"].append(copy.deepcopy(trig)) # Inherits/Includes. for label, mapping in {"inherits": self._lineage, "includes": self._includes}.items(): if topic in mapping and len(mapping[topic]): dest[label] = mapping[topic].copy() return result
Dump the in-memory RiveScript brain as a Python data structure. This would be useful, for example, to develop a user interface for editing RiveScript replies without having to edit the RiveScript source code directly. :return dict: JSON-serializable Python data structure containing the contents of all RiveScript replies currently loaded in memory.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L283-L346
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript.write
def write(self, fh, deparsed=None): """Write the currently parsed RiveScript data into a file. Pass either a file name (string) or a file handle object. This uses ``deparse()`` to dump a representation of the loaded data and writes it to the destination file. If you provide your own data as the ``deparsed`` argument, it will use that data instead of calling ``deparse()`` itself. This way you can use ``deparse()``, edit the data, and use that to write the RiveScript document (for example, to be used by a user interface for editing RiveScript without writing the code directly). Parameters: fh (str or file): a string or a file-like object. deparsed (dict): a data structure in the same format as what ``deparse()`` returns. If not passed, this value will come from the current in-memory data from ``deparse()``. """ # Passed a string instead of a file handle? if type(fh) is str: fh = codecs.open(fh, "w", "utf-8") # Deparse the loaded data. if deparsed is None: deparsed = self.deparse() # Start at the beginning. fh.write("// Written by rivescript.deparse()\n") fh.write("! version = 2.0\n\n") # Variables of all sorts! for kind in ["global", "var", "sub", "person", "array"]: if len(deparsed["begin"][kind].keys()) == 0: continue for var in sorted(deparsed["begin"][kind].keys()): # Array types need to be separated by either spaces or pipes. data = deparsed["begin"][kind][var] if type(data) not in [str, text_type]: needs_pipes = False for test in data: if " " in test: needs_pipes = True break # Word-wrap the result, target width is 78 chars minus the # kind, var, and spaces and equals sign. # TODO: not implemented yet. # width = 78 - len(kind) - len(var) - 4 if needs_pipes: data = self._write_wrapped("|".join(data), sep="|") else: data = " ".join(data) fh.write("! {kind} {var} = {data}\n".format( kind=kind, var=var, data=data, )) fh.write("\n") # Begin block. if len(deparsed["begin"]["triggers"]): fh.write("> begin\n\n") self._write_triggers(fh, deparsed["begin"]["triggers"], indent="\t") fh.write("< begin\n\n") # The topics. Random first! topics = ["random"] topics.extend(sorted(deparsed["topics"].keys())) done_random = False for topic in topics: if topic not in deparsed["topics"]: continue if topic == "random" and done_random: continue if topic == "random": done_random = True tagged = False # Used > topic tag data = deparsed["topics"][topic] if topic != "random" or len(data["includes"]) or len(data["inherits"]): tagged = True fh.write("> topic " + topic) if data["inherits"]: fh.write(" inherits " + " ".join(sorted(data["inherits"].keys()))) if data["includes"]: fh.write(" includes " + " ".join(sorted(data["includes"].keys()))) fh.write("\n\n") indent = "\t" if tagged else "" self._write_triggers(fh, data["triggers"], indent=indent) if tagged: fh.write("< topic\n\n") return True
python
def write(self, fh, deparsed=None): """Write the currently parsed RiveScript data into a file. Pass either a file name (string) or a file handle object. This uses ``deparse()`` to dump a representation of the loaded data and writes it to the destination file. If you provide your own data as the ``deparsed`` argument, it will use that data instead of calling ``deparse()`` itself. This way you can use ``deparse()``, edit the data, and use that to write the RiveScript document (for example, to be used by a user interface for editing RiveScript without writing the code directly). Parameters: fh (str or file): a string or a file-like object. deparsed (dict): a data structure in the same format as what ``deparse()`` returns. If not passed, this value will come from the current in-memory data from ``deparse()``. """ # Passed a string instead of a file handle? if type(fh) is str: fh = codecs.open(fh, "w", "utf-8") # Deparse the loaded data. if deparsed is None: deparsed = self.deparse() # Start at the beginning. fh.write("// Written by rivescript.deparse()\n") fh.write("! version = 2.0\n\n") # Variables of all sorts! for kind in ["global", "var", "sub", "person", "array"]: if len(deparsed["begin"][kind].keys()) == 0: continue for var in sorted(deparsed["begin"][kind].keys()): # Array types need to be separated by either spaces or pipes. data = deparsed["begin"][kind][var] if type(data) not in [str, text_type]: needs_pipes = False for test in data: if " " in test: needs_pipes = True break # Word-wrap the result, target width is 78 chars minus the # kind, var, and spaces and equals sign. # TODO: not implemented yet. # width = 78 - len(kind) - len(var) - 4 if needs_pipes: data = self._write_wrapped("|".join(data), sep="|") else: data = " ".join(data) fh.write("! {kind} {var} = {data}\n".format( kind=kind, var=var, data=data, )) fh.write("\n") # Begin block. if len(deparsed["begin"]["triggers"]): fh.write("> begin\n\n") self._write_triggers(fh, deparsed["begin"]["triggers"], indent="\t") fh.write("< begin\n\n") # The topics. Random first! topics = ["random"] topics.extend(sorted(deparsed["topics"].keys())) done_random = False for topic in topics: if topic not in deparsed["topics"]: continue if topic == "random" and done_random: continue if topic == "random": done_random = True tagged = False # Used > topic tag data = deparsed["topics"][topic] if topic != "random" or len(data["includes"]) or len(data["inherits"]): tagged = True fh.write("> topic " + topic) if data["inherits"]: fh.write(" inherits " + " ".join(sorted(data["inherits"].keys()))) if data["includes"]: fh.write(" includes " + " ".join(sorted(data["includes"].keys()))) fh.write("\n\n") indent = "\t" if tagged else "" self._write_triggers(fh, data["triggers"], indent=indent) if tagged: fh.write("< topic\n\n") return True
Write the currently parsed RiveScript data into a file. Pass either a file name (string) or a file handle object. This uses ``deparse()`` to dump a representation of the loaded data and writes it to the destination file. If you provide your own data as the ``deparsed`` argument, it will use that data instead of calling ``deparse()`` itself. This way you can use ``deparse()``, edit the data, and use that to write the RiveScript document (for example, to be used by a user interface for editing RiveScript without writing the code directly). Parameters: fh (str or file): a string or a file-like object. deparsed (dict): a data structure in the same format as what ``deparse()`` returns. If not passed, this value will come from the current in-memory data from ``deparse()``.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L348-L448
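A hedged sketch of the deparse() -> edit -> write() round trip this docstring describes; the brain path and output filename are assumptions:

from rivescript import RiveScript

bot = RiveScript()
bot.load_directory("eg/brain")  # assumed brain folder
bot.sort_replies()

brain = bot.deparse()                    # plain dict: begin vars, topics, triggers...
brain["begin"]["var"]["name"] = "Aiden"  # tweak a bot variable programmatically

bot.write("edited_brain.rive", deparsed=brain)  # serialize back out as RiveScript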
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript._write_triggers
def _write_triggers(self, fh, triggers, indent=""):
    """Write triggers to a file handle.

    Parameters:
        fh (file): file object.
        triggers (list): list of triggers to write.
        indent (str): indentation for each line.
    """
    for trig in triggers:
        fh.write(indent + "+ " + self._write_wrapped(trig["trigger"], indent=indent) + "\n")
        d = trig

        if d.get("previous"):
            fh.write(indent + "% " + self._write_wrapped(d["previous"], indent=indent) + "\n")

        for cond in d["condition"]:
            fh.write(indent + "* " + self._write_wrapped(cond, indent=indent) + "\n")

        if d.get("redirect"):
            fh.write(indent + "@ " + self._write_wrapped(d["redirect"], indent=indent) + "\n")

        for reply in d["reply"]:
            fh.write(indent + "- " + self._write_wrapped(reply, indent=indent) + "\n")

        fh.write("\n")
python
def _write_triggers(self, fh, triggers, indent=""):
    """Write triggers to a file handle.

    Parameters:
        fh (file): file object.
        triggers (list): list of triggers to write.
        indent (str): indentation for each line.
    """
    for trig in triggers:
        fh.write(indent + "+ " + self._write_wrapped(trig["trigger"], indent=indent) + "\n")
        d = trig

        if d.get("previous"):
            fh.write(indent + "% " + self._write_wrapped(d["previous"], indent=indent) + "\n")

        for cond in d["condition"]:
            fh.write(indent + "* " + self._write_wrapped(cond, indent=indent) + "\n")

        if d.get("redirect"):
            fh.write(indent + "@ " + self._write_wrapped(d["redirect"], indent=indent) + "\n")

        for reply in d["reply"]:
            fh.write(indent + "- " + self._write_wrapped(reply, indent=indent) + "\n")

        fh.write("\n")
Write triggers to a file handle. Parameters: fh (file): file object. triggers (list): list of triggers to write. indent (str): indentation for each line.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L450-L475
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript._write_wrapped
def _write_wrapped(self, line, sep=" ", indent="", width=78):
    """Word-wrap a line of RiveScript code for being written to a file.

    :param str line: The original line of text to word-wrap.
    :param str sep: The word separator.
    :param str indent: The indentation to use (as a set of spaces).
    :param int width: The character width to constrain each line to.

    :return str: The reformatted line(s)."""
    words = line.split(sep)
    lines = []
    line = ""
    buf = []

    while len(words):
        buf.append(words.pop(0))
        line = sep.join(buf)
        if len(line) > width:
            # Need to word wrap!
            words.insert(0, buf.pop())  # Undo
            lines.append(sep.join(buf))
            buf = []
            line = ""

    # Straggler?
    if line:
        lines.append(line)

    # Returned output
    result = lines.pop(0)
    if len(lines):
        eol = ""
        if sep == " ":
            eol = "\s"
        for item in lines:
            result += eol + "\n" + indent + "^ " + item

    return result
python
def _write_wrapped(self, line, sep=" ", indent="", width=78):
    """Word-wrap a line of RiveScript code for being written to a file.

    :param str line: The original line of text to word-wrap.
    :param str sep: The word separator.
    :param str indent: The indentation to use (as a set of spaces).
    :param int width: The character width to constrain each line to.

    :return str: The reformatted line(s)."""
    words = line.split(sep)
    lines = []
    line = ""
    buf = []

    while len(words):
        buf.append(words.pop(0))
        line = sep.join(buf)
        if len(line) > width:
            # Need to word wrap!
            words.insert(0, buf.pop())  # Undo
            lines.append(sep.join(buf))
            buf = []
            line = ""

    # Straggler?
    if line:
        lines.append(line)

    # Returned output
    result = lines.pop(0)
    if len(lines):
        eol = ""
        if sep == " ":
            eol = "\s"
        for item in lines:
            result += eol + "\n" + indent + "^ " + item

    return result
Word-wrap a line of RiveScript code for being written to a file. :param str line: The original line of text to word-wrap. :param str sep: The word separator. :param str indent: The indentation to use (as a set of spaces). :param int width: The character width to constrain each line to. :return str: The reformatted line(s).
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L477-L515
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript.sort_replies
def sort_replies(self, thats=False):
    """Sort the loaded triggers in memory.

    After you have finished loading your RiveScript code, call this method
    to populate the various internal sort buffers. This is absolutely
    necessary for reply matching to work efficiently!
    """
    # (Re)initialize the sort cache.
    self._sorted["topics"] = {}
    self._sorted["thats"] = {}
    self._say("Sorting triggers...")

    # Loop through all the topics.
    for topic in self._topics.keys():
        self._say("Analyzing topic " + topic)

        # Collect a list of all the triggers we're going to worry about.
        # If this topic inherits another topic, we need to recursively add
        # those to the list as well.
        alltrig = inherit_utils.get_topic_triggers(self, topic, False)

        # Sort them.
        self._sorted["topics"][topic] = sorting.sort_trigger_set(alltrig, True, self._say)

        # Get all of the %Previous triggers for this topic.
        that_triggers = inherit_utils.get_topic_triggers(self, topic, True)

        # And sort them, too.
        self._sorted["thats"][topic] = sorting.sort_trigger_set(that_triggers, False, self._say)

    # And sort the substitution lists.
    if not "lists" in self._sorted:
        self._sorted["lists"] = {}
    self._sorted["lists"]["sub"] = sorting.sort_list(self._sub.keys())
    self._sorted["lists"]["person"] = sorting.sort_list(self._person.keys())
python
def sort_replies(self, thats=False):
    """Sort the loaded triggers in memory.

    After you have finished loading your RiveScript code, call this method
    to populate the various internal sort buffers. This is absolutely
    necessary for reply matching to work efficiently!
    """
    # (Re)initialize the sort cache.
    self._sorted["topics"] = {}
    self._sorted["thats"] = {}
    self._say("Sorting triggers...")

    # Loop through all the topics.
    for topic in self._topics.keys():
        self._say("Analyzing topic " + topic)

        # Collect a list of all the triggers we're going to worry about.
        # If this topic inherits another topic, we need to recursively add
        # those to the list as well.
        alltrig = inherit_utils.get_topic_triggers(self, topic, False)

        # Sort them.
        self._sorted["topics"][topic] = sorting.sort_trigger_set(alltrig, True, self._say)

        # Get all of the %Previous triggers for this topic.
        that_triggers = inherit_utils.get_topic_triggers(self, topic, True)

        # And sort them, too.
        self._sorted["thats"][topic] = sorting.sort_trigger_set(that_triggers, False, self._say)

    # And sort the substitution lists.
    if not "lists" in self._sorted:
        self._sorted["lists"] = {}
    self._sorted["lists"]["sub"] = sorting.sort_list(self._sub.keys())
    self._sorted["lists"]["person"] = sorting.sort_list(self._person.keys())
Sort the loaded triggers in memory. After you have finished loading your RiveScript code, call this method to populate the various internal sort buffers. This is absolutely necessary for reply matching to work efficiently!
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L521-L555
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript.set_handler
def set_handler(self, language, obj):
    """Define a custom language handler for RiveScript objects.

    Pass in a ``None`` value for the object to delete an existing handler (for
    example, to prevent Python code from being able to be run by default).

    Look in the ``eg`` folder of the rivescript-python distribution for an
    example script that sets up a JavaScript language handler.

    :param str language: The lowercased name of the programming language.
        Examples: python, javascript, perl
    :param class obj: An instance of an implementation class object.
        It should provide the following interface::

            class MyObjectHandler:
                def __init__(self):
                    pass
                def load(self, name, code):
                    # name = the name of the object from the RiveScript code
                    # code = the source code of the object
                def call(self, rs, name, fields):
                    # rs = the current RiveScript interpreter object
                    # name = the name of the object being called
                    # fields = array of arguments passed to the object
                    return reply
    """
    # Allow them to delete a handler too.
    if obj is None:
        if language in self._handlers:
            del self._handlers[language]
    else:
        self._handlers[language] = obj
python
def set_handler(self, language, obj):
    """Define a custom language handler for RiveScript objects.

    Pass in a ``None`` value for the object to delete an existing handler (for
    example, to prevent Python code from being able to be run by default).

    Look in the ``eg`` folder of the rivescript-python distribution for an
    example script that sets up a JavaScript language handler.

    :param str language: The lowercased name of the programming language.
        Examples: python, javascript, perl
    :param class obj: An instance of an implementation class object.
        It should provide the following interface::

            class MyObjectHandler:
                def __init__(self):
                    pass
                def load(self, name, code):
                    # name = the name of the object from the RiveScript code
                    # code = the source code of the object
                def call(self, rs, name, fields):
                    # rs = the current RiveScript interpreter object
                    # name = the name of the object being called
                    # fields = array of arguments passed to the object
                    return reply
    """
    # Allow them to delete a handler too.
    if obj is None:
        if language in self._handlers:
            del self._handlers[language]
    else:
        self._handlers[language] = obj
Define a custom language handler for RiveScript objects. Pass in a ``None`` value for the object to delete an existing handler (for example, to prevent Python code from being able to be run by default). Look in the ``eg`` folder of the rivescript-python distribution for an example script that sets up a JavaScript language handler. :param str language: The lowercased name of the programming language. Examples: python, javascript, perl :param class obj: An instance of an implementation class object. It should provide the following interface:: class MyObjectHandler: def __init__(self): pass def load(self, name, code): # name = the name of the object from the RiveScript code # code = the source code of the object def call(self, rs, name, fields): # rs = the current RiveScript interpreter object # name = the name of the object being called # fields = array of arguments passed to the object return reply
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L561-L593
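A skeleton handler matching the interface documented above; the EchoHandler name and its echo behaviour are made up for illustration:

from rivescript import RiveScript

class EchoHandler(object):
    """Illustrative handler; it stores code but only echoes its arguments."""
    def __init__(self):
        self._objects = {}

    def load(self, name, code):
        # name = object name from the RiveScript source, code = its source code
        self._objects[name] = code

    def call(self, rs, name, fields):
        # A real handler would execute the stored code here.
        return "echo: " + " ".join(fields)

bot = RiveScript()
bot.set_handler("echo", EchoHandler())  # register a handler for "echo" objects
bot.set_handler("python", None)         # optionally disable the built-in Python one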
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript.set_subroutine
def set_subroutine(self, name, code):
    """Define a Python object from your program.

    This is equivalent to having an object defined in the RiveScript code,
    except your Python code is defining it instead.

    :param str name: The name of the object macro.
    :param def code: A Python function with a method signature of
        ``(rs, args)``

    This method is only available if there is a Python handler set up
    (which there is by default, unless you've called
    ``set_handler("python", None)``).
    """
    # Do we have a Python handler?
    if 'python' in self._handlers:
        self._handlers['python']._objects[name] = code
        self._objlangs[name] = 'python'
    else:
        self._warn("Can't set_subroutine: no Python object handler!")
python
def set_subroutine(self, name, code):
    """Define a Python object from your program.

    This is equivalent to having an object defined in the RiveScript code,
    except your Python code is defining it instead.

    :param str name: The name of the object macro.
    :param def code: A Python function with a method signature of
        ``(rs, args)``

    This method is only available if there is a Python handler set up
    (which there is by default, unless you've called
    ``set_handler("python", None)``).
    """
    # Do we have a Python handler?
    if 'python' in self._handlers:
        self._handlers['python']._objects[name] = code
        self._objlangs[name] = 'python'
    else:
        self._warn("Can't set_subroutine: no Python object handler!")
Define a Python object from your program. This is equivalent to having an object defined in the RiveScript code, except your Python code is defining it instead. :param str name: The name of the object macro. :param def code: A Python function with a method signature of ``(rs, args)`` This method is only available if there is a Python handler set up (which there is by default, unless you've called ``set_handler("python", None)``).
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L595-L615
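A sketch of wiring a Python function in as an object macro and invoking it from RiveScript via the <call> tag; the macro name and trigger are illustrative:

from rivescript import RiveScript

bot = RiveScript()

def shout(rs, args):
    # rs is the RiveScript instance, args the list of words passed by <call>.
    return " ".join(args).upper()

bot.set_subroutine("shout", shout)

bot.stream("""
+ shout *
- <call>shout <star></call>
""")
bot.sort_replies()
print(bot.reply("localuser", "shout hello world"))  # -> "HELLO WORLD"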
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript.set_global
def set_global(self, name, value):
    """Set a global variable.

    Equivalent to ``! global`` in RiveScript code.

    :param str name: The name of the variable to set.
    :param str value: The value of the variable.
        Set this to ``None`` to delete the variable.
    """
    if value is None:
        # Unset the variable.
        if name in self._global:
            del self._global[name]
    self._global[name] = value
python
def set_global(self, name, value):
    """Set a global variable.

    Equivalent to ``! global`` in RiveScript code.

    :param str name: The name of the variable to set.
    :param str value: The value of the variable.
        Set this to ``None`` to delete the variable.
    """
    if value is None:
        # Unset the variable.
        if name in self._global:
            del self._global[name]
    self._global[name] = value
Set a global variable. Equivalent to ``! global`` in RiveScript code. :param str name: The name of the variable to set. :param str value: The value of the variable. Set this to ``None`` to delete the variable.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L617-L630
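For instance, assuming an initialised bot instance ``rs`` (the variable name ``debug_mode`` is purely illustrative), globals can be set and removed at runtime just like ``! global`` lines would do::

    rs.set_global("debug_mode", "true")   # equivalent to `! global debug_mode = true`
    rs.set_global("debug_mode", None)     # delete it again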
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript.set_variable
def set_variable(self, name, value):
    """Set a bot variable.

    Equivalent to ``! var`` in RiveScript code.

    :param str name: The name of the variable to set.
    :param str value: The value of the variable.
        Set this to ``None`` to delete the variable.
    """
    if value is None:
        # Unset the variable.
        if name in self._var:
            del self._var[name]
    self._var[name] = value
python
def set_variable(self, name, value):
    """Set a bot variable.

    Equivalent to ``! var`` in RiveScript code.

    :param str name: The name of the variable to set.
    :param str value: The value of the variable.
        Set this to ``None`` to delete the variable.
    """
    if value is None:
        # Unset the variable.
        if name in self._var:
            del self._var[name]
    self._var[name] = value
Set a bot variable. Equivalent to ``! var`` in RiveScript code. :param str name: The name of the variable to set. :param str value: The value of the variable. Set this to ``None`` to delete the variable.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L640-L653
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript.set_substitution
def set_substitution(self, what, rep):
    """Set a substitution.

    Equivalent to ``! sub`` in RiveScript code.

    :param str what: The original text to replace.
    :param str rep: The text to replace it with.
        Set this to ``None`` to delete the substitution.
    """
    if rep is None:
        # Unset the variable.
        if what in self._subs:
            del self._subs[what]
    self._subs[what] = rep
python
def set_substitution(self, what, rep):
    """Set a substitution.

    Equivalent to ``! sub`` in RiveScript code.

    :param str what: The original text to replace.
    :param str rep: The text to replace it with.
        Set this to ``None`` to delete the substitution.
    """
    if rep is None:
        # Unset the variable.
        if what in self._subs:
            del self._subs[what]
    self._subs[what] = rep
Set a substitution. Equivalent to ``! sub`` in RiveScript code. :param str what: The original text to replace. :param str rep: The text to replace it with. Set this to ``None`` to delete the substitution.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L663-L676
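A small sketch, assuming an initialised bot instance ``rs``; these calls mirror what ``! sub`` lines would do in RiveScript source (the substitution pairs are examples only)::

    rs.set_substitution("what's", "what is")
    rs.set_substitution("btw", "by the way")
    rs.set_substitution("btw", None)   # delete it again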
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript.set_person
def set_person(self, what, rep):
    """Set a person substitution.

    Equivalent to ``! person`` in RiveScript code.

    :param str what: The original text to replace.
    :param str rep: The text to replace it with.
        Set this to ``None`` to delete the substitution.
    """
    if rep is None:
        # Unset the variable.
        if what in self._person:
            del self._person[what]
    self._person[what] = rep
python
def set_person(self, what, rep):
    """Set a person substitution.

    Equivalent to ``! person`` in RiveScript code.

    :param str what: The original text to replace.
    :param str rep: The text to replace it with.
        Set this to ``None`` to delete the substitution.
    """
    if rep is None:
        # Unset the variable.
        if what in self._person:
            del self._person[what]
    self._person[what] = rep
Set a person substitution. Equivalent to ``! person`` in RiveScript code. :param str what: The original text to replace. :param str rep: The text to replace it with. Set this to ``None`` to delete the substitution.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L678-L691
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript.set_uservar
def set_uservar(self, user, name, value):
    """Set a variable for a user.

    This is like the ``<set>`` tag in RiveScript code.

    :param str user: The user ID to set a variable for.
    :param str name: The name of the variable to set.
    :param str value: The value to set there.
    """
    self._session.set(user, {name: value})
python
def set_uservar(self, user, name, value):
    """Set a variable for a user.

    This is like the ``<set>`` tag in RiveScript code.

    :param str user: The user ID to set a variable for.
    :param str name: The name of the variable to set.
    :param str value: The value to set there.
    """
    self._session.set(user, {name: value})
Set a variable for a user. This is like the ``<set>`` tag in RiveScript code. :param str user: The user ID to set a variable for. :param str name: The name of the variable to set. :param str value: The value to set there.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L693-L702
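For example, seeding user variables before asking for a reply, assuming an initialised bot instance ``rs`` (the user ID and variable names are arbitrary)::

    rs.set_uservar("alice", "name", "Alice")
    rs.set_uservar("alice", "mood", "happy")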
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript.set_uservars
def set_uservars(self, user, data=None):
    """Set many variables for a user, or set many variables for many users.

    This function can be called in two ways::

        # Set a dict of variables for a single user.
        rs.set_uservars(username, vars)

        # Set a nested dict of variables for many users.
        rs.set_uservars(many_vars)

    In the first syntax, ``vars`` is a simple dict of key/value string
    pairs. In the second syntax, ``many_vars`` is a structure like this::

        {
            "username1": {
                "key": "value",
            },
            "username2": {
                "key": "value",
            },
        }

    This way you can export *all* user variables via ``get_uservars()``
    and then re-import them all at once, instead of setting them once per
    user.

    :param optional str user: The user ID to set many variables for.
        Skip this parameter to set many variables for many users instead.
    :param dict data: The dictionary of key/value pairs for user variables,
        or else a dict of dicts mapping usernames to key/value pairs.

    This may raise a ``TypeError`` exception if you pass it invalid data
    types. Note that only the standard ``dict`` type is accepted, but not
    variants like ``OrderedDict``, so if you have a dict-like type you
    should cast it to ``dict`` first.
    """

    # Check the parameters to see how we're being used.
    if type(user) is dict and data is None:
        # Setting many variables for many users.
        for uid, uservars in user.items():
            if type(uservars) is not dict:
                raise TypeError(
                    "In set_uservars(many_vars) syntax, the many_vars dict "
                    "must be in the format of `many_vars['username'] = "
                    "dict(key=value)`, but the contents of many_vars['{}']"
                    " is not a dict.".format(uid)
                )
            self._session.set(uid, uservars)
    elif type(user) in [text_type, str] and type(data) is dict:
        # Setting variables for a single user.
        self._session.set(user, data)
    else:
        raise TypeError(
            "set_uservars() may only be called with types ({str}, dict) or "
            "(dict<{str}, dict>) but you called it with types ({a}, {b})"
            .format(
                str="unicode" if sys.version_info[0] < 3 else "str",
                a=type(user),
                b=type(data),
            ),
        )
python
def set_uservars(self, user, data=None):
    """Set many variables for a user, or set many variables for many users.

    This function can be called in two ways::

        # Set a dict of variables for a single user.
        rs.set_uservars(username, vars)

        # Set a nested dict of variables for many users.
        rs.set_uservars(many_vars)

    In the first syntax, ``vars`` is a simple dict of key/value string
    pairs. In the second syntax, ``many_vars`` is a structure like this::

        {
            "username1": {
                "key": "value",
            },
            "username2": {
                "key": "value",
            },
        }

    This way you can export *all* user variables via ``get_uservars()``
    and then re-import them all at once, instead of setting them once per
    user.

    :param optional str user: The user ID to set many variables for.
        Skip this parameter to set many variables for many users instead.
    :param dict data: The dictionary of key/value pairs for user variables,
        or else a dict of dicts mapping usernames to key/value pairs.

    This may raise a ``TypeError`` exception if you pass it invalid data
    types. Note that only the standard ``dict`` type is accepted, but not
    variants like ``OrderedDict``, so if you have a dict-like type you
    should cast it to ``dict`` first.
    """

    # Check the parameters to see how we're being used.
    if type(user) is dict and data is None:
        # Setting many variables for many users.
        for uid, uservars in user.items():
            if type(uservars) is not dict:
                raise TypeError(
                    "In set_uservars(many_vars) syntax, the many_vars dict "
                    "must be in the format of `many_vars['username'] = "
                    "dict(key=value)`, but the contents of many_vars['{}']"
                    " is not a dict.".format(uid)
                )
            self._session.set(uid, uservars)
    elif type(user) in [text_type, str] and type(data) is dict:
        # Setting variables for a single user.
        self._session.set(user, data)
    else:
        raise TypeError(
            "set_uservars() may only be called with types ({str}, dict) or "
            "(dict<{str}, dict>) but you called it with types ({a}, {b})"
            .format(
                str="unicode" if sys.version_info[0] < 3 else "str",
                a=type(user),
                b=type(data),
            ),
        )
Set many variables for a user, or set many variables for many users.

This function can be called in two ways::

    # Set a dict of variables for a single user.
    rs.set_uservars(username, vars)

    # Set a nested dict of variables for many users.
    rs.set_uservars(many_vars)

In the first syntax, ``vars`` is a simple dict of key/value string pairs.
In the second syntax, ``many_vars`` is a structure like this::

    {
        "username1": {
            "key": "value",
        },
        "username2": {
            "key": "value",
        },
    }

This way you can export *all* user variables via ``get_uservars()`` and
then re-import them all at once, instead of setting them once per user.

:param optional str user: The user ID to set many variables for.
    Skip this parameter to set many variables for many users instead.
:param dict data: The dictionary of key/value pairs for user variables,
    or else a dict of dicts mapping usernames to key/value pairs.

This may raise a ``TypeError`` exception if you pass it invalid data
types. Note that only the standard ``dict`` type is accepted, but not
variants like ``OrderedDict``, so if you have a dict-like type you should
cast it to ``dict`` first.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L704-L769
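Both documented call forms, assuming an initialised bot instance ``rs`` (user IDs and values are illustrative)::

    # Single-user form: a flat dict of key/value string pairs.
    rs.set_uservars("alice", {"name": "Alice", "mood": "happy"})

    # Many-users form: a dict of dicts keyed by user ID.
    rs.set_uservars({
        "alice": {"name": "Alice"},
        "bob": {"name": "Bob"},
    })

    # Dict-like types are rejected, so cast them to a plain dict first.
    from collections import OrderedDict
    profile = OrderedDict(name="Carol")
    rs.set_uservars("carol", dict(profile))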
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript.get_uservar
def get_uservar(self, user, name):
    """Get a variable about a user.

    :param str user: The user ID to look up a variable for.
    :param str name: The name of the variable to get.

    :return: The user variable, or ``None`` or ``"undefined"``:

        * If the user has no data at all, this returns ``None``.
        * If the user doesn't have this variable set, this returns the
          string ``"undefined"``.
        * Otherwise this returns the string value of the variable.
    """
    if name == '__lastmatch__':
        # Treat var `__lastmatch__` since it can't receive "undefined" value
        return self.last_match(user)
    else:
        return self._session.get(user, name)
python
def get_uservar(self, user, name):
    """Get a variable about a user.

    :param str user: The user ID to look up a variable for.
    :param str name: The name of the variable to get.

    :return: The user variable, or ``None`` or ``"undefined"``:

        * If the user has no data at all, this returns ``None``.
        * If the user doesn't have this variable set, this returns the
          string ``"undefined"``.
        * Otherwise this returns the string value of the variable.
    """
    if name == '__lastmatch__':
        # Treat var `__lastmatch__` since it can't receive "undefined" value
        return self.last_match(user)
    else:
        return self._session.get(user, name)
Get a variable about a user.

:param str user: The user ID to look up a variable for.
:param str name: The name of the variable to get.

:return: The user variable, or ``None`` or ``"undefined"``:

    * If the user has no data at all, this returns ``None``.
    * If the user doesn't have this variable set, this returns the string
      ``"undefined"``.
    * Otherwise this returns the string value of the variable.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L771-L787
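Handling the three documented return shapes, assuming an initialised bot instance ``rs`` and the user/variable names from the earlier sketches::

    value = rs.get_uservar("alice", "name")
    if value is None:
        print("no data stored for this user at all")
    elif value == "undefined":
        print("user exists but has no 'name' variable")
    else:
        print("name is", value)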
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript.get_uservars
def get_uservars(self, user=None):
    """Get all variables about a user (or all users).

    :param optional str user: The user ID to retrieve all variables for.
        If not passed, this function will return all data for all users.

    :return dict: All the user variables.

        * If a ``user`` was passed, this is a ``dict`` of key/value pairs
          of that user's variables. If the user doesn't exist in memory,
          this returns ``None``.
        * Otherwise, this returns a ``dict`` of key/value pairs that map
          user IDs to their variables (a ``dict`` of ``dict``).
    """
    if user is None:
        # All the users!
        return self._session.get_all()
    else:
        # Just this one!
        return self._session.get_any(user)
python
def get_uservars(self, user=None):
    """Get all variables about a user (or all users).

    :param optional str user: The user ID to retrieve all variables for.
        If not passed, this function will return all data for all users.

    :return dict: All the user variables.

        * If a ``user`` was passed, this is a ``dict`` of key/value pairs
          of that user's variables. If the user doesn't exist in memory,
          this returns ``None``.
        * Otherwise, this returns a ``dict`` of key/value pairs that map
          user IDs to their variables (a ``dict`` of ``dict``).
    """
    if user is None:
        # All the users!
        return self._session.get_all()
    else:
        # Just this one!
        return self._session.get_any(user)
Get all variables about a user (or all users).

:param optional str user: The user ID to retrieve all variables for.
    If not passed, this function will return all data for all users.

:return dict: All the user variables.

    * If a ``user`` was passed, this is a ``dict`` of key/value pairs of
      that user's variables. If the user doesn't exist in memory, this
      returns ``None``.
    * Otherwise, this returns a ``dict`` of key/value pairs that map user
      IDs to their variables (a ``dict`` of ``dict``).
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L789-L809
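A sketch of the export/re-import round trip mentioned in the ``set_uservars()`` docstring, assuming an initialised bot instance ``rs``::

    # Export everything...
    all_vars = rs.get_uservars()          # {"alice": {...}, "bob": {...}, ...}
    one_user = rs.get_uservars("alice")   # {"name": "Alice", ...} or None

    # ...and re-import it later in a single call.
    rs.set_uservars(all_vars)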
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript.clear_uservars
def clear_uservars(self, user=None):
    """Delete all variables about a user (or all users).

    :param str user: The user ID to clear variables for, or else clear all
        variables for all users if not provided.
    """
    if user is None:
        # All the users!
        self._session.reset_all()
    else:
        # Just this one.
        self._session.reset(user)
python
def clear_uservars(self, user=None):
    """Delete all variables about a user (or all users).

    :param str user: The user ID to clear variables for, or else clear all
        variables for all users if not provided.
    """
    if user is None:
        # All the users!
        self._session.reset_all()
    else:
        # Just this one.
        self._session.reset(user)
Delete all variables about a user (or all users). :param str user: The user ID to clear variables for, or else clear all variables for all users if not provided.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L811-L823
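For completeness, both forms side by side, assuming an initialised bot instance ``rs``::

    rs.clear_uservars("alice")   # forget one user
    rs.clear_uservars()          # forget every user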
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript.trigger_info
def trigger_info(self, trigger=None, dump=False):
    """Get information about a trigger.

    Pass in a raw trigger to find out what file name and line number it
    appeared at. This is useful for e.g. tracking down the location of the
    trigger last matched by the user via ``last_match()``. Returns a list
    of matching triggers, containing their topics, filenames and line
    numbers. Returns ``None`` if there weren't any matches found.

    The keys in the trigger info is as follows:

    * ``category``: Either 'topic' (for normal) or 'thats'
      (for %Previous triggers)
    * ``topic``: The topic name
    * ``trigger``: The raw trigger text
    * ``filename``: The filename the trigger was found in.
    * ``lineno``: The line number the trigger was found on.

    Pass in a true value for ``dump``, and the entire syntax tracking
    tree is returned.

    :param str trigger: The raw trigger text to look up.
    :param bool dump: Whether to dump the entire syntax tracking tree.

    :return: A list of matching triggers or ``None`` if no matches.
    """
    if dump:
        return self._syntax

    response = None

    # Search the syntax tree for the trigger.
    for category in self._syntax:
        for topic in self._syntax[category]:
            if trigger in self._syntax[category][topic]:
                # We got a match!
                if response is None:
                    response = list()
                fname, lineno = self._syntax[category][topic][trigger]['trigger']
                response.append(dict(
                    category=category,
                    topic=topic,
                    trigger=trigger,
                    filename=fname,
                    line=lineno,
                ))

    return response
python
def trigger_info(self, trigger=None, dump=False):
    """Get information about a trigger.

    Pass in a raw trigger to find out what file name and line number it
    appeared at. This is useful for e.g. tracking down the location of the
    trigger last matched by the user via ``last_match()``. Returns a list
    of matching triggers, containing their topics, filenames and line
    numbers. Returns ``None`` if there weren't any matches found.

    The keys in the trigger info is as follows:

    * ``category``: Either 'topic' (for normal) or 'thats'
      (for %Previous triggers)
    * ``topic``: The topic name
    * ``trigger``: The raw trigger text
    * ``filename``: The filename the trigger was found in.
    * ``lineno``: The line number the trigger was found on.

    Pass in a true value for ``dump``, and the entire syntax tracking
    tree is returned.

    :param str trigger: The raw trigger text to look up.
    :param bool dump: Whether to dump the entire syntax tracking tree.

    :return: A list of matching triggers or ``None`` if no matches.
    """
    if dump:
        return self._syntax

    response = None

    # Search the syntax tree for the trigger.
    for category in self._syntax:
        for topic in self._syntax[category]:
            if trigger in self._syntax[category][topic]:
                # We got a match!
                if response is None:
                    response = list()
                fname, lineno = self._syntax[category][topic][trigger]['trigger']
                response.append(dict(
                    category=category,
                    topic=topic,
                    trigger=trigger,
                    filename=fname,
                    line=lineno,
                ))

    return response
Get information about a trigger.

Pass in a raw trigger to find out what file name and line number it
appeared at. This is useful for e.g. tracking down the location of the
trigger last matched by the user via ``last_match()``. Returns a list of
matching triggers, containing their topics, filenames and line numbers.
Returns ``None`` if there weren't any matches found.

The keys in the trigger info is as follows:

* ``category``: Either 'topic' (for normal) or 'thats' (for %Previous triggers)
* ``topic``: The topic name
* ``trigger``: The raw trigger text
* ``filename``: The filename the trigger was found in.
* ``lineno``: The line number the trigger was found on.

Pass in a true value for ``dump``, and the entire syntax tracking tree is
returned.

:param str trigger: The raw trigger text to look up.
:param bool dump: Whether to dump the entire syntax tracking tree.

:return: A list of matching triggers or ``None`` if no matches.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L858-L905
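A sketch of tracing where the last-matched trigger was defined, assuming an initialised bot instance ``rs`` with a loaded brain and ``last_match()`` from the same class; note that the dicts built above store the line number under the key ``line``::

    rs.reply("alice", "hello bot")
    trigger = rs.last_match("alice")
    for info in rs.trigger_info(trigger) or []:
        print("{}:{} (topic {}): {}".format(
            info["filename"], info["line"], info["topic"], info["trigger"]))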
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript.current_user
def current_user(self):
    """Retrieve the user ID of the current user talking to your bot.

    This is mostly useful inside of a Python object macro to get the user
    ID of the person who caused the object macro to be invoked (i.e. to
    set a variable for that user from within the object).

    This will return ``None`` if used outside of the context of getting a
    reply (the value is unset at the end of the ``reply()`` method).
    """
    if self._brain._current_user is None:
        # They're doing it wrong.
        self._warn("current_user() is meant to be used from within a Python object macro!")
    return self._brain._current_user
python
def current_user(self):
    """Retrieve the user ID of the current user talking to your bot.

    This is mostly useful inside of a Python object macro to get the user
    ID of the person who caused the object macro to be invoked (i.e. to
    set a variable for that user from within the object).

    This will return ``None`` if used outside of the context of getting a
    reply (the value is unset at the end of the ``reply()`` method).
    """
    if self._brain._current_user is None:
        # They're doing it wrong.
        self._warn("current_user() is meant to be used from within a Python object macro!")
    return self._brain._current_user
Retrieve the user ID of the current user talking to your bot. This is mostly useful inside of a Python object macro to get the user ID of the person who caused the object macro to be invoked (i.e. to set a variable for that user from within the object). This will return ``None`` if used outside of the context of getting a reply (the value is unset at the end of the ``reply()`` method).
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L907-L920
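A sketch of the intended use from inside an object macro, registered here via ``set_subroutine()`` from earlier in this file (the ``remember`` name and the stored variable are illustrative)::

    def remember(rs, args):
        user = rs.current_user()   # only valid while a reply is being fetched
        rs.set_uservar(user, "last_args", " ".join(args))
        return "Noted."

    rs.set_subroutine("remember", remember)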
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript.reply
def reply(self, user, msg, errors_as_replies=True):
    """Fetch a reply from the RiveScript brain.

    Arguments:
        user (str): A unique user ID for the person requesting a reply.
            This could be e.g. a screen name or nickname. It's used internally
            to store user variables (including topic and history), so if your
            bot has multiple users each one should have a unique ID.
        msg (str): The user's message. This is allowed to contain
            punctuation and such, but any extraneous data such as HTML tags
            should be removed in advance.
        errors_as_replies (bool): When errors are encountered (such as a
            deep recursion error, no reply matched, etc.) this will make the
            reply be a text representation of the error message. If you set
            this to ``False``, errors will instead raise an exception, such as
            a ``DeepRecursionError`` or ``NoReplyError``. By default, no
            exceptions are raised and errors are set in the reply instead.

    Returns:
        str: The reply output.
    """
    return self._brain.reply(user, msg, errors_as_replies)
python
def reply(self, user, msg, errors_as_replies=True):
    """Fetch a reply from the RiveScript brain.

    Arguments:
        user (str): A unique user ID for the person requesting a reply.
            This could be e.g. a screen name or nickname. It's used internally
            to store user variables (including topic and history), so if your
            bot has multiple users each one should have a unique ID.
        msg (str): The user's message. This is allowed to contain
            punctuation and such, but any extraneous data such as HTML tags
            should be removed in advance.
        errors_as_replies (bool): When errors are encountered (such as a
            deep recursion error, no reply matched, etc.) this will make the
            reply be a text representation of the error message. If you set
            this to ``False``, errors will instead raise an exception, such as
            a ``DeepRecursionError`` or ``NoReplyError``. By default, no
            exceptions are raised and errors are set in the reply instead.

    Returns:
        str: The reply output.
    """
    return self._brain.reply(user, msg, errors_as_replies)
Fetch a reply from the RiveScript brain.

Arguments:
    user (str): A unique user ID for the person requesting a reply.
        This could be e.g. a screen name or nickname. It's used internally
        to store user variables (including topic and history), so if your
        bot has multiple users each one should have a unique ID.
    msg (str): The user's message. This is allowed to contain punctuation
        and such, but any extraneous data such as HTML tags should be
        removed in advance.
    errors_as_replies (bool): When errors are encountered (such as a deep
        recursion error, no reply matched, etc.) this will make the reply
        be a text representation of the error message. If you set this to
        ``False``, errors will instead raise an exception, such as a
        ``DeepRecursionError`` or ``NoReplyError``. By default, no
        exceptions are raised and errors are set in the reply instead.

Returns:
    str: The reply output.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L926-L947
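A minimal chat loop, assuming ``load_directory()`` and ``sort_replies()`` from the public API (not shown in this section) and a hypothetical ``./brain`` folder of ``*.rive`` files::

    from rivescript import RiveScript

    bot = RiveScript()
    bot.load_directory("./brain")   # a folder of *.rive files
    bot.sort_replies()

    while True:
        msg = input("You> ")
        if msg == "/quit":
            break
        # With errors_as_replies left at True, problems come back as text
        # replies instead of raising exceptions.
        print("Bot>", bot.reply("localuser", msg))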
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript._precompile_substitution
def _precompile_substitution(self, kind, pattern):
    """Pre-compile the regexp for a substitution pattern.

    This will speed up the substitutions that happen at the beginning of
    the reply fetching process. With the default brain, this took the
    time for _substitute down from 0.08s to 0.02s

    :param str kind: One of ``sub``, ``person``.
    :param str pattern: The substitution pattern.
    """
    if pattern not in self._regexc[kind]:
        qm = re.escape(pattern)
        self._regexc[kind][pattern] = {
            "qm": qm,
            "sub1": re.compile(r'^' + qm + r'$'),
            "sub2": re.compile(r'^' + qm + r'(\W+)'),
            "sub3": re.compile(r'(\W+)' + qm + r'(\W+)'),
            "sub4": re.compile(r'(\W+)' + qm + r'$'),
        }
python
def _precompile_substitution(self, kind, pattern):
    """Pre-compile the regexp for a substitution pattern.

    This will speed up the substitutions that happen at the beginning of
    the reply fetching process. With the default brain, this took the
    time for _substitute down from 0.08s to 0.02s

    :param str kind: One of ``sub``, ``person``.
    :param str pattern: The substitution pattern.
    """
    if pattern not in self._regexc[kind]:
        qm = re.escape(pattern)
        self._regexc[kind][pattern] = {
            "qm": qm,
            "sub1": re.compile(r'^' + qm + r'$'),
            "sub2": re.compile(r'^' + qm + r'(\W+)'),
            "sub3": re.compile(r'(\W+)' + qm + r'(\W+)'),
            "sub4": re.compile(r'(\W+)' + qm + r'$'),
        }
Pre-compile the regexp for a substitution pattern. This will speed up the substitutions that happen at the beginning of the reply fetching process. With the default brain, this took the time for _substitute down from 0.08s to 0.02s :param str kind: One of ``sub``, ``person``. :param str pattern: The substitution pattern.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L949-L967
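A standalone illustration of what the four compiled variants correspond to (the whole message, the start, the middle, or the end of it); this mirrors the dict built above without touching the private cache, and the sample pattern is arbitrary::

    import re

    qm = re.escape("what's")
    cases = {
        "sub1": re.compile(r'^' + qm + r'$'),          # the whole message
        "sub2": re.compile(r'^' + qm + r'(\W+)'),      # at the start
        "sub3": re.compile(r'(\W+)' + qm + r'(\W+)'),  # in the middle
        "sub4": re.compile(r'(\W+)' + qm + r'$'),      # at the end
    }
    print(bool(cases["sub2"].match("what's up?")))     # True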
aichaos/rivescript-python
rivescript/rivescript.py
RiveScript._precompile_regexp
def _precompile_regexp(self, trigger):
    """Precompile the regex for most triggers.

    If the trigger is non-atomic, and doesn't include dynamic tags like
    ``<bot>``, ``<get>``, ``<input>/<reply>`` or arrays, it can be
    precompiled and save time when matching.

    :param str trigger: The trigger text to attempt to precompile.
    """
    if utils.is_atomic(trigger):
        return  # Don't need a regexp for atomic triggers.

    # Check for dynamic tags.
    for tag in ["@", "<bot", "<get", "<input", "<reply"]:
        if tag in trigger:
            return  # Can't precompile this trigger.

    self._regexc["trigger"][trigger] = self._brain.reply_regexp(None, trigger)
python
def _precompile_regexp(self, trigger):
    """Precompile the regex for most triggers.

    If the trigger is non-atomic, and doesn't include dynamic tags like
    ``<bot>``, ``<get>``, ``<input>/<reply>`` or arrays, it can be
    precompiled and save time when matching.

    :param str trigger: The trigger text to attempt to precompile.
    """
    if utils.is_atomic(trigger):
        return  # Don't need a regexp for atomic triggers.

    # Check for dynamic tags.
    for tag in ["@", "<bot", "<get", "<input", "<reply"]:
        if tag in trigger:
            return  # Can't precompile this trigger.

    self._regexc["trigger"][trigger] = self._brain.reply_regexp(None, trigger)
Precompile the regex for most triggers. If the trigger is non-atomic, and doesn't include dynamic tags like ``<bot>``, ``<get>``, ``<input>/<reply>`` or arrays, it can be precompiled and save time when matching. :param str trigger: The trigger text to attempt to precompile.
https://github.com/aichaos/rivescript-python/blob/b55c820cf02a194605fd66af1f070e239f84ed31/rivescript/rivescript.py#L969-L986