cimport numpy as np
from libc.stdint cimport uint32_t, uint64_t, int64_t, int32_t
from libc.stdio cimport FILE
from posix.types cimport off_t
cdef extern from "lhalotree.h":
struct lhalotree:
# merger tree pointers
int Descendant
int FirstProgenitor
int NextProgenitor
int FirstHaloInFOFgroup
int NextHaloInFOFgroup
# properties of halo
int Len
float M_Mean200
float Mvir # for Millennium, Mvir=M_Crit200
float M_TopHat
float Pos[3]
float Vel[3]
float VelDisp
float Vmax
float Spin[3]
long long MostBoundID
# original position in simulation tree files
int SnapNum
int FileNr
int SubhaloIndex
float SubHalfMass
cdef extern from "read_lhalotree.h":
# Actual useful functions
size_t read_single_lhalotree_from_stream(FILE *fp, lhalotree *tree,
const int32_t nhalos)
int pread_single_lhalotree_with_offset(int fd, lhalotree *tree,
const int32_t nhalos, off_t offset)
int read_file_headers_lhalotree(const char *filename, int32_t *ntrees,
int32_t *totnhalos, int32_t **nhalos_per_tree)
int32_t read_ntrees_lhalotree(const char *filename)
lhalotree *read_entire_lhalotree(const char *filename, int *ntrees,
int *totnhalos, int **nhalos_per_tree)
lhalotree *read_single_lhalotree(const char *filename,
const int32_t treenum)
# Sorting an LHalotree output into a new order
int sort_lhalotree_in_snapshot_and_fof_groups(lhalotree *tree,
const int64_t nhalos, int test)
# And fixing lhalotree mergertree indices from a generic sort
int fix_mergertree_index(lhalotree *tree, const int64_t nhalos,
const int32_t *index)
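# Usage sketch (hedged): load the first tree from a file and return the
# virial mass of its root halo. The function name and the assumption that
# the returned buffer is malloc'ed (and must be freed with libc free())
# are illustrative, not part of the declared C API.
from libc.stdlib cimport free

def example_first_tree_mvir(bytes filename):
    cdef int32_t ntrees = read_ntrees_lhalotree(filename)
    if ntrees <= 0:
        raise IOError("no trees found in %s" % filename)
    cdef lhalotree *tree = read_single_lhalotree(filename, 0)
    if tree == NULL:
        raise MemoryError()
    try:
        return tree[0].Mvir
    finally:
        free(tree)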
cimport numpy as np
from .doc cimport Doc
cdef class Span:
cdef readonly Doc doc
cdef readonly int start
cdef readonly int end
cdef readonly int start_char
cdef readonly int end_char
cdef readonly int label
cdef public _vector
cdef public _vector_norm
cpdef int _recalculate_indices(self) except -1
from typing import Dict, Optional, Union
import numpy as np
cimport numpy as np
np.import_array()
# DTYPE_t is defined elsewhere in the original package; assumed here to be
# the usual float64 vector dtype
ctypedef np.float64_t DTYPE_t
cdef class Hyperparameter(object):
def __init__(self, name: str, meta: Optional[Dict]) -> None:
if not isinstance(name, str):
raise TypeError(
"The name of a hyperparameter must be an instance of"
" %s, but is %s." % (str(str), type(name)))
self.name: str = name
self.meta = meta
def __repr__(self):
raise NotImplementedError()
def is_legal(self, value):
raise NotImplementedError()
cpdef bint is_legal_vector(self, DTYPE_t value):
"""
Check whether the given value is a legal value for the vector
representation of this hyperparameter.
Parameters
----------
value
the vector value to check
Returns
-------
bool
True if the given value is a legal vector value, otherwise False
"""
raise NotImplementedError()
def sample(self, rs):
vector = self._sample(rs)
return self._transform(vector)
def rvs(
self,
size: Optional[int] = None,
random_state: Optional[Union[int, np.random, np.random.RandomState]] = None
) -> Union[float, np.ndarray]:
"""
scipy compatibility wrapper for ``_sample``,
allowing the hyperparameter to be used in sklearn API
hyperparameter searchers, eg. GridSearchCV.
"""
# copy-pasted from scikit-learn utils/validation.py
def check_random_state(seed):
"""
Turn seed into a np.random.RandomState instance
If seed is None (or np.random), return the RandomState singleton used
by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
If seed is a new-style np.random.Generator, return it.
Otherwise, raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (int, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
try:
# Generator is only available in numpy >= 1.17
if isinstance(seed, np.random.Generator):
return seed
except AttributeError:
pass
raise ValueError("%r cannot be used to seed a numpy.random.RandomState"
" instance" % seed)
# if size=None, return a value, but if size=1, return a 1-element array
vector = self._sample(
rs=check_random_state(random_state),
size=size if size is not None else 1
)
if size is None:
vector = vector[0]
return self._transform(vector)
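    # Usage sketch (assumes a concrete subclass, e.g. ConfigSpace's
    # UniformFloatHyperparameter, which implements _sample/_transform):
    #   hp = UniformFloatHyperparameter("x", lower=0.0, upper=1.0)
    #   hp.rvs()                          # a single float
    #   hp.rvs(size=3, random_state=42)   # ndarray of shape (3,)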
def _sample(self, rs, size):
raise NotImplementedError()
def _transform(
self,
vector: Union[np.ndarray, float, int]
) -> Optional[Union[np.ndarray, float, int]]:
raise NotImplementedError()
def _inverse_transform(self, vector):
raise NotImplementedError()
def has_neighbors(self):
raise NotImplementedError()
def get_neighbors(self, value, rs, number, transform = False):
raise NotImplementedError()
def get_num_neighbors(self, value):
raise NotImplementedError()
cpdef int compare_vector(self, DTYPE_t value, DTYPE_t value2):
raise NotImplementedError()
def pdf(self, vector: np.ndarray) -> np.ndarray:
"""
Computes the probability density function of the hyperparameter in
the hyperparameter space (the one specified by the user).
For each hyperparameter type, there is also a method _pdf which
operates on the transformed (and possibly normalized) hyperparameter
space. Only legal values return a positive probability density,
otherwise zero.
Parameters
----------
vector: np.ndarray
the (N, ) vector of inputs for which the probability density
function is to be computed.
Returns
-------
np.ndarray(N, )
Probability density values of the input vector
"""
raise NotImplementedError()
def _pdf(self, vector: np.ndarray) -> np.ndarray:
"""
Computes the probability density function of the hyperparameter in
the transformed (and possibly normalized, depends on the parameter
type) space. As such, one never has to worry about log-normal
distributions, only normal distributions (as the inverse_transform
in the pdf method handles these).
Parameters
----------
vector: np.ndarray
the (N, ) vector of inputs for which the probability density
function is to be computed.
Returns
-------
np.ndarray(N, )
Probability density values of the input vector
"""
raise NotImplementedError()
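    # In concrete subclasses, pdf() typically maps user-space values into
    # the transformed space (via _inverse_transform()) and then delegates
    # to _pdf(), which is why _pdf never has to handle e.g. log-normal
    # distributions directly.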
def get_size(self) -> float:
raise NotImplementedError()
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__ = "biotite.sequence.align"
__author__ = "Patrick Kunzmann"
__all__ = ["KmerAlphabet"]
cimport cython
cimport numpy as np
import numpy as np
from ..alphabet import Alphabet, LetterAlphabet, AlphabetError
ctypedef np.uint8_t uint8
ctypedef np.uint16_t uint16
ctypedef np.uint32_t uint32
ctypedef np.uint64_t uint64
ctypedef np.int64_t int64
ctypedef fused CodeType:
uint8
uint16
uint32
uint64
class KmerAlphabet(Alphabet):
"""
__init__(base_alphabet, k, spacing=None)
This type of alphabet uses *k-mers* as symbols, i.e. all
combinations of *k* symbols from its *base alphabet*.
Its primary use is its :meth:`create_kmers()` method, which iterates
over all overlapping *k-mers* in a :class:`Sequence` and encodes
each one into its corresponding *k-mer* symbol code.
This functionality is prominently used by a :class:`KmerTable` to
find *k-mer* matches between two sequences.
A :class:`KmerAlphabet` has :math:`n^k` different symbols, where
:math:`n` is the number of symbols in the base alphabet.
Parameters
----------
base_alphabet : Alphabet
The base alphabet.
The created :class:`KmerAlphabet` contains all combinations of
*k* symbols from this alphabet.
k : int
An integer greater than 1 that defines the length of the
*k-mers*.
spacing : None or str or list or ndarray, dtype=int, shape=(k,)
If provided, spaced *k-mers* are used instead of continuous
ones :footcite:`Ma2002`.
The value contains the *informative* positions relative to the
start of the *k-mer*, also called the *model*.
The number of *informative* positions must equal *k*.
If a string is given, each ``'1'`` in the string indicates an
*informative* position.
For a continuous *k-mer* the `spacing` would be ``'111...'``.
If a list or array is given, it must contain unique non-negative
integers that indicate the *informative* positions.
For a continuous *k-mer* the `spacing` would be
``[0, 1, 2,...]``.
Attributes
----------
base_alphabet : Alphabet
The base alphabet, from which the :class:`KmerAlphabet` was
created.
k : int
The length of the *k-mers*.
spacing : None or ndarray, dtype=int
The *k-mer* model in array form, if spaced *k-mers* are used,
``None`` otherwise.
Notes
-----
The symbol code :math:`c` for a *k-mer* :math:`s` calculates as
.. math:: c = \sum_{i=0}^{k-1} n^{k-i-1} s_i
where :math:`n` is the length of the base alphabet.
Hence the :class:`KmerAlphabet` sorts *k-mers* in the order of the
base alphabet, where leading positions within the *k-mer* take
precedence.
References
----------
.. footbibliography::
Examples
--------
Create an alphabet of nucleobase *2-mers*:
>>> base_alphabet = NucleotideSequence.unambiguous_alphabet()
>>> print(base_alphabet.get_symbols())
['A', 'C', 'G', 'T']
>>> kmer_alphabet = KmerAlphabet(base_alphabet, 2)
>>> print(kmer_alphabet.get_symbols())
['AA', 'AC', 'AG', 'AT', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TC', 'TG', 'TT']
Encode and decode *k-mers*:
>>> print(kmer_alphabet.encode("TC"))
13
>>> print(kmer_alphabet.decode(13))
['T' 'C']
Fuse symbol codes from the base alphabet into a *k-mer* code
and split the *k-mer* code back into the original symbol codes:
>>> symbol_codes = base_alphabet.encode_multiple("TC")
>>> print(symbol_codes)
[3 1]
>>> print(kmer_alphabet.fuse(symbol_codes))
13
>>> print(kmer_alphabet.split(13))
[3 1]
Encode all overlapping continuous k-mers of a sequence:
>>> sequence = NucleotideSequence("ATTGCT")
>>> kmer_codes = kmer_alphabet.create_kmers(sequence.code)
>>> print(kmer_codes)
[ 3 15 14 9 7]
>>> print(["".join(kmer) for kmer in kmer_alphabet.decode_multiple(kmer_codes)])
['AT', 'TT', 'TG', 'GC', 'CT']
Encode all overlapping k-mers using spacing:
>>> base_alphabet = ProteinSequence.alphabet
>>> kmer_alphabet = KmerAlphabet(base_alphabet, 3, spacing="1101")
>>> sequence = ProteinSequence("BIQTITE")
>>> kmer_codes = kmer_alphabet.create_kmers(sequence.code)
>>> # Pretty print k-mers
>>> strings = ["".join(kmer) for kmer in kmer_alphabet.decode_multiple(kmer_codes)]
>>> print([s[0] + s[1] + "_" + s[2] for s in strings])
['BI_T', 'IQ_I', 'QT_T', 'TI_E']
"""
def __init__(self, base_alphabet, k, spacing=None):
if not isinstance(base_alphabet, Alphabet):
raise TypeError(
f"Got {type(base_alphabet).__name__}, "
f"but Alphabet was expected"
)
if k < 2:
raise ValueError("k must be at least 2")
self._base_alph = base_alphabet
self._k = k
base_alph_len = len(self._base_alph)
self._radix_multiplier = np.array(
[base_alph_len**n for n in reversed(range(0, self._k))],
dtype=np.int64
)
if spacing is None:
self._spacing = None
elif isinstance(spacing, str):
self._spacing = _to_array_form(spacing)
else:
self._spacing = np.array(spacing, dtype=np.int64)
self._spacing.sort()
if (self._spacing < 0).any():
raise ValueError(
"Only non-negative integers are allowed for spacing"
)
if len(np.unique(self._spacing)) != len(self._spacing):
raise ValueError(
"Spacing model contains duplicate values"
)
if spacing is not None and len(self._spacing) != self._k:
raise ValueError(
f"Expected {self._k} informative positions, "
f"but got {len(self._spacing)} positions in spacing"
)
@property
def base_alphabet(self):
return self._base_alph
@property
def k(self):
return self._k
@property
def spacing(self):
return None if self._spacing is None else self._spacing.copy()
def get_symbols(self):
"""
get_symbols()
Get the symbols in the alphabet.
Returns
-------
symbols : list
A list of all *k-mer* symbols, i.e. all possible
combinations of *k* symbols from its *base alphabet*.
Notes
-----
In contrast to the base :class:`Alphabet` and
:class:`LetterAlphabet` classes, :class:`KmerAlphabet` does not
hold a list of its symbols internally for performance reasons.
Hence calling :meth:`get_symbols()` may be quite time consuming
for large base alphabets or large *k* values, as the list needs
to be created first.
"""
if isinstance(self._base_alph, LetterAlphabet):
return ["".join(self.decode(code)) for code in range(len(self))]
else:
return [list(self.decode(code)) for code in range(len(self))]
def extends(self, alphabet):
# A KmerAlphabet cannot really extend another KmerAlphabet:
# If k is not equal, all symbols are not equal
# If the base alphabet has additional symbols, the correct
# order is not preserved
# A KmerAlphabet can only 'extend' another KmerAlphabet,
# if the two alphabets are equal
return alphabet == self
def encode(self, symbol):
return self.fuse(self._base_alph.encode_multiple(symbol))
def decode(self, code):
return self._base_alph.decode_multiple(self.split(code))
def fuse(self, codes):
"""
fuse(codes)
Get the *k-mer* code for *k* symbol codes from the base
alphabet.
This method can be used in a vectorized manner to obtain
*n* *k-mer* codes from an *(n,k)* integer array.
Parameters
----------
codes : ndarray, dtype=int, shape=(k,) or shape=(n,k)
The symbol codes from the base alphabet to be fused.
Returns
-------
kmer_codes : int or ndarray, dtype=np.int64, shape=(n,)
The fused *k-mer* code(s).
See also
--------
split
The reverse operation.
Examples
--------
>>> base_alphabet = NucleotideSequence.unambiguous_alphabet()
>>> kmer_alphabet = KmerAlphabet(base_alphabet, 2)
>>> symbol_codes = base_alphabet.encode_multiple("TC")
>>> print(symbol_codes)
[3 1]
>>> print(kmer_alphabet.fuse(symbol_codes))
13
>>> print(kmer_alphabet.split(13))
[3 1]
"""
if codes.shape[-1] != self._k:
raise AlphabetError(
f"Given k-mer(s) has {codes.shape[-1]} symbols, "
f"but alphabet expects {self._k}-mers"
)
if np.any(codes >= len(self._base_alph)):
raise AlphabetError("Given k-mer(s) contains invalid symbol code")
orig_shape = codes.shape
codes = np.atleast_2d(codes)
kmer_code = np.sum(self._radix_multiplier * codes, axis=-1)
# The last dimension is removed, since it collapsed in np.sum
return kmer_code.reshape(orig_shape[:-1])
def split(self, kmer_code):
"""
split(kmer_code)
Convert a *k-mer* code back into *k* symbol codes from the base
alphabet.
This method can be used in a vectorized manner to split
*n* *k-mer* codes into an *(n,k)* integer array.
Parameters
----------
kmer_code : int or ndarray, dtype=int, shape=(n,)
The *k-mer* code(s).
Returns
-------
codes : ndarray, dtype=np.int64, shape=(k,) or shape=(n,k)
The split symbol codes from the base alphabet.
See also
--------
fuse
The reverse operation.
Examples
--------
>>> base_alphabet = NucleotideSequence.unambiguous_alphabet()
>>> kmer_alphabet = KmerAlphabet(base_alphabet, 2)
>>> symbol_codes = base_alphabet.encode_multiple("TC")
>>> print(symbol_codes)
[3 1]
>>> print(kmer_alphabet.fuse(symbol_codes))
13
>>> print(kmer_alphabet.split(13))
[3 1]
"""
if np.any(kmer_code >= len(self)) or np.any(kmer_code < 0):
raise AlphabetError(
f"Given k-mer symbol code is invalid for this alphabet"
)
orig_shape = np.shape(kmer_code)
split_codes = self._split(
np.atleast_1d(kmer_code).astype(np.int64, copy=False)
)
return split_codes.reshape(orig_shape + (self._k,))
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
def _split(self, int64[:] codes not None):
cdef int i, n
cdef int64 code, val, symbol_code
cdef int64[:] radix_multiplier = self._radix_multiplier
cdef int64[:,:] split_codes = np.empty(
(codes.shape[0], self._k), dtype=np.int64
)
cdef int k = self._k
for i in range(codes.shape[0]):
code = codes[i]
for n in range(k):
val = radix_multiplier[n]
symbol_code = code // val
split_codes[i,n] = symbol_code
code -= symbol_code * val
return np.asarray(split_codes)
def kmer_array_length(self, int64 length):
"""
kmer_array_length(length)
Get the length of the *k-mer* array, created by
:meth:`create_kmers()`, if a sequence of size `length` were
given.
Parameters
----------
length : int
The length of the hypothetical sequence
Returns
-------
kmer_length : int
The length of created *k-mer* array.
"""
cdef int64 max_offset
cdef int64[:] spacing
if self._spacing is None:
return length - self._k + 1
else:
spacing = self._spacing
max_offset = self._spacing[len(spacing)-1] + 1
return length - max_offset + 1
def create_kmers(self, seq_code):
"""
create_kmers(seq_code)
Create *k-mer* codes for all overlapping *k-mers* in the given
sequence code.
Parameters
----------
seq_code : ndarray, dtype={np.uint8, np.uint16, np.uint32, np.uint64}
The sequence code to be converted into *k-mers*.
Returns
-------
kmer_codes : ndarray, dtype=int64
The symbol codes for the *k-mers*.
Examples
--------
>>> base_alphabet = NucleotideSequence.unambiguous_alphabet()
>>> kmer_alphabet = KmerAlphabet(base_alphabet, 2)
>>> sequence = NucleotideSequence("ATTGCT")
>>> kmer_codes = kmer_alphabet.create_kmers(sequence.code)
>>> print(kmer_codes)
[ 3 15 14 9 7]
>>> print(["".join(kmer) for kmer in kmer_alphabet.decode_multiple(kmer_codes)])
['AT', 'TT', 'TG', 'GC', 'CT']
"""
if self._spacing is None:
return self._create_continuous_kmers(seq_code)
else:
return self._create_spaced_kmers(seq_code)
@cython.boundscheck(False)
@cython.wraparound(False)
def _create_continuous_kmers(self, CodeType[:] seq_code not None):
"""
Fast implementation of k-mer decomposition.
Each k-mer is computed from the previous one by removing
a symbol, shifting the remaining values and adding the new
symbol. Requires looping only over the sequence length.
"""
cdef int64 i
cdef int k = self._k
cdef uint64 alphabet_length = len(self._base_alph)
cdef int64[:] radix_multiplier = self._radix_multiplier
cdef int64 end_radix_multiplier = alphabet_length**(k-1)
if len(seq_code) < <unsigned int>k:
raise ValueError(
"The length of the sequence code is shorter than k"
)
cdef int64[:] kmers = np.empty(
self.kmer_array_length(len(seq_code)), dtype=np.int64
)
cdef CodeType code
cdef int64 kmer, prev_kmer
# Compute first k-mer using naive approach
kmer = 0
for i in range(k):
code = seq_code[i]
if code >= alphabet_length:
raise AlphabetError(f"Symbol code {code} is out of range")
kmer += radix_multiplier[i] * code
kmers[0] = kmer
# Compute all following k-mers from the previous one
prev_kmer = kmer
for i in range(1, kmers.shape[0]):
code = seq_code[i + k - 1]
if code >= alphabet_length:
raise AlphabetError(f"Symbol code {code} is out of range")
kmer = (
(
# Remove first symbol
(prev_kmer - seq_code[i - 1] * end_radix_multiplier)
# Shift k-mer to left
* alphabet_length
)
# Add new symbol
+ code
)
kmers[i] = kmer
prev_kmer = kmer
return np.asarray(kmers)
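    # Reference sketch (descriptive comment; assumed-equivalent pure-NumPy
    # form of the decomposition above):
    #   radix = n ** np.arange(k - 1, -1, -1)
    #   kmers = [int(np.sum(radix * seq_code[i:i + k]))
    #            for i in range(len(seq_code) - k + 1)]
    # The loop above instead derives each k-mer from its predecessor in O(1):
    #   kmer[i] = (kmer[i-1] - seq_code[i-1] * n**(k-1)) * n + seq_code[i+k-1]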
@cython.boundscheck(False)
@cython.wraparound(False)
def _create_spaced_kmers(self, CodeType[:] seq_code not None):
cdef int64 i, j
cdef int k = self._k
cdef int64[:] spacing = self._spacing
# The last element of the spacing model
# defines the total k-mer 'span'
cdef int64 max_offset = spacing[len(spacing)-1] + 1
cdef uint64 alphabet_length = len(self._base_alph)
cdef int64[:] radix_multiplier = self._radix_multiplier
if len(seq_code) < <unsigned int>max_offset:
raise ValueError(
"The length of the sequence code is shorter "
"than the k-mer span"
)
cdef int64[:] kmers = np.empty(
self.kmer_array_length(len(seq_code)), dtype=np.int64
)
cdef CodeType code
cdef int64 kmer
cdef int64 offset
for i in range(kmers.shape[0]):
kmer = 0
for j in range(k):
offset = spacing[j]
code = seq_code[i + offset]
if code >= alphabet_length:
raise AlphabetError(f"Symbol code {code} is out of range")
kmer += radix_multiplier[j] * code
kmers[i] = kmer
return np.asarray(kmers)
def __str__(self):
return str(self.get_symbols())
def __repr__(self):
return f"KmerAlphabet({repr(self._base_alph)}, " \
f"{self._k}, {repr(self._spacing)})"
def __eq__(self, item):
if item is self:
return True
if not isinstance(item, KmerAlphabet):
return False
if self._base_alph != item._base_alph:
return False
if self._k != item._k:
return False
if self._spacing is None:
if item._spacing is not None:
return False
elif np.any(self._spacing != item._spacing):
return False
return True
def __len__(self):
return int(len(self._base_alph) ** self._k)
def _to_array_form(model_string):
"""
Convert the common string representation of a *k-mer* spacing
model into an array, e.g. ``'1*11'`` into ``[0, 2, 3]``.
"""
return np.array([
i for i in range(len(model_string)) if model_string[i] == "1"
], dtype=np.int64)
# mode: error
def f(obj2):
cdef int *ptr1
obj1 = obj2[ptr1::] # error
obj1 = obj2[:ptr1:] # error
obj1 = obj2[::ptr1] # error
cdef int a
cdef int* int_ptr
for a in int_ptr:
pass
for a in int_ptr[2:]:
pass
for a in int_ptr[2:2:a]:
pass
_ERRORS = u"""
5:16: Cannot convert 'int *' to Python object
6:17: Cannot convert 'int *' to Python object
7:18: Cannot convert 'int *' to Python object
12:9: C array iteration requires known end index
14:16: C array iteration requires known end index
16:21: C array iteration requires known step size and end index
"""
<|end_of_text|>cpdef double hyperplane(double[:,::1] compositions,
double[::1] energies,
double[::1] composition,
double[::1] chemical_potentials,
double[::1] result_fractions,
int[::1] result_simplex) except *
# -*- coding: latin-1 -*-
#cython: boundscheck=False
import os
import array
import zipfile
import bisect
class Primes():
_appdata = os.path.expanduser("~")
_primedata = "primenumbers.data"
_primezip = os.path.join(_appdata, "primenumbers.zip")
primes = array.array("L")
store_limit = 5*10**6 # largest prime we want to store
if str is bytes:
_ziplevel = zipfile.ZIP_DEFLATED
else:
_ziplevel = zipfile.ZIP_LZMA
if os.path.exists(_primezip):
mzip = zipfile.ZipFile(_primezip, "r")
pzin = mzip.read(_primedata)
primes.fromstring(pzin)
mzip.close()
del mzip, pzin
else:
primes.fromlist([2, 3, 5, 7, 11])
oldlen = len(primes)
def __init__(self, max_primes = 0):
"""Change maximum prime that can be stored to disk."""
if max_primes > 0:
self.store_limit = max_primes
#==============================================================================
# Use Miller-Rabin-Test for primality checks
#==============================================================================
def _enlarge(self, long long zahl):
"""Extend the primes array up to provided parameter."""
def mrtest(long long n):
# the actual Miller-Rabin logic ----------------------------------
def mrt(long long n, long long a):
cdef long long n1, d, t, p, j
n1 = n - 1
d = n1 >> 1
j = 1
while (d & 1) == 0:
d >>= 1
j += 1
t = a
p = a
while d: # square and multiply: a^d mod n
d >>= 1
p = p*p % n
if d & 1:
t = t*p % n
if t == 1 or t == n1:
return 1 # n is probably prime
for k in range(1, j):
t = t*t % n
if t == n1: return True
if t <= 1: break
return 0 # n is not prime
#-----------------------------------------------------------------
# testing the following a-values suffices for a deterministic test,
# if the checked number n <= 2**32
# see https://de.wikipedia.org/wiki/Miller-Rabin-Test
# n < 1,373,653 => alist = {2, 3}
# n < 9,080,191 => alist = {31, 73}
# n < 4,759,123,141 => alist = {2, 7, 61}
if n < 1373653:
alist = (2, 3)
elif n < 9080191:
alist = (31, 73)
else:
alist = (2, 7, 61)
for a in alist:
if not mrt(n, a):
return 0
return 1
cdef long long check
check = self.primes[len(self.primes)-1] # last stored prime
while check <= zahl:
check += 2 # stick with odd numbers
if mrtest(check):
self.primes.append(check)
return
#==============================================================================
# Save to disk (eventually) when object gets deleted
#==============================================================================
def __del__(self):
if self.oldlen >= len(self.primes): # did not go beyond old limit
return
if self.primes[len(self.primes)-1] > self.store_limit: # exceeds size limit
return
self.save()
def save(self):
with zipfile.ZipFile(self._primezip, "w", self._ziplevel) as mzip:
mzip.writestr(self._primedata, self.primes.tostring(), self._ziplevel)
mzip.close()
#==============================================================================
# Binary search for index of next prime
#==============================================================================
def _nxt_prime_idx(self, long long zahl):
"""Binary search for the smallest index i with zahl <= primes[i]
"""
cdef long long p
p = zahl
while zahl >= self.primes[len(self.primes)-1]: # larger than what we have so far?
p += 1000 # big enough for max. one loop
self._enlarge(p)
return bisect.bisect_left(self.primes, zahl)
#==============================================================================
# Calculate prime factors
#==============================================================================
def factors(self, long long zahl):
"""Return the prime factors of an integer as a list of lists. Each list consists of a prime factor and its exponent.
"""
cdef long long f, n, nz, i
if (type(zahl) is not int) or (zahl < 2):
raise ValueError("arg must be integer > 1")
if zahl == self.nextprime(zahl):
return [[zahl, 1]] # shortcut for primes
x = [] # initialize returned list
f = 1 # contains product of factors of zahl
for n in self.primes:
if f >= zahl: break
i = 0 # contains prime exponent
nz = zahl # start with zahl
while nz % n == 0:
nz = nz // n
f *= n
i += 1
if i > 0: x.append([n, i])
return x
#==============================================================================
# Deliver next prime
#==============================================================================
def nextprime(self, long long zahl):
"""Return the next prime following a provided number."""
return self.primes[self._nxt_prime_idx(zahl)]
#==============================================================================
# Deliver next prime twin
#==============================================================================
def nexttwin(self, long long zahl):
"""Return the first prime twin following a provided number."""
cdef long long start_here, p, p1, p2, i
start_here = max(self._nxt_prime_idx(zahl), 1) # prime twin must be GE...
p = zahl
while 1: # look repeatedly in case the next twin is not within known primes
for i in range(start_here, len(self.primes)):
p1 = self.primes[i - 1]
p2 = self.primes[i]
if zahl <= p1 and p1 + 2 == p2:
return (p1, p2)
start_here = len(self.primes) - 1
p += 1000 # big enough for max. one loop
self._enlarge(p)
def prev_twins(self, long long zahl):
"""Return the number of prime twins less or equal a given number."""
cdef long long p, j, i
p = zahl
while zahl > self.primes[len(self.primes)-1]: # larger than what we have so far?
p += 1000 # big enough for max. one loop
self._enlarge(p)
j = 0
for i in range(len(self.primes)-1):
if self.primes[i + 1] > zahl:
break
if self.primes[i] == self.primes[i + 1] - 2:
j += 1
return j
def prev_primes(self, long long zahl):
"""Return the number of primes less or equal a given number."""
cdef long long p
p = zahl
while zahl > self.primes[len(self.primes)-1]: # larger than what we have so far?
p += 1000 # big enough for max. one loop
self._enlarge(p)
return bisect.bisect_right(self.primes, zahl)
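# Usage sketch:
#   p = Primes()
#   p.nextprime(100)    # -> 101
#   p.nexttwin(100)     # -> (101, 103)
#   p.factors(360)      # -> [[2, 3], [3, 2], [5, 1]]
#   p.prev_primes(100)  # -> 25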
from libcpp.vector cimport vector
from libcpp.map cimport map
from libcpp.string cimport string
from libcpp cimport bool
from libcpp.queue cimport queue
from libcpp.utility cimport pair
ctypedef vector[vector[float]] IndAction
ctypedef vector[float] GovAction
ctypedef map[string, map[string, float]] Obs
cdef extern from "facility.cpp":
pass
cdef extern from "facility.h":
cdef cppclass Facility:
string type, name
float longtitude, latitude
int n_infected, n_infected_new
int basic_capacity, adjusted_capacity, n_d, current
float avg_frequency, infect_rate
bool disinfected, shut, rotate
vector[int] id_l
int robots, unexamined
Facility()
Facility(map[string, float] info, string name)
void reset()
float efficiency()
int operational_workers, full_workers
float revenue, ind_reward;
cdef extern from "city.cpp":
pass
cdef extern from "city.h":
cdef cppclass City:
Args args
map[string, int] n_p_hs
map[string, int] n_infected_new, n_infected
int n_infected_all
void reset()
City()
cdef extern from "individual.cpp":
pass
cdef extern from "individual.h":
cdef cppclass Individual:
void init(int type)
void reset()
float R0
int type, health, which_hospital, health_obs
double p_not_infected
double p_infect
double infected_rate
double supply_level
bool get_infected_now, has_examined_ill
bool examined, hospitalized
vector[float] action
map[string, int] has_gone
int get_obs()
"""
cdef extern from "EnvCWrapper.cpp": # to includet these code.
pass
cdef extern from "EnvCWrapper.h":
cdef cppclass EnvCWrapper:
EnvCWrapper() except + # let C++ handle the exception.
init(Args args) except +
void* env
Obs reset() except +
Step_returns step_(AllAction action) except +
"""
cdef extern from "environment.cpp":
pass
cdef extern from "environment.h":
cdef cppclass AllAction:
IndAction ind
GovAction gov
cdef cppclass Step_returns:
Obs gov_obs
vector[float] ind_obs
vector[vector[int]] action_count
vector[float] reward
vector[int] done
vector[map[string, float]] info
map[string, vector[float]] graph
cdef cppclass Env:
double t1, t2, t3, t4
int n_agent
vector[Individual] ind
vector[int] new_ind_hs
map[string, vector[Facility]] fac
City city
queue[int] online_queue
int stop_step, s_n
Args args
map[string, int] n_p_hs
map[string, int] n_infected_new, n_infected
vector[int] gov_unchanged_days, gov_last_action
int last_new_deaths, last_new_infections
int gov_current_robot
int n_infected_all, action_repeat, overall_accurate_infected, overall_observed_infected
vector[float] sum_r
map[string, vector[vector[int]]] graph
pair[vector[float], Obs] reset()
void init_epidemic()
int step_health_state(int id)
void init(Args &args)
Env()
vector[float] deal_gov_action(vector[float] &action_gov)
int where_recreational(int ID, vector[float] &action)
bool shutdowned(string facility_type, int facility_id)
int where_hospital(int ID, vector[float]& action)
bool is_workable(int ID)
bool is_outable(int ID)
bool is_working(int ID, vector[float]& action)
bool is_schooling(int ID, vector[float]& action)
bool is_disinfected(string &facility_type, int facility_id)
bool is_hospitalized(int ID)
bool is_examined(int ID)
void deal_debug(const string &facility_type, int facility_id)
void deal_market(const string &facility_type, int facility_id)
void deal_general(const string &facility_type, int facility_id)
int activity_level(vector[float] &action)
void deal_hospital(int facility_id)
int deal_ind_action(IndAction &ind_action)
Step_returns step(AllAction actions)
float parse_reward_ind(int id, vector[float] &action, int health_old)
vector[float] parse_reward_gov(float changing_penalty,float disinfect_cost,int num_of_tested, int num_of_robots, int new_deaths, int new_infections)
Obs parse_obs_gov()
vector[float] parse_obs_ind(int id)
cdef extern from "config.cpp":
pass
cdef extern from "config.h":
cdef cppclass Args:
map[string, int] action_size;
int n_agent, action_repeat, stop_step;
int hospitalized_capacity, hospital_testing_capacity, hosp_policy;
float hpn;
float recreational_reward, community_reward;
float work_reward;
float gov_disinfect_cost, gov_test_cost, gov_robot_maintainance;
float gov_death_penalty, gov_infection_penalty;
float gov_reward_adjust_ratio, gov_infected_penalty_growth, gov_death_penalty_growth;
int gov_action_size, gov_maximum_robot;
int maximum_online_request;
float mask_infected_rate, mask_infect_rate;
int n_disinfected_last;
float disinfected_rate;
float p_inc2pre, p_ina2asy, p_pre2sym, p_hos, p_rec_sym;
float p_sym2sev[3];
float p_sev2cri[3];
float p_cri2dea[3];
int agent_count[4];
float p_sev2rec, p_cri2rec, p_sev2cri_nhos, p_rec_asy;
float p_deimm_asy, p_deimm_sym;
float asy_pop_rate, asy_infect_rate, pre_infect_rate;
map[string, int] n_p, n_f;
vector[int] type;
map[string, vector[vector[int]]] graph;
map[string, int] f_capacity_debug;
map[string, vector[string]] name_f;
map[string, vector[map[string, float]]] info_f;
float beta;
Args();
void init();
from Types cimport *
from IsobaricQuantitationMethod cimport *
cdef extern from "<OpenMS/ANALYSIS/QUANTITATION/ItraqEightPlexQuantitationMethod.h>" namespace "OpenMS":
cdef cppclass ItraqEightPlexQuantitationMethod(IsobaricQuantitationMethod) :
# wrap-inherits:
# IsobaricQuantitationMethod
ItraqEightPlexQuantitationMethod() except + nogil # wrap-doc:iTRAQ 8 plex quantitation to be used with the IsobaricQuantitation
ItraqEightPlexQuantitationMethod(ItraqEightPlexQuantitationMethod &) except + nogil
ctypedef unsigned int uint
ctypedef unsigned long ulong
ctypedef unsigned short ushort
ctypedef long long llong
ctypedef unsigned int docid
ctypedef unsigned long wordid
cdef struct Hit:
ulong wordID
uint docID
ushort format
# records the range within the pos array for a single wordid
cdef struct WordWidth:
ulong left # left bound
ulong right # right bound
# number of distinct documents hit by this wordID
uint docnum
int pos # index of the record array
# inverted index
cdef struct Idx:
uint wordID
uint docID
uint score
# a single hitlist field
cdef struct HitList:
Hit *_list
ulong size
ulong space
import numpy as np
cimport numpy as np
from libc.math cimport tanh
from libc.math cimport copysign
from libc.math cimport fabs
cimport cython
ctypedef np.float64_t DT
cpdef solver(np.ndarray[DT, ndim=1, negative_indices=False, mode='c'] I, int N, int loop, double dt, np.ndarray[DT, ndim=1, negative_indices=False,
mode='c'] params, np.ndarray[DT, ndim=1, negative_indices=False, mode='c'] ramp):
cdef double sqrtdt = np.sqrt(dt)
cdef int n, i
cdef np.ndarray[DT, ndim=1, negative_indices=False,
mode='c'] u0 = np.zeros(N)
cdef np.ndarray[DT, ndim=1, negative_indices=False,
mode='c'] u1 = np.zeros(N)
cdef np.ndarray[DT, ndim=1, negative_indices=False,
mode='c'] u2 = np.zeros(N)
cdef np.ndarray[DT, ndim=1, negative_indices=False,
mode='c'] noise0 = np.random.normal(loc=0.0,scale=1.0, size=(N*loop))
cdef np.ndarray[DT, ndim=1, negative_indices=False,
mode='c'] noise1 = np.random.normal(loc=0.0,scale=1.0, size=(N*loop))
cdef np.ndarray[DT, ndim=1, negative_indices=False,
mode='c'] noise2 = np.random.normal(loc=0.0,scale=1.0, size=(N*loop))
cdef double ul0 = I[0]
cdef double ul1 = I[1]
cdef double ul2 = I[2]
cdef double k0, k1, k2
for n in range(N):
for i in range(loop):
k0 = dt*(-1+params[0]*tanh(ul0/params[1])+(params[3]*heaviside(ul0)-params[4])*ul0 +params[6]-params[2]+params[5]+ramp[loop*n+i])
k1 = dt*params[11]*(params[7]-params[10]*heaviside(ul0)*ul0-ul1-fabs(ul1-ul2)*ul1)
k2 = dt*params[11]*(params[8]-params[9]*ul2-fabs(ul1-ul2)*ul2)
ul0 = ul0 + k0 + params[12]*sqrtdt*noise0[loop*n+i]
ul1 = ul1 + k1 + params[11]*params[13]*sqrtdt*noise1[loop*n+i]
ul2 = ul2 + k2 + params[11]*params[14]*sqrtdt*noise2[loop*n+i]
u0[n] = ul0
u1[n] = ul1
u2[n] = ul2
return u0, u1, u2
cpdef double heaviside(double x):
return 0.5*(copysign(1, x) + 1)
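# Usage sketch (hypothetical values): params must hold the 15 coefficients
# indexed 0..14 above, and ramp must have N*loop entries.
#   I = np.array([0.1, 0.2, 0.3])
#   params = np.ones(15); ramp = np.zeros(1000 * 10)
#   u0, u1, u2 = solver(I, 1000, 10, 1e-3, params, ramp)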
from decimal import Decimal
import logging
from typing import Optional
from hummingbot.market.market_base cimport MarketBase
from hummingbot.market.market_base import MarketBase
from hummingbot.core.event.events import (
OrderType,
TradeType
)
from hummingbot.logger import HummingbotLogger
from .data_types import SizingProposal
from .pure_market_making_v2 cimport PureMarketMakingStrategyV2
# assumed local module; provides the OrderSizingDelegate base class used below
from .order_sizing_delegate cimport OrderSizingDelegate
s_logger = None
s_decimal_0 = Decimal(0)
cdef class InventorySkewMultipleSizeSizingDelegate(OrderSizingDelegate):
def __init__(self,
order_start_size: Decimal,
order_step_size: Decimal,
number_of_orders: int,
inventory_target_base_percent: Optional[Decimal] = None):
super().__init__()
self._order_start_size = order_start_size
self._order_step_size = order_step_size
self._number_of_orders = number_of_orders
self._inventory_target_base_percent = inventory_target_base_percent
@classmethod
def logger(cls) -> HummingbotLogger:
global s_logger
if s_logger is None:
s_logger = logging.getLogger(__name__)
return s_logger
@property
def order_start_size(self) -> Decimal:
return self._order_start_size
@property
def order_step_size(self) -> Decimal:
return self._order_step_size
@property
def number_of_orders(self) -> int:
return self._number_of_orders
cdef object c_get_order_size_proposal(self,
PureMarketMakingStrategyV2 strategy,
object market_info,
list active_orders,
object pricing_proposal):
cdef:
MarketBase market = market_info.market
str trading_pair = market_info.trading_pair
object base_asset_balance = market.c_get_available_balance(market_info.base_asset)
object quote_asset_balance = market.c_get_available_balance(market_info.quote_asset)
object current_quote_asset_order_size_total = s_decimal_0
object quote_asset_order_size
object current_base_asset_order_size_total = s_decimal_0
object top_bid_price
object top_ask_price
object mid_price
object total_base_asset_quote_value
object total_quote_asset_quote_value
object current_base_percent
object current_quote_percent
object target_base_percent
object target_quote_percent
object current_target_base_ratio
object current_target_quote_ratio
bint has_active_bid = False
bint has_active_ask = False
list buy_orders = []
list sell_orders = []
object current_bid_order_size
object current_ask_order_size
for active_order in active_orders:
if active_order.is_buy:
has_active_bid = True
quote_asset_balance += active_order.quantity * active_order.price
else:
has_active_ask = True
base_asset_balance += active_order.quantity
if has_active_bid and has_active_ask:
return SizingProposal([s_decimal_0], [s_decimal_0])
if self._inventory_target_base_percent is not None:
top_bid_price = market.c_get_price(trading_pair, False)
top_ask_price = market.c_get_price(trading_pair, True)
mid_price = (top_bid_price + top_ask_price) / Decimal(2)
total_base_asset_quote_value = base_asset_balance * mid_price
total_quote_asset_quote_value = quote_asset_balance
# Calculate percent value of base and quote
current_base_percent = total_base_asset_quote_value / (total_base_asset_quote_value + total_quote_asset_quote_value)
current_quote_percent = total_quote_asset_quote_value / (total_base_asset_quote_value + total_quote_asset_quote_value)
target_base_percent = self._inventory_target_base_percent
target_quote_percent = Decimal(1) - target_base_percent
# Calculate target ratio based on current percent vs. target percent
current_target_base_ratio = current_base_percent / target_base_percent \
if target_base_percent > s_decimal_0 else s_decimal_0
current_target_quote_ratio = current_quote_percent / target_quote_percent \
if target_quote_percent > s_decimal_0 else s_decimal_0
# By default 100% of order size is on both sides, therefore adjusted ratios should be 2 (100% + 100%).
# If target base percent is 0 (0%) target quote ratio is 200%.
# If target base percent is 1 (100%) target base ratio is 200%.
if current_target_base_ratio > Decimal(1) or current_target_quote_ratio == s_decimal_0:
current_target_base_ratio = Decimal(2) - current_target_quote_ratio
else:
current_target_quote_ratio = Decimal(2) - current_target_base_ratio
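# Numeric sketch: with current_base_percent = 0.75 and a 0.5 target,
# current_target_base_ratio = 0.75/0.5 = 1.5 > 1, so the quote ratio
# is set to 2 - 1.5 = 0.5: bid sizes shrink and ask sizes grow,
# selling down the excess base inventory.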
for i in range(self.number_of_orders):
current_bid_order_size = (self.order_start_size + self.order_step_size * i) * current_target_quote_ratio
current_ask_order_size = (self.order_start_size + self.order_step_size * i) * current_target_base_ratio
if market.name == "binance":
# For binance fees is calculated in base token, so need to adjust for that
quantized_bid_order_size = market.c_quantize_order_amount(
market_info.trading_pair,
current_bid_order_size,
pricing_proposal.buy_order_prices[i]
)
# Check whether you have enough quote tokens
quote_asset_order_size = quantized_bid_order_size * pricing_proposal.buy_order_prices[i]
if quote_asset_balance < current_quote_asset_order_size_total + quote_asset_order_size:
quote_asset_order_size = quote_asset_balance - current_quote_asset_order_size_total
bid_order_size = quote_asset_order_size / pricing_proposal.buy_order_prices[i]
quantized_bid_order_size = market.c_quantize_order_amount(
market_info.trading_pair,
bid_order_size,
pricing_proposal.buy_order_prices[i]
)
else:
quantized_bid_order_size = market.c_quantize_order_amount(
market_info.trading_pair,
current_bid_order_size
)
buy_fees = market.c_get_fee(
market_info.base_asset,
market_info.quote_asset,
OrderType.MARKET,
TradeType.BUY,
quantized_bid_order_size,
pricing_proposal.buy_order_prices[i]
)
# For other exchanges, fees is calculated in quote tokens, so need to ensure you have enough for order + fees
quote_asset_order_size = quantized_bid_order_size * pricing_proposal.buy_order_prices[i] * (Decimal(1) + buy_fees.percent)
if quote_asset_balance < current_quote_asset_order_size_total + quote_asset_order_size:
quote_asset_order_size = quote_asset_balance - current_quote_asset_order_size_total
bid_order_size = quote_asset_order_size / pricing_proposal.buy_order_prices[i] * (Decimal(1) - buy_fees.percent)
quantized_bid_order_size = market.c_quantize_order_amount(
market_info.trading_pair,
bid_order_size,
pricing_proposal.buy_order_prices[i]
)
current_quote_asset_order_size_total += quote_asset_order_size
quantized_ask_order_size = market.c_quantize_order_amount(
market_info.trading_pair,
current_ask_order_size,
pricing_proposal.sell_order_prices[i]
)
if base_asset_balance < current_base_asset_order_size_total + quantized_ask_order_size:
quantized_ask_order_size = market.c_quantize_order_amount(
market_info.trading_pair,
base_asset_balance - current_base_asset_order_size_total,
pricing_proposal.sell_order_prices[i]
)
current_base_asset_order_size_total += quantized_ask_order_size
if quantized_bid_order_size > s_decimal_0:
buy_orders.append(quantized_bid_order_size)
if quantized_ask_order_size | Cython |
> s_decimal_0:
sell_orders.append(quantized_ask_order_size)
return SizingProposal(
buy_orders if not has_active_bid and len(buy_orders) > 0 else [s_decimal_0],
sell_orders if not has_active_ask and len(sell_orders) > 0 else [s_decimal_0]
)
from collections.abc import Mapping
from sqlalchemy import exc
cdef tuple _Empty_Tuple = ()
cdef inline bint _mapping_or_tuple(object value):
return isinstance(value, dict) or isinstance(value, tuple) or isinstance(value, Mapping)
cdef inline bint _check_item(object params) except 0:
cdef object item
cdef bint ret = 1
if params:
item = params[0]
if not _mapping_or_tuple(item):
ret = 0
raise exc.ArgumentError(
"List argument must consist only of tuples or dictionaries"
)
return ret
def _distill_params_20(object params):
if params is None:
return _Empty_Tuple
elif isinstance(params, list) or isinstance(params, tuple):
_check_item(params)
return params
elif isinstance(params, dict) or isinstance(params, Mapping):
return [params]
else:
raise exc.ArgumentError("mapping or list expected for parameters")
def _distill_raw_params(object params):
if params is None:
return _Empty_Tuple
elif isinstance(params, list):
_check_item(params)
return params
elif _mapping_or_tuple(params):
return [params]
else:
raise exc.ArgumentError("mapping or sequence expected for parameters")
cdef class prefix_anon_map(dict):
def __missing__(self, str key):
cdef str derived
cdef int anonymous_counter
cdef dict self_dict = self
derived = key.split(" ", 1)[1]
anonymous_counter = self_dict.get(derived, 1)
self_dict[derived] = anonymous_counter + 1
value = f"{derived}_{anonymous_counter}"
self_dict[key] = value
return value
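# Usage sketch: keys are "<identifier> <derived-name>" strings; each new
# key yields "<derived>_<counter>" with a per-derived-name counter.
#   m = prefix_anon_map()
#   m["123 param"]   # -> "param_1"
#   m["456 param"]   # -> "param_2"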
cdef class cache_anon_map(dict):
cdef int _index
def __init__(self):
self._index = 0
def get_anon(self, obj):
cdef long long idself
cdef str id_
cdef dict self_dict = self
idself = id(obj)
if idself in self_dict:
return self_dict[idself], True
else:
id_ = self.__missing__(idself)
return id_, False
def __missing__(self, key):
cdef str val
cdef dict self_dict = self
self_dict[key] = val = str(self._index)
self._index += 1
return val
from libcpp.vector cimport vector
from primitiv.shape cimport Shape, _Shape, wrapShape
from primitiv.tensor cimport wrapTensor, _Tensor
from primitiv.device cimport wrapDevice, _Device
from primitiv.function cimport _Function
from primitiv.parameter cimport _Parameter
import numpy as np
from..utils cimport ndarray_to_vector
cdef class _Input(_Function):
"""
Input(shape, data, device):
or
Input(data, device):
"""
def __cinit__(self, *args):
if len(args) == 3:
self.wrapped = new Input((<_Shape> args[0]).wrapped, args[1], (<_Device> args[2]).wrapped[0])
if self.wrapped is NULL:
raise MemoryError()
elif len(args) == 2:
shape = _Shape(args[0].shape)
self.wrapped = new Input(shape.wrapped, ndarray_to_vector(args[0]), (<_Device> args[1]).wrapped[0])
if self.wrapped is NULL:
raise MemoryError()
else:
raise TypeError("Input() takes two or three arguments (%d given)" % len(args))
def __dealloc__(self):
cdef Input *temp
if self.wrapped is not NULL:
temp = <Input*> self.wrapped
del temp
self.wrapped = NULL
def get_device(self):
return wrapDevice((<Input*> self.wrapped).get_device())
def name(self):
return (<Input*> self.wrapped).name().decode("utf-8")
cdef class _ParameterInput(_Function):
def __cinit__(self, _Parameter param):
self.wrapped = new ParameterInput(param.wrapped[0])
if self.wrapped is NULL:
raise MemoryError()
def __dealloc__(self):
cdef ParameterInput *temp
if self.wrapped is not NULL:
temp = <ParameterInput*> self.wrapped
del temp
self.wrapped = NULL
def get_device(self):
return wrapDevice((<ParameterInput*> self.wrapped).get_device())
def get_inner_value(self):
return wrapTensor((<ParameterInput*> self.wrapped).get_inner_value()[0])
def name(self):
return (<ParameterInput*> self.wrapped).name().decode("utf-8")
cdef class _Copy(_Function):
def __init__(self, _Device device):
self.wrapped = new Copy(device.wrapped[0])
if self.wrapped is NULL:
raise MemoryError()
def __dealloc__(self):
cdef Copy *temp
if self.wrapped is not NULL:
temp = <Copy*> self.wrapped
del temp
self.wrapped = NULL
def get_device(self):
return wrapDevice((<Copy*> self.wrapped).get_device())
def name(self):
return (<Copy*> self.wrapped).name().decode("utf-8")
cdef class _Constant(_Function):
def __cinit__(self, _Shape shape, float k, _Device device):
self.wrapped = new Constant(shape.wrapped, k, device.wrapped[0])
if self.wrapped is NULL:
raise MemoryError()
def __dealloc__(self):
cdef Constant *temp
if self.wrapped is not NULL:
temp = <Constant*> self.wrapped
del temp
self.wrapped = NULL
def get_device(self):
return wrapDevice((<Constant*> self.wrapped).get_device())
def name(self):
return (<Constant*> self.wrapped).name().decode("utf-8")
cdef class _IdentityMatrix(_Function):
def __cinit__(self, unsigned size, _Device device):
self.wrapped = new IdentityMatrix(size, device.wrapped[0])
if self.wrapped is NULL:
raise MemoryError()
def __dealloc__(self):
cdef IdentityMatrix *temp
if self.wrapped is not NULL:
temp = <IdentityMatrix*> self.wrapped
del temp
self.wrapped = NULL
def get_device(self):
return wrapDevice((<IdentityMatrix*> self.wrapped).get_device())
def name(self):
return (<IdentityMatrix*> self.wrapped).name().decode("utf-8")
cdef class _RandomBernoulli(_Function):
def __cinit__(self, _Shape shape, float p, _Device device):
self.wrapped = new RandomBernoulli(shape.wrapped, p, device.wrapped[0])
if self.wrapped is NULL:
raise MemoryError()
def __dealloc__(self):
cdef RandomBernoulli *temp
if self.wrapped is not NULL:
temp = <RandomBernoulli*> self.wrapped
del temp
self.wrapped = NULL
def get_device(self):
return wrapDevice((<RandomBernoulli*> self.wrapped).get_device())
def name(self):
return (<RandomBernoulli*> self.wrapped).name().decode("utf-8")
cdef class _RandomUniform(_Function):
def __cinit__(self, _Shape shape, float lower, float upper, _Device device):
self.wrapped = new RandomUniform(shape.wrapped, lower, upper, device.wrapped[0])
if self.wrapped is NULL:
raise MemoryError()
def __dealloc__(self):
cdef RandomUniform *temp
if self.wrapped is not NULL:
temp = <RandomUniform*> self.wrapped
del temp
self.wrapped = NULL
def get_device(self):
return wrapDevice((<RandomUniform*> self.wrapped).get_device())
def name(self):
return (<RandomUniform*> self.wrapped).name().decode("utf-8")
cdef class _RandomNormal(_Function):
def __cinit__(self, _Shape shape, float mean, float sd, _Device device):
self.wrapped = new RandomNormal(shape.wrapped, mean, sd, device.wrapped[0])
if self.wrapped is NULL:
raise MemoryError()
def __dealloc__(self):
cdef RandomNormal *temp
if self.wrapped is not NULL:
temp = <RandomNormal*> self.wrapped
del temp
self.wrapped = NULL
def get_device(self):
return wrapDevice((<RandomNormal*> self.wrapped).get_device())
def name(self):
return (<RandomNormal*> self.wrapped).name().decode("utf-8")
cdef class _RandomLogNormal(_Function):
def __cinit__(self, _Shape shape, float mean, float sd, _Device device):
self.wrapped = new RandomLogNormal(shape.wrapped, mean, sd, device.wrapped[0])
if self.wrapped is NULL:
raise MemoryError()
def __dealloc__(self):
cdef RandomLogNormal *temp
if self.wrapped is not NULL:
temp = <RandomLogNormal*> self.wrapped
del temp
self.wrapped = NULL
def get_device(self):
return wrapDevice((<RandomLogNormal*> self.wrapped).get_device())
def name(self):
return (<RandomLogNormal*> self.wrapped).name().decode("utf-8")
cdef class _Pick(_Function):
def __cinit__(self, vector[unsigned] ids, unsigned dim):
self.wrapped = new Pick(ids, dim)
if self.wrapped is NULL:
raise MemoryError()
def __dealloc__(self):
cdef Pick *temp
if self.wrapped is not NULL:
temp = <Pick*> self.wrapped
del temp
self.wrapped = NULL
def name(self):
return (<Pick*> self.wrapped).name().decode("utf-8")
cdef class _Slice(_Function):
def __cinit__(self, unsigned dim, unsigned lower, unsigned upper):
self.wrapped = new Slice(dim, lower, upper)
if self.wrapped is NULL:
raise MemoryError()
def __dealloc__(self):
cdef Slice *temp
if self.wrapped is not NULL:
temp = <Slice*> self.wrapped
del temp
self.wrapped = NULL
def name(self):
return (<Slice*> self.wrapped).name().decode("utf-8")
cdef class _Concat(_Function):
def __cinit__(self, unsigned dim):
self.wrapped = new Concat(dim)
if self.wrapped is NULL:
raise MemoryError()
def __dealloc__(self):
cdef Concat *temp
if self.wrapped is not NULL:
temp = <Concat*> self.wrapped
del temp
self.wrapped = NULL
def name(self):
return (<Concat*> self.wrapped).name().decode("utf-8")
cdef class _Reshape(_Function):
def __cinit__(self, _Shape shape):
self.wrapped = new Reshape(shape.wrapped)
if self.wrapped is NULL:
raise MemoryError()
def __dealloc__(self):
cdef Reshape *temp
if self.wrapped is not NULL:
temp = <Reshape*> self.wrapped
del temp
self.wrapped = NULL
def name(self):
return (<Reshape*> self.wrapped).name().decode("utf-8")
cdef class _Sum(_Function):
def __cinit__(self, unsigned dim):
self.wrapped = new Sum(dim)
if self.wrapped is NULL:
raise MemoryError()
def __dealloc__(self):
cdef Sum *temp
if self.wrapped is not NULL:
temp = <Sum*> self.wrapped
del temp
self.wrapped = NULL
def name(self):
return (<Sum*> self.wrapped).name().decode("utf-8")
cdef class _LogSumExp(_Function):
def __cinit__(self, unsigned dim):
self.wrapped = new LogSumExp(dim)
if self.wrapped is NULL:
raise MemoryError()
def __dealloc__(self):
cdef LogSumExp *temp
if self.wrapped is not NULL:
temp = <LogSumExp*> self.wrapped
del temp
self.wrapped = NULL
def name(self):
return (<LogSumExp*> self.wrapped).name().decode("utf-8")
cdef class _Broadcast(_Function):
def __cinit__(self, unsigned dim, unsigned size):
self.wrapped = new Broadcast(dim, size)
if self.wrapped is NULL:
raise MemoryError()
def __dealloc__(self):
cdef Broadcast *temp
if self.wrapped is not NULL:
temp = <Broadcast*> self.wrapped
del temp
self.wrapped = NULL
def name(self):
return (<Broadcast*> self.wrapped).name().decode("utf-8")
cdef class _SoftmaxCrossEntropy(_Function):
def __cinit__(self, unsigned dim):
self.wrapped = new SoftmaxCrossEntropy(dim)
if self.wrapped is NULL:
raise MemoryError()
def __dealloc__(self):
cdef SoftmaxCrossEntropy *temp
if self.wrapped is not NULL:
temp = <SoftmaxCrossEntropy*> self.wrapped
del temp
self.wrapped = NULL
def name(self):
return (<SoftmaxCrossEntropy*> self.wrapped).name().decode("utf-8")
cdef class _SparseSoftmaxCrossEntropy(_Function):
def __cinit__(self, vector[unsigned] ids, unsigned dim):
self.wrapped = new SparseSoftmaxCrossEntropy(ids, dim)
if self.wrapped is NULL:
raise MemoryError()
def __dealloc__(self):
cdef SparseSoftmaxCrossEntropy *temp
if self.wrapped is not NULL:
temp = <SparseSoftmaxCrossEntropy*> self.wrapped
del temp
self.wrapped = NULL
def name(self):
return (<SparseSoftmaxCrossEntropy*> self.wrapped).name().decode("utf-8")
cdef class Time(object):
cdef readonly double time
cdef class Duration(object):
cdef readonly double duration
#!/usr/bin/env cython
# coding: utf-8
"""
Created on 1 November 2018
@author: jason
"""
cimport cython
from cpython.mem cimport PyMem_Malloc, PyMem_Free
from libc.stdlib cimport malloc, free, rand, RAND_MAX, srand
from libc.math cimport floor, fabs
@cython.cdivision(True)
cdef double randUniform() nogil:
return <double> rand() / RAND_MAX
cdef int randInt(int low, int high) nogil:
return <int> floor((high - low) * randUniform() + low)
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
cdef int rand_choice(int n, double * prob) nogil:
cdef int i
cdef double r
cdef double cuml
r = <double> rand() / RAND_MAX
cuml = 0.0
    for i in range(n):
        cuml = cuml + prob[i]
        if (r <= cuml):
            return i
    # guard against floating-point round-off: if the probabilities sum to
    # slightly less than 1, fall back to the last index
    return n - 1
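# Usage sketch (called from other Cython code; prob should sum to 1):
#   cdef double p[3]
#   p[0] = 0.2; p[1] = 0.3; p[2] = 0.5
#   i = rand_choice(3, p)   # returns 0, 1 or 2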
from Types cimport *
from libcpp cimport bool
from libcpp.vector cimport vector as libcpp_vector
from libcpp.pair cimport pair as libcpp_pair
from DPosition cimport *
cdef extern from "<OpenMS/COMPARISON/CLUSTERING/ClusteringGrid.h>" namespace "OpenMS":
cdef cppclass ClusteringGrid "OpenMS::ClusteringGrid":
ClusteringGrid(libcpp_vector[ double ] & grid_spacing_x, libcpp_vector[ double ] & grid_spacing_y) except + nogil
ClusteringGrid(ClusteringGrid &) except + nogil # compiler
libcpp_vector[ double ] getGridSpacingX() except + nogil
libcpp_vector[ double ] getGridSpacingY() except + nogil
void addCluster(libcpp_pair[int,int] cell_index, int & cluster_index) except + nogil # wrap-doc:Adds a cluster to this grid cell
void removeCluster(libcpp_pair[int,int] cell_index, int & cluster_index) except + nogil # wrap-doc:Removes a cluster from this grid cell and removes the cell if no other cluster left
void removeAllClusters() except + nogil # wrap-doc:Removes all clusters from this grid (and hence all cells)
# NAMESPACE # std::list[ int ] getClusters(CellIndex & cell_index) except + nogil
libcpp_pair[int,int] getIndex(DPosition2 position) except + nogil
bool isNonEmptyCell(libcpp_pair[int,int] cell_index) except + nogil # wrap-doc:Checks if there are clusters at this cell index
int getCellCount() except + nogil # wrap-doc:Returns number of grid cells occupied by one or more clusters
from exception.custom_exception cimport raise_py_error
from libcpp cimport bool
from libc.stdint cimport int64_t, uint32_t
from base.cgcbase cimport gcstring
from genapi.civalue cimport IValue
from baslerpylon.genapi.types cimport EIncMode, EDisplayNotation, ERepresentation
cdef extern from "genapi/IFloat.h" namespace 'GENAPI_NAMESPACE':
cdef cppclass IFloat (IValue):
# Set node value
# /*!
# \param Value The value to set
# \param Verify Enables AccessMode and Range verification (default = true)
# */
void SetValue(double Value, bool Verify) except +raise_py_error
void SetValue(double Value) except +raise_py_error
# Get node value
# /*!
# \param Verify Enables Range verification (default = false). The AccessMode is always checked
# \param IgnoreCache If true the value is read ignoring any caches (default = false)
# \return The value | Cython |
read
# */
double GetValue(bool Verify, bool IgnoreCache) except +raise_py_error
double GetValue(bool IgnoreCache) except +raise_py_error
double GetValue(bool Verify) except +raise_py_error
double GetValue() except +raise_py_error
# Get minimum value allowed
double GetMin() except +raise_py_error
# Get maximum value allowed
double GetMax() except +raise_py_error
# True if the float has a constant increment
bool HasInc() except +raise_py_error
# Get increment mode
EIncMode GetIncMode() except +raise_py_error
# Get the constant increment if there is any
double GetInc() except +raise_py_error
#double_autovector_t GetListOfValidValues( bool bounded) except +
#double_autovector_t GetListOfValidValues() except +
# Get recommended representation
ERepresentation GetRepresentation() except +raise_py_error
# Get the physical unit name
gcstring GetUnit() except +raise_py_error
# Get the way the float should be converted to a string
EDisplayNotation GetDisplayNotation() except +raise_py_error
# Get the precision to be used when converting the float to a string
int64_t GetDisplayPrecision() except +raise_py_error
# Restrict minimum value
void ImposeMin(double Value) except +raise_py_error
# Restrict maximum value
void ImposeMax(double Value) except +raise_py_error
cdef class GraphicsCompiler
from.instructions cimport InstructionGroup
cdef class GraphicsCompiler:
cdef InstructionGroup compile(self, InstructionGroup group)
'''
Created on Oct 2, 2013
@author: Wiehan
'''
import numpy as np
cimport numpy as np
from cython.parallel import prange
import cython
from data_processing.display_friendly import downsample_for_display
import matplotlib.pyplot as plt
from utils import get_cpu_count
@cython.boundscheck(False)
def find_discontinuities(np.ndarray[np.float64_t] signal, double tolerance=4, max_back=10):
    '''
    Find discontinuities in `signal`: locations where the absolute first
    difference exceeds `tolerance` times the RMS step size. Detected events
    are smoothed out in place; returns the corrected signal together with
    the list of (start, end) event index pairs.
    '''
cdef float std = 0
cdef int idx = 0
cdef int state = 0
cdef int start_pos = 0
cdef int x = len(signal)
cdef float difference
cdef int CPU_COUNT = get_cpu_count()
if CPU_COUNT == 1:
for idx in xrange(1, x):
difference = signal[idx] - signal[idx - 1]
std += difference * difference
else:
with nogil:
for idx in prange(1, x, num_threads=CPU_COUNT):
difference = signal[idx] - signal[idx - 1]
std += difference * difference
std = np.sqrt(std / x)
events = []
cdef float threshold = tolerance * std
for idx in xrange(1, len(signal)):
difference = signal[idx] - signal[idx - 1]
if difference < 0:
difference *= -1
if state == 0:
if difference > threshold:
state = 1
start_pos = idx
elif state == 1:
if difference > threshold:
state = 2
else:
state = 0
elif state == 2:
if difference < threshold:
events.append((start_pos, idx - 1))
state = 0
last_event_end = 0
for idx, tup in enumerate(events):
back = max(0, last_event_end + 1, tup[0] - max_back)
view = signal[back:tup[0]+1]
if len(view) == 0:
print 'empty slice', back, tup[0]
baseline = np.mean(view)
window = np.exp(np.log(0.0001) / len(view) * np.arange(len(view)))
signal[back : tup[0] + 1] = (signal[back : tup[0] + 1] - baseline) * window + baseline
signal[tup[0]: tup[1] + 1] = baseline
next_event_start = 0
if idx == len(events) - 1:
next_event_start = len(signal)
else:
next_event_start = events[idx + 1][0]
forward = min(len(signal), next_event_start + 1, tup[1] + max_back)
end_baseline = np.mean(signal[tup[1] + 1:forward])
view = signal[tup[1] + 1:forward]
window = np.exp(np.log(0.0001) / len(view) * np.arange(len(view)))[::-1]
signal[tup[1] + 1:forward] = (signal[tup[1] + 1:forward] - end_baseline) * window + end_baseline
jump = -(end_baseline - baseline)
signal[tup[1] + 1:next_event_start] = signal[tup[1] + 1:next_event_start] + jump
last_event_end = tup[1]
return signal, events
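# Added usage sketch (hedged; not part of the original module): a synthetic
# signal with one injected step, exercising the (signal, events) contract of
# find_discontinuities above.
def _example_find_discontinuities():
    sig = np.cumsum(np.random.randn(10000))
    sig[5000:] += 50.0  # artificial step discontinuity
    corrected, events = find_discontinuities(sig, tolerance=4, max_back=10)
    # events is the list of (start, end) index pairs smoothed in place
    return corrected, events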
<|end_of_text|>###############################################################################
# axion_ll.pyx
###############################################################################
#
# Class to calculate the log-likelihood of an observed data set given model
# parameters
#
###############################################################################
# Import basic functions
import numpy as np
cimport numpy as np
cimport cython
from speed_dist cimport get_vObs
from speed_dist cimport f_SHM
# C math functions
cdef extern from "math.h":
double pow(double x, double y) nogil
double log(double x) nogil
double sqrt(double x) nogil
# Physical Constants
cdef double pi = np.pi
cdef double c = 299792.458 # speed of light [km/s]
######################
# External functions #
######################
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
@cython.initializedcheck(False)
cdef double stacked_ll(double[::1] freqs, double[::1] PSD, double mass,
double A, double v0, double vObs, double lambdaB,
double num_stacked) nogil:
""" Stacked log likelihood for a given PSD dataset and model params
:param freqs: frequencies scanned over [Hz]
:param PSD: power spectral density data at those frequencies [Wb^2/Hz]
:param mass: axion mass [angular frequency Hz]
:param A: signal strength parameters [Wb^2]
:param v0: velocity dispersion of SHM [km/s]
:param vObs: lab/observer/Earth speed w.r.t. the galactic frame [km/s]
:param lambdaB: mean background noise [Wb^2/Hz]
:param num_stacked: number of stackings
:returns: log likelihood (ll)
"""
# Set up length of input data and output variable
cdef int N_freqs = freqs.shape[0]
cdef double ll = 0.0
# Set up loop variables
cdef double vSq, v, lambdaK
# Scan from the frequency of mass up to some value well above the peak
# of the velocity distribution
cdef double fmin = mass / 2. / pi
cdef double fmax = fmin * (1+2*(vObs + v0)**2 / c**2)
fmin *= (1-(vObs + v0)**2 / c**2)
cdef int fmin_Index = getIndex(freqs, fmin)
cdef int fmax_Index = getIndex(freqs, fmax)
cdef Py_ssize_t ifrq
for ifrq in range(fmin_Index, fmax_Index):
vSq = 2. * (2.*pi*freqs[ifrq]-mass) / mass
if vSq > 0:
v = sqrt(vSq)
lambdaK = A * pi * f_SHM(v, v0/c, vObs/c) / mass / v + lambdaB
else:
lambdaK = lambdaB
if lambdaK <= 0:
lambdaK = 1e-5
ll += -PSD[ifrq] / lambdaK - log(lambdaK)
return ll * num_stacked
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
@cython.initializedcheck(False)
cdef double SHM_AnnualMod_ll(double[::1] freqs, double[:, ::1] PSD, double mass,
double A, double v0, double vDotMag, double alpha,
double tbar, double lambdaB,
double num_stacked) nogil:
""" log likelihood with annual modulation for a given PSD dataset and
model params
:param freqs: frequencies scanned over [Hz]
:param PSD: power spectral density data at those frequencies [Wb^2/Hz]
:param mass: axion mass [angular frequency Hz]
:param A: signal strength parameters [Wb^2]
:param v0: velocity dispersion of SHM [km/s]
    :param vDotMag: velocity of the sun w.r.t. the galactic frame [km/s]
:param alpha/tbar: scalar quantities defining direction of vDot
:param lambdaB: mean background noise [Wb^2/Hz]
:param num_stacked: number of stackings
:returns: log likelihood (ll)
"""
# Set up length of input data and output variable
cdef int N_freqs = freqs.shape[0]
cdef int N_days = PSD.shape[0]
cdef double ll = 0.0
# Set up loop variables
cdef double v, vObs, lambdaK
# Scan from the frequency of mass up to some value well above the peak
# of the velocity distribution
cdef double fmin = mass / 2. / pi
cdef double fmax = fmin * (1+3*(vDotMag + v0)**2 / c**2)
cdef int fmin_Index = getIndex(freqs, fmin)
cdef int fmax_Index = getIndex(freqs, fmax)
cdef Py_ssize_t ifrq, iDay
for iDay in range(N_days):
vObs = get_vObs(vDotMag, alpha, tbar, iDay)
for ifrq in range(fmin_Index, fmax_Index):
v = sqrt(2. * (2.*pi*freqs[ifrq]-mass) / mass)
lambdaK = A * pi * f_SHM(v, v0/c, vObs/c) / mass / v + lambdaB
ll += -PSD[iDay, ifrq] / lambdaK - log(lambdaK)
return ll * num_stacked
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
@cython.initializedcheck(False)
cdef double Sub_AnnualMod_ll(double[::1] freqs, double[:, ::1] PSD, double mass,
double A, double v0_Halo, double vDotMag_Halo,
double alpha_Halo, double tbar_Halo, double v0_Sub,
double vDotMag_Sub, double alpha_Sub,
double tbar_Sub, double frac_Sub, double lambdaB,
double num_stacked) nogil:
""" log likelihood with annual modulation and substructure for a given PSD
dataset and model params
:param freqs: frequencies scanned over [Hz]
:param PSD: power spectral density data at those frequencies [Wb^2/Hz]
:param mass: axion mass [angular frequency Hz]
:param A: signal strength parameters [Wb^2]
Following 4 parameters defined for the Halo (_Halo) and substructure (_Sub)
:param v0: velocity dispersion of SHM [km/s]
:param vDotMag: velocity of the sun w.r.t. the galactic frame [km/s]
:param alpha/tbar: scalar quantities defining direction of vDot
:param frac_Sub: fraction of local DM in the substructure
:param lambdaB: mean background noise [Wb^2/Hz]
:param num_stacked: number of stackings
:returns: log likelihood (ll)
"""
# Set up length of input data and output variable
cdef int N_freqs = freqs.shape[0]
cdef int N_days = PSD.shape[0]
cdef double ll = 0.0
# Set up loop variables
cdef double v, vObs_Halo, vObs_Sub, lambdaK
# Scan from the frequency of mass up to some value well above the peak
# of the velocity distribution
cdef double fmin = mass / 2. / pi
cdef double fmax = fmin * (1+3*(vDotMag_Halo + v0_Halo)**2 / c**2)
cdef int fmin_Index = getIndex(freqs, fmin)
cdef int fmax_Index = getIndex(freqs, fmax)
cdef Py_ssize_t ifrq, iDay
for iDay in range(N_days):
vObs_Halo = get_vObs(vDotMag_Halo, alpha_Halo, tbar_Halo, iDay)
vObs_Sub = get_vObs(vDotMag_Sub, alpha_Sub, tbar_Sub, iDay)
for ifrq in range(fmin_Index, fmax_Index):
v = sqrt(2.0*(2.0*pi*freqs[ifrq]-mass)/ mass)
lambdaK = (1-frac_Sub) * A * pi * \
f_SHM(v, v0_Halo/c, vObs_Halo/c) / mass / v
lambdaK += frac_Sub * A * pi * \
f_SHM(v, v0_Sub/c, vObs_Sub/c) / mass / v
lambdaK += lambdaB
ll += -PSD[iDay, ifrq] / lambdaK - log(lambdaK)
return ll * num_stacked
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
@cython.initializedcheck(False)
cdef int getIndex(double[::1] freqs, double target) nogil:
""" Sort through an ordered array of frequencies (freqs) to find the
    nearest value to a specific f (target)
"""
cdef int N_freqs = freqs.shape[0]
cdef Py_ssize_t i
if freqs[0] > target:
return 0
for i in range(N_freqs-1):
if freqs[i] <= target and freqs[i+1] > target:
return i+1
return N_freqs-1
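# Added aside (hedged sketch, not in the original source): since `freqs` is
# sorted, getIndex is the linear-scan equivalent of a binary search; a NumPy
# helper with the same semantics for Python-level callers:
def get_index_py(freqs, target):
    """First index with freqs[i] > target, clipped to [0, len(freqs) - 1],
    matching the scan in getIndex above."""
    return int(np.clip(np.searchsorted(freqs, target, side='right'),
                       0, len(freqs) - 1))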
<|end_of_text|>from libcpp.string cimport string
from libcpp cimport bool as cpp_bool
from libcpp.deque cimport deque
from .slice_ cimport Slice
from .logger cimport Logger
from .std_memory cimport shared_ptr
cdef extern from "rocksdb/merge_operator.h" namespace "rocksdb":
cdef cppclass MergeOperator:
pass
ctypedef cpp_bool (*merge_func)(
void*,
const Slice&,
const Slice*,
const Slice&,
string*,
Logger*)
ctypedef cpp_bool (*full_merge_func)(
void* ctx,
const Slice& key,
const Slice* existing_value,
const deque[string]& operand_list,
string* new_value,
Logger* logger)
ctypedef cpp_bool (*partial_merge_func)(
void* ctx,
const Slice& key,
const Slice& left_op,
const Slice& right_op,
string* new_value,
Logger* logger)
cdef extern from "cpp/merge_operator_wrapper.hpp" namespace "py_rocks":
cdef cppclass AssociativeMergeOperatorWrapper:
AssociativeMergeOperatorWrapper(string, void*, merge_func) nogil except+
cdef cppclass MergeOperatorWrapper:
MergeOperatorWrapper(
string,
void*,
void*,
full_merge_func,
partial_merge_func) nogil except+
<|end_of_text|>cimport libav as lib
cdef class VideoReformatter(object):
def __dealloc__(self):
with nogil: lib.sws_freeContext(self.ptr)
<|end_of_text|>#cython: boundscheck=False, wraparound=False, nonecheck=False, cdivision=True
import numpy
cimport numpy
cimport libc.math
import matplotlib.pyplot as plt
import skimage.filters
cpdef segment(image, double b = 5.0):
cdef int N = image.shape[0]
cdef numpy.ndarray[numpy.double_t, ndim = 2] Y = image.astype('double')
cdef numpy.ndarray[numpy.uint64_t, ndim = 2] X = numpy.zeros((N, N)).astype('uint64')
cdef numpy.ndarray[numpy.double_t, ndim = 2] p = numpy.zeros((N, N))
threshold = skimage.filters.threshold_otsu(Y)
X[Y > threshold] = 1
cdef int samples = 10
cdef int rpts = 10
cdef numpy.ndarray[numpy.uint64_t, ndim = 3] T = numpy.zeros((N, N, 2)).astype('uint64')
cdef numpy.ndarray[numpy.uint64_t, ndim = 2] TT = numpy.zeros((N, N)).astype('uint64')
cdef int i, j, v
for i in range(N):
for j in range(N):
T[i, j, X[i, j]] = 1
cdef int r, tmp
cdef double[2] u = [0.0, 0.0]
cdef double[2] sig2 = [0.0, 0.0]
cdef double psum
cdef int[2] counts
cdef double p0, p1
cdef numpy.ndarray[numpy.double_t, ndim = 2] randoms
for r in range(rpts):
for i in range(N):
for j in range(N):
tmp = 0
for c in range(2):
tmp += T[i, j, c]
TT[i, j] = tmp
for c in range(2):
for i in range(N):
for j in range(N):
                    # cast to double so cdivision does not truncate the ratio
                    p[i, j] = (<double> T[i, j, c]) / TT[i, j]
            psum = 0.0
            # reset per-class statistics each EM iteration (they are
            # accumulated below and must not carry over between repeats)
            u[c] = 0.0
            sig2[c] = 0.0
for i in range(N):
for j in range(N):
psum += p[i, j]
u[c] += p[i, j] * Y[i, j]
u[c] /= psum
for i in range(N):
for j in range(N):
sig2[c] += p[i, j] * (Y[i, j] - u[c])**2
sig2[c] /= psum
print u, sig2
T = numpy.zeros((N, N, 2)).astype('uint64')
for s in range(samples):
# The paper "The EM/MPM Algorithm for Segmentation of Textured Images: Analysis and Further Experimental Results" says I should only change one pixel at a time
# I saw an implementation from http://www.bluequartz.net/ (EMMPM workbench) where I think
# they changed more than one pixel at a time (I don't think I really understood the code so I could be wrong).
#
# I think changing more than one pixel is fine. You're just making bigger jumps in state space,
# so I'm doin' it here too.
#
for i in range(N):
for j in range(N):
im = 0 if i - 1 < 0 else i - 1
ip = i + 1 if i + 1 < N else N - 1
jm = 0 if j - 1 < 0 else j - 1
jp = j + 1 if j + 1 < N else N - 1
counts = [0, 0]
counts[X[ip, j]] += 1
counts[X[im, j]] += 1
counts[X[i, jp]] += 1
counts[X[i, jm]] += 1
p0 = libc.math.exp(-((Y[i, j] - u[X[i, j]])**2) / (2.0 * sig2[X[i, j]]) - b * counts[1 - X[i, j]])
p1 = libc.math.exp(-((Y[i, j] - u[1 - X[i, j]])**2) / (2.0 * sig2[1 - X[i, j]]) - b * counts[X[i, j]])
p[i, j] = p0 / (p0 + p1)
randoms = numpy.random.rand(N, N)
for i in range(N):
for j in range(N):
if randoms[i, j] > p[i, j]:
X[i, j] = 1 - X[i, j]
T[i, j, X[i, j]] += 1
return T[:, :, 1] / (T[:, :, 0] + T[:, :, 1]).astype('double')
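# Added usage sketch (hedged; skimage.data is assumed available alongside the
# skimage.filters import above): segment() expects a square grayscale image
# and returns the per-pixel posterior probability of the bright class.
def _example_segment():
    import skimage.data
    img = skimage.data.camera()[:256, :256]  # any square image works
    posterior = segment(img, b=5.0)
    return posterior > 0.5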
<|end_of_text|>cdef extern from "<math.h>" nogil:
const double M_E
const double e "M_E" # as in Python's math module
const double M_LOG2E
const double M_LOG10E
const double M_LN2
const double M_LN10
const double M_PI
const double pi "M_PI" # as in Python's math module
const double M_PI_2
const double M_PI_4
const double M_1_PI
const double M_2_PI
const double M_2_SQRTPI
const double M_SQRT2
const double M_SQRT1_2
# C99 constants
const float INFINITY
const float NAN
# note: not providing "nan" and "inf" aliases here as nan() is a function in C
const double HUGE_VAL
const float HUGE_VALF
const long double HUGE_VALL
# All C99 functions in alphabetical order
double acos(double x)
float acosf(float)
double acosh(double x)
float acoshf(float)
long double acoshl(long double)
long double acosl(long double)
double asin(double x)
float asinf(float)
double asinh(double x)
float asinhf(float)
long double asinhl(long double)
long double asinl(long double)
double atan(double x)
double atan2(double y, double x)
float atan2f(float, float)
long double atan2l(long double, long double)
float atanf(float)
double atanh(double x)
float atanhf(float)
long double atanhl(long double)
long double atanl(long double)
double cbrt(double x)
float cbrtf(float)
long double cbrtl(long double)
double ceil(double x)
float ceilf(float)
long double ceill(long double)
double copysign(double, double)
float copysignf(float, float)
long double copysignl(long double, long double)
double cos(double x)
float cosf(float)
double cosh(double x)
float coshf(float)
long double coshl(long double)
long double cosl(long double)
double erf(double)
double erfc(double)
float erfcf(float)
long double erfcl(long double)
float erff(float)
long double erfl(long double)
double exp(double x)
double exp2(double x)
float exp2f(float)
long double exp2l(long double)
float expf(float)
long double expl(long double)
double expm1(double x)
float expm1f(float)
long double expm1l(long double)
double fabs(double x)
float fabsf(float)
long double fabsl(long double)
double fdim(double x, double y)
float fdimf(float, float)
long double fdiml(long double, long double)
double floor(double x)
float floorf(float)
long double floorl(long double)
double fma(double x, double y, double z)
float fmaf(float, float, float)
long double fmal(long double, long double, long double)
double fmax(double x, double y)
float fmaxf(float, float)
long double fmaxl(long double, long double)
double fmin(double x, double y)
float fminf(float, float)
long double fminl(long double, long double)
double fmod(double x, double y)
float fmodf(float, float)
long double fmodl(long double, long double)
double frexp(double x, int* exponent)
float frexpf(float, int* exponent)
long double frexpl(long double, int*)
double hypot(double x, double y)
float hypotf(float, float)
long double hypotl(long double, long double)
int ilogb(double x)
int ilogbf(float)
int ilogbl(long double)
double ldexp(double x, int exponent)
float ldexpf(float, int exponent)
long double ldexpl(long double, int exponent)
double lgamma(double x)
float lgammaf(float)
long double lgammal(long double)
long long llrint(double)
long long llrintf(float)
long long llrintl(long double)
long long llround(double)
long long llroundf(float)
long long llroundl(long double)
double log(double x)
double log10(double x)
float log10f(float)
long double log10l(long double)
double log1p(double x)
float log1pf(float)
long double log1pl(long double)
double log2(double x)
float log2f(float)
long double log2l(long double)
double logb(double x)
float logbf(float)
long double logbl(long double)
float logf(float)
long double logl(long double)
long lrint(double)
long lrintf(float)
long lrintl(long double)
long lround(double)
long lroundf(float)
long lroundl(long double)
double modf(double x, double* iptr)
float modff(float, float* iptr)
long double modfl(long double, long double* iptr)
double nan(const char*)
float nanf(const char*)
long double nanl(const char*)
double nearbyint(double x)
float nearbyintf(float)
long double nearbyintl(long double)
double nextafter(double, double)
float nextafterf(float, float)
long double nextafterl(long double, long double)
double nexttoward(double, long double)
float nexttowardf(float, long double)
long double nexttowardl(long double, long double)
double pow(double x, double y)
float powf(float, float)
long double powl(long double, long double)
double remainder(double x, double y)
float remainderf(float, float)
long double remainderl(long double, long double)
double remquo(double x, double y, int* quot)
float remquof(float, float, int* quot)
long double remquol(long double, long double, int* quot)
double rint(double x)
float rintf(float)
    long double rintl(long double)
double round(double x)
float roundf(float)
long double roundl(long double)
double scalbln(double x, long n)
float scalblnf(float, long)
long double scalblnl(long double, long)
double scalbn(double x, int n)
float scalbnf(float, int)
long double scalbnl(long double, int)
double sin(double x)
float sinf(float)
double sinh(double x)
float sinhf(float)
long double sinhl(long double)
long double sinl(long double)
double sqrt(double x)
float sqrtf(float)
long double sqrtl(long double)
double tan(double x)
float tanf(float)
double tanh(double x)
float tanhf(float)
long double tanhl(long double)
long double tanl(long double)
double tgamma(double x)
float tgammaf(float)
long double tgammal(long double)
double trunc(double x)
float truncf(float)
long double truncl(long double)
int isinf(long double) # -1 / 0 / 1
bint isfinite(long double)
bint isnan(long double)
bint isnormal(long double)
bint signbit(long double)
int fpclassify(long double)
const int FP_NAN
const int FP_INFINITE
const int FP_ZERO
const int FP_SUBNORMAL
const int FP_NORMAL
<|end_of_text|># SPDX-FileCopyrightText: Copyright 2021, Siavash Ameli <[email protected]>
# SPDX-License-Identifier: BSD-3-Clause
# SPDX-FileType: SOURCE
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the license found in the LICENSE.txt file in the root
# directory of this source tree.
# =======
# Imports
# =======
import numpy
from ..kernels import Kernel
from ..kernels cimport Kernel
__all__ = ['estimate_kernel_threshold', 'estimate_max_nnz']
# ==============
# gamma function
# ==============
def _gamma_function(dimension):
"""
Computes the gamma function of the half integer dimension/2+1.
:param dimension: Dimension of space.
:type dimension: int
:return: Gamma function of dimension/2 + 1.
:rtype: float
"""
# Compute Gamma(dimension/2 + 1)
if dimension % 2 == 0:
k = 0.5 * dimension
gamma = 1.0
while k > 0.0:
gamma *= k
k -= 1.0
else:
k = numpy.ceil(0.5 * dimension)
gamma = numpy.sqrt(numpy.pi)
while k > 0.0:
gamma *= k - 0.5
k -= 1.0
return gamma
# ===========
# ball radius
# ===========
def _ball_radius(volume, dimension):
"""
Computes the radius of n-ball at dimension n, given its volume.
:param volume: Volume of n-ball
:type volume: double
:param dimension: Dimension of embedding space
:type dimension: int
:return: radius of n-ball
:rtype: double
"""
# Compute gamma function of dimension/2+1
gamma = _gamma_function(dimension)
# radius from volume
radius = (gamma * volume)**(1.0 / dimension) / numpy.sqrt(numpy.pi)
return radius
# ===========
# ball volume
# ===========
def _ball_volume(radius, dimension):
"""
    Computes the volume of n-ball at dimension n, given its radius.
    :param radius: Radius of n-ball
    :type radius: double
    :param dimension: Dimension of embedding space
    :type dimension: int
    :return: volume of n-ball
    :rtype: double
"""
# Compute gamma function of dimension/2+1
gamma = _gamma_function(dimension)
# volume from radius
volume = (radius * numpy.sqrt(numpy.pi))**(dimension) / gamma
return volume
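# Added sanity check (hedged): for dimension = 2 the helpers above reduce to
# the familiar circle relations, since Gamma(2) = 1.
def _sanity_check_ball_helpers():
    assert abs(_gamma_function(2) - 1.0) < 1e-12
    assert abs(_ball_volume(1.0, 2) - numpy.pi) < 1e-12   # unit disk area
    assert abs(_ball_radius(numpy.pi, 2) - 1.0) < 1e-12   # inverse relation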
# =========================
# estimate kernel threshold
# =========================
def estimate_kernel_threshold(
matrix_size,
dimension,
density,
scale,
kernel):
"""
Estimates the kernel's tapering threshold to sparsify a dense matrix into a
sparse matrix with the requested density.
Here is how density :math:`\\rho` is related to the kernel_threshold
:math:`\\tau`:
.. math::
a = \\rho n = \\mathrm{Vol}_{d}(r/l),
\\tau = k(r),
where:
* :math:`n` is the number of points in the unit hypercube, also it is
the matrix size.
* :math:`d` is the dimension of space.
* :math:`\\mathrm{Vol}_{d}(r/l)` is the volume of d-ball of radius
:math:`r/l`.
* :math:`l = 1/(\\sqrt[d]{n} - 1)` is the grid size along each axis,
      assuming the points are placed on an equidistant structured grid.
* :math:`k` is the Matern correlation function.
* :math:`a` is the adjacency of a point, which is the number of
the neighbor points that are correlated to a point.
* :math:`\\rho` is the sparse matrix density (input to this function).
* :math:`\\tau` is the kernel threshold (output of this function).
The adjacency :math:`a` is the number of points on an integer lattice
and inside a d-ball. This quantity can be approximated by the volume of a
d-ball, see for instance
    `Gauss circle problem <https://en.wikipedia.org/wiki/Gauss_circle_problem>`_
in 2D.
A non-zero kernel threshold is used to sparsify a matrix by tapering its
correlation function. However, if the kernel threshold is too large, some
elements of the correlation matrix will not be correlated to any other
neighbor point. This leads to a correlation matrix with some rows that have
only one non-zero element equal to one on the diagonal and zero elsewhere.
    Essentially, if all points lose their correlation to a neighbor, the
matrix becomes identity.
This function checks if a set of parameters to form a sparse matrix could
lead to this issue. The correlation matrix in this module is formed by the
mutual correlation between spatial set of points in the unit hypercube. We
assume each point is surrounded by a sphere of the radius of the kernel
    threshold. If this radius is large enough, the spheres of all points
    intersect. If this criterion is not met, this function raises
    ``ValueError``.
:param matrix_size: The size of the square matrix. This is also the number
of points used to construct the correlation matrix.
:type matrix_size: int
:param dimension: The dimension of the space of points used to construct
the correlation matrix.
:type dimension: int
    :param density: The desired density of the sparse matrix. Note that
        the actual density of the generated matrix will not be exactly equal
        to this value. If the matrix size is large, this value is close to
        the actual matrix density.
    :type density: float
    :param scale: Correlation scale(s) of the kernel along each axis; spatial
        distances are divided by these values.
    :type scale: numpy.ndarray
    :param kernel: The tapering kernel, evaluated at the tapering radius.
    :type kernel: Kernel
:return: Kernel threshold level
:rtype: double
"""
# Number of neighbor points to be correlated in a neighborhood of a point
adjacency_volume = density * matrix_size
# If Adjacency is less that one, the correlation matrix becomes identity
# since no point will be adjacent to other in the correlation matrix.
if adjacency_volume < 1.0:
raise ValueError(
'Adjacency: %0.2f. Correlation matrix will become identity '
% (adjacency_volume) +
            'since kernel radius is less than grid size. To increase '
'adjacency, consider increasing density or scale.')
# Volume of an ellipsoid with radii of the components of the correlation
# scale is equivalent to the volume of an d-ball with the radius of the
# geometric mean of the correlation scale elements
    dimension = scale.size
    geometric_mean_radius = numpy.prod(scale)**(1.0 / dimension)
correlation_ellipsoid_volume = _ball_volume(geometric_mean_radius,
dimension)
# Normalize the adjacency volume with the volume of an ellipsoid of the
# correlation scale radii
adjacency_volume /= correlation_ellipsoid_volume
# Approximate radius of n-sphere containing the above number of adjacent
# points, assuming adjacent points are distanced on integer lattice.
adjacency_radius = _ball_radius(adjacency_volume, dimension)
# Number of points along each axis of the grid
grid_axis_num_points = matrix_size**(1.0 / dimension)
# Size of grid elements
grid_size = 1.0 / (grid_axis_num_points - 1.0)
# Scale the integer lattice of adjacency radius by the grid size.
    # This is the tapering radius of the kernel
kernel_radius = grid_size * adjacency_radius
# Threshold of kernel to perform tapering
kernel_threshold = kernel(kernel_radius)
return kernel_threshold
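# Added usage sketch (hedged): `my_kernel` stands in for any Kernel-like
# callable from ..kernels; the numbers are illustrative only.
def _example_threshold(my_kernel):
    # kernel values below the returned threshold are tapered to zero so that
    # roughly density * matrix_size**2 entries of the matrix survive
    return estimate_kernel_threshold(
        matrix_size=10000, dimension=2, density=0.01,
        scale=numpy.array([0.1, 0.1]), kernel=my_kernel)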
# ================
# estimate max nnz
# ================
def estimate_max_nnz(
matrix_size,
scale,
dimension,
density):
"""
Estimates the maximum number of nnz needed to store the indices and data of
the generated sparse matrix. Before the generation of the sparse matrix,
its nnz (number of non-zero elements) are not known. Thus, this function
only guesses this value based on its density.
:param matrix_size: The size of the square matrix. This is also the number
of points used to construct the correlation matrix.
:type matrix_size: int
:param dimension: The dimension of the space of points used to construct
the correlation matrix.
:type dimension: int
    :param scale: Correlation scale(s) of the kernel along each axis.
    :type scale: numpy.ndarray
    :param density: The desired density of the sparse matrix. Note that
        the actual density of the generated matrix will not be exactly equal
        to this value. If the matrix size is large, this value is close to
        the actual matrix density.
    :type density: float
:return: maximum non-zero elements of sparse array
:rtype: double
"""
estimated_nnz = int(numpy.ceil(density * (matrix_size**2)))
# Normalize correlation scale so that its largest element is one
normalized_scale = scale / numpy.max(scale)
# Get the geometric mean of the normalized correlation
geometric_mean_radius = \
numpy.prod(normalized_scale)**(1.0/dimension)
# Multiply the estimated nnz by unit hypercube over unit ball volume ratio
unit_hypercube_volume = 1.0
    safety_coeff = unit_hypercube_volume / \
        _ball_radius(geometric_mean_radius, dimension)
    max_nnz = int(numpy.ceil(safety_coeff * estimated_nnz))
return max_nnz
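# Added worked example (hedged): with density 0.01 and matrix_size 10000 the
# raw estimate is ceil(0.01 * 10000**2) = 1,000,000 nnz, which is then
# inflated by the hypercube-to-ball safety ratio computed above.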
<|end_of_text|># cython: boundscheck=False, wraparound=False, cdivision=True
# Authors: Luis Scoccola
# License: 3-clause BSD
import numpy as np
cimport numpy as np
np.import_array()
DTYPE = np.float64
ctypedef np.float64_t DTYPE_t
def lazy_intersection(np.ndarray[DTYPE_t, ndim=1] increasing, np.ndarray[DTYPE_t, ndim=1] increasing2, np.float64_t s0, np.float64_t k0) :
    # find first occurrence of s0 - (s0/k0) * increasing[i] <= increasing2[i]
assert increasing.dtype == DTYPE and increasing2.dtype == DTYPE
cdef np.float64_t mu = s0/k0
cdef int first = 0
cdef int last = increasing.shape[0]-1
cdef int midpoint
cdef int res1
cdef int res2
with nogil:
if s0 - mu * increasing[first] <= increasing2[first] :
res1, res2 = first, False
elif s0 - mu * increasing[last] > increasing2[last] :
res1, res2 = last, True
else:
while first+1 < last :
midpoint = (first + last)//2
if s0 - mu * increasing[midpoint] <= increasing2[midpoint] :
last = midpoint
else:
first = midpoint
res1, res2 = last, False
return res1, res2
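# Added usage sketch (hedged; not part of the original module): finds the
# first index i with s0 - (s0/k0) * increasing[i] <= increasing2[i]; the
# second return value flags the not-found case.
def _example_lazy_intersection():
    inc1 = np.linspace(0.0, 1.0, 100)
    inc2 = np.linspace(0.0, 2.0, 100)
    idx, not_found = lazy_intersection(inc1, inc2, 0.5, 1.0)
    return idx, not_found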
<|end_of_text|>#
# Copyright (c) 2021 by Kristoffer Paulsson <[email protected]>.
#
# This software is available under the terms of the MIT license. Parts are licensed under
# different terms if stated. The legal terms are attached to the LICENSE file and are
# made available on:
#
# https://opensource.org/licenses/MIT
#
# SPDX-License-Identifier: MIT
#
# Contributors:
# Kristoffer Paulsson - initial implementation
#
"""Implementation of intermediary transport and the standard noise protocol for Angelos."""
import asyncio
import logging
import os
from asyncio import CancelledError
from asyncio.protocols import Protocol
from asyncio.transports import Transport
from typing import Any, Union, Callable
from angelos.bin.nacl import PublicKey, SecretKey, Backend_25519_ChaChaPoly_BLAKE2b
# TODO: Certify that this implementation of Noise_XX_25519_ChaChaPoly_BLAKE2b
# is interoperable with other implementations.
class HandshakeError(RuntimeWarning):
"""The handshake failed."""
pass
class NonceDepleted(RuntimeWarning):
"""The nonce is depleted and connection must be terminated."""
class CipherState:
"""State of the cipher algorithm."""
def __init__(self):
self.k = None
self.n = None
class SymmetricState:
"""The symmetric state of the protocol."""
def __init__(self):
self.h = None
self.ck = None
self.cipher_state = None
class HandshakeState:
"""The handshake state."""
def __init__(self):
self.symmetric_state = None
self.s = None
self.e = None
self.rs = None
self.re = None
class NoiseProtocol(Backend_25519_ChaChaPoly_BLAKE2b):
"""Static implementation of Noise Protocol Noise_XX_25519_ChaChaPoly_BLAKE2b."""
MAX_MESSAGE_LEN = 2 ** 16 - 1
MAX_NONCE = 2 ** 64 - 1
__slots__ = (
"_name", "_initiator", "_handshake_hash", "_handshake_state", "_symmetric_state", "_cipher_state_handshake",
"_cipher_state_encrypt", "_cipher_state_decrypt", "_static_key"
)
def __init__(self, initiator: bool, static_key: SecretKey):
Backend_25519_ChaChaPoly_BLAKE2b.__init__(self, 64, 64)
self._name = b"Noise_XX_25519_ChaChaPoly_BLAKE2b"
self._initiator = initiator
self._handshake_hash = None
self._handshake_state = None
self._symmetric_state = None
self._cipher_state_handshake = None
self._cipher_state_encrypt = None
self._cipher_state_decrypt = None
self._static_key = static_key
@property
def protocol(self) -> bytes:
return self._name
@property
def handshake_hash(self) -> bytes:
return self._handshake_hash
def _initialize_key(self, cs: CipherState, key):
"""Reset a cipher state with a new key."""
cs.k = key
cs.n = 0
def _encrypt_with_ad(self, cs: CipherState, ad: bytes, plaintext: bytes) -> bytes:
"""Encrypt a message with additional data."""
if cs.n == self.MAX_NONCE:
raise NonceDepleted()
if cs.k is None:
return plaintext
ciphertext = self._encrypt(cs.k, cs.n.to_bytes(8, "little"), plaintext, ad)
cs.n += 1
return ciphertext
def _decrypt_with_ad(self, cs: CipherState, ad: bytes, ciphertext: bytes) -> bytes:
"""Decrypt cipher using additional data."""
if cs.n == self.MAX_NONCE:
raise NonceDepleted()
if cs.k is None:
return ciphertext
plaintext = self._decrypt(cs.k, cs.n.to_bytes(8, "little"), ciphertext, ad)
cs.n += 1
return plaintext
def _mix_key(self, ss: SymmetricState, input_key_material: bytes):
"""Mix key with data"""
ss.ck, temp_k = self._hkdf2(ss.ck, input_key_material)
if self.hashlen == 64:
temp_k = temp_k[:32]
self._initialize_key(ss.cipher_state, temp_k)
def _mix_hash(self, ss: SymmetricState, data: bytes):
"""Mix hash with data."""
ss.h = self._hash(ss.h + data)
def _encrypt_and_hash(self, ss: SymmetricState, plaintext: bytes) -> bytes:
"""Encrypt a message, then mix the hash with the cipher."""
ciphertext = self._encrypt_with_ad(ss.cipher_state, ss.h, plaintext)
self._mix_hash(ss, ciphertext)
return ciphertext
def _decrypt_and_hash(self, ss: SymmetricState, ciphertext: bytes) -> bytes:
"""Decrypt a message, then hash with the cipher"""
plaintext = self._decrypt_with_ad(ss.cipher_state, ss.h, ciphertext)
self._mix_hash(ss, ciphertext)
return plaintext
async def _initiator_xx(self, writer: Callable, reader: Callable):
"""Shake hand as initiator according to XX."""
# Step 1
# WRITE True e
buffer = bytearray()
self._handshake_state.e = self._generate() if self._handshake_state.e is None else self._handshake_state.e
buffer += self._handshake_state.e.pk
self._mix_hash(self._handshake_state.symmetric_state, self._handshake_state.e.pk)
buffer += self._encrypt_and_hash(self._handshake_state.symmetric_state, b"")
writer(buffer)
# Step 2
        message = await reader()
# READ True e
self._handshake_state.re = PublicKey(bytes(message[:self.dhlen]))
message = message[self.dhlen:]
self._mix_hash(self._handshake_state.symmetric_state, self._handshake_state.re.pk)
# READ True ee
self._mix_key(self._handshake_state.symmetric_state,
self._dh(self._handshake_state.e.sk, self._handshake_state.re.pk))
# READ True s
if self._cipher_state_handshake.k is not None:
temp = bytes(message[:self.dhlen + 16])
message = message[self.dhlen + 16:]
else:
temp = bytes(message[:self.dhlen])
message = message[self.dhlen:]
self._handshake_state.rs = PublicKey(self._decrypt_and_hash(self._handshake_state.symmetric_state, temp))
# READ True es
self._mix_key(self._handshake_state.symmetric_state,
self._dh(self._handshake_state.e.sk, self._handshake_state.rs.pk))
        if self._decrypt_and_hash(self._handshake_state.symmetric_state, bytes(message)) != b"":
raise HandshakeError()
# Step 3
# WRITE True s
buffer = bytearray()
buffer += self._encrypt_and_hash(self._handshake_state.symmetric_state, self._handshake_state.s.pk)
# WRITE True se
self._mix_key(self._handshake_state.symmetric_state,
self._dh(self._handshake_state.s.sk, self._handshake_state.re.pk))
buffer += self._encrypt_and_hash(self._handshake_state.symmetric_state, b"")
writer(buffer)
async def _responder_xx(self, writer: Callable, reader: Callable):
"""Shake hand as responder according to XX."""
message = await reader()
# READ False e
self._handshake_state.re = PublicKey(bytes(message[:self.dhlen]))
message = message[self.dhlen:]
self._mix_hash(self._handshake_state.symmetric_state, self._handshake_state.re.pk)
        if self._decrypt_and_hash(self._handshake_state.symmetric_state, bytes(message)) != b"":
raise HandshakeError()
buffer = bytearray()
# WRITE False e
self._handshake_state.e = self._generate() if self._handshake_state.e is None else self._handshake_state.e
buffer += self._handshake_state.e.pk
self._mix_hash(self._handshake_state.symmetric_state, self._handshake_state.e.pk)
# WRITE False ee
self._mix_key(self._handshake_state.symmetric_state,
self._dh(self._handshake_state.e.sk, self._handshake_state.re.pk))
# WRITE False s
buffer += self._encrypt_and_hash(self._handshake_state.symmetric_state, self._handshake_state.s.pk)
# WRITE False es
self._mix_key(self._handshake_state.symmetric_state,
self._dh(self._handshake_state.s.sk, self._handshake_state.re.pk))
buffer += self._encrypt_and_hash(self._handshake_state.symmetric_state, b"")
writer(buffer)
message = await reader()
buffer = bytearray()
# READ False s
if self._cipher_state_handshake.k is not None:
temp = bytes(message[:self.dhlen + 16])
message = message[self.dhlen + 16:]
else:
temp = bytes(message[:self.dhlen])
message = message[self.dhlen:]
self._handshake_state.rs = PublicKey(self._decrypt_and_hash(self._handshake_state.symmetric_state, temp))
# READ False se
self._mix_key(self._handshake_state.symmetric_state,
self._dh(self._handshake_state.e.sk, self._handshake_state.rs.pk))
        if self._decrypt_and_hash(self._handshake_state.symmetric_state, bytes(message)) != b"":
raise HandshakeError()
async def start_handshake(self, writer: Callable, reader: Callable):
"""Do noise protocol handshake."""
ss = SymmetricState()
if len(self._name) <= self.hashlen:
ss.h = self._name.ljust(self.hashlen, b"\0")
else:
ss.h = self._hash(self._name)
ss.ck = ss.h
ss.cipher_state = CipherState()
self._initialize_key(ss.cipher_state, None)
self._cipher_state_handshake = ss.cipher_state
hs = HandshakeState()
hs.symmetric_state = ss
self._mix_hash(hs.symmetric_state, b"") # Empty prologue
hs.s = self._static_key
self._handshake_state = hs
self._symmetric_state = self._handshake_state.symmetric_state
if self._initiator:
await self._initiator_xx(writer, reader)
else:
await self._responder_xx(writer, reader)
temp_k1, temp_k2 = self._hkdf2(self._handshake_state.symmetric_state.ck, b"")
if self.hashlen == 64:
temp_k1 = temp_k1[:32]
temp_k2 = temp_k2[:32]
c1, c2 = CipherState(), CipherState()
self._initialize_key(c1, temp_k1)
self._initialize_key(c2, temp_k2)
if self._initiator:
self._cipher_state_encrypt = c1
self._cipher_state_decrypt = c2
else:
self._cipher_state_encrypt = c2
self._cipher_state_decrypt = c1
self._handshake_hash = self._symmetric_state.h
self._handshake_state = None
self._symmetric_state = None
self._cipher_state_handshake = None
def encrypt(self, data: bytes) -> bytes:
"""Encrypt data into a cipher before writing."""
if len(data) > self.MAX_MESSAGE_LEN:
raise ValueError("Data must be less or equal to {}.".format(self.MAX_MESSAGE_LEN))
return self._encrypt_with_ad(self._cipher_state_encrypt, None, data)
def decrypt(self, data: bytes) -> bytes:
"""Decrypt a cipher into data before reading."""
if len(data) > self.MAX_MESSAGE_LEN:
raise ValueError("Data must be less or equal to {}".format(self.MAX_MESSAGE_LEN))
return self._decrypt_with_ad(self._cipher_state_decrypt, None, data)
# And he made in Jerusalem engines, invented by cunning men, to be on
# the towers and upon the bulwarks, to shoot arrows and great stones
# withal. And his name spread far abroad; for he was marvellously
# helped, till he was strong. (2 Chronicles 26:15 KJV)
class IntermediateTransportProtocol(Transport, Protocol):
"""Intermediate layer in between a native transport and a program protocol,
for manipulating I/O such as encryption."""
DIVERT = 1
PASSTHROUGH = 2
EAVESDROP = 3
__slots__ = ("_loop", "_mode", "_protocol", "_transport", "_task_conn", "_task_close", "_read", "_write")
def __init__(self, protocol: Protocol):
Transport.__init__(self)
Protocol.__init__(self)
self._loop = asyncio.get_running_loop()
self._protocol = protocol
self._transport = None
self._task_conn = None
self._task_close = None
self._mode = None
self._read = None
self._write = None
self._set_mode(self.PASSTHROUGH)
async def _on_connection(self) -> None:
"""Connection made handler, overwrite to use."""
pass
async def _on_close(self) -> None:
"""Close handler, overwrite to use."""
pass
def _on_write(self, data: Union[bytes, bytearray, memoryview]) -> Union[bytes, bytearray, memoryview]:
"""Process written data before handed to the native transport. Overwrite to use."""
return data
def _on_received(self, data: Union[bytes, bytearray, memoryview]) -> Union[bytes, bytearray, memoryview]:
"""Process received data before handed to the application protocol. Overwrite to use."""
return data
def _on_lost(self) -> None:
"""Lost connection handler, overwrite to use."""
pass
def _on_eof(self) -> None:
""""Received eof handler, overwrite to use."""
pass
async def _reader(self):
"""Receives a copy of the data from the underlying native transport in DIVERT and EAVESDROP mode.
This method should be awaited when in use for interrupting flow.
Use self._transport.write() to respond."""
data = await self._read
self._read.__init__(loop=self._read.get_loop())
return data
async def _writer(self):
"""Receives a copy of the data from the overlying application protocol in DIVERT and EAVESDROP mode.
This method should be awaited when in use for interrupting flow.
Use self._protocol.received_data() to respond."""
data = await self._write
self._write.__init__(loop=self._write.get_loop())
return data
def _set_mode(self, mode: int):
"""Set any of the modes DIVERT, PASSTHROUGH, EAVESDROP and configure futures."""
self._mode = mode
if mode is self.PASSTHROUGH:
self._read = None
            self._write = None
else:
self._read = self._read if self._read else self._loop.create_future()
self._write = self._write if self._write else self._loop.create_future()
    ## Methods below belong to the Transport part.
def get_extra_info(self, name: str, default: Any = None) -> Any:
"""Passthroughs to underlying transport."""
return self._transport.get_extra_info(name, default)
def is_closing(self) -> bool:
"""Passthroughs to underlying transport."""
        return self._transport.is_closing() or self._task_close is not None
def close(self) -> None:
"""Close from application protocol with flow interruption."""
if self._task_close:
return
def done(fut):
fut.result()
self._transport.close()
self._task_close = asyncio.create_task(self._on_close())
self._task_close.add_done_callback(done)
def set_protocol(self, protocol: Protocol) -> None:
"""Passthroughs to underlying transport."""
self._protocol = protocol
def get_protocol(self) -> Protocol:
"""Passthroughs to underlying transport."""
return self._protocol
def is_reading(self) -> bool:
"""Passthroughs to underlying transport."""
return self._transport.is_reading()
def pause_reading(self) -> None:
"""Passthroughs to underlying transport."""
self._transport.pause_reading()
def resume_reading(self) -> None:
"""Passthroughs to underlying transport."""
self._transport.resume_reading()
def set_write_buffer_limits(self, high: int = None, low: int = None) -> None:
"""Passthroughs to underlying transport."""
self._transport.set_write_buffer_limits(high, low)
def get_write_buffer_size(self) -> int:
"""Passthroughs to underlying transport."""
return self._transport.get_write_buffer_size()
def write(self, data: Union[bytes, bytearray, memoryview]) -> None:
"""Writes data to the transport generally with flow interruption depending on mode.
DIVERT will interrupt and cancel flow to underlying transport.
PASSTHROUGH will first transform flow and then pass on to underlying transport.
EAVESDROP will first interrupt flow, then transform and lastly pass to underlying transport.
"""
if self._mode is not self.PASSTHROUGH:
self._write.set_result(data)
if self._mode is not self.DIVERT:
data = self._on_write(data)
self._transport.write(data)
def write_eof(self) -> None:
"""Passthroughs to underlying transport."""
self._transport.write_eof()
def can_write_eof(self) -> bool:
"""Passthroughs to underlying transport."""
return self._transport.can_write_eof()
def abort(self) -> None:
"""Passthroughs to underlying transport."""
self._transport.abort()
    ## Methods below belong to the Protocol part.
def connection_made(self, transport: Transport) -> None:
"""Connection made on underlying transport. Flow is interrupted and operations can be
done before forwarded to the overlaying protocol."""
self._transport = transport
if self._task_conn:
raise BlockingIOError("Can only run one connection made at a time.")
def done(fut):
fut.result()
self._protocol.connection_made(self)
self._task_conn = None
self._task_conn = asyncio.create_task(self._on_connection())
self._task_conn.add_done_callback(done)
def connection_lost(self, exc: Exception) -> None:
"""Connection loss is interrupted and can be managed before forwarding to overlaying protocol."""
self._on_lost()
self._protocol.connection_lost(exc)
if self._task_conn:
self._task_conn.cancel()
if self._task_close:
self._task_close.cancel()
def pause_writing(self) -> None:
"""Passthroughs to overlying protocol."""
self._protocol.pause_writing()
def resume_writing(self) -> None:
"""Passthroughs to overlying protocol."""
self._protocol.resume_writing()
def data_received(self, data: Union[bytes, bytearray, memoryview]) -> None:
"""Reads data to the protocol generally with flow interruption depending on mode.
DIVERT will interrupt and cancel flow to overlying protocol.
PASSTHROUGH will first transform flow and then pass on to overlying protocol.
EAVESDROP will first transform flow, then pass to overlying protocol and lastly interrupt flow."""
if self._mode is not self.DIVERT:
data = self._on_received(data)
self._protocol.data_received(data)
if self._mode is not self.PASSTHROUGH:
self._read.set_result(data)
def eof_received(self) -> bool:
"""Eof is interrupted and can be managed before forwarding to overlaying protocol."""
self._on_eof()
return self._protocol.eof_received()
class NoiseTransportProtocol(IntermediateTransportProtocol):
__slots__ = ("_noise", "_server")
def __init__(self, protocol: Protocol, server: bool = False, key: bytes = None):
IntermediateTransportProtocol.__init__(self, protocol)
self._noise = NoiseProtocol(not server, SecretKey(os.urandom(32)))
self._event = asyncio.Event()
self._event.set()
self._server = server
async def _on_connection(self):
"""Perform noise protocol handshake before telling application protocol connection_made()."""
try:
self._set_mode(IntermediateTransportProtocol.DIVERT)
await self._noise.start_handshake(self._transport.write, self._reader)
self._set_mode(IntermediateTransportProtocol.PASSTHROUGH)
except CancelledError:
pass
# async def _on_close(self) -> None:
# """Clean up protocol."""
# self._protocol.close()
def _on_write(self, data: Union[bytes, bytearray, memoryview]) -> Union[bytes, bytearray, memoryview]:
"""Encrypt outgoing data with Noise."""
cipher = self._noise.encrypt(data)
self._event.clear()
return cipher
def _on_received(self, cipher: Union[bytes, bytearray, memoryview]) -> Union[bytes, bytearray, memoryview]:
"""Decrypt incoming data with Noise."""
data = self._noise.decrypt(cipher)
self._event.set()
return data
async def wait(self):
await self._event.wait()
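# Added usage sketch (hedged): how a NoiseTransportProtocol is expected to
# wrap an application protocol on the client side; the factory, host and port
# below are illustrative placeholders, not part of the Angelos API.
async def _example_client(app_protocol_factory: Callable, host: str = "127.0.0.1", port: int = 8888):
    """Open a client connection with Noise_XX encryption wrapped around an
    application protocol produced by `app_protocol_factory`."""
    loop = asyncio.get_running_loop()
    # the intermediate transport performs the handshake before the app
    # protocol sees connection_made()
    return await loop.create_connection(
        lambda: NoiseTransportProtocol(app_protocol_factory(), server=False),
        host, port)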
<|end_of_text|>
import numpy as np
import matplotlib.pylab as pl
import scipy.integrate as sciint
import scipy.optimize as opt
import scipy.linalg as sp_linalg
import scipy.special as sp_special
from parameters import *
cimport cython
def phi(x):
return sp_special.erf(x)
def phi_prime(x):
return 2./np.sqrt(pi)*np.exp(-x**2)
def phi_pprime(x):
return -4*x/np.sqrt(pi)*np.exp(-x**2)
def E_phi_phi(C, tilC):
#print("C=", C)
def integrand_num(x):
p_x = np.exp(-x**2/(2.*C) + 1./2.*tilC*phi(x)**2)
return p_x * phi(x)**2
def integrand_denom(x):
p_x = np.exp(-x**2/(2.*C) + 1./2.*tilC*phi(x)**2)
return p_x
num, err = sciint.quad(integrand_num, -10, 10)
denom, err = sciint.quad(integrand_denom, -10, 10)
#print('num = ', num)
#print('denom = ', denom)
return num / denom
def p_x(x, q, qt):
return np.exp(-x**2/(2.*q) + 1./2.*qt*phi(x)**2)
def exp_pair_phiphi(C,alpha,beta):
#<phi(x_alpha)phi(x_beta)> for phi=erf(x)
if alpha==beta:
return 4./np.pi*np.arctan(np.sqrt(1.+4*C[0,0]))-1.
else:
return 2./np.pi*np.arcsin(2.*C[alpha,beta]/(np.sqrt(1.+2*C[alpha,alpha])*np.sqrt(1.+2*C[beta,beta])))
def exp_pair_phiprime_phiprime(C,alpha,beta):
#<phi'(x_alpha)phi'(x_beta)> for phi=erf(x)
if alpha==beta:
return 4./np.pi*1./np.sqrt(4*C[alpha,alpha]+1.)
else:
C_helper = np.array([ [ C[alpha,alpha], C[alpha,beta]], [ C[beta,alpha], C[beta,beta] ] ] )
return 4./np.pi*1./np.sqrt(2*(C[alpha,alpha]+C[beta,beta])+1+4*np.linalg.det(C_helper))
def exp_pair_phipprime_phi(C,alpha,beta):
#<phi''(x_alpha)phi''(x_beta)> for phi=erf(x)
if alpha==beta:
return -8./np.pi*C[alpha,alpha]/((2*C[alpha,alpha]+1.)*np.sqrt(4*C[alpha,alpha]+1))
# def exp_quadruple(f, g, u, w, C, indices, n_mc_samples):
# int_val = 0.
# K = np.zeros((4, 4), dtype=float)
# for i in range(4):
# for j in range(4):
# K[i, j] = C[indices[i], indices[j]]
#
# lam, v = np.linalg.eig(K)
#
# for i, l in enumerate(lam):
# if l < 0.0:
# if np.abs(l) > 1e-12:
# print("error: negative lambdas!")
# print("indices = ", indices)
# print("K = ", K)
# print("lambdas = ", lam)
# return None
# lam[i] = 0.0
#
# np.random.seed(701)
# ranges = np.sqrt(lam)
# def integrand(z):
# x = np.dot(v, z * ranges)
# return phi(x[0]) * phi(x[1]) * phi(x[2]) * phi(x[3])
#
# for s in range(n_mc_samples):
# z = np.random.normal(size=4)
# int_val += integrand(z)
#
# return int_val / n_mc_samples
# # Round 1 - Cythonize variables
# def exp_quadruple(f, g, u, w, C, indices, int n_mc_samples):
# K = np.zeros((4, 4), dtype=float)
# cdef int i
# cdef int j
# cdef float int_val = 0.0
# for i in range(4):
# for j in range(4):
# K[i, j] = C[indices[i], indices[j]]
#
# lam, v = np.linalg.eig(K)
#
# for i, l in enumerate(lam):
# if l < 0.0:
# if np.abs(l) > 1e-12:
# print("error: negative lambdas!")
# print("indices = ", indices)
# print("K = ", K)
# print("lambdas = ", lam)
# return None
# lam[i] = 0.0
#
# np.random.seed(701)
# ranges = np.sqrt(lam)
# def integrand(z):
# x = np.dot(v, z * ranges)
# return phi(x[0]) * phi(x[1]) * phi(x[2]) * phi(x[3])
#
# for s in range(n_mc_samples):
# z = np.random.normal(size=4)
# int_val += integrand(z)
#
# return int_val / n_mc_samples
# # Round 2 - Python magic
# def exp_quadruple(f, g, u, w, C, indices, int n_mc_samples):
# K = np.zeros((4, 4), dtype=float)
# cdef int i
# cdef int j
# cdef float int_val = 0.0
# for i in range(4):
# for j in range(4):
# K[i, j] = C[indices[i], indices[j]]
#
# lam, v = np.linalg.eig(K)
#
# for i, l in enumerate(lam):
# if l < 0.0:
# if np.abs(l) > 1e-12:
# print("error: negative lambdas!")
# print("indices = ", indices)
# print("K = ", K)
# print("lambdas = ", lam)
# return None
# lam[i] = 0.0
#
# np.random.seed(701)
# ranges = np.sqrt(lam)
# z = np.random.normal(size=(n_mc_samples, 4)) * ranges
# x = np.dot(v, z.T)
# int_val = np.mean(np.prod(phi(x), axis=0))
#
# return int_val
# Round 3 - Using Lapack
def exp_quadruple(f, g, u, w, C, indices, int n_mc_samples):
K = np.zeros((4, 4), dtype=float)
cdef int i
cdef int j
cdef float int_val = 0.0
for i in range(4):
for j in range(4):
K[i, j] = C[indices[i], indices[j]]
# Using LAPACK to diagonalize K
lam, v, _ = sp_linalg.lapack.dsyev(K, compute_v=True)
for i, l in enumerate(lam):
if l < 0.0:
if np.abs(l) > 1e-12:
print("error: negative lambdas!")
print("indices = ", indices)
print("K = ", K)
print("lambdas = ", lam)
return None
lam[i] = 0.0
np.random.seed(701)
ranges = np.sqrt(lam)
z = np.random.normal(size=(n_mc_samples, 4)) * ranges
x = np.dot(v, z.T)
int_val = np.mean(np.prod(phi(x), axis=0))
return int_val
def iterate_C_pureMFT(C_a, alpha, beta):
# zeroth order
C_a1_0 = g2 * exp_pair_phiphi(C_a, alpha, beta) + sigma2
return C_a1_0
def iterate_C(C_a, til_Ca1, exp_pair, alpha, beta):
# zeroth order
C_a1_0 = g2 * exp_pair[alpha, beta] + sigma2
# compute correction matrix (second term in Eq:12)
#print("computing correction")
assert(til_Ca1.shape[0] == til_Ca1.shape[1])
corrC = 0.0
for gamma in range(til_Ca1.shape[0]):
#print ("Gamma2",gamma)
corrC += til_Ca1[gamma, gamma] * ( exp_quadruple(phi, phi, phi, phi, C_a, np.array([alpha, beta, gamma, gamma]), Nsamples)\
- exp_pair[alpha,beta] * exp_pair[gamma,gamma] )
for delta in range(gamma+1, til_Ca1.shape[0]):
corrC += (til_Ca1[gamma, delta] + til_Ca1[delta, gamma]) * ( exp_quadruple(phi, phi, phi, phi, C_a, np.array([alpha, beta, gamma, delta]), Nsamples)\
- exp_pair[alpha,beta] * exp_pair[gamma,delta] )
#print ("done, corrC = ", corrC)
# first order
C_a1_1 = g2**2 * corrC
return C_a1_0 + C_a1_1
def final_tilC(C_A1, Y):
"""determine value of \tilde{C^(A+1)} for final layer"""
C_inv = np.linalg.inv(C_A1)
Y_outer = np.outer(Y, Y)
tilC = -1./(2.*n) * ( C_inv - np.matmul( C_inv, np.matmul(Y_outer, C_inv) ) )
#print("tilde C^(A+1) = ", tilC)
return tilC
def iterate_tilC_back(tilC_A1, C):
tilC = np.zeros( (tilC_A1.shape[0], tilC_A1.shape[1], A+1), dtype = float )
tilC[:,:, A] = tilC_A1
print("iterating tilde C backwards")
for a in range(A-1, -1, -1):
print("layer a=", a)
for alpha in range(D):
tilC[alpha, alpha, a] = g2 * tilC[alpha, alpha, a+1] * ( exp_pair_phipprime_phi(C[:,:,a], alpha, alpha) + exp_pair_phiprime_phiprime(C[:,:,a], alpha, alpha) )
for beta in range(alpha):
tilC_alpha_beta = g2 * tilC[alpha, beta, a+1] * exp_pair_phiprime_phiprime(C[:,:,a], alpha, beta)
tilC[alpha, beta, a] = tilC_alpha_beta
tilC[beta, alpha, a] = tilC_alpha_beta
return tilC
def generate_training_data(n_samples, data_dim, task, task_param):
"""generate training data for different tasks"""
if task == 'xor':
X, Y = generate_training_data_xor(n_samples, data_dim, task_param)
elif task == 'ising':
X, Y = generate_training_data_ising(n_samples, data_dim, task_param)
else:
raise ValueError("Task must be either xor or ising.")
return X, Y
def generate_training_data_ising(D, N, delta_p):
"""random Ising vectors with random labels assigned"""
X1 = 2*(np.random.rand(int(D/2), N) > 0.5 + delta_p) - 1.0
X2 = 2*(np.random.rand(int(D/2), N) > 0.5 - delta_p) - 1.0
# setting of binary classification
    # labels +-1 for the two classes, examples presented
# sorted by labels
X = np.vstack ( (X1, X2) )
Y = np.hstack ( (np.ones(int(D/2)), -np.ones(int(D/2))) )
return X, Y
def generate_training_data_xor(n_samples, data_dim, sigma):
"""
Random samples drawn from a high-dimensional XOR task.
See http://proceedings.mlr.press/v139/refinetti21b/refinetti21b.pdf.
"""
rng = np.random.default_rng(481) # TODO improve seeding
means = np.eye(data_dim)[:2]
cov = sigma**2 * np.eye(data_dim)
X1 = rng.multivariate_normal(mean=means[0], cov=cov, size=int(D/4))
X2 = rng.multivariate_normal(mean=-means[0], cov=cov, size=int(D/4))
X3 = rng.multivariate_normal(mean=means[1], cov=cov, size=int(D/4))
X4 = rng.multivariate_normal(mean=-means[1], cov=cov, size=int(D/4))
X = np.vstack ( (X1, X2, X3, X4) )
Y = np.hstack ( (np.ones(int(D/2)), -np.ones(int(D/2))) )
return X, Y
def compute_overlap(X):
return g_in2 * np.dot(X, X.T) / N_in
def compute_overlap2(X1, X2):
return g_in2 * np.dot(X1, X2.T) / N_in
def initial_mft(C0, Y):
C_mft = np.zeros( (D,D,A+1), dtype=float)
C_mft[:,:,0] = C0
print("computing pure MFT as starting point...")
# first compute the pure MFT without corrections in the first pass
for a in range(A):
print("layer a = ", a)
for alpha in range(D):
for beta in range(alpha+1):
C_alpha_beta = iterate_C_pureMFT(C_mft[:,:,a], alpha, beta)
C_mft[alpha, beta, a+1] = C_alpha_beta
C_mft[beta, alpha, a+1] = C_alpha_beta
print("[done]")
return C_mft
def correctionC(C, tilC, range_alpha=None):
if range_alpha is None:
range_alpha = range(C.shape[0])
exp_phi_phi = np.zeros( (C.shape[0], C.shape[1], C.shape[2]-1), dtype=float)
assert(C.shape[0] == C.shape[1])
#print("shape(C)", C.shape)
#print("shape(tilC)", tilC.shape)
for a in range(0, C.shape[2]-1):
print("layer a = ", a)
# first compute <phi phi> for next layer for all alpha, beta
for alpha in range_alpha:
for beta in range(alpha+1):
C_alpha_beta = exp_pair_phiphi(C[:,:,a], alpha, beta)
exp_phi_phi[alpha, beta, a] = C_alpha_beta
exp_phi_phi[beta, alpha, a] = C_alpha_beta
# compute correction
for alpha in range_alpha:
for beta in range(alpha+1):
#print ("BETA",beta)
C_alpha_beta = iterate_C(C[:,:,a], tilC[:,:,a+1], exp_phi_phi[:,:,a], alpha, beta)
C[alpha, beta, a+1] = C_alpha_beta
C[beta, alpha, a+1] = C_alpha_beta
return C, exp_phi_phi
def iterate_stats(C0, Y, num_iter):
"""one iteration of the statistics"""
# obtain MFT solution as initial value
C = initial_mft(C0, Y)
C_mft = C.copy()
last_tilC = np.zeros((C.shape[0], C.shape[1]), dtype=float)
for r in range(num_iter):
print("iteration #", r)
# obtain value of tilde C in final layer
tilC_A1 = final_tilC(C[:, :, A], Y)
print("tilC in final layer =", tilC_A1)
print("size of change of tilde C^(A+1)", np.mean(np.abs(tilC_A1 - last_tilC).flatten()))
last_tilC = tilC_A1.copy()
# iterate tilde C backwards through layers
tilC = iterate_tilC_back(tilC_A1, C)
#print("tilC in initial layer =", tilC[:,:,0])
print("computing corrections...")
# compute correction to current C
C, exp_phi_phi = correctionC(C, tilC)
print("[done]")
return C_mft, C, tilC, exp_phi_phi
def test_stats(C_train, tilC, exp_phi_phi, x_star, X):
"""approximate the statistics of the output for a test point
by neglecting all effects of the test point on the training range;
C, and \tilde{C} are hence computed only for all training points"""
# compute missing values for C for test point
exp_phi_phi_star = np.zeros((D+1, D+1, A), dtype=float)
exp_phi_phi_star[:D, :D, :] = exp_phi_phi
C_star = np.zeros( (D+1, D+1, A+1), dtype=float)
C_star[:D, :D, :] = C_train
tilC_star = np.zeros( (D+1, D+1, A+1), dtype=float)
tilC_star[:D, :D, :] = tilC
# overlap of test point with all other points
C0_star = compute_overlap2(X, x_star)
# insert as last row and column in layer 0
C_star[D, :D, 0] = C0_star
C_star[:D, D, 0] = C0_star
C_star[D, D, 0] = compute_overlap(x_star)
#print("C_star[a=0] = ", C_star[:,:,0])
for r in range(2):
C_inv = np.linalg.inv(C_star[:D, :D, A])
if r > 0:
tilG = C_star[:D, D, A] / ( np.dot(C_star[D,:D, A], np.dot(C_inv, C_star[:D, D, A])) - C_star[D, D, A] )
print("tilG = ", tilG)
tilC_star[D, :D, A] = np.dot( tilC[:, :, A], tilG )
tilC_star[D, D, A] = np.dot( tilG, np.dot(tilC[:, :, A], tilG) )
tilC_star = iterate_tilC_back(tilC_star[:, :, A], C_star)
#print("tilC in initial layer =", tilC[:,:,0])
print("computing corrections...")
# compute correction to current C
C_star, exp_phi_phi_star = correctionC(C_star, tilC_star, [D])
print("C_star[a=A] = ", C_star[:,:,A])
return C_star, tilC_star
def test_stats_mft(C_mft, x_star, X):
"""approximate the statistics of the output for a test point
by neglecting all effects of the test point on the training range;
C, and \tilde{C} are hence computed only for all training points"""
# compute missing values for C for test point
C_star = np.zeros( (D+1, D+1, A+1), dtype=float)
C_star[:D, :D, :] = C_mft
# overlap of test point with all other points
C0_star = compute_overlap2(X, x_star)
# insert as last row and column in layer 0
C_star[D, :D, 0] = C0_star
C_star[:D, D, 0] = C0_star
C_star[D, D, 0] = compute_overlap(x_star)
#print("C_star[a=0] = ", C_star[:,:,0])
print("computing mean-field iteration")
for a in range(0, A):
print("layer a = ", a)
# first compute <phi phi> for next layer for all alpha, beta
for alpha in range(D+1):
C_alpha_star = g2 * exp_pair_phiphi(C_star[:,:,a], alpha, D) + sigma2
C_star[alpha, D, a+1] = C_alpha_star
C_star[D, alpha, a+1] = C_alpha_star
print("C_star[a=A] = ", C_star[:,:,A])
return C_star
def mu_sigma2_pred(C, C_star, C_star_star, Y, eps):
    """
    Compute mean and variance of the predictive distribution, assuming only
    Gaussian statistics described by the kernel
    C: overlaps of all training points
    C_star: overlaps of the test point with all training data points
    C_star_star: self-overlap of the test point
    Y: training labels
    eps: regularizer (readout noise)
    """
    C_inv = np.linalg.inv(C + eps*np.eye(D))
mu_pred = np.dot(C_star, np.dot(C_inv, Y))
sigma2_pred = C_star_star - np.dot(C_star, np.dot(C_inv, C_star.T))
return mu_pred, sigma2_pred
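
# A minimal end-to-end sketch (illustrative only, not part of the computation
# above): how the pieces are meant to be wired together for one test point.
# It assumes the module-level globals used throughout this file (D training
# points, A layers, data X, labels Y) are set, that C0 is the layer-0 overlap
# matrix of the training data, and `eps_readout` is a hypothetical name for
# the readout regularizer.
#
#   C_mft, C, tilC, exp_phi_phi = iterate_stats(C0, Y, num_iter=10)
#   C_star, tilC_star = test_stats(C, tilC, exp_phi_phi, x_star, X)
#   # (or C_star = test_stats_mft(C_mft, x_star, X) for the mean-field estimate)
#   mu, s2 = mu_sigma2_pred(C_star[:D, :D, A],   # train-train kernel
#                           C_star[D, :D, A],    # test-train overlaps
#                           C_star[D, D, A],     # test self-overlap
#                           Y, eps_readout)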
<|end_of_text|>from cpython.exc cimport PyErr_CheckSignals
def compute_subsequence_kernel_from_word_similarities(
        int num_terms_1,
        int num_terms_2,
        int p,
        float lamb_sqr,
        float lamb,
        float[:,:] dps,
        float[:,:] dp,
        float[:] k,
        int[:,:] matches,
        double[:] p_weights,
):
    """Weighted subsequence kernel of order p between two term sequences,
    computed by dynamic programming (cf. the string subsequence kernel of
    Lodhi et al.). `dps` holds the per-position match contributions and is
    updated in place; `dp` (one extra row and column) and `k` are
    caller-allocated work buffers; `p_weights[l]` weights subsequences of
    length l."""
    # subsequences of length 1: accumulate the seeded match contributions
    cdef int i, j
    assert k[1] == 0
    for i in range(num_terms_1):
        for j in range(num_terms_2):
            k[1] += lamb*dps[i, j]
    # subsequences of length 2..p: the standard DP recursion
    cdef int l
    for l in range(2, p+1):
        PyErr_CheckSignals()  # allow KeyboardInterrupt during long computations
        for i in range(num_terms_1):
            for j in range(num_terms_2):
                dp[i+1, j+1] = dps[i, j] + lamb*dp[i, j+1] + lamb*dp[i+1, j] - lamb_sqr*dp[i, j]
                if matches[i, j] == 1:
                    dps[i, j] = lamb_sqr*dp[i, j]
                    k[l] = k[l] + dps[i, j]
                else:
                    assert matches[i, j] == 0
    # combine the per-length kernel values with the supplied weights
    cdef double k_sum = 0.0
    assert p_weights.size == (p+1)
    assert k.size == (p+1)
    for l in range(p+1):
        k_sum += k[l]*p_weights[l]
    return k_sum
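
# Illustrative driver (an assumption about the intended calling convention,
# not part of the original module): the caller allocates buffers with dtypes
# matching the memoryview signatures above and seeds `dps` from the matches.
#
#   def example_subsequence_kernel(matches_np, p=3, lamb=0.5):
#       import numpy as np
#       n1, n2 = matches_np.shape
#       matches = matches_np.astype(np.int32)
#       dps = (lamb * lamb * matches).astype(np.float32)   # seeded at matching positions
#       dp = np.zeros((n1 + 1, n2 + 1), dtype=np.float32)  # DP table, one extra row/col
#       k = np.zeros(p + 1, dtype=np.float32)              # per-length kernel values
#       p_weights = np.ones(p + 1, dtype=np.float64)       # uniform length weighting
#       return compute_subsequence_kernel_from_word_similarities(
#           n1, n2, p, lamb * lamb, lamb, dps, dp, k, matches, p_weights)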
<|end_of_text|>#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
cimport cython as __cython
from cpython.object cimport PyTypeObject, Py_LT, Py_LE, Py_EQ, Py_NE, Py_GT, Py_GE
from libcpp.memory cimport shared_ptr, make_shared, unique_ptr, make_unique
from libcpp.optional cimport optional as __optional
from libcpp.string cimport string
from libcpp cimport bool as cbool
from libcpp.iterator cimport inserter as cinserter
from cpython cimport bool as pbool
from cython.operator cimport dereference as deref, preincrement as inc, address as ptr_address
import thrift.py3.types
from thrift.py3.types import _IsSet as _fbthrift_IsSet
cimport thrift.py3.types
cimport thrift.py3.exceptions
from thrift.py3.std_libcpp cimport sv_to_str as __sv_to_str, string_view as __cstring_view
from thrift.py3.types cimport (
cSetOp as __cSetOp,
richcmp as __richcmp,
set_op as __set_op,
setcmp as __setcmp,
list_index as __list_index,
list_count as __list_count,
list_slice as __list_slice,
list_getitem as __list_getitem,
set_iter as __set_iter,
map_iter as __map_iter,
map_contains as __map_contains,
map_getitem as __map_getitem,
reference_shared_ptr as __reference_shared_ptr,
get_field_name_by_index as __get_field_name_by_index,
reset_field as __reset_field,
translate_cpp_enum_to_python,
SetMetaClass as __SetMetaClass,
const_pointer_cast,
constant_shared_ptr,
NOTSET as __NOTSET,
EnumData as __EnumData,
EnumFlagsData as __EnumFlagsData,
UnionTypeEnumData as __UnionTypeEnumData,
createEnumDataForUnionType as __createEnumDataForUnionType,
)
from thrift.py3.types import _is_python_enum, _is_python_struct
cimport thrift.py3.serializer as serializer
import folly.iobuf as _fbthrift_iobuf
from folly.optional cimport cOptional
from folly.memory cimport to_shared_ptr as __to_shared_ptr
from folly.range cimport Range as __cRange
import sys
from collections.abc import Sequence, Set, Mapping, Iterable
import weakref as __weakref
import builtins as _builtins
cimport module.types_reflection as _types_reflection
cdef __EnumData __AnEnum_enum_data = __EnumData._fbthrift_create(thrift.py3.types.createEnumData[cAnEnum](), AnEnum)
@__cython.internal
@__cython.auto_pickle(False)
cdef class __AnEnumMeta(thrift.py3.types.EnumMeta):
def _fbthrift_get_by_value(cls, int value):
return __AnEnum_enum_data.get_by_value(value)
def _fbthrift_get_all_names(cls):
return __AnEnum_enum_data.get_all_names()
def __len__(cls):
return __AnEnum_enum_data.size()
def __getattribute__(cls, str name not None):
if name.startswith("__") or name.startswith("_fbthrift_") or name == "mro":
return super().__getattribute__(name)
return __AnEnum_enum_data.get_by_name(name)
@__cython.final
@__cython.auto_pickle(False)
cdef class AnEnum(thrift.py3.types.CompiledEnum):
cdef get_by_name(self, str name):
return __AnEnum_enum_data.get_by_name(name)
@staticmethod
def __get_metadata__():
cdef __fbthrift_cThriftMetadata meta
EnumMetadata[cAnEnum].gen(meta)
return __MetadataBox.box(cmove(meta))
@staticmethod
def __get_thrift_name__():
return "module.AnEnum"
def _to_python(self):
import importlib
python_types = importlib.import_module(
"module.thrift_types"
)
return python_types.AnEnum(self.value)
def _to_py3(self):
return self
def _to_py_deprecated(self):
return self.value
__SetMetaClass(<PyTypeObject*> AnEnum, <PyTypeObject*> __AnEnumMeta)
cdef __EnumData __AnEnumRenamed_enum_data = __EnumData._fbthrift_create(thrift.py3.types.createEnumData[cAnEnumRenamed](), AnEnumRenamed)
@__cython.internal
@__cython.auto_pickle(False)
cdef class __AnEnumRenamedMeta(thrift.py3.types.EnumMeta):
def _fbthrift_get_by_value(cls, int value):
return __AnEnumRenamed_enum_data.get_by_value(value)
def _fbthrift_get_all_names(cls):
return __AnEnumRenamed_enum_data.get_all_names()
def __len__(cls):
return __AnEnumRenamed_enum_data.size()
def __getattribute__(cls, str name not None):
if name.startswith("__") or name.startswith("_fbthrift_") or name == "mro":
return super().__getattribute__(name)
return __AnEnumRenamed_enum_data.get_by_name(name)
@__cython.final
@__cython.auto_pickle(False)
cdef class AnEnumRenamed(thrift.py3.types.CompiledEnum):
cdef get_by_name(self, str name):
return __AnEnumRenamed_enum_data.get_by_name(name)
@staticmethod
def __get_metadata__():
cdef __fbthrift_cThriftMetadata meta
EnumMetadata[cAnEnumRenamed].gen(meta)
return __MetadataBox.box(cmove(meta))
@staticmethod
def __get_thrift_name__():
return "module.AnEnumRenamed"
def _to_python(self):
import importlib
python_types = importlib.import_module(
"module.thrift_types"
)
return python_types.AnEnumRenamed(self.value)
def _to_py3(self):
return self
def _to_py_deprecated(self):
return self.value
__SetMetaClass(<PyTypeObject*> AnEnumRenamed, <PyTypeObject*> __AnEnumRenamedMeta)
cdef __EnumFlagsData __Flags_enum_data = __EnumFlagsData._fbthrift_create(thrift.py3.types.createEnumFlagsData[cFlags](), Flags)
@__cython.internal
@__cython.auto_pickle(False)
cdef class __FlagsMeta(thrift.py3.types.EnumMeta):
def _fbthrift_get_by_value(cls, int value):
return __Flags_enum_data.get_by_value(value)
def _fbthrift_get_all_names(cls):
return __Flags_enum_data.get_all_names()
def __len__(cls):
return __Flags_enum_data.size()
def __getattribute__(cls, str name not None):
if name.startswith("__") or name.startswith("_fbthrift_") or name == "mro":
return super().__getattribute__(name)
return __Flags_enum_data.get_by_name(name)
@__cython.final
@__cython.auto_pickle(False)
cdef class Flags(thrift.py3.types.Flag):
cdef get_by_name(self, str name):
return __Flags_enum_data.get_by_name(name)
def __invert__(self):
return __Flags_enum_data.get_invert(self.value)
@staticmethod
def __get_metadata__():
cdef __fbthrift_cThriftMetadata meta
EnumMetadata[cFlags].gen(meta)
return __MetadataBox.box(cmove(meta))
@staticmethod
def __get_thrift_name__():
return " | Cython |
module.Flags"
def _to_python(self):
import importlib
python_types = importlib.import_module(
"module.thrift_types"
)
return python_types.Flags(self.value)
def _to_py3(self):
return self
def _to_py_deprecated(self):
return self.value
__SetMetaClass(<PyTypeObject*> Flags, <PyTypeObject*> __FlagsMeta)
cdef __UnionTypeEnumData __BinaryUnion_union_type_enum_data = __UnionTypeEnumData._fbthrift_create(
__createEnumDataForUnionType[cBinaryUnion](),
__BinaryUnionType,
)
@__cython.internal
@__cython.auto_pickle(False)
cdef class __BinaryUnion_Union_TypeMeta(thrift.py3.types.EnumMeta):
def _fbthrift_get_by_value(cls, int value):
return __BinaryUnion_union_type_enum_data.get_by_value(value)
def _fbthrift_get_all_names(cls):
return __BinaryUnion_union_type_enum_data.get_all_names()
def __len__(cls):
return __BinaryUnion_union_type_enum_data.size()
def __getattribute__(cls, str name not None):
if name.startswith("__") or name.startswith("_fbthrift_") or name == "mro":
return super().__getattribute__(name)
return __BinaryUnion_union_type_enum_data.get_by_name(name)
@__cython.final
@__cython.auto_pickle(False)
cdef class __BinaryUnionType(thrift.py3.types.CompiledEnum):
cdef get_by_name(self, str name):
return __BinaryUnion_union_type_enum_data.get_by_name(name)
__SetMetaClass(<PyTypeObject*> __BinaryUnionType, <PyTypeObject*> __BinaryUnion_Union_TypeMeta)
@__cython.auto_pickle(False)
cdef class SimpleException(thrift.py3.exceptions.GeneratedError):
def __init__(SimpleException self, *args, **kwargs):
self._cpp_obj = make_shared[cSimpleException]()
self._fields_setter = _fbthrift_types_fields.__SimpleException_FieldsSetter._fbthrift_create(self._cpp_obj.get())
super().__init__( *args, **kwargs)
cdef void _fbthrift_set_field(self, str name, object value) except *:
self._fields_setter.set_field(name.encode("utf-8"), value)
cdef object _fbthrift_isset(self):
return _fbthrift_IsSet("SimpleException", {
"err_code": deref(self._cpp_obj).err_code_ref().has_value(),
})
@staticmethod
cdef _fbthrift_create(shared_ptr[cSimpleException] cpp_obj):
__fbthrift_inst = <SimpleException>SimpleException.__new__(SimpleException, (<bytes>deref(cpp_obj).what()).decode('utf-8'))
__fbthrift_inst._cpp_obj = cmove(cpp_obj)
_builtins.Exception.__init__(__fbthrift_inst, *(v for _, v in __fbthrift_inst))
return __fbthrift_inst
cdef inline err_code_impl(self):
return deref(self._cpp_obj).err_code_ref().value()
@property
def err_code(self):
return self.err_code_impl()
def __hash__(SimpleException self):
return super().__hash__()
def __repr__(SimpleException self):
return super().__repr__()
def __str__(SimpleException self):
return super().__str__()
def __copy__(SimpleException self):
cdef shared_ptr[cSimpleException] cpp_obj = make_shared[cSimpleException](
deref(self._cpp_obj)
)
return SimpleException._fbthrift_create(cmove(cpp_obj))
def __richcmp__(self, other, int op):
r = self._fbthrift_cmp_sametype(other, op)
return __richcmp[cSimpleException](
self._cpp_obj,
(<SimpleException>other)._cpp_obj,
op,
) if r is None else r
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__SimpleException()
@staticmethod
def __get_metadata__():
cdef __fbthrift_cThriftMetadata meta
ExceptionMetadata[cSimpleException].gen(meta)
return __MetadataBox.box(cmove(meta))
@staticmethod
def __get_thrift_name__():
return "module.SimpleException"
@classmethod
def _fbthrift_get_field_name_by_index(cls, idx):
return __sv_to_str(__get_field_name_by_index[cSimpleException](idx))
@classmethod
def _fbthrift_get_struct_size(cls):
return 1
cdef _fbthrift_iobuf.IOBuf _fbthrift_serialize(SimpleException self, __Protocol proto):
cdef unique_ptr[_fbthrift_iobuf.cIOBuf] data
with nogil:
data = cmove(serializer.cserialize[cSimpleException](self._cpp_obj.get(), proto))
return _fbthrift_iobuf.from_unique_ptr(cmove(data))
cdef cuint32_t _fbthrift_deserialize(SimpleException self, const _fbthrift_iobuf.cIOBuf* buf, __Protocol proto) except? 0:
cdef cuint32_t needed
self._cpp_obj = make_shared[cSimpleException]()
with nogil:
needed = serializer.cdeserialize[cSimpleException](buf, self._cpp_obj.get(), proto)
return needed
def _to_python(self):
import importlib
import thrift.python.converter
python_types = importlib.import_module(
"module.thrift_types"
)
return thrift.python.converter.to_python_struct(python_types.SimpleException, self)
def _to_py3(self):
return self
def _to_py_deprecated(self):
import importlib
import thrift.util.converter
py_deprecated_types = importlib.import_module("module.ttypes")
return thrift.util.converter.to_py_struct(py_deprecated_types.SimpleException, self)
@__cython.auto_pickle(False)
cdef class OptionalRefStruct(thrift.py3.types.Struct):
def __init__(OptionalRefStruct self, **kwargs):
self._cpp_obj = make_shared[cOptionalRefStruct]()
self._fields_setter = _fbthrift_types_fields.__OptionalRefStruct_FieldsSetter._fbthrift_create(self._cpp_obj.get())
super().__init__(**kwargs)
def __call__(OptionalRefStruct self, **kwargs):
if not kwargs:
return self
cdef OptionalRefStruct __fbthrift_inst = OptionalRefStruct.__new__(OptionalRefStruct)
__fbthrift_inst._cpp_obj = make_shared[cOptionalRefStruct](deref(self._cpp_obj))
__fbthrift_inst._fields_setter = _fbthrift_types_fields.__OptionalRefStruct_FieldsSetter._fbthrift_create(__fbthrift_inst._cpp_obj.get())
for __fbthrift_name, _fbthrift_value in kwargs.items():
__fbthrift_inst._fbthrift_set_field(__fbthrift_name, _fbthrift_value)
return __fbthrift_inst
cdef void _fbthrift_set_field(self, str name, object value) except *:
self._fields_setter.set_field(name.encode("utf-8"), value)
cdef object _fbthrift_isset(self):
return _fbthrift_IsSet("OptionalRefStruct", {
"optional_blob": deref(self._cpp_obj).optional_blob_ref().has_value(),
})
@staticmethod
cdef _fbthrift_create(shared_ptr[cOptionalRefStruct] cpp_obj):
__fbthrift_inst = <OptionalRefStruct>OptionalRefStruct.__new__(OptionalRefStruct)
__fbthrift_inst._cpp_obj = cmove(cpp_obj)
return __fbthrift_inst
cdef inline optional_blob_impl(self):
if not deref(self._cpp_obj).optional_blob_ref().has_value():
return None
if self.__fbthrift_cached_optional_blob is None:
if not deref(self._cpp_obj).optional_blob_ref().value_unchecked():
return None
self.__fbthrift_cached_optional_blob = _fbthrift_iobuf.IOBuf.create(deref(self._cpp_obj).optional_blob_ref().value_unchecked().get(), self)
return self.__fbthrift_cached_optional_blob
@property
def optional_blob(self):
return self.optional_blob_impl()
def __hash__(OptionalRefStruct self):
return super().__hash__()
def __repr__(OptionalRefStruct self):
return super().__repr__()
def __str__(OptionalRefStruct self):
return super().__str__()
def __copy__(OptionalRefStruct self):
cdef shared_ptr[cOptionalRefStruct] cpp_obj = make_shared[cOptionalRefStruct](
deref(self._cpp_obj)
)
return OptionalRefStruct._fbthrift_create(cmove(cpp_obj))
def __richcmp__(self, other, int op):
r = self._fbthrift_cmp_sametype(other, op)
return __richcmp[cOptionalRefStruct](
self._cpp_obj,
(<OptionalRefStruct>other)._cpp_obj,
op,
) if r is None else r
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__OptionalRefStruct()
@staticmethod
def __get_metadata__():
cdef __fbthrift_cThriftMetadata meta
        StructMetadata[cOptionalRefStruct].gen(meta)
return __MetadataBox.box(cmove(meta))
@staticmethod
def __get_thrift_name__():
return "module.OptionalRefStruct"
@classmethod
def _fbthrift_get_field_name_by_index(cls, idx):
return __sv_to_str(__get_field_name_by_index[cOptionalRefStruct](idx))
@classmethod
def _fbthrift_get_struct_size(cls):
return 1
cdef _fbthrift_iobuf.IOBuf _fbthrift_serialize(OptionalRefStruct self, __Protocol proto):
cdef unique_ptr[_fbthrift_iobuf.cIOBuf] data
with nogil:
data = cmove(serializer.cserialize[cOptionalRefStruct](self._cpp_obj.get(), proto))
return _fbthrift_iobuf.from_unique_ptr(cmove(data))
cdef cuint32_t _fbthrift_deserialize(OptionalRefStruct self, const _fbthrift_iobuf.cIOBuf* buf, __Protocol proto) except? 0:
cdef cuint32_t needed
self._cpp_obj = make_shared[cOptionalRefStruct]()
with nogil:
needed = serializer.cdeserialize[cOptionalRefStruct](buf, self._cpp_obj.get(), proto)
return needed
def _to_python(self):
import importlib
import thrift.python.converter
python_types = importlib.import_module(
"module.thrift_types"
)
return thrift.python.converter.to_python_struct(python_types.OptionalRefStruct, self)
def _to_py3(self):
return self
def _to_py_deprecated(self):
import importlib
import thrift.util.converter
py_deprecated_types = importlib.import_module("module.ttypes")
return thrift.util.converter.to_py_struct(py_deprecated_types.OptionalRefStruct, self)
@__cython.auto_pickle(False)
cdef class SimpleStruct(thrift.py3.types.Struct):
def __init__(SimpleStruct self, **kwargs):
self._cpp_obj = make_shared[cSimpleStruct]()
self._fields_setter = _fbthrift_types_fields.__SimpleStruct_FieldsSetter._fbthrift_create(self._cpp_obj.get())
super().__init__(**kwargs)
def __call__(SimpleStruct self, **kwargs):
if not kwargs:
return self
cdef SimpleStruct __fbthrift_inst = SimpleStruct.__new__(SimpleStruct)
__fbthrift_inst._cpp_obj = make_shared[cSimpleStruct](deref(self._cpp_obj))
__fbthrift_inst._fields_setter = _fbthrift_types_fields.__SimpleStruct_FieldsSetter._fbthrift_create(__fbthrift_inst._cpp_obj.get())
for __fbthrift_name, _fbthrift_value in kwargs.items():
__fbthrift_inst._fbthrift_set_field(__fbthrift_name, _fbthrift_value)
return __fbthrift_inst
cdef void _fbthrift_set_field(self, str name, object value) except *:
self._fields_setter.set_field(name.encode("utf-8"), value)
cdef object _fbthrift_isset(self):
return _fbthrift_IsSet("SimpleStruct", {
"is_on": deref(self._cpp_obj).is_on_ref().has_value(),
"tiny_int": deref(self._cpp_obj).tiny_int_ref().has_value(),
"small_int": deref(self._cpp_obj).small_int_ref().has_value(),
"nice_sized_int": deref(self._cpp_obj).nice_sized_int_ref().has_value(),
"big_int": deref(self._cpp_obj).big_int_ref().has_value(),
"real": deref(self._cpp_obj).real_ref().has_value(),
"smaller_real": deref(self._cpp_obj).smaller_real_ref().has_value(),
})
@staticmethod
cdef _fbthrift_create(shared_ptr[cSimpleStruct] cpp_obj):
__fbthrift_inst = <SimpleStruct>SimpleStruct.__new__(SimpleStruct)
__fbthrift_inst._cpp_obj = cmove(cpp_obj)
return __fbthrift_inst
cdef inline is_on_impl(self):
return <pbool> deref(self._cpp_obj).is_on_ref().value()
@property
def is_on(self):
return self.is_on_impl()
cdef inline tiny_int_impl(self):
return deref(self._cpp_obj).tiny_int_ref().value()
@property
def tiny_int(self):
return self.tiny_int_impl()
cdef inline small_int_impl(self):
return deref(self._cpp_obj).small_int_ref().value()
@property
def small_int(self):
return self.small_int_impl()
cdef inline nice_sized_int_impl(self):
return deref(self._cpp_obj).nice_sized_int_ref().value()
@property
def nice_sized_int(self):
return self.nice_sized_int_impl()
cdef inline big_int_impl(self):
return deref(self._cpp_obj).big_int_ref().value()
@property
def big_int(self):
return self.big_int_impl()
cdef inline real_impl(self):
return deref(self._cpp_obj).real_ref().value()
@property
def real(self):
return self.real_impl()
cdef inline smaller_real_impl(self):
return deref(self._cpp_obj).smaller_real_ref().value()
@property
def smaller_real(self):
return self.smaller_real_impl()
def __hash__(SimpleStruct self):
return super().__hash__()
def __repr__(SimpleStruct self):
return super().__repr__()
def __str__(SimpleStruct self):
return super().__str__()
def __copy__(SimpleStruct self):
cdef shared_ptr[cSimpleStruct] cpp_obj = make_shared[cSimpleStruct](
deref(self._cpp_obj)
)
return SimpleStruct._fbthrift_create(cmove(cpp_obj))
def __richcmp__(self, other, int op):
r = self._fbthrift_cmp_sametype(other, op)
return __richcmp[cSimpleStruct](
self._cpp_obj,
(<SimpleStruct>other)._cpp_obj,
op,
) if r is None else r
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__SimpleStruct()
@staticmethod
def __get_metadata__():
cdef __fbthrift_cThriftMetadata meta
StructMetadata[cSimpleStruct].gen(meta)
return __MetadataBox.box(cmove(meta))
@staticmethod
def __get_thrift_name__():
return "module.SimpleStruct"
__fbthrift_field_name_list = [
'is_on',
'tiny_int',
'small_int',
'nice_sized_int',
'big_int',
'real',
'smaller_real',
]
@classmethod
def _fbthrift_get_field_name_by_index(cls, idx):
return cls.__fbthrift_field_name_list[idx]
@classmethod
def _fbthrift_get_struct_size(cls):
return 7
cdef _fbthrift_iobuf.IOBuf _fbthrift_serialize(SimpleStruct self, __Protocol proto):
cdef unique_ptr[_fbthrift_iobuf.cIOBuf] data
with nogil:
data = cmove(serializer.cserialize[cSimpleStruct](self._cpp_obj.get(), proto))
return _fbthrift_iobuf.from_unique_ptr(cmove(data))
cdef cuint32_t _fbthrift_deserialize(SimpleStruct self, const _fbthrift_iobuf.cIOBuf* buf, __Protocol proto) except? 0:
cdef cuint32_t needed
self._cpp_obj = make_shared[cSimpleStruct]()
with nogil:
needed = serializer.cdeserialize[cSimpleStruct](buf, self._cpp_obj.get(), proto)
return needed
def _to_python(self):
import importlib
import thrift.python.converter
python_types = importlib.import_module(
"module.thrift_types"
)
return thrift.python.converter.to_python_struct(python_types.SimpleStruct, self)
def _to_py3(self):
return self
def _to_py_deprecated(self):
import importlib
import thrift.util.converter
py_deprecated_types = importlib.import_module("module.ttypes")
return thrift.util.converter.to_py_struct(py_deprecated_types.SimpleStruct, self)
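# Illustrative usage of the generated struct from Python (a sketch, not part
# of the generated output; thrift.py3's serialize/deserialize helpers are
# assumed to be the entry points):
#
#   from thrift.py3 import serialize, deserialize, Protocol
#   s = SimpleStruct(is_on=True, tiny_int=5, big_int=2 ** 40)
#   s2 = s(tiny_int=6)                       # structs are immutable; __call__ returns a copy
#   data = serialize(s, protocol=Protocol.COMPACT)
#   assert deserialize(SimpleStruct, data, protocol=Protocol.COMPACT) == s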
@__cython.auto_pickle(False)
cdef class HiddenTypeFieldsStruct(thrift.py3.types.Struct):
def __init__(HiddenTypeFieldsStruct self, **kwargs):
self._cpp_obj = make_shared[cHiddenTypeFieldsStruct]()
self._fields_setter = _fbthrift_types_fields.__HiddenTypeFieldsStruct_FieldsSetter._fbthrift_create(self._cpp_obj.get())
super().__init__(**kwargs)
def __call__(HiddenTypeFieldsStruct self, **kwargs):
return self
cdef void _fbthrift_set_field(self, str name, object value) except *:
self._fields_setter.set_field(name.encode("utf-8"), value)
cdef object _fbthrift_isset(self):
return _fbthrift_IsSet("HiddenTypeFieldsStruct", {
})
@staticmethod
cdef _fbthrift_create(shared_ptr[cHiddenTypeFieldsStruct] cpp_obj):
__fbthrift_inst = <HiddenTypeFieldsStruct>HiddenTypeFieldsStruct.__new__(HiddenTypeFieldsStruct)
| Cython |
__fbthrift_inst._cpp_obj = cmove(cpp_obj)
return __fbthrift_inst
def __hash__(HiddenTypeFieldsStruct self):
return super().__hash__()
def __repr__(HiddenTypeFieldsStruct self):
return super().__repr__()
def __str__(HiddenTypeFieldsStruct self):
return super().__str__()
def __copy__(HiddenTypeFieldsStruct self):
cdef shared_ptr[cHiddenTypeFieldsStruct] cpp_obj = make_shared[cHiddenTypeFieldsStruct](
deref(self._cpp_obj)
)
return HiddenTypeFieldsStruct._fbthrift_create(cmove(cpp_obj))
def __eq__(HiddenTypeFieldsStruct self, other):
if not isinstance(other, HiddenTypeFieldsStruct):
return False
return deref(self._cpp_obj.get()) == deref((<HiddenTypeFieldsStruct>other)._cpp_obj.get())
def __ne__(HiddenTypeFieldsStruct self, other):
if not isinstance(other, HiddenTypeFieldsStruct):
return True
        return deref(self._cpp_obj) != deref((<HiddenTypeFieldsStruct>other)._cpp_obj)
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__HiddenTypeFieldsStruct()
@staticmethod
def __get_metadata__():
cdef __fbthrift_cThriftMetadata meta
StructMetadata[cHiddenTypeFieldsStruct].gen(meta)
return __MetadataBox.box(cmove(meta))
@staticmethod
def __get_thrift_name__():
return "module.HiddenTypeFieldsStruct"
__fbthrift_field_name_list = [
]
@classmethod
def _fbthrift_get_field_name_by_index(cls, idx):
return cls.__fbthrift_field_name_list[idx]
@classmethod
def _fbthrift_get_struct_size(cls):
return 0
cdef _fbthrift_iobuf.IOBuf _fbthrift_serialize(HiddenTypeFieldsStruct self, __Protocol proto):
cdef unique_ptr[_fbthrift_iobuf.cIOBuf] data
with nogil:
data = cmove(serializer.cserialize[cHiddenTypeFieldsStruct](self._cpp_obj.get(), proto))
return _fbthrift_iobuf.from_unique_ptr(cmove(data))
cdef cuint32_t _fbthrift_deserialize(HiddenTypeFieldsStruct self, const _fbthrift_iobuf.cIOBuf* buf, __Protocol proto) except? 0:
cdef cuint32_t needed
self._cpp_obj = make_shared[cHiddenTypeFieldsStruct]()
with nogil:
needed = serializer.cdeserialize[cHiddenTypeFieldsStruct](buf, self._cpp_obj.get(), proto)
return needed
def _to_python(self):
import importlib
import thrift.python.converter
python_types = importlib.import_module(
"module.thrift_types"
)
return thrift.python.converter.to_python_struct(python_types.HiddenTypeFieldsStruct, self)
def _to_py3(self):
return self
def _to_py_deprecated(self):
import importlib
import thrift.util.converter
py_deprecated_types = importlib.import_module("module.ttypes")
return thrift.util.converter.to_py_struct(py_deprecated_types.HiddenTypeFieldsStruct, self)
@__cython.auto_pickle(False)
cdef class ComplexStruct(thrift.py3.types.Struct):
def __init__(ComplexStruct self, **kwargs):
self._cpp_obj = make_shared[cComplexStruct]()
self._fields_setter = _fbthrift_types_fields.__ComplexStruct_FieldsSetter._fbthrift_create(self._cpp_obj.get())
super().__init__(**kwargs)
def __call__(ComplexStruct self, **kwargs):
if not kwargs:
return self
cdef ComplexStruct __fbthrift_inst = ComplexStruct.__new__(ComplexStruct)
__fbthrift_inst._cpp_obj = make_shared[cComplexStruct](deref(self._cpp_obj))
__fbthrift_inst._fields_setter = _fbthrift_types_fields.__ComplexStruct_FieldsSetter._fbthrift_create(__fbthrift_inst._cpp_obj.get())
for __fbthrift_name, _fbthrift_value in kwargs.items():
__fbthrift_inst._fbthrift_set_field(__fbthrift_name, _fbthrift_value)
return __fbthrift_inst
cdef void _fbthrift_set_field(self, str name, object value) except *:
self._fields_setter.set_field(name.encode("utf-8"), value)
cdef object _fbthrift_isset(self):
return _fbthrift_IsSet("ComplexStruct", {
"structOne": deref(self._cpp_obj).structOne_ref().has_value(),
"structTwo": deref(self._cpp_obj).structTwo_ref().has_value(),
"an_integer": deref(self._cpp_obj).an_integer_ref().has_value(),
"name": deref(self._cpp_obj).name_ref().has_value(),
"an_enum": deref(self._cpp_obj).an_enum_ref().has_value(),
"some_bytes": deref(self._cpp_obj).some_bytes_ref().has_value(),
"sender": deref(self._cpp_obj).sender_ref().has_value(),
"cdef_": deref(self._cpp_obj).cdef__ref().has_value(),
"bytes_with_cpp_type": deref(self._cpp_obj).bytes_with_cpp_type_ref().has_value(),
})
@staticmethod
cdef _fbthrift_create(shared_ptr[cComplexStruct] cpp_obj):
__fbthrift_inst = <ComplexStruct>ComplexStruct.__new__(ComplexStruct)
__fbthrift_inst._cpp_obj = cmove(cpp_obj)
return __fbthrift_inst
cdef inline structOne_impl(self):
if self.__fbthrift_cached_structOne is None:
self.__fbthrift_cached_structOne = SimpleStruct._fbthrift_create(__reference_shared_ptr(deref(self._cpp_obj).structOne_ref().ref(), self._cpp_obj))
return self.__fbthrift_cached_structOne
@property
def structOne(self):
return self.structOne_impl()
cdef inline structTwo_impl(self):
if self.__fbthrift_cached_structTwo is None:
self.__fbthrift_cached_structTwo = SimpleStruct._fbthrift_create(__reference_shared_ptr(deref(self._cpp_obj).structTwo_ref().ref(), self._cpp_obj))
return self.__fbthrift_cached_structTwo
@property
def structTwo(self):
return self.structTwo_impl()
cdef inline an_integer_impl(self):
return deref(self._cpp_obj).an_integer_ref().value()
@property
def an_integer(self):
return self.an_integer_impl()
cdef inline name_impl(self):
return (<bytes>deref(self._cpp_obj).name_ref().value()).decode('UTF-8')
@property
def name(self):
return self.name_impl()
cdef inline an_enum_impl(self):
if self.__fbthrift_cached_an_enum is None:
self.__fbthrift_cached_an_enum = translate_cpp_enum_to_python(AnEnum, <int>(deref(self._cpp_obj).an_enum_ref().value()))
return self.__fbthrift_cached_an_enum
@property
def an_enum(self):
return self.an_enum_impl()
cdef inline some_bytes_impl(self):
return deref(self._cpp_obj).some_bytes_ref().value()
@property
def some_bytes(self):
return self.some_bytes_impl()
cdef inline sender_impl(self):
return (<bytes>deref(self._cpp_obj).sender_ref().value()).decode('UTF-8')
@property
def sender(self):
return self.sender_impl()
cdef inline cdef__impl(self):
return (<bytes>deref(self._cpp_obj).cdef__ref().value()).decode('UTF-8')
@property
def cdef_(self):
return self.cdef__impl()
cdef inline bytes_with_cpp_type_impl(self):
return (<const char*>deref(self._cpp_obj).bytes_with_cpp_type_ref().value().data())[:deref(self._cpp_obj).bytes_with_cpp_type_ref().value().size()]
@property
def bytes_with_cpp_type(self):
return self.bytes_with_cpp_type_impl()
def __hash__(ComplexStruct self):
return super().__hash__()
def __repr__(ComplexStruct self):
return super().__repr__()
def __str__(ComplexStruct self):
return super().__str__()
def __copy__(ComplexStruct self):
cdef shared_ptr[cComplexStruct] cpp_obj = make_shared[cComplexStruct](
deref(self._cpp_obj)
)
return ComplexStruct._fbthrift_create(cmove(cpp_obj))
def __richcmp__(self, other, int op):
r = self._fbthrift_cmp_sametype(other, op)
return __richcmp[cComplexStruct](
self._cpp_obj,
(<ComplexStruct>other)._cpp_obj,
op,
) if r is None else r
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__ComplexStruct()
@staticmethod
def __get_metadata__():
cdef __fbthrift_cThriftMetadata meta
StructMetadata[cComplexStruct].gen(meta)
return __MetadataBox.box(cmove(meta))
@staticmethod
def __get_thrift_name__():
return "module.Complex | Cython |
Struct"
@classmethod
def _fbthrift_get_field_name_by_index(cls, idx):
return __sv_to_str(__get_field_name_by_index[cComplexStruct](idx))
@classmethod
def _fbthrift_get_struct_size(cls):
return 9
cdef _fbthrift_iobuf.IOBuf _fbthrift_serialize(ComplexStruct self, __Protocol proto):
cdef unique_ptr[_fbthrift_iobuf.cIOBuf] data
with nogil:
data = cmove(serializer.cserialize[cComplexStruct](self._cpp_obj.get(), proto))
return _fbthrift_iobuf.from_unique_ptr(cmove(data))
cdef cuint32_t _fbthrift_deserialize(ComplexStruct self, const _fbthrift_iobuf.cIOBuf* buf, __Protocol proto) except? 0:
cdef cuint32_t needed
self._cpp_obj = make_shared[cComplexStruct]()
with nogil:
needed = serializer.cdeserialize[cComplexStruct](buf, self._cpp_obj.get(), proto)
return needed
def _to_python(self):
import importlib
import thrift.python.converter
python_types = importlib.import_module(
"module.thrift_types"
)
return thrift.python.converter.to_python_struct(python_types.ComplexStruct, self)
def _to_py3(self):
return self
def _to_py_deprecated(self):
import importlib
import thrift.util.converter
py_deprecated_types = importlib.import_module("module.ttypes")
return thrift.util.converter.to_py_struct(py_deprecated_types.ComplexStruct, self)
@__cython.auto_pickle(False)
cdef class BinaryUnion(thrift.py3.types.Union):
Type = __BinaryUnionType
def __init__(
self, *,
iobuf_val=None
):
if iobuf_val is not None:
if not isinstance(iobuf_val, _fbthrift_iobuf.IOBuf):
if _is_python_struct(iobuf_val) or _is_python_enum(iobuf_val):
iobuf_val = iobuf_val._to_py3()
if not isinstance(iobuf_val, _fbthrift_iobuf.IOBuf):
raise TypeError(f'iobuf_val is a thrift-python type that can not be converted to { _fbthrift_iobuf.IOBuf!r}.')
else:
raise TypeError(f'iobuf_val is not a { _fbthrift_iobuf.IOBuf!r}.')
self._cpp_obj = __to_shared_ptr(cmove(BinaryUnion._make_instance(
NULL,
iobuf_val,
)))
self._load_cache()
@staticmethod
def fromValue(value):
if value is None:
return BinaryUnion()
if isinstance(value, _fbthrift_iobuf.IOBuf):
return BinaryUnion(iobuf_val=value)
raise ValueError(f"Unable to derive correct union field for value: {value}")
@staticmethod
cdef unique_ptr[cBinaryUnion] _make_instance(
cBinaryUnion* base_instance,
_fbthrift_iobuf.IOBuf iobuf_val
) except *:
cdef unique_ptr[cBinaryUnion] c_inst = make_unique[cBinaryUnion]()
cdef bint any_set = False
if iobuf_val is not None:
if any_set:
raise TypeError("At most one field may be set when initializing a union")
deref(c_inst).set_iobuf_val(deref((<_fbthrift_iobuf.IOBuf?>iobuf_val)._this))
any_set = True
# in C++ you don't have to call move(), but this doesn't translate
# into a C++ return statement, so you do here
return cmove(c_inst)
@staticmethod
cdef _fbthrift_create(shared_ptr[cBinaryUnion] cpp_obj):
__fbthrift_inst = <BinaryUnion>BinaryUnion.__new__(BinaryUnion)
__fbthrift_inst._cpp_obj = cmove(cpp_obj)
__fbthrift_inst._load_cache()
return __fbthrift_inst
@property
def iobuf_val(self):
        if self.type.value != 1:
raise AttributeError(f'Union contains a value of type {self.type.name}, not iobuf_val')
return self.value
def __hash__(BinaryUnion self):
return super().__hash__()
cdef _load_cache(BinaryUnion self):
self.type = BinaryUnion.Type(<int>(deref(self._cpp_obj).getType()))
cdef int type = self.type.value
if type == 0: # Empty
self.value = None
elif type == 1:
self.value = _fbthrift_iobuf.from_unique_ptr(deref(self._cpp_obj).get_iobuf_val().clone())
def __copy__(BinaryUnion self):
cdef shared_ptr[cBinaryUnion] cpp_obj = make_shared[cBinaryUnion](
deref(self._cpp_obj)
)
return BinaryUnion._fbthrift_create(cmove(cpp_obj))
def __eq__(BinaryUnion self, other):
return isinstance(other, BinaryUnion) and self._fbthrift_noncomparable_eq(other)
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__BinaryUnion()
@staticmethod
def __get_metadata__():
cdef __fbthrift_cThriftMetadata meta
StructMetadata[cBinaryUnion].gen(meta)
return __MetadataBox.box(cmove(meta))
@staticmethod
def __get_thrift_name__():
return "module.BinaryUnion"
@classmethod
def _fbthrift_get_field_name_by_index(cls, idx):
return __sv_to_str(__get_field_name_by_index[cBinaryUnion](idx))
@classmethod
def _fbthrift_get_struct_size(cls):
return 1
cdef _fbthrift_iobuf.IOBuf _fbthrift_serialize(BinaryUnion self, __Protocol proto):
cdef unique_ptr[_fbthrift_iobuf.cIOBuf] data
with nogil:
data = cmove(serializer.cserialize[cBinaryUnion](self._cpp_obj.get(), proto))
return _fbthrift_iobuf.from_unique_ptr(cmove(data))
cdef cuint32_t _fbthrift_deserialize(BinaryUnion self, const _fbthrift_iobuf.cIOBuf* buf, __Protocol proto) except? 0:
cdef cuint32_t needed
self._cpp_obj = make_shared[cBinaryUnion]()
with nogil:
needed = serializer.cdeserialize[cBinaryUnion](buf, self._cpp_obj.get(), proto)
# force a cache reload since the underlying data's changed
self._load_cache()
return needed
def _to_python(self):
import importlib
import thrift.python.converter
python_types = importlib.import_module(
"module.thrift_types"
)
return thrift.python.converter.to_python_struct(python_types.BinaryUnion, self)
def _to_py3(self):
return self
def _to_py_deprecated(self):
import importlib
import thrift.util.converter
py_deprecated_types = importlib.import_module("module.ttypes")
return thrift.util.converter.to_py_struct(py_deprecated_types.BinaryUnion, self)
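# Illustrative union usage (a sketch, not generated output): at most one
# field may be set; `type` exposes the discriminator and `value` the payload.
#
#   u = BinaryUnion(iobuf_val=some_iobuf)            # some_iobuf: an IOBuf (assumed)
#   assert u.type is BinaryUnion.Type.iobuf_val      # member name assumed from field
#   payload = u.value                                # or u.iobuf_val, checked access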
@__cython.auto_pickle(False)
cdef class BinaryUnionStruct(thrift.py3.types.Struct):
def __init__(BinaryUnionStruct self, **kwargs):
self._cpp_obj = make_shared[cBinaryUnionStruct]()
self._fields_setter = _fbthrift_types_fields.__BinaryUnionStruct_FieldsSetter._fbthrift_create(self._cpp_obj.get())
super().__init__(**kwargs)
def __call__(BinaryUnionStruct self, **kwargs):
if not kwargs:
return self
cdef BinaryUnionStruct __fbthrift_inst = BinaryUnionStruct.__new__(BinaryUnionStruct)
__fbthrift_inst._cpp_obj = make_shared[cBinaryUnionStruct](deref(self._cpp_obj))
__fbthrift_inst._fields_setter = _fbthrift_types_fields.__BinaryUnionStruct_FieldsSetter._fbthrift_create(__fbthrift_inst._cpp_obj.get())
for __fbthrift_name, _fbthrift_value in kwargs.items():
__fbthrift_inst._fbthrift_set_field(__fbthrift_name, _fbthrift_value)
return __fbthrift_inst
cdef void _fbthrift_set_field(self, str name, object value) except *:
self._fields_setter.set_field(name.encode("utf-8"), value)
cdef object _fbthrift_isset(self):
return _fbthrift_IsSet("BinaryUnionStruct", {
"u": deref(self._cpp_obj).u_ref().has_value(),
})
@staticmethod
cdef _fbthrift_create(shared_ptr[cBinaryUnionStruct] cpp_obj):
__fbthrift_inst = <BinaryUnionStruct>BinaryUnionStruct.__new__(BinaryUnionStruct)
__fbthrift_inst._cpp_obj = cmove(cpp_obj)
return __fbthrift_inst
cdef inline u_impl(self):
if self.__fbthrift_cached_u is None:
self.__fbthrift_cached_u = BinaryUnion._fbthrift_create(__reference_shared_ptr(deref(self._cpp_obj).u_ref().ref(), self._cpp_obj))
return self.__fbthrift_cached_u
@property
def u(self):
return self.u_impl()
def __hash__(BinaryUnionStruct self):
        return super().__hash__()
def __repr__(BinaryUnionStruct self):
return super().__repr__()
def __str__(BinaryUnionStruct self):
return super().__str__()
def __copy__(BinaryUnionStruct self):
cdef shared_ptr[cBinaryUnionStruct] cpp_obj = make_shared[cBinaryUnionStruct](
deref(self._cpp_obj)
)
return BinaryUnionStruct._fbthrift_create(cmove(cpp_obj))
def __eq__(BinaryUnionStruct self, other):
return isinstance(other, BinaryUnionStruct) and self._fbthrift_noncomparable_eq(other)
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__BinaryUnionStruct()
@staticmethod
def __get_metadata__():
cdef __fbthrift_cThriftMetadata meta
StructMetadata[cBinaryUnionStruct].gen(meta)
return __MetadataBox.box(cmove(meta))
@staticmethod
def __get_thrift_name__():
return "module.BinaryUnionStruct"
@classmethod
def _fbthrift_get_field_name_by_index(cls, idx):
return __sv_to_str(__get_field_name_by_index[cBinaryUnionStruct](idx))
@classmethod
def _fbthrift_get_struct_size(cls):
return 1
cdef _fbthrift_iobuf.IOBuf _fbthrift_serialize(BinaryUnionStruct self, __Protocol proto):
cdef unique_ptr[_fbthrift_iobuf.cIOBuf] data
with nogil:
data = cmove(serializer.cserialize[cBinaryUnionStruct](self._cpp_obj.get(), proto))
return _fbthrift_iobuf.from_unique_ptr(cmove(data))
cdef cuint32_t _fbthrift_deserialize(BinaryUnionStruct self, const _fbthrift_iobuf.cIOBuf* buf, __Protocol proto) except? 0:
cdef cuint32_t needed
self._cpp_obj = make_shared[cBinaryUnionStruct]()
with nogil:
needed = serializer.cdeserialize[cBinaryUnionStruct](buf, self._cpp_obj.get(), proto)
return needed
def _to_python(self):
import importlib
import thrift.python.converter
python_types = importlib.import_module(
"module.thrift_types"
)
return thrift.python.converter.to_python_struct(python_types.BinaryUnionStruct, self)
def _to_py3(self):
return self
def _to_py_deprecated(self):
import importlib
import thrift.util.converter
py_deprecated_types = importlib.import_module("module.ttypes")
return thrift.util.converter.to_py_struct(py_deprecated_types.BinaryUnionStruct, self)
@__cython.auto_pickle(False)
cdef class List__i16(thrift.py3.types.List):
def __init__(self, items=None):
if isinstance(items, List__i16):
self._cpp_obj = (<List__i16> items)._cpp_obj
else:
self._cpp_obj = List__i16._make_instance(items)
@staticmethod
cdef _fbthrift_create(shared_ptr[vector[cint16_t]] c_items):
__fbthrift_inst = <List__i16>List__i16.__new__(List__i16)
__fbthrift_inst._cpp_obj = cmove(c_items)
return __fbthrift_inst
def __copy__(List__i16 self):
cdef shared_ptr[vector[cint16_t]] cpp_obj = make_shared[vector[cint16_t]](
deref(self._cpp_obj)
)
return List__i16._fbthrift_create(cmove(cpp_obj))
def __len__(self):
return deref(self._cpp_obj).size()
@staticmethod
cdef shared_ptr[vector[cint16_t]] _make_instance(object items) except *:
cdef shared_ptr[vector[cint16_t]] c_inst = make_shared[vector[cint16_t]]()
if items is not None:
for item in items:
if not isinstance(item, int):
raise TypeError(f"{item!r} is not of type int")
item = <cint16_t> item
deref(c_inst).push_back(item)
return c_inst
cdef _get_slice(self, slice index_obj):
cdef int start, stop, step
start, stop, step = index_obj.indices(deref(self._cpp_obj).size())
return List__i16._fbthrift_create(
__list_slice[vector[cint16_t]](self._cpp_obj, start, stop, step)
)
cdef _get_single_item(self, size_t index):
cdef cint16_t citem = 0
__list_getitem(self._cpp_obj, index, citem)
return citem
cdef _check_item_type(self, item):
if not self or item is None:
return
if isinstance(item, int):
return item
def index(self, item, start=0, stop=None):
err = ValueError(f'{item} is not in list')
item = self._check_item_type(item)
if item is None:
raise err
cdef (int, int, int) indices = slice(start, stop).indices(deref(self._cpp_obj).size())
cdef cint16_t citem = item
cdef __optional[size_t] found = __list_index[vector[cint16_t]](self._cpp_obj, indices[0], indices[1], citem)
if not found.has_value():
raise err
return found.value()
def count(self, item):
item = self._check_item_type(item)
if item is None:
return 0
cdef cint16_t citem = item
return __list_count[vector[cint16_t]](self._cpp_obj, citem)
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__List__i16()
Sequence.register(List__i16)
@__cython.auto_pickle(False)
cdef class List__i32(thrift.py3.types.List):
def __init__(self, items=None):
if isinstance(items, List__i32):
self._cpp_obj = (<List__i32> items)._cpp_obj
else:
self._cpp_obj = List__i32._make_instance(items)
@staticmethod
cdef _fbthrift_create(shared_ptr[vector[cint32_t]] c_items):
__fbthrift_inst = <List__i32>List__i32.__new__(List__i32)
__fbthrift_inst._cpp_obj = cmove(c_items)
return __fbthrift_inst
def __copy__(List__i32 self):
cdef shared_ptr[vector[cint32_t]] cpp_obj = make_shared[vector[cint32_t]](
deref(self._cpp_obj)
)
return List__i32._fbthrift_create(cmove(cpp_obj))
def __len__(self):
return deref(self._cpp_obj).size()
@staticmethod
cdef shared_ptr[vector[cint32_t]] _make_instance(object items) except *:
cdef shared_ptr[vector[cint32_t]] c_inst = make_shared[vector[cint32_t]]()
if items is not None:
for item in items:
if not isinstance(item, int):
raise TypeError(f"{item!r} is not of type int")
item = <cint32_t> item
deref(c_inst).push_back(item)
return c_inst
cdef _get_slice(self, slice index_obj):
cdef int start, stop, step
start, stop, step = index_obj.indices(deref(self._cpp_obj).size())
return List__i32._fbthrift_create(
__list_slice[vector[cint32_t]](self._cpp_obj, start, stop, step)
)
cdef _get_single_item(self, size_t index):
cdef cint32_t citem = 0
__list_getitem(self._cpp_obj, index, citem)
return citem
cdef _check_item_type(self, item):
if not self or item is None:
return
if isinstance(item, int):
return item
def index(self, item, start=0, stop=None):
err = ValueError(f'{item} is not in list')
item = self._check_item_type(item)
if item is None:
raise err
cdef (int, int, int) indices = slice(start, stop).indices(deref(self._cpp_obj).size())
cdef cint32_t citem = item
cdef __optional[size_t] found = __list_index[vector[cint32_t]](self._cpp_obj, indices[0], indices[1], citem)
if not found.has_value():
raise err
return found.value()
def count(self, item):
item = self._check_item_type(item)
if item is None:
return 0
cdef cint32_t citem = item
return __list_count[vector[cint32_t]](self._cpp_obj, citem)
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__List__i32()
Sequence.register(List__i32)
@__cython.auto_pickle(False)
cdef class List__i64(thrift.py3.types.List):
def __init__(self, items=None):
if isinstance(items, List__i64):
self._cpp_obj = (<List__i64> items)._cpp_obj
else:
self._cpp_obj = List__i64._make_instance(items)
@staticmethod
cdef _fbthrift_create(shared_ptr[vector[cint64_t]] c_items):
__fbthrift_inst = <List__i64>List__i64.__new__(List__i64)
__fbthrift_inst._cpp_obj = cmove(c_items)
return __fbthrift_inst
def __copy__(List__i64 self):
cdef shared_ptr[vector[cint64_t]] cpp_obj = make_shared[vector[cint64_t]](
deref(self._cpp_obj)
)
return List__i64._fbthrift_create(cmove(cpp_obj))
def __len__(self):
return deref(self._cpp_obj).size()
@staticmethod
cdef shared_ptr[vector[cint64_t]] _make_instance(object items) except *:
cdef shared_ptr[vector[cint64_t]] c_inst = make_shared[vector[cint64_t]]()
if items is not None:
for item in items:
if not isinstance(item, int):
raise TypeError(f"{item!r} is not of type int")
item = <cint64_t> item
deref(c_inst).push_back(item)
return c_inst
cdef _get_slice(self, slice index_obj):
cdef int start, stop, step
start, stop, step = index_obj.indices(deref(self._cpp_obj).size())
return List__i64._fbthrift_create(
__list_slice[vector[cint64_t]](self._cpp_obj, start, stop, step)
)
cdef _get_single_item(self, size_t index):
cdef cint64_t citem = 0
__list_getitem(self._cpp_obj, index, citem)
return citem
cdef _check_item_type(self, item):
if not self or item is None:
return
if isinstance(item, int):
return item
def index(self, item, start=0, stop=None):
err = ValueError(f'{item} is not in list')
item = self._check_item_type(item)
if item is None:
raise err
cdef (int, int, int) indices = slice(start, stop).indices(deref(self._cpp_obj).size())
cdef cint64_t citem = item
cdef __optional[size_t] found = __list_index[vector[cint64_t]](self._cpp_obj, indices[0], indices[1], citem)
if not found.has_value():
raise err
return found.value()
def count(self, item):
item = self._check_item_type(item)
if item is None:
return 0
cdef cint64_t citem = item
return __list_count[vector[cint64_t]](self._cpp_obj, citem)
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__List__i64()
Sequence.register(List__i64)
@__cython.auto_pickle(False)
cdef class List__string(thrift.py3.types.List):
def __init__(self, items=None):
if isinstance(items, List__string):
self._cpp_obj = (<List__string> items)._cpp_obj
else:
self._cpp_obj = List__string._make_instance(items)
@staticmethod
cdef _fbthrift_create(shared_ptr[vector[string]] c_items):
__fbthrift_inst = <List__string>List__string.__new__(List__string)
__fbthrift_inst._cpp_obj = cmove(c_items)
return __fbthrift_inst
def __copy__(List__string self):
cdef shared_ptr[vector[string]] cpp_obj = make_shared[vector[string]](
deref(self._cpp_obj)
)
return List__string._fbthrift_create(cmove(cpp_obj))
def __len__(self):
return deref(self._cpp_obj).size()
@staticmethod
cdef shared_ptr[vector[string]] _make_instance(object items) except *:
cdef shared_ptr[vector[string]] c_inst = make_shared[vector[string]]()
if items is not None:
if isinstance(items, str):
raise TypeError("If you really want to pass a string into a _typing.Sequence[str] field, explicitly convert it first.")
for item in items:
if not isinstance(item, str):
raise TypeError(f"{item!r} is not of type str")
deref(c_inst).push_back(item.encode('UTF-8'))
return c_inst
cdef _get_slice(self, slice index_obj):
cdef int start, stop, step
start, stop, step = index_obj.indices(deref(self._cpp_obj).size())
return List__string._fbthrift_create(
__list_slice[vector[string]](self._cpp_obj, start, stop, step)
)
cdef _get_single_item(self, size_t index):
cdef string citem
__list_getitem(self._cpp_obj, index, citem)
return bytes(citem).decode('UTF-8')
cdef _check_item_type(self, item):
if not self or item is None:
return
if isinstance(item, str):
return item
def index(self, item, start=0, stop=None):
err = ValueError(f'{item} is not in list')
item = self._check_item_type(item)
if item is None:
raise err
cdef (int, int, int) indices = slice(start, stop).indices(deref(self._cpp_obj).size())
cdef string citem = item.encode('UTF-8')
cdef __optional[size_t] found = __list_index[vector[string]](self._cpp_obj, indices[0], indices[1], citem)
if not found.has_value():
raise err
return found.value()
def count(self, item):
item = self._check_item_type(item)
if item is None:
return 0
cdef string citem = item.encode('UTF-8')
return __list_count[vector[string]](self._cpp_obj, citem)
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__List__string()
Sequence.register(List__string)
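# Container sketch (illustrative, not generated output): generated containers
# are immutable snapshots of the Python iterables they are built from.
#
#   xs = List__string(["a", "b", "a"])
#   assert len(xs) == 3 and xs.count("a") == 2 and xs.index("b") == 1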
@__cython.auto_pickle(False)
cdef class List__SimpleStruct(thrift.py3.types.List):
def __init__(self, items=None):
if isinstance(items, List__SimpleStruct):
self._cpp_obj = (<List__SimpleStruct> items)._cpp_obj
else:
self._cpp_obj = List__SimpleStruct._make_instance(items)
@staticmethod
cdef _fbthrift_create(shared_ptr[vector[cSimpleStruct]] c_items):
__fbthrift_inst = <List__SimpleStruct>List__SimpleStruct.__new__(List__SimpleStruct)
__fbthrift_inst._cpp_obj = cmove(c_items)
return __fbthrift_inst
def __copy__(List__SimpleStruct self):
cdef shared_ptr[vector[cSimpleStruct]] cpp_obj = make_shared[vector[cSimpleStruct]](
deref(self._cpp_obj)
)
return List__SimpleStruct._fbthrift_create(cmove(cpp_obj))
def __len__(self):
return deref(self._cpp_obj).size()
@staticmethod
cdef shared_ptr[vector[cSimpleStruct]] _make_instance(object items) except *:
cdef shared_ptr[vector[cSimpleStruct]] c_inst = make_shared[vector[cSimpleStruct]]()
if items is not None:
for item in items:
if not isinstance(item, SimpleStruct):
raise TypeError(f"{item!r} is not of type SimpleStruct")
deref(c_inst).push_back(deref((<SimpleStruct>item)._cpp_obj))
return c_inst
cdef _get_slice(self, slice index_obj):
cdef int start, stop, step
start, stop, step = index_obj.indices(deref(self._cpp_obj).size())
return List__SimpleStruct._fbthrift_create(
__list_slice[vector[cSimpleStruct]](self._cpp_obj, start, stop, step)
)
cdef _get_single_item(self, size_t index):
cdef shared_ptr[cSimpleStruct] citem
__list_getitem(self._cpp_obj, index, citem)
return SimpleStruct._fbthrift_create(citem)
cdef _check_item_type(self, item):
if not self or item is None:
return
if isinstance(item, SimpleStruct):
return item
def index(self, item, start=0, stop=None):
err = ValueError(f'{item} is not in list')
item = self._check_item_type(item)
if item is None:
raise err
cdef (int, int, int) indices = slice(start, stop).indices(deref(self._cpp_obj).size())
cdef cSimpleStruct citem = deref((<SimpleStruct>item)._cpp_obj)
        cdef __optional[size_t] found = __list_index[vector[cSimpleStruct]](self._cpp_obj, indices[0], indices[1], citem)
if not found.has_value():
raise err
return found.value()
def count(self, item):
item = self._check_item_type(item)
if item is None:
return 0
cdef cSimpleStruct citem = deref((<SimpleStruct>item)._cpp_obj)
return __list_count[vector[cSimpleStruct]](self._cpp_obj, citem)
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__List__SimpleStruct()
Sequence.register(List__SimpleStruct)
@__cython.auto_pickle(False)
cdef class Set__i32(thrift.py3.types.Set):
def __init__(self, items=None):
if isinstance(items, Set__i32):
self._cpp_obj = (<Set__i32> items)._cpp_obj
else:
self._cpp_obj = Set__i32._make_instance(items)
@staticmethod
cdef _fbthrift_create(shared_ptr[cset[cint32_t]] c_items):
__fbthrift_inst = <Set__i32>Set__i32.__new__(Set__i32)
__fbthrift_inst._cpp_obj = cmove(c_items)
return __fbthrift_inst
def __copy__(Set__i32 self):
cdef shared_ptr[cset[cint32_t]] cpp_obj = make_shared[cset[cint32_t]](
deref(self._cpp_obj)
)
return Set__i32._fbthrift_create(cmove(cpp_obj))
def __len__(self):
return deref(self._cpp_obj).size()
@staticmethod
cdef shared_ptr[cset[cint32_t]] _make_instance(object items) except *:
cdef shared_ptr[cset[cint32_t]] c_inst = make_shared[cset[cint32_t]]()
if items is not None:
for item in items:
if not isinstance(item, int):
raise TypeError(f"{item!r} is not of type int")
item = <cint32_t> item
deref(c_inst).insert(item)
return c_inst
def __contains__(self, item):
if not self or item is None:
return False
if not isinstance(item, int):
return False
return pbool(deref(self._cpp_obj).count(item))
def __iter__(self):
if not self:
return
cdef __set_iter[cset[cint32_t]] itr = __set_iter[cset[cint32_t]](self._cpp_obj)
cdef cint32_t citem = 0
for i in range(deref(self._cpp_obj).size()):
itr.genNext(self._cpp_obj, citem)
yield citem
def __hash__(self):
return super().__hash__()
def __richcmp__(self, other, int op):
if isinstance(other, Set__i32):
# C level comparisons
return __setcmp(
self._cpp_obj,
(<Set__i32> other)._cpp_obj,
op,
)
return self._fbthrift_py_richcmp(other, op)
cdef _fbthrift_do_set_op(self, other, __cSetOp op):
if not isinstance(other, Set__i32):
other = Set__i32(other)
cdef shared_ptr[cset[cint32_t]] result
return Set__i32._fbthrift_create(__set_op[cset[cint32_t]](
self._cpp_obj,
(<Set__i32>other)._cpp_obj,
op,
))
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__Set__i32()
Set.register(Set__i32)
@__cython.auto_pickle(False)
cdef class Set__string(thrift.py3.types.Set):
def __init__(self, items=None):
if isinstance(items, Set__string):
self._cpp_obj = (<Set__string> items)._cpp_obj
else:
self._cpp_obj = Set__string._make_instance(items)
@staticmethod
cdef _fbthrift_create(shared_ptr[cset[string]] c_items):
__fbthrift_inst = <Set__string>Set__string.__new__(Set__string)
__fbthrift_inst._cpp_obj = cmove(c_items)
return __fbthrift_inst
def __copy__(Set__string self):
cdef shared_ptr[cset[string]] cpp_obj = make_shared[cset[string]](
deref(self._cpp_obj)
)
return Set__string._fbthrift_create(cmove(cpp_obj))
def __len__(self):
return deref(self._cpp_obj).size()
@staticmethod
cdef shared_ptr[cset[string]] _make_instance(object items) except *:
cdef shared_ptr[cset[string]] c_inst = make_shared[cset[string]]()
if items is not None:
if isinstance(items, str):
raise TypeError("If you really want to pass a string into a _typing.AbstractSet[str] field, explicitly convert it first.")
for item in items:
if not isinstance(item, str):
raise TypeError(f"{item!r} is not of type str")
deref(c_inst).insert(item.encode('UTF-8'))
return c_inst
def __contains__(self, item):
if not self or item is None:
return False
if not isinstance(item, str):
return False
return pbool(deref(self._cpp_obj).count(item.encode('UTF-8')))
def __iter__(self):
if not self:
return
cdef __set_iter[cset[string]] itr = __set_iter[cset[string]](self._cpp_obj)
cdef string citem
for i in range(deref(self._cpp_obj).size()):
itr.genNext(self._cpp_obj, citem)
yield bytes(citem).decode('UTF-8')
def __hash__(self):
return super().__hash__()
def __richcmp__(self, other, int op):
if isinstance(other, Set__string):
# C level comparisons
return __setcmp(
self._cpp_obj,
(<Set__string> other)._cpp_obj,
op,
)
return self._fbthrift_py_richcmp(other, op)
cdef _fbthrift_do_set_op(self, other, __cSetOp op):
if not isinstance(other, Set__string):
other = Set__string(other)
cdef shared_ptr[cset[string]] result
return Set__string._fbthrift_create(__set_op[cset[string]](
self._cpp_obj,
(<Set__string>other)._cpp_obj,
op,
))
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__Set__string()
Set.register(Set__string)
@__cython.auto_pickle(False)
cdef class Map__string_string(thrift.py3.types.Map):
def __init__(self, items=None):
if isinstance(items, Map__string_string):
self._cpp_obj = (<Map__string_string> items)._cpp_obj
else:
self._cpp_obj = Map__string_string._make_instance(items)
@staticmethod
cdef _fbthrift_create(shared_ptr[cmap[string,string]] c_items):
__fbthrift_inst = <Map__string_string>Map__string_string.__new__(Map__string_string)
__fbthrift_inst._cpp_obj = cmove(c_items)
return __fbthrift_inst
def __copy__(Map__string_string self):
cdef shared_ptr[cmap[string,string]] cpp_obj = make_shared[cmap[string,string]](
deref(self._cpp_obj)
)
return Map__string_string._fbthrift_create(cmove(cpp_obj))
def __len__(self):
return deref(self._cpp_obj).size()
@staticmethod
cdef shared_ptr[cmap[string,string]] _make_instance(object items) except *:
cdef shared_ptr[cmap[string,string]] c_inst = make_shared[cmap[string,string]]()
if items is not None:
for key, item in items.items():
if not isinstance(key, str):
raise TypeError(f"{key!r} is not of type str")
if not isinstance(item, str):
raise TypeError(f"{item!r} is not of type str")
deref(c_inst)[key.encode('UTF-8')] = item.encode('UTF-8')
return c_inst
cdef _check_key_type(self, key):
if not self or key is None:
return
if isinstance(key, str):
return key
def __getitem__(self, key):
err = KeyError(f'{key}')
key = self._check_key_type(key)
if key is None:
raise err
cdef string ckey = key.encode('UTF-8')
if not __map_contains(self._cpp_obj, ckey):
raise err
cdef string citem
__map_getitem(self._cpp_obj, ckey, citem)
return bytes(citem).decode('UTF-8')
def __iter__(self):
if not self:
return
        cdef __map_iter[cmap[string,string]] itr = __map_iter[cmap[string,string]](self._cpp_obj)
cdef string citem
for i in range(deref(self._cpp_obj).size()):
itr.genNextKey(self._cpp_obj, citem)
yield bytes(citem).decode('UTF-8')
def __contains__(self, key):
key = self._check_key_type(key)
if key is None:
return False
cdef string ckey = key.encode('UTF-8')
return __map_contains(self._cpp_obj, ckey)
def values(self):
if not self:
return
cdef __map_iter[cmap[string,string]] itr = __map_iter[cmap[string,string]](self._cpp_obj)
cdef string citem
for i in range(deref(self._cpp_obj).size()):
itr.genNextValue(self._cpp_obj, citem)
yield bytes(citem).decode('UTF-8')
def items(self):
if not self:
return
cdef __map_iter[cmap[string,string]] itr = __map_iter[cmap[string,string]](self._cpp_obj)
cdef string ckey
cdef string citem
for i in range(deref(self._cpp_obj).size()):
itr.genNextItem(self._cpp_obj, ckey, citem)
yield (ckey.data().decode('UTF-8'), bytes(citem).decode('UTF-8'))
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__Map__string_string()
Mapping.register(Map__string_string)
@__cython.auto_pickle(False)
cdef class Map__string_SimpleStruct(thrift.py3.types.Map):
def __init__(self, items=None):
if isinstance(items, Map__string_SimpleStruct):
self._cpp_obj = (<Map__string_SimpleStruct> items)._cpp_obj
else:
self._cpp_obj = Map__string_SimpleStruct._make_instance(items)
@staticmethod
cdef _fbthrift_create(shared_ptr[cmap[string,cSimpleStruct]] c_items):
__fbthrift_inst = <Map__string_SimpleStruct>Map__string_SimpleStruct.__new__(Map__string_SimpleStruct)
__fbthrift_inst._cpp_obj = cmove(c_items)
return __fbthrift_inst
def __copy__(Map__string_SimpleStruct self):
cdef shared_ptr[cmap[string,cSimpleStruct]] cpp_obj = make_shared[cmap[string,cSimpleStruct]](
deref(self._cpp_obj)
)
return Map__string_SimpleStruct._fbthrift_create(cmove(cpp_obj))
def __len__(self):
return deref(self._cpp_obj).size()
@staticmethod
cdef shared_ptr[cmap[string,cSimpleStruct]] _make_instance(object items) except *:
cdef shared_ptr[cmap[string,cSimpleStruct]] c_inst = make_shared[cmap[string,cSimpleStruct]]()
if items is not None:
for key, item in items.items():
if not isinstance(key, str):
raise TypeError(f"{key!r} is not of type str")
if not isinstance(item, SimpleStruct):
raise TypeError(f"{item!r} is not of type SimpleStruct")
deref(c_inst)[key.encode('UTF-8')] = deref((<SimpleStruct>item)._cpp_obj)
return c_inst
cdef _check_key_type(self, key):
if not self or key is None:
return
if isinstance(key, str):
return key
def __getitem__(self, key):
err = KeyError(f'{key}')
key = self._check_key_type(key)
if key is None:
raise err
cdef string ckey = key.encode('UTF-8')
if not __map_contains(self._cpp_obj, ckey):
raise err
cdef shared_ptr[cSimpleStruct] citem
__map_getitem(self._cpp_obj, ckey, citem)
return SimpleStruct._fbthrift_create(citem)
def __iter__(self):
if not self:
return
cdef __map_iter[cmap[string,cSimpleStruct]] itr = __map_iter[cmap[string,cSimpleStruct]](self._cpp_obj)
cdef string citem
for i in range(deref(self._cpp_obj).size()):
itr.genNextKey(self._cpp_obj, citem)
yield bytes(citem).decode('UTF-8')
def __contains__(self, key):
key = self._check_key_type(key)
if key is None:
return False
cdef string ckey = key.encode('UTF-8')
return __map_contains(self._cpp_obj, ckey)
def values(self):
if not self:
return
cdef __map_iter[cmap[string,cSimpleStruct]] itr = __map_iter[cmap[string,cSimpleStruct]](self._cpp_obj)
cdef shared_ptr[cSimpleStruct] citem
for i in range(deref(self._cpp_obj).size()):
itr.genNextValue(self._cpp_obj, citem)
yield SimpleStruct._fbthrift_create(citem)
def items(self):
if not self:
return
cdef __map_iter[cmap[string,cSimpleStruct]] itr = __map_iter[cmap[string,cSimpleStruct]](self._cpp_obj)
cdef string ckey
cdef shared_ptr[cSimpleStruct] citem
for i in range(deref(self._cpp_obj).size()):
itr.genNextItem(self._cpp_obj, ckey, citem)
yield (ckey.data().decode('UTF-8'), SimpleStruct._fbthrift_create(citem))
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__Map__string_SimpleStruct()
Mapping.register(Map__string_SimpleStruct)
@__cython.auto_pickle(False)
cdef class Map__string_i16(thrift.py3.types.Map):
def __init__(self, items=None):
if isinstance(items, Map__string_i16):
self._cpp_obj = (<Map__string_i16> items)._cpp_obj
else:
self._cpp_obj = Map__string_i16._make_instance(items)
@staticmethod
cdef _fbthrift_create(shared_ptr[cmap[string,cint16_t]] c_items):
__fbthrift_inst = <Map__string_i16>Map__string_i16.__new__(Map__string_i16)
__fbthrift_inst._cpp_obj = cmove(c_items)
return __fbthrift_inst
def __copy__(Map__string_i16 self):
cdef shared_ptr[cmap[string,cint16_t]] cpp_obj = make_shared[cmap[string,cint16_t]](
deref(self._cpp_obj)
)
return Map__string_i16._fbthrift_create(cmove(cpp_obj))
def __len__(self):
return deref(self._cpp_obj).size()
@staticmethod
cdef shared_ptr[cmap[string,cint16_t]] _make_instance(object items) except *:
cdef shared_ptr[cmap[string,cint16_t]] c_inst = make_shared[cmap[string,cint16_t]]()
if items is not None:
for key, item in items.items():
if not isinstance(key, str):
raise TypeError(f"{key!r} is not of type str")
if not isinstance(item, int):
raise TypeError(f"{item!r} is not of type int")
item = <cint16_t> item
deref(c_inst)[key.encode('UTF-8')] = item
return c_inst
cdef _check_key_type(self, key):
if not self or key is None:
return
if isinstance(key, str):
return key
def __getitem__(self, key):
err = KeyError(f'{key}')
key = self._check_key_type(key)
if key is None:
raise err
cdef string ckey = key.encode('UTF-8')
if not __map_contains(self._cpp_obj, ckey):
raise err
cdef cint16_t citem = 0
__map_getitem(self._cpp_obj, ckey, citem)
return citem
def __iter__(self):
if not self:
return
cdef __map_iter[cmap[string,cint16_t]] itr = __map_iter[cmap[string,cint16_t]](self._cpp_obj)
cdef string citem
for i in range(deref(self._cpp_obj).size()):
itr.genNextKey(self._cpp_obj, citem)
yield bytes(citem).decode('UTF-8')
def __contains__(self, key):
key = self._check_key_type(key)
if key is None:
return False
cdef string ckey = key.encode('UTF-8')
return __map_contains(self._cpp_obj, ckey)
def values(self):
if not self:
return
cdef __map_iter[cmap[string,cint16_t]] itr = __map_iter[cmap[string,cint16_t]](self._cpp_obj)
cdef cint16_t citem = 0
for i in range(deref(self._cpp_obj).size()):
itr.genNextValue(self._cpp_obj, citem)
yield citem
def items(self):
if not self:
return
cdef __map_iter[cmap[string,cint16_t]] itr = __map_iter[cmap[string,cint16_t]](self._cpp_obj)
cdef string ckey
cdef cint16_t citem = 0
for i in range(deref(self._cpp_obj).size()):
itr.genNextItem(self._cpp_obj, ckey, citem)
yield (ckey.data().decode('UTF-8'), citem)
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__Map__string_i16()
Mapping.register(Map__string_i16)
@__cython.auto_pickle(False)
cdef class List__List__i32(thrift.py3.types.List):
def __init__(self, items=None):
if isinstance(items, List__List__i32):
self._cpp_obj = (<List__List__i32> items)._cpp_obj
else:
self._cpp_obj = List__List__i32._make_instance(items)
@staticmethod
cdef _fbthrift_create(shared_ptr[vector[vector[cint32_t]]] c_items):
__fbthrift_inst = <List__List__i32>List__List__i32.__new__(List__List__i32)
__fbthrift_inst._cpp_obj = cmove(c_items)
return __fbthrift_inst
def __copy__(List__List__i32 self):
cdef shared_ptr[vector[vector[cint32_t]]] cpp_obj = make_shared[vector[vector[cint32_t]]](
deref(self._cpp_obj)
)
return List__List__i32._fbthrift_create(cmove(cpp_obj))
def __len__(self):
return deref(self._cpp_obj).size()
@staticmethod
cdef shared_ptr[vector[vector[cint32_t]]] _make_instance(object items) except *:
cdef shared_ptr[vector[vector[cint32_t]]] c_inst = make_shared[vector[vector[cint32_t]]]()
if items is not None:
for item in items:
if item is None:
raise TypeError("None is not of the type _typing.Sequence[int]")
if not isinstance(item, List__i32):
item = List__i32(item)
deref(c_inst).push_back(deref((<List__i32>item)._cpp_obj))
return c_inst
cdef _get_slice(self, slice index_obj):
cdef int start, stop, step
start, stop, step = index_obj.indices(deref(self._cpp_obj).size())
return List__List__i32._fbthrift_create(
__list_slice[vector[vector[cint32_t]]](self._cpp_obj, start, stop, step)
)
cdef _get_single_item(self, size_t index):
cdef shared_ptr[vector[cint32_t]] citem
__list_getitem(self._cpp_obj, index, citem)
return List__i32._fbthrift_create(citem)
cdef _check_item_type(self, item):
if not self or item is None:
return
if isinstance(item, List__i32):
return item
try:
return List__i32(item)
except:
pass
def index(self, item, start=0, stop=None):
err = ValueError(f'{item} is not in list')
item = self._check_item_type(item)
if item is None:
raise err
cdef (int, int, int) indices = slice(start, stop).indices(deref(self._cpp_obj).size())
cdef vector[cint32_t] citem = deref((<List__i32>item)._cpp_obj)
cdef __optional[size_t] found = __list_index[vector[vector[cint32_t]]](self._cpp_obj, indices[0], indices[1], citem)
if not found.has_value():
raise err
return found.value()
def count(self, item):
item = self._check_item_type(item)
if item is None:
return 0
cdef vector[cint32_t] citem = deref((<List__i32>item)._cpp_obj)
return __list_count[vector[vector[cint32_t]]](self._cpp_obj, citem)
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__List__List__i32()
Sequence.register(List__List__i32)
@__cython.auto_pickle(False)
cdef class Map__string_i32(thrift.py3.types.Map):
def __init__(self, items=None):
if isinstance(items, Map__string_i32):
self._cpp_obj = (<Map__string_i32> items)._cpp_obj
else:
self._cpp_obj = Map__string_i32._make_instance(items)
@staticmethod
cdef _fbthrift_create(shared_ptr[cmap[string,cint32_t]] c_items):
__fbthrift_inst = <Map__string_i32>Map__string_i32.__new__(Map__string_i32)
__fbthrift_inst._cpp_obj = cmove(c_items)
return __fbthrift_inst
def __copy__(Map__string_i32 self):
cdef shared_ptr[cmap[string,cint32_t]] cpp_obj = make_shared[cmap[string,cint32_t]](
deref(self._cpp_obj)
)
return Map__string_i32._fbthrift_create(cmove(cpp_obj))
def __len__(self):
return deref(self._cpp_obj).size()
@staticmethod
cdef shared_ptr[cmap[string,cint32_t]] _make_instance(object items) except *:
cdef shared_ptr[cmap[string,cint32_t]] c_inst = make_shared[cmap[string,cint32_t]]()
if items is not None:
for key, item in items.items():
if not isinstance(key, str):
raise TypeError(f"{key!r} is not of type str")
if not isinstance(item, int):
raise TypeError(f"{item!r} is not of type int")
item = <cint32_t> item
deref(c_inst)[key.encode('UTF-8')] = item
return c_inst
cdef _check_key_type(self, key):
if not self or key is None:
return
if isinstance(key, str):
return key
def __getitem__(self, key):
err = KeyError(f'{key}')
key = self._check_key_type(key)
if key is None:
raise err
cdef string ckey = key.encode('UTF-8')
if not __map_contains(self._cpp_obj, ckey):
raise err
cdef cint32_t citem = 0
__map_getitem(self._cpp_obj, ckey, citem)
return citem
def __iter__(self):
if not self:
return
cdef __map_iter[cmap[string,cint32_t]] itr = __map_iter[cmap[string,cint32_t]](self._cpp_obj)
cdef string citem
for i in range(deref(self._cpp_obj).size()):
itr.genNextKey(self._cpp_obj, citem)
yield bytes(citem).decode('UTF-8')
def __contains__(self, key):
key = self._check_key_type(key)
if key is None:
return False
cdef string ckey = key.encode('UTF-8')
return __map_contains(self._cpp_obj, ckey)
def values(self):
if not self:
return
cdef __map_iter[cmap[string,cint32_t]] itr = __map_iter[cmap[string,cint32_t]](self._cpp_obj)
cdef cint32_t citem = 0
for i in range(deref(self._cpp_obj).size()):
itr.genNextValue(self._cpp_obj, citem)
yield citem
def items(self):
if not self:
return
cdef __map_iter[cmap[string,cint32_t]] itr = __map_iter[cmap[string,cint32_t]](self._cpp_obj)
cdef string ckey
cdef cint32_t citem = 0
for i in range(deref(self._cpp_obj).size()):
itr.genNextItem(self._cpp_obj, ckey, citem)
yield (ckey.data().decode('UTF-8'), citem)
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__Map__string_i32()
Mapping.register(Map__string_i32)
@__cython.auto_pickle(False)
cdef class Map__string_Map__string_i32(thrift.py3.types.Map):
def __init__(self, items=None):
if isinstance(items, Map__string_Map__string_i32):
self._cpp_obj = (<Map__string_Map__string_i32> items)._cpp_obj
else:
self._cpp_obj = Map__string_Map__string_i32._make_instance(items)
@staticmethod
cdef _fbthrift_create(shared_ptr[cmap[string,cmap[string,cint32_t]]] c_items):
        __fbthrift_inst = <Map__string_Map__string_i32>Map__string_Map__string_i32.__new__(Map__string_Map__string_i32)
__fbthrift_inst._cpp_obj = cmove(c_items)
return __fbthrift_inst
def __copy__(Map__string_Map__string_i32 self):
cdef shared_ptr[cmap[string,cmap[string,cint32_t]]] cpp_obj = make_shared[cmap[string,cmap[string,cint32_t]]](
deref(self._cpp_obj)
)
return Map__string_Map__string_i32._fbthrift_create(cmove(cpp_obj))
def __len__(self):
return deref(self._cpp_obj).size()
@staticmethod
cdef shared_ptr[cmap[string,cmap[string,cint32_t]]] _make_instance(object items) except *:
cdef shared_ptr[cmap[string,cmap[string,cint32_t]]] c_inst = make_shared[cmap[string,cmap[string,cint32_t]]]()
if items is not None:
for key, item in items.items():
if not isinstance(key, str):
raise TypeError(f"{key!r} is not of type str")
if item is None:
raise TypeError("None is not of type _typing.Mapping[str, int]")
if not isinstance(item, Map__string_i32):
item = Map__string_i32(item)
deref(c_inst)[key.encode('UTF-8')] = deref((<Map__string_i32>item)._cpp_obj)
return c_inst
cdef _check_key_type(self, key):
if not self or key is None:
return
if isinstance(key, str):
return key
def __getitem__(self, key):
err = KeyError(f'{key}')
key = self._check_key_type(key)
if key is None:
raise err
cdef string ckey = key.encode('UTF-8')
if not __map_contains(self._cpp_obj, ckey):
raise err
cdef shared_ptr[cmap[string,cint32_t]] citem
__map_getitem(self._cpp_obj, ckey, citem)
return Map__string_i32._fbthrift_create(citem)
def __iter__(self):
if not self:
return
cdef __map_iter[cmap[string,cmap[string,cint32_t]]] itr = __map_iter[cmap[string,cmap[string,cint32_t]]](self._cpp_obj)
cdef string citem
for i in range(deref(self._cpp_obj).size()):
itr.genNextKey(self._cpp_obj, citem)
yield bytes(citem).decode('UTF-8')
def __contains__(self, key):
key = self._check_key_type(key)
if key is None:
return False
cdef string ckey = key.encode('UTF-8')
return __map_contains(self._cpp_obj, ckey)
def values(self):
if not self:
return
cdef __map_iter[cmap[string,cmap[string,cint32_t]]] itr = __map_iter[cmap[string,cmap[string,cint32_t]]](self._cpp_obj)
cdef shared_ptr[cmap[string,cint32_t]] citem
for i in range(deref(self._cpp_obj).size()):
itr.genNextValue(self._cpp_obj, citem)
yield Map__string_i32._fbthrift_create(citem)
def items(self):
if not self:
return
cdef __map_iter[cmap[string,cmap[string,cint32_t]]] itr = __map_iter[cmap[string,cmap[string,cint32_t]]](self._cpp_obj)
cdef string ckey
cdef shared_ptr[cmap[string,cint32_t]] citem
for i in range(deref(self._cpp_obj).size()):
itr.genNextItem(self._cpp_obj, ckey, citem)
yield (ckey.data().decode('UTF-8'), Map__string_i32._fbthrift_create(citem))
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__Map__string_Map__string_i32()
Mapping.register(Map__string_Map__string_i32)
@__cython.auto_pickle(False)
cdef class List__Set__string(thrift.py3.types.List):
def __init__(self, items=None):
if isinstance(items, List__Set__string):
self._cpp_obj = (<List__Set__string> items)._cpp_obj
else:
self._cpp_obj = List__Set__string._make_instance(items)
@staticmethod
cdef _fbthrift_create(shared_ptr[vector[cset[string]]] c_items):
__fbthrift_inst = <List__Set__string>List__Set__string.__new__(List__Set__string)
__fbthrift_inst._cpp_obj = cmove(c_items)
return __fbthrift_inst
def __copy__(List__Set__string self):
cdef shared_ptr[vector[cset[string]]] cpp_obj = make_shared[vector[cset[string]]](
deref(self._cpp_obj)
)
return List__Set__string._fbthrift_create(cmove(cpp_obj))
def __len__(self):
return deref(self._cpp_obj).size()
@staticmethod
cdef shared_ptr[vector[cset[string]]] _make_instance(object items) except *:
cdef shared_ptr[vector[cset[string]]] c_inst = make_shared[vector[cset[string]]]()
if items is not None:
for item in items:
if item is None:
raise TypeError("None is not of the type _typing.AbstractSet[str]")
if not isinstance(item, Set__string):
item = Set__string(item)
deref(c_inst).push_back(deref((<Set__string>item)._cpp_obj))
return c_inst
cdef _get_slice(self, slice index_obj):
cdef int start, stop, step
start, stop, step = index_obj.indices(deref(self._cpp_obj).size())
return List__Set__string._fbthrift_create(
__list_slice[vector[cset[string]]](self._cpp_obj, start, stop, step)
)
cdef _get_single_item(self, size_t index):
cdef shared_ptr[cset[string]] citem
__list_getitem(self._cpp_obj, index, citem)
return Set__string._fbthrift_create(citem)
cdef _check_item_type(self, item):
if not self or item is None:
return
if isinstance(item, Set__string):
return item
try:
return Set__string(item)
except:
pass
def index(self, item, start=0, stop=None):
err = ValueError(f'{item} is not in list')
item = self._check_item_type(item)
if item is None:
raise err
cdef (int, int, int) indices = slice(start, stop).indices(deref(self._cpp_obj).size())
cdef cset[string] citem = deref((<Set__string>item)._cpp_obj)
cdef __optional[size_t] found = __list_index[vector[cset[string]]](self._cpp_obj, indices[0], indices[1], citem)
if not found.has_value():
raise err
return found.value()
def count(self, item):
item = self._check_item_type(item)
if item is None:
return 0
cdef cset[string] citem = deref((<Set__string>item)._cpp_obj)
return __list_count[vector[cset[string]]](self._cpp_obj, citem)
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__List__Set__string()
Sequence.register(List__Set__string)
@__cython.auto_pickle(False)
cdef class Map__string_List__SimpleStruct(thrift.py3.types.Map):
def __init__(self, items=None):
if isinstance(items, Map__string_List__SimpleStruct):
self._cpp_obj = (<Map__string_List__SimpleStruct> items)._cpp_obj
else:
self._cpp_obj = Map__string_List__SimpleStruct._make_instance(items)
@staticmethod
cdef _fbthrift_create(shared_ptr[cmap[string,vector[cSimpleStruct]]] c_items):
__fbthrift_inst = <Map__string_List__SimpleStruct>Map__string_List__SimpleStruct.__new__(Map__string_List__SimpleStruct)
__fbthrift_inst._cpp_obj = cmove(c_items)
return __fbthrift_inst
def __copy__(Map__string_List__SimpleStruct self):
cdef shared_ptr[cmap[string,vector[cSimpleStruct]]] cpp_obj = make_shared[cmap[string,vector[cSimpleStruct]]](
deref(self._cpp_obj)
)
return Map__string_List__SimpleStruct._fbthrift_create(cmove(cpp_obj))
def __len__(self):
return deref(self._cpp_obj).size()
@staticmethod
cdef shared_ptr[cmap[string,vector[cSimpleStruct]]] _make_instance(object items) except *:
cdef shared_ptr[cmap[string,vector[cSimpleStruct]]] c_inst = make_shared[cmap[string,vector[cSimpleStruct]]]()
        if items is not None:
for key, item in items.items():
if not isinstance(key, str):
raise TypeError(f"{key!r} is not of type str")
if item is None:
raise TypeError("None is not of type _typing.Sequence[SimpleStruct]")
if not isinstance(item, List__SimpleStruct):
item = List__SimpleStruct(item)
deref(c_inst)[key.encode('UTF-8')] = deref((<List__SimpleStruct>item)._cpp_obj)
return c_inst
cdef _check_key_type(self, key):
if not self or key is None:
return
if isinstance(key, str):
return key
def __getitem__(self, key):
err = KeyError(f'{key}')
key = self._check_key_type(key)
if key is None:
raise err
cdef string ckey = key.encode('UTF-8')
if not __map_contains(self._cpp_obj, ckey):
raise err
cdef shared_ptr[vector[cSimpleStruct]] citem
__map_getitem(self._cpp_obj, ckey, citem)
return List__SimpleStruct._fbthrift_create(citem)
def __iter__(self):
if not self:
return
cdef __map_iter[cmap[string,vector[cSimpleStruct]]] itr = __map_iter[cmap[string,vector[cSimpleStruct]]](self._cpp_obj)
cdef string citem
for i in range(deref(self._cpp_obj).size()):
itr.genNextKey(self._cpp_obj, citem)
yield bytes(citem).decode('UTF-8')
def __contains__(self, key):
key = self._check_key_type(key)
if key is None:
return False
cdef string ckey = key.encode('UTF-8')
return __map_contains(self._cpp_obj, ckey)
def values(self):
if not self:
return
cdef __map_iter[cmap[string,vector[cSimpleStruct]]] itr = __map_iter[cmap[string,vector[cSimpleStruct]]](self._cpp_obj)
cdef shared_ptr[vector[cSimpleStruct]] citem
for i in range(deref(self._cpp_obj).size()):
itr.genNextValue(self._cpp_obj, citem)
yield List__SimpleStruct._fbthrift_create(citem)
def items(self):
if not self:
return
cdef __map_iter[cmap[string,vector[cSimpleStruct]]] itr = __map_iter[cmap[string,vector[cSimpleStruct]]](self._cpp_obj)
cdef string ckey
cdef shared_ptr[vector[cSimpleStruct]] citem
for i in range(deref(self._cpp_obj).size()):
itr.genNextItem(self._cpp_obj, ckey, citem)
yield (ckey.data().decode('UTF-8'), List__SimpleStruct._fbthrift_create(citem))
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__Map__string_List__SimpleStruct()
Mapping.register(Map__string_List__SimpleStruct)
@__cython.auto_pickle(False)
cdef class List__List__string(thrift.py3.types.List):
def __init__(self, items=None):
if isinstance(items, List__List__string):
self._cpp_obj = (<List__List__string> items)._cpp_obj
else:
self._cpp_obj = List__List__string._make_instance(items)
@staticmethod
cdef _fbthrift_create(shared_ptr[vector[vector[string]]] c_items):
__fbthrift_inst = <List__List__string>List__List__string.__new__(List__List__string)
__fbthrift_inst._cpp_obj = cmove(c_items)
return __fbthrift_inst
def __copy__(List__List__string self):
cdef shared_ptr[vector[vector[string]]] cpp_obj = make_shared[vector[vector[string]]](
deref(self._cpp_obj)
)
return List__List__string._fbthrift_create(cmove(cpp_obj))
def __len__(self):
return deref(self._cpp_obj).size()
@staticmethod
cdef shared_ptr[vector[vector[string]]] _make_instance(object items) except *:
cdef shared_ptr[vector[vector[string]]] c_inst = make_shared[vector[vector[string]]]()
if items is not None:
for item in items:
if item is None:
raise TypeError("None is not of the type _typing.Sequence[str]")
if not isinstance(item, List__string):
item = List__string(item)
deref(c_inst).push_back(deref((<List__string>item)._cpp_obj))
return c_inst
cdef _get_slice(self, slice index_obj):
cdef int start, stop, step
start, stop, step = index_obj.indices(deref(self._cpp_obj).size())
return List__List__string._fbthrift_create(
__list_slice[vector[vector[string]]](self._cpp_obj, start, stop, step)
)
cdef _get_single_item(self, size_t index):
cdef shared_ptr[vector[string]] citem
__list_getitem(self._cpp_obj, index, citem)
return List__string._fbthrift_create(citem)
cdef _check_item_type(self, item):
if not self or item is None:
return
if isinstance(item, List__string):
return item
try:
return List__string(item)
except:
pass
def index(self, item, start=0, stop=None):
err = ValueError(f'{item} is not in list')
item = self._check_item_type(item)
if item is None:
raise err
cdef (int, int, int) indices = slice(start, stop).indices(deref(self._cpp_obj).size())
cdef vector[string] citem = deref((<List__string>item)._cpp_obj)
cdef __optional[size_t] found = __list_index[vector[vector[string]]](self._cpp_obj, indices[0], indices[1], citem)
if not found.has_value():
raise err
return found.value()
def count(self, item):
item = self._check_item_type(item)
if item is None:
return 0
cdef vector[string] citem = deref((<List__string>item)._cpp_obj)
return __list_count[vector[vector[string]]](self._cpp_obj, citem)
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__List__List__string()
Sequence.register(List__List__string)
@__cython.auto_pickle(False)
cdef class List__Set__i32(thrift.py3.types.List):
def __init__(self, items=None):
if isinstance(items, List__Set__i32):
self._cpp_obj = (<List__Set__i32> items)._cpp_obj
else:
self._cpp_obj = List__Set__i32._make_instance(items)
@staticmethod
cdef _fbthrift_create(shared_ptr[vector[cset[cint32_t]]] c_items):
__fbthrift_inst = <List__Set__i32>List__Set__i32.__new__(List__Set__i32)
__fbthrift_inst._cpp_obj = cmove(c_items)
return __fbthrift_inst
def __copy__(List__Set__i32 self):
cdef shared_ptr[vector[cset[cint32_t]]] cpp_obj = make_shared[vector[cset[cint32_t]]](
deref(self._cpp_obj)
)
return List__Set__i32._fbthrift_create(cmove(cpp_obj))
def __len__(self):
return deref(self._cpp_obj).size()
@staticmethod
cdef shared_ptr[vector[cset[cint32_t]]] _make_instance(object items) except *:
cdef shared_ptr[vector[cset[cint32_t]]] c_inst = make_shared[vector[cset[cint32_t]]]()
if items is not None:
for item in items:
if item is None:
raise TypeError("None is not of the type _typing.AbstractSet[int]")
if not isinstance(item, Set__i32):
item = Set__i32(item)
deref(c_inst).push_back(deref((<Set__i32>item)._cpp_obj))
return c_inst
cdef _get_slice(self, slice index_obj):
cdef int start, stop, step
start, stop, step = index_obj.indices(deref(self._cpp_obj).size())
return List__Set__i32._fbthrift_create(
__list_slice[vector[cset[cint32_t]]](self._cpp_obj, start, stop, step)
)
cdef _get_single_item(self, size_t index):
cdef shared_ptr[cset[cint32_t]] citem
__list_getitem(self._cpp_obj, index, citem)
return Set__i32._fbthrift_create(citem)
cdef _check_item_type(self, item):
if not self or item is None:
return
if isinstance(item, Set__i32):
return item
try:
return Set__i32(item)
except:
pass
def index(self, item, start=0, stop=None):
err = ValueError(f'{item} is not in list')
item = self._check_item_type(item)
if item is None:
raise err
cdef (int, int, int) indices = slice(start, stop).indices(deref(self._cpp_obj).size())
cdef cset[cint32_t] citem = deref((<Set__i32>item)._cpp_obj)
cdef __optional[size_t] found = __list_index[vector[cset[cint32_t]]](self._cpp_obj, indices[0], indices[1], citem)
if not found.has_value():
raise err
return found.value()
def count(self, item):
item = self._check_item_type(item)
if item is None:
return 0
cdef cset[cint32_t] citem = deref((<Set__i32>item)._cpp_obj)
return __list_count[vector[cset[cint32_t]]](self._cpp_obj, citem)
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__List__Set__i32()
Sequence.register(List__Set__i32)
@__cython.auto_pickle(False)
cdef class List__Map__string_string(thrift.py3.types.List):
def __init__(self, items=None):
if isinstance(items, List__Map__string_string):
self._cpp_obj = (<List__Map__string_string> items)._cpp_obj
else:
self._cpp_obj = List__Map__string_string._make_instance(items)
@staticmethod
cdef _fbthrift_create(shared_ptr[vector[cmap[string,string]]] c_items):
__fbthrift_inst = <List__Map__string_string>List__Map__string_string.__new__(List__Map__string_string)
__fbthrift_inst._cpp_obj = cmove(c_items)
return __fbthrift_inst
def __copy__(List__Map__string_string self):
cdef shared_ptr[vector[cmap[string,string]]] cpp_obj = make_shared[vector[cmap[string,string]]](
deref(self._cpp_obj)
)
return List__Map__string_string._fbthrift_create(cmove(cpp_obj))
def __len__(self):
return deref(self._cpp_obj).size()
@staticmethod
cdef shared_ptr[vector[cmap[string,string]]] _make_instance(object items) except *:
cdef shared_ptr[vector[cmap[string,string]]] c_inst = make_shared[vector[cmap[string,string]]]()
if items is not None:
for item in items:
if item is None:
raise TypeError("None is not of the type _typing.Mapping[str, str]")
if not isinstance(item, Map__string_string):
item = Map__string_string(item)
deref(c_inst).push_back(deref((<Map__string_string>item)._cpp_obj))
return c_inst
cdef _get_slice(self, slice index_obj):
cdef int start, stop, step
start, stop, step = index_obj.indices(deref(self._cpp_obj).size())
return List__Map__string_string._fbthrift_create(
__list_slice[vector[cmap[string,string]]](self._cpp_obj, start, stop, step)
)
cdef _get_single_item(self, size_t index):
cdef shared_ptr[cmap[string,string]] citem
__list_getitem(self._cpp_obj, index, citem)
return Map__string_string._fbthrift_create(citem)
cdef _check_item_type(self, item):
if not self or item is None:
return
if isinstance(item, Map__string_string):
return item
try:
return Map__string_string(item)
except:
pass
def index(self, item, start=0, stop=None):
err = ValueError(f'{item} is not in list')
item = self._check_item_type(item)
if item is None:
raise err
cdef (int, int, int) indices = slice(start, stop).indices(deref(self._cpp_obj).size())
cdef cmap[string,string] citem = deref((<Map__string_string>item)._cpp_obj)
cdef __optional[size_t] found = __list_index[vector[cmap[string,string]]](self._cpp_obj, indices[0], indices[1], citem)
if not found.has_value():
raise err
return found.value()
def count(self, item):
item = self._check_item_type(item)
if item is None:
return 0
cdef cmap[string,string] citem = deref((<Map__string_string>item)._cpp_obj)
return __list_count[vector[cmap[string,string]]](self._cpp_obj, citem)
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__List__Map__string_string()
Sequence.register(List__Map__string_string)
@__cython.auto_pickle(False)
cdef class List__binary(thrift.py3.types.List):
def __init__(self, items=None):
if isinstance(items, List__binary):
self._cpp_obj = (<List__binary> items)._cpp_obj
else:
self._cpp_obj = List__binary._make_instance(items)
@staticmethod
cdef _fbthrift_create(shared_ptr[vector[string]] c_items):
__fbthrift_inst = <List__binary>List__binary.__new__(List__binary)
__fbthrift_inst._cpp_obj = cmove(c_items)
return __fbthrift_inst
def __copy__(List__binary self):
cdef shared_ptr[vector[string]] cpp_obj = make_shared[vector[string]](
deref(self._cpp_obj)
)
return List__binary._fbthrift_create(cmove(cpp_obj))
def __len__(self):
return deref(self._cpp_obj).size()
@staticmethod
cdef shared_ptr[vector[string]] _make_instance(object items) except *:
cdef shared_ptr[vector[string]] c_inst = make_shared[vector[string]]()
if items is not None:
if isinstance(items, str):
raise TypeError("If you really want to pass a string into a _typing.Sequence[bytes] field, explicitly convert it first.")
for item in items:
if not isinstance(item, bytes):
raise TypeError(f"{item!r} is not of type bytes")
deref(c_inst).push_back(item)
return c_inst
cdef _get_slice(self, slice index_obj):
cdef int start, stop, step
start, stop, step = index_obj.indices(deref(self._cpp_obj).size())
return List__binary._fbthrift_create(
__list_slice[vector[string]](self._cpp_obj, start, stop, step)
)
cdef _get_single_item(self, size_t index):
cdef string citem
__list_getitem(self._cpp_obj, index, citem)
return bytes(citem)
cdef _check_item_type(self, item):
if not self or item is None:
return
if isinstance(item, bytes):
return item
def index(self, item, start=0, stop=None):
err = ValueError(f'{item} is not in list')
item = self._check_item_type(item)
if item is None:
raise err
cdef (int, int, int) indices = slice(start, stop).indices(deref(self._cpp_obj).size())
cdef string citem = item
cdef __optional[size_t] found = __list_index[vector[string]](self._cpp_obj, indices[0], indices[1], citem)
if not found.has_value():
raise err
return found.value()
def count(self, item):
item = self._check_item_type(item)
if item is None:
return 0
cdef string citem = item
return __list_count[vector[string]](self._cpp_obj, citem)
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__List__binary()
Sequence.register(List__binary)
@__cython.auto_pickle(False)
cdef class Set__binary(thrift.py3.types.Set):
def __init__(self, items=None):
if isinstance(items, Set__binary):
self._cpp_obj = (<Set__binary> items)._cpp_obj
else:
self._cpp_obj = Set__binary._make_instance(items)
@staticmethod
cdef _fbthrift_create(shared_ptr[cset[string]] c_items):
__fbthrift_inst = <Set__binary>Set__binary.__new__(Set__binary)
__fbthrift_inst._cpp_obj = cmove(c_items)
return __fbthrift_inst
def __copy__(Set__binary self):
cdef shared_ptr[cset[string]] cpp_obj = make_shared[cset[string]](
deref(self._cpp_obj)
)
return Set__binary._fbthrift_create(cmove(cpp_obj))
def __len__(self):
return deref(self._cpp_obj).size()
@staticmethod
cdef | Cython |
shared_ptr[cset[string]] _make_instance(object items) except *:
cdef shared_ptr[cset[string]] c_inst = make_shared[cset[string]]()
if items is not None:
if isinstance(items, str):
raise TypeError("If you really want to pass a string into a _typing.AbstractSet[bytes] field, explicitly convert it first.")
for item in items:
if not isinstance(item, bytes):
raise TypeError(f"{item!r} is not of type bytes")
deref(c_inst).insert(item)
return c_inst
def __contains__(self, item):
if not self or item is None:
return False
if not isinstance(item, bytes):
return False
return pbool(deref(self._cpp_obj).count(item))
def __iter__(self):
if not self:
return
cdef __set_iter[cset[string]] itr = __set_iter[cset[string]](self._cpp_obj)
cdef string citem
for i in range(deref(self._cpp_obj).size()):
itr.genNext(self._cpp_obj, citem)
yield bytes(citem)
def __hash__(self):
return super().__hash__()
def __richcmp__(self, other, int op):
if isinstance(other, Set__binary):
# C level comparisons
return __setcmp(
self._cpp_obj,
(<Set__binary> other)._cpp_obj,
op,
)
return self._fbthrift_py_richcmp(other, op)
cdef _fbthrift_do_set_op(self, other, __cSetOp op):
if not isinstance(other, Set__binary):
other = Set__binary(other)
cdef shared_ptr[cset[string]] result
return Set__binary._fbthrift_create(__set_op[cset[string]](
self._cpp_obj,
(<Set__binary>other)._cpp_obj,
op,
))
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__Set__binary()
Set.register(Set__binary)
@__cython.auto_pickle(False)
cdef class List__AnEnum(thrift.py3.types.List):
def __init__(self, items=None):
if isinstance(items, List__AnEnum):
self._cpp_obj = (<List__AnEnum> items)._cpp_obj
else:
self._cpp_obj = List__AnEnum._make_instance(items)
@staticmethod
cdef _fbthrift_create(shared_ptr[vector[cAnEnum]] c_items):
__fbthrift_inst = <List__AnEnum>List__AnEnum.__new__(List__AnEnum)
__fbthrift_inst._cpp_obj = cmove(c_items)
return __fbthrift_inst
def __copy__(List__AnEnum self):
cdef shared_ptr[vector[cAnEnum]] cpp_obj = make_shared[vector[cAnEnum]](
deref(self._cpp_obj)
)
return List__AnEnum._fbthrift_create(cmove(cpp_obj))
def __len__(self):
return deref(self._cpp_obj).size()
@staticmethod
cdef shared_ptr[vector[cAnEnum]] _make_instance(object items) except *:
cdef shared_ptr[vector[cAnEnum]] c_inst = make_shared[vector[cAnEnum]]()
if items is not None:
for item in items:
if not isinstance(item, AnEnum):
raise TypeError(f"{item!r} is not of type AnEnum")
deref(c_inst).push_back(<cAnEnum><int>item)
return c_inst
cdef _get_slice(self, slice index_obj):
cdef int start, stop, step
start, stop, step = index_obj.indices(deref(self._cpp_obj).size())
return List__AnEnum._fbthrift_create(
__list_slice[vector[cAnEnum]](self._cpp_obj, start, stop, step)
)
cdef _get_single_item(self, size_t index):
cdef cAnEnum citem
__list_getitem(self._cpp_obj, index, citem)
return translate_cpp_enum_to_python(AnEnum, <int> citem)
cdef _check_item_type(self, item):
if not self or item is None:
return
if isinstance(item, AnEnum):
return item
def index(self, item, start=0, stop=None):
err = ValueError(f'{item} is not in list')
item = self._check_item_type(item)
if item is None:
raise err
cdef (int, int, int) indices = slice(start, stop).indices(deref(self._cpp_obj).size())
cdef cAnEnum citem = <cAnEnum><int>item
cdef __optional[size_t] found = __list_index[vector[cAnEnum]](self._cpp_obj, indices[0], indices[1], citem)
if not found.has_value():
raise err
return found.value()
def count(self, item):
item = self._check_item_type(item)
if item is None:
return 0
cdef cAnEnum citem = <cAnEnum><int>item
return __list_count[vector[cAnEnum]](self._cpp_obj, citem)
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__List__AnEnum()
Sequence.register(List__AnEnum)
@__cython.auto_pickle(False)
cdef class Map__i32_double(thrift.py3.types.Map):
def __init__(self, items=None):
if isinstance(items, Map__i32_double):
self._cpp_obj = (<Map__i32_double> items)._cpp_obj
else:
self._cpp_obj = Map__i32_double._make_instance(items)
@staticmethod
cdef _fbthrift_create(shared_ptr[cmap[cint32_t,double]] c_items):
__fbthrift_inst = <Map__i32_double>Map__i32_double.__new__(Map__i32_double)
__fbthrift_inst._cpp_obj = cmove(c_items)
return __fbthrift_inst
def __copy__(Map__i32_double self):
cdef shared_ptr[cmap[cint32_t,double]] cpp_obj = make_shared[cmap[cint32_t,double]](
deref(self._cpp_obj)
)
return Map__i32_double._fbthrift_create(cmove(cpp_obj))
def __len__(self):
return deref(self._cpp_obj).size()
@staticmethod
cdef shared_ptr[cmap[cint32_t,double]] _make_instance(object items) except *:
cdef shared_ptr[cmap[cint32_t,double]] c_inst = make_shared[cmap[cint32_t,double]]()
if items is not None:
for key, item in items.items():
if not isinstance(key, int):
raise TypeError(f"{key!r} is not of type int")
key = <cint32_t> key
if not isinstance(item, (float, int)):
raise TypeError(f"{item!r} is not of type float")
deref(c_inst)[key] = item
return c_inst
cdef _check_key_type(self, key):
if not self or key is None:
return
if isinstance(key, int):
return key
def __getitem__(self, key):
err = KeyError(f'{key}')
key = self._check_key_type(key)
if key is None:
raise err
cdef cint32_t ckey = key
if not __map_contains(self._cpp_obj, ckey):
raise err
cdef double citem = 0
__map_getitem(self._cpp_obj, ckey, citem)
return citem
def __iter__(self):
if not self:
return
cdef __map_iter[cmap[cint32_t,double]] itr = __map_iter[cmap[cint32_t,double]](self._cpp_obj)
cdef cint32_t citem = 0
for i in range(deref(self._cpp_obj).size()):
itr.genNextKey(self._cpp_obj, citem)
yield citem
def __contains__(self, key):
key = self._check_key_type(key)
if key is None:
return False
cdef cint32_t ckey = key
return __map_contains(self._cpp_obj, ckey)
def values(self):
if not self:
return
cdef __map_iter[cmap[cint32_t,double]] itr = __map_iter[cmap[cint32_t,double]](self._cpp_obj)
cdef double citem = 0
for i in range(deref(self._cpp_obj).size()):
itr.genNextValue(self._cpp_obj, citem)
yield citem
def items(self):
if not self:
return
cdef __map_iter[cmap[cint32_t,double]] itr = __map_iter[cmap[cint32_t,double]](self._cpp_obj)
cdef cint32_t ckey = 0
cdef double citem = 0
for i in range(deref(self._cpp_obj).size()):
itr.genNextItem(self._cpp_obj, ckey, citem)
yield (ckey, citem)
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__Map__i32_double()
Mapping.register(Map__i32_double)
@__cython.auto_pickle(False)
cdef class List__Map__i32_double(thrift.py3.types.List):
def __init__(self, items=None):
if isinstance(items, List__Map__i32_double):
self._cpp_obj = (<List__Map__i32_double> items)._cpp_obj
else:
self._cpp_obj = List__Map__i32_double._make_instance(items)
@staticmethod
cdef _fbthrift_create(shared_ptr[vector[cmap[cint32_t,double]]] c_items):
__fbthrift_inst = <List__Map__i32_double>List__Map__i32_double.__new__(List__Map__i32_double)
__fbthrift_inst._cpp_obj = cmove(c_items)
return __fbthrift_inst
def __copy__(List__Map__i32_double self):
cdef shared_ptr[vector[cmap[cint32_t,double]]] cpp_obj = make_shared[vector[cmap[cint32_t,double]]](
deref(self._cpp_obj)
)
return List__Map__i32_double._fbthrift_create(cmove(cpp_obj))
def __len__(self):
return deref(self._cpp_obj).size()
@staticmethod
cdef shared_ptr[vector[cmap[cint32_t,double]]] _make_instance(object items) except *:
cdef shared_ptr[vector[cmap[cint32_t,double]]] c_inst = make_shared[vector[cmap[cint32_t,double]]]()
if items is not None:
for item in items:
if item is None:
raise TypeError("None is not of the type _typing.Mapping[int, float]")
if not isinstance(item, Map__i32_double):
item = Map__i32_double(item)
deref(c_inst).push_back(deref((<Map__i32_double>item)._cpp_obj))
return c_inst
cdef _get_slice(self, slice index_obj):
cdef int start, stop, step
start, stop, step = index_obj.indices(deref(self._cpp_obj).size())
return List__Map__i32_double._fbthrift_create(
__list_slice[vector[cmap[cint32_t,double]]](self._cpp_obj, start, stop, step)
)
cdef _get_single_item(self, size_t index):
cdef shared_ptr[cmap[cint32_t,double]] citem
__list_getitem(self._cpp_obj, index, citem)
return Map__i32_double._fbthrift_create(citem)
cdef _check_item_type(self, item):
if not self or item is None:
return
if isinstance(item, Map__i32_double):
return item
try:
return Map__i32_double(item)
except:
pass
def index(self, item, start=0, stop=None):
err = ValueError(f'{item} is not in list')
item = self._check_item_type(item)
if item is None:
raise err
cdef (int, int, int) indices = slice(start, stop).indices(deref(self._cpp_obj).size())
cdef cmap[cint32_t,double] citem = deref((<Map__i32_double>item)._cpp_obj)
cdef __optional[size_t] found = __list_index[vector[cmap[cint32_t,double]]](self._cpp_obj, indices[0], indices[1], citem)
if not found.has_value():
raise err
return found.value()
def count(self, item):
item = self._check_item_type(item)
if item is None:
return 0
cdef cmap[cint32_t,double] citem = deref((<Map__i32_double>item)._cpp_obj)
return __list_count[vector[cmap[cint32_t,double]]](self._cpp_obj, citem)
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__List__Map__i32_double()
Sequence.register(List__Map__i32_double)
@__cython.auto_pickle(False)
cdef class Map__AnEnumRenamed_i32(thrift.py3.types.Map):
def __init__(self, items=None):
if isinstance(items, Map__AnEnumRenamed_i32):
self._cpp_obj = (<Map__AnEnumRenamed_i32> items)._cpp_obj
else:
self._cpp_obj = Map__AnEnumRenamed_i32._make_instance(items)
@staticmethod
cdef _fbthrift_create(shared_ptr[cmap[cAnEnumRenamed,cint32_t]] c_items):
__fbthrift_inst = <Map__AnEnumRenamed_i32>Map__AnEnumRenamed_i32.__new__(Map__AnEnumRenamed_i32)
__fbthrift_inst._cpp_obj = cmove(c_items)
return __fbthrift_inst
def __copy__(Map__AnEnumRenamed_i32 self):
cdef shared_ptr[cmap[cAnEnumRenamed,cint32_t]] cpp_obj = make_shared[cmap[cAnEnumRenamed,cint32_t]](
deref(self._cpp_obj)
)
return Map__AnEnumRenamed_i32._fbthrift_create(cmove(cpp_obj))
def __len__(self):
return deref(self._cpp_obj).size()
@staticmethod
cdef shared_ptr[cmap[cAnEnumRenamed,cint32_t]] _make_instance(object items) except *:
cdef shared_ptr[cmap[cAnEnumRenamed,cint32_t]] c_inst = make_shared[cmap[cAnEnumRenamed,cint32_t]]()
if items is not None:
for key, item in items.items():
if not isinstance(key, AnEnumRenamed):
raise TypeError(f"{key!r} is not of type AnEnumRenamed")
if not isinstance(item, int):
raise TypeError(f"{item!r} is not of type int")
item = <cint32_t> item
deref(c_inst)[<cAnEnumRenamed><int>key] = item
return c_inst
cdef _check_key_type(self, key):
if not self or key is None:
return
if isinstance(key, AnEnumRenamed):
return key
def __getitem__(self, key):
err = KeyError(f'{key}')
key = self._check_key_type(key)
if key is None:
raise err
cdef cAnEnumRenamed ckey = <cAnEnumRenamed><int>key
if not __map_contains(self._cpp_obj, ckey):
raise err
cdef cint32_t citem = 0
__map_getitem(self._cpp_obj, ckey, citem)
return citem
def __iter__(self):
if not self:
return
cdef __map_iter[cmap[cAnEnumRenamed,cint32_t]] itr = __map_iter[cmap[cAnEnumRenamed,cint32_t]](self._cpp_obj)
cdef cAnEnumRenamed citem
for i in range(deref(self._cpp_obj).size()):
itr.genNextKey(self._cpp_obj, citem)
yield translate_cpp_enum_to_python(AnEnumRenamed, <int> citem)
def __contains__(self, key):
key = self._check_key_type(key)
if key is None:
return False
cdef cAnEnumRenamed ckey = <cAnEnumRenamed><int>key
return __map_contains(self._cpp_obj, ckey)
def values(self):
if not self:
return
cdef __map_iter[cmap[cAnEnumRenamed,cint32_t]] itr = __map_iter[cmap[cAnEnumRenamed,cint32_t]](self._cpp_obj)
cdef cint32_t citem = 0
for i in range(deref(self._cpp_obj).size()):
itr.genNextValue(self._cpp_obj, citem)
yield citem
def items(self):
if not self:
return
cdef __map_iter[cmap[cAnEnumRenamed,cint32_t]] itr = __map_iter[cmap[cAnEnumRenamed,cint32_t]](self._cpp_obj)
cdef cAnEnumRenamed ckey
cdef cint32_t citem = 0
for i in range(deref(self._cpp_obj).size()):
itr.genNextItem(self._cpp_obj, ckey, citem)
yield (translate_cpp_enum_to_python(AnEnumRenamed, <int> ckey), citem)
@staticmethod
def __get_reflection__():
return _types_reflection.get_reflection__Map__AnEnumRenamed_i32()
Mapping.register(Map__AnEnumRenamed_i32)
A_BOOL = True
A_BYTE = 8
THE_ANSWER = 42
A_NUMBER = 84
A_BIG_NUMBER = 102
A_REAL_NUMBER = 3.14
A_FAKE_NUMBER = 3.0
A_WORD = cA_WORD().decode('UTF-8')
SOME_BYTES = <bytes> cSOME_BYTES()
A_STRUCT = SimpleStruct._fbthrift_create(constant_shared_ptr(cA_STRUCT()))
WORD_LIST = List__string._fbthrift_create(constant_shared_ptr(cWORD_LIST()))
SOME_MAP = List__Map__i32_double._fbthrift_create(constant_shared_ptr(cSOME_MAP()))
DIGITS = Set__i32._fbthrift_create(constant_shared_ptr(cDIGITS()))
A_CONST_MAP = Map__string_SimpleStruct._fbthrift_create(constant_shared_ptr(cA_CONST_MAP()))
ANOTHER_CONST_MAP = Map__AnEnumRenamed_i32._fbthrift_create(constant_shared_ptr(cANOTHER_CONST_MAP()))
IOBufPtr = _fbthrift_iobuf.IOBuf
IOBuf = _fbthrift_iobuf.IOBuf
foo_bar = bytes
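# Usage sketch (illustrative, not part of the generated module): container
# constructors type-check and deep-copy their input in _make_instance, so the
# resulting objects are immutable snapshots of the underlying C++ data.
#     m = Map__string_i32({"a": 1, "b": 2})          # keys/values checked here
#     assert m["a"] == 1 and "b" in m and len(m) == 2
#     nested = Map__string_Map__string_i32({"outer": m})   # inner map copied in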
<|end_of_text|>import numpy as np
cimport numpy as np
cimport cython
import random
cdef extern from "math.h":
double exp(double x)
double log(double x)
cdef inline int int_abs(int a): return a if a > 0 else -a
@cython.boundscheck(False)
def hmc_main_loop(f, np.ndarray[np.double_t, ndim=1] x, gradf, args,
np.ndarray[np.double_t, ndim=1] p,
np.ndarray[np.double_t, ndim=2] samples,
np.ndarray[np.double_t, ndim=1] energies,
np.ndarray[np.double_t, ndim=2] diagn_pos,
np.ndarray[np.double_t, ndim=2] diagn_mom,
np.ndarray[np.double_t, ndim=1] diagn_acc,
int opt_nsamples, int opt_nomit, int opt_window,
int opt_steps, int opt_display,
int opt_persistence, int return_energies, int return_diagnostics,
double alpha, double salpha, double epsilon):
cdef int nparams = x.shape[0]
cdef int nreject = 0 # number of rejected samples
cdef int window_offset = 0 # window offset initialised to zero
    cdef int k = -opt_nomit # nomit samples are omitted, so we store samples only once k >= 0
cdef int n, stps, direction, have_rej, have_acc
cdef double a, E, Eold, E_acc, E_rej, H, Hold, acc_free_energy, rej_free_energy
cdef unsigned int i, j, m
cdef np.ndarray[np.double_t] xold = np.zeros(nparams)
cdef np.ndarray[np.double_t] pold = np.zeros(nparams)
cdef np.ndarray[np.double_t] x_acc = np.zeros(nparams)
cdef np.ndarray[np.double_t] p_acc = np.zeros(nparams)
cdef np.ndarray[np.double_t] x_rej = np.zeros(nparams)
cdef np.ndarray[np.double_t] p_rej = np.zeros(nparams)
cdef np.ndarray[np.double_t] ptmp = np.zeros(nparams)
rand = random.random
randn = random.normalvariate
# Evaluate starting energy.
E = f(x, *args)
while k < opt_nsamples: # samples from k >= 0
# Store starting position and momenta
for i in range(nparams): xold[i] = x[i]
for i in range(nparams): pold[i] = p[i]
# Recalculate Hamiltonian as momenta have changed
Eold = E
# Hold = E + 0.5*(p*p')
Hold = E
        for i in range(nparams): Hold += 0.5*p[i]**2
# Decide on window offset, if windowed HMC is used
if opt_window > 1:
# window_offset=fix(opt_window*rand(1));
window_offset = int(opt_window*rand())
have_rej = 0
have_acc = 0
n = window_offset
direction = -1 # the default value for direction
# assumes that windowing is used
        while direction == -1 or n != opt_steps:
            # if windowing is not used or we have already taken
            # window_offset steps backwards...
if direction == -1 and n == 0:
# Restore, next state should be original start state.
if window_offset > 0:
for i in range(nparams): x[i] = xold[i]
for i in range(nparams): p[i] = pold[i]
n = window_offset
# set direction for forward steps
E = Eold
H = Hold
direction = 1
stps = direction
else:
if n*direction+1<opt_window or n > (opt_steps-opt_window):
# State in the accept and/or reject window.
stps = direction
else:
# State not in the accept and/or reject window.
stps = opt_steps-2*(opt_window-1)
# First half-step of leapfrog.
# p = p - direction*0.5*epsilon.*feval(gradf, x, varargin{:});
p = p - direction*0.5*epsilon*gradf(x, *args)
for i in range(nparams): x[i] += direction*epsilon*p[i]
# Full leapfrog steps.
# for m = 1:(abs(stps)-1):
for m in range(int_abs(stps)-1):
# p = p - direction*epsilon.*feval(gradf, x, varargin{:});
p = p - direction*epsilon*gradf(x, *args)
for i in range(nparams): x[i] += direction*epsilon*p[i]
# Final half-step of leapfrog.
# p = p - direction*0.5*epsilon.*feval(gradf, x, varargin{:});
p = p - direction*0.5*epsilon*gradf(x, *args)
# E = feval(f, x, varargin{:});
E = f(x, *args)
# H = E + 0.5*(p*p');
H = E
for i in range(nparams): H += 0.5*p[i]**2
n += stps
            if opt_window != opt_steps+1 and n < opt_window:
# Account for state in reject window. Reject window can be
# ignored if windows consist of the entire trajectory.
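                # The window "free energy" accumulated below is
                # F = -log(sum over window states of exp(-H)), built up
                # pairwise with addlogs; a state is kept as the window
                # representative with probability exp(F - H), i.e. in
                # proportion to its Boltzmann weight within the window.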
if not have_rej:
rej_free_energy = H
else:
rej_free_energy = -addlogs(-rej_free_energy, -H)
if not have_rej or rand() < exp(rej_free_energy-H):
E_rej = E
for i in range(nparams): x_rej[i] = x[i]
for i in range(nparams): p_rej[i] = p[i]
have_rej = 1
if n > (opt_steps-opt_window):
# Account for state in the accept window.
if not have_acc:
acc_free_energy = H
else:
acc_free_energy = -addlogs(-acc_free_energy, -H)
if not have_acc or rand() < exp(acc_free_energy-H):
E_acc = E
for i in range(nparams): x_acc[i] = x[i]
for i in range(nparams): p_acc[i] = p[i]
have_acc = 1
# Acceptance threshold.
a = exp(rej_free_energy - acc_free_energy)
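        # Windowed acceptance (in the style of Neal's windowed HMC): since
        # F = -log(Z), this ratio equals Z_accept/Z_reject, the ratio of the
        # accept- and reject-window partition functions, and plays the role
        # of exp(Hold - H) in plain HMC.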
if return_diagnostics and k >= 0:
j = k
for i in range(nparams):
diagn_pos[j,i] = x_acc[i]
diagn_mom[j,i] = p_acc[i]
diagn_acc[j] = a
        if opt_display:
            print('New position is\n', x)
# Take new state from the appropriate window.
if a > rand():
# Accept
E = E_acc
for i in range(nparams): x[i] = x_acc[i]
for i in range(nparams): p[i] = -p_acc[i] # Reverse momenta
            if opt_display:
                print('Finished step %4d Threshold: %g\n' % (k, a))
else:
# Reject
if k >= 0:
nreject = nreject + 1
E = E_rej
for i in range(nparams): x[i] = x_rej[i]
for i in range(nparams): p[i] = p_rej[i]
            if opt_display:
                print(' Sample rejected %4d. Threshold: %g\n' % (k, a))
if k >= 0:
j = k
# Store sample
for i in range(nparams): samples[j,i] = x[i]
if return_energies:
# Store energy
energies[j] = E
# Set momenta for next iteration
if opt_persistence:
# Reverse momenta
for i in range(nparams): p[i] = -p[i]
# Adjust momenta by a small random amount
for i in range(nparams): p[i] = alpha*p[i]+salpha*<double>randn(0,1)
else:
# Replace all momenta
for i in range(nparams): p[i] = randn(0,1)
k += 1
# log(exp(a) + exp(b)) computed stably via the log-sum-exp trick
cdef double addlogs(double a, double b):
if a>b:
return a + log(1+exp(b-a))
else:
return b + log(1+exp(a-b))
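# Minimal driver sketch (not part of the original module; names and values are
# illustrative). For a standard 1-D Gaussian, f is the negative log-density up
# to a constant and gradf its gradient:
#     import numpy as np
#     def f(x): return 0.5 * float(x[0] * x[0])
#     def gradf(x): return x.copy()
#     ns, nomit, d = 1000, 100, 1
#     samples = np.zeros((ns, d)); energies = np.zeros(ns)
#     dpos = np.zeros((ns, d)); dmom = np.zeros((ns, d)); dacc = np.zeros(ns)
#     x = np.zeros(d); p = np.random.randn(d)
#     hmc_main_loop(f, x, gradf, (), p, samples, energies, dpos, dmom, dacc,
#                   ns, nomit, 2, 20, 0, 0, 1, 0, 0.9, (1 - 0.9**2) ** 0.5, 0.05)
#     # samples now holds ns draws; energies the matching f(x) values
#     # (alpha and salpha only matter when opt_persistence=1)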
<|end_of_text|># cython_utils.pyx -- Utility file containing most of the DRK functions
#
# Copyright (C) <2016> <Kevin Deweese>
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
# cython: profile=False
# cython: boundscheck=False
# cython: wraparound=False
# cython: cdivision=True
import numpy
cimport numpy
import scipy
import random
from libc.stdlib cimport rand,RAND_MAX,srand,malloc,free
cdef extern from "../C/utils.c":
void update_c(numpy.int8_t *Fdata,
int *Findices,
int *Findptr,
numpy.float64_t *R,
numpy.float64_t *flow,
numpy.float64_t resist_mod,
int m, int num_cycles, int cycle,
int tracker, long *edges)
int binary_search_c(numpy.float64_t *probs,
numpy.float64_t target,
int start,
int stop)
void induce_voltage_c(numpy.float64_t *flow,
numpy.float64_t *Bdata,
int *Bindices,
int *Bindptr,
numpy.float64_t *R,
int *parents,
int *gedge,
long *sorteddepth,
numpy.float64_t *v,
int n)
numpy.float64_t relative_residual_c(numpy.float64_t *v,
numpy.float64_t *b,
numpy.float64_t *Ldata,
int *Lindices,
int *Lindptr,
int n)
void get_probs_c(numpy.int8_t *Fdata,
int *Findices,
int *Findptr,
numpy.float64_t *R,
numpy.float64_t *probs,
numpy.float64_t resist_mod,
int m,
int num_cycles)
void initialize_flow_c(numpy.float64_t *Bdata,
int *Bindices,
int *Bindptr,
numpy.float64_t *b,
int *parents,
long *sorteddepth,
int *gedge,
numpy.float64_t *flow,
int n, int m)
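# Draw a uniform variate in [0, 1] from the C library rand(); the solvers
# below feed it to binary_search_c to pick a cycle from the cumulative
# probability table (inverse-CDF sampling).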
cdef get_random():
cdef numpy.float64_t random_var = rand()
return random_var/RAND_MAX
# Python wrapper to call the C solve functions
def solve_wrapper(numpy.ndarray[numpy.int8_t, ndim=1] Fdata,
numpy.ndarray[int, ndim=1] Findices,
numpy.ndarray[int, ndim=1] Findptr,
numpy.ndarray[numpy.float64_t, ndim=1] Bdata,
numpy.ndarray[int, ndim=1] Bindices,
numpy.ndarray[int, ndim=1] Bindptr,
numpy.ndarray[numpy.float64_t, ndim=1] R, numpy.float64_t resist_mod,
numpy.ndarray[numpy.float64_t, ndim=1] Ldata,
numpy.ndarray[int, ndim=1] Lindices,
numpy.ndarray[int, ndim=1] Lindptr,
numpy.ndarray[numpy.float64_t, ndim=1] b,
numpy.ndarray[numpy.float64_t, ndim=1] probs,
numpy.ndarray[numpy.float64_t, ndim=1] flow,
numpy.ndarray[int, ndim=1] parents,
numpy.ndarray[int, ndim=1] depth,
numpy.ndarray[int, ndim=1] gedge,
double tolerance, int processors,
numpy.ndarray[numpy.float64_t, ndim=1] known_solution,
int useres, int maxiters, int tracker):
cdef int n=len(b)
cdef numpy.ndarray[numpy.float64_t] stats = numpy.zeros(10)
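    # stats layout, filled in by the solvers below:
    # [0] edges updated, [1] sum of log2(n) per projection, [2] sum of
    # log2(edges updated), [3] projections, [4] total span, [5] total log
    # span, [6] total log edge span, [7] iterations, [8] other edges,
    # [9] final error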
cdef numpy.ndarray[numpy.float64_t, ndim=1] v = numpy.zeros(n)
if(maxiters > -1):
solve_fixediters(Fdata,Findices,Findptr,Bdata,Bindices,Bindptr,R,resist_mod,Ldata,Lindices,Lindptr,b,v,probs,flow,parents,depth,gedge,maxiters,processors,stats,known_solution,useres,tracker)
else:
solve_fixedtol(Fdata,Findices,Findptr,Bdata,Bindices,Bindptr,
R,resist_mod,Ldata,Lindices,Lindptr,b,v,probs,
flow,parents,depth,gedge,tolerance,processors,stats,known_solution,useres,tracker)
return(stats[0],stats[1],stats[2],stats[3],stats[4],stats[5],stats[6],
stats[7],stats[8],stats[9],v)
# solves dual system to a fixed tolerance
cdef solve_fixedtol(numpy.ndarray[numpy.int8_t, ndim=1] Fdata,
numpy.ndarray[int, ndim=1] Findices,
numpy.ndarray[int, ndim=1] Findptr,
numpy.ndarray[numpy.float64_t, ndim=1] Bdata,
numpy.ndarray[int, ndim=1] Bindices,
numpy.ndarray[int, ndim=1] Bindptr,
numpy.ndarray[numpy.float64_t, ndim=1] R, numpy.float64_t resist_mod,
numpy.ndarray[numpy.float64_t, ndim=1] Ldata,
numpy.ndarray[int, ndim=1] Lindices,
numpy.ndarray[int, ndim=1] Lindptr,
numpy.ndarray[numpy.float64_t, ndim=1] b,
numpy.ndarray[numpy.float64_t, ndim=1] v,
numpy.ndarray[numpy.float64_t, ndim=1] probs,
numpy.ndarray[numpy.float64_t, ndim=1] flow,
numpy.ndarray[int, ndim=1] parents,
numpy.ndarray[int, ndim=1] depth,
numpy.ndarray[int, ndim=1] gedge,
double tolerance, int processors,
numpy.ndarray[numpy.float64_t, ndim=1] stats,
numpy.ndarray[numpy.float64_t, ndim=1] known_solution,
int useres, int tracker):
cdef int iteration = 0
cdef int i,j
cdef int m = len(flow)
cdef int n = len(b)
cdef double relres = 100000
    cdef long edges_updated = 0
    cdef long other_edges = 0
cdef long logn_times_iters=0
cdef long log_edges_updated=0
cdef long projections = 0
cdef numpy.ndarray[long, ndim=1] temp_edges =numpy.zeros(2,dtype=int)
cdef numpy.ndarray[long, ndim=1] sorteddepth = numpy.argsort(depth)
cdef numpy.ndarray[long, ndim=1] used = numpy.zeros(m,dtype=int)
cdef numpy.float64_t randomvar=0
cdef int cycle
cdef int num_cycles = len(Findptr)-1
cdef int clash = 0
cdef long maxspan = 0
cdef long totalspan=0
cdef long maxlogspan = 0
cdef long totallogspan=0
cdef long totallogespan=0
cdef long maxlogespan=0
cdef numpy.float64_t err = 100000
while(err > tolerance):
maxspan=0
maxlogspan=0
maxlogespan=0
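        # sample a cycle index with probability proportional to its weight in
        # the cumulative probs table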
cycle = binary_search_c(&probs[0],get_random(),0,num_cycles-1)
#update(Fdata,Findices,Findptr,R,resist_mod,probs,flow,m,num_cycles,cycle,temp_edges)
update_c(&Fdata[0],&Findices[0],&Findptr[0],&R[0],&flow[0],resist_mod,m,num_cycles,cycle,tracker,&temp_edges[0])
if(processors > 1):
            for i in range(0,m):
                used[i]=0
            for i in range(Findptr[cycle],Findptr[cycle+1]):
                used[Findices[i]]=1
iteration+=1
if(tracker==1):
if(temp_edges[0]!=0):
edges_updated+=temp_edges[0]
log_edges_updated+=numpy.log2(temp_edges[0])
maxspan=temp_edges[0]
maxlogspan=numpy.log2(n)
logn_times_iters+=numpy.log2(n)
maxlogespan=numpy.log2(temp_edges[0])
projections+=1
if(temp_edges[1]!=0):
other_edges+=temp_edges[1]
log_edges_updated+=numpy.log2(temp_edges[1])
maxspan=temp_edges[1]
maxlogspan=temp_edges[1]
maxlogespan=numpy.log2(temp_edges[1])
projections+=1
temp_edges[0]=0
temp_edges[1]=0
        for i in range(1,processors):
clash = 0
cycle = binary_search_c(&probs[0],get_random(),0,num_cycles-1)
            for j in range(Findptr[cycle],Findptr[cycle+1]):
                if(used[Findices[j]]==1):
clash = 1
break
else:
used[Findices[j]]=1
if clash:
continue
else:
update_c(&Fdata[0],&Findices[0],&Findptr[0],&R[0],&flow[0],resist_mod,m,num_cycles,cycle,tracker,&temp_edges[0])
if(tracker==1):
if(temp_edges[0]!=0):
edges_updated+=temp_edges[0]
log_edges_updated+=numpy.log2(temp_edges[0])
if(temp_edges[0] > maxspan):
maxspan=temp_edges[0]
if(numpy.log2(n) > maxlogspan):
maxlogspan = numpy.log2(n)
logn_times_iters+=numpy.log2(n)
projections+=1
if(numpy.log2(temp_edges[0]) > maxlogespan):
maxlogespan=numpy.log2(temp_edges[0])
if(temp_edges[1]!=0):
other_edges+=temp_edges[1]
log_edges_updated+=numpy.log2(temp_edges[1])
if(temp_edges[1] > maxspan):
maxspan=temp_edges[1]
if(temp_edges[1] > maxlogspan):
maxlogspan=temp_edges[1]
projections+=1
if(numpy.log2(temp_edges[1]) > maxlogespan):
maxlogespan=numpy.log2(temp_edges[1])
temp_edges[0]=0
temp_edges[1]=0
totalspan=totalspan+maxspan
totallogspan=totallogspan+maxlogspan
totallogespan=totallogespan+maxlogespan
if(numpy.mod(iteration,n)==0):
induce_voltage_c(&flow[0],&Bdata[0],&Bindices[0],&Bindptr[0],&R[0],&parents[0],&gedge[0],&sorteddepth[0],&v[0],n)
if(useres==1):
err=relative_residual_c(&v[0],&b[0],&Ldata[0],&Lindices[0],&Lindptr[0],n)
else:
err=numpy.linalg.norm(v-numpy.mean(v) - known_solution)/numpy.linalg.norm(known_solution)
stats[0]=edges_updated
stats[1]=logn_times_iters
stats[2]=log_edges_updated
stats[3]=projections
stats[4]=totalspan
stats[5]=totallogspan
stats[6]=totallogespan
stats[7]=iteration
stats[8]=other_edges
stats[9]=err
# Solve dual system to a fixed number of iterations
cdef solve_fixediters(numpy.ndarray[numpy.int8_t, ndim=1] Fdata,
numpy.ndarray[int, ndim=1] Findices,
numpy.ndarray[int, ndim=1] Findptr,
numpy.ndarray[numpy.float64_t, ndim=1] Bdata,
numpy.ndarray[int, ndim=1] Bindices,
numpy.ndarray[int, ndim=1] Bindptr,
numpy.ndarray[numpy.float64_t, ndim=1] R,
numpy.float64_t resist_mod,
numpy.ndarray[numpy.float64_t, ndim=1] Ldata,
numpy.ndarray[int, ndim=1] Lindices,
numpy.ndarray[int, ndim=1] Lindptr,
numpy.ndarray[numpy.float64_t, ndim=1] b,
numpy.ndarray[numpy.float64_t, ndim=1] v,
numpy.ndarray[numpy.float64_t, ndim=1] probs,
numpy.ndarray[numpy.float64_t, ndim=1] flow,
numpy.ndarray[int, ndim=1] parents,
numpy.ndarray[int, ndim=1] depth,
numpy.ndarray[int, ndim=1] gedge,
int maxiters, int processors,
numpy.ndarray[numpy.float64_t, ndim=1] stats,
numpy.ndarray[numpy.float64_t, ndim=1] known_solution,
int useres, int tracker):
cdef int iteration=0
cdef long edges_updated=0
cdef long log_edges_updated=0
cdef long logn_times_iters=0
cdef int num_cycles = len(Findptr)-1
    cdef long projections=0
    cdef long totalspan=0
cdef long maxspan = 0
cdef long maxlogspan = 0
cdef long totallogspan=0
cdef long totallogespan=0
cdef long maxlogespan=0
cdef long other_edges=0
    cdef numpy.ndarray[long, ndim=1] sorteddepth = numpy.argsort(depth)
    cdef numpy.ndarray[long, ndim=1] temp_edges = numpy.zeros(2,dtype=int)
    cdef int n = len(b)
    cdef int m = len(flow)
    cdef numpy.ndarray[long, ndim=1] used = numpy.zeros(m,dtype=int)
    cdef numpy.float64_t err = 100000
    cdef int cycle, clash, i, j
cycle = binary_search_c(&probs[0],get_random(),0,num_cycles-1)
while(iteration < maxiters):
maxspan=0
maxlogspan=0
maxlogespan=0
        if(processors>1):
            # Reset the per-iteration clash markers (the array is allocated once above)
            for i in xrange(0,m):
                used[i]=0
cycle = binary_search_c(&probs[0],get_random(),0,num_cycles-1)
update_c(&Fdata[0],&Findices[0],&Findptr[0],&R[0],&flow[0],resist_mod,m,num_cycles,cycle,tracker,&temp_edges[0])
        iteration+=1
        if(processors>1):
            for i in xrange(Findptr[cycle],Findptr[cycle+1]):
                used[Findices[i]]=1
        if(tracker==1):
if(temp_edges[0]!=0):
edges_updated+=temp_edges[0]
log_edges_updated+=numpy.log2(temp_edges[0])
maxspan=temp_edges[0]
maxlogspan=numpy.log2(n)
logn_times_iters+=numpy.log2(n)
maxlogespan=numpy.log2(temp_edges[0])
projections+=1
if(temp_edges[1]!=0):
other_edges+=temp_edges[1]
log_edges_updated+=numpy.log2(temp_edges[1])
maxspan=temp_edges[1]
maxlogspan=temp_edges[1]
maxlogespan=numpy.log2(temp_edges[1])
projections+=1
temp_edges[0]=0
temp_edges[1]=0
for i in xrange(1,processors):
clash = 0
cycle = binary_search_c(&probs[0],get_random(),0,num_cycles-1)
for j in xrange(Findptr[cycle],Findptr[cycle+1]):
if(used[Findices[j]]==1):
clash = 1
break
else:
used[Findices[j]]=1
if clash:
continue
else:
update_c(&Fdata[0],&Findices[0],&Findptr[0],&R[0],&flow[0],resist_mod,m,num_cycles,cycle,tracker,&temp_edges[0])
if(tracker==1):
if(temp_edges[0]!=0):
edges_updated+=temp_edges[0]
log_edges_updated+=numpy.log2(temp_edges[0])
if(temp_edges[0] > maxspan):
maxspan=temp_edges[0]
if(numpy.log2(n) > maxlogspan):
maxlogspan = numpy.log2(n)
logn_times_iters+=numpy.log2(n)
projections+=1
if(numpy.log2(temp_edges[0]) > maxlogespan):
maxlogespan=numpy.log2(temp_edges[0])
if(temp_edges[1]!=0):
other_edges+=temp_edges[1]
log_edges_updated+=numpy.log2(temp_edges[1])
if(temp_edges[1] > maxspan):
maxspan=temp_edges[1]
if(temp_edges[1] > maxlogspan):
maxlogspan=temp_edges[1]
projections+=1
if(numpy.log2(temp_edges[1]) > maxlogespan):
maxlogespan=numpy.log2(temp_edges[1])
temp_edges[0]=0
temp_edges[1]=0
totalspan=totalspan+maxspan
totallogspan=totallogspan+maxlogspan
totallogespan=totallogespan+maxlogespan
induce_voltage_c(&flow[0],&Bdata[0],&Bindices[0],&Bindptr[0],&R[0],&parents[0],&gedge[0],&sorteddepth[0],&v[0],n)
if(useres==1):
err=relative_residual_c(&v[0],&b[0],&Ldata[0],&Lindices[0],&Lindptr[0],n)
else:
err=numpy.linalg.norm(v-numpy.mean(v) - known_solution)/numpy.linalg.norm(known_solution)
stats[0]=edges_updated
stats[1]=logn_times_iters
stats[2]=log_edges_updated
stats[3]=projections
stats[4]=totalspan
stats[5]=totallogspan
stats[6]=totallogespan
stats[7]=iteration
stats[8]=other_edges
stats[9]=err
# initialize the flow vector
cpdef initialize_flow(numpy.ndarray[numpy.float64_t, ndim=1] Bdata,
numpy.ndarray[int, ndim=1] Bindptr,
numpy.ndarray[int, ndim=1] Bindices,
numpy.ndarray[numpy.float64_t,ndim=1] b,
numpy.ndarray[int, ndim=1] parents,
numpy.ndarray[int, ndim=1] depth,
numpy.ndarray[int, ndim=1] gedge,int m,
numpy.ndarray[numpy.float64_t, ndim=1] flow):
cdef int n = len(depth)
cdef numpy.ndarray[numpy.float64_t] bcopy = b.copy()
cdef numpy.ndarray[long, ndim=1] sorteddepth = numpy.argsort(depth)
initialize_flow_c(&Bdata[0],&Bindices[0],&Bindptr[0],&bcopy[0],&parents[0],&sorteddepth[0],&gedge[0],&flow[0],n,m)
# calculate the cycle probabilities
cpdef get_probs(numpy.ndarray[numpy.int8_t, ndim=1] Fdata,
numpy.ndarray[int, ndim=1] Findptr,
numpy.ndarray[int, ndim=1] Findices,
numpy.ndarray[numpy.float64_t, ndim=1] R,
numpy.float64_t resist_mod):
cdef int m = len(R)
cdef int num_cycles=len(Findptr)-1
cdef numpy.float64_t sum = 0
cdef numpy.ndarray[numpy.float64_t] probs = numpy.zeros(num_cycles)
get_probs_c(&Fdata[0],&Findices[0],&Findptr[0],&R[0],&probs[0],resist_mod,m,num_cycles)
return probs
# find tau of the tree
cpdef find_tau(numpy.ndarray[numpy.int8_t, ndim=1] Fdata,
numpy.ndarray[int, ndim=1] Findptr,
numpy.ndarray[int, ndim=1] Findices,
numpy.ndarray[numpy.float64_t, ndim=1] R):
cdef double tau = 0
cdef int m = len(R)
cdef int i,j
for i in xrange(0,m):
for j in xrange(Findptr[i],Findptr[i+1]):
if(Findices[j]!=i):
tau += R[Findices[j]]/R[i]
return tau
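# Hedged usage sketch (not part of the original module). Shows how the
# cpdef helpers above might be wired together; the CSR cycle/tree arrays
# (Fdata, Findices, Findptr, Bdata, Bindices, Bindptr, R, b, parents,
# depth, gedge) are hypothetical inputs prepared by the caller.
def _example_setup(Fdata, Findices, Findptr, Bdata, Bindices, Bindptr,
                   R, b, parents, depth, gedge, int m):
    probs = get_probs(Fdata, Findptr, Findices, R, 1.0)  # cycle sampling weights
    flow = numpy.zeros(m)
    initialize_flow(Bdata, Bindptr, Bindices, b, parents, depth, gedge, m, flow)
    tau = find_tau(Fdata, Findptr, Findices, R)  # tau of the tree (see find_tau above)
    return probs, flow, tau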
<|end_of_text|>include '../../types.pxi'
from cython.operator cimport dereference as deref
from libcpp cimport bool as cbool
from quantlib.time._period cimport Frequency
from quantlib.time.calendar cimport Calendar
from quantlib.time.daycounter cimport DayCounter
from quantlib.time.date cimport Date, date_from_qldate
from quantlib.compounding import Continuous
from quantlib.time.date import Annual
cimport _flat_forward as ffwd
cimport quantlib._quote as _qt
cimport quantlib._interest_rate as _ir
from quantlib.quotes cimport Quote
from quantlib.interest_rate cimport InterestRate
cdef class YieldTermStructure:
# FIXME: the relinkable stuff is really ugly. Do we need this on the python
# side?
def __cinit__(self):
self.relinkable = False
self._thisptr = NULL
self._relinkable_ptr = NULL
def __dealloc__(self):
if self._thisptr is not NULL:
del self._thisptr
if self._relinkable_ptr is not NULL:
del self._relinkable_ptr
def __init__(self, relinkable=True):
if relinkable:
self.relinkable = True
# Create a new RelinkableHandle to a YieldTermStructure within a
# new shared_ptr
self._relinkable_ptr = new \
shared_ptr[ffwd.RelinkableHandle[ffwd.YieldTermStructure]](
new ffwd.RelinkableHandle[ffwd.YieldTermStructure]()
)
else:
            # Initialize an empty shared_ptr. Might be dangerous!
self._thisptr = new shared_ptr[ffwd.YieldTermStructure]()
def link_to(self, YieldTermStructure structure):
if not self.relinkable:
raise ValueError('Non relinkable term structure!')
else:
self._relinkable_ptr.get().linkTo(deref(structure._thisptr))
return
def zero_rate(self, Date date, DayCounter day_counter, int compounding, int frequency=Annual, extrapolate=False):
""" Returns the implied zero-yield rate for the given date.
The time is calculated as a fraction of year from the reference date.
Parameters
----------
        date: :py:class:`~quantlib.time.date.Date`
            The date used to calculate the zero-yield rate.
        day_counter: :py:class:`~quantlib.time.daycounter.DayCounter`
            The day counter used to compute the time.
        compounding: int
            The compounding as defined in quantlib.compounding
        frequency: int
            A frequency as defined in quantlib.time.date
        extrapolate: bool, optional
            Defaults to False
"""
        cdef ffwd.YieldTermStructure* term_structure
        cdef shared_ptr[ffwd.YieldTermStructure] ts_ptr
        if self.relinkable is True:
            # Retrieve the shared_ptr (currentLink()) then get the
            # term_structure (get()), mirroring the workaround used in
            # discount() below; the direct one-liner
            # self._relinkable_ptr.get().currentLink().get() does not compile.
            ts_ptr = shared_ptr[ffwd.YieldTermStructure](self._relinkable_ptr.get().currentLink())
            term_structure = ts_ptr.get()
        else:
            term_structure = self._thisptr.get()
cdef _ir.InterestRate ql_zero_rate = term_structure.zeroRate(
deref(date._thisptr.get()), deref(day_counter._thisptr), <_ir.Compounding>compounding,
<_ir.Frequency>frequency, extrapolate)
zero_rate = InterestRate(0, None, 0, 0, noalloc=True)
zero_rate._thisptr = new shared_ptr[_ir.InterestRate](
new _ir.InterestRate(
ql_zero_rate.rate(),
ql_zero_rate.dayCounter(),
ql_zero_rate.compounding(),
ql_zero_rate.frequency()
)
)
return zero_rate
def discount(self, value):
cdef ffwd.YieldTermStructure* term_structure
cdef shared_ptr[ffwd.YieldTermStructure] ts_ptr
if self.relinkable is True:
# retrieves the shared_ptr (currentLink()) then gets the
# term_structure (get())
ts_ptr = shared_ptr[ffwd.YieldTermStructure](self._relinkable_ptr.get().currentLink())
term_structure = ts_ptr.get()
else:
term_structure = self._thisptr.get()
if isinstance(value, Date):
discount_value = term_structure.discount(
deref((<Date>value)._thisptr.get())
)
elif isinstance(value, float):
discount_value = term_structure.discount(
<Time>value
)
else:
raise ValueError('Unsupported value type')
return discount_value
property reference_date:
def __get__(self):
cdef ffwd.Date ref_date = self._thisptr.get().referenceDate()
return date_from_qldate(ref_date)
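# Hedged usage sketch (illustrative only, not part of the original file).
# `make_curve()` is a hypothetical factory for a concrete term structure;
# only link_to / zero_rate / discount are defined above.
#
#     ts = YieldTermStructure(relinkable=True)
#     ts.link_to(make_curve())
#     df = ts.discount(0.5)  # discount factor at t = 0.5 years
#     rate = ts.zero_rate(settlement_date, day_counter, Continuous)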
<|end_of_text|>#cython: language_level=3
cimport numpy as np
import numpy as np
ctypedef np.uint8_t uint8
ctypedef np.int32_t int32
cimport cython
@cython.boundscheck(False)
@cython.wraparound(False)
def get_binary_masks(uint8[:,:,:] instance_mask, int[:] instance_sizes, unsigned short[:] instance_ids, unsigned short[:,:] instance_im):
    for i in range(instance_im.shape[0]):
        for j in range(instance_im.shape[1]):
            for l in range(instance_ids.shape[0]):
if instance_ids[l] == instance_im[i, j]:
instance_mask[i, j, l] = True
instance_sizes[l] = instance_sizes[l] + 1
break
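# Hedged usage sketch (illustrative only): allocating the outputs for an
# instance-id image `ids_im` (np.uint16, 2-D) with the distinct ids in
# `ids` (np.uint16, 1-D).
#
#     import numpy as np
#     masks = np.zeros(ids_im.shape + (len(ids),), dtype=np.uint8)
#     sizes = np.zeros(len(ids), dtype=np.int32)
#     get_binary_masks(masks, sizes, ids, ids_im)
#     boxes = extract_bboxes(masks)  # see below; rows are (y1, x1, y2, x2)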
@cython.boundscheck(False)
@cython.wraparound(False)
def extract_bboxes(uint8[:,:,:] instance_mask):
#y1, x1, y2, x2
bboxes = -np.ones([instance_mask.shape[2], 4], dtype=np.int32)
cdef int32[:,:] bboxes_view = bboxes
for i_ in range(instance_mask.shape[0]):
for j_ in range(instance_mask.shape[1]):
for l in range(instance_mask.shape[2]):
if instance_mask[i_, j_, l]:
if bboxes_view[l, 0] == -1 or i_ < bboxes_view[l, 0]:
bboxes_view[l, 0] = i_
if bboxes_view[l, 2] == -1 or i_ > bboxes_view[l, 2] - 1:
bboxes_view[l, 2] = i_ + 1
if bboxes_view[l, 1] == -1 or j_ < bboxes_view[l, 1]:
bboxes_view[l, 1] = j_
if bboxes_view[l, 3] == -1 or j_ > bboxes_view[l, 3] - 1:
                        bboxes_view[l, 3] = j_ + 1
break
    return bboxes
<|end_of_text|>cimport numpy as np
import_array()
##########################################################################
#
# BLAS LEVEL 1
#
##########################################################################
#
# vector swap: x <-> y
#
cdef void sswap_(int M, float *x, int incX, float *y, int incY):
lib_sswap( M, x, incX, y, incY )
cdef void sswap( np.ndarray x, np.ndarray y ):
if x.ndim!= 1: raise ValueError("x is not a vector")
if y.ndim!= 1: raise ValueError("y is not a vector")
if x.shape[0]!= y.shape[0]: raise ValueError("x rows!= y rows")
if x.descr.type_num!= PyArray_FLOAT: raise ValueError("x is not of type float")
if y.descr.type_num!= PyArray_FLOAT: raise ValueError("y is not of type float")
lib_sswap( x.shape[0], <float*>x.data, 1, <float*>y.data, 1 )
cdef void dswap_(int M, double *x, int incX, double *y, int incY):
lib_dswap( M, x, incX, y, incY )
cdef void dswap( np.ndarray x, np.ndarray y ):
if x.ndim!= 1: raise ValueError("x is not a vector")
if y.ndim!= 1: raise ValueError("y is not a vector")
if x.shape[0]!= y.shape[0]: raise ValueError("x rows!= y rows")
if x.descr.type_num!= PyArray_DOUBLE: raise ValueError("x is not of type double")
if y.descr.type_num!= PyArray_DOUBLE: raise ValueError("y is not of type double")
lib_dswap( x.shape[0], <double*>x.data, 1, <double*>y.data, 1 )
#
# scalar vector multiply: x *= alpha
#
cdef void sscal_(int N, float alpha, float *x, int incX ):
lib_sscal( N, alpha, x, incX )
cdef void sscal( float alpha, np.ndarray x ):
if x.ndim!= 1: raise ValueError("x is not a vector")
if x.descr.type_num!= PyArray_FLOAT: raise ValueError("x is not of type float")
lib_sscal( x.shape[0], alpha, <float*>x.data, 1 )
cdef void dscal_(int N, double alpha, double *x, int incX ):
lib_dscal( N, alpha, x, incX )
cdef void dscal( double alpha, np.ndarray x ):
if x.ndim!= 1: raise ValueError("x is not a vector")
if x.descr.type_num!= PyArray_DOUBLE: raise ValueError("x is not of type double")
lib_dscal( x.shape[0], alpha, <double*>x.data, 1 )
#
# vector copy: y <- x
#
cdef void scopy_(int N, float *x, int incX, float *y, int incY):
lib_scopy( N, x, incX, y, incY )
cdef void scopy( np.ndarray x, np.ndarray y ):
if x.ndim!= 1: raise ValueError("x is not a vector")
if y.ndim!= 1: raise ValueError("y is not a vector")
if x.shape[0]!= y.shape[0]: raise ValueError("x rows!= y rows")
if x.descr.type_num!= PyArray_FLOAT: raise ValueError("x is not of type float")
if y.descr.type_num!= PyArray_FLOAT: raise ValueError("y is not of type float")
lib_scopy( x.shape[0], <float*>x.data, 1, <float*>y.data, 1 )
cdef void dcopy_(int N, double *x, int incX, double *y, int incY):
lib_dcopy( N, x, incX, y, incY )
cdef void dcopy( np.ndarray x, np.ndarray y ):
if x.ndim!= 1: raise ValueError("x is not a vector")
if y.ndim!= 1: raise ValueError("y is not a vector")
if x.shape[0]!= y.shape[0]: raise ValueError("x rows!= y rows")
if x.descr.type_num!= PyArray_DOUBLE: raise ValueError("x is not of type double")
if y.descr.type_num!= PyArray_DOUBLE: raise ValueError("y is not of type double")
lib_dcopy( x.shape[0], <double*>x.data, 1, <double*>y.data, 1 )
#
# vector addition: y += alpha*x
#
cdef void saxpy_(int N, float alpha, float *x, int incX, float *y, int incY ):
lib_saxpy( N, alpha, x, incX, y, incY )
cdef void saxpy( float alpha, np.ndarray x, np.ndarray y ):
if x.ndim!= 1: raise ValueError("x is not a vector")
if y.ndim!= 1: raise ValueError("y is not a vector")
if x.shape[0]!= y.shape[0]: raise ValueError("x rows!= y rows")
if x.descr.type_num!= PyArray_FLOAT: raise ValueError("x is not of type float")
if y.descr.type_num!= PyArray_FLOAT: raise ValueError("y is not of type float")
lib_saxpy( x.shape[0], alpha, <float*>x.data, 1, <float*>y.data, 1 )
cdef void daxpy_(int N, double alpha, double *x, int incX, double *y, int incY ):
lib_daxpy( N, alpha, x, incX, y, incY )
cdef void daxpy( double alpha, np.ndarray x, np.ndarray y ):
if x.ndim!= 1: raise ValueError("x is not a vector")
if y.ndim!= 1: raise ValueError("y is not a vector")
if x.shape[0]!= y.shape[0]: raise ValueError("x rows!= y rows")
if x.descr.type_num!= PyArray_DOUBLE: raise ValueError("x is not of type double")
if y.descr.type_num!= PyArray_DOUBLE: raise ValueError("y is not of type double")
lib_daxpy( x.shape[0], alpha, <double*>x.data, 1, <double*>y.data, 1 )
#
# vector dot product: x.T y
#
cdef float sdot_(int N, float *x, int incX, float *y, int incY ):
return lib_sdot( N, x, incX, y, incY )
cdef float sdot( np.ndarray x, np.ndarray y ):
if x.ndim!= 1: raise ValueError("x is not a vector")
if y.ndim!= 1: raise ValueError("y is not a vector")
if x.shape[0]!= y.shape[0]: raise ValueError("x rows!= y rows")
if x.descr.type_num!= PyArray_FLOAT: raise ValueError("x is not of type float")
if y.descr.type_num!= PyArray_FLOAT: raise ValueError("y is not of type float")
return lib_sdot( x.shape[0], <float*>x.data, 1, <float*>y.data, 1 )
cdef double ddot_(int N, double *x, int incX, double *y, int incY ):
return lib_ddot( N, x, incX, y, incY )
cdef double ddot( np.ndarray x, np.ndarray y ):
if x.ndim!= 1: raise ValueError("x is not a vector")
if y.ndim!= 1: raise ValueError("y is not a vector")
if x.shape[0]!= y.shape[0]: raise ValueError("x rows!= y rows")
if x.descr.type_num!= PyArray_DOUBLE: raise ValueError("x is not of type double")
if y.descr.type_num!= PyArray_DOUBLE: raise ValueError("y is not of type double")
return lib_ddot( x.shape[0], <double*>x.data, 1, <double*>y.data, 1 )
#
# Euclidean norm: ||x||_2
#
cdef float snrm2_(int N, float *x, int incX): return lib_snrm2( N, x, incX )
cdef float snrm2( np.ndarray x ):
if x.ndim!= 1: raise ValueError("x is not a vector")
if x.descr.type_num!= PyArray_FLOAT: raise ValueError("x is not of type float")
return lib_snrm2( x.shape[0], <float*>x.data, 1 )
cdef double dnrm2_(int N, double *x, int incX): return lib_dnrm2( N, x, incX )
cdef double dnrm2( np.ndarray x ):
if x.ndim!= 1: raise ValueError("x is not a vector")
if x.descr.type_num!= PyArray_DOUBLE: raise ValueError("x is not of type double")
return lib_dnrm2( x.shape[0], <double*>x.data, 1 )
#
# sum of absolute values: ||x||_1
#
cdef float sasum_(int N, float *x, int incX): return lib_sasum(N, x, incX)
cdef float sasum( np.ndarray x ):
if x.ndim!= 1: raise ValueError("x is not a vector")
if x.descr.type_num!= PyArray_FLOAT: raise ValueError("x is not of type float")
return lib_sasum( x.shape[0], <float*>x.data, 1 )
cdef double dasum_(int N, double *x, int incX): return lib_dasum(N, x, incX)
cdef double dasum( np.ndarray x ):
if x.ndim!= 1: raise ValueError("x is not a vector")
if x.descr.type_num!= PyArray_DOUBLE: raise ValueError("x is not of type double")
return lib_dasum( x.shape[0], <double*>x.data, 1 )
#
# index of maximum absolute value element
#
cdef int isamax_(int N, float *x, int incX): return lib_isamax( N, x, incX )
cdef int isamax( np.ndarray x ):
    if x.ndim != 1: raise ValueError("x is not a vector")
    if x.descr.type_num != PyArray_FLOAT: raise ValueError("x is not of type float")
return lib_isamax( x.shape[0], <float*>x.data, 1 )
cdef int idamax_(int N, double *x, int incX): return lib_idamax( N, x, incX )
cdef int idamax( np.ndarray x ):
if x.ndim!= 1: raise ValueError("x is not a vector")
if x.descr.type_num!= PyArray_DOUBLE: raise ValueError("x is not of type double")
return lib_idamax( x.shape[0], <double*>x.data, 1 )
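# Hedged illustration (not part of the original source): composing the
# double-precision Level 1 wrappers above. Inputs must be 1-D contiguous
# float64 arrays of equal length, or the checks above raise ValueError.
cdef double _example_level1( np.ndarray x, np.ndarray y ):
    dcopy( x, y )         # y <- x
    dscal( 2.0, y )       # y <- 2x
    daxpy( -1.0, x, y )   # y <- 2x - x = x
    return dnrm2( y )     # ||x||_2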
##########################################################################
#
# BLAS LEVEL 2
#
##########################################################################
#
# matrix times vector: A = alpha * A x + beta * y
# or A = alpha * A.T x + beta * y
#
# single precison
cdef void sgemv_(CBLAS_ORDER Order, CBLAS_TRANSPOSE TransA, int M, int N,
float alpha, float *A, int lda, float *x, int incX,
float beta, float *y, int incY):
lib_sgemv( Order, TransA, M, N, alpha, A, lda, x, incX, beta, y, incY )
cdef void sgemv6( CBLAS_TRANSPOSE TransA, float alpha, np.ndarray A,
np.ndarray x, float beta, np.ndarray y):
if A.ndim!= 2: raise ValueError("A is not a matrix")
if x.ndim!= 1: raise ValueError("x is not a vector")
if y.ndim!= 1: raise ValueError("y is not a vector")
if A.shape[0]!= y.shape[0]: raise ValueError("A rows!= y rows")
if A.shape[1]!= x.shape[0]: raise ValueError("A columns!= x rows")
if A.descr.type_num!= PyArray_FLOAT: raise ValueError("A is not of type float")
if x.descr.type_num!= PyArray_FLOAT: raise ValueError("x is not of type float")
if y.descr.type_num!= PyArray_FLOAT: raise ValueError("y is not of type float")
lib_sgemv( CblasRowMajor, TransA, A.shape[0], A.shape[1], alpha, <float*>A.data,
A.shape[1], <float*>x.data, 1, beta, <float*>y.data, 1 )
cdef void sgemv5( float alpha, np.ndarray A, np.ndarray x, float beta, np.ndarray y):
if A.ndim!= 2: raise ValueError("A is not a matrix")
if x.ndim!= 1: raise ValueError("x is not a vector")
if y.ndim!= 1: raise ValueError("y is not a vector")
if A.shape[0]!= y.shape[0]: raise ValueError("A rows!= y rows")
if A.shape[1]!= x.shape[0]: raise ValueError("A columns!= x rows")
if A.descr.type_num!= PyArray_FLOAT: raise ValueError("A is not of type float")
if x.descr.type_num!= PyArray_FLOAT: raise ValueError("x is not of type float")
if y.descr.type_num!= PyArray_FLOAT: raise ValueError("y is not of type float")
lib_sgemv( CblasRowMajor, CblasNoTrans, A.shape[0], A.shape[1], alpha,
<float*>A.data, A.shape[1], <float*>x.data, 1, beta, <float*>y.data, 1 )
cdef void sgemv3( np.ndarray A, np.ndarray x, np.ndarray y):
sgemv5( 1.0, A, x, 0.0, y )
cdef np.ndarray sgemv( np.ndarray A, np.ndarray x ):
cdef np.ndarray y = svnewempty( A.shape[0] )
sgemv5( 1.0, A, x, 0.0, y )
return y
# double precision
cdef void dgemv_(CBLAS_ORDER Order, CBLAS_TRANSPOSE TransA, int M, int N,
double alpha, double *A, int lda, double *x, int incX,
double beta, double *y, int incY):
lib_dgemv( Order, TransA, M, N, alpha, A, lda, x, incX, beta, y, incY )
cdef void dgemv6( CBLAS_TRANSPOSE TransA, double alpha, np.ndarray A,
np.ndarray x, double beta, np.ndarray y):
if A.ndim!= 2: raise ValueError("A is not a matrix")
if x.ndim!= 1: raise ValueError("x is not a vector")
if y.ndim!= 1: raise ValueError("y is not a vector")
if A.shape[0]!= y.shape[0]: raise ValueError("A rows!= y rows")
if A.shape[1]!= x.shape[0]: raise ValueError("A columns!= x rows")
if A.descr.type_num!= PyArray_DOUBLE: raise ValueError("A is not of type double")
if x.descr.type_num!= PyArray_DOUBLE: raise ValueError("x is not of type double")
if y.descr.type_num!= PyArray_DOUBLE: raise ValueError("y is not of type double")
lib_dgemv( CblasRowMajor, TransA, A.shape[0], A.shape[1], alpha, <double*>A.data,
A.shape[1], <double*>x.data, 1, beta, <double*>y.data, 1 )
cdef void dgemv5( double alpha, np.ndarray A, np.ndarray x, double beta, np.ndarray y):
if A.ndim!= 2: raise ValueError("A is not a matrix")
if x.ndim!= 1: raise ValueError("x is not a vector")
if y.ndim!= 1: raise ValueError("y is not a vector")
if A.shape[0]!= y.shape[0]: raise ValueError("A rows!= y rows")
if A.shape[1]!= x.shape[0]: raise ValueError("A columns!= x rows")
if A.descr.type_num!= PyArray_DOUBLE: raise ValueError("A is not of type double")
if x.descr.type_num!= PyArray_DOUBLE: raise ValueError("x is not of type double")
if y.descr.type_num!= PyArray_DOUBLE: raise ValueError("y is not of type double")
lib_dgemv( CblasRowMajor, CblasNoTrans, A.shape[0], A.shape[1], alpha,
<double*>A.data, A.shape[1], <double*>x.data, 1, beta, <double*>y.data, 1 )
cdef void dgemv3( np.ndarray A, np.ndarray x, np.ndarray y):
dgemv5( 1.0, A, x, 0.0, y )
cdef np.ndarray dgemv( np.ndarray A, np.ndarray x ):
cdef np.ndarray y = dvnewempty( A.shape[0] )
dgemv5( 1.0, A, x, 0.0, y )
return y
#
# vector outer-product: A = alpha * outer_product( x, y.T )
#
# Note: when calling this make sure you're working with a buffer otherwise
# a whole lot of Python stuff will be going before the call to this function
# is made in order to get the size of the arrays, there the data is located...
# single precision
cdef void sger_(CBLAS_ORDER Order, int M, int N, float alpha, float *x, int incX,
float *y, int incY, float *A, int lda):
lib_sger( Order, M, N, alpha, x, incX, y, incY, A, lda )
cdef void sger4( float alpha, np.ndarray x, np.ndarray y, np.ndarray A):
if A.ndim!= 2: raise ValueError("A is not a matrix")
    if x.ndim != 1: raise ValueError("x is not a vector")
if y.ndim!= 1: raise ValueError("y is not a vector")
if x.shape[0]!= A.shape[0]: raise ValueError("x rows!= A rows")
if y.shape[0]!= A.shape[1]: raise ValueError("y rows!= A columns")
if A.descr.type_num!= PyArray_FLOAT: raise ValueError("A is not of type float")
if x.descr.type_num!= PyArray_FLOAT: raise ValueError("x is not of type float")
if y.descr.type_num!= PyArray_FLOAT: raise ValueError("y is not of type float")
lib_sger( CblasRowMajor, x.shape[0], y.shape[0], alpha,
<float*>x.data, 1, <float*>y.data, 1, <float*>A.data, A.shape[1] )
cdef void sger3( np.ndarray x, np.ndarray y, np.ndarray A):
sger4( 1.0, x, y, A )
cdef np.ndarray sger( np.ndarray x, np.ndarray y ):
cdef np.ndarray A = smnewzero( x.shape[0], y.shape[0] )
sger4( 1.0, x, y, A )
return A
# double precision
cdef void dger_(CBLAS_ORDER Order, int M, int N, double alpha, double *x, int incX,
double *y, int incY, double *A, int lda):
lib_dger( Order, M, N, alpha, x, incX, y, incY, A, lda )
cdef void dger4( double alpha, np.ndarray x, np.ndarray y, np.ndarray A):
if A.ndim!= 2: raise ValueError("A is not a matrix")
if x.ndim!= 1: raise ValueError("x is not a vector")
if y.ndim!= 1: raise ValueError("y is not a vector")
if x.shape[0]!= A.shape[0]: raise ValueError("x rows!= A rows")
if y.shape[0]!= A.shape[1]: raise ValueError("y rows!= A columns")
if A.descr.type_num!= PyArray_DOUBLE: raise ValueError("A is not of type double")
if x.descr.type_num!= PyArray_DOUBLE: raise ValueError("x is not of type double")
if y.descr.type_num!= PyArray_DOUBLE: raise ValueError("y is not of type double")
lib_dger( CblasRowMajor, x.shape[0], y.shape[0], alpha,
<double*>x.data, 1, <double*>y.data, 1, <double*>A.data, A.shape[1] )
cdef void dger3( np.ndarray x, np.ndarray y, np.ndarray A):
dger4( 1.0, x, y, A )
cdef np.ndarray dger( np.ndarray x, np.ndarray y ):
cdef np.ndarray A = dmnewzero( x.shape[0], y.shape[0] )
dger4( 1.0, x, y, A )
return A
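# Hedged illustration (not part of the original source): a rank-1 update
# A <- A + alpha * outer(x, y) using the in-place 4-argument form.
cdef void _example_rank1( double alpha, np.ndarray x, np.ndarray y, np.ndarray A ):
    dger4( alpha, x, y, A )   # accumulates alpha * x y^T into A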
##########################################################################
#
# BLAS LEVEL 3
#
##########################################################################
# matrix times matrix: C = alpha * A B + beta * C
# or C = alpha * A.T B + beta * C
# or C = alpha * A B.T + beta * C
# or C = alpha * A.T B.T + beta * C
#
# single precision
cdef void sgemm_(CBLAS_ORDER Order, CBLAS_TRANSPOSE TransA, CBLAS_TRANSPOSE TransB,
int M, int N, int K, float alpha, float *A, int lda, float *B,
int ldb, float beta, float *C, int ldc):
lib_sgemm( Order, TransA, TransB, M, N, K, alpha, A, lda, B, ldb, beta, C, ldc )
cdef void sgemm7( CBLAS_TRANSPOSE TransA, CBLAS_TRANSPOSE TransB,
float alpha, np.ndarray A, np.ndarray B, float beta, np.ndarray C ):
if A.ndim!= 2: raise ValueError("A is not a matrix")
if B.ndim!= 2: raise ValueError("B is not a matrix")
if C.ndim!= 2: raise ValueError("C is not a matrix")
    if A.shape[0] != C.shape[0]: raise ValueError("A rows != C rows")
    if B.shape[1] != C.shape[1]: raise ValueError("B columns != C columns")
if A.shape[1]!= B.shape[0]: raise ValueError("A columns!= B rows")
if A.descr.type_num!= PyArray_FLOAT: raise ValueError("A is not of type float")
if B.descr.type_num!= PyArray_FLOAT: raise ValueError("B is not of type float")
if C.descr.type_num!= PyArray_FLOAT: raise ValueError("C is not of type float")
lib_sgemm( CblasRowMajor, TransA, TransB, C.shape[0], C.shape[1], B.shape[0],
alpha, <float*>A.data, A.shape[1], <float*>B.data, B.shape[1],
beta, <float*>C.data, C.shape[1] )
cdef void sgemm5( float alpha, np.ndarray A, np.ndarray B,
float beta, np.ndarray C ):
if A.ndim!= 2: raise ValueError("A is not a matrix")
if B.ndim!= 2: raise ValueError("B is not a matrix")
if C.ndim!= 2: raise ValueError("C is not a matrix")
    if A.shape[0] != C.shape[0]: raise ValueError("A rows != C rows")
    if B.shape[1] != C.shape[1]: raise ValueError("B columns != C columns")
if A.shape[1]!= B.shape[0]: raise ValueError("A columns!= B rows")
if A.descr.type_num!= PyArray_FLOAT: raise ValueError("A is not of type float")
if B.descr.type_num!= PyArray_FLOAT: raise ValueError("B is not of type float")
if C.descr.type_num!= PyArray_FLOAT: raise ValueError("C is not of type float")
lib_sgemm( CblasRowMajor,CblasNoTrans,CblasNoTrans, C.shape[0], C.shape[1],
B.shape[0], alpha, <float*>A.data, A.shape[1], <float*>B.data,
B.shape[1], beta, <float*>C.data, C.shape[1] )
cdef void sgemm3( np.ndarray A, np.ndarray B, np.ndarray C ): sgemm5( 1.0, A, B, 0.0, C )
cdef np.ndarray sgemm( np.ndarray A, np.ndarray B ):
cdef np.ndarray C = smnewempty( A.shape[0], B.shape[1] )
sgemm5( 1.0, A, B, 0.0, C )
return C
# matrix times matrix: C = alpha * A B + beta * C
# or C = alpha * A.T B + beta * C
# or C = alpha * A B.T + beta * C
# or C = alpha * A.T B.T + beta * C
#
# double precision
cdef void dgemm_(CBLAS_ORDER Order, CBLAS_TRANSPOSE TransA, CBLAS_TRANSPOSE TransB,
int M, int N, int K, double alpha, double *A, int lda, double *B,
int ldb, double beta, double *C, int ldc):
lib_dgemm( Order, TransA, TransB, M, N, K, alpha, A, lda, B, ldb, beta, C, ldc )
cdef void dgemm7( CBLAS_TRANSPOSE TransA, CBLAS_TRANSPOSE TransB,
double alpha, np.ndarray A, np.ndarray B, double beta, np.ndarray C ):
if A.ndim!= 2: raise ValueError("A is not a matrix")
if B.ndim!= 2: raise ValueError("B is not a matrix")
if C.ndim!= 2: raise ValueError("C is not a matrix")
    if A.shape[0] != C.shape[0]: raise ValueError("A rows != C rows")
    if B.shape[1] != C.shape[1]: raise ValueError("B columns != C columns")
if A.shape[1]!= B.shape[0]: raise ValueError("A columns!= B rows")
if A.descr.type_num!= PyArray_DOUBLE: raise ValueError("A is not of type double")
if B.descr.type_num!= PyArray_DOUBLE: raise ValueError("B is not of type double")
if C.descr.type_num!= PyArray_DOUBLE: raise ValueError("C is not of type double")
lib_dgemm( CblasRowMajor, TransA, TransB, C.shape[0], C.shape[1], B.shape[0],
alpha, <double*>A.data, A.shape[1], <double*>B.data, B.shape[1],
               beta, <double*>C.data, C.shape[1] )
cdef void dgemm5( double alpha, np.ndarray A, np.ndarray B,
double beta, np.ndarray C ):
if A.ndim!= 2: raise ValueError("A is not a matrix")
if B.ndim!= 2: raise ValueError("B is not a matrix")
if C.ndim!= 2: raise ValueError("C is not a matrix")
    if A.shape[0] != C.shape[0]: raise ValueError("A rows != C rows")
    if B.shape[1] != C.shape[1]: raise ValueError("B columns != C columns")
if A.shape[1]!= B.shape[0]: raise ValueError("A columns!= B rows")
if A.descr.type_num!= PyArray_DOUBLE: raise ValueError("A is not of type double")
if B.descr.type_num!= PyArray_DOUBLE: raise ValueError("B is not of type double")
if C.descr.type_num!= PyArray_DOUBLE: raise ValueError("C is not of type double")
lib_dgemm( CblasRowMajor,CblasNoTrans,CblasNoTrans, C.shape[0], C.shape[1],
B.shape[0], alpha, <double*>A.data, A.shape[1], <double*>B.data,
B.shape[1], beta, <double*>C.data, C.shape[1] )
cdef void dgemm3( np.ndarray A, np.ndarray B, np.ndarray C ):
dgemm5( 1.0, A, B, 0.0, C )
cdef np.ndarray dgemm( np.ndarray A, np.ndarray B ):
cdef np.ndarray C = dmnewempty( A.shape[0], B.shape[1] )
dgemm5( 1.0, A, B, 0.0, C )
return C
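# Hedged illustration (not part of the original source): the 2-argument
# overload allocates a fresh C = A B, while the 5-argument form
# accumulates into an existing output.
cdef np.ndarray _example_gemm( np.ndarray A, np.ndarray B ):
    cdef np.ndarray C = dgemm( A, B )   # C = A B
    dgemm5( 1.0, A, B, 1.0, C )         # C = A B + C = 2 A B
    return C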
################################
#
# Popular functions from CLAPACK
#
################################
# the inverse of a matrix using the LU factorization computed by dgetrf
cdef int sgetri_(CBLAS_ORDER Order, int N, float *A, int lda, int *ipiv):
return clapack_sgetri(Order, N, A, lda, ipiv)
cdef int dgetri_(CBLAS_ORDER Order, int N, double *A, int lda, int *ipiv):
return clapack_dgetri(Order, N, A, lda, ipiv)
cdef int sgetri(np.ndarray A, np.ndarray ipiv):
if A is None: raise TypeError("A is not numpy.ndarray")
if ipiv is None: raise TypeError("ipiv is not numpy.ndarray")
if A.ndim!= 2: raise ValueError("A is not a matrix")
if ipiv.ndim!= 1: raise ValueError("ipiv is not a vector")
if A.shape[0]!= A.shape[1]: raise ValueError("A is not square")
    if ipiv.shape[0] != A.shape[0]: raise ValueError("ipiv rows != A rows")
if A.descr.type_num!= PyArray_FLOAT: raise ValueError("A is not of type float")
if ipiv.descr.type_num!= np.NPY_INT32: raise ValueError("ipiv is not of type int")
return clapack_sgetri(CblasRowMajor, A.shape[0], <float*> A.data, A.shape[0],
<int*> ipiv.data)
cdef int dgetri(np.ndarray A, np.ndarray ipiv):
if A is None: raise TypeError("A is not numpy.ndarray")
if ipiv is None: raise TypeError("ipiv is not numpy.ndarray")
if A.ndim!= 2: raise ValueError("A is not a matrix")
if ipiv.ndim!= 1: raise ValueError("ipiv is not a vector")
if A.shape[0]!= A.shape[1]: raise ValueError("A is not square")
    if ipiv.shape[0] != A.shape[0]: raise ValueError("ipiv rows != A rows")
if A.descr.type_num!= PyArray_DOUBLE: raise ValueError("A is not of type double")
if ipiv.descr.type_num!= np.NPY_INT32: raise ValueError("ipiv is not of type int")
return clapack_dgetri(CblasRowMajor, A.shape[0], <double*> A.data, A.shape[0],
<int*> ipiv.data)
# LU factorization of a general M-by-N matrix A using partial pivoting with row interchanges
cdef int sgetrf_(CBLAS_ORDER Order, int M, int N, float *A, int lda, int *ipiv):
return clapack_sgetrf(Order, M, N, A, lda, ipiv)
cdef int dgetrf_(CBLAS_ORDER Order, int M, int N, double *A, int lda, int *ipiv):
return clapack_dgetrf(Order, M, N, A, lda, ipiv)
cdef int sgetrf(np.ndarray A, np.ndarray ipiv):
if A is None: raise TypeError("A is not numpy.ndarray")
if ipiv is None: raise TypeError("ipiv is not numpy.ndarray")
if A.ndim!= 2: raise ValueError("A is not a matrix")
if ipiv.ndim!= 1: raise ValueError("ipiv is not a vector")
if A.shape[0]!= A.shape[1]: raise ValueError("A is not square")
    if ipiv.shape[0] != A.shape[0]: raise ValueError("ipiv rows != A rows")
if A.descr.type_num!= PyArray_FLOAT: raise ValueError("A is not of type float")
if ipiv.descr.type_num!= np.NPY_INT32: raise ValueError("ipiv is not of type int")
return clapack_sgetrf(CblasRowMajor, A.shape[0], A.shape[0], <float*> A.data, A.shape[0],
<int*> ipiv.data)
cdef int dgetrf(np.ndarray A, np.ndarray ipiv):
if A is None: raise TypeError("A is not numpy.ndarray")
if ipiv is None: raise TypeError("ipiv is not numpy.ndarray")
if A.ndim!= 2: raise ValueError("A is not a matrix")
if ipiv.ndim!= 1: raise ValueError("ipiv is not a vector")
if A.shape[0]!= A.shape[1]: raise ValueError("A is not square")
    if ipiv.shape[0] != A.shape[0]: raise ValueError("ipiv rows != A rows")
if A.descr.type_num!= PyArray_DOUBLE: raise ValueError("A is not of type double")
if ipiv.descr.type_num!= np.NPY_INT32: raise ValueError("ipiv is not of type int")
return clapack_dgetrf(CblasRowMajor, A.shape[0], A.shape[0], <double*> A.data, A.shape[0],
<int*> ipiv.data)
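# Hedged illustration (not part of the original source): inverting a
# square float64 matrix in place via the two CLAPACK wrappers above.
# ipiv must be an int32 vector with as many entries as A has rows; a
# nonzero return value signals failure.
cdef int _example_invert( np.ndarray A, np.ndarray ipiv ):
    cdef int info = dgetrf( A, ipiv )   # A <- its LU factors (in place)
    if info != 0:
        return info                     # factorization failed
    return dgetri( A, ipiv )            # A <- inv(A) from the LU factors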
#########################################################################
#
# Utility functions I've added myself
#
#########################################################################
# Create a new empty single precision matrix
cdef np.ndarray smnewempty( int M, int N ):
cdef np.npy_intp length[2]
length[0] = M; length[1] = N
Py_INCREF( np.NPY_FLOAT ) # This is apparently necessary
return PyArray_EMPTY( 2, length, np.NPY_FLOAT, 0 )
# Create a new empty double precision matrix
cdef np.ndarray dmnewempty( int M, int N ):
cdef np.npy_intp length[2]
length[0] = M; length[1] = N
Py_INCREF( np.NPY_DOUBLE ) # This is apparently necessary
return PyArray_EMPTY( 2, length, np.NPY_DOUBLE, 0 )
# Create a new empty single precision vector
cdef np.ndarray svnewempty( int M ):
cdef np.npy_intp length[1]
length[0] = M
Py_INCREF( np.NPY_FLOAT ) # This is apparently necessary
return PyArray_EMPTY( 1, length, np.NPY_FLOAT, 0 )
# Create a new empty double precision vector
cdef np.ndarray dvnewempty( int M ):
cdef np.npy_intp length[1]
length[0] = M
Py_INCREF( np.NPY_DOUBLE ) # This is apparently necessary
return PyArray_EMPTY( 1, length, np.NPY_DOUBLE, 0 )
# Create a new zeroed single precision matrix
cdef np.ndarray smnewzero( int M, int N ):
cdef np.npy_intp length[2]
length[0] = M; length[1] = N
Py_INCREF( np.NPY_FLOAT ) # This is apparently necessary
return PyArray_ZEROS( 2, length, np.NPY_FLOAT, 0 )
# Create a new zeroed double precision matrix
cdef np.ndarray dmnewzero( int M, int N ):
cdef np.npy_intp length[2]
length[0] = M; length[1] = N
Py_INCREF( np.NPY_DOUBLE ) # This is apparently necessary
return PyArray_ZEROS( 2, length, np.NPY_DOUBLE, 0 )
# Create a new zeroed single precision vector
cdef np.ndarray svnewzero( int M ):
cdef np.npy_intp length[1]
length[0] = M
    Py_INCREF( np.NPY_FLOAT ) # This is apparently necessary
return PyArray_ZEROS( 1, length, np.NPY_FLOAT, 0 )
# Create a new zeroed double precision vector
cdef np.ndarray dvnewzero( int M ):
cdef np.npy_intp length[1]
length[0] = M
Py_INCREF( np.NPY_DOUBLE ) # This is apparently necessary
return PyArray_ZEROS( 1, length, np.NPY_DOUBLE, 0 )
# Set a matrix to all zeros: must be floats in contiguous memory.
cdef void smsetzero( np.ndarray A ):
if A.ndim!= 2: raise ValueError("A is not a matrix")
if A.descr.type_num!= PyArray_FLOAT: raise ValueError("A is not of type float")
cdef float *ptr = <float*>A.data
cdef unsigned int i
for i in range(A.shape[0]*A.shape[1]):
ptr[0] = 0.0
ptr += 1
# Set a matrix to all zeros: must be doubles in contiguous memory.
cdef void dmsetzero( np.ndarray A ):
if A.ndim!= 2: raise ValueError("A is not a matrix")
if A.descr.type_num!= PyArray_DOUBLE: raise ValueError("A is not of type double")
cdef double *ptr = <double*>A.data
cdef unsigned int i
for i in range(A.shape[0]*A.shape[1]):
ptr[0] = 0.0
ptr += 1
# Set a vector to all zeros: must be floats in contiguous memory.
cdef void svsetzero( np.ndarray x ):
    if x.ndim != 1: raise ValueError("x is not a vector")
if x.descr.type_num!= PyArray_FLOAT: raise ValueError("x is not of type float")
cdef float *ptr = <float*>x.data
cdef unsigned int i
for i in range(x.shape[0]):
ptr[0] = 0.0
ptr += 1
# Set a vector to all zeros: must be doubles in contiguous memory.
cdef void dvsetzero( np.ndarray x ):
    if x.ndim != 1: raise ValueError("x is not a vector")
if x.descr.type_num!= PyArray_DOUBLE: raise ValueError("x is not of type double")
cdef double *ptr = <double*>x.data
cdef unsigned int i
for i in range(x.shape[0]):
ptr[0] = 0.0
ptr += 1
# Just pretend the matrices are vectors and call the BLAS daxpy routine
# Y += a * X
# single precision
cdef void smaxpy( float alpha, np.ndarray X, np.ndarray Y ):
    if X.ndim != 2: raise ValueError("X is not a matrix")
    if Y.ndim != 2: raise ValueError("Y is not a matrix")
if X.shape[0]!= Y.shape[0]: raise ValueError("X rows!= Y rows")
if X.shape[1]!= Y.shape[1]: raise ValueError("X columns!= Y columns")
if X.descr.type_num!= PyArray_FLOAT: raise ValueError("X is not of type float")
if Y.descr.type_num!= PyArray_FLOAT: raise ValueError("Y is not of type float")
cdef unsigned int N = X.shape[0]*X.shape[1]
lib_saxpy( N, alpha, <float*>X.data, 1, <float*>Y.data, 1 )
# Just pretend the matrices are vectors and call the BLAS daxpy routine
# Y += a * X
# double precision
cdef void dmaxpy( double alpha, np.ndarray X, np.ndarray Y ):
    if X.ndim != 2: raise ValueError("X is not a matrix")
    if Y.ndim != 2: raise ValueError("Y is not a matrix")
if X.shape[0]!= Y.shape[0]: raise ValueError("X rows!= Y rows")
if X.shape[1]!= Y.shape[1]: raise ValueError("X columns!= Y columns")
if X.descr.type_num!= PyArray_DOUBLE: raise ValueError("X is not of type double")
if Y.descr.type_num!= PyArray_DOUBLE: raise ValueError("Y is not of type double")
cdef unsigned int N = X.shape[0]*X.shape[1]
lib_daxpy( N, alpha, <double*>X.data, 1, <double*>Y.data, 1 )
<|end_of_text|># cython: boundscheck=False
from libc.math cimport fabs, log, pow, sqrt
import numpy as np
cimport numpy as np
from collections import deque
from typing import Deque
cdef class AdaptiveWindowing:
""" The helper class for ADWIN
Parameters
----------
delta
Confidence value.
clock
How often ADWIN should check for change. 1 means every new data point, default is 32. Higher
values speed up processing, but may also lead to increased delay in change detection.
max_buckets
The maximum number of buckets of each size that ADWIN should keep before merging buckets
(default is 5).
min_window_length
The minimum length of each subwindow (default is 5). Lower values may decrease delay in
change detection but may also lead to more false positives.
grace_period
ADWIN does not perform any change detection until at least this many data points have
arrived (default is 10).
"""
cdef:
dict __dict__
double delta, total, variance, total_width, width
int n_buckets, grace_period, min_window_length, tick, n_detections,\
clock, max_n_buckets, detect, detect_twice, max_buckets
def __init__(self, delta=.002, clock=32, max_buckets=5, min_window_length=5, grace_period=10):
self.delta = delta
self.bucket_deque: Deque['Bucket'] = deque([Bucket(max_size=max_buckets)])
self.total = 0.
self.variance = 0.
self.width = 0.
self.n_buckets = 0
self.grace_period = grace_period
self.tick = 0
self.total_width = 0
self.n_detections = 0
self.clock = clock
self.max_n_buckets = 0
self.min_window_length = min_window_length
self.max_buckets = max_buckets
def get_n_detections(self):
return self.n_detections
def get_width(self):
return self.width
def get_total(self):
return self.total
def get_variance(self):
return self.variance
@property
def variance_in_window(self):
return self.variance / self.width
def update(self, value: float):
"""Update the change detector with a single data point.
Apart from adding the element value to the window, by inserting it in
the correct bucket, it will also update the relevant statistics, in
this case the total sum of all values, the window width and the total
variance.
Parameters
----------
value
Input value
Returns
-------
bool
If True then a change is detected.
"""
return self._update(value)
cdef bint _update(self, double value):
# Increment window with one element
self._insert_element(value, 0.0)
return self._detect_change()
cdef void _insert_element(self, double value, double variance):
cdef Bucket bucket = self.bucket_deque[0]
bucket.insert_data(value, variance)
self.n_buckets += 1
if self.n_buckets > self.max_n_buckets:
self.max_n_buckets = self.n_buckets
# Update width, variance and total
self.width += 1
cdef double incremental_variance = 0.0
if self.width > 1.0:
incremental_variance = (
(self.width - 1.0)
* (value - self.total / (self.width - 1.0))
* (value - self.total / (self.width - 1.0))
/ self.width
)
self.variance += incremental_variance
self.total += value
self._compress_buckets()
@staticmethod
def _calculate_bucket_size(row: int):
return pow(2, row)
cdef double _delete_element(self):
cdef Bucket bucket = self.bucket_deque[-1]
cdef double n = self._calculate_bucket_size(len(self.bucket_deque) - 1) # length of bucket
cdef double u = bucket.get_total_at(0) # total of bucket
cdef double mu = u / n # mean of bucket
cdef double v = bucket.get_variance_at(0) # variance of bucket
# Update width, total and variance
self.width -= n
self.total -= u
mu_window = self.total / self.width # mean of the window
cdef double incremental_variance = (
v + n * self.width * (mu - mu_window) * (mu - mu_window)
/ (n + self.width)
)
self.variance -= incremental_variance
bucket.remove()
self.n_buckets -= 1
if bucket.current_idx == 0:
            self.bucket_deque.pop()
return n
cdef void _compress_buckets(self):
cdef:
unsigned int idx, k
double n1, n2, mu1, mu2, temp, total12
Bucket bucket, next_bucket
bucket = self.bucket_deque[0]
idx = 0
while bucket is not None:
k = bucket.current_idx
# Merge buckets if there are more than max_buckets
if k == self.max_buckets + 1:
try:
next_bucket = self.bucket_deque[idx + 1]
except IndexError:
self.bucket_deque.append(Bucket(max_size=self.max_buckets))
next_bucket = self.bucket_deque[-1]
n1 = self._calculate_bucket_size(idx) # length of bucket 1
n2 = self._calculate_bucket_size(idx) # length of bucket 2
mu1 = bucket.get_total_at(0) / n1 # mean of bucket 1
mu2 = bucket.get_total_at(1) / n2 # mean of bucket 2
# Combine total and variance of adjacent buckets
total12 = bucket.get_total_at(0) + bucket.get_total_at(1)
temp = n1 * n2 * (mu1 - mu2) * (mu1 - mu2) / (n1 + n2)
v12 = bucket.get_variance_at(0) + bucket.get_variance_at(1) + temp
next_bucket.insert_data(total12, v12)
self.n_buckets += 1
bucket.compress(2)
if next_bucket.current_idx <= self.max_buckets:
break
else:
break
try:
bucket = self.bucket_deque[idx + 1]
except IndexError:
bucket = None
idx += 1
cdef bint _detect_change(self):
"""Detect concept change.
This function is responsible for analysing different cutting points in
the sliding window, to verify if there is a significant change.
Returns
-------
bint
If True then a change is detected.
Notes
-----
Variance calculation is based on:
Babcock, B., Datar, M., Motwani, R., & O’Callaghan, L. (2003).
Maintaining Variance and k-Medians over Data Stream Windows.
Proceedings of the ACM SIGACT-SIGMOD-SIGART
Symposium on Principles of Database Systems, 22, 234–243.
https://doi.org/10.1145/773153.773176
"""
cdef:
unsigned int idx, k
bint change_detected, exit_flag
double n0, n1, n2, u0, u1, u2, v0, v1
Bucket bucket
change_detected = False
exit_flag = False
self.tick += 1
# Reduce window
if (self.tick % self.clock == 0) and (self.width > self.grace_period):
reduce_width = True
while reduce_width:
reduce_width = False
exit_flag = False
n0 = 0.0 # length of window 0
n1 = self.width # length of window 1
u0 = 0.0 # total of window 0
u1 = self.total # total of window 1
v0 = 0 # variance of window 0
v1 = self.variance # variance of window 1
# Evaluate each window cut (W_0, W_1)
for idx in range(len(self.bucket_deque) - 1, -1, -1):
if exit_flag:
break
bucket = self.bucket_deque[idx]
for k in range(bucket.current_idx - 1):
n2 = self._calculate_bucket_size(idx) # length of window 2
u2 = bucket.get_total_at(k) # total of window 2
# Warning: means are calculated inside the loop to get updated values.
mu2 = u2 / n2 # mean of window 2
if n0 > 0.0:
mu0 = u0 / n0 # mean of window 0
v0 += (
bucket.get_variance_at(k) + n0 * n2
* (mu0 - mu2) * (mu0 - mu2)
/ (n0 + n2)
)
if n1 > 0.0:
mu1 = u1 / n1 # mean of window 1
v1 -= (
bucket.get_variance_at(k) + n1 * n2
* (mu1 - mu2) * (mu1 - mu2)
/ (n1 + n2)
)
# Update window 0 and 1
n0 += self._calculate_bucket_size(idx)
n1 -= self._calculate_bucket_size(idx)
u0 += bucket.get_total_at(k)
u1 -= bucket.get_total_at(k)
if (idx == 0) and (k == bucket.current_idx - 1):
exit_flag = True # We are done
break
# Check if delta_mean < epsilon_cut holds
# Note: Must re-calculate means per updated values
delta_mean = (u0 / n0) - (u1 / n1)
if (
n1 >= self.min_window_length
and n0 >= self.min_window_length
and self._evaluate_cut(n0, n1, delta_mean, self.delta)
):
# Change detected
reduce_width = True
change_detected = True
if self.width > 0:
# Reduce the width of the window
n0 -= self._delete_element()
exit_flag = True # We are done
break
self.total_width += self.width
if change_detected:
self.n_detections += 1
return change_detected
cdef bint _evaluate_cut(self, double n0, double n1,
double delta_mean, double delta):
cdef:
double delta_prime, m_recip, epsilon
delta_prime = log(2 * log(self.width) / delta)
# Use reciprocal of m to avoid extra divisions when calculating epsilon
m_recip = ((1.0 / (n0 - self.min_window_length + 1))
+ (1.0 / (n1 - self.min_window_length + 1)))
epsilon = (sqrt(2 * m_recip * self.variance_in_window * delta_prime)
+ 2 / 3 * delta_prime * m_recip)
return fabs(delta_mean) > epsilon
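# Hedged usage sketch (illustrative only, not part of the original file):
# feeding a stream with an artificial mean shift through the detector.
#
#     import numpy as np
#     adwin = AdaptiveWindowing(delta=.002)
#     stream = np.concatenate([np.zeros(1000), np.ones(1000)])
#     for i, x in enumerate(stream):
#         if adwin.update(x):
#             print("change detected near index", i)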
cdef class Bucket:
""" A bucket class to keep statistics.
A bucket stores the summary structure for a contiguous set of data elements.
In this implementation fixed-size arrays are used for efficiency. The index
of the "current" element is used to simulate the dynamic size of the bucket.
"""
cdef:
int current_idx, max_size
np.ndarray total_array, variance_array
def __init__(self, max_size):
self.max_size = max_size
self.current_idx = 0
self.total_array = np.zeros(self.max_size + 1, dtype=float)
self.variance_array = np.zeros(self.max_size + 1, dtype=float)
cdef void clear_at(self, int index):
self.set_total_at(0.0, index)
self.set_variance_at(0.0, index)
cdef void insert_data(self, double value, double variance):
self.set_total_at(value, self.current_idx)
self.set_variance_at(variance, self.current_idx)
self.current_idx += 1
cdef void remove(self):
self.compress(1)
cdef void compress(self, int n_elements):
cdef unsigned int i
cdef int window_len = len(self.total_array)
# Remove first n_elements by shifting elements to the left
for i in range(n_elements, window_len):
self.total_array[i - n_elements] = self.total_array[i]
self.variance_array[i - n_elements] = self.variance_array[i]
# Clear remaining elements
for i in range(window_len - n_elements, window_len):
self.clear_at(i)
self.current_idx -= n_elements
cdef double get_total_at(self, int index):
return self.total_array[index]
cdef double get_variance_at(self, int index):
return self.variance_array[index]
cdef void set_total_at(self, double value, int index):
self.total_array[index] = value
cdef void set_variance_at(self, double value, int index):
self.variance_array[index] = value
<|end_of_text|>from scipy.special.cython_special import struve, y1
import cython
"""cdef declaration cannot be placed within an 'if' or 'for' statement."""
cdef double G(double a, double gamma):
cdef double HALF_PI = 1.570796326794897
cdef double abs_gamma = abs(gamma)
cdef double x, H1, N1
if abs_gamma < 1.0e-9:
return 1.0
else:
x = 2 * abs_gamma / a
H1 = struve(1, x)
N1 = y1(x)
        return x * (HALF_PI * (H1 - N1) - 1)
@cython.boundscheck(False)
@cython.wraparound(False)
def calc_eb(double x,
double mu, double epsilon_1, double epsilon_2,
int num_grid, int num_series,
double a_B, double R, double q, double l,
double [::1] zgrid, double [::1] cos_zgrid):
# Convert the unit of x from exciton Bohr radius to absolute Bohr
cdef double a = x * a_B
    # Evaluate the kinetic term
cdef double kinetic_energy = 1.0 / (2.0 * mu * a**2)
# Evaluate the Coulomb attraction term
cdef int i, j, n
cdef double ze, zh, integral_ze, integral_zh, kernel
cdef double qn, zhn
integral_ze = 0.0
for i, ze in enumerate(zgrid):
integral_zh = 0.0
for j, zh in enumerate(zgrid):
kernel = 0.0
for n in range(-num_series, num_series+1):
qn = q**abs(n)
zhn = zh * (-1)**n + 2 * n * l
kernel += qn * G(a, ze -zhn)
integral_zh += cos_zgrid[j] * kernel
integral_ze += cos_zgrid[i] * integral_zh
cdef double dS = (zgrid[1] - zgrid[0]) ** 2
cdef double potential_energy = -2.0 / (epsilon_1 * l**2 * a) * integral_ze * dS
# Evaluate the binding energy in effective Rydberg energy
cdef double binding_energy = (kinetic_energy + potential_energy) / R
return binding_energy
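# Hedged usage sketch (illustrative only). The grid construction below is
# an assumption about the caller's discretization: zgrid spans the well of
# width l, and cos_zgrid is a guess at the squared ground-state envelope.
#
#     import numpy as np
#     zgrid = np.linspace(-l / 2, l / 2, num_grid)
#     cos_zgrid = np.cos(np.pi * zgrid / l) ** 2
#     e_b = calc_eb(x, mu, epsilon_1, epsilon_2, num_grid, num_series,
#                   a_B, R, q, l, zgrid, cos_zgrid)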
<|end_of_text|># mode: compile
cdef class Spam:
property eggs:
def __del__(self):
pass
<|end_of_text|>cdef class DigitalImageDescriptor(FileDescriptor):
"""
The DigitalImageDescriptor class specifies that a File SourceMob is associated with
video essence that is formatted either using RGBA or luminance/chrominance formatting.
The DigitalImageDescriptor class is a sub-class of the FileDescriptor class.
The DigitalImageDescriptor class is an abstract class.
"""
def __cinit__(self):
self.iid = lib.IID_IAAFDigitalImageDescriptor
self.auid = lib.AUID_AAFDigitalImageDescriptor
self.im_ptr = NULL
cdef lib.IUnknown **get_ptr(self):
return <lib.IUnknown **> &self.im_ptr
cdef query_interface(self, AAFBase obj = None):
if obj is None:
obj = self
else:
query_interface(obj.get_ptr(), <lib.IUnknown **> &self.im_ptr, lib.IID_IAAFDigitalImageDescriptor)
FileDescriptor.query_interface(self, obj)
def __dealloc__(self):
if self.im_ptr:
self.im_ptr.Release()
property compression:
def __set__(self, value):
cdef AUID auid = CompressionDefMap[value.lower()]
error_check(self.im_ptr.SetCompression(auid.get_auid()))
def __get__(self):
cdef AUID auid = AUID()
error_check(self.im_ptr.GetCompression(&auid.auid))
for key,value in CompressionDefMap.items():
if value == auid:
return key
raise ValueError("Unknown Compression")
property stored_view:
"""
The dimension of the stored view. Typically this includes
leading blank video lines, any VITC lines, as well as the active
picture area. Set takes a tuple (width, height)
"""
def __set__(self, size):
cdef lib.aafUInt32 width = size[0]
cdef lib.aafUInt32 height = size[1]
            # Note: AAF has these backwards!
error_check(self.im_ptr.SetStoredView(height,width))
def __get__(self):
cdef lib.aafUInt32 width
cdef lib.aafUInt32 height
error_check(self.im_ptr.GetStoredView(&height,&width))
return (width, height)
property sampled_view:
"""
The dimensions of sampled view. Typically this includes
any VITC lines as well as the active picture area, but excludes
leading blank video lines. Set takes a tuple (width, height x_offset, y_offset)
"""
def __set__(self, rect):
cdef lib.aafUInt32 width = rect[0]
cdef lib.aafUInt32 height = rect[1]
cdef lib.aafInt32 x_offset = rect[2]
cdef lib.aafInt32 y_offset = rect[3]
error_check(self.im_ptr.SetSampledView(height, width, x_offset, y_offset))
def __get__(self):
cdef lib.aafUInt32 width
cdef lib.aafUInt32 height
cdef lib.aafInt32 x_offset
cdef lib.aafInt32 y_offset
error_check(self.im_ptr.GetSampledView(&height, &width, &x_offset, &y_offset))
return (width, height, x_offset, y_offset)
property display_view:
"""
the dimension of display view. Typically this includes
the active picture area, but excludes leading blank video lines
and any VITC lines. Set takes a tuple (width, height x_offset, y_offset)
"""
def __set__(self, rect):
cdef lib.aafUInt32 width = rect[0]
cdef lib.aafUInt32 height = rect[1]
cdef lib.aafInt32 x_offset = rect[2]
cdef lib.aafInt32 y_offset = rect[3]
error_check(self.im_ptr.SetDisplayView(height, width, x_offset, y_offset))
def __get__(self):
cdef lib.aafUInt32 width
cdef lib.aafUInt32 height
cdef lib.aafInt32 x_offset
cdef lib.aafInt32 y_offset
error_check(self.im_ptr.GetDisplayView(&height, &width, &x_offset, &y_offset))
return (width, height, x_offset, y_offset)
property aspect_ratio:
"""
Image Aspect Ratio. This ratio describes the
ratio between the horizontal size and the vertical size in the
intended final image.
"""
def __set__(self, value):
cdef lib.aafRational_t ratio
fraction_to_aafRational(value, ratio)
error_check(self.im_ptr.SetImageAspectRatio(ratio))
def __get__(self):
return self['ImageAspectRatio']
property layout:
"""
The frame layout. The frame layout describes whether all
        data for a complete sample is in one frame or is split into more
        than one field. Set takes a str.
        Values are:
        "fullframe" - Each frame contains a full sample in progressive
        scan lines.
        "separatefields" - Each sample consists of two fields,
        which when interlaced produce a full sample.
        "onefield" - Each sample consists of two interlaced
        fields, but only one field is stored in the
        data stream.
        "mixedfields" - Similar to FullFrame, except the two fields
        are interleaved.
        Note: value is always converted to lowercase
"""
def __set__(self, value):
value = value.lower()
if value == "none":
value = None
if value is None:
pass
cdef lib.aafFrameLayout_t layout = FrameLayout[value]
error_check(self.im_ptr.SetFrameLayout(layout))
def __get__(self):
cdef lib.aafFrameLayout_t layout
error_check(self.im_ptr.GetFrameLayout(&layout))
for key, value in FrameLayout.items():
if value == layout:
return key
property line_map:
"""
The VideoLineMap property. The video line map specifies the
scan line in the analog source that corresponds to the beginning
of each digitized field. For single-field video, there is 1
value in the array. For interleaved video, there are 2 values
in the array. Set Takes a Tuple, example: (0) or (0,1).
"""
def __set__(self, value):
            if len(value) == 0 or len(value) > 2:
raise ValueError("line_map len must be 1 or 2")
cdef lib.aafUInt32 numberElements = len(value)
cdef lib.aafInt32 line_map[2]
            for i, v in enumerate(value):
                line_map[i] = v
error_check(self.im_ptr.SetVideoLineMap(numberElements, line_map))
def __get__(self):
cdef lib.aafUInt32 numberElements
error_check(self.im_ptr.GetVideoLineMapSize(&numberElements))
            # I don't know if it's possible to have a line_map bigger than 2
            cdef lib.aafInt32 line_map[5]
error_check(self.im_ptr.GetVideoLineMap(numberElements, line_map))
l = []
            for i in xrange(numberElements):
l.append(line_map[i])
return tuple(l)
property image_alignment:
"""
Specifies the alignment when storing the digital essence. For example, a value of 16
means that the image is stored on 16-byte boundaries. The
starting point for a field will always be a multiple of 16 bytes.
If the field does not end on a 16-byte boundary, it is padded
out to the next 16-byte boundary.
"""
def __get__(self):
cdef lib.aafUInt32 value
error_check(self.im_ptr.GetImageAlignmentFactor(&value))
return value
def __set__(self, lib.aafUInt32 value):
error_check(self.im_ptr.SetImageAlignmentFactor(value))
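# Hedged usage sketch (illustrative only): typical property round-trips on
# a concrete descriptor instance `desc` obtained elsewhere. The aspect
# ratio value is whatever fraction_to_aafRational accepts.
#
#     desc.stored_view = (720, 576)
#     desc.sampled_view = (720, 576, 0, 0)
#     desc.layout = "separatefields"
#     desc.line_map = (0, 1)
#     width, height = desc.stored_view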
<|end_of_text|># cython: profile=True
# cython: boundscheck=False
# cython: wraparound=False
# cython: nonecheck=False
import nltk
import numpy as np
cimport numpy as np
from collections import defaultdict
from libcpp.string cimport string
cdef extern from "math.h":
double sqrt(double x)
# This code is a port of https://github.com/beckdaniel/GPy/blob/tk_master_nograds/GPy/kern/_src/cy_tree.pyx
# Our main changes are:
# 1) Upgrade code to work for python3 and GPy 1.9.9
# 2) We focus on a single highly efficient implementation of the SSTK kernel of Moschitti (2006)
# 3) We fine-tune the cython implementation to provide another order of computational speed ups
# 4) We improve the documentation
class wrapper_raw_SubsetTreeKernel(raw_SubsetTreeKernel):
# Dummy wrapper to allow the main class to be cast into C, whilst being accessible from python
pass
cdef class Node(object):
"""
A node object, containing a grammar production, an id and the children ids.
These are the nodes stored in the node lists implementation of the SSTK
(the "Fast Tree Kernel" of Moschitti (2006))
    :param production: String of the grammar production of the node (e.g. production of node S in '(S (NP ns) (VP v))' is 'S NP VP')
This will be useful for creating ids to store node info in a dictionary
:param node_id: Unique ID of the Node
:param children_ids: List of unique IDs of the Node's children
"""
cdef str production
cdef int node_id
cdef list children_ids
def __init__(self, str production, int node_id, list children_ids):
self.production = production
self.node_id = node_id
self.children_ids = children_ids
def __repr__(self):
return str((self.production, self.node_id, self.children_ids))
cdef class raw_SubsetTreeKernel(object):
"""
The "Fast Tree Kernel" of Moschitti (2006), with two parameters.
Following Beck (2015) to get gradients wrt kernel parameters
    :param _lambda: lambda weights the contribution of larger tree fragments
:param _sigma: sigma controls sparsity
:param normalization: Bool to control if we normalize. If comparing trees of different depth then should normalize.
"""
    cdef double _lambda
    cdef double _sigma
    cdef int normalize
    cdef dict _tree_cache
def __init__(self, double _lambda=1., double _sigma=1., bint normalize=True):
self._lambda = _lambda
self._sigma = _sigma
self._tree_cache = {}
self.normalize = normalize
cdef tuple _gen_node_list(self, str tree_repr):
"""
Generates an ordered list of nodes from a tree.
The list is used to generate the node pairs when
calculating K.
It also returns a nodes dict for fast node access.
"""
tree = nltk.tree.Tree.fromstring(tree_repr)
cdef list node_list = []
self._get_node(tree, node_list)
        node_list.sort(key=lambda x: x.production)
cdef Node node
cdef dict node_dict
node_dict = dict([(node.node_id, node) for node in node_list])
return node_list, node_dict
cdef int _get_node(self, tree, list node_list):
"""
Recursive method for generating the node lists.
"""
cdef str cprod
cdef Node node
cdef int node_id, ch_id
cdef list prod_list, children
if type(tree[0])!= str:
prod_list = [tree.label()]
children = []
for ch in tree:
ch_id = self._get_node(ch, node_list)
#prod_list.append(ch.node)
prod_list.append(ch.label())
children.append(ch_id)
node_id = len(node_list)
            cprod = ' '.join(prod_list)
node = Node(cprod, node_id, children)
node_list.append(node)
return node_id
else:
            cprod = ' '.join([tree.label(), tree[0]])
node_id = len(node_list)
node = Node(cprod, node_id, None)
node_list.append(node)
return node_id
cdef void _build_cache(self, np.ndarray X):
"""
Caches the node lists, for each tree that it is not
in the cache. If all trees in X are already cached, this
method does nothing.
These node lists can then be quickly accessed when calculating K, rather than having to
traverse trees each time we want to access a node. This provided substantial speed ups (x10)
"""
cdef np.ndarray tree_repr
cdef str t_repr
cdef list node_list
cdef dict node_dict
for tree_repr in X:
t_repr = tree_repr[0]
if t_repr not in self._tree_cache:
node_list, node_dict = self._gen_node_list(t_repr)
self._tree_cache[t_repr] = (node_list, node_dict)
cpdef Kdiag(self, np.ndarray X):
"""
The method that calls the SSTK for each individual tree.
"""
cdef np.ndarray[np.double_t, ndim=1] X_diag_Ks, X_diag_dlambdas, X_diag_dsigmas
self._build_cache(X)
X_diag_Ks, X_diag_dlambdas, X_diag_dsigmas = self._diag_calculations(X)
return X_diag_Ks
cpdef tuple K(self, np.ndarray X, np.ndarray X2):
"""
The method that calls the SSTK for each tree pair. Some shortcuts are used
when X2 == None (when calculating the Gram matrix for X).
"""
cdef np.ndarray[np.double_t, ndim=1] X_diag_Ks, X_diag_dlambdas, X_diag_dsigmas,X2_diag_Ks, X2_diag_dlambdas, X2_diag_dsigmas
cdef np.ndarray x1, x2
cdef np.ndarray[np.double_t, ndim=2] Ks, dlambdas, dsigmas
cdef int i,j
cdef bint symmetric
cdef list nodes1, nodes2
cdef dict dict1, dict2
cdef double K_result, dlambda, dsigma,K_norm, dlambda_norm, dsigma_norm
# Put any new trees in the cache. If the trees are already cached, this code
# won't do anything.
self._build_cache(X)
if X2 is None:
symmetric = True
X2 = X
else:
symmetric = False
self._build_cache(X2)
# Calculate K for diagonal values
# because we will need them later to normalize.
if self.normalize:
X_diag_Ks, X_diag_dlambdas, X_diag_dsigmas = self._diag_calculations(X)
if not symmetric:
X2_diag_Ks, X2_diag_dlambdas, X2_diag_dsigmas = self._diag_calculations(X2)
# Initialize the derivatives here
# because we are going to calculate them at the same time as K.
Ks = np.zeros(shape=(len(X), len(X2)))
dlambdas = np.zeros(shape=(len(X), len(X2)))
dsigmas = np.zeros(shape=(len(X), len(X2)))
# Iterate over the trees in X and X2 (or X and X in the symmetric case).
for i, x1 in enumerate(X):
for j, x2 in enumerate(X2):
# Shortcut: no calculation is needed for the upper
# part of the Gram matrix because it is symmetric
if symmetric:
if i > j:
Ks[i][j] = Ks[j][i]
dlambdas[i][j] = dlambdas[j][i]
dsigmas[i][j] = dsigmas[j][i]
continue
# Another shortcut: because this is the normalized SSTK
# diagonal values will always be equal to 1.
if i == j and self.normalize:
Ks[i][j] = 1
continue
# It will always be a 1-element array so we just index by 0
nodes1, dict1 = self._tree_cache[x1[0]]
                nodes2, dict2 = self._tree_cache[x2[0]]
K_result, dlambda, dsigma = self._calc_K(nodes1, nodes2, dict1, dict2)
# Normalization happens here.
if self.normalize:
if symmetric:
K_norm, dlambda_norm, dsigma_norm = self._normalize(K_result, dlambda, dsigma,
X_diag_Ks[i], X_diag_Ks[j],
X_diag_dlambdas[i], X_diag_dlambdas[j],
X_diag_dsigmas[i], X_diag_dsigmas[j])
else:
K_norm, dlambda_norm, dsigma_norm = self._normalize(K_result, dlambda, dsigma,
X_diag_Ks[i], X2_diag_Ks[j],
X_diag_dlambdas[i], X2_diag_dlambdas[j],
X_diag_dsigmas[i], X2_diag_dsigmas[j])
# Store everything, including derivatives.
Ks[i][j] = K_norm
dlambdas[i][j] = dlambda_norm
dsigmas[i][j] = dsigma_norm
else:
Ks[i][j] = K_result
dlambdas[i][j] = dlambda
dsigmas[i][j] = dsigma
return (Ks, dlambdas, dsigmas)
cdef tuple _normalize(self, double K_result, double dlambda, double dsigma, double diag_Ks_i,
double diag_Ks_j, double diag_dlambdas_i, double diag_dlambdas_j,
double diag_dsigmas_i, double diag_dsigmas_j):
"""
Normalize the result from SSTK, including derivatives.
"""
        cdef double norm, sqrt_norm, K_norm, diff_lambda, dlambda_norm, diff_sigma, dsigma_norm
norm = diag_Ks_i * diag_Ks_j
sqrt_norm = sqrt(norm)
K_norm = K_result / sqrt_norm
diff_lambda = ((diag_dlambdas_i * diag_Ks_j) +
(diag_Ks_i * diag_dlambdas_j))
diff_lambda /= 2 * norm
dlambda_norm = ((dlambda / sqrt_norm) -
(K_norm * diff_lambda))
diff_sigma = ((diag_dsigmas_i * diag_Ks_j) +
(diag_Ks_i * diag_dsigmas_j))
diff_sigma /= 2 * norm
dsigma_norm = ((dsigma / sqrt_norm) -
(K_norm * diff_sigma))
return K_norm, dlambda_norm, dsigma_norm
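    # The expressions above are the quotient rule applied to
    # K_norm = K / sqrt(K_ii * K_jj):
    #   dK_norm = dK / sqrt(K_ii * K_jj)
    #            - K_norm * (dK_ii * K_jj + K_ii * dK_jj) / (2 * K_ii * K_jj)
    # evaluated once for the lambda derivative and once for the sigma derivative.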
cdef tuple _diag_calculations(self, np.ndarray X):
"""
Calculate the K(x,x) values (required for normalization)
"""
        cdef np.ndarray[np.double_t, ndim=1] K_vec, dlambda_vec, dsigma_vec
cdef list nodes
        cdef dict dicts
cdef double K_result, dlambda, dsigma
K_vec = np.zeros(shape=(len(X),))
dlambda_vec = np.zeros(shape=(len(X),))
dsigma_vec = np.zeros(shape=(len(X),))
for i, x in enumerate(X):
nodes, dicts = self._tree_cache[x[0]]
K_result, dlambda, dsigma = self._calc_K(nodes, nodes, dicts, dicts)
K_vec[i] = K_result
dlambda_vec[i] = dlambda
dsigma_vec[i] = dsigma
return (K_vec, dlambda_vec, dsigma_vec)
cdef list _get_node_pairs(self,list nodes1, list nodes2):
"""
The node pair detection method devised by Moschitti (2006).
Fast way to determine which nodes should be compared
"""
cdef list node_pairs = []
cdef int i1 = 0
cdef int i2 = 0
cdef int reset2
cdef Node n1, n2
while i1 < len(nodes1) and i2 < len(nodes2):
n1 = nodes1[i1]
n2 = nodes2[i2]
if n1.production > n2.production:
i2 += 1
elif n1.production < n2.production:
i1 += 1
else:
while n1.production == n2.production:
reset2 = i2
while n1.production == n2.production:
node_pairs.append((n1, n2))
i2 += 1
if i2 >= len(nodes2):
break
n2 = nodes2[i2]
i1 += 1
if i1 >= len(nodes1):
break
i2 = reset2
n1 = nodes1[i1]
n2 = nodes2[i2]
return node_pairs
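    # Because both node lists are sorted by production string, this two-pointer
    # sweep enumerates exactly the pairs with matching productions (the only
    # pairs with a non-zero delta), avoiding the quadratic all-pairs scan.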
cdef tuple _delta(self, Node node1, Node node2, dict dict1, dict dict2,
delta_matrix, dlambda_matrix, dsigma_matrix,
double _lambda, double _sigma):
"""
Recursive method used in kernel calculation.
It also calculates the derivatives wrt lambda and sigma.
"""
cdef int id1, id2, ch1, ch2, i
cdef double val, prod, K_result, dlambda, dsigma, sum_lambda, sum_sigma, denom
cdef double delta_result, dlambda_result, dsigma_result
cdef Node n1, n2
id1 = node1.node_id
id2 = node2.node_id
tup = (id1, id2)
        # Check whether we have already made this comparison;
        # if so, just read the result from memory.
val = delta_matrix[tup]
if val > 0:
return val, dlambda_matrix[tup], dsigma_matrix[tup]
#we follow iterative scheme laid out in https://www.aclweb.org/anthology/Q15-1033.pdf (Beck 2015)
        if node1.children_ids is None:
delta_matrix[tup] = _lambda
dlambda_matrix[tup] = 1
return (_lambda, 1, 0)
prod = 1
sum_lambda = 0
sum_sigma = 0
children1 = node1.children_ids
children2 = node2.children_ids
for i in range(len(children1)):
ch1 = children1[i]
ch2 = children2[i]
n1 = dict1[ch1]
n2 = dict2[ch2]
if n1.production == n2.production:
K_result, dlambda, dsigma = self._delta(n1, n2, dict1, dict2,
delta_matrix,
dlambda_matrix,
dsigma_matrix,
_lambda, _sigma)
denom = _sigma + K_result
prod *= denom
sum_lambda += dlambda / denom
sum_sigma += (1 + dsigma) / denom
else:
prod *= _sigma
sum_sigma += 1 /_sigma
delta_result = _lambda * prod
dlambda_result = prod + (delta_result * sum_lambda)
dsigma_result = delta_result * sum_sigma
delta_matrix[tup] = delta_result
dlambda_matrix[tup] = dlambda_result
dsigma_matrix[tup] = dsigma_result
return (delta_result, dlambda_result, dsigma_result)
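    # In effect this evaluates the SSTK recurrence: for matching preterminals
    # delta = lambda, and otherwise
    #   delta(n1, n2) = lambda * prod_i (sigma + delta(child1_i, child2_i)),
    # with the lambda and sigma derivatives accumulated alongside via the
    # product rule (hence the sum_lambda and sum_sigma terms above).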
cdef tuple _calc_K(self, list nodes1,list nodes2,dict dict1,dict dict2):
"""
The actual SSTK kernel, evaluated over two node lists.
It also calculates the derivatives wrt lambda and sigma.
"""
cdef double K_total = 0
cdef double dlambda_total = 0
cdef double dsigma_total = 0
cdef double K_result, dlambda, dsigma
# We store the hypers inside C doubles and pass them as
# parameters for "delta", for efficiency.
cdef double _lambda = self._lambda
cdef double _sigma = self._sigma
# Initialize the DP structure.
delta_matrix = defaultdict(float)
dlambda_matrix = defaultdict(float)
dsigma_matrix = defaultdict(float)
# only calculate over a subset of node pairs for efficiency
node_pairs = self._get_node_pairs(nodes1, nodes2)
for node_pair in node_pairs:
K_result, dlambda, dsigma = self._delta(node_pair[0], node_pair[1], dict1, dict2,
delta_matrix, dlambda_matrix, dsigma_matrix,
_lambda, _sigma)
K_total += K_result
dlambda_total += dlambda
dsigma_total += dsigma
return (K_total, dlambda_total, dsigma_total)
class wrapper_raw_SubsetTreeKernel(raw_SubsetTreeKernel):
    # Dummy wrapper to allow the main class to be cast into C, whilst being accessible from python
    pass
<|end_of_text|>#cython: language_level=3
cdef class Cup:
cdef:
unsigned int _value
Cup _next
Cup _smaller_cup
@property
def value(self):
return self._value
@property
def smaller_cup(self):
return self._smaller_cup
@smaller_cup.setter
def smaller_cup(self, Cup smaller_cup):
self._smaller_cup = smaller_cup
@property
def next(self):
return self._next
@next.setter
def next(self, Cup next_cup):
        self._next = next_cup
def __init__(self, unsigned int value, Cup smaller_cup = None, Cup next_cup = None):
self._value = value
self._next = next_cup
self._smaller_cup = smaller_cup
def __repr__(self):
next_cup = None if self._next is None else self._next.value
smaller_cup = None if self._smaller_cup is None else self._smaller_cup.value
return f'{self.__class__.__name__}(value={self.value}, next={next_cup}, smaller_cup={smaller_cup})'
cdef class CupsCircle:
cdef:
Cup _current_cup
Cup _largest_cup
Cup _head
Cup _tail
@property
def head(self):
return self._head
@head.setter
def head(self, Cup cup):
self._head = cup
@property
def tail(self):
return self._tail
@tail.setter
def tail(self, Cup cup):
self._tail = cup
@property
def current_cup(self):
return self._current_cup
@current_cup.setter
def current_cup(self, Cup cup):
self._current_cup = cup
@property
def largest_cup(self):
return self._largest_cup
@largest_cup.setter
def largest_cup(self, Cup cup):
self._largest_cup = cup
def __init__(self, Cup head, Cup tail):
self._head = head
self._tail = tail
if self._head is None and self._tail is not None:
self._head = self._tail
self._head.next = self._tail
self._largest_cup = self._head
elif self._head is not None and self._tail is None:
self._tail = self._head
self._head.next = self._tail
self._largest_cup = self._head
elif self._head is not None and self._tail is not None:
self._head.next = self._tail
self._tail.next = self._head
if self._head.value > self._tail.value:
self._largest_cup = self._head
else:
self._largest_cup = self._tail
self._current_cup = self._head
cpdef public void add(self, Cup cup):
if self._head is None:
self._head = cup
self._tail = cup
self._largest_cup = cup
self._current_cup = cup
cup.next = cup
else:
self._tail.next = cup
self._tail = cup
self._tail.next = self._head
if self._largest_cup.value < cup.value:
self._largest_cup = cup
cpdef public unsigned int pop(self):
        cdef Cup deleted
if self._head is None:
raise IndexError("Empty CircleQueue")
else:
deleted = self._head
if self._largest_cup is self._head:
self._largest_cup = self._head.smaller_cup
self._head = self._head.next
self._tail.next = self._head
return deleted.value
cpdef public void remove(self):
self.pop()
cpdef public void insert(self, Cup destination_cup, list added_cups):
added_cups[2].next = destination_cup.next
destination_cup.next = added_cups[0]
if destination_cup is self._tail:
self._tail = added_cups[2]
cpdef public Cup get_cup_label(self, unsigned int value):
        cdef Cup cup = self._head
while cup is not None:
if cup.value == value:
return cup
cup = cup.next
if cup is self._head:
raise KeyError(f"No cup with the label {value}")
def __str__(self):
        cdef list output = []
        cdef Cup cup = self._head
while cup is not None:
output.append(str(cup.value))
if cup.next is self._head:
break
cup = cup.next
return ''.join(output)
cpdef CupsCircle extend_cups_circle(CupsCircle cups_circle, Cup prev_cup, size_t start, size_t end):
cdef unsigned int i = start
    cdef Cup cup
while i < end:
i += 1
cup = Cup(value=i, smaller_cup=prev_cup)
cups_circle.add(cup)
# print(cup, prev_cup)
prev_cup = cup
return cups_circle
cpdef CupsCircle perform_moves(CupsCircle cups_circle, size_t num_moves=100):
cdef size_t i, j
    cdef Cup cup
    cdef Cup destination_cup = None
    cdef list picked_cups
for i in range(num_moves):
# print(str(cups_circle), cups_circle.current_cup)
picked_cups = [cups_circle.current_cup.next]
for j in range(2):
cup = picked_cups[j].next
picked_cups.append(cup)
# print('picked_cups:', picked_cups)
cups_circle.current_cup.next = picked_cups[2].next
destination_cup = cups_circle.current_cup.smaller_cup
cups_circle.current_cup = cups_circle.current_cup.next
while True:
# print('destination_cup:', destination_cup)
if destination_cup is None:
destination_cup = cups_circle.largest_cup
if destination_cup not in picked_cups:
break
destination_cup = destination_cup.smaller_cup
cups_circle.insert(destination_cup, picked_cups)
return cups_circle
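# Minimal usage sketch (hypothetical labels, not part of the original file):
#   circle = CupsCircle(None, None)
#   cups = {v: Cup(v) for v in (3, 8, 9, 1, 2, 5, 4, 6, 7)}
#   for v in (3, 8, 9, 1, 2, 5, 4, 6, 7):
#       cups[v].smaller_cup = cups.get(v - 1)
#       circle.add(cups[v])
#   perform_moves(circle, num_moves=100)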
<|end_of_text|>import numpy as np
cimport numpy as np
cimport cython
from libc.math cimport exp, abs, M_PI
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
def garch_recursion(double[:] parameters, double[:] sigma2, int q_terms, int p_terms, int Y_len, int max_lag):
    # Adds the GARCH (lagged conditional-variance) terms to sigma2 in place:
    # for t >= max_lag, sigma2[t] += sum_k beta_k * sigma2[t-1-k]; before
    # max_lag, the unconditional value omega / (1 - sum(beta)) is used, with
    # omega = parameters[0] and the betas in parameters[q_terms+1 : q_terms+p_terms+1].
    cdef Py_ssize_t t, k
    if p_terms != 0:
for t in range(0,Y_len):
if t < max_lag:
sigma2[t] = parameters[0]/(1-np.sum(parameters[(q_terms+1):(q_terms+p_terms+1)]))
elif t >= max_lag:
for k in range(0,p_terms):
sigma2[t] += parameters[1+q_terms+k]*(sigma2[t-1-k])
return sigma2<|end_of_text|># TODO finish implementing this file
from cpython cimport bool
import numpy as np
cimport numpy as np
import pypore.filetypes.data_file as df
from pypore.i_o.abstract_reader cimport AbstractReader
DTYPE = np.float64  # np.float was removed from NumPy; float64 matches np.float_t
ctypedef np.float_t DTYPE_t
cdef class DataFileReader(AbstractReader):
cdef long next_to_send
cdef object datafile
cpdef _prepare_file(self, filename):
"""
Implementation of :py:func:`pypore.i_o.abstract_reader.AbstractReader._prepare_file`
for Pypore HDF5 files.
"""
self.datafile = df.open_file(filename, mode='r')
self.sample_rate = self.datafile.get_sample_rate()
self.points_per_channel_total = self.datafile.get_data_length()
self.next_to_send = 0
cdef object get_next_blocks_c(self, long n_blocks=1):
self.next_to_send += self.block_size
if self.next_to_send > self.points_per_channel_total:
return [self.datafile.root.data[self.next_to_send - self.block_size:].astype(DTYPE)]
else:
return [self.datafile.root.data[self.next_to_send - self.block_size : self.next_to_send].astype(DTYPE)]
cdef object get_all_data_c(self, bool decimate=False):
return [self.datafile.root.data[:].astype(DTYPE)]
cdef void close_c(self):
self.datafile.close()
<|end_of_text|># because division in C and Python is different,
# cython use Python division default.
cimport cython
@cython.cdivision(True)
def divides(int a, int b):
return a / b
def remainder(int a, int b):
with cython.cdivision(True):
return a % b
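# Example of the difference (assuming the module is compiled):
#   divides(-7, 2)   -> -3  (C semantics: truncation toward zero)
#   remainder(-7, 2) -> -1  (C semantics: remainder takes the dividend's sign)
# whereas plain Python gives -7 // 2 == -4 and -7 % 2 == 1.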
<|end_of_text|>'''
Author: [email protected]
Date: 2020-01-09 14:55:06
FilePath: /AlgoLibR/python/AlgoLibR/demo/cdemo.pxd
Description:
'''
from AlgoLibR.utils.string cimport wchar_t,wstring, to_wchar_t,to_wstring,from_wstring
cdef extern from "AlgoLibR/demo/mul.h" namespace "AlgoLibR::demo":
int mul(int a, int b)
# Declare the class with cdef
cdef extern from "AlgoLibR/demo/demo.h" namespace "AlgoLibR::demo":
cdef cppclass MyDemo:
MyDemo() except +
MyDemo(int) except +
int a
        int mul(int)
int pymul(int)
int add(int )
void sayHello(wchar_t*)
<|end_of_text|># cython: language_level=3
from libc.stdint cimport uint64_t
cpdef uint64_t fibonacci(unsigned int n):
if n == 0:
return 0
if n == 1:
return 1
return fibonacci(n - 1) + fibonacci(n - 2)
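# Note: this doubly-recursive definition does exponential work in n.
# An iterative version is linear; a sketch (hypothetical helper, not part of
# the original module):
#   cpdef uint64_t fibonacci_iter(unsigned int n):
#       cdef uint64_t a = 0, b = 1
#       cdef unsigned int i
#       for i in range(n):
#           a, b = b, a + b
#       return a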
<|end_of_text|># -*- coding: utf-8 -*-
from cpython.version cimport PY_MAJOR_VERSION
from libcpp cimport bool
from libc.stddef cimport size_t
from libc.stdlib cimport malloc, free
from slapi.slapi cimport *
from slapi.initialize cimport *
from slapi.model.defs cimport *
from slapi.model.drawing_element cimport *
from slapi.model.entities cimport *
from slapi.unicodestring cimport *
from slapi.model.entities cimport *
from slapi.model.entity cimport *
from slapi.model.camera cimport *
from slapi.model.geometry_input cimport *
from slapi.model.model cimport *
from slapi.model.component_definition cimport *
from slapi.model.component_instance cimport *
from slapi.model.material cimport *
from slapi.model.group cimport *
from slapi.model.texture cimport *
from slapi.model.scene cimport *
from slapi.model.edge cimport *
from slapi.model.layer cimport *
from slapi.model.face cimport *
from slapi.model.mesh_helper cimport *
cdef class defaultdict(dict):
default_factory = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
class keep_offset(defaultdict):
def __init__(self):
defaultdict.__init__(self, int)
def __missing__(self, _):
return defaultdict.__len__(self)
def __getitem__(self, item):
number = defaultdict.__getitem__(self, item)
self[item] = number
return number
cdef inline double m(double& v):
"""
:param v: value to be converted from inches to meters
:return: value in meters
"""
return <double> 0.0254 * v
def get_API_version():
cdef size_t major, minor
SUGetAPIVersion(&major, &minor)
return (major, minor)
cdef check_result(SU_RESULT r):
    if r != SU_ERROR_NONE:
        print(__str_from_SU_RESULT(r))
        raise RuntimeError("Sketchup library giving unexpected results {}".format(__str_from_SU_RESULT(r)))
cdef __str_from_SU_RESULT(SU_RESULT r):
    # Note: C enum values are compared with ==; `is` would compare boxed
    # Python objects and fails for values outside the small-int cache.
    if r == SU_ERROR_NONE:
        return "SU_ERROR_NONE"
    if r == SU_ERROR_NULL_POINTER_INPUT:
        return "SU_ERROR_NULL_POINTER_INPUT"
    if r == SU_ERROR_INVALID_INPUT:
        return "SU_ERROR_INVALID_INPUT"
    if r == SU_ERROR_NULL_POINTER_OUTPUT:
        return "SU_ERROR_NULL_POINTER_OUTPUT"
    if r == SU_ERROR_INVALID_OUTPUT:
        return "SU_ERROR_INVALID_OUTPUT"
    if r == SU_ERROR_OVERWRITE_VALID:
        return "SU_ERROR_OVERWRITE_VALID"
    if r == SU_ERROR_GENERIC:
        return "SU_ERROR_GENERIC"
    if r == SU_ERROR_SERIALIZATION:
        return "SU_ERROR_SERIALIZATION"
    if r == SU_ERROR_OUT_OF_RANGE:
        return "SU_ERROR_OUT_OF_RANGE"
    if r == SU_ERROR_NO_DATA:
        return "SU_ERROR_NO_DATA"
    if r == SU_ERROR_INSUFFICIENT_SIZE:
        return "SU_ERROR_INSUFFICIENT_SIZE"
    if r == SU_ERROR_UNKNOWN_EXCEPTION:
        return "SU_ERROR_UNKNOWN_EXCEPTION"
    if r == SU_ERROR_MODEL_INVALID:
        return "SU_ERROR_MODEL_INVALID"
    if r == SU_ERROR_MODEL_VERSION:
        return "SU_ERROR_MODEL_VERSION"
    return "Unknown SU_RESULT ({})".format(<int> r)
cdef StringRef2Py(SUStringRef& suStr):
cdef size_t out_length = 0
cdef SU_RESULT res = SUStringGetUTF8Length(suStr, &out_length)
cdef char* out_char_array
cdef size_t out_number_of_chars_copied
if out_length == 0:
return ""
else:
out_char_array = <char*> malloc(sizeof(char) * (out_length * 2))
SUStringGetUTF8(suStr, out_length, out_char_array, &out_number_of_chars_copied)
try:
py_result = out_char_array[:out_number_of_chars_copied].decode('UTF-8')
finally:
free(out_char_array)
return py_result
cdef SUStringRef Py2StringRef(s):
cdef SUStringRef out_string_ref
cdef SU_RESULT res
if type(s) is unicode:
# fast path for most common case(s)
res = SUStringCreateFromUTF8(&out_string_ref, <unicode> s)
elif PY_MAJOR_VERSION < 3 and isinstance(s, bytes):
# only accept byte strings in Python 2.x, not in Py3
res = SUStringCreateFromUTF8(&out_string_ref, (<bytes> s).decode('ascii'))
elif isinstance(s, unicode):
# an evil cast to <unicode> might work here in some(!) cases,
# depending on what the further processing does. to be safe,
# we can always create a copy instead
res = SUStringCreateFromUTF8(&out_string_ref, unicode(s))
else:
raise TypeError("Cannot make sense of string {}".format(s))
return out_string_ref
cdef class Entity:
cdef SUEntityRef entity
def __cinit__(self):
self.entity.ptr = <void*> 0
property id:
def __get__(self):
cdef int32_t entity_id
check_result(SUEntityGetID(self.entity, &entity_id))
return entity_id
def __len__(self):
cdef size_t count
check_result(SUEntityGetNumAttributeDictionaries(self.entity, &count))
return count
cdef class Point2D:
cdef SUPoint2D p
def __cinit__(self, double x, double y):
self.p.x = x
self.p.y = y
property x:
def __get__(self): return self.p.x
def __set__(self, double x): self.p.x = x
property y:
def __get__(self): return self.p.y
def __set__(self, double y): self.p.y = y
cdef class Point3D:
cdef SUPoint3D p
def __cinit__(self, double x=0, double y=0, double z=0):
self.p.x = x
self.p.y = y
self.p.z = z
property x:
def __get__(self): return self.p.x
def __set__(self, double x): self.p.x = x
property y:
def __get__(self): return self.p.y
def __set__(self, double y): self.p.y = y
property z:
def __get__(self): return self.p.z
def __set__(self, double z): self.p.z = z
def __str__(self):
return "Point3d<{},{},{}>".format(self.p.x, self.p.y, self.p.z)
def __repr__(self):
return "Point3d @{} [{},{},{}]".format(<size_t> &(self.p), self.p.x, self.p.y, self.p.z)
cdef class Vector3D:
cdef SUVector3D p
def __cinit__(self, double x, double y, double z):
self.p.x = x
self.p.y = y
self.p.z = z
property x:
def __get__(self): return self.p.x
def __set__(self, double x): self.p.x = x
property y:
def __get__(self): return self.p.y
def __set__(self, double y): self.p.y = y
property z:
def __get__(self): return self.p.z
def __set__(self, double z): self.p.z = z
cdef class Edge:
cdef SUEdgeRef edge
def __cinit__(self):
self.edge.ptr = <void *> 0
cdef set_ptr(self, void* ptr):
self.edge.ptr = ptr
def GetSoft(self):
cdef bool soft_flag = 0
check_result(SUEdgeGetSoft(self.edge, &soft_flag))
return soft_flag
def GetSmooth(self):
cdef bool smooth_flag = 0
check_result(SUEdgeGetSmooth(self.edge, &smooth_flag))
return smooth_flag
cdef class Plane3D:
    cdef SUPlane3D p  # assumes the SDK's SUPlane3D struct (a, b, c, d) is declared in the slapi pxds
cdef bool cleanup
def __cinit__(self, double a, double b, double c, double d):
self.p.a = a
self.p.b = b
self.p.c = c
self.p.d = d
property a:
def __get__(self): return self.p.a
def __set__(self, double a): self.p.a = a
property b:
def __get__(self): return self.p.b
def __set__(self, double b): self.p.b = b
property c:
def __get__(self): return self.p.c
        def __set__(self, double c): self.p.c = c
property d:
def __get__(self): return self.p.d
def __set__(self, double d): self.p.d = d
cdef class Camera:
cdef SUCameraRef camera
def __cinit__(self):
self.camera.ptr = <void *> 0
cdef set_ptr(self, void* ptr):
self.camera.ptr = ptr
def GetOrientation(self):
cdef SUPoint3D position
cdef SUPoint3D target
cdef SUVector3D up_vector = [0, 0, 0]
check_result(SUCameraGetOrientation(self.camera, &position, &target, &up_vector))
return (m(position.x), m(position.y), m(position.z)), \
(m(target.x), m(target.y), m(target.z)), \
(m(up_vector.x), m(up_vector.y), m(up_vector.z))
property fov:
def __get__(self):
#Retrieves the field of view in degrees of a camera object. The field of view is measured along the vertical direction of the camera.
cdef double fov
cdef SU_RESULT res = SUCameraGetPerspectiveFrustumFOV(self.camera, &fov)
if res == SU_ERROR_NONE:
return fov
if res == SU_ERROR_NO_DATA:
return False
raise RuntimeError("Sketchup library giving unexpected results {}".format(__str_from_SU_RESULT(res)))
def __set__(self, double fov):
check_result(SUCameraSetPerspectiveFrustumFOV(self.camera, fov))
property perspective:
def __get__(self):
cdef bool p
check_result(SUCameraGetPerspective(self.camera, &p))
return p
def __set__(self, bool p):
check_result(SUCameraSetPerspective(self.camera, p))
property scale:
def __get__(self):
cdef double height = 0
check_result(SUCameraGetOrthographicFrustumHeight(self.camera, &height))
return height
def __set__(self, double height):
check_result(SUCameraSetOrthographicFrustumHeight(self.camera, height))
property ortho:
def __get__(self):
cdef double o = 0
cdef SU_RESULT res = SUCameraGetOrthographicFrustumHeight(self.camera, &o)
if res == SU_ERROR_NONE:
return o
if res == SU_ERROR_NO_DATA:
return False
raise RuntimeError("Sketchup library giving unexpected results {}".format(__str_from_SU_RESULT(res)))
property aspect_ratio:
def __get__(self):
cdef double asp = 1.0
cdef SU_RESULT res = SUCameraGetAspectRatio(self.camera, &asp)
if res == SU_ERROR_NONE:
return asp
if res == SU_ERROR_NO_DATA:
return False
raise RuntimeError("Sketchup library giving unexpected results {}".format(__str_from_SU_RESULT(res)))
cdef class Texture:
cdef SUTextureRef tex_ref
def __cinit__(self):
self.tex_ref.ptr = <void*> 0
def write(self, filename):
py_byte_string = filename.encode('UTF-8')
cdef const char* file_path = py_byte_string
check_result(SUTextureWriteToFile(self.tex_ref, file_path))
property name:
def __get__(self):
cdef SUStringRef n
n.ptr = <void*> 0
SUStringCreate(&n)
check_result(SUTextureGetFileName(self.tex_ref, &n))
return StringRef2Py(n)
property dimensions:
def __get__(self):
cdef double s_scale = 1.0
cdef double t_scale = 1.0
cdef size_t width = 0
cdef size_t height = 0
cdef SUMaterialRef mat
check_result(SUTextureGetDimensions(self.tex_ref, &width, &height, &s_scale, &t_scale))
return width, height, s_scale, t_scale
property use_alpha_channel:
def __get__(self):
cdef bool alpha_channel_used
check_result(SUTextureGetUseAlphaChannel(self.tex_ref, &alpha_channel_used))
return alpha_channel_used
cdef class Instance:
cdef SUComponentInstanceRef instance
def __cinit__(self):
self.instance.ptr = <void*> 0
property name:
def __get__(self):
cdef SUStringRef n
n.ptr = <void*> 0
SUStringCreate(&n)
check_result(SUComponentInstanceGetName(self.instance, &n))
return StringRef2Py(n)
property entity:
def __get__(self):
cdef SUEntityRef ref
ref = SUComponentInstanceToEntity(self.instance)
res = Entity()
res.entity.ptr = ref.ptr
return res
property definition:
def __get__(self):
cdef SUComponentDefinitionRef component
component.ptr = <void*> 0
SUComponentInstanceGetDefinition(self.instance, &component)
c = Component()
c.comp_def.ptr = component.ptr
return c
property transform:
def __get__(self):
cdef SUTransformation t
check_result(SUComponentInstanceGetTransform(self.instance, &t))
return [[t.values[0], t.values[4], t.values[8], m(t.values[12])],
[t.values[1], t.values[5], t.values[9], m(t.values[13])],
[t.values[2], t.values[6], t.values[10], m(t.values[14])],
[t.values[3], t.values[7], t.values[11], t.values[15]]] # * transform
property material:
def __get__(self):
cdef SUDrawingElementRef draw_elem = SUComponentInstanceToDrawingElement(self.instance)
cdef SUMaterialRef mat
mat.ptr = <void*> 0
            cdef SU_RESULT res = SUDrawingElementGetMaterial(draw_elem, &mat)
            if res == SU_ERROR_NONE:
m = Material()
m.material.ptr = mat.ptr
return m
else:
return None
property layer:
def __get__(self):
cdef SUDrawingElementRef draw_elem = SUComponentInstanceToDrawingElement(self.instance)
cdef SULayerRef lay
lay.ptr = <void*> 0
cdef SU_RESULT res = SUDrawingElementGetLayer(draw_elem, &lay)
if res == SU_ERROR_NONE:
l = Layer()
l.layer.ptr = lay.ptr
return l
else:
return None
property hidden:
def __get__(self):
cdef SUDrawingElementRef draw_elem = SUComponentInstanceToDrawingElement(self.instance)
cdef bool hide_flag = False
check_result(SUDrawingElementGetHidden(draw_elem, &hide_flag))
return hide_flag
def __set__(self, bool hide_flag):
cdef SUDrawingElementRef draw_elem = SUComponentInstanceToDrawingElement(self.instance)
check_result(SUDrawingElementSetHidden(draw_elem, hide_flag))
cdef instance_from_ptr(SUComponentInstanceRef r):
res = Instance()
res.instance.ptr = r.ptr
#print("Instance {}".format(hex(<int> r.ptr)))
return res
cdef class Component:
cdef SUComponentDefinitionRef comp_def
def __cinit__(self):
self.comp_def.ptr = <void*> 0
property entities:
def __get__(self):
cdef SUEntitiesRef e
e.ptr = <void*> 0
SUComponentDefinitionGetEntities(self.comp_def, &e);
res = Entities()
res.set_ptr(e.ptr)
return res
property name:
def __get__(self):
cdef SUStringRef n
n.ptr = <void*> 0
SUStringCreate(&n)
check_result(SUComponentDefinitionGetName(self.comp_def, &n))
return StringRef2Py(n)
property numInstances:
def __get__(self):
cdef size_t n = 0
check_result(SUComponentDefinitionGetNumInstances(self.comp_def, &n))
return n
property numUsedInstances:
def __get__(self):
cdef size_t n = 0
check_result(SUComponentDefinitionGetNumUsedInstances(self.comp_def, &n))
return n
cdef class Layer:
cdef SULayerRef layer
def __cinit__(self, **kwargs):
self.layer.ptr = <void*> 0
if not '__skip_init' in kwargs:
check_result(SULayerCreate(&(self.layer)))
property name:
def __get__(self):
cdef SUStringRef n
n.ptr = <void*> 0
SUStringCreate(&n)
check_result(SULayerGetName(self.layer, &n))
return StringRef2Py(n)
property visible:
def __get__(self):
cdef bool visible_flag = False
check_result(SULayerGetVisibility(self.layer, &visible_flag))
return visible_flag
def __set__(self, bool vflag):
check_result(SULayerSetVisibility(self.layer, vflag))
def __richcmp__(Layer self, Layer other not None, int op):
        if op == 2: # __eq__
return <size_t> self.layer.ptr == <size_t> other.layer.ptr
cdef class Group:
cdef SUGroupRef group
def __cinit__(self):
pass
property name:
def __get__(self):
cdef SUStringRef n
n.ptr = <void*> 0
SUStringCreate(&n)
check_result(SUGroupGetName(self.group, &n))
return StringRef2Py(n)
property transform:
def __get__(self):
cdef SUTransformation t
check_result(SUGroupGetTransform(self.group, &t))
return [[t.values[0], t.values[4], t.values[8], m(t.values[12])],
[t.values[1], t.values[5], t.values[9], m(t.values[13])],
[t.values[2], t.values[6], t.values[10], m(t.values[14])],
[t.values[3], t.values[7], t.values[11], t.values[15]]] # * transform
property entities:
def __get__(self):
cdef SUEntitiesRef entities
entities.ptr = <void*> 0
check_result(SUGroupGetEntities(self.group, &entities))
res = Entities()
res.set_ptr(entities.ptr)
return res
property material:
def __get__(self):
cdef SUDrawingElementRef draw_elem = SUGroupToDrawingElement(self.group)
cdef SUMaterialRef mat
mat.ptr = <void*> 0
cdef SU_RESULT res = SUDrawingElementGetMaterial(draw_elem, &mat)
if res == SU_ERROR_NONE:
m = Material()
m.material.ptr = mat.ptr
return m
else:
return None
property layer:
def __get__(self):
cdef SUDrawingElementRef draw_elem = SUGroupToDrawingElement(self.group)
cdef SULayerRef lay
lay.ptr = <void*> 0
cdef SU_RESULT res = SUDrawingElementGetLayer(draw_elem, &lay)
if res == SU_ERROR_NONE:
l = Layer(__skip_init=True)
l.layer.ptr = lay.ptr
return l
else:
return None
property hidden:
def __get__(self):
cdef SUDrawingElementRef draw_elem = SUGroupToDrawingElement(self.group)
cdef bool hide_flag = False
check_result(SUDrawingElementGetHidden(draw_elem, &hide_flag))
return hide_flag
def __repr__(self):
return "Group {} \n\ttransform {}".format(self.name, self.transform)
cdef class Face:
cdef SUFaceRef face_ref
cdef double s_scale
cdef double t_scale
def __cinit__(self):
self.face_ref.ptr = <void*> 0
self.s_scale = 1.0
self.t_scale = 1.0
@staticmethod
def create():
cdef SUPoint3D[4] vl = [
[0, 0, 0],
[100, 100, 0],
[100, 100, 100],
[0, 0, 100]]
cdef SULoopInputRef outer_loop
outer_loop.ptr = <void*> 0
check_result(SULoopInputCreate(&outer_loop))
for i in range(4):
check_result(SULoopInputAddVertexIndex(outer_loop, i))
res = Face()
check_result(SUFaceCreate(&(res.face_ref), vl, &outer_loop))
print("Face {}".format(<size_t> res.face_ref.ptr))
return res
@staticmethod
def create_simple(list vertices):
cdef size_t face_len = len(vertices)
cdef SUPoint3D *vl = <SUPoint3D*> malloc(sizeof(SUPoint3D) * face_len)
for i, vert in enumerate(vertices):
if len(vert)!= 3:
raise ValueError('Vertices should have 3 coordinates')
vl[i].x = float(vert[0])
vl[i].y = float(vert[1])
vl[i].z = float(vert[2])
res = Face()
check_result(SUFaceCreateSimple(&(res.face_ref), vl, face_len))
return res
property st_scale:
def __set__(self, v):
self.s_scale = v[0]
self.t_scale = v[1]
def __get__(self):
return self.s_scale, self.t_scale
property tessfaces:
def __get__(self):
cdef double z
cdef SUMeshHelperRef mesh_ref
mesh_ref.ptr = <void*> 0
check_result(SUMeshHelperCreate(&mesh_ref, self.face_ref))
cdef size_t triangle_count = 0
cdef size_t vertex_count = 0
check_result(SUMeshHelperGetNumTriangles(mesh_ref, &triangle_count))
check_result(SUMeshHelperGetNumVertices(mesh_ref, &vertex_count))
cdef size_t*indices = <size_t*> malloc(triangle_count * 3 * sizeof(size_t))
cdef size_t index_count = 0
check_result(SUMeshHelperGetVertexIndices(mesh_ref, triangle_count * 3, indices, &index_count))
cdef size_t got_vertex_count = 0
cdef SUPoint3D*vertices = <SUPoint3D*> malloc(sizeof(SUPoint3D) * vertex_count)
check_result(SUMeshHelperGetVertices(mesh_ref, vertex_count, vertices, &got_vertex_count))
cdef SUPoint3D*stq = <SUPoint3D*> malloc(sizeof(SUPoint3D) * vertex_count)
cdef size_t got_stq_count = 0
check_result(SUMeshHelperGetFrontSTQCoords(mesh_ref, vertex_count, stq, &got_stq_count))
vertices_list = []
uv_list = []
for i in range(got_vertex_count):
ind = int(i)
vertices_list.append((m(vertices[ind].x), m(vertices[ind].y), m(vertices[ind].z)))
for i in range(got_stq_count):
z = stq[i].z
if z == 0:
z = 1.0
ind = int(i)
uv_list.append((stq[ind].x / z * self.s_scale, stq[ind].y / z * self.t_scale))
triangles_list = []
for ii in range(index_count // 3):
ind = int(ii * 3)
triangles_list.append((indices[ind], indices[ind + 1], indices[ind + 2]))
free(vertices)
free(stq)
free(indices)
return (vertices_list, triangles_list, uv_list)
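    # In tessfaces above, the SDK returns front texture coordinates as
    # homogeneous (s, t, q) triples; s and t are divided by q (stored in
    # stq[i].z) to recover the usual UV pair, which is then scaled by st_scale.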
property edges:
def __get__(self):
cdef size_t edge_count = 0
check_result(SUFaceGetNumEdges(self.face_ref, &edge_count))
cdef SUEdgeRef*edges_array = <SUEdgeRef*>malloc(sizeof(SUEdgeRef) * edge_count)
cdef size_t count = 0
check_result(SUFaceGetEdges(self.face_ref, edge_count, edges_array, &count))
edges_list = []
for i in range(count):
e = Edge()
e.edge.ptr = edges_array[i].ptr
edges_list.append(e)
free(edges_array)
return edges_list
property material:
def __get__(self):
cdef SUMaterialRef mat
mat.ptr = <void*> 0
cdef SU_RESULT res = SUFaceGetFrontMaterial(self.face_ref, &mat)
if res == SU_ERROR_NONE:
m = Material()
m.material.ptr = mat.ptr
return m
def __repr__(self):
return "SUFaceRef 0x%0.16X" % <size_t> self.face_ref.ptr
cdef class Entities:
cdef SUEntitiesRef entities
def __cinit__(self):
self.entities.ptr = <void*> 0
cdef set_ptr(self, void* ptr):
self.entities.ptr = ptr
def NumFaces(self):
cdef size_t count = 0
check_result(SUEntitiesGetNumFaces(self.entities, &count))
return count
def NumCurves(self):
cdef size_t count = 0
check_result(SUEntitiesGetNumCurves(self.entities, &count))
return count
def NumGuidePoints(self):
cdef size_t count = 0
check_result(SUEntitiesGetNumGuidePoints(self.entities, &count))
return count
def NumEdges(self, bool standalone_only=False):
cdef size_t count = 0
check_result(SUEntitiesGetNumEdges(self.entities, standalone_only, &count))
return count
def NumPolyline3ds(self):
cdef size_t count = 0
check_result(SUEntitiesGetNumPolyline3ds(self.entities, &count))
return count
def NumGroups(self):
cdef size_t count = 0
check_result(SUEntitiesGetNumGroups(self.entities, &count))
return count
def NumImages(self):
        cdef size_t count = 0
check_result(SUEntitiesGetNumImages(self.entities, &count))
return count
def NumInstances(self):
cdef size_t count = 0
check_result(SUEntitiesGetNumInstances(self.entities, &count))
return count
property faces:
def __get__(self):
cdef size_t len = 0
check_result(SUEntitiesGetNumFaces(self.entities, &len))
cdef SUFaceRef*faces_array = <SUFaceRef*> malloc(sizeof(SUFaceRef) * len)
cdef size_t count = 0
check_result(SUEntitiesGetFaces(self.entities, len, faces_array, &count))
for i in range(count):
res = Face()
res.face_ref.ptr = faces_array[i].ptr
yield res
free(faces_array)
def get__triangles_lists(self, default_material):
verts = []
faces = []
mat_index = []
mats = keep_offset()
seen = keep_offset()
uv_list = []
alpha = False # We assume object does not need alpha flag
uvs_used = False # We assume no uvs need to be added
for f in self.faces:
            vs, tri, uvs = f.tessfaces
if f.material:
mat_number = mats[f.material.name]
else:
mat_number = mats[default_material]
mapping = {}
for i, (v, uv) in enumerate(zip(vs, uvs)):
l = len(seen)
mapping[i] = seen[v]
if len(seen) > l:
verts.append(v)
uvs.append(uv)
for face in tri:
f0, f1, f2 = face[0], face[1], face[2]
if f2 == 0: ## eeekadoodle dance
faces.append((mapping[f1], mapping[f2], mapping[f0]))
uv_list.append((uvs[f2][0], uvs[f2][1],
uvs[f1][0], uvs[f1][1],
uvs[f0][0], uvs[f0][1],
0, 0))
else:
faces.append((mapping[f0], mapping[f1], mapping[f2]))
uv_list.append((uvs[f0][0], uvs[f0][1],
uvs[f1][0], uvs[f1][1],
uvs[f2][0], uvs[f2][1],
0, 0))
mat_index.append(mat_number)
return verts, faces, uv_list, mat_index, mats
property groups:
def __get__(self):
cdef size_t num_groups = 0
check_result(SUEntitiesGetNumGroups(self.entities, &num_groups))
cdef SUGroupRef*groups = <SUGroupRef*> malloc(sizeof(SUGroupRef) * num_groups)
cdef size_t count = 0
check_result(SUEntitiesGetGroups(self.entities, num_groups, groups, &count))
for i in range(count):
res = Group()
res.group.ptr = groups[i].ptr
yield res
free(groups)
property instances:
def __get__(self):
cdef size_t num_instances = 0
check_result(SUEntitiesGetNumInstances(self.entities, &num_instances))
cdef SUComponentInstanceRef*instances = <SUComponentInstanceRef*> malloc(
sizeof(SUComponentInstanceRef) * num_instances)
cdef size_t count = 0
check_result(SUEntitiesGetInstances(self.entities, num_instances, instances, &count))
for i in range(count):
yield instance_from_ptr(instances[i])
free(instances)
def addFace(self, Face face):
check_result(SUEntitiesAddFaces(self.entities, 1, &face.face_ref))
def __repr__(self):
return "<sketchup.Entities at {}> groups {} instances {}".format(hex(<size_t> &self.entities),
self.NumGroups(), self.NumInstances())
cdef class Material:
cdef SUMaterialRef material
def __cinit__(self):
self.material.ptr = <void*> 0
property name:
def __get__(self):
cdef SUStringRef n
n.ptr = <void*> 0
SUStringCreate(&n)
check_result(SUMaterialGetName(self.material, &n))
return StringRef2Py(n)
property color:
def __get__(self):
cdef SUColor c
check_result(SUMaterialGetColor(self.material, &c))
return (c.red, c.green, c.blue, c.alpha)
property opacity:
def __get__(self):
cdef double alpha
check_result(SUMaterialGetOpacity(self.material, &alpha))
return alpha
def __set__(self, double alpha):
check_result(SUMaterialSetOpacity(self.material, alpha))
property texture:
def __get__(self):
cdef SUTextureRef t
t.ptr = <void*> 0
cdef SU_RESULT res = SUMaterialGetTexture(self.material, &t)
if res == SU_ERROR_NONE:
tex = Texture()
tex.tex_ref.ptr = t.ptr
return tex
else:
return False
cdef class RenderingOptions:
cdef SURenderingOptionsRef options
def __cinit__(self):
self.options.ptr = <void*> 0
cdef class Scene:
cdef SUSceneRef scene
def __cinit__(self):
self.scene.ptr = <void*> 0
property name:
def __get__(self):
cdef SUStringRef n
n.ptr = <void*> 0
SUStringCreate(&n)
check_result(SUSceneGetName(self.scene, &n))
return StringRef2Py(n)
property camera:
def __get__(self):
cdef SUCameraRef c
c.ptr = <void*> 0
check_result(SUSceneGetCamera(self.scene, &c))
res = Camera()
res.set_ptr(c.ptr)
return res
property rendering_options:
def __get__(self):
cdef SURenderingOptionsRef options
options.ptr = <void*> 0
check_result(SUSceneGetRenderingOptions(self.scene, &options))
res = RenderingOptions()
res.options = options
return res
property entity:
def __get__(self):
res = Entity()
res.entity = SUSceneToEntity(self.scene)
return res
property layers:
def __get__(self):
cdef size_t num_layers
check_result(SUSceneGetNumLayers(self.scene, &num_layers))
cdef SULayerRef*layers_array = <SULayerRef*> malloc(sizeof(SULayerRef) * num_layers)
for i in range(num_layers):
layers_array[i].ptr = <void*> 0
cdef size_t count = 0
check_result(SUSceneGetLayers(self.scene, num_layers, layers_array, &count))
for i in range(count):
l = Layer(__skip_init=True)
l.layer.ptr = layers_array[i].ptr
yield l
free(layers_array)
cdef class LoopInput:
cdef SULoopInputRef loop
def __cinit__(self, **kwargs):
self.loop.ptr = <void*> 0
if not '__skip_init' in kwargs:
check_result(SULoopInputCreate(&(self.loop)))
def AddVertexIndex(self, i):
check_result(SULoopInputAddVertexIndex(self.loop, i))
cdef class Model:
cdef SUModelRef model
def __cinit__(self, **kwargs):
self.model.ptr = <void*> 0
SUInitialize()
if not '__skip_init' in kwargs:
check_result(SUModelCreate(&(self.model)))
@staticmethod
def from_file(filename):
res = Model(__skip_init=True)
py_byte_string = filename.encode('UTF-8')
cdef const char* f = py_byte_string
check_result(SUModelCreateFromFile(&(res.model), f))
return res
def save(self, filename):
py_byte_string = filename.encode('UTF-8')
cdef const char* f = py_byte_string
check_result(SUModelSaveToFile(self.model, f))
return True
def close(self):
SUModelRelease(&self.model)
SUTerminate()
def NumMaterials(self):
cdef size_t count = 0
check_result(SUModelGetNumMaterials(self.model, &count))
return count
def NumComponentDefinitions(self):
cdef size_t count = 0
check_result(SUModelGetNumComponentDefinitions(self.model, &count))
return count
def NumScenes(self):
cdef size_t count = 0
check_result(SUModelGetNumScenes(self.model, &count))
return count
def NumLayers(self):
cdef size_t count = 0
check_result(SUModelGetNumLayers(self.model, &count))
return count
def NumAttributeDictionaries(self):
cdef size_t count = 0
check_result(SUModelGetNumAttributeDictionaries(self.model, &count))
return count
property camera:
        def __get__(self):
            # Mirrors Scene.camera above; assumes SUModelGetCamera (part of
            # the SketchUp C API) is declared in slapi.model.model.
            cdef SUCameraRef c
            c.ptr = <void*> 0
            check_result(SUModelGetCamera(self.model, &c))
            res = Camera()
            res.set_ptr(c.ptr)
            return res<|end_of_text|>
"""
Monomial expansion of `(aX + bY)^i (cX + dY)^{j-i}`
"""
##########################################################################
#
# Copyright (C) 2008 William Stein <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# http://www.gnu.org/licenses/
#
##########################################################################
from sage.ext.stdsage cimport PY_NEW
from sage.rings.integer cimport Integer
cdef class Apply:
def __cinit__(self):
"""
EXAMPLES::
sage: import sage.modular.modsym.apply
sage: sage.modular.modsym.apply.Apply()
<sage.modular.modsym.apply.Apply object at...>
"""
fmpz_poly_init(self.f)
fmpz_poly_init(self.g)
fmpz_poly_init(self.ff)
fmpz_poly_init(self.gg)
def __dealloc__(self):
# clear flint polys
fmpz_poly_clear(self.f)
fmpz_poly_clear(self.g)
fmpz_poly_clear(self.ff)
fmpz_poly_clear(self.gg)
cdef int apply_to_monomial_flint(self, fmpz_poly_t ans, int i, int j,
int a, int b, int c, int d) except -1:
if i < 0 or j - i < 0:
raise ValueError("i (=%s) and j-i (=%s) must both be nonnegative."%(i,j-i))
# f = b+a*x, g = d+c*x
fmpz_poly_set_coeff_si(self.f, 0, b)
fmpz_poly_set_coeff_si(self.f, 1, a)
fmpz_poly_set_coeff_si(self.g, 0, d)
fmpz_poly_set_coeff_si(self.g, 1, c)
# h = (f**i)*(g**(j-i))
fmpz_poly_pow(self.ff, self.f, i)
fmpz_poly_pow(self.gg, self.g, j - i)
fmpz_poly_mul(ans, self.ff, self.gg)
return 0
cdef Apply A = Apply()
def apply_to_monomial(int i, int j, int a, int b, int c, int d):
r"""
Return a list of the coefficients of
.. math::
(aX + bY)^i (cX + dY)^{j-i},
where `0 \leq i \leq j`, and `a`, `b`, `c`, `d` are integers.
One should think of `j` as being `k-2` for the application to
modular symbols.
INPUT:
- i, j, a, b, c, d -- all ints
OUTPUT:
list of ints, which are the coefficients
of `Y^j, Y^{j-1}X, \ldots, X^j`, respectively.
EXAMPLE:
We compute that `(X+Y)^2(X-Y) = X^3 + X^2Y - XY^2 - Y^3`::
sage: from sage.modular.modsym.apply import apply_to_monomial
sage: apply_to_monomial(2, 3, 1,1,1,-1)
[-1, -1, 1, 1]
sage: apply_to_monomial(5, 8, 1,2,3,4)
[2048, 9728, 20096, 23584, 17200, 7984, 2304, 378, 27]
sage: apply_to_monomial(6,12, 1,1,1,-1)
[1, 0, -6, 0, 15, 0, -20, 0, 15, 0, -6, 0, 1]
"""
cdef fmpz_poly_t pr
fmpz_poly_init(pr)
A.apply_to_monomial_flint(pr, i, j, a, b, c, d)
cdef Integer res
v = []
for k from 0 <= k <= j:
res = <Integer>PY_NEW(Integer)
fmpz_poly_get_coeff_mpz(res.value, pr, k)
v.append(int(res))
fmpz_poly_clear(pr)
return v
<|end_of_text|>from libcpp.vector cimport vector as libcpp_vector
from libcpp cimport bool
from String cimport *
from ProgressLogger cimport *
from MSSpectrum cimport *
from ChromatogramPeak cimport *
from Peak1D cimport *
cdef extern from "<OpenMS/FORMAT/DTAFile.h>" namespace "OpenMS":
cdef cppclass DTAFile:
DTAFile() except + nogil
DTAFile(DTAFile &) except + nogil # compiler
void load(String filename, MSSpectrum & spectrum) except + nogil
void store(String filename, MSSpectrum & spectrum) except + nogil
<|end_of_text|># distutils: language=c++
# cython: embedsignature=True, language_level=3
# cython: linetrace=True
from libcpp.pair cimport pair
from libcpp.unordered_map cimport unordered_map
from cython.operator cimport dereference
cimport cython
cimport numpy as np
from cython cimport view
from libc.math cimport sqrt
cdef extern from "triplet.h":
cdef cppclass triplet[T, U, V]:
T v1
U v2
V v3
triplet()
triplet(T, U, V)
import numpy as np
ctypedef fused ints:
size_t
np.int32_t
np.int64_t
ctypedef fused pointers:
size_t
np.intp_t
np.int32_t
np.int64_t
@cython.boundscheck(False)
def _build_faces_edges(ints[:, :] simplices):
# the node index in each simplex must be in increasing order
cdef:
int dim = simplices.shape[1] - 1
ints n_simplex = simplices.shape[0]
ints[:] simplex
ints v1, v2, v3
unordered_map[pair[ints, ints], ints] edges
pair[ints, ints] edge
ints n_edges = 0
ints edges_per_simplex = 3 if dim==2 else 6
ints[:, :] simplex_edges
unordered_map[triplet[ints, ints, ints], ints] faces
triplet[ints, ints, ints] face
ints n_faces = 0
ints faces_per_simplex = dim + 1
ints[:, :] simplex_faces
if ints is size_t:
int_type = np.uintp
elif ints is np.int32_t:
int_type = np.int32
elif ints is np.int64_t:
int_type = np.int64
simplex_edges = np.empty((n_simplex, edges_per_simplex), dtype=int_type)
if dim == 3:
simplex_faces = np.empty((n_simplex, faces_per_simplex), dtype=int_type)
else:
simplex_faces = simplex_edges
cdef ints[:,:] edge_pairs = np.array(
[[1, 2], [0, 2], [0, 1], [0, 3], [1, 3], [2, 3]],
dtype=int_type
)
for i_simp in range(n_simplex):
simplex = simplices[i_simp]
# build edges
for i_edge in range(edges_per_simplex):
v1 = simplex[edge_pairs[i_edge, 0]]
v2 = simplex[edge_pairs[i_edge, 1]]
edge = pair[ints, ints](v1, v2)
edge_search = edges.find(edge)
if edge_search!= edges.end():
ind = dereference(edge_search).second
else:
ind = n_edges
edges[edge] = ind
n_edges += 1
simplex_edges[i_simp, i_edge] = ind
# build faces in 3D
if dim == 3:
for i_face in range(4):
if i_face == 0:
v1 = simplex[1]
v2 = simplex[2]
v3 = simplex[3]
elif i_face == 1:
v1 = simplex[0]
v2 = simplex[2]
v3 = simplex[3]
elif i_face == 2:
v1 = simplex[0]
v2 = simplex[1]
v3 = simplex[3]
else:
v1 = simplex[0]
v2 = simplex[1]
v3 = simplex[2]
face = triplet[ints, ints, ints](v1, v2, v3)
face_search = faces.find(face)
if face_search!= faces.end():
ind = dereference(face_search).second
else:
ind = n_faces
faces[face] = ind
n_faces += 1
simplex_faces[i_simp, i_face] = ind
cdef ints[:, :] _edges = np.empty((n_edges, 2), dtype=int_type)
for edge_it in edges:
_edges[edge_it.second, 0] = edge_it.first.first
_edges[edge_it.second, 1] = edge_it.first.second
    cdef ints[:, :] _faces
if dim == 3:
_faces = np.empty((n_faces, 3), dtype=int_type)
for face_it in faces:
_faces[face_it.second, 0] = face_it.first.v1
_faces[face_it.second, 1] = face_it.first.v2
_faces[face_it.second, 2] = face_it.first.v3
else:
_faces = _edges
cdef ints[:, :] face_edges
cdef ints[:] _face
if dim == 3:
face_edges = np.empty((n_faces, 3), dtype=int_type)
for i_face in range(n_faces):
_face = _faces[i_face]
# get indices of each edge in the face
# 3 edges per face
for i_edge in range(3):
if i_edge == 0:
v1 = _face[1]
v2 = _face[2]
elif i_edge == 1:
v1 = _face[0]
v2 = _face[2]
elif i_edge == 2:
v1 = _face[0]
v2 = _face[1]
# because of how faces were constructed, v1 < v2 always
edge = pair[ints, ints](v1, v2)
ind = edges[edge]
face_edges[i_face, i_edge] = ind
else:
face_edges = np.empty((1, 1), dtype=int_type)
return simplex_faces, _faces, simplex_edges, _edges, face_edges
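# Note on the deduplication above: node indices within each simplex are in
# increasing order, so every edge (v1 < v2) and face (v1 < v2 < v3) has a
# canonical vertex ordering, and hashing that ordered tuple assigns each
# unique edge/face exactly one index the first time it is seen.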
@cython.boundscheck(False)
def _build_adjacency(ints[:, :] simplex_faces, n_faces):
cdef:
size_t n_cells = simplex_faces.shape[0]
int dim = simplex_faces.shape[1] - 1
np.int64_t[:, :] neighbors
np.int64_t[:] visited
ints[:] simplex
ints i_cell, j, k, i_face, i_other
if ints is size_t:
int_type = np.uintp
elif ints is np.int32_t:
int_type = np.int32
elif ints is np.int64_t:
int_type = np.int64
neighbors = np.full((n_cells, dim + 1), -1, dtype=np.int64)
visited = np.full((n_faces), -1, dtype=np.int64)
for i_cell in range(n_cells):
simplex = simplex_faces[i_cell]
for j in range(dim + 1):
i_face = simplex[j]
i_other = visited[i_face]
if i_other == -1:
visited[i_face] = i_cell
else:
neighbors[i_cell, j] = i_other
k = 0
while (k < dim + 1) and (simplex_faces[i_other, k]!= i_face):
k += 1
neighbors[i_other, k] = i_cell
return neighbors
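# Each interior face is shared by exactly two simplices: the first visit
# records the owning cell in `visited`, and the second visit links the two
# cells as neighbors on both sides; boundary faces keep the -1 sentinel.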
@cython.boundscheck(False)
@cython.linetrace(False)
cdef void _compute_bary_coords(
np.float64_t[:] point,
np.float64_t[:, :] Tinv,
np.float64_t[:] shift,
np.float64_t * bary
) nogil:
cdef:
int dim = point.shape[0]
int i, j
bary[dim] = 1.0
for i in range(dim):
bary[i] = 0.0
for j in range(dim):
bary[i] += Tinv[i, j] * (point[j] - shift[j])
bary[dim] -= bary[i]
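# The helper above evaluates the affine barycentric map
#   bary[0:dim] = Tinv @ (point - shift),  bary[dim] = 1 - sum(bary[0:dim]),
# where Tinv and shift are the precomputed inverse transform and reference
# vertex of a simplex; all coordinates are >= 0 exactly when the point lies inside.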
@cython.boundscheck(False)
def _directed_search(
np.float64_t[:, :] locs,
pointers[:] nearest_cc,
np.float64_t[:, :] nodes,
ints[:, :] simplex_nodes,
np.int64_t[:, :] neighbors,
np.float64_t[:, :, :] transform,
np.float64_t[:, :] shift,
np.float64_t eps=1E-15,
bint zeros_outside=False,
bint return_bary=True
):
cdef:
int i, j
pointers i_simp
int n_locs = locs.shape[0], dim = locs.shape[1]
int max_directed = 1 + simplex_nodes.shape[0] // 4
int i_directed
bint is_inside
np.int64_t[:] inds = np.full(len(locs), -1, dtype=np.int64)
np.float64_t[:, :] all_barys = np.empty((1, 1), dtype=np.float64)
np.float64_t barys[4]
np.float64_t[:] loc
np.float64_t[:, :] Tinv
np.float64_t[:] rD
if return_bary:
all_barys = np.empty((len(locs), dim+1), dtype=np.float64)
for i in range(n_locs):
loc = locs[i]
i_simp = nearest_cc[i] # start at the nearest cell center
i_directed = 0
while i_directed < max_directed:
Tinv = transform[i_simp]
rD = shift[i_simp]
_compute_bary_coords(loc, Tinv, rD, barys)
j = 0
is_inside = True
while j <= dim:
if barys[j] < -eps:
is_inside = False
# if not -1, move towards neighbor
if neighbors[i_simp, j]!= -1:
i_simp = neighbors[i_simp, j]
break
j += 1
# If inside, I found my container
if is_inside:
break
# Else, if I cycled through every bary
# without breaking out of the above loop, that means I'm completely outside
elif j == dim + 1:
if zeros_outside:
i_simp = -1
break
i_directed += 1
if i_directed == max_directed:
# made it through the whole loop without breaking out
# Mark as failed
i_simp = -2
inds[i] = i_simp
if return_bary:
for j in range(dim+1):
all_barys[i, j] = barys[j]
if return_bary:
return np.array(inds), np.array(all_barys)
return np.array(inds)
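# This is the standard "directed walk" point location: starting from the
# simplex nearest each query point, step through a face whose barycentric
# coordinate is negative until every coordinate is >= -eps (inside), the walk
# exits the mesh (-1 when zeros_outside is set), or the step budget is
# exhausted (-2, marked as failed).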
@cython.boundscheck(False)
@cython.cdivision(True)
def _interp_cc(
np.float64_t[:, :] locs,
np.float64_t[:, :] cell_centers,
np.float64_t[:] mat_data,
ints[:] mat_indices,
ints[:] mat_indptr,
):
cdef:
ints i, j, diff, start, stop, i_d
ints n_max_per_row = 0
ints[:] close_cells
int dim = locs.shape[1]
np.float64_t[:, :] drs
np.float64_t[:] rs
np.float64_t[:] rhs
np.float64_t[:] lambs
np.float64_t[:] weights
np.float64_t[:] point
np.float64_t[:] close_cell
np.float64_t det, weight_sum
np.float64_t xx, xy, xz, yy, yz, zz
bint too_close
np.float64_t eps = 1E-15
    # Find the maximum number of entries per row to pre-allocate storage
for i in range(locs.shape[0]):
diff = mat_indptr[i+1] - mat_indptr[i]
if diff > n_max_per_row:
n_max_per_row = diff
#
drs = np.empty((n_max_per_row, dim), dtype=np.float64)
rs = np.empty((n_max_per_row,), dtype=np.float64)
rhs = np.empty((dim,),dtype=np.float64)
lambs = np.empty((dim,),dtype=np.float64)
for i in range(locs.shape[0]):
point = locs[i]
start = mat_indptr[i]
stop = mat_indptr[i+1]
diff = stop-start
close_cells = mat_indices[start:stop]
for j in range(diff):
rs[j] = 0.0
close_cell = cell_centers[close_cells[j]]
for i_d in range(dim):
drs[j, i_d] = close_cell[i_d] - point[i_d]
rs[j] += drs[j, i_d]*drs[j, i_d]
rs[j] = sqrt(rs[j])
weights = mat_data[start:stop]
weights[:] = 0.0
too_close = False
i_d = 0
for j in range(diff):
if rs[j] < eps:
too_close = True
i_d = j
if too_close:
weights[i_d] = 1.0
else:
for j in range(diff):
for i_d in range(dim):
drs[j, i_d] /= rs[j]
xx = xy = yy = 0.0
rhs[:] = 0.0
if dim == 2:
for j in range(diff):
                    xx += drs[j, 0] * drs[j, 0]
xy += drs[j, 0] * drs[j, 1]
yy += drs[j, 1] * drs[j, 1]
rhs[0] -= drs[j, 0]
rhs[1] -= drs[j, 1]
det = xx * yy - xy * xy
lambs[0] = (yy * rhs[0] - xy * rhs[1])/det
lambs[1] = (-xy * rhs[0] + xx * rhs[1])/det
if dim == 3:
zz = xz = yz = 0.0
for j in range(diff):
xx += drs[j, 0] * drs[j, 0]
xy += drs[j, 0] * drs[j, 1]
yy += drs[j, 1] * drs[j, 1]
xz += drs[j, 0] * drs[j, 2]
yz += drs[j, 1] * drs[j, 2]
zz += drs[j, 2] * drs[j, 2]
rhs[0] -= drs[j, 0]
rhs[1] -= drs[j, 1]
rhs[2] -= drs[j, 2]
det = (
xx * (yy * zz - yz * yz)
+ xy * (xz * yz - xy * zz)
+ xz * (xy * yz - xz * yy)
)
lambs[0] = (
(yy * zz - yz * yz) * rhs[0]
+ (xz * yz - xy * zz) * rhs[1]
+ (xy * yz - xz * yy) * rhs[2]
)/det
lambs[1] = (
(xz * yz - xy * zz) * rhs[0]
+ (xx * zz - xz * xz) * rhs[1]
+ (xy * xz - xx * yz) * rhs[2]
)/det
lambs[2] = (
(xy * yz - xz * yy) * rhs[0]
+ (xz * xy - xx * yz) * rhs[1]
+ (xx * yy - xy * xy) * rhs[2]
)/det
weight_sum = 0.0
for j in range(diff):
weights[j] = 1.0
for i_d in range(dim):
weights[j] += lambs[i_d] * drs[j, i_d]
weights[j] /= rs[j]
weight_sum += weights[j]
for j in range(diff):
weights[j] /= weight_sum
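# The weights above implement a linearly-corrected inverse-distance scheme:
# w_j = (1 + lambda . rhat_j) / r_j, normalized to sum to one, where lambda
# solves the small dim-by-dim system assembled from the unit offsets rhat_j
# so that sum_j w_j * (x_j - x) = 0, i.e. the interpolant reproduces linear
# fields exactly.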
<|end_of_text|># distutils: language=c++
# =============================================================================
# OVR Error Code (OVR_ErrorCode.h) Cython Declaration File
# =============================================================================
#
# ovr_errorcode.pxd
#
# Copyright 2018 Matthew Cutone <cutonem(a)yorku.ca> and Laurie M. Wilcox
# <lmwilcox(a)yorku.ca>; The Centre For Vision Research, York University,
# Toronto, Canada
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""This file exposes Oculus Rift(TM) C API types and functions, allowing Cython
extensions to access them. The declarations in the file are contemporaneous
with version 1.24 (retrieved 04.15.2018) of the Oculus Rift(TM) PC SDK.
"""
from libc.stdint cimport int32_t
cdef extern from "OVR_ErrorCode.h":
ctypedef int32_t ovrResult
ctypedef enum ovrSuccessType:
ovrSuccess = 0
ctypedef enum ovrSuccessTypes:
ovrSuccess_NotVisible = 1000,
ovrSuccess_BoundaryInvalid = 1001,
ovrSuccess_DeviceUnavailable = 1002
ctypedef enum ovrErrorType:
ovrError_MemoryAllocationFailure = -1000,
ovrError_InvalidSession = -1002,
ovrError_Timeout = -1003,
ovrError_NotInitialized = -1004,
ovrError_InvalidParameter = -1005,
ovrError_ServiceError = -1006,
ovrError_NoHmd = -1007,
ovrError_Unsupported = -1009,
ovrError_DeviceUnavailable = -1010,
ovrError_InvalidHeadsetOrientation = -1011,
ovrError_ClientSkippedDestroy = -1012,
ovrError_ClientSkippedShutdown = -1013,
ovrError_ServiceDeadlockDetected = -1014,
ovrError_InvalidOperation = -1015,
ovrError_InsufficientArraySize = -1016,
ovrError_NoExternalCameraInfo = -1017,
ovrError_LostTracking = -1018,
ovrError_ExternalCameraInitializedFailed = -1019,
ovrError_ExternalCameraCaptureFailed = -1020,
ovrError_ExternalCameraNameListsBufferSize = -1021,
ovrError_ExternalCameraNameListsMistmatch = -1022,
ovrError_ExternalCameraNotCalibrated = -1023,
ovrError_ExternalCameraNameWrongSize = -1024,
ovrError_AudioDeviceNotFound = -2001,
ovrError_AudioComError = -2002,
ovrError_Initialize = -3000,
ovrError_LibLoad = -3001,
ovrError_LibVersion = -3002,
ovrError_ServiceConnection = -3003,
ovrError_ServiceVersion = -3004,
ovrError_IncompatibleOS = -3005,
ovrError_DisplayInit = -3006,
ovrError_ServerStart = -3007,
ovrError_Reinitialization = -3008,
ovrError_MismatchedAdapters = -3009,
ovrError_LeakingResources = -3010,
ovrError_ClientVersion = -3011,
ovrError_OutOfDateOS = -3012,
ovrError_OutOfDateGfxDriver = -3013,
ovrError_IncompatibleGPU = -3014,
ovrError_NoValidVRDisplaySystem = -3015,
ovrError_Obsolete = -3016,
ovrError_DisabledOrDefaultAdapter = -3017,
ovrError_HybridGraphicsNotSupported = -3018,
ovrError_DisplayManagerInit = -3019,
ovrError_TrackerDriverInit = -3020,
ovrError_LibSignCheck = -3021,
ovrError_LibPath = -3022,
ovrError_LibSymbols = -3023,
ovrError_RemoteSession = -3024,
ovrError_InitializeVulkan = -3025,
ovrError_BlacklistedGfxDriver = -3026,
ovrError_DisplayLost = -6000,
ovrError_TextureSwapChainFull = -6001,
ovrError_TextureSwapChainInvalid = -6002,
ovrError_GraphicsDeviceReset = -6003,
ovrError_DisplayRemoved = -6004,
ovrError_ContentProtectionNotAvailable = -6005,
ovrError_ApplicationInvisible = -6006,
ovrError_Disallowed = -6007,
ovrError_DisplayPluggedIncorrectly = -6008,
ovrError_DisplayLimitReached = -6009,
ovrError_RuntimeException = -7000,
ovrError_NoCalibration = -9000,
ovrError_OldVersion = -9001,
ovrError_MisformattedBlock = -9002
ctypedef struct ovrErrorInfo:
ovrResult Result
char[512] ErrorString
cdef inline int OVR_SUCCESS(ovrResult result):
return result >= ovrSuccessType.ovrSuccess
cdef inline int OVR_UNQUALIFIED_SUCCESS(ovrResult result):
return result == ovrSuccessType.ovrSuccess
cdef inline int OVR_FAILURE(ovrResult result):
    return not OVR_SUCCESS(result)
<|end_of_text|># cython: language_level=3
cimport cython
import numpy as np
cimport numpy as np
from libcpp cimport bool
cdef void cySmoothNodesLaplace(double[:,:] nodeArray_,
int[:] nodeStarOffsets,
int[:] nodeStarArray,
int[:] nodeMaterialTypes,
int nNodes_owned,
int nd,
bool simultaneous=*,
bool smoothBoundaries=*,
int[:] fixedNodesBoolArray=*,
double alpha=*)
cdef void cySmoothNodesCentroid(double[:,:] nodeArray_,
int[:] nodeElementOffsets,
int[:] nodeElementsArray,
int[:] nodeMaterialTypes,
double[:] elementVolumesArray,
double[:,:] elementBarycentersArray,
int[:,:] elementNodesArray,
int nNodes_owned,
int[:] fixedNodesBoolArray,
bool simultaneous=*,
bool smoothBoundaries=*,
double alpha=*)
cdef void cyUpdateDilationElements(double[:] elementDilationArray_,
double[:] elementVolumeArray,
double[:] elementVolumeTargetArray,
int nElements)
cdef void cyUpdateDistortionElements(double[:] elementDistortionArray_,
double[:,:,:,:] J_array,
double[:,:] detJ_array,
int nd,
int nElements)
cdef void cyUpdateInverseMeanRatioTriangleNodes(double[:] IMRNodesArray_,
double[:,:] nodeArray,
int[:,:] elementNodesArray,
int[:] nodeElementOffsets,
int[:] nodeElementsArray,
int nElements,
int nNodes,
bool el_average=*)
cdef void cyUpdateInverseMeanRatioTriangleElements(double[:] IMRElementsArray_,
double[:,:] nodeArray,
int[:,:] elementNodesArray,
int nElements)
cdef double cyGetInverseMeanRatioSingleTriangle(int node0,
double[:,:] nodeArray,
int[:,:] elementNodesArray,
int[:] nodeElementOffsets,
int[:] nodeElementsArray,
bool el_average=*)
cdef double[:,:] cySmoothNodesQuality(double[:] distortion,
double[:] dilation,
double[:,:] nodeArray,
int nNodes_owned,
int[:] nodeMaterialTypes,
int[:] nodeElementOffsets,
int[:] nodeElementsArray,
int[:,:] elementNodesArray,
bool apply_directly=*)
cdef int pyxGetLocalNearestNode(double[:] coords,
double[:,:] nodeArray,
int[:] nodeStarOffsets,
int[:] nodeStarArray,
int node)
cdef int pyxGetLocalNearestElement(double[:] coords,
double[:,:] elementBarycentersArray,
int[:,:] elementNeighborsArray,
int eN)
cdef int[:] pyxGetLocalNearestElementIntersection(double[:] coords,
double[:] starting_coords,
double[:,:,:] elementBoundaryNormalsArray,
int[:,:] elementBoundariesArray,
double[:,:] elementBoundaryBarycentersArray,
int[:,:] elementBoundaryElementsArray,
int[:] exteriorElementBoundariesBoolArray,
int eN)
cdef int pyxGetLocalNearestElementAroundNode(double[:] coords,
int[:] nodeElementOffsets,
int[:] nodeElementsArray,
double[:,:] elementBarycentersArray,
int node)
cdef void pyxUpdateElementBoundaryNormalsTetra(double[:,:,:] elementBoundaryNormalsArray_,
double[:,:] nodeArray,
int[:,:] elementBoundariesArray,
int[:,:] elementBoundaryNodesArray,
double[:,:] elementBoundaryBarycentersArray,
double[:,:] elementBarycentersArray,
int nElements)
cdef void pyxUpdateElementBoundaryNormalsTriangle(double[:,:,:] elementBoundaryNormalsArray_,
double[:,:] nodeArray,
int[:,:] elementBoundariesArray,
int[:,:] elementBoundaryNodesArray,
double[:,:] elementBoundaryBarycentersArray,
double[:,:] elementBarycentersArray,
int nElements)
cdef void cyUpdateElementVolumesTriangle(double[:] elementVolumesArray_,
int[:,:] elementNodesArray,
double[:,:] nodeArray,
int nElements)
cdef void cyUpdateElementVolumesTetra(double[:] elementVolumesArray_,
int[:,:] elementNodesArray,
double[:,:] nodeArray,
int nElements)
cdef void cyUpdateElementBarycenters(double[:,:] elementBarycentersArray_,
int[:,:] elementNodesArray,
double[:,:] nodeArray,
int nElements)
cdef np.ndarray cyGetCornerNodesTriangle(double[:,:] nodeArray,
int[:] nodeStarArray,
int[:] nodeStarOffsets,
int[:] nodeMaterialTypes,
int nNodes)
cdef int[:] cyCheckOwnedVariable(int variable_nb_local,
int rank,
int nVariables_owned,
int[:] variableNumbering_subdomain2global,
int[:] variableOffsets_subdomain_owned)
cdef int[:] cyGetGlobalVariable(int variable_nb_local,
int nVariables_owned,
int[:] variableNumbering_subdomain2global,
int[:] variableOffsets_subdomain_owned)
cdef int cyGetLocalVariable(int variable_nb_global,
int rank,
int nVariables_owned,
int[:] variableNumbering_subdomain2global,
int[:] variableOffsets_subdomain_owned)
cdef double[:] cyScalarRecoveryAtNodes(double[:] scalars,
int[:] nodeElementsArray,
int[:] nodeElementOffsets)
cdef double[:] cyScalarRecoveryAtNodesWeighted(double[:] scalars,
int[:] nodeElementsArray,
int[:] nodeElementOffsets,
double[:] detJ_array,
int nNodes)
cdef double[:,:] cyVectorRecoveryAtNodesWeighted(double[:,:] vectors,
int[:] nodeElementsArray,
int[:] nodeElementOffsets,
double[:,:] detJ_array,
int nd)
cdef double[:,:] cyVectorRecoveryAtNodes(double[:,:] vectors,
int[:] nodeElementsArray,
int[:] nodeElementOffsets,
int nd)
cdef void cyFindBoundaryDirectionTriangle(double[:] dir_,
int node,
double[:,:] nodeArray,
int[:] nodeStarOffsets,
int[:] nodeStarArray,
int[:] nodeMaterialTypes)
cdef void cyFindBoundaryDirectionTetra(double[:] dir_,
int node,
double[:,:] nodeArray,
int[:] nodeStarOffsets,
int[:] nodeStarArray,
int[:] nodeMaterialTypes)
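# ---------------------------------------------------------------------------
# Illustrative call pattern (added comment, not part of the original file):
# these are cdef declarations, so they are only callable from Cython code
# that cimports this .pxd. A corresponding .pyx would typically expose a
# thin def wrapper, e.g.:
#
#     def smooth_nodes_laplace(double[:,:] nodeArray,
#                              int[:] nodeStarOffsets,
#                              int[:] nodeStarArray,
#                              int[:] nodeMaterialTypes,
#                              int nNodes_owned,
#                              int nd):
#         cySmoothNodesLaplace(nodeArray, nodeStarOffsets, nodeStarArray,
#                              nodeMaterialTypes, nNodes_owned, nd)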
<|end_of_text|># distutils: language = c
# cython: cdivision = True
# cython: boundscheck = False
# cython: wraparound = False
# cython: profile = False
from ._util cimport log2, apply_weights_2d
from libc.math cimport log
from libc.math cimport fabs
cimport numpy as cnp
cdef FLOAT_t ZERO_TOL = 1e-16
import numpy as np
cdef class BasisFunction:
def __cinit__(BasisFunction self):
self.pruned = False
self.children = []
self.prunable = True
self.child_map = {}
self.splittable = True
def __reduce__(self):
return (self.__class__, (), self._getstate())
def _get_root(self):
return self.parent._get_root()
def _getstate(self):
result = {'pruned': self.pruned,
'children': self.children,
'prunable': self.prunable,
'child_map': self.child_map,
'splittable': self.splittable}
result.update(self._get_parent_state())
return result
def _get_parent_state(self):
return {'parent': self.parent}
def _set_parent_state(self, state):
self.parent = state['parent']
def __setstate__(self, state):
self.pruned = state['pruned']
self.children = state['children']
self.prunable = state['prunable']
self.child_map = state['child_map']
self.splittable = state['splittable']
self._set_parent_state(state)
def _eq(self, other):
if self.__class__ is not other.__class__:
return False
self_state = self._getstate()
other_state = other._getstate()
del self_state['children']
del self_state['child_map']
del other_state['children']
del other_state['child_map']
return self_state == other_state
def __richcmp__(self, other, method):
if method == 2:
return self._eq(other)
elif method == 3:
return not self._eq(other)
else:
return NotImplemented
cpdef bint has_knot(BasisFunction self):
return False
cpdef bint is_prunable(BasisFunction self):
return self.prunable
cpdef bint is_pruned(BasisFunction self):
return self.pruned
cpdef bint is_splittable(BasisFunction self):
return self.splittable
cpdef bint make_splittable(BasisFunction self):
self.splittable = True
cpdef bint make_unsplittable(BasisFunction self):
self.splittable = False
cdef list get_children(BasisFunction self):
return self.children
cpdef _set_parent(self, BasisFunction parent):
'''Calls _add_child.'''
self.parent = parent
self.parent._add_child(self)
cpdef _add_child(self, BasisFunction child):
'''Called by _set_parent.'''
cdef INDEX_t n = len(self.children)
self.children.append(child)
        cdef int var = child.get_variable()
if var in self.child_map:
self.child_map[var].append(n)
else:
self.child_map[var] = [n]
cpdef BasisFunction get_parent(self):
return self.parent
cpdef prune(self):
self.pruned = True
cpdef unprune(self):
self.pruned = False
cpdef knots(BasisFunction self, INDEX_t variable):
cdef list children
cdef BasisFunction child
if variable in self.child_map:
children = self.child_map[variable]
else:
return []
cdef INDEX_t n = len(children)
cdef INDEX_t i
cdef list result = []
cdef int idx
for i in range(n):
idx = children[i]
child = self.get_children()[idx]
if child.has_knot():
result.append(child.get_knot_idx())
return result
cpdef INDEX_t degree(BasisFunction self):
return self.parent.degree() + 1
cpdef apply(self, cnp.ndarray[FLOAT_t, ndim=2] X, cnp.ndarray[FLOAT_t, ndim=1] b, bint recurse=True):
'''
X - Data matrix
b - parent vector
recurse - If False, assume b already contains the result of the parent function. Otherwise, recurse to compute
parent function.
'''
cpdef cnp.ndarray[INT_t, ndim = 1] valid_knots(BasisFunction self, cnp.ndarray[FLOAT_t, ndim=1] values, cnp.ndarray[FLOAT_t, ndim=1] variable, int variable_idx, INDEX_t check_every, int endspan, int minspan, FLOAT_t minspan_alpha, INDEX_t n, cnp.ndarray[INT_t, ndim=1] workspace):
'''
values - The unsorted values of self in the data set
variable - The sorted values of variable in the data set
variable_idx - The index of the variable in the data set
workspace - An m-vector (where m is the number of samples) used internally
'''
cdef INDEX_t i
cdef INDEX_t j
cdef INDEX_t k
cdef INDEX_t m = values.shape[0]
cdef FLOAT_t float_tmp
cdef INT_t int_tmp
cdef INDEX_t count
cdef int minspan_
cdef cnp.ndarray[INT_t, ndim = 1] result
cdef INDEX_t num_used
cdef INDEX_t prev
cdef INDEX_t start
cdef int idx
cdef int last_idx
cdef FLOAT_t first_var_value = variable[m - 1]
cdef FLOAT_t last_var_value = variable[m - 1]
# Calculate the used knots
cdef list used_knots = self.knots(variable_idx)
used_knots.sort()
# Initialize workspace to 1 where value is nonzero
# Also, find first_var_value as the maximum variable
# where value is nonzero and last_var_value to the
# minimum variable where value is nonzero
count = 0
for i in range(m):
            if fabs(values[i]) > ZERO_TOL:
workspace[i] = 1
count += 1
if variable[i] >= first_var_value:
first_var_value = variable[i]
last_var_value = variable[i]
else:
workspace[i] = 0
# Calculate minspan
if minspan < 0:
minspan_ = <int > (-log2(-(1.0 / (n * count)) * log(1.0 - minspan_alpha)) / 2.5)
else:
minspan_ = minspan
# Take out the used points and apply minspan
num_used = len(used_knots)
prev = 0
last_idx = -1
for i in range(num_used):
idx = used_knots[i]
if last_idx == idx:
continue
workspace[idx] = 0
j = idx
k = 0
while j > prev + 1 and k < minspan_:
if workspace[j - 1]:
workspace[j - 1] = False
k += 1
j -= 1
j = idx + 1
k = 0
while j < m and k < minspan_:
if workspace[j]:
workspace[j] = False
k += 1
j += 1
prev = idx
last_idx = idx
# Apply endspan
i = 0
j = 0
while i < endspan:
if workspace[j]:
workspace[j] = 0
i += 1
j += 1
if j == m:
break
i = 0
j = m - 1
while i < endspan:
if workspace[j]:
workspace[j] = 0
i += 1
if j == 0:
break
j -= 1
# Implement check_every
int_tmp = 0
count = 0
for i in range(m):
if workspace[i]:
if (int_tmp % check_every)!= 0:
workspace[i] = 0
else:
count += 1
int_tmp += 1
else:
int_tmp = 0
# Make sure the greatest value is not a candidate (this can happen if
# the first endspan+1 values are the same)
for i in range(m):
if workspace[i]:
if variable[i] == first_var_value:
workspace[i] = 0
count -= 1
else:
break
# Also make sure the least value is not a candidate
for i in range(m):
if workspace[m - i - 1]:
if variable[m - i - 1] == last_var_value:
workspace[m - i - 1] = 0
count -= 1
else:
break
# Create result array and return
result = np.empty(shape=count, dtype=int)
j = 0
for i in range(m):
if workspace[i]:
result[j] = i
j += 1
return result
cdef class PicklePlaceHolderBasisFunction(BasisFunction):
'''This is a place holder for unpickling the basis function tree.'''
pickle_place_holder = PicklePlaceHolderBasisFunction()
cdef class ConstantBasisFunction(BasisFunction):
def __init__(self): # @DuplicatedSignature
self.prunable = False
def _get_root(self):
return self
def _get_parent_state(self):
return {}
def _set_parent_state(self, state):
pass
cpdef INDEX_t degree(ConstantBasisFunction self):
return 0
    cpdef translate(ConstantBasisFunction self, cnp.ndarray[FLOAT_t, ndim=1] slopes, cnp.ndarray[FLOAT_t, ndim=1] intercepts, bint recurse):
pass
    cpdef FLOAT_t scale(ConstantBasisFunction self, cnp.ndarray[FLOAT_t, ndim=1] slopes, cnp.ndarray[FLOAT_t, ndim=1] intercepts):
return < FLOAT_t > 1.0
cpdef _set_parent(self, BasisFunction parent):
raise NotImplementedError
cpdef BasisFunction get_parent(self):
raise NotImplementedError
cpdef apply(self, cnp.ndarray[FLOAT_t, ndim=2] X, cnp.ndarray[FLOAT_t, ndim=1] b, bint recurse=False):
'''
X - Data matrix
b - parent vector
recurse - The ConstantBasisFunction is the parent of all BasisFunctions and never has a parent.
Therefore the recurse argument is ignored. This spares child BasisFunctions from
having to know whether their parents have parents.
'''
cdef INDEX_t i # @DuplicatedSignature
cdef INDEX_t m = len(b)
for i in range(m):
b[i] = <FLOAT_t > 1.0
def __str__(self):
return '(Intercept)'
cdef class HingeBasisFunction(BasisFunction):
#@DuplicatedSignature
def __init__(self, BasisFunction parent, FLOAT_t knot, INDEX_t knot_idx, INDEX_t variable, bint reverse, label=None):
self.knot = knot
self.knot_idx = knot_idx
self.variable = variable
self.reverse = reverse
self.label = label if label is not None else 'x' + str(variable)
self._set_parent(parent)
def __reduce__(self):
return (self.__class__, (pickle_place_holder, 1.0, 1, 1, True, ''), self._getstate())
def _getstate(self):
result = super(HingeBasisFunction, self)._getstate()
result.update({'knot': self.knot,
'knot_idx': self.knot_idx,
'variable': self.variable,
'reverse': self.reverse,
'label': self.label})
return result
def __setstate__(self, state):
self.knot = state['knot']
        self.knot_idx = state['knot_idx']
self.variable = state['variable']
self.reverse = state['reverse']
self.label = state['label']
super(HingeBasisFunction, self).__setstate__(state)
cpdef bint has_knot(HingeBasisFunction self):
return True
cpdef translate(HingeBasisFunction self, cnp.ndarray[FLOAT_t, ndim=1] slopes, cnp.ndarray[FLOAT_t, ndim=1] intercepts, bint recurse):
self.knot = slopes[self.variable] * \
self.knot + intercepts[self.variable]
if slopes[self.variable] < 0:
self.reverse = not self.reverse
        if recurse:
            self.parent.translate(slopes, intercepts, recurse)
cpdef FLOAT_t scale(HingeBasisFunction self, cnp.ndarray[FLOAT_t, ndim=1] slopes, cnp.ndarray[FLOAT_t, ndim=1] intercepts):
result = self.parent.scale(slopes, intercepts)
result /= slopes[self.variable]
return result
def __str__(self):
result = ''
if self.variable is not None:
if not self.reverse:
if self.knot >= 0:
result = 'h(%s-%G)' % (self.label, self.knot)
else:
result = 'h(%s+%G)' % (self.label, -self.knot)
else:
result = 'h(%G-%s)' % (self.knot, self.label)
        parent = str(
            self.parent) if self.parent.__class__ is not ConstantBasisFunction else ''
        if parent != '':
            result += '*%s' % (parent,)
return result
cpdef INDEX_t get_variable(self):
return self.variable
cpdef FLOAT_t get_knot(self):
return self.knot
cpdef bint get_reverse(self):
return self.reverse
cpdef INDEX_t get_knot_idx(self):
return self.knot_idx
cpdef apply(self, cnp.ndarray[FLOAT_t, ndim=2] X, cnp.ndarray[FLOAT_t, ndim=1] b, bint recurse=True):
'''
X - Data matrix
b - parent vector
recurse - If False, assume b already contains the result of the parent function. Otherwise, recurse to compute
parent function.
'''
if recurse:
self.parent.apply(X, b, recurse=True)
cdef INDEX_t i # @DuplicatedSignature
cdef INDEX_t m = len(b) # @DuplicatedSignature
cdef FLOAT_t tmp
if self.reverse:
for i in range(m):
tmp = self.knot - X[i, self.variable]
if tmp < 0:
tmp = <FLOAT_t > 0.0
b[i] *= tmp
else:
for i in range(m):
tmp = X[i, self.variable] - self.knot
if tmp < 0:
tmp = <FLOAT_t > 0.0
b[i] *= tmp
cdef class LinearBasisFunction(BasisFunction):
#@DuplicatedSignature
def __init__(self, BasisFunction parent, INDEX_t variable, label=None):
self.variable = variable
self.label = label if label is not None else 'x' + str(variable)
self._set_parent(parent)
def __reduce__(self):
return (self.__class__, (pickle_place_holder, 1, ''), self._getstate())
def _getstate(self):
result = super(LinearBasisFunction, self)._getstate()
result.update({'variable': self.variable,
'label': self.label})
return result
def __setstate__(self, state):
self.variable = state['variable']
self.label = state['label']
super(LinearBasisFunction, self).__setstate__(state)
    cpdef translate(LinearBasisFunction self, cnp.ndarray[FLOAT_t, ndim=1] slopes, cnp.ndarray[FLOAT_t, ndim=1] intercepts, bint recurse):
pass
cpdef FLOAT_t scale(LinearBasisFunction self, cnp.ndarray[FLOAT_t, ndim=1] slopes, cnp.ndarray[FLOAT_t, ndim=1] intercepts):
result = self.parent.scale(slopes, intercepts)
result /= slopes[self.variable]
return result
def __str__(LinearBasisFunction self):
result = self.label
        if self.parent.__class__ is not ConstantBasisFunction:
parent = str(self.parent)
result += '*' + parent
return result
cpdef INDEX_t get_variable(self):
return self.variable
cpdef apply(self, cnp.ndarray[FLOAT_t, ndim=2] X, cnp.ndarray[FLOAT_t, ndim=1] b, bint recurse=True):
'''
X - Data matrix
b - parent vector
recurse - If False, assume b already contains the result of the parent function. Otherwise, recurse to compute
parent function.
'''
if recurse:
self.parent.apply(X, b, recurse=True)
cdef INDEX_t i # @DuplicatedSignature
cdef INDEX_t m = len(b) # @DuplicatedSignature
for i in range(m):
b[i] *= X[i, self.variable]
cdef class Basis:
'''A container that provides functionality related to a set of BasisFunctions with a
common ConstantBasisFunction ancestor. Retains the order in which BasisFunctions are
added.'''
def __init__(Basis self, num_variables): # @DuplicatedSignature
self.order = []
self.num_variables = num_variables
def __reduce__(self):
return (self.__class__, (self.num_variables,), self._getstate())
def _getstate(self):
return {'order': self.order}
def __setstate__(self, state):
self.order = state['order']
def __richcmp__(self, other, method):
if method == 2:
return self._eq(other)
elif method == 3:
return not self._eq(other)
else:
return NotImplemented
def _eq(self, other):
return self.__class__ is other.__class__ and self._getstate() == other._getstate()
def piter(Basis self):
for bf in self.order:
if not bf.is_pruned():
yield bf
def __str__(Basis self):
cdef INDEX_t i
cdef INDEX_t n = len(self)
result = ''
for i in range(n):
result += str(self[i])
result += '\n'
return result
cpdef translate(Basis self, cnp.ndarray[FLOAT_t, ndim=1] slopes, cnp.ndarray[FLOAT_t, ndim=1] intercepts):
cdef INDEX_t n = len(self)
cdef INDEX_t i # @DuplicatedSignature
for i in range(n):
self.order[i].translate(slopes, intercepts, False)
cpdef scale(Basis self, cnp.ndarray[FLOAT_t, ndim=1] slopes, cnp.ndarray[FLOAT_t, ndim=1] intercepts, cnp.ndarray[FLOAT_t, ndim=1] beta):
cdef INDEX_t n = len(self) # @DuplicatedSignature
cdef INDEX_t i # @DuplicatedSignature
cdef INDEX_t j = 0
for i in range(n):
if self.order[i].is_pruned():
continue
beta[j] *= self.order[i].scale(slopes, intercepts)
j += 1
cpdef BasisFunction get_root(Basis self):
return self.root
cpdef append(Basis self, BasisFunction basis_function):
self.order.append(basis_function)
def __iter__(Basis self):
return self.order.__iter__()
def __len__(Basis self):
return self.order.__len__()
cpdef BasisFunction get(Basis self, INDEX_t i):
return self.order[i]
def __getitem__(Basis self, INDEX_t i):
return self.get(i)
cpdef INDEX_t plen(Basis self):
cdef INDEX_t length = 0
cdef INDEX_t i
cdef INDEX_t n = len(self.order)
for i in range(n):
if not self.order[i].is_pruned():
length += 1
return length
cpdef transform(Basis self, cnp.ndarray[FLOAT_t, ndim=2] X, cnp.ndarray[FLOAT_t, ndim=2] B):
cdef INDEX_t i # @DuplicatedSignature
cdef INDEX_t n = self.__len__()
cdef BasisFunction bf
cdef INDEX_t col = 0
for i in range(n):
bf = self.order[i]
if bf.is_pruned():
continue
bf.apply(X, B[:, col], recurse=True)
col += 1
cpdef weighted_transform(Basis self, cnp.ndarray[FLOAT_t, ndim=2] X, cnp.ndarray[FLOAT_t, ndim=2] B, cnp.ndarray[FLOAT_t, ndim=1] weights):
cdef INDEX_t i # @DuplicatedSignature
cdef INDEX_t n = self.__len__()
self.transform(X, B)
apply_weights_2d(B, weights)
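# ---------------------------------------------------------------------------
# Minimal usage sketch (added example, not part of the original module):
# build the two-function basis [1, h(x0 - 0.5)] from the classes above and
# evaluate it on random data. Assumes FLOAT_t corresponds to float64.
def _example_basis_usage():
    X = np.random.rand(10, 2)
    basis = Basis(X.shape[1])
    root = ConstantBasisFunction()
    basis.append(root)
    # knot 0.5 on variable 0; knot_idx (third argument) is bookkeeping only
    basis.append(HingeBasisFunction(root, 0.5, 0, 0, False))
    B = np.empty((X.shape[0], len(basis)), dtype=float)
    basis.transform(X, B)
    return B  # column 0 is all ones, column 1 is max(X[:, 0] - 0.5, 0)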
<|end_of_text|>#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
import folly.iobuf as _fbthrift_iobuf
from thrift.py3.reflection cimport (
NumberType as __NumberType,
StructType as __StructType,
Qualifier as __Qualifier,
)
cimport module.types as _module_types
from thrift.py3.types cimport (
constant_shared_ptr,
default_inst,
)
cdef __StructSpec get_reflection__Foo():
cdef _module_types.Foo defaults = _module_types.Foo.create(
constant_shared_ptr[_module_types.cFoo](
default_inst[_module_types.cFoo]()
)
)
cdef __StructSpec spec = __StructSpec.create(
name="Foo",
kind=__StructType.STRUCT,
annotations={
},
)
spec.add_field(
__FieldSpec.create(
id=1,
name="intField",
type=int,
kind=__NumberType.I32,
qualifier=__Qualifier.UNQUALIFIED,
default=None,
annotations={
},
),
)
spec.add_field(
__FieldSpec.create(
id=2,
name="optionalIntField",
type=int,
kind=__NumberType.I32,
qualifier=__Qualifier.OPTIONAL,
default=None,
annotations={
},
),
)
spec.add_field(
__FieldSpec.create(
id=3,
name="intFieldWithDefault",
type=int,
kind=__NumberType.I32,
qualifier=__Qualifier.UNQUALIFIED,
default=defaults.intFieldWithDefault,
annotations={
},
),
)
spec.add_field(
__FieldSpec.create(
id=4,
name="setField",
type=_module_types.Set__string,
kind=__NumberType.NOT_A_NUMBER,
qualifier=__Qualifier.UNQUALIFIED,
default=None,
annotations={
},
),
)
spec.add_field(
__FieldSpec.create(
id=5,
name="optionalSetField",
type=_module_types.Set__string,
kind=__NumberType.NOT_A_NUMBER,
qualifier=__Qualifier.OPTIONAL,
default=None,
annotations={
},
),
)
spec.add_field(
__FieldSpec.create(
id=6,
name="mapField",
type=_module_types.Map__string_List__string,
kind=__NumberType.NOT_A_NUMBER,
qualifier=__Qualifier.UNQUALIFIED,
default=None,
annotations={
},
),
)
spec.add_field(
__FieldSpec.create(
id=7,
name="optionalMapField",
type=_module_types.Map__string_List__string,
kind=__NumberType.NOT_A_NUMBER,
qualifier=__Qualifier.OPTIONAL,
default=None,
annotations={
},
),
)
spec.add_field(
__FieldSpec.create(
id=8,
name="binaryField",
type=bytes,
kind=__NumberType.NOT_A_NUMBER,
qualifier=__Qualifier.UNQUALIFIED,
default=None,
annotations={
},
),
)
return spec
cdef __StructSpec get_reflection__Baz():
cdef __StructSpec spec = __StructSpec.create(
name="Baz",
kind=__StructType.UNION,
annotations={
},
)
spec.add_field(
__FieldSpec.create(
id=1,
name="intField",
type=int,
kind=__NumberType.I32,
qualifier=__Qualifier.UNQUALIFIED,
default=None,
annotations={
},
),
)
spec.add_field(
__FieldSpec.create(
id=4,
name="setField",
type=_module_types.Set__string,
kind=__NumberType.NOT_A_NUMBER,
qualifier=__Qualifier.UNQUALIFIED,
default=None,
annotations={
},
),
)
spec.add_field(
__FieldSpec.create(
id=6,
name="mapField",
type=_module_types.Map__string_List__string,
kind=__NumberType.NOT_A_NUMBER,
qualifier=__Qualifier.UNQUALIFIED,
default=None,
annotations={
},
),
)
spec.add_field(
__FieldSpec.create(
id=8,
name="binaryField",
type=bytes,
kind=__NumberType.NOT_A_NUMBER,
qualifier=__Qualifier.UNQUALIFIED,
default=None,
annotations={
},
),
)
return spec
cdef __StructSpec get_reflection__Bar():
cdef _module_types.Bar defaults = _module_types.Bar.create(
constant_shared_ptr[_module_types.cBar](
default_inst[_module_types.cBar]()
)
)
cdef __StructSpec spec = __StructSpec.create(
name="Bar",
kind=__StructType.STRUCT,
annotations={
},
)
spec.add_field(
__FieldSpec.create(
id=1,
name="structField",
type=_module_types.Foo,
kind=__NumberType.NOT_A_NUMBER,
qualifier=__Qualifier.UNQUALIFIED,
default=None,
annotations={
},
),
)
spec.add_field(
__FieldSpec.create(
id=2,
name="optionalStructField",
type=_module_types.Foo,
kind=__NumberType.NOT_A_NUMBER,
qualifier=__Qualifier.OPTIONAL,
default=None,
annotations={
},
),
)
spec.add_field(
__FieldSpec.create(
id=3,
name="structListField",
type=_module_types.List__Foo,
kind=__NumberType.NOT_A_NUMBER,
qualifier=__Qualifier.UNQUALIFIED,
default=None,
annotations={
},
),
)
spec.add_field(
__FieldSpec.create(
id=4,
name="optionalStructListField",
type=_module_types.List__Foo,
kind=__NumberType.NOT_A_NUMBER,
qualifier=__Qualifier.OPTIONAL,
default=None,
annotations={
},
),
)
spec.add_field(
__FieldSpec.create(
id=5,
name="unionField",
type=_module_types.Baz,
kind=__NumberType.NOT_A_NUMBER,
qualifier=__Qualifier.UNQUALIFIED,
default=None,
annotations={
},
),
)
spec.add_field(
__FieldSpec.create(
id=6,
name="optionalUnionField",
type=_module_types.Baz,
kind=__NumberType.NOT_A_NUMBER,
qualifier=__Qualifier.OPTIONAL,
default=None,
annotations={
},
),
)
return spec
cdef __SetSpec get_reflection__Set__string():
return __SetSpec.create(
value=str,
kind=__NumberType.NOT_A_NUMBER,
)
cdef __ListSpec get_reflection__List__string():
return __ListSpec.create(
value=str,
kind=__NumberType.NOT_A_NUMBER,
)
cdef __MapSpec get_reflection__Map__string_List__string():
return __MapSpec.create(
key=str,
key_kind=__NumberType.NOT_A_NUMBER,
value=_module_types.List__string,
value_kind=__NumberType.NOT_A_NUMBER,
)
cdef __ListSpec get_reflection__List__Foo():
return __ListSpec.create(
value=_module_types.Foo,
kind=__NumberType.NOT_A_NUMBER,
)
<|end_of_text|># the original code is from https://github.com/springer-math/Mathematics-of-Epidemics-on-Networks
# this was re-written in cython for computational efficiency
import random
from collections import defaultdict, Counter
cdef class _ListDict_(object):
cdef public dict item_to_position
cdef public list items
cdef public bint weighted
cdef public object weight
cdef public long double max_weight
cdef public long double _total_weight
cdef public long double max_weight_count
def __init__(self, bint weighted=False):
self.item_to_position = {}
self.items = []
self.weighted = weighted
if self.weighted:
self.weight = defaultdict(int) # presume all weights positive
self.max_weight = 0
self._total_weight = 0
self.max_weight_count = 0
def __len__(self):
return len(self.items)
def __contains__(self, object item):
return item in self.item_to_position
cpdef _update_max_weight(self):
C = Counter(self.weight.values()) # may be a faster way to do this, we only need to count the max.
self.max_weight = max(C.keys())
self.max_weight_count = C[self.max_weight]
cpdef insert(self, object item, long double weight=0):
r'''
If not present, then inserts the thing (with weight if appropriate)
if already there, replaces the weight unless weight is 0
If weight is 0, then it removes the item and doesn't replace.
WARNING:
replaces weight if already present, does not increment weight.
'''
if self.__contains__(item):
self.remove(item)
if weight!= 0:
self.update(item, weight_increment=weight)
cpdef update(self, object item, long double weight_increment=-1):
r'''
If not present, then inserts the thing (with weight if appropriate)
if already there, increments weight
WARNING:
            increments weight if already present,
cannot overwrite weight.
'''
        if weight_increment != -1:  # the default -1 marks the unweighted case
            if weight_increment > 0 or self.weight[item] != self.max_weight:
                self.weight[item] = self.weight[item] + weight_increment
                self._total_weight += weight_increment
                if self.weight[item] > self.max_weight:
                    self.max_weight_count = 1
                    self.max_weight = self.weight[item]
                elif self.weight[item] == self.max_weight:
                    self.max_weight_count += 1
            else:  # it's a negative increment and the item was at max weight
                self.max_weight_count -= 1
                self.weight[item] = self.weight[item] + weight_increment
                self._total_weight += weight_increment
                if self.max_weight_count == 0:
                    self._update_max_weight()
elif self.weighted:
raise Exception('if weighted, must assign weight_increment:', weight_increment)
if item in self: # we've already got it, do nothing else
return
self.items.append(item)
self.item_to_position[item] = len(self.items) - 1
cpdef remove(self, object choice):
if not self.__contains__(choice):
return
position = self.item_to_position.pop(choice)
last_item = self.items.pop()
if position!= len(self.items):
self.items[position] = last_item
self.item_to_position[last_item] = position
if self.weighted:
weight = self.weight.pop(choice)
self._total_weight -= weight
if weight == self.max_weight:
# if we find ourselves in this case often
# it may be better just to let max_weight be the
# largest weight *ever* encountered, even if all remaining weights are less
#
self.max_weight_count -= 1
if self.max_weight_count == 0 and len(self) > 0:
self._update_max_weight()
cpdef object choose_random(self):
# r'''chooses a random node. If there is a weight, it will use rejection
# sampling to choose a random node until it succeeds'''
if self.weighted:
while True:
choice = random.choice(self.items)
if random.random() < self.weight[choice] / self.max_weight:
break
# r = random.random()*self.total_weight
# for item in self.items:
# r-= self.weight[item]
# if r<0:
# break
return choice
else:
return random.choice(self.items)
cpdef object random_removal(self):
r'''uses other class methods to choose and then remove a random node'''
choice = self.choose_random()
self.remove(choice)
return choice
cpdef long double total_weight(self):
if self.weighted:
return self._total_weight
else:
return len(self.items)
cpdef update_total_weight(self):
self._total_weight = 0
for item in self.items:
self._total_weight += self.weight[item]
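# ---------------------------------------------------------------------------
# Minimal usage sketch (added example, not part of the original module):
# a weighted event set where items are drawn with probability proportional
# to their weights, as used in Gillespie-style simulations.
def _example_listdict_usage():
    events = _ListDict_(weighted=True)
    events.insert('recover_node_3', weight=1.0)
    events.insert('transmit_1_to_2', weight=2.5)
    # 'transmit_1_to_2' is 2.5x as likely to be drawn as 'recover_node_3'
    event = events.random_removal()
    return event, events.total_weight()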
<|end_of_text|>'''
TACO: Transcriptome meta-assembly from RNA-Seq
'''
from cpython cimport array
from array import array
import networkx as nx
__author__ = "Matthew Iyer and Yashar Niknafs"
__copyright__ = "Copyright 2016"
__credits__ = ["Matthew Iyer", "Yashar Niknafs"]
__license__ = "GPL"
__version__ = "0.4.0"
__maintainer__ = "Yashar Niknafs"
__email__ = "[email protected]"
__status__ = "Development"
# constant minimum path score
DEF MIN_SCORE = 1.0e-10
def topological_sort(object G):
"""From NetworkX source code
https://github.com/networkx/networkx/blob/v1.10/networkx/algorithms/dag.py
"""
cdef set explored
cdef list order, fringe, new_nodes
cdef int v, w, n
order = []
explored = set()
for v in G.nodes_iter(): # process all vertices in G
if v in explored:
continue
fringe = [v] # nodes yet to look at
while fringe:
w = fringe[-1] # depth first search
if w in explored: # already looked down this branch
fringe.pop()
continue
# Check successors for cycles and for new nodes
new_nodes = []
for n in G[w]:
if n not in explored:
new_nodes.append(n)
if new_nodes: # Add new_nodes to fringe
fringe.extend(new_nodes)
else: # No new nodes so w is fully explored
explored.add(w)
order.append(w)
fringe.pop() # done considering this node
order.reverse()
return order
def find_path(int[:] nodes, float[:] exprs, list succ, int isource, int isink):
cdef float[:] min_exprs
cdef int[:] prevs
cdef list ipath, path
cdef int n, i, j, prev, x
cdef float expr, min_expr, new_min_expr, new_expr
cdef array.array int_array_template = array.array('i', [])
cdef array.array float_array_template = array.array('f', [])
# initialize data structures
n = nodes.shape[0]
min_exprs = array.clone(float_array_template, n, zero=False)
prevs = array.clone(int_array_template, n, zero=False)
for i in xrange(n):
min_exprs[i] = MIN_SCORE
prevs[i] = isink
min_exprs[isource] = exprs[isource]
# traverse nodes in topological sort order
for i in xrange(n):
min_expr = min_exprs[i]
for j in succ[i]:
new_min_expr = min_expr if min_expr < exprs[j] else exprs[j]
if (prevs[j] == isink) or (new_min_expr > min_exprs[j]):
min_exprs[j] = new_min_expr
prevs[j] = i
# traceback to get path
expr = min_exprs[isink]
prev = isink
ipath = [isink]
while True:
prev = prevs[prev]
ipath.append(prev)
if prev == isource:
break
ipath.reverse()
# subtract path
for i in xrange(len(ipath)):
x = ipath[i]
new_expr = exprs[x] - expr
exprs[x] = MIN_SCORE if MIN_SCORE >= new_expr else new_expr
ipath[i] = nodes[x]
return tuple(ipath), expr
def find_paths(object G, object expr_attr, float path_frac=0, int max_paths=0):
cdef dict indexes
cdef int[:] nodes
cdef float[:] exprs
cdef list succ
cdef list results
cdef int n, i, j, isource, isink, iterations
cdef float expr, lowest_expr
cdef tuple path
# initialize data structures
n = len(G)
nodes = array('i', nx.topological_sort(G))
indexes = {}
exprs = array('f', nodes)
succ = []
for i in xrange(n):
indexes[nodes[i]] = i
for i in xrange(n):
exprs[i] = G.node[nodes[i]][expr_attr]
succ.append([indexes[x] for x in G.successors_iter(nodes[i])])
isource = 0
isink = n - 1
# don't run if all nodes are zero
if exprs[isource] < MIN_SCORE:
return []
# find highest scoring path
path, expr = find_path(nodes, exprs, succ, isource, isink)
results = [(path, expr)]
# define threshold score to stop producing paths
lowest_expr = expr * path_frac
if MIN_SCORE > lowest_expr:
lowest_expr = MIN_SCORE
# iterate to find paths
iterations = 1
while True:
if max_paths > 0 and iterations >= max_paths:
break
# find path
path, expr = find_path(nodes, exprs, succ, isource, isink)
if expr <= lowest_expr:
break
# store path
results.append((path, expr))
iterations += 1
return results
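# ---------------------------------------------------------------------------
# Minimal usage sketch (added example, not part of the original module).
# Assumes networkx 1.x, matching the G.node / successors_iter API used above.
# The graph is a diamond DAG: 0 (source) -> {1, 2} -> 3 (sink).
def _example_find_paths():
    G = nx.DiGraph()
    G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)])
    for node, expr in {0: 4.0, 1: 3.0, 2: 1.0, 3: 4.0}.items():
        G.node[node]['expr'] = expr
    # the highest-scoring path is (0, 1, 3) with bottleneck expression 3.0
    return find_paths(G, 'expr', path_frac=0.1, max_paths=10)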
<|end_of_text|>import cython
import numpy as np
cimport numpy as np
@cython.boundscheck(False)
@cython.wraparound(False)
cpdef multiply(A, B):
return A*B
@cython.boundscheck(False)
@cython.wraparound(False)
cpdef divide(A, B):
return A/B
<|end_of_text|># -*- coding: utf-8 -*-
# distutils: language = c++
# distutils: sources = cheaptrick.cpp
cdef extern from "../../lib/world/cheaptrick.h" nogil:
void CheapTrick(double *x, int x_length, int fs, double *time_axis, double *f0,
int f0_length, double **spectrogram)
    int GetFFTSizeForCheapTrick(int fs)
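# ---------------------------------------------------------------------------
# Illustrative call pattern (added comment, not part of the original file):
# a .pyx caller is expected to allocate `spectrogram` as an array of
# f0_length row pointers, each row holding fft_size / 2 + 1 doubles with
# fft_size = GetFFTSizeForCheapTrick(fs), before invoking CheapTrick().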
<|end_of_text|># cython: profile=True
cimport splines_cython
import numpy as np
cimport numpy as np
import cython
cimport cython
cdef Ugrid mygrid
from cython.parallel import prange, parallel
from operator import mul
from itertools import product
cdef class Splines1d:
cdef UBspline_1d_d* __spline__
cdef Ugrid __grid__
cdef BCtype_d __boundaries__
def __init__(self, smin, smax, orders, boundaries=4):
smin = np.atleast_1d(smin)
smax = np.atleast_1d(smax)
orders = np.atleast_1d(orders)
self.__grid__.start = smin[0]
self.__grid__.end = smax[0]
self.__grid__.num = orders[0]
self.__boundaries__.lCode = 4
self.__boundaries__.rCode = 4
def set_spline(self, np.ndarray[np.double_t] table):
cdef double* data = <double *> table.data
self.__spline__ = create_UBspline_1d_d(self.__grid__, self.__boundaries__, data)
@cython.boundscheck(False)
def eval_spline(self, np.ndarray[np.double_t, ndim=1] table):
#
cdef int n = len(table)
cdef int i
cdef UBspline_1d_d* spline = self.__spline__
cdef np.ndarray[np.double_t, ndim=1] output = np.empty(n)
cdef double* data_in = <double*> table.data
cdef double* data = <double*> output.data
with nogil, parallel():
for i in prange(n):
eval_UBspline_1d_d(spline, data_in[i], &data[i])
return output
cdef class Splines2d:
cdef UBspline_2d_d* __spline__
cdef Ugrid __grid_x__, __grid_y__
cdef BCtype_d __boundaries_x__, __boundaries_y__
def __init__(self, smin, smax, orders, boundaries=4):
smin = np.atleast_1d(smin)
smax = np.atleast_1d(smax)
orders = np.atleast_1d(orders)
self.__grid_x__.start = smin[0]
self.__grid_x__.end = smax[0]
self.__grid_x__.num = orders[0]
self.__boundaries_x__.lCode = 4
self.__boundaries_x__.rCode = 4
self.__grid_y__.start = smin[1]
self.__grid_y__.end = smax[1]
self.__grid_y__.num = orders[1]
self.__boundaries_y__.lCode = 4
self.__boundaries_y__.rCode = 4
def set_spline(self, np.ndarray[np.double_t] values):
cdef double* data = <double *> values.data
self.__spline__ = create_UBspline_2d_d(self.__grid_x__, self.__grid_y__, self.__boundaries_x__, self.__boundaries_y__, data)
@cython.boundscheck(False)
def eval_spline(self, np.ndarray[np.double_t, ndim=2] points):
#
cdef int n = points.shape[1]
cdef int i
cdef UBspline_2d_d* spline = self.__spline__
cdef np.ndarray[np.double_t, ndim=1] output = np.empty(n)
cdef np.ndarray[np.double_t, ndim=1] points_x = points[0,:]
cdef np.ndarray[np.double_t, ndim=1] points_y = points[1,:]
cdef double* x_in = <double*> points_x.data
cdef double* y_in = <double*> points_y.data
cdef double* data = <double*> output.data
with nogil, parallel():
for i in prange(n):
eval_UBspline_2d_d(spline, x_in[i], y_in[i], &data[i])
return output
cdef class Splines3d:
cdef UBspline_3d_d* __spline__
cdef Ugrid __grid_x__, __grid_y__, __grid_z__
cdef BCtype_d __boundaries_x__, __boundaries_y__, __boundaries_z__
def __init__(self, smin, smax, orders, boundaries=4):
smin = np.atleast_1d(smin)
smax = np.atleast_1d(smax)
orders = np.atleast_1d(orders)
self.__grid_x__.start = smin[0]
self.__grid_x__.end = smax[0]
self.__grid_x__.num = orders[0]
self.__boundaries_x__.lCode = 4
self.__boundaries_x__.rCode = 4
self.__grid_y__.start = smin[1]
self.__grid_y__.end = smax[1]
self.__grid_y__.num = orders[1]
self.__boundaries_y__.lCode = 4
self.__boundaries_y__.rCode = 4
self.__grid_z__.start = smin[2]
self.__grid_z__.end = smax[2]
self.__grid_z__.num = orders[2]
self.__boundaries_z__.lCode = 4
self.__boundaries_z__.rCode = 4
def set_values(self, np.ndarray[np.double_t] values):
cdef double* data = <double *> values.data
self.__spline__ = create_UBspline_3d_d(self.__grid_x__, self.__grid_y__, self.__grid_z__,
self.__boundaries_x__, self.__boundaries_y__, self.__boundaries_z__, data)
@cython.boundscheck(False)
def interpolate(self, np.ndarray[np.double_t, ndim=2] points):
#
cdef int n = points.shape[1]
cdef int i
cdef UBspline_3d_d* spline = self.__spline__
cdef np.ndarray[np.double_t, ndim=1] output = np.empty(n)
cdef np.ndarray[np.double_t, ndim=1] points_x = points[0,:]
cdef np.ndarray[np.double_t, ndim=1] points_y = points[1,:]
cdef np.ndarray[np.double_t, ndim=1] points_z = points[2,:]
cdef double* x_in = <double*> points_x.data
cdef double* y_in = <double*> points_y.data
cdef double* z_in = <double*> points_z.data
cdef double* data = <double*> output.data
with nogil, parallel():
for i in prange(n):
eval_UBspline_3d_d(spline, x_in[i], y_in[i], z_in[i], &data[i])
return output
#######################################
# Splines with multiple return values #
#######################################
cdef class MSplines1d:
cdef multi_UBspline_1d_d* __spline__
cdef Ugrid __grid_x__
cdef BCtype_d __boundaries_x__
cdef int __n_splines__
cdef np.ndarray values
# cpdef np.ndarray grid
# cpdef int d
def __init__(self, smin, smax, orders, boundaries=4, int n_splines=1):
smin = np.atleast_1d(smin)
smax = np.atleast_1d(smax)
orders = np.atleast_1d(orders)
self.__grid_x__.start = smin[0]
self.__grid_x__.end = smax[0]
self.__grid_x__.num = orders[0]
self.__boundaries_x__.lCode = 4
self.__boundaries_x__.rCode = 4
self.__n_splines__ = n_splines
self.__spline__ = create_multi_UBspline_1d_d(self.__grid_x__, self.__boundaries_x__, n_splines)
def set_values(self, np.ndarray[np.double_t, ndim=2] values):
cdef double* data
cdef int n_splines = self.__n_splines__
cdef np.ndarray[np.double_t,ndim=1] vals
cdef multi_UBspline_1d_d* spline = self.__spline__
for i in range(n_splines):
vals = values[i,:]
data = <double *> vals.data
set_multi_UBspline_1d_d(spline, i, data)
self.values = values
@cython.boundscheck(False)
def interpolate(self, np.ndarray[np.double_t, ndim=2] points):
#
cdef int n = points.shape[1]
cdef int i
        cdef int n_v = self.values.shape[0]  # number of splines
cdef multi_UBspline_1d_d* spline = self.__spline__
cdef np.ndarray[np.double_t, ndim=1] points_x = points[0,:]
cdef double* x_in = <double*> points_x.data
cdef np.ndarray[np.double_t, ndim=2] output = np.empty( (n, n_v), dtype=np.double )
cdef np.ndarray[np.double_t, ndim=3] doutput = np.empty( (n, n_v, 1), dtype=np.double )
cdef double* data = <double*> output.data
cdef double* d_data = <double*> doutput.data
with nogil, parallel():
for i in prange(n):
eval_multi_UBspline_1d_d_vg(spline, x_in[i], &data[i*n_v], &d_data[i*n_v])
return [output, doutput]
cdef class MSplines2d:
cdef multi_UBspline_2d_d* __spline__
cdef Ugrid __grid_x__, __grid_y__
cdef BCtype_d __boundaries_x__, __boundaries_y__
cdef int __n_splines__
cdef np.ndarray values
cdef int d
def __init__(self, smin, smax, orders, boundaries=4, int n_splines=1):
smin = np.atleast_1d(smin)
smax = np.atleast_1d(smax)
orders = np.atleast_1d(orders)
self.__grid_x__.start = smin[0]
self.__grid_x__.end = smax[0]
self.__grid_x__.num = orders[0]
self.__boundaries_x__.lCode = 4
self.__boundaries_x__.rCode = 4
self.__grid_y__.start = smin[1]
self.__grid_y__.end = smax[1]
self.__grid_y__.num = orders[1]
self.__boundaries_y__.lCode = 4
self.__boundaries_y__.rCode = 4
self.__n_splines__ = n_splines
self.__spline__ = create_multi_UBspline_2d_d(self.__grid_x__, self.__grid_y__, self.__boundaries_x__, self.__boundaries_y__, n_splines)
def set_values(self, np.ndarray[np.double_t, ndim=2] values):
cdef double* data
cdef int n_splines = self.__n_splines__
cdef np.ndarray[np.double_t,ndim=1] vals
cdef multi_UBspline_2d_d* spline = self.__spline__
for i in range(n_splines):
vals = values[i,:]
data = <double *> vals.data
set_multi_UBspline_2d_d(spline, i, data)
self.values = values
@cython.boundscheck(False)
def interpolate(self, np.ndarray[np.double_t, ndim=2] points):
#
cdef int n = points.shape[1]
cdef int i
cdef int n_v = self.values.shape[0] # number of splines
cdef multi_UBspline_2d_d* spline = self.__spline__
cdef np.ndarray[np.double_t, ndim=1] points_x = points[0,:]
cdef np.ndarray[np.double_t, ndim=1] points_y = points[1,:]
cdef double* x_in = <double*> points_x.data
cdef double* y_in = <double*> points_y.data
cdef np.ndarray[np.double_t, ndim=2] output = np.empty( (n, n_v), dtype=np.double )
cdef np.ndarray[np.double_t, ndim=3] doutput = np.empty( (n, n_v, 2), dtype=np.double )
cdef double* data = <double*> output.data
cdef double* d_data = <double*> doutput.data
with nogil, parallel():
for i in prange(n):
eval_multi_UBspline_2d_d_vg(spline, x_in[i], y_in[i], &data[i*n_v], &d_data[2*i*n_v])
return [output, doutput]
cdef class MSplines3d:
cdef multi_UBspline_3d_d* __spline__
cdef Ugrid __grid_x__, __grid_y__, __grid_z__
cdef BCtype_d __boundaries_x__, __boundaries_y__, __boundaries_z__
cdef int __n_splines__
cdef np.ndarray values
cdef int d
def __init__(self, smin, smax, orders, boundaries=4, int n_splines=1):
smin = np.atleast_1d(smin)
smax = np.atleast_1d(smax)
orders = np.atleast_1d(orders)
self.__grid_x__.start = smin[0]
self.__grid_x__.end = smax[0]
self.__grid_x__.num = orders[0]
self.__boundaries_x__.lCode = 4
self.__boundaries_x__.rCode = 4
self.__grid_y__.start = smin[1]
self.__grid_y__.end = smax[1]
self.__grid_y__.num = orders[1]
self.__boundaries_y__.lCode = 4
self.__boundaries_y__.rCode = 4
self.__grid_z__.start = smin[2]
self.__grid_z__.end = smax[2]
self.__grid_z__.num = orders[2]
self.__boundaries_z__.lCode = 4
self.__boundaries_z__.rCode = 4
self.__n_splines__ = n_splines
self.__spline__ = create_multi_UBspline_3d_d(self.__grid_x__, self.__grid_y__, self.__grid_z__,
self.__boundaries_x__, self.__boundaries_y__, self.__boundaries_z__, n_splines)
def set_values(self, np.ndarray[np.double_t, ndim=2] values):
cdef double* data
cdef int n_splines = self.__n_splines__
cdef np.ndarray[np.double_t,ndim=1] vals
cdef multi_UBspline_3d_d* spline = self.__spline__
for i in range(n_splines):
vals = values[i,:]
data = <double *> vals.data
set_multi_UBspline_3d_d(spline, i, data)
self.values = values
@cython.boundscheck(False)
def interpolate(self, np.ndarray[np.double_t, ndim=2] points):
#
cdef int n = points.shape[1]
cdef int i
cdef int n_v = self.values.shape[0] # number of splines
cdef multi_UBspline_3d_d* spline = self.__spline__
cdef np.ndarray[np.double_t, ndim=1] points_x = points[0,:]
cdef np.ndarray[np.double_t, ndim=1] points_y = points[1,:]
cdef np.ndarray[np.double_t, ndim=1] points_z = points[2,:]
cdef double* x_in = <double*> points_x.data
cdef double* y_in = <double*> points_y.data
cdef double* z_in = <double*> points_z.data
cdef np.ndarray[np.double_t, ndim=2] output = np.empty( (n, n_v), dtype=np.double )
cdef np.ndarray[np.double_t, ndim=3] doutput = np.empty( (n, n_v, 3), dtype=np.double )
cdef double* data = <double*> output.data
cdef double* d_data = <double*> doutput.data
with nogil, parallel():
for i in prange(n):
eval_multi_UBspline_3d_d_vg(spline, x_in[i], y_in[i], z_in[i], &data[i*n_v], &d_data[3*i*n_v])
return [output, doutput]
class MultivariateSplines:
def __init__(self, smin, smax, orders):
self.d = len(smin)
assert(len(smax) == self.d)
assert(len(orders) == self.d)
self.smin = smin
self.smax = smax
self.orders = orders
self.__splines__ = None
def set_values(self, values):
if not np.all( np.isfinite(values)):
raise Exception('Trying to interpolate non-finite values')
n_v = values.shape[0]
if self.__splines__ is None:
self.n_v = n_v
if self.d == 1:
self.__splines__ = MSplines1d(self.smin, self.smax, self.orders, n_splines=n_v)
            elif self.d == 2:
self.__splines__ = MSplines2d(self.smin, self.smax, self.orders, n_splines=n_v)
elif self.d == 3:
self.__splines__ = MSplines3d(self.smin, self.smax, self.orders, n_splines=n_v)
else:
if n_v!= self.n_v:
raise Exception('Trying to set {} values for the interpolant. Expected : {}'.format(n_v, self.n_v))
self.__splines__.set_values(values)
def interpolate(self, points, with_derivatives=False):
if not np.all( np.isfinite(points)):
raise Exception('Spline interpolator evaluated at non-finite points.')
projection = np.minimum( points, self.smax[:,None]-0.0000000001)
projection = np.maximum( projection, self.smin[:,None]+0.0000000001)
[value, d_value] = self.__splines__.interpolate(projection)
value = np.ascontiguousarray( value.T )
d_value = np.ascontiguousarray( np.rollaxis(d_value, 0, 3 ) )
delta = (points - projection)
# TODO : correct only outside observations
for i in range(value.shape[0]):
value[i,:] += (delta*d_value[i,:,:]).sum(axis=0)
if with_derivatives:
return [value,d_value]
else:
return value
def __call__(self, s):
return self.interpolate(s)
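# ---------------------------------------------------------------------------
# Minimal usage sketch (added example, not part of the original module):
# fit a single 2-D spline to samples of f(x, y) = sin(x) * cos(y) and
# evaluate it at random points. Assumes the flattened values follow the
# row-major layout expected by einspline.
def _example_multivariate_splines():
    smin = np.array([0.0, 0.0])
    smax = np.array([1.0, 1.0])
    orders = np.array([50, 50])
    x = np.linspace(smin[0], smax[0], orders[0])
    y = np.linspace(smin[1], smax[1], orders[1])
    vals = np.sin(x)[:, None] * np.cos(y)[None, :]
    interp = MultivariateSplines(smin, smax, orders)
    interp.set_values(vals.reshape(1, -1))  # one spline -> shape (1, 2500)
    points = np.random.rand(2, 100)         # shape (d, n_points)
    return interp(points)                   # shape (n_points, 1)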
<|end_of_text|>cimport cython
cimport numpy as np
import numpy as np
DTYPE_INT = int
ctypedef np.int_t DTYPE_INT_t
DTYPE_FLOAT = np.float64
ctypedef np.float64_t DTYPE_FLOAT_t
@cython.boundscheck(False)
cpdef _landslide_runout(
DTYPE_FLOAT_t dx,
DTYPE_FLOAT_t phi,
DTYPE_FLOAT_t min_deposition_slope,
np.ndarray[DTYPE_INT_t, ndim=1] stack_rev_sel,
np.ndarray[DTYPE_INT_t, ndim=2] receivers,
np.ndarray[DTYPE_FLOAT_t, ndim=2] fract,
np.ndarray[DTYPE_FLOAT_t, ndim=1] Qs_in,
np.ndarray[DTYPE_FLOAT_t, ndim=1] L_Hill,
np.ndarray[DTYPE_FLOAT_t, ndim=1] Qs_out,
np.ndarray[DTYPE_FLOAT_t, ndim=1] dH_Hill,
np.ndarray[DTYPE_FLOAT_t, ndim=1] H_i_temp,
np.ndarray[DTYPE_FLOAT_t, ndim=1] max_D,
np.ndarray[DTYPE_FLOAT_t, ndim=1] length_adjacent_cells,
):
"""
Calculate landslide runout using a non-local deposition algorithm, see:
* Campforts B., Shobe C.M., Steer P., Vanmaercke M., Lague D., Braun J.
(2020) HyLands 1.0: a hybrid landscape evolution model to simulate
the impact of landslides and landslide-derived sediment on landscape
evolution. Geosci Model Dev: 13(9):3863–86.
"""
# define internal variables
cdef int donor, rcvr, r
    cdef double proportion, dH, max_D_angle
# Iterate backward through the stack, which means we work from upstream to
# downstream.
for donor in stack_rev_sel:
dH = max(
0,
min(((Qs_in[donor] / dx) / L_Hill[donor]) / (1 - phi), max_D[donor])
)
dH_Hill[donor] += dH
H_i_temp[donor] += dH
Qs_in[donor] -= dH * dx * dx * (1 - phi)
Qs_out[donor] += Qs_in[donor]
for r in range(receivers.shape[1]):
rcvr = receivers[donor, r]
max_D_angle = H_i_temp[donor] - min_deposition_slope*length_adjacent_cells[r] - H_i_temp[rcvr]
max_D[rcvr] = min(max(max_D[rcvr], H_i_temp[donor] - H_i_temp[rcvr]),max_D_angle)
proportion = fract[donor, r]
if proportion > 0. and donor!= rcvr:
Qs_in[rcvr] += Qs_out[donor] * proportion
Qs_in[donor] -= Qs_out[donor] * proportion
<|end_of_text|># file: cython_cpp.pxd
# declare the interace
cdef extern from 'cpp_pi.h':
cdef cppclass PiMaker:
PiMaker()
double get()
void set(int n) except +<|end_of_text|>from.c_core cimport InputArray, OutputArray
cdef extern from "opencv2/imgproc.hpp" namespace "cv" nogil:
enum ColorConversionCodes:
COLOR_BGR2GRAY
void cvtColor(InputArray, OutputArray, int, int)
void cvtColor(InputArray, OutputArray, int)
<|end_of_text|># SPDX-FileCopyrightText: Copyright 2021, Siavash Ameli <[email protected]>
# SPDX-License-Identifier: BSD-3-Clause
# SPDX-FileType: SOURCE
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the license found in the LICENSE.txt file in the root
# directory of this source tree.
# =======
# Imports
# =======
# Python
import numpy
from scipy.sparse import issparse, isspmatrix_csr, isspmatrix_csc, csr_matrix
# Cython
from.py_c_linear_operator cimport pycLinearOperator
from.c_linear_operator cimport cLinearOperator
from.c_dense_affine_matrix_function cimport cDenseAffineMatrixFunction
from.c_csr_affine_matrix_function cimport cCSRAffineMatrixFunction
from.c_csc_affine_matrix_function cimport cCSCAffineMatrixFunction
from.._definitions.types cimport IndexType, LongIndexType, FlagType, \
MemoryViewLongIndexType
# ==========================
# pyc Affine Matrix Function
# ==========================
cdef class pycAffineMatrixFunction(pycLinearOperator):
"""
Defines a linear operator that is an affine function of a single parameter.
    Given two matrices :math:`\\mathbf{A}` and :math:`\\mathbf{B}`, the linear
operator is defined by
.. math::
\\mathbf{A}(t) = \\mathbf{A} + t \\mathbf{B},
where :math:`t \\in \\mathbb{R}` is a parameter.
**Initializing Object:**
The matrices :math:`\\mathbf{A}` and :math:`\\mathbf{B}` are given at the
initialization of the object. These matrices can be a dense matrix as 2D
numpy arrays, or sparse matrices of any format (CSR, CSC, etc) using scipy
sparse module.
.. note::
Initializing the linear operator requires python's GIL. Also, the
following examples should be used in a ``*.pyx`` file and should be
compiled as cython's extension module.
In the following example, we create the object ``Aop`` based on
scipy.sparse matrices of CSR format. Note the format of the input matrices
can also be anything other than ``'csr'``, such as ``'csc'``.
.. code-block:: python
>>> # Use this script in a *.pyx file
>>> import scipy.sparse
>>> # Create to random sparse matrices
        >>> n, m = 1000, 1000
>>> A = scipy.sparse.random(n, m, format='csr')
>>> B = scipy.sparse.random(n, m, format='csr')
>>> # Create linear operator object
>>> from imate.linear_operator cimport AffineMatrixFunction
>>> cdef AffineMatrixFunction Aop = AffineMatrixFunction(A, B)
The following is an example of defining the operator with dense matrices:
.. code-block:: python
>>> # Use this script in a *.pyx file
>>> import numpy
>>> # Create to random sparse matrices
        >>> n, m = 1000, 1000
        >>> A = numpy.random.randn(n, m)
        >>> B = numpy.random.randn(n, m)
>>> # Create linear operator object
>>> from imate.linear_operator cimport AffineMatrixFunction
>>> cdef AffineMatrixFunction Aop = AffineMatrixFunction(A, B)
If the matrix ``B`` is not given, or if it is ``None``, or if it is ``0``,
then the linear operator assumes ``B`` is zero matrix. For example:
.. code-block:: python
# Case 1: Not providing B
>>> cdef AffineMatrixFunction Aop = AffineMatrixFunction(A)
# Case 2: Setting B to None
>>> cdef AffineMatrixFunction Aop = AffineMatrixFunction(A, None)
# Case 3: Setting B to scalar zero
>>> cdef AffineMatrixFunction Aop = AffineMatrixFunction(A, 0)
If the matrix ``B`` is set to | Cython |
the scalar ``1``, the linear operator assumes
that ``B`` is the identity matrix. For example:
.. code-block:: python
>>> cdef AffineMatrixFunction Aop = AffineMatrixFunction(A, 1)
**Setting the Parameter:**
The parameter :math:`t` is given to the object ``Aop`` at **runtime** using
:func:`set_parameters` function.
.. note::
Setting the parameter using :func:`set_parameter` does not require
python's GIL, hence, the parameter can be set in ``nogil`` environment,
if desired.
.. code-block:: python
>>> # Use this script in a *.pyx file
>>> cdef double t = 1.0
>>> # nogil environment is optional
>>> with nogil:
... Aop.set_parameters(&t)
Note that a *pointer* to the parameter should be provided to the function.
**Matrix-Vector Multiplications:**
The linear operator can perform matrix vector multiplication using
:func:`dot` function and the matrix-vector multiplication with the
transposed matrix using :func:`transpose_dot` function.
.. note::
Matrix-vector multiplication using :func:`dot` and
:func:`transpose_dot` functions do not require python's GIL, hence,
they can be called in a ``nogil`` environment, if desired.
.. code-block:: python
>>> # Use this script in a *.pyx file
>>> # Create a vectors as cython's memoryview to numpy arrays
>>> import numpy
>>> cdef double[:] b = numpy.random.randn(m)
        >>> cdef double[:] c = numpy.empty(n, dtype=float)
>>> # Perform product on vector b and store the product on vector c
>>> with nogil:
... Aop.dot(&b[0], &c[0])
>>> # Perform product using the transpose of the operator
>>> with nogil:
>>> Aop.transpose_dot(&b[0], &c[0])
.. seealso::
:class:`Matrix`
"""
# =========
# __cinit__
# =========
def __cinit__(self, A, B=None):
"""
Sets matrices A and B.
"""
# Check A
if A is None:
raise ValueError('A cannot be None.')
if A.ndim!= 2:
raise ValueError('Input matrix should be a 2-dimensional array.')
# Data type
if A.dtype == b'float32':
self.data_type_name = b'float32'
elif A.dtype == b'float64':
self.data_type_name = b'float64'
elif A.dtype == b'float128':
self.data_type_name = b'float128'
else:
            raise TypeError('Data type should be float32, float64, or '
                            'float128.')
        # Check if B is None, in which case it is treated as the identity
        if B is None:
            # B is assumed to be identity
            B_is_identity = True
        else:
            # B is neither zero nor identity
            B_is_identity = False
            # Check similar types of A and B
            if not (type(A) == type(B)):
                raise TypeError('Matrices A and B should have similar types.')
            # Check A and B have the same data types
            if not (A.dtype == B.dtype):
                raise TypeError('A and B should have similar data types.')
            # Check consistent sizes of A and B
            if not (A.shape == B.shape):
                raise ValueError('A and B should have the same shape.')
# Determine A is sparse or dense
if issparse(A):
# Matrix type codes: 'r' for CSR, and 'c' for CSC
if isspmatrix_csr(A):
# Check sorted indices
if not A.has_sorted_indices:
A.sort_indices()
if (not B_is_identity) and (not B.has_sorted_indices):
B.sort_indices()
# CSR matrix
if self.data_type_name == b'float32':
self.set_csr_matrix_float(A, B, B_is_identity)
elif self.data_type_name == b'float64':
self.set_csr_matrix_double(A, B, B_is_identity)
elif self.data_type_name == b'float128':
self.set_csr_matrix_long_double(A, B, B_is_identity)
elif isspmatrix_csc(A):
# Check sorted indices
if not A.has_sorted_indices:
A.sort_indices()
if (not B_is_identity) and (not B.has_sorted_indices):
B.sort_indices()
# CSC matrix
if self.data_type_name == b'float32':
self.set_csc_matrix_float(A, B, B_is_identity)
elif self.data_type_name == b'float64':
self.set_csc_matrix_double(A, B, B_is_identity)
elif self.data_type_name == b'float128':
self.set_csc_matrix_long_double(A, B, B_is_identity)
else:
# If A is neither CSR or CSC, convert A to CSR
self.A_csr = csr_matrix(A)
if not B_is_identity:
self.B_csr = csr_matrix(B)
else:
self.B_csr = B
# Check sorted indices
if not self.A_csr.has_sorted_indices:
self.A_csr.sort_indices()
if (not B_is_identity) and (not self.B_csr.has_sorted_indices):
self.B_csr.sort_indices()
# CSR matrix
if self.data_type_name == b'float32':
self.set_csr_matrix_float(self.A_csr, self.B_csr,
B_is_identity)
elif self.data_type_name == b'float64':
self.set_csr_matrix_double(self.A_csr, self.B_csr,
B_is_identity)
elif self.data_type_name == b'float128':
self.set_csr_matrix_long_double(self.A_csr, self.B_csr,
B_is_identity)
else:
# Set a dense matrix
if self.data_type_name == b'float32':
self.set_dense_matrix_float(A, B, B_is_identity)
elif self.data_type_name == b'float64':
self.set_dense_matrix_double(A, B, B_is_identity)
elif self.data_type_name == b'float128':
self.set_dense_matrix_long_double(A, B, B_is_identity)
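    # Dispatch summary for the branches above (a restatement of the code, not
    # new behavior): the typed C++ backend is selected by storage format and
    # dtype, for example:
    #
    #   CSR   + float32  -> set_csr_matrix_float()       -> cCSRAffineMatrixFunction[float]
    #   CSR   + float64  -> set_csr_matrix_double()      -> cCSRAffineMatrixFunction[double]
    #   CSC   + float128 -> set_csc_matrix_long_double() -> cCSCAffineMatrixFunction[long double]
    #   other sparse     -> converted to CSR first, then dispatched as CSR
    #   dense ndarray    -> set_dense_matrix_*()         -> cDenseAffineMatrixFunction[...]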
# ======================
# set dense matrix float
# ======================
def set_dense_matrix_float(self, A, B, B_is_identity):
"""
        Sets dense matrices A and B.
        :param A: A 2-dimensional matrix.
        :type A: numpy.ndarray
"""
# Matrix size
cdef LongIndexType A_num_rows = A.shape[0]
cdef LongIndexType A_num_columns = A.shape[1]
# Contiguity
cdef FlagType A_is_row_major
cdef FlagType B_is_row_major = 0
if A.flags['C_CONTIGUOUS']:
A_is_row_major = 1
elif A.flags['F_CONTIGUOUS']:
A_is_row_major = 0
else:
raise TypeError('Matrix A should be either C or F contiguous.')
if not B_is_identity:
if B.flags['C_CONTIGUOUS']:
B_is_row_major = 1
elif B.flags['F_CONTIGUOUS']:
B_is_row_major = 0
else:
raise TypeError('Matrix B should be either C or F contiguous.')
# Declare memoryviews to get data pointer
cdef float[:, ::1] A_data_mv_c
cdef float[::1, :] A_data_mv_f
cdef float[:, ::1] B_data_mv_c = None
cdef float[::1, :] B_data_mv_f = None
# Declare pointer of A.data and B.data
cdef float* A_data
cdef float* B_data = NULL
# Get pointer to data of A depending on row or column major
if A_is_row_major:
# Memoryview of A for row major matrix
A_data_mv_c = A
# Pointer of the data of A
A_data = &A_data_mv_c[0, 0]
else:
# Memoryview of A for column major matrix
A_data_mv_f = A
# Pointer of the data of A
A_data = &A_data_mv_f[0, 0]
# Get pointer to data of B depending on row or column major
if not B_is_identity:
if B_is_row_major:
# Memoryview of B for row major matrix
B_data_mv_c = B
# Pointer of the data of B
B_data = &B_data_mv_c[0, 0]
else:
# Memoryview of B for column major matrix
B_data_mv_f = B
# Pointer of the data of B
B_data = &B_data_mv_f[0, 0]
# Create a linear operator object
if B_is_identity:
self.Aop_float = new cDenseAffineMatrixFunction[float](
A_data,
A_is_row_major,
A_num_rows,
A_num_columns)
else:
            self.Aop_float = new cDenseAffineMatrixFunction[float](
A_data,
A_is_row_major,
A_num_rows,
A_num_columns,
B_data,
B_is_row_major)
# =======================
# set dense matrix double
# =======================
def set_dense_matrix_double(self, A, B, B_is_identity):
"""
        Sets dense matrices A and B.
        :param A: A 2-dimensional matrix.
        :type A: numpy.ndarray
"""
# Matrix size
cdef LongIndexType A_num_rows = A.shape[0]
cdef LongIndexType A_num_columns = A.shape[1]
# Contiguity
cdef FlagType A_is_row_major
cdef FlagType B_is_row_major = 0
if A.flags['C_CONTIGUOUS']:
A_is_row_major = 1
elif A.flags['F_CONTIGUOUS']:
A_is_row_major = 0
else:
raise TypeError('Matrix A should be either C or F contiguous.')
if not B_is_identity:
if B.flags['C_CONTIGUOUS']:
B_is_row_major = 1
elif B.flags['F_CONTIGUOUS']:
B_is_row_major = 0
else:
raise TypeError('Matrix B should be either C or F contiguous.')
# Declare memoryviews to get data pointer
cdef double[:, ::1] A_data_mv_c
cdef double[::1, :] A_data_mv_f
cdef double[:, ::1] B_data_mv_c = None
cdef double[::1, :] B_data_mv_f = None
# Declare pointer to A.data and B.data
cdef double* A_data
cdef double* B_data = NULL
# Get pointer to data of A depending on row or column major
if A_is_row_major:
# Memoryview of A for row major matrix
A_data_mv_c = A
# Pointer of the data of A
A_data = &A_data_mv_c[0, 0]
else:
# Memoryview of A for column major matrix
A_data_mv_f = A
# Pointer of the data of A
A_data = &A_data_mv_f[0, 0]
# Get pointer to data of B depending on row or column major
if not B_is_identity:
if B_is_row_major:
# Memoryview of B for row major matrix
B_data_mv_c = B
# Pointer of the data of B
B_data = &B_data_mv_c[0, 0]
else:
# Memoryview of B for column major matrix
B_data_mv_f = B
# Pointer of the data of B
B_data = &B_data_mv_f[0, 0]
# Create a linear operator object
if B_is_identity:
self.Aop_double = new cDenseAffineMatrixFunction[double](
A_data,
A_is_row_major,
A_num_rows,
A_num_columns)
else:
self.Aop_double = new cDenseAffineMatrixFunction[double](
A_data,
A_is_row_major,
A_num_rows,
A_num_columns,
B_data,
B_is_row_major)
# ============================
# set dense matrix long double
# ============================
def set_dense_matrix_long_double(self, A, B, B_is_identity):
"""
        Sets dense matrices A and B.
        :param A: A 2-dimensional matrix.
        :type A: numpy.ndarray
"""
# Matrix size
cdef LongIndexType A_num_rows = A.shape[0]
cdef LongIndexType A_num_columns = A.shape[1]
# Contiguity
cdef FlagType A_is_row_major
cdef FlagType B_is_row_major = 0
if A.flags['C_CONTIGUOUS']:
A_is_row_major = 1
elif A.flags['F_CONTIGUOUS']:
A_is_row_major = 0
else:
raise TypeError('Matrix A should be either C or F contiguous.')
if not B_is_identity:
if B.flags['C_CONTIGUOUS']:
B_is_row_major = 1
elif B.flags['F_CONTIGUOUS']:
B_is_row_major = 0
else:
raise TypeError('Matrix B should be either C or F contiguous.')
# Declare memoryviews to get data pointer
cdef long double[:, ::1] A_data_mv_c
cdef long double[::1, :] A_data_mv_f
cdef long double[:, ::1] B_data_mv_c = None
cdef long double[::1, :] B_data_mv_f = None
# Declare pointer to A.data and B.data
cdef long double* A_data
cdef long double* B_data = NULL
# Get pointer to data of A depending on row or column major
if A_is_row_major:
# Memoryview of A for row major matrix
A_data_mv_c = A
# Pointer of the data of A
A_data = &A_data_mv_c[0, 0]
else:
# Memoryview of A for column major matrix
A_data_mv_f = A
# Pointer of the data of A
A_data = &A_data_mv_f[0, 0]
        # Get pointer to data of B depending on row or column major
        if not B_is_identity:
            if B_is_row_major:
                # Memoryview of B for row major matrix
                B_data_mv_c = B
                # Pointer of the data of B
                B_data = &B_data_mv_c[0, 0]
            else:
                # Memoryview of B for column major matrix
B_data_mv_f = B
# Pointer of the data of B
B_data = &B_data_mv_f[0, 0]
# Create a linear operator object
if B_is_identity:
self.Aop_long_double = new cDenseAffineMatrixFunction[long double](
A_data,
A_is_row_major,
A_num_rows,
A_num_columns)
else:
self.Aop_long_double = new cDenseAffineMatrixFunction[long double](
A_data,
A_is_row_major,
A_num_rows,
A_num_columns,
B_data,
B_is_row_major)
# ====================
# set csr matrix float
# ====================
def set_csr_matrix_float(self, A, B, B_is_identity):
"""
"""
# Matrix size
cdef LongIndexType A_num_rows = A.shape[0]
cdef LongIndexType A_num_columns = A.shape[1]
# If the input type is the same as LongIndexType, no copy is performed.
self.A_indices_copy = \
A.indices.astype(self.long_index_type_name, copy=False)
self.A_index_pointer_copy = \
A.indptr.astype(self.long_index_type_name, copy=False)
# Declare memoryviews to get pointers
cdef float[:] A_data_mv = A.data
cdef MemoryViewLongIndexType A_indices_mv = self.A_indices_copy
cdef MemoryViewLongIndexType A_index_pointer_mv = \
self.A_index_pointer_copy
cdef float[:] B_data_mv = None
cdef MemoryViewLongIndexType B_indices_mv = None
cdef MemoryViewLongIndexType B_index_pointer_mv = None
if not B_is_identity:
# If input type is the same as LongIndexType, no copy is performed.
self.B_indices_copy = \
B.indices.astype(self.long_index_type_name, copy=False)
self.B_index_pointer_copy = \
B.indptr.astype(self.long_index_type_name, copy=False)
B_data_mv = B.data
B_indices_mv = self.B_indices_copy
B_index_pointer_mv = self.B_index_pointer_copy
# Declare pointers
cdef float* A_data = &A_data_mv[0]
cdef LongIndexType* A_indices = &A_indices_mv[0]
cdef LongIndexType* A_index_pointer = &A_index_pointer_mv[0]
cdef float* B_data = NULL
cdef LongIndexType* B_indices = NULL
cdef LongIndexType* B_index_pointer = NULL
if not B_is_identity:
B_data = &B_data_mv[0]
B_indices = &B_indices_mv[0]
B_index_pointer = &B_index_pointer_mv[0]
# Create a linear operator object
if B_is_identity:
self.Aop_float = new cCSRAffineMatrixFunction[float](
A_data,
A_indices,
A_index_pointer,
A_num_rows,
A_num_columns)
else:
self.Aop_float = new cCSRAffineMatrixFunction[float](
A_data,
A_indices,
A_index_pointer,
A_num_rows,
A_num_columns,
B_data,
B_indices,
B_index_pointer)
# =====================
# set csr matrix double
# =====================
def set_csr_matrix_double(self, A, B, B_is_identity):
"""
"""
# Matrix size
cdef LongIndexType A_num_rows = A.shape[0]
cdef LongIndexType A_num_columns = A.shape[1]
        # If the input type is the same as LongIndexType, no copy is performed.
self.A_indices_copy = \
A.indices.astype(self.long_index_type_name, copy=False)
self.A_index_pointer_copy = \
A.indptr.astype(self.long_index_type_name, copy=False)
# Declare memoryviews to get pointers
cdef double[:] A_data_mv = A.data
cdef MemoryViewLongIndexType A_indices_mv = self.A_indices_copy
cdef MemoryViewLongIndexType A_index_pointer_mv = \
self.A_index_pointer_copy
cdef double[:] B_data_mv = None
cdef MemoryViewLongIndexType B_indices_mv = None
cdef MemoryViewLongIndexType B_index_pointer_mv = None
if not B_is_identity:
# If input type is the same as LongIndexType, no copy is performed.
self.B_indices_copy = \
B.indices.astype(self.long_index_type_name, copy=False)
self.B_index_pointer_copy = \
B.indptr.astype(self.long_index_type_name, copy=False)
B_data_mv = B.data
B_indices_mv = self.B_indices_copy
B_index_pointer_mv = self.B_index_pointer_copy
# Declare pointers
cdef double* A_data = &A_data_mv[0]
cdef LongIndexType* A_indices = &A_indices_mv[0]
cdef LongIndexType* A_index_pointer = &A_index_pointer_mv[0]
cdef double* B_data = NULL
cdef LongIndexType* B_indices = NULL
cdef LongIndexType* B_index_pointer = NULL
if not B_is_identity:
B_data = &B_data_mv[0]
B_indices = &B_indices_mv[0]
B_index_pointer = &B_index_pointer_mv[0]
# Create a linear operator object
if B_is_identity:
self.Aop_double = new cCSRAffineMatrixFunction[double](
A_data,
A_indices,
A_index_pointer,
A_num_rows,
A_num_columns)
else:
self.Aop_double = new cCSRAffineMatrixFunction[double](
A_data,
A_indices,
A_index_pointer,
A_num_rows,
A_num_columns,
B_data,
B_indices,
B_index_pointer)
# ==========================
# set csr matrix long double
# ==========================
def set_csr_matrix_long_double(self, A, B, B_is_identity):
"""
"""
# Matrix size
cdef LongIndexType A_num_rows = A.shape[0]
cdef LongIndexType A_num_columns = A.shape[1]
# If the input type is the same as LongIndexType, no copy is performed.
self.A_indices_copy = \
A.indices.astype(self.long_index_type_name, copy=False)
self.A_index_pointer_copy = \
A.indptr.astype(self.long_index_type_name, copy=False)
# Declare memoryviews to get pointers
cdef long double[:] A_data_mv = A.data
cdef MemoryViewLongIndexType A_indices_mv = self.A_indices_copy
cdef MemoryViewLongIndexType A_index_pointer_mv = \
self.A_index_pointer_copy
cdef long double[:] B_data_mv = None
cdef MemoryViewLongIndexType B_indices_mv = None
cdef MemoryViewLongIndexType B_index_pointer_mv = None
if not B_is_identity:
# If input type is the same as LongIndexType, no copy is performed.
self.B_indices_copy = \
B.indices.astype(self.long_index_type_name, copy=False)
self.B_index_pointer_copy = \
B.indptr.astype(self.long_index_type_name, copy=False)
B_data_mv = B.data
B_indices_mv = self.B_indices_copy
B_index_pointer_mv = self.B_index_pointer_copy
# Declare pointers
cdef long double* A_data = &A_data_mv[0]
cdef LongIndexType* A_indices = &A_indices_mv[0]
cdef LongIndexType* A_index_pointer = &A_index_pointer_mv[0]
cdef long double* B_data = NULL
cdef LongIndexType* B_indices = NULL
cdef LongIndexType* B_index_pointer = NULL
if not B_is_identity:
B_data = &B_data_mv[0]
B_indices = &B_indices_mv[0]
B_index_pointer = &B_index_pointer_mv[0]
# Create a linear operator object
if B_is_identity:
self.Aop_long_double = new cCSRAffineMatrixFunction[long double](
A_data,
A_indices,
A_index_pointer,
A_num_rows,
A_num_columns)
else:
self.Aop_long_double = new cCSRAffineMatrixFunction[long double](
A_data,
A_indices,
A_index_pointer,
A_num_rows,
A_num_columns,
B_data,
B_indices,
B_index_pointer)
# ====================
# set csc matrix float
# ====================
def set_csc_matrix_float(self, A, B, B_is_identity):
"""
"""
# Matrix size
cdef LongIndexType A_num_rows = A.shape[0]
cdef LongIndexType A_num_columns = A.shape[1]
# If the input type is the same as LongIndexType, no copy is performed.
self.A_indices_copy = \
A.indices.astype(self.long_index_type_name, copy=False)
self.A_index_pointer_copy = \
A.indptr.astype(self.long_index_type_name, copy=False)
# Declare memoryviews to get pointers
cdef float[:] A_data_mv = A.data
cdef MemoryViewLongIndexType A_indices_mv = self.A_indices_copy
cdef MemoryViewLongIndexType A_index_pointer_mv = \
self.A_index_pointer_copy
cdef float[:] B_data_mv = None
cdef MemoryViewLongIndexType B_indices_mv = None
cdef MemoryViewLongIndexType B_index_pointer_mv = None
if not B_is_identity:
# If input type is the same as LongIndexType, no copy is performed.
self.B_indices_copy = \
B.indices.astype(self.long_index_type_name, copy=False)
self.B_index_pointer_copy = \
B.indptr.astype(self.long_index_type_name, copy=False)
B_data_mv = B.data
B_indices_mv = self.B_indices_copy
B_index_pointer_mv = self.B_index_pointer_copy
# Declare pointers
cdef float* A_data = &A_data_mv[0]
cdef LongIndexType* A_indices = &A_indices_mv[0]
cdef LongIndexType* A_index_pointer = &A_index_pointer_mv[0]
cdef float* B_data = NULL
cdef LongIndexType* B_indices = NULL
cdef LongIndexType* B_index_pointer = NULL
if not B_is_identity:
B_data = &B_data_mv[0]
B_indices = &B_indices_mv[0]
B_index_pointer = &B_index_pointer_mv[0]
# Create a linear operator object
if B_is_identity:
self.Aop_float = new cCSCAffineMatrixFunction[float](
A_data,
A_indices,
A_index_pointer,
A_num_rows,
A_num_columns)
else:
self.Aop_float = new cCSCAffineMatrixFunction[float](
A_data,
A_indices,
A_index_pointer,
A_num_rows,
A_num_columns,
B_data,
B_indices,
B_index_pointer)
# =====================
# set csc matrix double
# =====================
def set_csc_matrix_double(self, A, B, B_is_identity):
"""
"""
# Matrix size
cdef LongIndexType A_num_rows = A.shape[0]
cdef LongIndexType A_num_columns = A.shape[1]
# If the input type is the same as LongIndexType, no copy is performed.
self.A_indices_copy = \
A.indices.astype(self.long_index_type_name, copy=False)
self.A_index_pointer_copy = \
A.indptr.astype(self.long_index_type_name, copy=False)
# Declare memoryviews to get pointers
cdef double[:] A_data_mv = A.data
cdef MemoryViewLongIndexType A_indices_mv = self.A_indices_copy
cdef MemoryViewLongIndexType A_index_pointer_mv = \
self.A_index_pointer_copy
cdef double[:] B_data_mv = None
cdef MemoryViewLongIndexType B_indices_mv = None
cdef MemoryViewLongIndexType B_index_pointer_mv = None
if not B_is_identity:
# If input type is the same as LongIndexType, no copy is performed.
self.B_indices_copy = \
B.indices.astype(self.long_index_type_name, copy=False)
self.B_index_pointer_copy = \
B.indptr.astype(self.long_index_type_name, copy=False)
B_data_mv = B.data
B_indices_mv = self.B_indices_copy
B_index_pointer_mv = self.B_index_pointer_copy
# Declare pointers
cdef double* A_data = &A_data_mv[0]
cdef LongIndexType* A_indices = &A_indices_mv[0]
cdef LongIndexType* A_index_pointer = &A_index_pointer_mv[0]
cdef double* B_data = NULL
        cdef LongIndexType* B_indices = NULL
cdef LongIndexType* B_index_pointer = NULL
if not B_is_identity:
B_data = &B_data_mv[0]
B_indices = &B_indices_mv[0]
B_index_pointer = &B_index_pointer_mv[0]
# Create a linear operator object
if B_is_identity:
self.Aop_double = new cCSCAffineMatrixFunction[double](
A_data,
A_indices,
A_index_pointer,
A_num_rows,
A_num_columns)
else:
self.Aop_double = new cCSCAffineMatrixFunction[double](
A_data,
A_indices,
A_index_pointer,
A_num_rows,
A_num_columns,
B_data,
B_indices,
B_index_pointer)
# ==========================
# set csc matrix long double
# ==========================
def set_csc_matrix_long_double(self, A, B, B_is_identity):
"""
"""
# Matrix size
cdef LongIndexType A_num_rows = A.shape[0]
cdef LongIndexType A_num_columns = A.shape[1]
# If the input type is the same as LongIndexType, no copy is performed.
self.A_indices_copy = \
A.indices.astype(self.long_index_type_name, copy=False)
self.A_index_pointer_copy = \
A.indptr.astype(self.long_index_type_name, copy=False)
# Declare memoryviews to get pointers
cdef long double[:] A_data_mv = A.data
cdef MemoryViewLongIndexType A_indices_mv = self.A_indices_copy
cdef MemoryViewLongIndexType A_index_pointer_mv = \
self.A_index_pointer_copy
cdef long double[:] B_data_mv = None
cdef MemoryViewLongIndexType B_indices_mv = None
cdef MemoryViewLongIndexType B_index_pointer_mv = None
if not B_is_identity:
# If input type is the same as LongIndexType, no copy is performed.
self.B_indices_copy = \
B.indices.astype(self.long_index_type_name, copy=False)
self.B_index_pointer_copy = \
B.indptr.astype(self.long_index_type_name, copy=False)
B_data_mv = B.data
B_indices_mv = self.B_indices_copy
B_index_pointer_mv = self.B_index_pointer_copy
# Declare pointers
cdef long double* A_data = &A_data_mv[0]
cdef LongIndexType* A_indices = &A_indices_mv[0]
cdef LongIndexType* A_index_pointer = &A_index_pointer_mv[0]
cdef long double* B_data = NULL
cdef LongIndexType* B_indices = NULL
cdef LongIndexType* B_index_pointer = NULL
if not B_is_identity:
B_data = &B_data_mv[0]
B_indices = &B_indices_mv[0]
B_index_pointer = &B_index_pointer_mv[0]
# Create a linear operator object
if B_is_identity:
self.Aop_long_double = new cCSCAffineMatrixFunction[long double](
A_data,
A_indices,
A_index_pointer,
A_num_rows,
A_num_columns)
else:
self.Aop_long_double = new cCSCAffineMatrixFunction[long double](
A_data,
A_indices,
A_index_pointer,
A_num_rows,
A_num_columns,
B_data,
B_indices,
B_index_pointer)
<|end_of_text|># distutils: language = c++
# not using distutils for libraries; Visual Studio auto-linking doesn't like it
include 'quantlib/types.pxi'
cimport pybg.version
from libcpp.map cimport map
from libcpp.string cimport string
from cython.operator cimport dereference as deref, preincrement as inc
from cython.operator cimport address
cimport pybg._curves as _curves
cimport pybg.quantlib.time._date as _qldate
cimport pybg.quantlib.time._period as _qlperiod
cimport pybg.quantlib.time.date as qldate
cimport pybg.quantlib.time._calendar as _calendar
from pybg.quantlib.time._period cimport Frequency as _Frequency
from pybg.quantlib.time._calendar cimport (
BusinessDayConvention as _BusinessDayConvention
)
from pybg.quantlib.handle cimport shared_ptr
from pybg.ql cimport _pydate_from_qldate, _qldate_from_pydate
from pybg.settings import get_eval_date, set_eval_date
from pybg.enums import TimeUnits, Calendars, DayCounters
cimport pybg.quantlib.time.calendar as calendar
from pybg.quantlib.time.daycounter cimport DayCounter
from pybg.tenor import Tenor
from datetime import date
cdef public enum RateHelperType:
DEPO = _curves.DEPO
FRA = _curves.FRA
FUT = _curves.FUT
SWAP = _curves.SWAP
cdef _curves.CurveMap curveMap_from_dict(pycurve):
cdef _curves.CurveMap curve
cdef char* tnr
cdef Rate value
for t, value in pycurve.items():
t = t.upper()
tnr = t
curve[<string>tnr] = value
return curve
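# For example (illustrative input): curveMap_from_dict({'3m': 0.0125,
# '2y': 0.0210}) produces a std::map<string, Rate> keyed by the upper-cased
# tenors '3M' and '2Y'.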
cdef object dict_from_CurveMap(_curves.CurveMap crv):
cdef map[string, Rate].iterator iter
pycurve = {}
iter = crv.begin()
    while iter != crv.end():
pycurve[deref(iter).first.c_str()] = deref(iter).second
inc(iter)
return pycurve
cdef class CurveBase:
"""Curve Base
"""
def __cinit__(self):
self._thisptr = NULL
def __dealloc__(self):
if self._thisptr is not NULL:
del self._thisptr
def __init__(self,
calendar.Calendar crv_calendar,
Integer fixingDays,
DayCounter depositDayCounter,
fixedRateFrequency,
fixedInstrumentConvention,
DayCounter fixedInstrumentDayCounter,
DayCounter termStructureDayCounter):
'''
Curve Base Class:
Calendar calendar,
Integer fixingDays,
DayCounter depositDayCounter,
Frequency fixedRateFrequency,
BusinessDayConvention fixedInstrumentConvention,
DayCounter fixedInstrumentDayCounter,
DayCounter termStructureDayCounter
'''
# TODO: give CurveBase the ability to set yieldTermStruct pointer, etc
# and you can use this
self._thisptr = new shared_ptr[_curves.CurveBase]( \
new _curves.CurveBase(
deref(crv_calendar._thisptr),
fixingDays,
deref(depositDayCounter._thisptr),
<_Frequency>fixedRateFrequency,
<_BusinessDayConvention>fixedInstrumentConvention,
deref(fixedInstrumentDayCounter._thisptr),
deref(termStructureDayCounter._thisptr)
)
)
cdef class RateHelperCurve:
"""Rate Helper Curve
"""
def __cinit__(self):
self._thisptr = NULL
def __dealloc__(self):
if self._thisptr is not NULL:
del self._thisptr
def __init__(self, CurveBase curvebase):
cdef _curves.CurveBase *_crvbase
try:
_crvbase = curvebase._thisptr.get()
self._thisptr = new shared_ptr[_curves.RateHelperCurve]( \
new _curves.RateHelperCurve(deref(_crvbase))
)
except:
self._thisptr = new shared_ptr[_curves.RateHelperCurve]( \
new _curves.RateHelperCurve()
)
def add_helpers(self, rh_type, curvemap):
cdef _curves.CurveMap _curvemap
_curvemap = curveMap_from_dict(curvemap)
if rh_type == DEPO:
self._thisptr.get().add_depos(_curvemap)
elif rh_type == FUT:
self._thisptr.get().add_futs(_curvemap)
elif rh_type == SWAP:
self._thisptr.get().add_swaps(_curvemap)
else:
            raise ValueError("Type: %s invalid ratehelper" % rh_type)
def update(self, depos=None, futures=None, swaps=None, evaldate=None):
MSG_ARGS = "RateHelperCurve.update must have at least one curve"
assert any((depos, futures, swaps)), MSG_ARGS
cdef _curves.CurveMap depocurve
cdef _curves.CurveMap futcurve
cdef _curves.CurveMap swapcurve
#TODO: validate curve inputs
# check that tenors/future dates don't overlap
if depos:
depocurve = curveMap_from_dict(depos)
if futures:
futcurve = curveMap_from_dict(futures)
if swaps:
swapcurve = curveMap_from_dict(swaps)
if not evaldate:
evaldate = get_eval_date()
self.curveDate = evaldate
self._thisptr.get().update(depocurve | Cython |
, futcurve, swapcurve,
_qldate_from_pydate(self.curveDate))
return self.curveDate
def validateNewCurve(self, depos=None, futures=None, swaps=None):
new_crv = {}
new_crv.update(swaps)
new_crv.update(futures)
new_crv.update(depos)
prv_crv = self.curveQuotes
if prv_crv and all([new_crv.get(h, None) for h in prv_crv]):
isValid = True
else:
isValid = False
return isValid
def advanceCurveDate(self, int ndays, timeunit=None):
if not timeunit:
timeunit = TimeUnits.Days
        cdef int _ndays = int(ndays)
        self._thisptr.get().advanceCurveDate(_ndays,
                                             <_qlperiod.TimeUnit> timeunit)
property curveDate:
def __get__(self):
cdef _qldate.Date _refdate = self._thisptr.get().curveDate()
return _pydate_from_qldate(_refdate)
def __set__(self, curve_date):
cdef _qldate.Date _refdate
try:
_refdate = _qldate_from_pydate(curve_date)
except:
_refdate = _qldate_from_pydate(date.today())
self._thisptr.get().setCurveDate(_refdate)
property referenceDate:
def __get__(self):
cdef _qldate.Date _refdate = self._thisptr.get().referenceDate()
return _pydate_from_qldate(_refdate)
property maxDate:
def __get__(self):
cdef _qldate.Date _qdate_ref = self._thisptr.get().maxDate()
return _pydate_from_qldate(_qdate_ref)
property fixingdays:
def __get__(self):
cdef int fixdays_ref = self._thisptr.get().fixingDays()
return fixdays_ref
property curveQuotes:
def __get__(self):
cdef _curves.CurveMap crv = self._thisptr.get().curveQuotes()
pycrv = dict_from_CurveMap(crv)
return pycrv
property calendar:
def __get__(self):
cdef _calendar.Calendar crv_cal = self._thisptr.get().calendar()
cdef string cal_name = crv_cal.name()
return Calendars().get(cal_name.c_str(), None)
# Curve functions
def tenorquote(self, key):
key = key.upper()
cdef char* tnr = key
cdef Rate rate = self._thisptr.get().tenorquote(<string>tnr)
return rate
def par_rate(self, tenor, freq=2):
"""
Returns par rate for given tenor.
tenor: e.g. "3M", "10Y"
freq: coupon frequency as integer number of payments for year
"""
tnr = Tenor(tenor)
nperiods = tnr.numberOfPeriods(freq)
dfreq = float(freq)
mdisc = (1. - self.discount(tnr.term))
cpndisc = sum([self.discount((n+1.)/dfreq) for n in range(nperiods)])
parrate = dfreq * (mdisc / cpndisc if cpndisc > 0. else mdisc)
return parrate
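    # Worked example for par_rate (hypothetical discount factors): with
    # semi-annual coupons (freq=2) on a "1Y" tenor, nperiods == 2; if
    # discount(0.5) == 0.99 and discount(1.0) == 0.98, then
    # parrate = 2 * (1 - 0.98) / (0.99 + 0.98) ~= 0.0203.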
def discount(self, ref):
cdef double yrs
cdef double discfactor
if type(ref) == type(date.today()):
yrs = DayCounters.year_fraction(self.referenceDate, ref)
else:
try:
yrs = <double>ref
except:
yrs = 0.0
discfactor = self._thisptr.get().discount(yrs, True)
return discfactor
#BondCurve
#Create curve from bond helpers
cdef class BondHelperQuote:
"""BondHelperQuote
"""
def __cinit__(self):
self._thisptr = NULL
def __dealloc__(self):
if self._thisptr is not NULL:
del self._thisptr
def __init__(self,
Real px_quote,
object maturity,
Rate coupon,
object issue_date=None):
if issue_date:
self._thisptr = new shared_ptr[_curves.BondHelperQuote]( \
new _curves.BondHelperQuote(px_quote,
_qldate_from_pydate(maturity),
coupon,
_qldate_from_pydate(issue_date)) )
else:
self._thisptr = new shared_ptr[_curves.BondHelperQuote]( \
new _curves.BondHelperQuote(px_quote,
_qldate_from_pydate(maturity),
coupon) )
# Inspectors
property maturity:
def __get__(self):
cdef _qldate.Date mty
cdef object result
mty = self._thisptr.get().maturity()
result = _pydate_from_qldate(mty)
return result
property coupon:
def __get__(self):
cdef Real val
val = self._thisptr.get().coupon()
return val
property quote:
def __get__(self):
cdef Real val
val = self._thisptr.get().quote()
return val
cdef _curves.BondCurveMap bondCurveMap_from_dict(pycurve):
cdef _curves.BondCurveMap curve
cdef char* bnd_id
for t, values in pycurve.items():
t = t.upper()
bnd_id = t
curve[<string>bnd_id] = deref(BondHelperQuote(*values)._thisptr.get())
return curve
cdef class BondCurve:
"""Bond Helper Curve
"""
def __init__(self, CurveBase curvebase):
cdef _curves.CurveBase *_crvbase
try:
_crvbase = curvebase._thisptr.get()
self._thisptr = new shared_ptr[_curves.RateHelperCurve]( \
new _curves.BondCurve(deref(_crvbase))
)
except:
self._thisptr = new shared_ptr[_curves.RateHelperCurve]( \
new _curves.BondCurve()
)
def add_helpers(self, bondcurve, depocurve=None):
cdef _curves.BondCurveMap _bondcurve
cdef _curves.CurveMap _depocurve
if depocurve:
_depocurve = curveMap_from_dict(depocurve)
self._thisptr.get().add_depos(_depocurve)
_bondcurve = bondCurveMap_from_dict(bondcurve)
(<_curves.BondCurve *>self._thisptr.get()).add_bonds(_bondcurve)
def update(self, bondcurve, depocurve=None, evaldate=None):
cdef _curves.BondCurveMap _bondcurve
cdef _curves.CurveMap _depocurve
if depocurve:
_depocurve = curveMap_from_dict(depocurve)
_bondcurve = bondCurveMap_from_dict(bondcurve)
if not evaldate:
evaldate = get_eval_date()
self.curveDate = evaldate
(<_curves.BondCurve *>self._thisptr.get()).update(
_bondcurve,
_depocurve,
_qldate_from_pydate(self.curveDate)
)
return self.curveDate
def validateNewCurve(self, depos=None, bonds=None):
new_crv = {}
new_crv.update(bonds)
new_crv.update(depos)
prv_crv = self.curveQuotes
if prv_crv and all([new_crv.get(h, None) for h in prv_crv]):
isValid = True
else:
isValid = False
return isValid
<|end_of_text|>import sys
import numpy as np
cimport numpy as np
from libc.stdint cimport intptr_t
cdef extern from "ftz.h":
int fftz(float* x, size_t length)
int dftz(double* x, size_t length)
def ftz(np.ndarray data):
"""Flush denormalized numbers to zero in place.
Denormalized numbers ("denorms") are extremely small numbers (less than
1.2-38 for single precision or 2.2e-308 in double precision) which are
handled poorly on most modern archicectures. Arithmetic involving denorms
can be up to 100x slower than arithmetic on standard floating point
numbers. Denorms can also give annoying behavior, like overflowing when
you take their reciprocal.
Parameters
----------
data : np.ndarray, dtype = {float32 or float64}
An array of floating point numbers.
"""
if not (data.flags['C_CONTIGUOUS'] or data.flags['F_CONTIGUOUS']):
return _ftz_numpy(data)
if len(data) == 0:
raise ValueError('data must have length > 0')
    cdef np.ndarray[dtype=np.float32_t] fdata
cdef np.ndarray[dtype=np.float64_t] ddata
if data.dtype == np.float32:
fdata = data.reshape(-1)
fftz(&fdata[0], len(fdata))
elif data.dtype == np.float64:
ddata = data.reshape(-1)
dftz(&ddata[0], len(ddata))
else:
raise TypeError('data must be of type float32 or float64.')
def _ftz_numpy(np.ndarray data):
"""Flush denormalized numbers to zero in place using numpy.
This code does not require contiguity or alignment, but is not as fast.
Parameters
----------
data : np.ndarray, dtype = {float32 or float64}
An array of floating point numbers.
"""
if data.dtype not in [np.float32, np.float64]:
raise TypeError('data must be of type float32 or float64.')
bound = np.finfo(data.dtype).tiny
mask = np.logical_or(data > bound, data < -bound)
np.multiply(data, mask, out=data)
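# A minimal usage sketch (assumes this module is compiled and imported as
# `ftz`; the denormal literals are illustrative float64 values):
#
#     import numpy as np
#     x = np.array([1.0, 1e-310, -3e-320], dtype=np.float64)
#     ftz(x)                        # contiguous float64 -> dftz() fast path
#     assert x[1] == 0.0 and x[2] == 0.0
#     y = np.zeros((4, 4))[::2, ::2]   # non-contiguous view
#     ftz(y)                        # handled by the _ftz_numpy() fallback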
<|end_of_text|># mode: error
def f(obj1a, obj1b):
cdef int int1, int2, int3
cdef int *ptr2
int1, int3, obj1a = int2, ptr2, obj1b # error
_ERRORS = u"""
6:27: Cannot assign type 'int *' to 'int'
"""
<|end_of_text|># distutils: language = c++
import pandas as pd
from cython.operator cimport postincrement as inc, postdecrement as dec, dereference as deref
from mmabm.sharedc cimport Side, OType
cdef class Orderbook:
'''
Orderbook tracks, processes and matches orders.
Orderbook is a set of linked lists and dictionaries containing trades, bids and asks.
One dictionary contains a history of all orders;
two other dictionaries contain priced bid and ask orders with linked lists for access;
one dictionary contains trades matched with orders on the book.
Orderbook also provides methods for storing and retrieving orders and maintaining a
history of the book.
Public attributes: order_history, confirm_modify_collector, confirm_trade_collector,
trade_book and traded.
Public methods: add_order_to_book(), process_order(), order_history_to_h5(), trade_book_to_h5(),
sip_to_h5() and report_top_of_book()
'''
def __init__(self):
'''
Initialize the Orderbook with a set of empty lists and dicts and other defaults
order_history is a list of all incoming orders (dicts) in the order received
_bid_book_prices and _ask_book_prices are linked (sorted) lists of bid and ask prices
which serve as pointers to:
_bid_book and _ask_book: dicts of current order book state and dicts of orders
        the order id lists maintain time priority for each order at a given price.
confirm_modify_collector and confirm_trade_collector are lists that carry information (dicts) from the
order processor and/or matching engine to the traders
        trade_book is a list of trades in sequence
_order_index identifies the sequence of orders in event time
'''
self.order_history = []
self._bids = BookSide()
self._asks = BookSide()
self.confirm_trade_collector = []
self._sip_collector = []
self.trade_book = []
self._order_index = 0
self._ex_index = 0
self._lookup = LookUp()
self.traded = False
cpdef add_order_to_history(self, dict order):
self._order_index += 1
self.order_history.append({'exid': self._order_index, 'order_id': order['order_id'], 'trader_id': order['trader_id'],
'timestamp': order['timestamp'], 'type': order['type'], 'quantity': order['quantity'],
'side': order['side'], 'price': order['price']})
cpdef add_order_to_book(self, int trader_id, int order_id, int timestamp, int quantity, Side side, int price):
cdef BookSide *b = &self._bids if side == Side.BID else &self._asks
l = deref(b).find(price)
if l == deref(b).end():
l = deref(b).insert(OneLevel(price, Level(0, 0, Quotes()))).first
inc(deref(l).second.cnt)
deref(l).second.qty = deref(l).second.qty + quantity
q = deref(l).second.quotes.insert(deref(l).second.quotes.end(), Quote(trader_id, order_id, timestamp, quantity, side, price))
self._lookup.insert(OneLookUp(OrderId(trader_id, order_id), BLQ(b, l, q)))
cdef void _remove_order(self, int trader_id, int order_id, int quantity):
cdef OrderId oid = OrderId(trader_id, order_id)
cdef BLQ *blq = &self._lookup[oid]
deref(blq.bs_it).second.qty = deref(blq.bs_it).second.qty - quantity
deref(blq.bs_it).second.quotes.erase(blq.q_it)
dec(deref(blq.bs_it).second.cnt)
if not deref(blq.bs_it).second.cnt:
blq.bs_ptr.erase(blq.bs_it)
self._lookup.erase(oid)
cdef void _modify_order(self, int trader_id, int order_id, int quantity):
cdef OrderId oid = OrderId(trader_id, order_id)
cdef BLQ *blq = &self._lookup[oid]
deref(blq.bs_it).second.qty = deref(blq.bs_it).second.qty - quantity
deref(blq.q_it).qty = deref(blq.q_it).qty - quantity
if not deref(blq.q_it).qty:
deref(blq.bs_it).second.quotes.erase(blq.q_it)
dec(deref(blq.bs_it).second.cnt)
if not deref(blq.bs_it).second.cnt:
blq.bs_ptr.erase(blq.bs_it)
self._lookup.erase(oid)
cdef void _add_trade_to_book(self, int resting_trader_id, int resting_order_id, int resting_timestamp,
int incoming_trader_id, int incoming_order_id,
int timestamp, int price, int quantity, Side side):
        self.trade_book.append({'resting_trader_id': resting_trader_id, 'resting_order_id': resting_order_id, 'resting_timestamp': resting_timestamp,
                                'incoming_trader_id': incoming_trader_id, 'incoming_order_id': incoming_order_id, 'timestamp': timestamp, 'price': price,
                                'quantity': quantity, 'side': side})
cdef void _confirm_trade(self, int timestamp, Side order_side, int order_quantity, int order_id,
int order_price, int trader_id):
        self.confirm_trade_collector.append({'timestamp': timestamp, 'trader': trader_id, 'order_id': order_id,
                                             'quantity': order_quantity, 'side': order_side, 'price': order_price})
cdef BookTop get_bid(self):
if self._bids.empty():
return BookTop(0, 0)
else:
return BookTop(deref(self._bids.rbegin()).first, deref(self._bids.rbegin()).second.qty)
cdef BookTop get_ask(self):
if self._asks.empty():
return BookTop(0, 0)
else:
return BookTop(deref(self._asks.begin()).first, deref(self._asks.begin()).second.qty)
cpdef process_order(self, dict order):
self.traded = False
self.add_order_to_history(order)
if order['type'] == OType.ADD:
if order['side'] == Side.BID:
if order['price'] >= self.get_ask().first:
self._match_trade(order['trader_id'], order['order_id'], order['timestamp'],
order['quantity'], order['side'], order['price'])
else:
self.add_order_to_book(order['trader_id'], order['order_id'], order['timestamp'],
order['quantity'], order['side'], order['price'])
else:
if order['price'] <= self.get_bid().first:
self._match_trade(order['trader_id'], order['order_id'], order['timestamp'],
order['quantity'], order['side'], order['price'])
else:
self.add_order_to_book(order['trader_id'], order['order_id'], order['timestamp'],
order['quantity'], order['side'], order['price'])
else:
if order['type'] == OType.CANCEL:
self._remove_order(order['trader_id'], order['order_id'], order['quantity'])
else:
self._modify_order(order['trader_id'], order['order_id'], order['quantity'])
cdef void _match_trade(self, int trader_id, int order_id, int timestamp, int quantity, Side side, int price):
self.traded = True
self.confirm_trade_collector.clear()
cdef int best
if side == Side.BID:
while quantity > 0:
best = self.get_ask().first
                if best:
                    if price >= best:
qq = self._asks[best].quotes.front()
if quantity >= qq.qty:
self._confirm_trade(timestamp, qq.side, qq.qty, qq.order_id,
qq.price, qq.trader_id)
self._add_trade_to_book(qq.trader_id, qq.order_id, qq.timestamp,
trader_id, order_id, timestamp,
qq.price, qq.qty, side)
quantity -= qq.qty
self._remove_order(qq.trader_id, qq.order_id, qq.qty)
else:
self._confirm_trade(timestamp, qq.side, quantity, qq.order_id,
qq.price, qq.trader_id)
self._add_trade_to_book(qq.trader_id, qq.order_id, qq.timestamp,
trader_id, order_id, timestamp,
qq.price, quantity, side)
                            self._modify_order(qq.trader_id, qq.order_id, quantity)
break
else:
self.add_order_to_book(trader_id, order_id, timestamp, quantity, side, price)
break
else:
print('Ask Market Collapse with order {0} - {1}'.format(trader_id, order_id))
break
else:
while quantity > 0:
best = self.get_bid().first
if best:
if price <= best:
qq = self._bids[best].quotes.front()
if quantity >= qq.qty:
self._confirm_trade(timestamp, qq.side, qq.qty, qq.order_id,
qq.price, qq.trader_id)
self._add_trade_to_book(qq.trader_id, qq.order_id, qq.timestamp,
trader_id, order_id, timestamp,
qq.price, qq.qty, side)
quantity -= qq.qty
self._remove_order(qq.trader_id, qq.order_id, qq.qty)
else:
self._confirm_trade(timestamp, qq.side, quantity, qq.order_id,
qq.price, qq.trader_id)
self._add_trade_to_book(qq.trader_id, qq.order_id, qq.timestamp,
trader_id, order_id, timestamp,
qq.price, quantity, side)
                            self._modify_order(qq.trader_id, qq.order_id, quantity)
break
else:
self.add_order_to_book(trader_id, order_id, timestamp, quantity, side, price)
break
else:
print('Bid Market Collapse with order {0} - {1}'.format(trader_id, order_id))
break
def order_history_to_h5(self, filename):
temp_df = pd.DataFrame(self.order_history)
temp_df.to_hdf(filename, 'orders', append=True, format='table', complevel=5, complib='blosc')
self.order_history.clear()
def trade_book_to_h5(self, filename):
temp_df = pd.DataFrame(self.trade_book)
temp_df.to_hdf(filename, 'trades', append=True, format='table', complevel=5, complib='blosc')
self.trade_book.clear()
def sip_to_h5(self, filename):
temp_df = pd.DataFrame(self._sip_collector)
temp_df.to_hdf(filename, 'tob', append=True, format='table', complevel=5, complib='blosc')
self._sip_collector.clear()
cpdef dict report_top_of_book(self, int now_time):
best_ask = self.get_ask()
best_bid = self.get_bid()
cdef dict tob = {'timestamp': now_time, 'best_bid': best_bid.first, 'best_ask': best_ask.first, 'bid_size': best_bid.second, 'ask_size': best_ask.second}
self._sip_collector.append(tob)
return tob
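# A minimal usage sketch (illustrative; assumes this extension is compiled and
# the order dicts follow the layout consumed by process_order() above). A
# resting ask is crossed by a smaller bid, leaving 6 units at the ask:
#
#     ob = Orderbook()
#     ob.process_order({'order_id': 1, 'trader_id': 1, 'timestamp': 1,
#                       'type': OType.ADD, 'quantity': 10,
#                       'side': Side.ASK, 'price': 100})
#     ob.process_order({'order_id': 1, 'trader_id': 2, 'timestamp': 2,
#                       'type': OType.ADD, 'quantity': 4,
#                       'side': Side.BID, 'price': 100})
#     assert ob.traded and ob.trade_book[-1]['quantity'] == 4
#     ob.report_top_of_book(3)   # {'best_ask': 100, 'ask_size': 6, ...}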
<|end_of_text|># encoding: utf-8
# cython: language_level=3
# cython: cdivision=True
# cython: boundscheck=False
# cython: wraparound=False
# cython: nonecheck=False
# cython: initializedcheck=False
from libc.math cimport exp, log
import numpy as np
cimport numpy as np
cpdef double log_likelihood(double[:, :, ::1] Y,
double[:, ::1] X,
double[:, ::1] lmbda,
double[:, ::1] delta,
size_t n_features) nogil:
cdef size_t k, i, j, p, q
cdef size_t n_layers = Y.shape[0]
cdef size_t n_nodes = delta.shape[1]
cdef double eta = 0.
cdef double loglik = 0.
for k in range(n_layers):
for i in range(n_nodes):
for j in range(i):
                if Y[k, i, j] != -1.:
eta = delta[k, i] + delta[k, j]
for p in range(n_features):
eta += lmbda[k, p] * X[i, p] * X[j, p]
loglik += Y[k, i, j] * eta - log(1 + exp(eta))
return loglik
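# A pure-NumPy restatement of the same likelihood (an illustrative reference
# for testing the nogil kernel above; entries of Y equal to -1 are treated as
# missing, matching the guard in the inner loop):
#
#     import numpy as np
#     def log_likelihood_np(Y, X, lmbda, delta):
#         ll = 0.0
#         for k in range(Y.shape[0]):
#             eta = (delta[k][:, None] + delta[k][None, :]
#                    + (X * lmbda[k]) @ X.T)
#             iu = np.tril_indices(Y.shape[1], k=-1)
#             y, e = Y[k][iu], eta[iu]
#             mask = y != -1.0
#             ll += np.sum(y[mask] * e[mask] - np.log1p(np.exp(e[mask])))
#         return ll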
<|end_of_text|>import errno
import os
import grp
import pwd
from functools import lru_cache
from libc.errno cimport errno as c_errno
from cpython.mem cimport PyMem_Free
from libc.stddef cimport wchar_t
cdef extern from "wchar.h":
# https://www.man7.org/linux/man-pages/man3/wcswidth.3.html
cdef int wcswidth(const wchar_t *s, size_t n)
cdef extern from "Python.h":
# https://docs.python.org/3/c-api/unicode.html#c.PyUnicode_AsWideCharString
wchar_t* PyUnicode_AsWideCharString(object, Py_ssize_t*) except NULL
def get_errno():
return c_errno
def swidth(s):
cdef Py_ssize_t size
cdef wchar_t *as_wchar = PyUnicode_AsWideCharString(s, &size)
terminal_width = wcswidth(as_wchar, <size_t>size)
PyMem_Free(as_wchar)
if terminal_width >= 0:
return terminal_width
else:
return len(s)
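# For instance (illustrative, on a conforming libc): East Asian fullwidth
# characters occupy two terminal cells, so swidth('ab') == 2 while
# swidth('\u4f60\u597d') == 4; for strings wcswidth() rejects (e.g. ones
# containing control characters), swidth() falls back to len(s).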
def process_alive(host, pid, thread):
"""
Check whether the (host, pid, thread_id) combination corresponds to a process potentially alive.
If the process is local, then this will be accurate. If the process is not local, then this
returns always True, since there is no real way to check.
"""
    from . import local_pid_alive
    from . import hostid
assert isinstance(host, str)
assert isinstance(hostid, str)
assert isinstance(pid, int)
assert isinstance(thread, int)
if host!= hostid:
return True
if thread!= 0:
# Currently thread is always 0, if we ever decide to set this to a non-zero value,
# this code needs to be revisited, too, to do a sensible thing
return True
return local_pid_alive(pid)
def local_pid_alive(pid):
"""Return whether *pid* is alive."""
try:
# This doesn't work on Windows.
# This does not kill anything, 0 means "see if we can send a signal to this process or not".
# Possible errors: No such process (== stale lock) or permission denied (not a stale lock).
# If the exception is not raised that means such a pid is valid and we can send a signal to it.
os.kill(pid, 0)
return True
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH = no such process
return False
# Any other error (eg. permissions) means that the process ID refers to a live process.
return True
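# For instance (illustrative): local_pid_alive(os.getpid()) returns True for
# the running process itself, while the pid of a process that has fully
# exited makes os.kill() raise ESRCH, so the function returns False.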
@lru_cache(maxsize=None)
def uid2user(uid, default=None):
try:
return pwd.getpwuid(uid).pw_name
except KeyError:
return default
@lru_cache(maxsize=None)
def user2uid(user, default=None):
if not user:
return default
try:
return pwd.getpwnam(user).pw_uid
except KeyError:
return default
@lru_cache(maxsize=None)
def gid2group(gid, default=None):
try:
return grp.getgrgid(gid).gr_name
except KeyError:
return default
@lru_cache(maxsize=None)
def group2gid(group, default=None):
if not group:
return default
try:
return grp.getgrnam(group).gr_gid
except KeyError:
return default
def posix_acl_use_stored_uid_gid(acl):
"""Replace the user/group field with the stored uid/gid
"""
assert isinstance(acl, bytes)
    from ..helpers import safe_decode, safe_encode
entries = []
for entry in safe_decode(acl).split('\n'):
if entry:
fields = entry.split(':')
if len(fields) == 4:
entries.append(':'.join([fields[0], fields[3], fields[2]]))
else:
entries.append(entry)
return safe_encode('\n'.join(entries))
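# Worked example of the rewrite above (illustrative entries): a 4-field entry
# keeps its tag and permissions but swaps the textual name for the stored
# numeric id in the fourth field; shorter entries pass through unchanged.
#
#     posix_acl_use_stored_uid_gid(b'user:alice:rw-:1000\ngroup::r--\n')
#     # -> b'user:1000:rw-\ngroup::r--'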
def getosusername():
"""Return the os user name."""
uid = os.getuid()
return uid2user(uid, uid)
<|end_of_text|># -*- coding: utf-8 -*-
# cython: profile=False
cdef bint is_utc(object tz)
cdef bint is_tzlocal | Cython |
(object tz)
cdef bint treat_tz_as_pytz(object tz)
cdef bint treat_tz_as_dateutil(object tz)
cpdef object get_timezone(object tz)
cpdef get_utcoffset(tzinfo, obj)
<|end_of_text|>import numpy as np
from syri.bin.func.myUsefulFunctions import *
import sys
import time
from igraph import *
from collections import Counter, deque, defaultdict
from scipy.stats import *
from datetime import datetime, date
import pandas as pd
from multiprocessing import Pool
from functools import partial
import os
from gc import collect
from Bio.SeqIO import parse
import logging
import psutil
from Bio.Alphabet import generic_dna
from re import findall
cimport numpy as np
cimport cython
np.random.seed(1)
from syri.pyxFiles.synsearchFunctions import readCoords
def readSRData(cwdPath, prefix, dup = False):
if not isinstance(dup, bool):
sys.exit("need boolean")
if dup:
fin = ["synOut.txt","invOut.txt", "TLOut.txt", "invTLOut.txt", "dupOut.txt", "invDupOut.txt", "ctxOut.txt"]
else:
fin = ["synOut.txt","invOut.txt", "TLOut.txt", "invTLOut.txt","ctxOut.txt"]
annoCoords = pd.DataFrame()
for fileType in fin[:-1]:
try:
fileData = pd.read_table(cwdPath+prefix+fileType, header=None, dtype = object)
except pd.errors.ParserError as _e:
fileData = pd.read_table(cwdPath+prefix+fileType, header=None, dtype = object, engine ="python")
except pd.io.common.EmptyDataError:
print(fileType, " is empty. Skipping analysing it.")
continue
except Exception as _e:
print("ERROR: while trying to read ", fileType, _e)
continue
annoIndices = np.where(fileData[0] == "#")[0]
annoIndices = np.append(annoIndices,len(fileData))
repCount = annoIndices[1:] - annoIndices[:-1] - 1
annoData = fileData.loc[fileData[0] == "#"].copy()
        coordsData = fileData.loc[fileData[0] != "#"].copy()
coordsData = coordsData[[0,1,2,3]].astype(dtype = "int64")
reps = []
for i in annoData[1].unique():
reps.extend(list(range(len(np.where(annoData[1] == i)[0]))))
reps = np.repeat(reps, repCount)
coordsData["group"] = reps
coordsData["aChr"] = list(np.repeat(annoData[1], repCount))
coordsData["bChr"] = list(np.repeat(annoData[5], repCount))
coordsData["state"] = fileType.split("Out.txt")[0]
annoCoords = annoCoords.append(coordsData.copy())
    try:
        fileData = pd.read_table(cwdPath+prefix+"ctxOut.txt", header = None, dtype = object)
except pd.errors.ParserError as e:
fileData = pd.read_table(cwdPath+prefix+"ctxOut.txt", header=None, dtype = object, engine ="python")
except pd.io.common.EmptyDataError:
print("ctxOut.txt is empty. Skipping analysing it.")
except Exception as e:
print("ERROR: while trying to read ", fileType, "Out.txt", e)
    annoIndices = np.where(fileData[0] == "#")[0]
    states = list(fileData[8].loc[annoIndices])
    coordsData = fileData.loc[fileData[0] == "#"].copy()
    coordsData1 = fileData.loc[fileData[0] != "#", [0,1,2,3]].copy().astype(dtype="int")
annoIndices = np.append(annoIndices,len(fileData))
repCount = annoIndices[1:] - annoIndices[:-1] - 1
reps = np.repeat(range(len(annoIndices)-1), repCount)
stateReps = np.repeat(states, repCount)
coordsData1["aChr"] = np.repeat(coordsData[1], repCount).tolist()
coordsData1["bChr"] = np.repeat(coordsData[5], repCount).tolist()
coordsData1["group"] = reps
coordsData1["state"] = stateReps
coordsData1 = coordsData1[[0,1,2,3,"group","aChr","bChr","state"]]
coordsData1.loc[coordsData1.state == "translocation","state"] = "ctx"
coordsData1.loc[coordsData1.state == "invTranslocation","state"] = "invCtx"
coordsData1.loc[coordsData1.state == "duplication","state"] = "ctxDup"
coordsData1.loc[coordsData1.state == "invDuplication","state"] = "ctxInvDup"
if not dup:
coordsData1 = coordsData1.loc[coordsData1["state"].isin(["ctx","invCtx"])]
annoCoords = annoCoords.append(coordsData1)
annoCoords.columns = ["aStart","aEnd","bStart","bEnd","group","aChr","bChr","state"]
annoCoords.sort_values(by = ["aChr", "aStart","aEnd","bChr", "bStart","bEnd"], inplace = True)
annoCoords.index = range(len(annoCoords))
return annoCoords
def runss(_id, _sspath, _delta, allAlignments):
from subprocess import Popen, PIPE
_block = allAlignments.loc[allAlignments.id == _id].copy()
if 1 < len(pd.unique(_block["aChr"])) or 1 < len(pd.unique(_block["bChr"])):
sys.exit("More than one chromosome found for a SR")
_p = Popen([_sspath + " -HrTS " + _delta], stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
_out = _p.communicate(input=_block[["aStart", "aEnd", "bStart", "bEnd", "aChr", "bChr"]].to_string(index=False, header=False).encode())
return "\t".join(["#",
str(_block[["aStart", "aEnd"]].min().min()),
str(_block[["aStart", "aEnd"]].max().max()),
str(_block[["bStart", "bEnd"]].min().min()),
str(_block[["bStart", "bEnd"]].max().max()),
pd.unique(_block["aChr"])[0],
pd.unique(_block["bChr"])[0]])+ "\n" + _out[0].decode("UTF-8")
def getshv(args):
logger = logging.getLogger("ShV")
cwdpath = args.dir
prefix = args.prefix
allAlignments = readSRData(cwdpath, prefix, args.all)
allAlignments["id"] = allAlignments.group.astype("str") + allAlignments.aChr + allAlignments.bChr + allAlignments.state
allBlocks = pd.unique(allAlignments.id)
if not args.cigar:
logger.debug("finding short variation using MUMmer alignments")
nc = args.nCores
        ## Could filter SNPs based on show-snp 'buff', but this feature was removed
# buff = args.buff
buff = 0
sspath = args.sspath
delta = args.delta.name
if delta not in os.listdir(cwdpath):
logger.error("Delta file missing")
sys.exit()
blocklists = [allBlocks[_i:(_i+nc)] for _i in range(0, len(allBlocks), nc)]
with open(cwdpath + prefix + "snps.txt", "w") as fout:
for _id in blocklists:
with Pool(processes=nc) as pool:
out = pool.map(partial(runss, _sspath=sspath, _delta=delta, allAlignments=allAlignments), _id)
for _snp in out:
fout.write(_snp)
if buff > 0:
with open("snps.txt", "r") as fin:
with open("snps_buff"+str(buff)+".txt", "w") as fout:
for line in fin:
if line[0] == "#":
fout.write(line)
else:
_l = line.strip().split("\t")
                            if _l[1] != "." and _l[2] != "." and int(_l[4]) < buff:
continue
else:
fout.write(line)
return None
else:
logger.debug("finding short variation using CIGAR string")
coordsfin = args.infile.name
chrmatch = args.chrmatch
coords = readCoords(coordsfin, chrmatch, cigar=True)
refg = {fasta.id:fasta.seq for fasta in parse(args.ref.name, 'fasta', generic_dna)}
qryg = {fasta.id:fasta.seq for fasta in parse(args.qry.name, 'fasta', generic_dna)}
with open('snps.txt', 'w') as fout:
for b in allBlocks:
block = allAlignments.loc[allAlignments.id == b].copy()
fout.write("\t". | Cython |
join(["#",
str(block[["aStart", "aEnd"]].min().min()),
str(block[["aStart", "aEnd"]].max().max()),
str(block[["bStart", "bEnd"]].min().min()),
str(block[["bStart", "bEnd"]].max().max()),
pd.unique(block["aChr"])[0],
pd.unique(block["bChr"])[0]])+ "\n")
for row in block.itertuples(index=False):
                    if 'inv' not in row.id:
cg = coords.loc[(coords.aStart == row.aStart) &
(coords.aEnd == row.aEnd) &
(coords.bStart == row.bStart) &
(coords.bEnd == row.bEnd) &
(coords.aChr == row.aChr) &
(coords.bChr == row.bChr), 'cigar']
                        brks = findall(r"(\d+)([IDX=])?", cg.iloc[0])
                        # check for validity of CIGAR string
cuts = np.unique([i[1] for i in brks])
for i in cuts:
if i not in ['X','=','I','D']:
logger.error('Invalid CIGAR string. Only (X/=/I/D) operators are allowed')
sys.exit()
refseq = refg[row.aChr][(row.aStart-1):row.aEnd]
qryseq = qryg[row.bChr][(row.bStart-1):row.bEnd]
posa = 0 # number of bases covered in genome a
posb = 0 # number of bases covered in genome b
for i in brks:
if i[1] == '=':
posa += int(i[0])
posb += int(i[0])
elif i[1] == 'X':
for j in range(int(i[0])):
out = [row.aStart+posa+j, refseq[posa+j], qryseq[posb+j], row.bStart+posb+j, 0, 0, 0, 0, 1, 1, row.aChr, row.bChr]
fout.write("\t".join(list(map(str, out))) + '\n')
posa += int(i[0])
posb += int(i[0])
elif i[1] == 'I':
for j in range(int(i[0])):
out = [row.aStart+posa-1, '.', qryseq[posb+j], row.bStart+posb+j, 0, 0, 0, 0, 1, 1, row.aChr, row.bChr]
fout.write("\t".join(list(map(str, out))) + '\n')
posb += int(i[0])
elif i[1] == 'D':
for j in range(int(i[0])):
out = [row.aStart+posa+j, refseq[posa+j], '.', row.bStart+posb-1, 0, 0, 0, 0, 1, 1,row.aChr, row.bChr]
fout.write("\t".join(list(map(str, out))) + '\n')
posa += int(i[0])
else:
cg = coords.loc[(coords.aStart == row.aStart) &
(coords.aEnd == row.aEnd) &
(coords.bStart == row.bStart) &
(coords.bEnd == row.bEnd) &
(coords.aChr == row.aChr) &
(coords.bChr == row.bChr), 'cigar']
                        brks = findall(r"(\d+)([IDX=])?", cg.iloc[0])
                        # check for validity of CIGAR string
cuts = np.unique([i[1] for i in brks])
for i in cuts:
if i not in ['X','=','I','D']:
logger.error('Invalid CIGAR string. Only (X/=/I/D) operators are allowed')
sys.exit()
refseq = refg[row.aChr][(row.aStart-1):row.aEnd]
qryseq = qryg[row.bChr][(row.bEnd-1):row.bStart].reverse_complement()
posa = 0 # number of bases covered in genome a
posb = 0 # number of bases covered in genome b
for i in brks:
if i[1] == '=':
posa += int(i[0])
posb += int(i[0])
elif i[1] == 'X':
for j in range(int(i[0])):
out = [row.aStart+posa+j, refseq[posa+j], qryseq[posb+j], row.bStart-posb-j, 0, 0, 0, 0, 1, 1, row.aChr, row.bChr]
fout.write("\t".join(list(map(str, out))) + '\n')
posa += int(i[0])
posb += int(i[0])
elif i[1] == 'I':
for j in range(int(i[0])):
out = [row.aStart+posa-1, '.', qryseq[posb+j], row.bStart-posb-j, 0, 0, 0, 0, 1, 1, row.aChr, row.bChr]
fout.write("\t".join(list(map(str, out))) + '\n')
posb += int(i[0])
elif i[1] == 'D':
for j in range(int(i[0])):
out = [row.aStart+posa+j, refseq[posa+j], '.', row.bStart-posb+1, 0, 0, 0, 0, 1, 1,row.aChr, row.bChr]
fout.write("\t".join(list(map(str, out))) + '\n')
posa += int(i[0])
return
def minimapToTSV(finpath, lenCut, idenCut):
'''
    This function transforms minimap2 output generated with parameters --eqx -cx asm* to a .tsv format suitable to
be used by SyRI.
'''
with open(finpath, 'r') as fin:
with open('coords.txt', 'w') as fout:
for line in fin:
line = line.strip().split()
# add one to convert to 1-based positioning
line = [int(line[7])+1, int(line[8]), int(line[2])+1, int(line[3]), int(line[8])-int(line[7]), int(line[3])-int(line[2]), format((int(line[9])/int(line[10]))*100, '.2f'), 1, 1 if line[4] == '+' else -1, line[5], line[0], line[-1].split(":")[-1]]
if line[4] > lenCut and line[5] > lenCut and float(line[6]) > idenCut:
fout.write("\t".join(list(map(str, line))) + "\n")
<|end_of_text|># distutils: language = c++
# distutils: sources = stereo.cpp
import cython
from cython cimport view
import numpy as np
cimport numpy as np
from libcpp cimport bool
# see http://makerwannabe.blogspot.ca/2013/09/calling-opencv-functions-via-cython.html
cdef extern from "opencv2/opencv.hpp" namespace "cv":
cdef cppclass Mat:
Mat() except +
Mat(int, int, int) except +
void create(int, int, int)
void* data
int rows
int cols
cdef extern from "opencv2/opencv.hpp":
cdef int CV_8U # np.uint8
cdef int CV_32F # np.float32
cdef int CV_32S # np.int32
cdef extern from "volume.h":
cdef cppclass volume[T]:
volume(int, int, int, bool)
int width()
int height()
int depth()
T *data
T ***access
cdef extern from "stereo.h":
cdef Mat stereo_ms(
Mat a, Mat b, Mat seed,
int values, int iters, int levels, float smooth,
float data_weight, float data_max, float data_exp,
float seed_weight, float disc_max)
cdef Mat stereo_ms_fovea(
Mat a, Mat b, Mat seed,
Mat fovea_corners, Mat fovea_shapes,
int values, int iters, int levels, int fovea_levels,
float smooth, float data_weight, float data_max, float data_exp,
float seed_weight, float disc_max, bool fine_periphery, int min_level)
cdef volume[float]* stereo_ms_volume(
Mat a, Mat b, Mat seed,
int values, int iters, int levels, float smooth,
float data_weight, float data_max, float data_exp,
float seed_weight, float disc_max)
cdef Mat stereo_ms_probseed(
Mat img1, Mat img2, Mat seedlist,
int values, int iters, int levels, float smooth,
float data_weight, float data_max, float seed_weight, float disc_max | Cython |
)
cdef Mat stereo_ss_region(Mat, Mat, int, float, float, float, float)
cdef Mat stereo_ms_region(Mat, Mat, int, int, float, float, float, float)
cdef Mat stereo_ms_region(Mat, Mat, Mat, int, int, float, float, float, float, float)
def stereo(
np.ndarray[uchar, ndim=2, mode="c"] a,
np.ndarray[uchar, ndim=2, mode="c"] b,
np.ndarray[uchar, ndim=2, mode="c"] seed = np.array([[]], dtype='uint8'),
int values=64, int iters=5, int levels=5, float smooth=0.7,
float data_weight=0.07, float data_max=15, float data_exp=1,
float seed_weight=1, float disc_max=1.7, bool return_volume=False):
assert a.shape[0] == b.shape[0] and a.shape[1] == b.shape[1]
cdef int m = a.shape[0], n = a.shape[1]
# copy data on
cdef Mat x
x.create(m, n, CV_8U)
cdef Mat y
y.create(m, n, CV_8U)
(<np.uint8_t[:m, :n]> x.data)[:, :] = a
(<np.uint8_t[:m, :n]> y.data)[:, :] = b
cdef Mat u
if seed.size > 0:
u.create(m, n, CV_8U)
(<np.uint8_t[:m, :n]> u.data)[:, :] = seed
else:
u.create(0, 0, CV_8U)
# declare C variables (doesn't work inside IF block)
cdef Mat zi
cdef np.ndarray[uchar, ndim=2, mode="c"] ci
cdef volume[float]* zv
cdef np.ndarray[float, ndim=3, mode="c"] cv
if not return_volume:
# run belief propagation
zi = stereo_ms(x, y, u, values, iters, levels, smooth,
data_weight, data_max, data_exp, seed_weight, disc_max)
# copy data off
ci = np.zeros_like(a)
ci[:, :] = <np.uint8_t[:m, :n]> zi.data
return ci
else:
# run belief propagation
zv = stereo_ms_volume(
x, y, u, values, iters, levels, smooth,
data_weight, data_max, data_exp, seed_weight, disc_max)
# copy data off
cv = np.zeros((m, n, values), dtype='float32')
        cv[:, :, :] = <np.float32_t[:m, :n, :values]> zv.data
del zv
return cv
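
# Hedged usage sketch for stereo() (loading images via imageio is an
# assumption; any equal-shaped pair of C-contiguous uint8 arrays works):
#
#     import imageio.v2 as imageio
#     left = np.ascontiguousarray(imageio.imread('left.png')[..., 0])
#     right = np.ascontiguousarray(imageio.imread('right.png')[..., 0])
#     disp = stereo(left, right, values=64, iters=5, levels=5)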
def stereo_fovea(
np.ndarray[uchar, ndim=2, mode="c"] a,
np.ndarray[uchar, ndim=2, mode="c"] b,
np.ndarray[int, ndim=2, mode="c"] fovea_corners,
np.ndarray[int, ndim=2, mode="c"] fovea_shapes,
np.ndarray[uchar, ndim=2, mode="c"] seed = np.array([[]], dtype='uint8'),
int values=64, int iters=5, int levels=5, int fovea_levels=1,
float smooth=0.7, float data_weight=0.07, float data_max=15, float data_exp=1,
float seed_weight=1, float disc_max=1.7, bool fine_periphery=1,
int min_level=0):
"""BP with two levels: coarse on the outside, fine in the fovea"""
assert a.shape[0] == b.shape[0] and a.shape[1] == b.shape[1]
cdef int m = a.shape[0], n = a.shape[1]
assert(levels > 0)
assert(fovea_levels < levels)
assert(min_level <= fovea_levels)
# copy data on
cdef Mat x
x.create(m, n, CV_8U)
cdef Mat y
y.create(m, n, CV_8U)
(<np.uint8_t[:m, :n]> x.data)[:, :] = a
(<np.uint8_t[:m, :n]> y.data)[:, :] = b
cdef Mat u
if seed.size > 0:
u.create(m, n, CV_8U)
(<np.uint8_t[:m, :n]> u.data)[:, :] = seed
else:
u.create(0, 0, CV_8U)
assert fovea_corners.shape[0] == fovea_shapes.shape[0]
assert fovea_corners.shape[1] == fovea_shapes.shape[1] == 2
nfoveas = fovea_corners.shape[0]
cdef Mat fcorners, fshapes
fcorners.create(nfoveas, 2, CV_32S)
fshapes.create(nfoveas, 2, CV_32S)
if nfoveas > 0:
(<np.int32_t[:nfoveas, :2]> fcorners.data)[:, :] = fovea_corners
(<np.int32_t[:nfoveas, :2]> fshapes.data)[:, :] = fovea_shapes
assert(nfoveas == 0 or min_level < fovea_levels)
# run belief propagation
cdef Mat zi = stereo_ms_fovea(
x, y, u, fcorners, fshapes, values, iters, levels, fovea_levels,
smooth, data_weight, data_max, data_exp, seed_weight, disc_max,
fine_periphery, min_level)
# copy data off
cdef np.ndarray[uchar, ndim=2, mode="c"] ci = np.zeros(
(zi.rows, zi.cols), dtype='uint8')
ci[:, :] = <np.uint8_t[:zi.rows, :zi.cols]> zi.data
return ci
def stereo_probseed(
np.ndarray[uchar, ndim=2, mode="c"] a,
np.ndarray[uchar, ndim=2, mode="c"] b,
np.ndarray[float, ndim=2, mode="c"] seedlist,
int values=64, int iters=5, int levels=5,
float smooth=0.7, float data_weight=0.07, float data_max=15,
float seed_weight=1, float disc_max=1.7):
assert a.shape[0] == b.shape[0] and a.shape[1] == b.shape[1]
cdef int m = a.shape[0], n = a.shape[1]
# copy data on
cdef Mat x
x.create(m, n, CV_8U)
cdef Mat y
y.create(m, n, CV_8U)
(<np.uint8_t[:m, :n]> x.data)[:, :] = a
(<np.uint8_t[:m, :n]> y.data)[:, :] = b
cdef int n_seed = seedlist.shape[0], seedlen = seedlist.shape[1]
cdef Mat u
u.create(n_seed, seedlen, CV_32F)
(<np.float32_t[:n_seed, :seedlen]> u.data)[:, :] = seedlist
# run belief propagation
cdef Mat z = stereo_ms_probseed(
x, y, u, values, iters, levels, smooth,
data_weight, data_max, seed_weight, disc_max)
# copy data off
cdef np.ndarray[uchar, ndim=2, mode="c"] c = np.zeros_like(a)
c[:, :] = <np.uint8_t[:m, :n]> z.data
return c
def stereo_region(
np.ndarray[uchar, ndim=2, mode="c"] a,
np.ndarray[uchar, ndim=2, mode="c"] b,
np.ndarray[uchar, ndim=2, mode="c"] s,
int iters=5, int levels=5, float smooth=0.7,
float data_weight=0.07, float data_max=15, float disc_max=1.7):
assert a.shape[0] == b.shape[0]
# copy data on
cdef Mat x
x.create(a.shape[0], a.shape[1], CV_8U)
cdef Mat y
y.create(b.shape[0], b.shape[1], CV_8U)
(<np.uint8_t[:a.shape[0], :a.shape[1]]> x.data)[:, :] = a
(<np.uint8_t[:b.shape[0], :b.shape[1]]> y.data)[:, :] = b
cdef Mat seed
seed.create(s.shape[0], s.shape[1], CV_8U)
(<np.uint8_t[:s.shape[0], :s.shape[1]]> seed.data)[:, :] = s
    cdef Mat z
# z = stereo_ss_region(x, y, iters, smooth, data_weight, data_max, disc_max)
seed_weight = 50
# z = stereo_ms_region(x, y, seed, iters, levels, smooth,
# data_weight, data_max, seed_weight, disc_max)
z = stereo_ms_region(x, y, iters, levels, smooth,
data_weight, data_max, disc_max)
# copy data off
cdef np.ndarray[uchar, ndim=2, mode="c"] c = np.zeros_like(a)
c[:, :] = <np.uint8_t[:c.shape[0], :c.shape[1]]> z.data
return c
<|end_of_text|>cdef size_t get_current_stream_ptr()
cpdef get_current_stream()
<|end_of_text|>from quantlib.types cimport Real, Size
from libcpp cimport bool
cdef extern from 'ql/termstructures/iterativebootstrap.hpp' namespace 'QuantLib':
cdef cppclass IterativeBootstrap[C]:
IterativeBootstrap(Real accuracy)
IterativeBootstrap(Real accuracy, # = Null<Real>(),
Real minValue, # = Null<Real>(),
Real maxValue, # = Null<Real>(),
Size maxAttempts, # = 1,
Real maxFactor, # = 2.0,
Real minFactor, # = 2.0,
bool dontThrow, # = false,
Size dontThrowSteps)
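
# Hedged usage sketch from another Cython module (the curve class used as the
# template parameter is an illustrative assumption):
#
#     cdef IterativeBootstrap[PiecewiseYieldCurve]* bootstrap = \
#         new IterativeBootstrap[PiecewiseYieldCurve](1e-12)
#     ...
#     del bootstrap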
<|end_of_text|>#!/usr/bin/env python3
#proun
#compax comp created by spencer williams
from libc.stdlib cimport *
from libc.stdio cimport *
import time
import re
import os
import sys
import requests
import shutil
from PyQt5.QtWidgets import *
from PyQt5.QtWebChannel import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWebEngine import *
from PyQt5.QtWebEngineWidgets import *
from PyQt5.QtPrintSupport import *
from PyQt5.QtMultimedia import *
from PyQt5.QtMultimediaWidgets import *
#key note: the difference between exec_ and show is that exec_ dominates focus and runs separately, while show is just part of the parent widget
""""
this is how to include c files the extern way
cdef extern from "jelexus.c":
void testicles(int blip)
cdef struct Foo:
int make
float moo
void* mera
"""
#download and stream
def _download_and_stream(url=
"https://www.sample-videos.com/video123/mp4/720/big_buck_bunny_720p_1mb.mp4",is_online_stream=False):
_d_url = url
#chunk size
chunk_size = 512
r = requests.get(_d_url,stream=is_online_stream)
with open("temp/jigga.mp4","wb") as f:
for chunk in r.iter_content(chunk_size=chunk_size):
f.write(chunk)
print(_d_url)
return "temp/jigga.mp4"
class da_extern_video_Window(QWidget):
def __init__(self):
super(da_extern_video_Window, self).__init__()
#set data var
self._da_online_path = ""
if(self._da_online_path == ""):
self._set_url_link("https://www.sample-videos.com/video123/mp4/720/big_buck_bunny_720p_1mb.mp4")
self._da_online_path = self._get_da_url_link()
#set position slider
self.positionSlider = QSlider(Qt.Horizontal)
#set error label
self.errorLabel = QLabel()
#create a media player object
self.player = QMediaPlayer(self)
#create a video widget for the video to be blit on
self.viewer = QVideoWidget(self)
#set the player to play the video blit
self.player.setVideoOutput(self.viewer)
#handle the state of the video to be changed
self.player.stateChanged.connect(self.handleStateChanged)
#set the video title
self.setWindowTitle("browser video player....")
#what to do when the position of the video is changed
self.player.positionChanged.connect(self.positionChanged)
#handle the duration of selected file
self.player.durationChanged.connect(self.durationChanged)
#handle the error
self.player.error.connect(self.handleError)
#play button
self.button1 = QPushButton('Play', self)
#stop button
self.button2 = QPushButton('Stop', self)
#call function (or method) when play button is clicked
self.button1.clicked.connect(self.player.play)
#call function (or method) when stop button is clicked
self.button2.clicked.connect(self.player.stop)
#enable stop button
self.button2.setEnabled(True)
#do a grid layout
layout = QGridLayout(self)
#add the video to thw layout
layout.addWidget(self.viewer, 0, 0, 1, 2)
#add the play button to the laayout
layout.addWidget(self.button1, 1, 0)
#add the stop button to the grid layout
layout.addWidget(self.button2, 1, 1)
#add the position slider to the layout
layout.addWidget(self.positionSlider,2,0)
#add the error label to the layout
layout.addWidget(self.errorLabel,3,0)
#create a buffer object just in case
self._buffer = QBuffer(self)
#create a data object
self._data = None
#do the rest of the functions
#self._init_handle_video_nuttin()
#initialize the video
def _init_handle_video_nuttin(self):
#set postion range
self.positionSlider.setRange(0,0)
#set what to do when slider is moved
self.positionSlider.sliderMoved.connect(self.setPosition)
#set sizr policy
self.errorLabel.setSizePolicy(QSizePolicy.Preferred,QSizePolicy.Maximum)
_eek_online_path = self._get_da_url_link()
edt = _download_and_stream(str(_eek_online_path),True)
print("!@#$! diagnostics for download data")
print(edt)
print("Now fucking fuck the fuck off!")
path = str("temp/jigga.mp4")
if path:
self.button1.setEnabled(False)
self.button2.setEnabled(True)
self.player.setMedia(QMediaContent(QUrl.fromLocalFile(QFileInfo(path).absoluteFilePath())))
self.player.play()
else:
print("error has occurred, cause")
print(path)
def handleStateChanged(self, state):
if state == QMediaPlayer.StoppedState:
self.button1.setEnabled(True)
self.button2.setEnabled(False)
else:
self.button1.setEnabled(False)
self.button2.setEnabled(True)
def positionChanged(self,position):
self.positionSlider.setValue(position)
def durationChanged(self,duration):
self.positionSlider.setRange(0,duration)
def setPosition(self,position):
self.player.setPosition(position)
def handleError(self):
self.button1.setEnabled(False)
self.errorLabel.setText("Fuckin' error: "+self.player.errorString())
def _set_url_link(self,online_path="https://www.sample-videos.com/video123/mp4/720/big_buck_bunny_720p_1mb.mp4"):
self._da_online_path = online_path
def _get_da_url_link(self):
return self._da_online_path
#add a hide and unhide feature for all the dockable areas
class De_da_dock_button(QWidget):
def __init__(dis,*args,**kwargs):
super(De_da_dock_button,dis).__init__(*args,**kwargs)
dis.setStyleSheet("""background: red; color: green; """)
#set dock button title
dis.setWindowTitle("show/hide docks")
#create the button
dis.da_button_1 = QPushButton("show/hide all docks",dis)
#connect the signal
dis.da_button_1.clicked.connect(dis._da_sho_n_hyde_dock)
#create a vbox layout
dis.vbox = QVBoxLayout()
#add to the main widget
dis.vbox.addWidget(dis.da_button_1)
#add layout to the widget
dis.setLayout(dis.vbox)
#connect the signal for the dock show and hide button
@pyqtSlot()
def _da_sho_n_hyde_dock(dis):
print("dock button initialized...")
pass
#start the loop for the regular plugin method
def _run_da_reg_control_plugin(dis):
dis.show()
#try to create a terminal embed within a widget as needed
class DaTerminal(QWidget):
def __init__(dis,dynamic_scale=False,parent=None,*args,**kwargs):
super(DaTerminal,dis).__init__(parent,*args,**kwargs)
dis.setStyleSheet("""background: red; color: green; """)
#set terminal title
dis.setWindowTitle("da compax comp terminal")
dis.process = QProcess(dis)
#set process channel mode
dis.process.setProcessChannelMode(QProcess.MergedChannels)
#set up process factors
dis.process.readyRead.connect(dis._on_terminal_output)
#when the process has an error
dis.process.errorOccurred.connect(dis._on_da_terminal_error)
#when the process is basically finished.
dis.process.finished.connect(dis._on_terminal_exit)
dis.dynamic_scale = dynamic_scale
dis.terminal = QWidget(dis)
        layout = QVBoxLayout(dis)
layout.addWidget(dis.terminal)
#works with also urxvt
if (not dis.dynamic_scale) or (dis.dynamic_scale == "False"):
dis.setFixedSize(640,480)
print("toasty")
print("checking dynamic scale status")
print("dynamic scale status is : {} \n".format(dis.dynamic_scale))
print(os.environ["PATH"])
#initialize path safekeeping (redundant?)
dis.Label__oldEnv = 0
#initializing the terminal
dis.__da_init_terminal_startit()
#safely add terminal source executable to app
def __add_terminal_to_env(dis):
dis.Label__oldEnv = os.environ["PATH"]
print(dis.Label__oldEnv)
os.environ["PATH"] = "/compax_comp/datterminalstandalonesexxxx/:"+dis.Label__oldEnv
print("checking temp env")
print("la temp dir iz: ")
print(os.environ["PATH"])
print("rejoyce in the end of this dianostic of ensuring temp switch")
#safely remove terminal source executable from os path to avoid system tampering
def __remove_terminal_from_env(dis):
print("removing path buggy")
os.environ["PATH"] = dis.Label__oldEnv
print("path changed")
print("environment var is:")
print(os.environ["PATH"])
#commands content for handling slots for signals
def __da_init_terminal_startit(dis):
dis.__add_terminal_to_env()
command = str("rxvt-unicode")
dis.process.start(command,['-embed',str(int(dis.winId())), '-bg','black','-fg','blue', '-hc','red','-cr','green','-pr','yellow'])
dis.process.waitForStarted(-1)
dis.__remove_terminal_from_env()
#set the output for the terminal
@pyqtSlot()
def _on_terminal_output(dis):
        da_data = bytes(dis.process.readAll()).decode().replace('\r\n','\n')
print(da_data)
#handle on error for the terminal
    #plain method (no pyqtSlot decorator) so the errorOccurred argument is
    #delivered; the original also lacked the `dis` self parameter
    def _on_da_terminal_error(dis, error):
print("")
print("there's a fgoiddamn fuckin error. Process error: {0}".format(str(error)))
print("")
#handle the terminal exiting
    #plain method so the exit code from `finished` is delivered
    def _on_terminal_exit(dis, exitCode):
print("")
print("Exit kode = {0}".format(str(exitCode)))
#attempt to override the close event to end the terminal process properly
def closeEvent(dis,event):
dis.process.terminate()
dis.process.waitForFinished()
event.accept()
#set if terminal scale will be dynamic or not
def set_dynamic_scale(dis,dynamic_result=True):
dis.dynamic_scale = dynamic_result
#handle dash blank
class Ae_web_page(QWebEnginePage):
def createWindow(dis, _type):
page = QWebEnginePage(dis)
page.urlChanged.connect(dis.on_url_changed)
return page
@pyqtSlot(QUrl)
def on_url_changed(dis,url):
page = dis.sender()
dis.setUrl(url)
page.deleteLater()
#create the compax comp browser
class da_Web_Browser(QWidget):
def __init__(dis,*args,**kwargs):
super(da_Web_Browser,dis).__init__(*args,**kwargs)
#create the browser tab title
dis.title_tab_name = "da web browser"
#set the web browser
dis.browser = QWebEngineView()
#set the web browser settings object
#dis.browser_settings = dis.browser.settings()
dis.browser_settings = QWebEngineSettings.globalSettings()
#set plugins and javascript to be enabled
dis.browser_settings.setAttribute(QWebEngineSettings.PluginsEnabled,True)
dis.browser_settings.setAttribute(QWebEngineSettings.JavascriptEnabled,True)
dis.browser_settings.setAttribute(QWebEngineSettings.LocalContentCanAccessRemoteUrls, True)
        dis.browser_settings.setAttribute(QWebEngineSettings.FullScreenSupportEnabled, True)
        dis.browser_settings.setAttribute(QWebEngineSettings.AllowRunningInsecureContent, True)
        dis.browser_settings.setAttribute(QWebEngineSettings.ScreenCaptureEnabled, True)
        dis.browser_settings.setAttribute(QWebEngineSettings.WebGLEnabled, True)
dis._dis_da_url = "https://www.bitchute.com/video/oPmPhCZoWmo9"
#set homepage by default
page = Ae_web_page(dis.browser)
dis.browser.setPage(page)
dis.browser.load(QUrl(dis._dis_da_url))
dis._da_source_html_data = requests.get(dis._dis_da_url).text
#what to do on the changed url
#dis.browser.urlChanged.connect(lambda : dis.browser.renew_urlbar(QUrl("https://mgtow.com"),dis) )
#when the page finishes loading
dis.browser.loadFinished.connect(lambda : dis.setWindowTitle(dis.browser.page().title()))
#set the vbox layout
dis.vboxlayout = QVBoxLayout()
#set the horizontal group box for the browser controls
dis.hboxgroup = QGroupBox("da browser controlz")
#set the horizontal layout
dis.hboxlayout = QHBoxLayout()
#get the url line edit
dis.url_box = QLineEdit()
#set up the popup
dis.Browser_control_window = QWidget()
#set dimensions for the controls
dis.Browser_control_window.setGeometry(100,100,100,100)
#set the vbox layout for the browser controls
dis._bvd_vbox = QVBoxLayout()
#set the object button to call the browser controls
dis._call_Buttin = QPushButton("display da controls",dis)
#set the slot to the button signal
dis._call_Buttin.clicked.connect(dis._show_browser_controls)
#set the load page button
dis._load_pagedd_url = QPushButton("load yer url",dis)
dis._load_pagedd_url.clicked.connect(dis.load_da_page )
#set the back page button
dis._vefor_go_back_button = QPushButton("go back",dis)
dis._vefor_go_back_button.clicked.connect(dis._back_buitton)
#skip forward a page button
dis._eefor_forward_button = QPushButton("skip forward",dis)
dis._eefor_forward_button.clicked.connect(dis._skip_forward_button_i)
#reload or refresh a page
dis.__breelooad = QPushButton("refresh da page",dis)
dis.__breelooad.clicked.connect(dis._dat_da_relode)
#stop the page
dis._dat_stop_da_page_buttonshp = QPushButton("stop loading da page",dis)
dis._dat_stop_da_page_buttonshp.clicked.connect(dis._dat_stop_da_page)
#set up the url box
dis.url_box.setPlaceholderText("enter da url here. https://,ftp://,http://,etc...")
#set load the page from the url box when return or enter is pressed
dis.url_box.returnPressed.connect(dis.load_da_page)
#add buttons to the horizontal box layout for the browser controls
"""
print('back enabled', w.page().action(QWebEnginePage.Back).isEnabled())
print('forward enabled', w.page().action(QWebEnginePage.Forward).isEnabled())
"""
#load url
dis.hboxlayout.addWidget( dis._load_pagedd_url)
#go back a page
dis.hboxlayout.addWidget( dis._vefor_go_back_button )
#go forward a page
dis.hboxlayout.addWidget(dis._eefor_forward_button)
#refresh a page
dis.hboxlayout.addWidget(dis.__breelooad)
#stop loading a page
dis.hboxlayout.addWidget(dis._dat_stop_da_page_buttonshp)
#set the layout for the popup window and add the horizontal group box contents to it
dis._bvd_vbox.addWidget(dis.hboxgroup)
dis.Browser_control_window.setLayout(dis._bvd_vbox)
#add widgets to the vertical layout
dis.vboxlayout.addWidget(dis.url_box)
dis.vboxlayout.addWidget(dis.browser)
dis.vboxlayout.addWidget(dis._call_Buttin)
#set the current horizontal layout to the horizontal box group
dis.hboxgroup.setLayout(dis.hboxlayout)
#set the overall layout
dis.setLayout(dis.vboxlayout)
#set to run via the loop
def _da_run_da_custom_plugin(dis):
dis.show()
print("browser plugin is loading for shit browser")
#set the tab title name as necessary
def _set_da_custom_title(dis,title="da mgtow browser"):
#set the title to the class title
dis.title_tab_name = title
#get the tab title name as needed
def _get_da_custom_title(dis):
return dis.title_tab_name
#set up the button to load the browser controls
@pyqtSlot()
def _show_browser_controls(dis):
dis.Browser_control_window.show()
print("now sho' in' browser controls popup")
#set up connection to load a new page
@pyqtSlot()
def load_da_page(dis):
dis._dis_da_url = dis.url_box.text()
#load the webpage
dis.browser.load(QUrl(dis._dis_da_url))
#call the video capture intitally
dis._sq_video_init()
print("fucking page is loaded. fuck off")
#set up to intercept and download videos
@pyqtSlot()
def _sq_video_init | Cython |
(dis):
#check if page contains either a mp4 file or a youtube type style m3u8 data and if it does, open the video player with the temporary data
print("sq video init")
#set up a back function
@pyqtSlot()
def _back_buitton(dis):
print('back enabled', dis.browser.page().action(QWebEnginePage.Back).isEnabled())
dis.browser.page().triggerAction(QWebEnginePage.Back)
#set up a skip forward function
@pyqtSlot()
def _skip_forward_button_i(dis):
dis.browser.page().triggerAction(QWebEnginePage.Forward)
#reload page connection
@pyqtSlot()
def _dat_da_relode(dis):
dis.browser.reload()
print("page reloadin")
#stop the page loading
@pyqtSlot()
def _dat_stop_da_page(dis):
dis.browser.page().triggerAction(QWebEnginePage.Stop)
print("loading manually destroyed. I hope you're proud of yourself, you sick fuck!")
#create basic window for later use
class daHomeBlot(QWidget):
def __init__(dis,*args,**kwargs):
super(daHomeBlot,dis).__init__(*args,**kwargs)
dis.setStyleSheet("""background:black; color:green;""")
#create the browser engine
dis.kk = QWebEngineView()
#first page to load on creation of the web browser engine
dis.kk.load(QUrl("https://upwork.com"))
#set up the interface for the text editor
dis.textEdit = QTextEdit()
#create the url box
dis.da_url_box = QLineEdit()
#set the page load when enter is pressed
dis.da_url_box.returnPressed.connect(dis.meme_clinc)
        #set the size of the url box (setGeometry takes integer pixels; the
        #original passed floats, which raises TypeError; values here are assumed)
        dis.da_url_box.setGeometry(10, 10, 200, 20)
dis.title = 'Upwork proposal system'
dis.left = 10
dis.top = 10
dis.width = 920
dis.height = 600
dis.initUI()
def initUI(dis):
dis.setWindowTitle(dis.title)
dis.setGeometry(dis.left,dis.top,dis.width,dis.height)
dis.createHorizontalLayout()
#set the placeholder for the url link
dis.da_url_box.setPlaceholderText("enter your url here. https://,http://,ftp://, etc...")
windowLayout = QVBoxLayout()
windowLayout.addWidget(dis.da_url_box)
windowLayout.addWidget(dis.horizontalGroupBox)
dis.browzeloadbutton = QPushButton("load url", dis)
dis.browzeloadbutton.clicked.connect(dis.meme_clinc)
windowLayout.addWidget(dis.browzeloadbutton)
dis.setLayout(windowLayout)
def createHorizontalLayout(dis):
dis.horizontalGroupBox = QGroupBox("dat proposal text editor")
layout = QHBoxLayout()
meme = dis.textEdit
layout.addWidget(meme)
mer = QPushButton('generate random proposal templates',dis)
mer.clicked.connect(dis.mer_clink)
layout.addWidget(mer)
layout.addWidget(dis.kk)
dis.horizontalGroupBox.setLayout(layout)
@pyqtSlot()
def meme_clinc(dis):
print('aneemeee')
print(dis.kk)
dis.kk.load(QUrl(dis.da_url_box.text()))
@pyqtSlot()
def mer_clink(dis):
print('create wendoh')
dis.kk.createWindow(QWebEnginePage().WebWindowType())
"""""
#set the browser thread
class Browser_thread(QThread):
signal = pyqtSignal('PyQt_PyObject')
def __init__(this_thread):
QThread.__init__(this_thread)
def run(this_thread):
print("thwead runz!")
class w_runnable(QRunnable):
def __init__(dis,url,fn,*args,**kwargs):
super(w_runnable,dis).__init__()
dis.url = url
dis.fn = fn
dis.args = args
dis.kwargs = kwargs
@pyqtSlot()
def run(dis):
dis.fn(*dis.args,**dis.kwargs)
_check_video_tag = "<video "
_check_mp4 = ".mp4"
_check_m8u3 = ".m8u3"
_1_da_source_html_data = str(requests.get(dis.url).text)
#raw html get request
_1_a = requests.get(dis.url)
print("get status: find mp4")
_1_mp4_find_status = _1_da_source_html_data.find(_check_mp4)
print(_1_mp4_find_status)
if(_1_mp4_find_status < 0):
print("video find failed")
else:
print("mp4 found on page")
_1_soup_bs_parsed_data = BS(_1_a.content,'html.parser')
print("<end of FUCKING DIAGNOSTICS>")
print("flamboyancy revee")
print("status report")
#https://www.bitchute.com/video/oPmPhCZoWmo9/
print(_1_mp4_find_status)
if(_1_mp4_find_status < 0):
print("video find failed rot in hell!")
else:
#prettify data
print(_1_soup_bs_parsed_data.prettify())
dis._1_text_output = da_distractfree_writer()
dis._1_text_output.setText(_1_da_source_html_data)
dis._1_text_output.show()
xdata = _1_soup_bs_parsed_data.find('source')
print("is it over?")
print(xdata['src'])
_1_udemy_soup_bs_parsed_data = BS(_1_a.content,'html.parser')
udemy_capture = _1_udemy_soup_bs_parsed_data.find('video')
dis.veek = da_extern_video_Window()
if(xdata['src']):
dis.veek._set_url_link(str(xdata['src']))
elif(udemy_capture['src']):
dis.veek._set_url_link(str(udemy_capture['src']))
else:
dis.veek._set_url_link("https://www.sample-videos.com/video123/mp4/720/big_buck_bunny_720p_1mb.mp4")
dis.veek._init_handle_video_nuttin()
dis.veek.show()
"""
#The main window
class daMainWindow(QMainWindow):
def __init__(this_object):
super(daMainWindow,this_object).__init__()
print("damainwindowlives")
#create the threadpool
this_object.threadpool = QThreadPool()
print("Multithreading technology with a maximum of %d fucking threads" % this_object.threadpool.maxThreadCount())
this_object.Da_System_Icon = QIcon( "images/anutechlogo.png")
#set current file focus path
this_object.dafilepath = ''
#init custom tab
this_object.tabs = QTabWidget()
#start the directory view for the file system in the folder
this_object.model = QFileSystemModel()
#set the system path
this_object.model.setRootPath(QDir.currentPath())
#set the directory tree
this_object.tree = QTreeView()
#set the directory model for the tree view
this_object.tree.setModel(this_object.model)
#set directory options
this_object.tree.setAnimated(True)
this_object.tree.setIndentation(20)
this_object.tree.setSortingEnabled(True)
this_object.tree.setWindowTitle("Da Directorhy")
this_object.tree.resize(640,480)
#initialize the terminal system
this_object._start_dat_terminal_n_debug_system()
#set the icon for the program
print(this_object.Da_System_Icon)
#set the vboxlayout
this_object.vboxlayout = QVBoxLayout()
#this_object.setWindowIcon()
#set the title for the main window
this_object.dawindowtitle = "Compax comp c-ver 0.0.1"
this_object.setWindowTitle(this_object.dawindowtitle)
#make homepage doc widget
this_object.docked = QDockWidget("control station",this_object)
#add editor portion
this_object.editRDok = QDockWidget("custom dock",this_object)
#add custom dock area
this_object.addDockWidget(Qt.RightDockWidgetArea,this_object.editRDok)
#add doc widget to the left hand area
this_object.addDockWidget(Qt.LeftDockWidgetArea,this_object.docked)
#the widget to be docked. can be text editor or browser editor, or anything really as long as it's a QWidget object
this_object.dockedWidget = QWidget(this_object)
#add tabs to the custom area dock
this_object.customDockareawidget = this_object.tabs
#set the widget to be used in the dock widget
this_object.docked.setWidget(this_object.dockedWidget)
#set the custom dock area widget for the tabs
this_object.editRDok.setWidget(this_object.customDockareawidget)
        #set the layout for the left dock, we will use the vbox object dynamically
this_object.dockedWidget.setLayout(this_object.vboxlayout)
this_object.setCentralWidget(this_object.tree)
#call function when items on the directory view are double clicked.
this_object.tree.doubleClicked.connect(this_object.open_dat_newttt_file)
#set text document area
this_object.editoreDock = QDockWidget("Editor area",this_object)
#set the tabs to use for the docking
this_object.editorTabs = QTabWidget()
#set the tab to a text editor
#set the doc tab widget to use on the document area
this_object.editoreDock.setWidget(this_object.editorTabs)
#set the widget to be placed on the top or center by default
this_object.addDockWidget(Qt.TopDockWidgetArea,this_object.editoreDock)
#set the file toolbars
this_object.file_toolbar = QToolBar("main")
this_object.file_toolbar.setIconSize(QSize(14,14))
this_object.addToolBar(this_object.file_toolbar)
this_object.workspace_toolbar = QToolBar("workspace")
this_object.workspace_toolbar.setIconSize(QSize(14,14))
this_object.addToolBar(this_object.workspace_toolbar)
#set the file menus
this_object.file_menu = this_object.menuBar().addMenu("&main")
this_object.workspace_menu = this_object.menuBar().addMenu("&workspace")
#set up the control list
this_object.control_list = []
#set up default controls and the browser
# for example this_object._add_control_object(QPushButton("piece of fucking shit"))
#set up the regular control area
this_object.reg_ct_list = []
"""set the control docks button for reg dock area
dissadint_dock = De_da_dock_button()
this_object._add_reg_control_object(dissadint_dock)
"""
#initialize the control list
this_object.init_control_area()
#initialize the regular control area
this_object._init_reg_control_area()
this_object.kabito = da_Web_Browser()
this_object.kabito.show()
#initialize the toolbars
this_object.initialize_toolbar()
#start the program
this_object.initialize_program()
#initalize the browser
def _web_init_pee_dee(this_object):
#set the web browser by default
print("thread connect")
#set up for the regular control area
def _init_reg_control_area(this_object):
print("reg control area triggered")
print(this_object.reg_ct_list)
for reg_ct_index in range(len(this_object.reg_ct_list)):
this_object.vboxlayout.addWidget(this_object.reg_ct_list[reg_ct_index])
this_object.reg_ct_list[reg_ct_index]._run_da_reg_control_plugin()
#set up the custom control dock area
def init_control_area(this_object):
## add the hbox and add widgets codes here for possible extension
for control_list_index in range(len(this_object.control_list)):
this_object.customDockareawidget.addTab(this_object.control_list[control_list_index],str( this_object.control_list[control_list_index]._get_da_custom_title() ))
this_object.control_list[control_list_index]._da_run_da_custom_plugin()
#add the regular control widgets to the regular control list
def _add_reg_control_object(this_object,da_QWidget):
this_object.reg_ct_list.append(da_QWidget)
print(this_object.reg_ct_list)
#add the control widgets to the control list
def _add_control_object(this_object,da_QWidget):
this_object.control_list.append(da_QWidget)
def initialize_program(this_object):
#set the main window size
this_object.setGeometry(50,50,800,600)
#show the main window
this_object.show()
        #include a new programmer instance
this_object.ide_workspace = daHomeBlot()
#standalone writer
this_object.da_writer = da_distractfree_writer()
#update title
def update_title(this_object,updated_title="sexy chids"):
print(this_object.dawindowtitle)
this_object.setWindowTitle(updated_title)
this_object.ide_workspace.setWindowTitle(updated_title)
this_object.da_writer.setWindowTitle(updated_title)
def initialize_toolbar(this_object):
#set actions
cut_action = QAction(this_object.Da_System_Icon,"cut",this_object)
open_code_action = QAction(this_object.Da_System_Icon,"Upwork proposal environment",this_object)
open_writer_action = QAction(this_object.Da_System_Icon,"standalone writer",this_object)
open_file_action = QAction(this_object.Da_System_Icon,"open da file",this_object)
save_file_action = QAction(this_object.Da_System_Icon,"save da file",this_object)
saveas_file_action = QAction(this_object.Da_System_Icon,"save da file as",this_object)
print_action = QAction(this_object.Da_System_Icon,"print da file",this_object)
terminal_open_action = QAction(this_object.Da_System_Icon,"open da terminal",this_object)
show_n_hyde_dock_action = QAction(this_object.Da_System_Icon,"Show hidden docks",this_object)
show_n_hide_web_browser_action = QAction(this_object.Da_System_Icon,"Show web browser",this_object)
#set status tips
open_code_action.setStatusTip("enter Upwork proposal workspace")
open_writer_action.setStatusTip("distraction-free writers mode")
print_action.setStatusTip("Print current page out")
saveas_file_action.setStatusTip("save the fucking file as")
open_file_action.setStatusTip("open the file")
save_file_action.setStatusTip("save the file")
terminal_open_action.setStatusTip("open da terminal")
show_n_hyde_dock_action.setStatusTip("Show the hidden dockable sections of your IDE. Useful in case you close them out and need them back up")
show_n_hide_web_browser_action.setStatusTip("show the companion web browser")
#set triggers (QAction().triggered.connect(function))
open_code_action.triggered.connect(this_object.set_ide_workspace)
open_writer_action.triggered.connect(this_object.set_da_distractfree_writer)
print_action.triggered.connect(this_object.file_print)
saveas_file_action.triggered.connect(this_object.file_saveas)
save_file_action.triggered.connect(this_object.file_save)
open_file_action.triggered.connect(this_object.file_open)
terminal_open_action.triggered.connect(this_object.__open_da_terminal)
show_n_hyde_dock_action.triggered.connect(this_object._id_da_sho_n_hyde_dock)
show_n_hide_web_browser_action.triggered.connect(this_object._we_show_da_web_browser)
#add each action to the menu and toolbar respectively
this_object.workspace_menu.addAction(open_code_action)
this_object.workspace_toolbar.addAction(open_code_action)
this_object.workspace_menu.addAction(open_writer_action)
this_object.workspace_toolbar.addAction(open_writer_action)
this_object.workspace_menu.addAction(show_n_hyde_dock_action)
this_object.workspace_toolbar.addAction(show_n_hyde_dock_action)
this_object.workspace_menu.addAction(show_n_hide_web_browser_action)
this_object.workspace_toolbar.addAction(show_n_hide_web_browser_action)
this_object.file_menu.addAction(open_file_action)
this_object.file_toolbar.addAction(open_file_action)
this_object.file_menu.addAction(show_n_hyde_dock_action)
this_object.file_toolbar.addAction(show_n_hyde_dock_action)
this_object.file_menu.addAction(save_file_action)
this_object.file_menu.addAction(saveas_file_action)
this_object.file_menu.addAction(print_action)
this_object.file_menu.addAction(terminal_open_action)
this_object.file_toolbar.addAction(save_file_action)
this_object.file_toolbar.addAction(saveas_file_action)
this_object.file_toolbar.addAction(print_action)
this_object.file_toolbar.addAction(terminal_open_action)
#set terminal to also be included with the workspace menu
this_object.workspace_menu.addAction(terminal_open_action)
this_object.workspace_toolbar.addAction(terminal_open_action)
#set the file toolbar
#set the web browser
@pyqtSlot()
def _we_show_da_web_browser(this_object):
        #hide() returns None, so the original check never fired and hid the
        #window as a side effect; just make sure the browser is visible
        if this_object.kabito.isHidden():
            this_object.kabito.show()
#set the ide workspace to select
@pyqtSlot()
def set_ide_workspace(this_object):
this_object.ide_workspace.show()
@pyqtSlot()
def __open_da_terminal(this_object):
this_object.term = DaTerminal()
this_object.term.show()
    #set the stand alone writer to select
@pyqtSlot()
def set_da_distractfree_writer(this_object):
#add the writer to the vboxy layout
this_object.vboxlayout.addWidget(this_object.da_writer)
#set the title of the notepad
this_object.da_writer.setWindowTitle("da text pad")
#show the text pad
this_object.da_writer.show()
#open file
@pyqtSlot()
def file_open(this_object):
#set path to the file according to the file dialog
path, _ = QFileDialog.getOpenFileName(this_object,"Open da file","","da python filez (*.py);;All Da files(*.*)")
#if the path is set (if file successfully opens)
if path:
#try to read each line and put the resulting parsed file into text variable
try:
                with open(path, 'r') as f:  # 'rU' was removed in Python 3.11
text = f.read()
#catch the exception should one occur
except Exception as e:
#open critical qt dialog to show the error
this_object.dialog_critical(str(e))
else:
#if path fails to load, set the path to the current document being edited on
                this_object.dafilepath = path
#open a new distract free writer for use in the tab
opentempnu_dtr = da_distractfree_writer()
#set the plain text of the text editor on the ide.
opentempnu_dtr.setPlainText(text)
#add the writer to the editor tab section
this_object.editorTabs.addTab(opentempnu_dtr,this_object.Da_System_Icon,str(this_object.dafilepath))
#set the path of the currently active tab to the focusable file path
this_object.editorTabs.currentWidget().path = this_object.dafilepath
#debug to check and see if the widget path fits
print(this_object.editorTabs.currentWidget().path)
#if path fails to load, set the path to the current document being edited on to just do it again. redundant? absolutely but i will optimize later.
path = this_object.editorTabs.currentWidget().path
#set the plain text of the text editor on the ide. redundant
this_object.editorTabs.currentWidget().setPlainText(text)
#update the title
this_object.update_title(path)
#file save
@pyqtSlot()
def file_save(this_object):
print("testing the save path:::as follows:")
print(this_object.editorTabs.currentWidget().path)
print("localized focused path")
print(this_object.dafilepath)
this_object.editorTabs.currentWidget().path = this_object.dafilepath
if (this_object.editorTabs.currentWidget().path is None):
return this_object.file_saveas()
else:
this_object._save_to_path(this_object.editorTabs.currentWidget().path)
#file save as
@pyqtSlot()
def file_saveas(this_object):
path, _ = QFileDialog.getSaveFileName(this_object,"save dis file","","All Da files(*.*)")
if not path:
return
this_object.editorTabs.currentWidget().path = path
this_object._save_to_path(this_object.editorTabs.currentWidget().path)
#save to path
def _save_to_path(this_object,path):
text = this_object.editorTabs.currentWidget().toPlainText()
if not path:
print("path error occurred...")
print(path)
else:
plat_path = path
print("path seems to be working. running diagnostics...")
print(path)
print(plat_path)
try:
with open(plat_path,'w') as f:
f.write(text)
except Exception as e:
this_object.dialog_critical(str(e))
else:
this_object.editorTabs.currentWidget().path = path
this_object.update_title(this_object.editorTabs.currentWidget().path)
#file print
@pyqtSlot()
def file_print(this_object):
d1g = QPrintDialog()
if d1g.exec_():
this_object.editorTabs.currentWidget().print_(d1g.printer())
#attempt to override the close event to end the terminal process properly
def closeEvent(this_object,event):
        #terminate the embedded terminal (the original referenced an undefined
        #`window1` attribute)
        this_object.__the_first_terminal.process.terminate()
        this_object.__the_first_terminal.process.waitForFinished()
event.accept()
#open file from directory structure
#@pyqtSlot()
def open_dat_newttt_file(this_object,index):
cdef int k = 3
#set path to the file according to the file dialog
print(index)
path = this_object.model.filePath(index)
print(path)
#if the path is set (if file successfully opens)
if path:
#try to read each line and put the resulting parsed file into text variable
try:
                with open(path, 'r') as f:  # 'rU' was removed in Python 3.11
text = f.read()
#catch the exception should one occur
except Exception as e:
#open critical qt dialog to show the error
QDialog().dialog_critical(str(e))
else:
#if path fails to load, set the path to the current document being edited on
this_object.dafilepath = path
nu_dtr = da_distractfree_writer()
#set the plain text of the text editor on the ide.
nu_dtr.setPlainText(text)
this_object.editorTabs.addTab(nu_dtr,this_object.Da_System_Icon,str(this_object.dafilepath))
this_object.editorTabs.currentWidget().path = this_object.dafilepath
print(this_object.editorTabs.currentWidget().path)
print(k)
#initialize the terminal program and run it in a dynamic dockable tab in the bottom
def _start_dat_terminal_n_debug_system(this_object):
#start a dockable sequence
this_object.term_dock = QDockWidget("Terminal & debugging tools",this_object)
#add terminals tabs
this_object.terminal_tabzee = QTabWidget()
#add initial terminal set the dynamic scale to true
this_object.__the_first_terminal = DaTerminal(True)
#set the dynamic scale manually and once true, make sure the terminal scales with the window activity
this_object.__the_first_terminal.set_dynamic_scale(True)
this_object.__the_first_terminal.setStyleSheet("QWidget{height:100%; width:100%;}")
#add the terminal to the tab
this_object.terminal_tabzee.addTab(this_object.__the_first_terminal,"terminal 1")
#set the tab widget for use on the terminal dock area
this_object.term_dock.setWidget(this_object.terminal_tabzee)
#set the widget on bottom by default
this_object.addDockWidget(Qt.BottomDockWidgetArea,this_object.term_dock)
#bring up companion webkit app on the side
@pyqtSlot()
def _bring_up_nu_browser(this_object):
mimp = da_Web_Browser()
mimp.show()
print("web button signal")
#handle showing and hiding of dock system
@pyqtSlot()
def _id_da_sho_n_hyde_dock(this_object):
#capture the dock objects
dock_1 = this_object.docked
dock_2 = this_object.editRDok
dock_3 = this_object.editoreDock
dock_4 = this_object.term_dock
#check to see if docks are hidden and to show them if they are
if(dock_1.isHidden()):
dock_1.show()
if(dock_2.isHidden()):
dock_2.show()
if(dock_3.isHidden()):
dock_3.show()
if(dock_4.isHidden()):
dock_4.show()
#writers tab without distractions
class da_distractfree_writer(QTextEdit):
def __init__(self,*args,**kwargs):
super(da_distractfree_writer,self).__init__(*args,**kwargs)
self.setAcceptRichText(False)
fixedfont = QFontDatabase.systemFont(QFontDatabase.FixedFont)
fixedfont.setPointSize(16)
self.setFont(fixedfont)
self.setGeometry(10,10,800,600)
#set the path to the current file
#set to none by default
self.path = None
self.run_writer_program()
#todo: set widget to capture text file contents
def set_file_contents(self):
pass
#initialize the text editor of the program
def run_writer_program(self):
#debug(module, name, pm=False)
printf("text editor loaded")
self.setDocumentTitle("texteditor")
#we make sure for word wrap to true
if(self.wordWrapMode() == 4):
print("Pure word wrap mode is active")
cnxf_check = self.wordWrapMode() if (self.wordWrapMode() == 4) else 0
print("cnxf check word wrap mod confirms...")
print(cnxf_check)
else:
print("something went wrong with true word wrap...attempting to fix it")
self.setWordWrapMode(4)
x_faxkter = self.wordWrapMode() if (self.wordWrapMode() == 4) else 0
if not x_faxkter == 4:
print("true word wrap failed. I think I am broken...")
#set the font weight for the text editor
self.setFontWeight(42)
#set the text background color
self.setTextBackgroundColor(QColor(255,212,22,75))
#set the text color
self.setTextColor(QColor(255,0,0,255))
#set the cursor width
self.setCursorWidth(11)
#text editor can paste
self.canPaste = True
#set auto formatting
self.setAutoFormatting(QTextEdit.AutoAll)
cdef int x_xdabber = 13
printf("\ndigital slut #: %d \n",x_xdabber)
pass
#start of the program
cdef start_main_compax_comp():
app = QApplication(sys.argv)
compax_executable = daMainWindow()
if compax_executable:
sys.exit(app.exec_())
#call the main c function here in python
def s_da_main_program():
start_main_compax_comp()<|end_of_text|>from libcpp cimport bool
cdef extern from "matrix.h" namespace "implicit::gpu" nogil:
cdef cppclass CSRMatrix:
CSRMatrix(int rows, int cols, int nonzeros,
const int * indptr, const int * indices, const float * data) except +
cdef cppclass COOMatrix:
        COOMatrix(int rows, int cols, int nonzeros,
                  const int * row, const int * col, const float * data) except +
cdef cppclass Vector[T]:
Vector(int size, T * data)
cdef cppclass Matrix:
Matrix(int rows, int cols, float * data, bool host) except +
void to_host(float * output) except +
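
# Hedged sketch of building a CSRMatrix from a scipy.sparse.csr_matrix `X`
# in a consuming Cython module (the memoryview plumbing is an assumption):
#
#     cdef int[:] indptr = X.indptr
#     cdef int[:] indices = X.indices
#     cdef float[:] data = X.data.astype('float32')
#     cdef CSRMatrix* m = new CSRMatrix(X.shape[0], X.shape[1], X.nnz,
#                                       &indptr[0], &indices[0], &data[0])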
<|end_of_text|>from psage.modform.maass.permutation_alg cimport MyPermutation
cpdef are_mod1_equivalent(MyPermutation R1,MyPermutation S1, MyPermutation R2,MyPermutation S2,int verbose=?)
cdef int are_mod1_equivalent_c(int N,MyPermutation S1,MyPermutation R1,MyPermutation S2,MyPermutation R2,int* pres,int verbose=?,int check=?)
<|end_of_text|># distutils: language=c++
from libcpp.vector cimport vector
from libcpp.string cimport string
from.base cimport _Algorithm, Algorithm
from.graph cimport _Graph, Graph
from.structures cimport count
cdef extern from "<networkit/embedding/Node2Vec.hpp>":
cdef cppclass _Node2Vec "NetworKit::Node2Vec"(_Algorithm):
_Node2Vec(_Graph G, double P, double Q, count L, count N, count D) except +
vector[vector[float]] &getFeatures() except +
cdef class Node2Vec(Algorithm):
"""
Node2Vec(G, P, Q, L, N, D)
    Algorithm to extract features from the graph with the node2vec (word2vec-based)
    algorithm according to [https://arxiv.org/pdf/1607.00653v1.pdf].
Node2Vec learns embeddings for nodes in a graph by optimizing a neighborhood preserving
objective. In order to achieve this, biased random walks are initiated for every node and the
result is of probabilistic nature. Several input parameters control the specific behavior of
the random walks. Amongst others Node2Vec is able to produce embeddings for visualization
(D=2 or D=3) and machine learning (D=128 [default]). Both directed and undirected graphs
    without isolated nodes are supported.
Note
----
This algorithm could take a lot of time on large networks (many nodes).
Parameters
----------
G : networkit.Graph
The graph.
P : float
The ratio for returning to the previous node on a walk.
For P > max(Q,1) it is less likely to sample an already-visited node in the following two steps.
For P < min(Q,1) it is more likely to sample an already-visited node in the following two steps.
Q : float
The ratio for the direction of the next step
For Q > 1 the random walk is biased towards nodes close to the previous one.
For Q < 1 the random walk is biased towards nodes which are further away from the previous one.
L : int
The walk length.
N : int
The number of walks per node.
    D : int
The dimension of the calculated embedding.
"""
cdef Graph _G
def __cinit__(self, Graph G, P=1, Q=1, L=80, N=10, D=128):
self._G = G
self._this = new _Node2Vec(G._this, P, Q, L, N, D)
def getFeatures(self):
"""
getFeatures()
Returns all feature vectors
Returns
-------
list(list(float))
A vector containing feature vectors of all nodes
"""
return (<_Node2Vec*>(self._this)).getFeatures()
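
# Hedged usage sketch (the generator call and parameter values are
# illustrative; run() is inherited from the Algorithm base class):
#
#     import networkit as nk
#     G = nk.generators.ErdosRenyiGenerator(100, 0.1).generate()
#     n2v = Node2Vec(G, P=1.0, Q=0.5, L=40, N=5, D=64)
#     n2v.run()
#     features = n2v.getFeatures()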
<|end_of_text|>from cpython cimport Py_INCREF, Py_DECREF
from.nginx_core cimport ngx_log_error, NGX_LOG_CRIT, NGX_AGAIN, from_nginx_str
from.ngx_http cimport ngx_http_request_t, ngx_http_core_run_phases
from.ngx_http cimport ngx_http_get_module_ctx, ngx_http_set_ctx
import traceback
cdef class Request:
cdef:
ngx_http_request_t *request
public Log log
object future
public str request_line
public str uri
public str args
public str extension
public str unparsed_uri
public str method_name
public str http_protocol
def __init__(self, *args):
raise NotImplementedError
def _started(self):
return self.future is not None
def _start(self, fut):
self.future = fut
Py_INCREF(self)
return NGX_AGAIN
def _result(self):
if self.future.done():
Py_DECREF(self)
ngx_http_set_ctx(self.request, NULL, ngx_python_module)
return self.future.result()
return NGX_AGAIN
def __repr__(self):
return f'Request({self.method_name} {self.uri})'
def __str__(self):
return f''' request_line: {self.request_line}
uri: {self.uri}
args: {self.args}
extension: {self.extension}
unparsed_uri: {self.unparsed_uri}
method_name: {self.method_name}
http_protocol: {self.http_protocol}'''
@staticmethod
cdef Request from_ptr(ngx_http_request_t *request):
cdef:
void *rv
Request new_req
rv = ngx_http_get_module_ctx(request, ngx_python_module)
if rv == NULL:
new_req = Request.__new__(Request)
new_req.request = request
new_req.log = Log.from_ptr(request.connection.log)
new_req.request_line = from_nginx_str(request.request_line)
new_req.uri = from_nginx_str(request.uri)
new_req.args = from_nginx_str(request.args)
new_req.extension = from_nginx_str(request.exten)
new_req.unparsed_uri = from_nginx_str(request.unparsed_uri)
new_req.method_name = from_nginx_str(request.method_name)
new_req.http_protocol = from_nginx_str(request.http_protocol)
ngx_http_set_ctx(request, <void *>new_req, ngx_python_module)
return new_req
else:
return <object>rv
cdef public ngx_int_t nginxpy_post_read(ngx_http_request_t *request):
try:
from. import hooks
return hooks.post_read(Request.from_ptr(request))
except:
ngx_log_error(NGX_LOG_CRIT, request.connection.log, 0,
                      b'Error occurred in post_read:\n' +
traceback.format_exc().encode())
return 500
def run_phases(Request request):
ngx_http_core_run_phases(request.request)
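
# Hedged sketch of the hooks.post_read callback this module dispatches to
# (the module layout and the `return 0` == NGX_OK convention are assumptions):
#
#     def post_read(request):
#         print('incoming:', request)   # e.g. Request(GET /index.html)
#         return 0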
<|end_of_text|>"""
Brief: cython wrapper for renderer
Author: [email protected]
Date: 2018/6/10
"""
from __future__ import division
from libcpp cimport bool
import numpy as np
cimport numpy as np # for np.ndarray
cdef extern from "renderMesh.h":
void renderMesh(double* FM, int fNum,
double* VM, int vNum,
double* intrinsics,
int height, int width,
float* depth, bool *mask,
double linewidth)
DTYPE = np.float64
ctypedef np.float64_t DTYPE_t
def renderMesh_py(np.ndarray[DTYPE_t, ndim=2] vertices,
np.ndarray[DTYPE_t, ndim=2] faces,
np.ndarray[DTYPE_t, ndim=1] intrinsic,
int height, int width, DTYPE_t linewidth):
cdef int v_num = vertices.shape[0];
cdef int f_num = faces.shape[0];
vertices = vertices.transpose().copy()
faces = faces.transpose().copy()
cdef np.ndarray[DTYPE_t, ndim=1] color;
cdef np.ndarray[np.float32_t, ndim=2] depth = np.zeros((height, width), dtype=np.float32);
cdef np.ndarray[np.uint8_t, ndim=2, cast=True] mask = np.zeros((height, width), dtype=np.uint8);
cdef bool *mask_bool = <bool*> &mask[0, 0]
renderMesh(&faces[0, 0], f_num,
&vertices[0, 0], v_num,
&intrinsic[0],
height, width,
&depth[0, 0],
mask_bool,
linewidth)
depth[mask == 0] = -1.0
mask = mask[::-1, :]
depth = depth[::-1, :]
return depth, mask
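
# Hedged usage sketch (the layout of `intrinsic` is dictated by renderMesh.h;
# the shapes and values passed below are illustrative assumptions):
#
#     depth, mask = renderMesh_py(vertices, faces, intrinsic,
#                                 480, 640, 1.0)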
<|end_of_text|># distutils: language=c++
# -*- coding: utf-8 -*-
'''functions mapping from words/phrases to IDs and vice versa'''
# C++ setting
from libcpp cimport bool
# Local libraries
from nlputils.data_structs.trie import TwoWayIDMap
#wordMap = TwoWayIDMap()
#wordMap = {}
phraseMap = TwoWayIDMap()
#phraseMap = {}
cdef class StringEnumerator:
# defined in vocab.pxd
# cdef dict dict_str2id
# cdef list list_id2str
def __cinit__(self):
self.dict_str2id = {}
self.list_id2str = []
cpdef bool append(self, str string):
self.str2id(string)
return True
cpdef long str2id(self, str string):
cdef long new_id
if string in self.dict_str2id:
            return self.dict_str2id[string]
else:
new_id = len(self.list_id2str)
self.list_id2str.append(string)
self.dict_str2id[string] = new_id
return new_id
cpdef str id2str(self, long number):
if 0 <= number and number < len(self.list_id2str):
return self.list_id2str[number]
else:
raise IndexError("id %s is not registered in vocabulary set" % (number,))
def ids(self):
cdef long i = 0, length = len(self.list_id2str)
while i < length:
yield i
i += 1
def strings(self):
cdef str string
for string in self.list_id2str:
yield string
def __iter__(self):
return self.strings()
def __len__(self):
return len(self.list_id2str)
cdef StringEnumerator word_enum = StringEnumerator()
cdef StringEnumerator phrase_enum = StringEnumerator()
cpdef long word2id(str word):
return word_enum.str2id(word)
#return wordMap[word]
cpdef str id2word(long number):
return word_enum.id2str(number)
#return wordMap.id2str(number)
cpdef str phrase2idvec(str phrase):
if not phrase:
return ''
#return str.join(',', map(str, map(word2id, phrase.split(' '))))
#return str.join(',', map(str, map(word2id, phrase.split())))
return str.join(',', map(str, map(word2id, phrase.strip().split(' '))))
cpdef str idvec2phrase(str idvec):
if not idvec:
return ''
return str.join(' ', map(id2word, map(int, idvec.split(','))))
cpdef long phrase2id(str phrase):
cdef str idvec = str.join(',', map(str, map(word2id, phrase.split(' '))))
return phraseMap[idvec]
cpdef str id2phrase(long number):
cdef str idvec = phraseMap.id2str(number)
return str.join(' ', map(id2word, map(int, idvec.split(','))))
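
# Hedged round-trip sketch (strings are illustrative):
#
#     pid = phrase2idvec('hello world')
#     assert idvec2phrase(pid) == 'hello world'
#     assert word2id('hello') == word2id('hello')   # IDs are stable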
<|end_of_text|>
cdef class PinnedMemoryPointer:
cdef:
readonly object mem
readonly size_t ptr
Py_ssize_t _shape[1]
Py_ssize_t _strides[1]
cpdef Py_ssize_t size(self)
cpdef _add_to_watch_list(event, obj)
cpdef PinnedMemoryPointer alloc_pinned_memory(Py_ssize_t size)
cpdef set_pinned_memory_allocator(allocator=*)
cdef class PinnedMemoryPool:
cdef:
object _alloc
dict _in_use
object _free
object __weakref__
object _weakref
object _lock
Py_ssize_t _allocation_unit_size
cpdef PinnedMemoryPointer malloc(self, Py_ssize_t size)
cpdef free(self, size_t ptr, Py_ssize_t size)
cpdef free_all_blocks(self)
cpdef n_free_blocks(self)
<|end_of_text|># Currently Cython does not support std::optional.
# See: https://github.com/cython/cython/pull/3294
from libcpp cimport bool
cdef extern from "<optional>" namespace "std" nogil:
cdef cppclass nullopt_t:
nullopt_t()
cdef nullopt_t nullopt
cdef cppclass optional[T]:
ctypedef T value_type
optional()
optional(nullopt_t)
optional(optional&) except +
optional(T&) except +
bool has_value()
T& value()
T& value_or[U](U& default_value)
void swap(optional&)
void reset()
T& emplace(...)
T& operator*()
# T* operator->() # Not Supported
optional& operator=(optional&)
optional& operator=[U](U&)
bool operator bool()
bool operator!()
bool operator==[U](optional&, U&)
bool operator!=[U](optional&, U&)
bool operator<[U](optional&, U&)
bool operator>[U](optional&, U&)
bool operator<=[U](optional&, U&)
bool operator>=[U](optional&, U&)
optional[T] make_optional[T](...) except +
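
# Hedged usage sketch from a Cython module that cimports these declarations:
#
#     cdef optional[int] maybe_x = make_optional[int](42)
#     if maybe_x.has_value():
#         print(maybe_x.value())
#     maybe_x.reset()   # back to nullopt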
<|end_of_text|># file: pycgns.pyx
# cython: language_level=3
# cython: c_string_type=str, c_string_encoding=ascii
include "lcgns.pyx"
<|end_of_text|>import Cython
import numpy as np
cimport numpy as cnp
from libc.math cimport sqrt
from libc.math cimport cos
from libc.math cimport acos
from libc.math cimport pow
# assumes that all-zero elements are taken care of before function
cpdef cnp.ndarray[double, ndim=1] calculateTangent(cnp.ndarray arg):
# casting from numpy array
cdef double[9] arg_as_array = [
<double> arg[0][0], <double> arg[0][1], <double> arg[0][2],
<double> arg[1][0], <double> arg[1][1], <double> arg[1][2],
<double> arg[2][0], <double> arg[2][1], <double> arg[2][2]]
# create the matrix (M - lambda I)
cdef double smallestEgValue = calculateEigenValue(arg_as_array, arg)
# update matrix
arg_as_array[0] = arg_as_array[0] - smallestEgValue
arg_as_array[4] = arg_as_array[4] - smallestEgValue
arg_as_array[8] = arg_as_array[8] - smallestEgValue
    # the not yet normalized eigenvector; the helpers fill the buffer in place,
    # so we must not re-assign the fixed-size C array from the returned pointer
    cdef double[3] raw_egVec
    calcEgVecByCrossProduct(arg_as_array, raw_egVec)
    # normalize in place
    normalizeVector(raw_egVec)
    # cast to numpy array
    cdef cnp.ndarray[cnp.double_t, ndim=1] npEgVec = np.asarray(raw_egVec)
return npEgVec
# function for finding eigenvalues through cross products
# the method relies on that the input matrix is normal, which is fulfilled for
# symmetric matrices
cdef double* calcEgVecByCrossProduct(double[9] arg, double[3] egVec):
# -------- Case 1 (most likely) --------
# two indep. columns, geometric multiplicitiy: 1
# -> the column space has rank 2
# idea:
# use two of the vectors spanning the column space to find the egVector
# by taking the cross product
# implementation:
# take cross product of two random vectors.
# If the cross product is zero, they are parallell, which won't work. If so,
# try the next pair, and potentially the third.
# Then return the first nonzero vector
# constant to protect from rounding of errors
cdef double cutOffConstant = 1e-15
# prep
cdef double[3] v1 = [arg[0], arg[1], arg[2]]
cdef double[3] v2 = [arg[3], arg[4], arg[5]]
cdef double[3] v3 = [arg[6], arg[7], arg[8]]
# return any nonzero orthogonal vector
# (also make sure that the vector only nonzero due to arithmetic faults)
egVec = calcCross(v1,v2, egVec)
if(calcDot(egVec, egVec) > cutOffConstant):
return egVec
egVec = calcCross(v1, v3, egVec)
if(calcDot(egVec, egVec) > cutOffConstant):
return egVec
egVec = calcCross(v2,v3, egVec)
if(calcDot(egVec, egVec) > cutOffConstant):
return egVec
# -------- Case 2 & 3 --------
# one independent columns = geometric multiplicitiy: 2
# zero independent columns = geometric multiplicitiy: 3
# in this case, return the zero vector since there is no
# tangent. It would therefore be misleading to return an
# eigenvector corresponding to the eigenvalue.
egVec[0] = 0
egVec[1] = 0
egVec[2] = 0
return egVec
#!!!! OBS!!!! only works for symmetrical matrices
# implemented from https://d1rkab7tlqy5f1.cloudfront.net/TNW/Over%20faculteit/
# Decaan/Publications/1999/SCIA99GKNBLVea.pdf
cdef double calculateEigenValue(double[9] my_matrix, cnp.ndarray arg):
# all values are not needed since the matrix is symmetric
cdef double m_00 = my_matrix[0]
cdef double m_01 = my_matrix[1]
cdef double m_02 = my_matrix[2]
cdef double m_11 = my_matrix[4]
cdef double m_12 = my_matrix[5]
    cdef double m_22 = my_matrix[8]
# coefficents for characteristic equation
cdef double a = -(m_00 + m_11 + m_22)
cdef double b = m_00 * m_11 + m_00 * m_22 + m_11 * m_22 - \
( (m_01 * m_01) + (m_02 * m_02) + (m_12 * m_12) )
cdef double c = m_22 * (m_01 * m_01) + m_11 * (m_02 * m_02) + \
m_00 * (m_12 * m_12) - \
(m_00 * m_11 * m_22 + 2 * m_01 * m_02 * m_12)
# prep for formula
cdef double Q = (a * a - 3 * b)/9
cdef double R = ( 2 * pow(a,3) - 9 * a * b + 27 * c ) / 54
cdef double qSqrt = sqrt( pow(Q,3) )
# make sure not to divide by zero
if(qSqrt == 0):
        eigs, vecs = np.linalg.eigh(arg)
        # return the eigenvalue of smallest magnitude (the original returned its index)
        minEgValue = eigs[np.argmin(np.abs(eigs))]
        return <double> minEgValue
# make sure that R / qSqrt does not step outside [-1,1] (may happen by arithmetic
# rounding errors)
cdef double r_qSrt = R / qSqrt
if(r_qSrt < -1):
r_qSrt = -1
if(r_qSrt > 1):
r_qSrt = 1
# prep for formula
cdef double theta = acos( r_qSrt )
cdef double PI = 3.14159265358979323846
# formula for computing the eigenvalues
cdef double eig1 = -2 * sqrt(Q) * cos( theta/3 ) - a / 3
cdef double eig2 = -2 * sqrt(Q) * cos( (theta + 2 * PI)/3 ) - a / 3
cdef double eig3 = -2 * sqrt(Q) * cos( (theta - 2 * PI)/3 ) - a / 3
# find the eigenvalue with the smallest absolute value
# (eig1 <= eig3 <= eig2 but have not found a reliable source on that eig1
# is always positive)
cdef double smallestEgValue
if(eig1*eig1 < eig2*eig2):
if(eig1*eig1 < eig3*eig3):
# eig1^2 is smaller than both eig1^2 and eig2^3
smallestEgValue = eig1
else:
# eig3^2 is smaller than both eig1^2 and eig2^2
smallestEgValue = eig3
else:
if(eig2*eig2 < eig3*eig3):
# eig2^2 is smaller than both eig1^2 and eig3^2
smallestEgValue = eig2
else:
# eig3^2 is smaller than both eig1^2 and eig2^2
smallestEgValue = eig3
return smallestEgValue
# # function for normalizing a 3*1 vector
cdef double* normalizeVector(double[3] vector):
cdef double norm = sqrt(calcDot(vector, vector))
# make sure not to divide by zero
if(norm == 0):
return vector
vector[0] = vector[0]/norm
vector[1] = vector[1]/norm
vector[2] = vector[2]/norm
return vector
# function for rotating a vector twice (using the operation from the
# standard 2-D rotation matrix, as described e.g. on Wikipedia)
cdef inline double* rotateVector(double[3] v, double[3] v_rotated):
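    # NOTE: 0.86 approximates cos(30 degrees) (~0.866) and 1/2 = sin(30 degrees),
    # so each step below is roughly a 30-degree rotation; exactness is
    # presumably not required, as the caller only needs a direction that is
    # not parallel to the input vector.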
#first rotation
cdef double x = v[0]
cdef double y = v[1] * 0.86 - v[0]/2
cdef double z = v[1]/2 + v[0] * 0.86
# second rotation
v_rotated[0] = x * 0.86 - y/2
v_rotated[1] = x/2 + y * 0.86
v_rotated[2] = z
return v_rotated
# function for calculating cross product of two 3x1 vectors
cdef inline double* calcCross(double[3] v1, double[3] v2, double[3] crossVec):
crossVec[0] = v1[1] * v2[2] - v1[2] * v2[1]
crossVec[1] = v1[2] * v2[0] - v1[0] * v2[2]
crossVec[2] = v1[0] * v2[1] - v1[1] * v2[0]
return crossVec
# function for calculating dot product between two 3x1 vectors
cdef inline double calcDot(double[3] v1, double[3] v2):
cdef double prod = v1[0] * v2[0] + v1[1] * v2[1] + v1[2] * v2[2]
return prod
<|end_of_text|>from cpython.unicode cimport PyUnicode_Tailmatch
from libcpp.string cimport string
from libcpp.unordered_map cimport unordered_map
cdef extern from "pyport.h" nogil:
Py_ssize_t PY_SSIZE_T_MAX
import time, io, cPickle, config
from common import docno_matcher
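# The input is assumed to be TREC-style SGML with one tag per line and one
# document per <DOC>...</DOC> block, e.g. (hypothetical DOCNO value):
#   <DOC>
#   <DOCNO> WEB-000001 </DOCNO>
#   ...
#   </DOC>
# Lines are buffered until a closing </DOC> is seen; the buffered lines are
# then joined and the DOCNO extracted with docno_matcher.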
cdef int main(str infn, str outfn):
cdef:
list storage = [u'']*300
int doc_idx = 0
unicode row = u''
unicode document = u''
int rows_in_storage = 0
int i = 0
unsigned long tic
unordered_map[string,int] DOCNO_dict
tic = time.time()
for row in io.open(infn, mode='rt', encoding='utf8', errors='strict',
buffering=1000000):
row = row.strip()
storage[i] = row
i += 1
if PyUnicode_Tailmatch(row, u"</DOC>", 0, PY_SSIZE_T_MAX, 1) == 1:
doc_idx += 1
rows_in_storage = i
            i = 0
document = u' '.join(storage[:rows_in_storage])
if doc_idx % 100000 == 0:
print doc_idx, doc_idx / 95000.0, '%', (time.time() - tic)/60,'min'
docno = docno_matcher.match(document).group(1)
            # std::string keys require bytes, so encode the unicode DOCNO
            DOCNO_dict[docno.encode('utf8')] = doc_idx
with open(outfn, "wb") as f:
cPickle.dump(DOCNO_dict, f, protocol=-1)
return 1
def main_py(infn=config.TREC_WEB_DBPEDIA, outfn=config.TREC_WEB_DOCNO2ID_PKL):
    main(infn, outfn)
<|end_of_text|>#cython: nonecheck=True
"""
A HilbertArray is a vector in a HilbertSpace. Internally, it is backed by a
numpy.array. HilbertArray's are to be created using the
:meth:`HilbertSpace.array` method.
"""
from collections import defaultdict
import numpy as np
cimport cpython
cimport numpy as np
from qitensor import have_sage
from qitensor.exceptions import DuplicatedSpaceError, HilbertError, \
HilbertIndexError, HilbertShapeError, \
NotKetSpaceError, MismatchedSpaceError
from qitensor.space import HilbertSpace, _shape_product, create_space1, create_space2
from qitensor.space cimport HilbertSpace, _shape_product, create_space1, create_space2
from qitensor.atom import HilbertAtom
from qitensor.atom cimport HilbertAtom
from qitensor.arrayformatter import FORMATTER
from qitensor.subspace import TensorSubspace
__all__ = ['HilbertArray']
def _parse_space(s):
if s is None:
return None
elif isinstance(s, HilbertSpace):
return s.sorted_kets + s.sorted_bras
else:
return sum((_parse_space(x) for x in s), [])
def _unreduce_v1(space, nparray):
"""
This is the function that handles restoring a pickle.
"""
return space.array(nparray)
# This holds the result of all the difficult thinking that HilbertArray.tensordot() needs.
# It is cached for speed.
cdef dict _td_wisdom_cache = dict()
cdef class TensordotWisdom:
cdef tuple contract_axes
cdef int out_num_axes
cdef HilbertSpace ret_space
cdef tuple transpose_axes
def __init__(self, hs, ohs, contraction_spaces):
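        # Worked example (mirrors the tensordot() doctests below): for
        # hs = |a><b,c| and ohs = |c><a| with contraction_spaces=None,
        # mul_space works out to frozenset([|c>]) and ret_space to |a><a,b|.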
        cdef frozenset mul_space
if contraction_spaces is None:
mul_space = frozenset([x.H for x in hs.bra_set]) & ohs.ket_set
elif isinstance(contraction_spaces, frozenset):
mul_space = contraction_spaces
for x in mul_space:
if not isinstance(x, HilbertSpace):
raise TypeError('contraction space must consist of '+
'HilbertAtoms')
if x.is_dual():
raise NotKetSpaceError('contraction space must consist of '+
'kets')
elif isinstance(contraction_spaces, HilbertSpace):
if len(contraction_spaces.bra_set) > 0:
raise NotKetSpaceError('contraction space must consist of kets')
mul_space = contraction_spaces.ket_set
else:
raise TypeError('contraction space must be HilbertSpace '+
'or frozenset')
for x in mul_space:
assert isinstance(x, HilbertAtom)
assert not x.is_dual
cdef frozenset mul_H = frozenset([x.H for x in mul_space])
#print mul_space
cdef list mul_space_sorted = sorted(mul_space)
cdef list axes_self = [hs.axes_lookup[x.H] for x in mul_space_sorted]
cdef list axes_other = [ohs.axes_lookup[x] for x in mul_space_sorted]
self.contract_axes = (axes_self, axes_other)
cdef list td_axes = \
hs.sorted_kets + \
[x for x in hs.sorted_bras if not x in mul_H] + \
[x for x in ohs.sorted_kets if not x in mul_space] + \
ohs.sorted_bras
self.out_num_axes = len(td_axes)
cdef:
frozenset ket1
frozenset ket2
frozenset bra1
frozenset bra2
if self.out_num_axes:
ket1 = hs.ket_set
ket2 = (ohs.ket_set-mul_space)
bra1 = (hs.bra_set-mul_H)
bra2 = ohs.bra_set
if not (ket1.isdisjoint(ket2) and bra1.isdisjoint(bra2)):
raise DuplicatedSpaceError(
create_space2(ket1 & ket2, bra1 & bra2))
self.ret_space = create_space2(ket1 | ket2, bra1 | bra2)
self.transpose_axes = tuple([td_axes.index(x) for x in self.ret_space.axes])
cdef class HilbertArray:
def __init__(self, HilbertSpace space, data, cpython.bool noinit_data, cpython.bool reshape, input_axes):
"""
Don't call this constructor yourself, use HilbertSpace.array
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> x = ha.array([1, 2]); x
HilbertArray(|a>,
array([ 1.+0.j, 2.+0.j]))
sage: from qitensor import qudit
sage: ha = qudit('a', 3)
sage: hb = qudit('b', 5)
sage: x = (ha.O * hb).random_array()
sage: TestSuite(x).run()
"""
cdef HilbertSpace hs = space
# (Sphinx docstring)
#: The HilbertSpace of this array.
self.space = hs
# (Sphinx docstring)
#: An array telling which HilbertAtom corresponds to each axis of ``self.nparray``.
self.axes = hs.axes
cdef tuple data_shape
if noinit_data:
assert data is None
assert input_axes is None
# (Sphinx docstring)
#: Provides direct access to the underlying numpy array.
self.nparray = None
elif data is None:
if not input_axes is None:
raise HilbertError("data parameter must be given when input_axes is given")
self.nparray = np.zeros(hs.shape, dtype=hs.base_field.dtype)
else:
self.nparray = np.array(data, dtype=hs.base_field.dtype)
if input_axes is None:
data_shape = hs.shape
else:
data_shape = tuple([ len(spc.indices) for spc in input_axes ])
# make sure given array is the right size
if reshape:
            if self.nparray.size != _shape_product(data_shape):
                raise HilbertShapeError(_shape_product(data.shape),
                    _shape_product(data_shape))
            self.nparray = self.nparray.reshape(data_shape)
        if np.shape(self.nparray) != data_shape:
            raise HilbertShapeError(np.shape(self.nparray), data_shape)
        if input_axes is not None:
            if frozenset(input_axes) != frozenset(self.axes):
raise MismatchedSpaceError("input_axes doesn't match array space")
shuffle = [ input_axes.index(x) for x in self.axes ]
self.nparray = self.nparray.transpose(shuffle)
if self.nparray is not None:
cast_fn = space.base_field.input_cast_function()
self.nparray = np.vectorize(cast_fn)(self.nparray)
def __reduce__(self):
"""
Tells pickle how to store this object.
"""
return _unreduce_v1, (self.space, self.nparray)
cpdef copy(self):
"""
Creates a copy (not a view) of this array.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> x = ha.array([1, 2]); x
HilbertArray(|a>,
array([ 1.+0.j, 2.+0.j]))
>>> y = x.copy()
>>> y[0] = 3
>>> y
HilbertArray(|a>,
array([ 3.+0.j, 2.+0.j]))
>>> x
HilbertArray(|a>,
array([ 1.+0.j, 2.+0.j]))
"""
ret = self.space.array(None, True)
ret.nparray = self.nparray.copy()
return ret
cpdef _reassign(self, HilbertArray other):
"""
Used internally to change the contents of a HilbertArray without creating a new object.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> x = ha.random_unitary()
>>> y = ha.random_unitary()
>>> z = x
>>> x *= y
>>> x is z
True
"""
self.space = other.space
self.nparray = other.nparray
self.axes = other.axes
cpdef get_dim(self, atom):
"""
Returns the axis corresponding to the given HilbertAtom.
This is useful when working with the underlying numpy array.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> hb = qubit('b')
>>> x = (ha * hb).array([[1, 2], [4, 8]])
>>> [x.get_dim(h) for h in (ha, hb)]
[0, 1]
>>> x.nparray.sum(axis=x.get_dim(ha))
array([ 5.+0.j, 10.+0.j])
>>> x.nparray.sum(axis=x.get_dim(hb))
array([ 3.+0.j, 12.+0.j])
"""
return self.space.axes_lookup[atom]
cpdef _assert_same_axes(self, other):
"""
        Throws an exception if self.axes != other.axes. Used when determining whether
two arrays are compatible for certain operations (such as addition).
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> hb = qubit('b')
>>> x = ha.array()
>>> y = ha.array()
>>> z = hb.array()
>>> x._assert_same_axes(y)
>>> x._assert_same_axes(z)
Traceback (most recent call last):
...
MismatchedSpaceError: 'Mismatched HilbertSpaces: |a> vs. |b>'
"""
        if self.axes != other.axes:
raise MismatchedSpaceError('Mismatched HilbertSpaces: '+
repr(self.space)+' vs. '+repr(other.space))
cpdef assert_density_matrix(self, \
check_hermitian=True, check_normalized=True, check_positive=True \
):
"""
Throws an error unless the input is a density matrix.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> hb = qubit('b')
>>> ha.random_density().assert_density_matrix()
>>> # small errors are accepted
>>> (ha.random_density() + ha.O.random_array()*1e-14).assert_density_matrix()
>>> ha.eye().assert_density_matrix()
Traceback (most recent call last):
...
HilbertError: 'density matrix was not normalized: trace=(2+0j)'
>>> (ha.ket(0) * hb.bra(0)).assert_density_matrix()
Traceback (most recent call last):
...
HilbertError: 'not a density matrix: |a><b|'
        >>> ha.random_unitary().assert_density_matrix()
Traceback (most recent call last):
...
HilbertError: 'not a density matrix: not Hermitian'
>>> U = ha.random_unitary()
>>> (U * ha.diag([ 1.1, -0.1 ]) * U.H).assert_density_matrix()
Traceback (most recent call last):
...
HilbertError: 'not a density matrix: had negative eigenvalue (-0.1)'
"""
toler = 1e-9
if not self.space.is_symmetric():
raise HilbertError("not a density matrix: "+str(self.space))
if check_hermitian and not (self == self.H or self.closeto(self.H)):
raise HilbertError("not a density matrix: not Hermitian")
if check_normalized:
tr = self.trace()
if abs(tr - 1) > toler:
raise HilbertError('density matrix was not normalized: trace='+str(tr))
if check_positive:
ew = np.min(self.eigvals(hermit=True))
if ew < -toler:
raise HilbertError('not a density matrix: had negative eigenvalue ('+
str(ew)+')')
cpdef is_positive(self):
"""
Returns true if this is a positive operator.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> hb = qubit('b')
>>> (1.23 * ha.random_density()).is_positive()
True
>>> # small errors are accepted
>>> (ha.random_density() + ha.O.random_array()*1e-14).is_positive()
True
>>> (ha.ket(0) * hb.bra(0)).is_positive()
False
>>> ha.random_unitary().is_positive()
False
>>> U = ha.random_unitary()
>>> (U * ha.diag([ 1.1, -0.1 ]) * U.H).is_positive()
False
>>> (U * ha.diag([ 1.1, 0.0 ]) * U.H).is_positive()
True
"""
toler = 1e-9
if not self.space.is_symmetric():
return False
if not (self == self.H or self.closeto(self.H)):
return False
ew = np.min(self.eigvalsh())
return ew > -toler
cpdef set_data(self, new_data):
"""
Sets this array equal to the given argument.
:param new_data: the new data
:type new_data: HilbertArray or anything that can be made into a
numpy.array
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> hb = qubit('b')
>>> x = (ha*hb).array()
>>> y = (ha*hb).random_array()
>>> x.set_data(y)
>>> x == y
True
>>> x.set_data([[1, 2], [3, 4]])
>>> x
HilbertArray(|a,b>,
array([[ 1.+0.j, 2.+0.j],
[ 3.+0.j, 4.+0.j]]))
"""
if isinstance(new_data, HilbertArray):
self._assert_same_axes(new_data)
self.set_data(new_data.nparray)
else:
# This is needed to make slices work properly
self.nparray[:] = new_data
cpdef tensordot(self, HilbertArray other, contraction_spaces=None):
"""
Inner or outer product of two arrays.
:param other: the other array taking place in this operation
:type other: HilbertArray
:param contraction_spaces: the spaces on which to do a tensor
contraction
        :type contraction_spaces: None, frozenset, or HilbertSpace; default None
If ``contraction_spaces`` is ``None`` (the default), contraction will
be across the intersection of the bra space of this array and the ket
space of ``other``. If a ``frozenset`` is given, it should consist of
``HilbertAtom`` objects which are kets. If a ``HilbertSpace`` is
given, it must be a ket space.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> hb = qubit('b')
>>> hc = qubit('c')
>>> x = (ha * hb.H * hc.H).random_array()
>>> x.space
|a><b,c|
>>> y = (hc * ha.H).random_array()
>>> y.space
|c><a|
>>> x.tensordot(y) == x * y
True
>>> x.tensordot(y).space
|a><a,b|
>>> x.tensordot(y, frozenset()).space
|a,c><a,b,c|
>>> x.tensordot(y, hc).space
|a><a,b|
>>> (ha.bra(0) * hb.bra(0)) * (ha.ket(0) * hb.ket(0))
(1+0j)
>>> (ha.bra(0) * hb.bra(0)) * (ha.ket(0) * hb.ket(1))
0j
>>> xa = ha.O.random_array()
>>> xb = hb.O.random_array()
>>> ((xa*xa)*(xb*xb)).space
|a,b><a,b|
>>> ((xa*xa)*(xb*xb)).closeto((xa*xb)*(xa*xb))
True
"""
#print str(self.space)+'*'+str(other.space)
self.space.base_field.assert_same(other.space.base_field)
# Shortcut for common case.
# Not needed now that wisdom optimization is used.
#if (contraction_spaces is None) and (self.space._is_simple_dyad) and (self.space == other.space) and (self.space == self.space.H):
# return self.space.array(np.dot(self.nparray, other.nparray))
wisdom_key = (self.space, other.space, contraction_spaces)
cdef TensordotWisdom wisdom = _td_wisdom_cache.get(wisdom_key, None)
if wisdom is None:
wisdom = TensordotWisdom(self.space, other.space, contraction_spaces)
_td_wisdom_cache[wisdom_key] = wisdom
cdef np.ndarray td = np.tensordot(self.nparray, other.nparray,
axes=wisdom.contract_axes)
assert td.dtype == self.space.base_field.dtype
assert wisdom.out_num_axes == td.ndim
if wisdom.out_num_axes == 0:
# convert 0-d array to scalar
return td[()]
else:
ret = wisdom.ret_space.array(None, True)
ret.nparray = td.transpose(wisdom.transpose_axes)
return ret
cpdef tensor(self, HilbertArray other):
"""
        Perform a tensor product between two arrays.
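
        A minimal usage sketch (consistent with the tensordot doctests):

        >>> from qitensor import qubit
        >>> ha = qubit('a')
        >>> hb = qubit('b')
        >>> x = ha.random_array()
        >>> y = hb.random_array()
        >>> x.tensor(y) == x * y
        True
        >>> x.tensor(y).space
        |a,b>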
"""
return self.tensordot(other, contraction_spaces=frozenset())
cpdef transpose(self, tpose_axes=None):
"""
Perform a transpose or partial transpose operation.
:param tpose_axes: the space on which to transpose
:type tpose_axes: HilbertSpace or None; default None
If ``tpose_axes`` is ``None`` a full transpose is performed.
Otherwise, ``tpose_axes`` should be a ``HilbertSpace``. The array will
be transposed across all axes which are part of the bra space or ket
space of ``tpose_axes``.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> hb = qubit('b')
>>> x = ha.O.array([[1, 2], [3, 4]]); x
HilbertArray(|a><a|,
array([[ 1.+0.j, 2.+0.j],
[ 3.+0.j, 4.+0.j]]))
>>> x.transpose()
HilbertArray(|a><a|,
array([[ 1.+0.j, 3.+0.j],
[ 2.+0.j, 4.+0.j]]))
>>> x.transpose() == x.T
True
>>> y = (ha * hb).random_array()
>>> y.space
|a,b>
>>> y.transpose(ha).space
|b><a|
>>> y.transpose(ha) == y.transpose(ha.H)
True
"""
if tpose_axes is None:
tpose_axes = self.space
tpose_atoms = []
for x in tpose_axes.bra_set | tpose_axes.ket_set:
if not (x in self.axes or x.H in self.axes):
raise HilbertError('Hilbert space not part of this '+
'array: '+repr(x))
if x.is_dual:
tpose_atoms.append(x.H)
else:
tpose_atoms.append(x)
in_space_dualled = []
for x in self.axes:
y = x.H if x.is_dual else x
if y in tpose_atoms:
in_space_dualled.append(x.H)
else:
in_space_dualled.append(x)
out_space = create_space1(in_space_dualled)
ret = out_space.array(noinit_data=True)
permute = tuple([in_space_dualled.index(x) for x in ret.axes])
ret.nparray = self.nparray.transpose(permute)
return ret
cpdef relabel(self, from_spaces, to_spaces=None):
"""
Returns a HilbertArray with the same data as this one, but with axes relabelled.
:param from_space: the old space, or list of spaces, or dict mapping old->new
:type from_space: HilbertSpace, list, dict
:param to_space: the new space (or list of spaces)
:type to_space: HilbertSpace, list
This method changes the labels of the axes of this array. It is permitted to
change a bra to a ket or vice-versa, in which case the effect is equivalent to
doing a partial transpose. There are three ways to call this method:
* You can pass two HilbertSpaces, in which case the first space will be
relabeled to the second one. If the passed spaces are not HilbertAtoms
(i.e. they are composites like ``|a,b>``) then the mapping from atom to
atom is done using the same alphabetical sorting that is used when
displaying the name of the space. In this case, the other two ways of
calling relabel would probably be clearer.
* You can pass a pair of tuples of HilbertSpaces. In this case the first
space from the first list gets renamed to the first space from the second
list, and so on.
* You can pass a dictionary of HilbertSpaces of the form {from_atom => to_atom,...}.
FIXME - does it return a view? should it?
>>> import numpy
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> hb = qubit('b')
>>> hc = qubit('c')
>>> x = (ha * hb).random_array()
>>> x.space
|a,b>
>>> x.relabel(hb, hc).space
|a,c>
>>> numpy.allclose(x.relabel(hb, hc).nparray, x.nparray)
True
>>> x.relabel(ha, hc).space
|b,c>
>>> # underlying nparray is different because axes order changed
>>> numpy.allclose(x.relabel(ha, hc).nparray, x.nparray)
False
>>> x.relabel(ha * hb, ha.prime * hb.prime).space
|a',b'>
>>> y = ha.O.array()
>>> y.space
|a><a|
>>> # relabeling is needed because |a,a> is not allowed
>>> y.relabel(ha.H, ha.prime.H).transpose(ha.prime).space
|a,a'>
>>> z = (ha*hb.H*hc.O).random_array()
>>> z.space
|a,c><b,c|
>>> z1 = z.relabel((ha, hc.H), (ha.H, hc.prime))
>>> z1.space
|c,c'><a,b|
>>> z2 = z.relabel({ha: ha.H, hc.H: hc.prime})
>>> z2.space
|c,c'><a,b|
>>> z1 == z2
True
>>> z == z1.relabel({ha.H: ha, hc.prime: hc.H})
True
>>> z.relabel({ hb.H*hc.H: ha.H*hb.H }) == z.relabel({ hb.H: ha.H, hc.H: hb.H })
True
"""
### Turn input into a mapping
if isinstance(from_spaces, HilbertSpace):
from_spaces = (from_spaces, )
if isinstance(to_spaces, HilbertSpace):
to_spaces = (to_spaces, )
if isinstance(from_spaces, dict):
assert to_spaces is None
mapping = from_spaces
HilbertSpace._assert_nodup_space(mapping.values(), "an output space was listed twice")
else:
# Cast to list, in case we were given a generator. Throw error if not list-like.
from_spaces = list(from_spaces)
to_spaces = list(to_spaces)
for (a, b) in zip(from_spaces, to_spaces):
if not isinstance(a, HilbertSpace) or not isinstance(b, HilbertSpace):
raise HilbertError("expected a HilbertSpace")
                if a.dim() != b.dim():
# dimension will be checked again below, but this guards against inputs
# like ((a*b,c), (x,y*z)).
raise HilbertShapeError(a.dim(), b.dim())
from_spaces = HilbertSpace._expand_list_to_atoms(from_spaces)
to_spaces = HilbertSpace._expand_list_to_atoms(to_spaces)
            if len(from_spaces) != len(to_spaces):
raise MismatchedSpaceError("Number of spaces does not match")
HilbertSpace._assert_nodup_space(from_spaces, "an input space was listed twice")
HilbertSpace._assert_nodup_space(to_spaces, "an output space was listed twice")
mapping = dict(zip(from_spaces, to_spaces))
### Split up any HilbertSpace in the mapping into HilbertAtoms
m2 = {}
for (k,v) in mapping.items():
if not isinstance(k, HilbertSpace) or not isinstance(v, HilbertSpace):
raise HilbertError("expected a HilbertSpace")
atoms_k = sorted(k.ket_set) + sorted(k.bra_set)
atoms_v = sorted(v.ket_set) + sorted(v.bra_set)
            if len(atoms_k) != len(atoms_v):
raise MismatchedSpaceError("Number of spaces does not match")
for (ak,av) in zip(atoms_k, atoms_v):
m2[ak] = av
mapping = m2
### Validate
for (k,v) in mapping.items():
assert isinstance(k, HilbertAtom)
assert isinstance(v, HilbertAtom)
if not k in self.space.bra_ket_set:
raise MismatchedSpaceError("not in input space: "+repr(k))
            if k.dim() != v.dim():
raise HilbertShapeError(k.dim(), v.dim())
### Produce the result
xlate_list = [ mapping[x] if x in mapping else x for x in self.axes ]
HilbertSpace._assert_nodup_space(xlate_list, "relabling would cause a duplicated space")
new_space = create_space1(xlate_list)
return new_space.array(data=self.nparray, input_axes=xlate_list)
cpdef relabel_prime(self):
"""
Returns a relabeled array with primed spaces.
See also: :func:`relabel`, :func:`qitensor.atom.HilbertAtom.prime`
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> hb = qubit('b')
>>> x = (ha*hb).array()
>>> x.relabel_prime().space
|a',b'>
>>> (x * x.relabel_prime()).space
|a,a',b,b'>
"""
return self.relabel(self.axes, [x.prime for x in self.axes])
cpdef apply_map(self, fn):
"""
Apply the given function to each element of the array.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> v = ha.array([1, -2])
>>> v.apply_map(lambda x: abs(x))
HilbertArray(|a>,
array([ 1.+0.j, 2.+0.j]))
"""
dtype = self.space.base_field.dtype
arr = np.vectorize(fn, otypes=[dtype])(self.nparray)
return self.space.array(arr)
cpdef closeto(self, HilbertArray other, rtol=1e-05, atol=1e-08):
"""
Checks whether two arrays are nearly equal, similar to numpy.allclose.
For details, see the numpy.allclose documentation.
>>> from qitensor import qudit
>>> ha = qudit('a', 10)
>>> x = ha.random_array()
>>> y = ha.random_array()
>>> x.closeto(y)
False
>>> x.closeto(x * (1+1e-10))
True
>>> x == x * (1+1e-10)
False
"""
if not isinstance(other, HilbertArray):
raise TypeError('other must be HilbertArray')
self._assert_same_axes(other)
return np.allclose(self.nparray, other.nparray, rtol=rtol, atol=atol)
def __richcmp__(self, other, op):
"""
Checks whether two arrays are equal.
>>> from qitensor import qudit
>>> ha = qudit('a', 10)
>>> hb = qudit('b', 10)
>>> x = ha.array()
>>> y = ha.random_array()
>>> x == x and y == y
True
>>> x == y
False
        >>> x == y * 0
True
>>> x == x.relabel({ ha: hb })
False
        >>> x != x or y != y
        False
        >>> x != y
        True
        >>> x != y * 0
        False
        >>> x != x.relabel({ ha: hb })
True
>>> x == 0
FIXME - does this doctest even run?
>>> x > 0
FIXME - does this doctest even run?
"""
if not isinstance(other, HilbertArray):
eq = False
        elif self.space != other.space:
eq = False
else:
eq = np.all(self.nparray == other.nparray)
if op == 2:
return eq
elif op == 3:
return not eq
else:
return NotImplemented
cpdef lmul(self, HilbertArray other):
"""
Returns other*self.
        This is useful for listing operations in chronological order when
implementing quantum circuits.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> state = ha.random_array()
>>> ha.X * ha.Y * state == state.lmul(ha.Y).lmul(ha.X)
True
"""
return other * self
def __mul__(self, other):
"""
Multiplies two arrays, or an array and a scalar.
Given two arrays, contracts between the common bra spaces of the left
argument and ket spaces of the right argument. This is equivalent to
calling self.tensordot(other).
Given an array and scalar, does the obvious thing.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> hb = qubit('b')
>>> hc = qubit('c')
>>> hx = qubit('x')
>>> hy = qubit('y')
>>> q = (ha*hb.H*hc.H).random_array()
>>> q.space
|a><b,c|
>>> r = (hc*hx*hy.H).random_array()
>>> r.space
|c,x><y|
>>> (q*r).space
|a,x><b,y|
>>> (q+q).closeto(2*q)
True
>>> (q+q).closeto(q*2)
True
"""
# Cython calls arithmetic methods with arguments reversed instead of __r*__ methods
if not isinstance(self, HilbertArray):
return other * self
if isinstance(other, HilbertArray):
return self.tensordot(other)
else:
cast_fn = self.space.base_field.input_cast_function()
try:
x = cast_fn(other)
except TypeError:
return NotImplemented
ret = self.copy()
ret *= x
return ret
def __imul__(self, other):
"""
In-place multiplication.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> x = ha.random_unitary()
>>> y = ha.random_unitary()
>>> x_copy = x.copy()
>>> x_ref = x
>>> x *= y
>>> x is x_ref
True
>>> x is x_copy
False
>>> x *= 2
>>> x is x_ref
True
>>> x.closeto(x_copy*y*2)
True
"""
if isinstance(other, HilbertArray):
self._reassign(self * other)
else:
cast_fn = self.space.base_field.input_cast_function()
try:
x = cast_fn(other)
except TypeError:
return NotImplemented
self.nparray *= x
return self
def __add__(self, other):
"""
Adds two arrays.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> hb = qubit('b')
>>> x = ha.random_array()
>>> y = hb.random_array()
>>> (x+x).closeto(2*x)
True
>>> x+y
Traceback (most recent call last):
...
MismatchedSpaceError: 'Mismatched HilbertSpaces: |a> vs. |b>'
"""
# Cython calls arithmetic methods with arguments reversed instead of __r*__ methods
if not isinstance(self, HilbertArray) or not isinstance(other, HilbertArray):
return NotImplemented
ret = self.copy()
ret += other
return ret
def __iadd__(self, other):
"""
In-place addition.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> x = ha.random_array()
>>> y = ha.random_array()
>>> x_copy = x.copy()
>>> x_ref = x
>>> x += y
>>> x is x_ref
True
>>> x is x_copy
False
>>> x.closeto(x_copy+y)
True
"""
if not isinstance(other, HilbertArray):
return NotImplemented
self._assert_same_axes(other)
self.nparray += other.nparray
return self
def __sub__(self, other):
"""
Subtracts two arrays.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> hb = qubit('b')
>>> x = ha.random_array()
>>> y = hb.random_array()
>>> (2*x-x).closeto(x)
True
>>> x-y
Traceback (most recent call last):
...
MismatchedSpaceError: 'Mismatched HilbertSpaces: |a> vs. |b>'
"""
if not isinstance(other, HilbertArray):
return NotImplemented
ret = self.copy()
ret -= other
return ret
def __neg__(self):
"""
Returns negation of this array.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> x = ha.random_array()
>>> y = -x
>>> (x+y).norm() == 0
True
"""
return self * -1
def __isub__(self, other):
"""
In-place subtraction.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> x = ha.random_array()
>>> y = ha.random_array()
>>> x_copy = x.copy()
>>> x_ref = x
>>> x -= y
>>> x is x_ref
True
>>> x is x_copy
False
>>> x.closeto(x_copy-y)
True
"""
if not isinstance(other, HilbertArray):
return NotImplemented
self._assert_same_axes(other)
self.nparray -= other.nparray
return self
def _mydiv(self, other):
"""
Divide by a scalar.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> x = ha.random_array()
>>> (x/2).closeto(x*0.5)
True
>>> # the following exception may be thrown from within numpy
>>> x / x
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for /: 'qitensor.array.HilbertArray' and 'qitensor.array.HilbertArray'
"""
# Cython calls arithmetic methods with arguments reversed instead of __r*__ methods
if not isinstance(self, HilbertArray):
return NotImplemented
cast_fn = self.space.base_field.input_cast_function()
try:
x = cast_fn(other)
except TypeError:
return NotImplemented
ret = self.copy()
ret /= x
return ret
def __div__(self, other):
return self._mydiv(other)
def __truediv__(self, other):
return self._mydiv(other)
def _myidiv(self, other):
"""
In-place division by a scalar.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> x = ha.random_array()
>>> x_copy = x.copy()
>>> x_ref = x
>>> x /= 2
>>> x is x_ref
True
>>> x is x_copy
False
>>> x.closeto(x_copy*0.5)
True
"""
cast_fn = self.space.base_field.input_cast_function()
try:
x = cast_fn(other)
except TypeError:
return NotImplemented
self.nparray /= x
return self
def __idiv__(self, other):
return self._myidiv(other)
def __itruediv__(self, other):
return self._myidiv(other)
def __pow__(self, other, mod):
assert mod is None
        if self.space != self.space.H:
raise HilbertError('bra space must be the same as ket space '+
'(space was '+repr(self.space)+')')
return self.np_matrix_transform( \
lambda x: self.space.base_field.mat_pow(x, other))
def __ipow__(self, other):
self.nparray[:] = self.__pow__(other).nparray
return self
cpdef _index_key_to_map(self, key):
"""
Converts indices to a standard form, for use by _get_set_item.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> hb = qubit('b')
>>> x = ha.array()
>>> y = (ha.O*hb).array()
>>> x._index_key_to_map(2)
{|a>: 2}
>>> y._index_key_to_map(2)
Traceback (most recent call last):
...
HilbertIndexError: 'Wrong number of indices given (1 for |a,b><a|)'
>>> sorted(y._index_key_to_map((1,2,3)).items())
[(|a>, 1), (<a|, 3), (|b>, 2)]
>>> sorted(y._index_key_to_map([1,2,3]).items())
[(|a>, 1), (<a|, 3), (|b>, 2)]
>>> y._index_key_to_map((1,2,3,4))
Traceback (most recent call last):
...
HilbertIndexError: 'Wrong number of indices given (4 for |a,b><a|)'
>>> sorted(y._index_key_to_map({ ha: 1, ha.H: 2}).items())
[(|a>, 1), (<a|, 2)]
>>> y._index_key_to_map({ ha: 1, hb.H: 2})
Traceback (most recent call last):
...
HilbertIndexError: 'Hilbert space not part of this array: <b|'
"""
index_map = {}
if isinstance(key, dict):
for (k, v) in key.items():
if not isinstance(k, HilbertSpace):
raise TypeError('not a HilbertSpace: '+repr(k))
if not isinstance(v, list) and not isinstance(v, tuple):
v = (v,)
atoms = k.sorted_kets + k.sorted_bras
                if len(atoms) != len(v):
raise HilbertIndexError("Number of indices doesn't match "+
"number of HilbertSpaces")
for (hilb, idx) in zip(atoms, v):
index_map[hilb] = idx
elif isinstance(key, tuple) or isinstance(key, list):
            if len(key) != len(self.axes):
raise HilbertIndexError("Wrong number of indices given "+
"(%d for %s)" % (len(key), str(self.space)))
for (i, k) in enumerate(key):
if isinstance(k, slice):
if k == slice(None):
# full slice is the same as not specifying anything for
# this axis
pass
else:
raise HilbertIndexError("Slices are not allowed")
else:
index_map[self.axes[i]] = k
else:
            if len(self.axes) != 1:
raise HilbertIndexError("Wrong number of indices given "+
"(1 for %s)" % str(self.space))
if isinstance(key, slice):
if key == slice(None):
# full slice is the same as not specifying anything for
# this axis
pass
else:
raise HilbertIndexError("Slices are not allowed")
else:
index_map[self.axes[0]] = key
for (spc, idx) in index_map.items():
if not isinstance(spc, HilbertSpace):
raise TypeError('not a HilbertSpace: '+repr(spc))
if not spc in self.axes:
raise HilbertIndexError('Hilbert space not part of this '+
'array: '+repr(spc))
return index_map
cpdef _get_set_item(self, key, do_set=False, set_val=None):
"""
The guts for the __getitem__ and __setitem__ methods.
Doctests are in doc/examples/slices.rst.
"""
index_map = self._index_key_to_map(key)
out_axes = []
slice_list = []
for x in self.axes:
if x in index_map:
try:
idx_n = x.indices.index(index_map[x])
except ValueError:
raise HilbertIndexError('Index set for '+repr(x)+' does '+
'not contain '+repr(index_map[x]))
slice_list.append(idx_n)
else:
slice_list.append(slice(None))
out_axes.append(x)
assert len(slice_list) == self.nparray.ndim
if do_set and len(out_axes) == 0:
# must do assignment like this, since in the 1-d case numpy will
# return a scalar rather than a view
self.nparray[tuple(slice_list)] = set_val
sliced = self.nparray[tuple(slice_list)]
if len(out_axes) == 0:
# Return a scalar, not a HilbertArray.
# We already did do_set, if applicable.
return sliced
else:
assert len(sliced.shape) == len(out_axes)
ret = create_space1(out_axes). \
array(noinit_data=True)
permute = tuple([out_axes.index(x) for x in ret.axes])
ret.nparray = sliced.transpose(permute)
if do_set:
ret.set_data(set_val)
return ret
def __getitem__(self, key):
"""
Gets an item or slice.
Doctests are in doc/examples/slices.rst.
"""
return self._get_set_item(key)
def __setitem__(self, key, val):
"""
Sets an item or slice.
Doctests are in doc/examples/slices.rst.
"""
self._get_set_item(key, True, val)
cpdef _space_string(self, spc_set):
return repr(create_space1(spc_set))
cpdef _get_row_col_spaces(self, row_space=None, col_space=None):
"""
Parses the row_space and col_space parameters used by various functions.
Returns a tuple consisting of a list of row_space atoms and a list of
col_space atoms. For more information, see the documentation for
:func:`as_np_matrix`.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> hb = qubit('b')
>>> x = (ha.O*hb).array()
>>> x._get_row_col_spaces()
([<a|], [|a>, |b>])
>>> x._get_row_col_spaces(row_space=ha)
([|a>], [|b>, <a|])
>>> x._get_row_col_spaces(row_space=ha*hb)
([|a>, |b>], [<a|])
>>> x._get_row_col_spaces(row_space=(ha, hb))
([|a>, |b>], [<a|])
>>> x._get_row_col_spaces(row_space=(ha, hb.H))
Traceback (most recent call last):
...
MismatchedSpaceError: "not in array's index set: <b|"
>>> x._get_row_col_spaces(col_space=ha)
([|b>, <a|], [|a>])
>>> x._get_row_col_spaces(col_space=(ha, hb.H))
Traceback (most recent call last):
...
MismatchedSpaceError: "not in array's index set: <b|"
>>> x._get_row_col_spaces(col_space=(ha, hb*ha.H))
([], [|a>, |b>, <a|])
>>> x._get_row_col_spaces(row_space=ha, col_space=hb)
Traceback (most recent call last):
...
MismatchedSpaceError: 'all indices must be in col_set or row_set, these were missing: <a|'
>>> x._get_row_col_spaces(row_space=ha.O, col_space=hb*ha)
Traceback (most recent call last):
...
        MismatchedSpaceError: 'space is in both col and row sets: |a>'
"""
col_space = _parse_space(col_space)
row_space = _parse_space(row_space)
if row_space is None and col_space is None:
col_space = self.space.sorted_kets
row_space = self.space.sorted_bras
elif row_space is None:
row_space = [x for x in self.axes if not x in col_space]
elif col_space is None:
col_space = [x for x in self.axes if not x in row_space]
col_set = frozenset(col_space)
row_set = frozenset(row_space)
if not col_set.isdisjoint(row_set):
raise MismatchedSpaceError( \
'space is in both col and row sets: '+self._space_string(col_set & row_set))
if not row_set <= self.space.bra_ket_set:
raise MismatchedSpaceError( \
"not in array's index set: "+self._space_string(row_set - self.space.bra_ket_set))
if not col_set <= self.space.bra_ket_set:
raise MismatchedSpaceError( \
"not in array's index set: "+self._space_string(col_set - self.space.bra_ket_set))
if not col_set | row_set == self.space.bra_ket_set:
raise MismatchedSpaceError( \
'all indices must be in col_set or row_set, these were missing: '+ \
                self._space_string(self.space.bra_ket_set - (col_set | row_set)))
return (row_space, col_space)
cpdef diag(self, as_np=False):
"""
Returns the diagonal elements of this operator, as a ket vector (if as_np=False) or as
a numpy array (if as_np=True). Only applicable to square operators.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> hb = qubit('b')
>>> A = ha.O.array([[1,2],[3,4]])
>>> B = hb.O.array([[1,10],[100,1000]])
>>> A.diag()
HilbertArray(|a>,
array([ 1.+0.j, 4.+0.j]))
>>> A.diag(as_np=True)
array([ 1.+0.j, 4.+0.j])
>>> (A*B).diag()
HilbertArray(|a,b>,
array([[ 1.+0.j, 1000.+0.j],
[ 4.+0.j, 4000.+0.j]]))
>>> (A*B).diag(as_np=True)
array([ 1.+0.j, 1000.+0.j, 4.+0.j, 4000.+0.j])
"""
cdef int D = self.space.assert_square()
np_diag = np.diagonal(self.nparray.reshape(D, D))
if as_np:
return np_diag
else:
return self.space.ket_space().array(np_diag, False, True)
cpdef as_np_matrix(self, dtype=None, row_space=None, col_space=None):
"""
Returns the underlying data as a numpy.matrix. Returns a copy, not a view.
:param dtype: datatype of returned matrix.
:param row_space: the HilbertSpace to use for the row space of the matrix,
default is the bra space of the input array.
:type row_space: HilbertSpace, list, or tuple
:param col_space: the HilbertSpace to use for the column space of the matrix,
default is the ket space of the input array.
:type col_space: HilbertSpace, list, or tuple
>>> import numpy
>>> from qitensor import qubit, qudit
>>> ha = qudit('a', 3)
>>> hb = qudit('b', 4)
>>> x = (ha.O * hb).random_array()
>>> x.space
|a,b><a|
>>> x.as_np_matrix().shape
(12, 3)
>>> # returns a copy, not a view
>>> x.as_np_matrix().fill(0); x.norm() == 0
False
>>> x.as_np_matrix(col_space=ha.O).shape
(9, 4)
>>> x.as_np_matrix(row_space=ha.O).shape
(4, 9)
>>> numpy.allclose(x.as_np_matrix(col_space=ha.O), x.as_np_matrix(row_space=ha.O).T)
True
>>> # If you pass a list, you can control the storage order.
>>> x.as_np_matrix(col_space=[ha, hb])[1,2] == x[{ ha: 0, hb: 1, ha.H: 2 }]
True
>>> x.as_np_matrix(col_space=[hb, ha])[1,2] == x[{ hb: 0, ha: 1, ha.H: 2 }]
True
"""
# shortcut for common case
if self.space._is_simple_dyad and row_space is None and col_space is None:
return np.matrix(self.nparray, dtype=dtype)
rowcol_kw = { 'row_space': row_space, 'col_space': col_space }
(row_space, col_space) = self._get_row_col_spaces(**rowcol_kw)
col_size = _shape_product([x.dim() for x in col_space])
row_size = _shape_product([x.dim() for x in row_space])
axes = [self.get_dim(x) for x in col_space + row_space]
#print col_size, row_size, axes
v = self.nparray.transpose(axes).reshape(col_size, row_size)
return np.matrix(v, dtype=dtype)
cpdef np_matrix_transform(self, f, transpose_dims=False, row_space=None, col_space=None):
"""
Performs a numpy matrix operation.
:param f: operation to perform
:type f: lambda function
:param transpose_dims: if True, the resultant Hilbert space is
transposed
:type transpose_dims: bool
:param row_space: the HilbertSpace to use for the row space of the matrix,
default is the bra space of the input array.
:type row_space: HilbertSpace, list, or tuple
:param col_space: the HilbertSpace to use for the column space of the matrix,
default is the ket space of the input array.
:type col_space: HilbertSpace, list, or tuple
>>> from qitensor import qubit, qudit
>>> import numpy.linalg
>>> ha = qubit('a')
>>> hb = qubit('b')
>>> hc = qudit('c', 4)
>>> x = (ha * hb.H).random_array()
>>> x.space
|a><b|
>>> y = x.np_matrix_transform(numpy.linalg.inv, transpose_dims=True)
>>> y.space
|b><a|
>>> y == x.I
True
>>> v = ha.random_array()
>>> (v.np_matrix_transform(lambda x: x*2) - v*2).norm() < 1e-14
True
>>> w = (ha*hc*hb.H).random_array()
>>> wi = w.np_matrix_transform(numpy.linalg.inv, transpose_dims=True, row_space=hc)
>>> wi == w.inv(hc)
True
"""
#m = self.as_np_matrix()
#m = f(m)
#out_hilb = self.space
#if transpose_dims:
# out_hilb = out_hilb.H
#return out_hilb.reshaped_np_matrix(m)
rowcol_kw = { 'row_space': row_space, 'col_space': col_space }
(row_space, col_space) = self._get_row_col_spaces(**rowcol_kw)
m = self.as_np_matrix(**rowcol_kw)
m = f(m)
if m is None:
raise HilbertError("transformer returned None")
if transpose_dims:
out_hilb = self.space.H
out_axes = [x.H for x in row_space+col_space]
else:
out_hilb = self.space
out_axes = col_space+row_space
return out_hilb.array(m, reshape=True, input_axes=out_axes)
@property
def H(self):
"""
Returns the adjoint (Hermitian conjugate) of this array.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> x = ha.array([1j, 0]); x
HilbertArray(|a>,
array([ 0.+1.j, 0.+0.j]))
>>> x.H
HilbertArray(<a|,
array([ 0.-1.j, 0.-0.j]))
>>> y = ha.O.array([[1+2j, 3+4j], [5+6j, 7+8j]]); y
HilbertArray(|a><a|,
array([[ 1.+2.j, 3.+4.j],
[ 5.+6.j, 7.+8.j]]))
>>> y.H
HilbertArray(|a><a|,
array([[ 1.-2.j, 5.-6.j],
[ 3.-4.j, 7.-8.j]]))
"""
cdef object xxx = self.space.base_field # FIXME - need to forget type to avoid Cython error
return self.np_matrix_transform( \
xxx.mat_adjoint, \
transpose_dims=True)
@property
def I(self):
"""
Returns the matrix inverse of this array.
It is required that the dimension of the bra space be equal to the
dimension of the ket space.
This is just a shortcut for ``self.inv()``, which offers more options.
See also: :func:`inv`
>>> from qitensor import qubit, qudit
>>> ha = qubit('a')
>>> x = ha.O.random_array()
>>> (x * x.I - ha.eye()).norm() < 1e-13
True
>>> hb = qubit('b')
>>> hc = qudit('c', 4)
>>> y = (ha * hb * hc.H).random_array()
>>> (y * y.I - (ha * hb).eye()).norm() < 1e-13
True
>>> (y.I * y - hc.eye()).norm() < 1e-13
True
"""
return self.inv()
@property
def T(self):
"""
Returns the transpose of this array.
>>> from qitensor import qubit
>>> ha = qubit('a')
        >>> x = ha.array([1j, 0]); x
HilbertArray(|a>,
array([ 0.+1.j, 0.+0.j]))
>>> x.T
HilbertArray(<a|,
array([ 0.+1.j, 0.+0.j]))
>>> y = ha.O.array([[1+2j, 3+4j], [5+6j, 7+8j]]); y
HilbertArray(|a><a|,
array([[ 1.+2.j, 3.+4.j],
[ 5.+6.j, 7.+8.j]]))
>>> y.T
HilbertArray(|a><a|,
array([[ 1.+2.j, 5.+6.j],
[ 3.+4.j, 7.+8.j]]))
"""
# transpose should be the same for all base_field's
return self.np_matrix_transform(lambda x: x.T, transpose_dims=True)
@property
def O(self):
"""
Makes a density operator from a pure state.
The input must be a ket vector. The output is ``self * self.H``.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> x = ha.array([1j, 2]); x
HilbertArray(|a>,
array([ 0.+1.j, 2.+0.j]))
>>> x.O
HilbertArray(|a><a|,
array([[ 1.+0.j, 0.+2.j],
[ 0.-2.j, 4.+0.j]]))
"""
if self.space.bra_set:
raise NotKetSpaceError('self.O only applies to ket spaces')
else:
return self * self.H
cpdef det(self):
"""
Returns the matrix determinant of this array.
It is required that the dimension of the bra space be equal to the
dimension of the ket space.
>>> import numpy.linalg
>>> from qitensor import qubit, qudit
>>> ha = qubit('a')
>>> hb = qubit('b')
>>> hc = qudit('c', 4)
>>> y = (ha * hb * hc.H).random_array()
>>> abs( y.det() - numpy.linalg.det(y.as_np_matrix()) ) < 1e-14
True
"""
return self.space.base_field.mat_det(self.as_np_matrix())
cpdef fill(self, val):
"""
Fills every entry of this array with a constant value.
NOTE: the array is modified in-place and is not returned.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> x = ha.random_array()
>>> x.fill(2)
>>> x
HilbertArray(|a>,
array([ 2.+0.j, 2.+0.j]))
"""
# fill should be the same for all base_field's
self.nparray.fill(val)
cpdef norm(self, p=2):
"""
Returns the vector norm of this array.
If p is given, then the :math:`\ell_p` norm is computed.
See also: :func:`schatten_norm`, :func:`trace_norm`
>>> from qitensor import qubit, qudit
>>> ha = qubit('a')
>>> x = ha.array([3, 4])
>>> x.norm()
5.0
>>> y = ha.O.array([[1, 2], [3, 4]])
>>> y.norm() ** 2
30.0
>>> y.norm(p=1)
10.0
>>> y.norm(p=np.inf)
4.0
>>> hb = qudit('b', 6)
>>> x = hb.array([1, 1, 1, 2, 2, 2])
>>> x.norm(3)
3.0
"""
return self.space.base_field.mat_norm(self.nparray, p)
cpdef trace_norm(self, row_space=None, col_space=None):
"""
Returns the sum of the singular values of this operator.
:param row_space: the HilbertSpace to use for the row space of the matrix,
default is the bra space of the input array.
:type row_space: HilbertSpace, list, or tuple
:param col_space: the HilbertSpace to use for the column space of the matrix,
default is the ket space of the input array.
:type col_space: HilbertSpace, list, or tuple
See also: :func:`schatten_norm`
>>> from qitensor import qudit
>>> ha = qudit('a', 3)
>>> sv = [2, 3, 7]
>>> M = ha.random_unitary() * ha.diag(sv) * ha.random_unitary()
>>> abs(M.trace_norm() - 12) < 1e-14
True
"""
return self.schatten_norm(1, row_space=row_space, col_space=col_space)
cpdef op_norm(self, row_space=None, col_space=None):
"""
Returns the maximum singular value of this operator.
:param row_space: the HilbertSpace to use for the row space of the matrix,
default is the bra space of the input array.
:type row_space: HilbertSpace, list, or tuple
:param col_space: the HilbertSpace to use for the column space of the matrix,
default is the ket space of the input array.
:type col_space: HilbertSpace, list, or tuple
See also: :func:`schatten_norm`
>>> from qitensor import qudit
>>> ha = qudit('a', 3)
>>> sv = [2, 3, 7]
>>> M = ha.random_unitary() * ha.diag(sv) * ha.random_unitary()
>>> abs(M.op_norm() - 7) < 1e-14
True
"""
return self.schatten_norm('inf', row_space=row_space, col_space=col_space)
cpdef schatten_norm(self, p, row_space=None, col_space=None):
"""
Returns the Schatten p-norm of this operator.
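        For singular values :math:`\sigma_i` this is
        :math:`(\sum_i \sigma_i^p)^{1/p}`; passing ``p='inf'`` gives the
        largest singular value.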
:param row_space: the HilbertSpace to use for the row space of the matrix,
default is the bra space of the input array.
:type row_space: HilbertSpace, list, or tuple
:param col_space: the HilbertSpace to use for the column space of the matrix,
default is the ket space of the input array.
:type col_space: HilbertSpace, list, or tuple
See also: :func:`trace_norm`
>>> from qitensor import qubit, qudit
>>> ha = qudit('a', 3)
>>> sv = [2, 3, 7]
>>> M = ha.random_unitary() * ha.diag(sv) * ha.random_unitary()
>>> np.abs(M.schatten_norm(1) - np.sum([x for x in sv])) < 1e-14
True
>>> np.abs(M.schatten_norm(4) - np.sum([x**4 for x in sv])**(1.0/4.0)) < 1e-14
True
"""
sv = self.singular_vals(row_space=row_space, col_space=col_space)
if p == 'inf':
return np.max(sv)
else:
return np.sum([ x**p for x in sv ]) ** self.space.base_field.frac(1, p)
cpdef normalize(self):
"""
Normalizes array in-place.
See also: :func:`normalized`
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> x = ha.array([3, 4])
>>> x.normalize()
>>> x
HilbertArray(|a>,
array([ 0.6+0.j, 0.8+0.j]))
"""
self /= self.norm()
cpdef normalized(self):
"""
Returns a normalized copy of this array.
See also: :func:`normalize`
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> x = ha.array([3, 4])
>>> x.normalized()
HilbertArray(|a>,
array([ 0.6+0.j, 0.8+0.j]))
"""
return self / self.norm()
cpdef inv(self, row_space=None):
"""
Returns the matrix inverse of this array.
:param row_space: the HilbertSpace to use for the row space of the matrix,
default is the bra space of the input array. This parameter allows
computing the inverse of the cross operator.
:type row_space: HilbertSpace, list, or tuple
See also: :func:`I`
>>> from qitensor import qubit, qudit
>>> import numpy.linalg
>>> ha = qubit('a')
>>> hb = qubit('b')
>>> hc = qudit('c', 4)
>>> x = ha.O.random_array()
>>> (x * x.inv() - ha.eye()).norm() < 1e-13
True
>>> y = (ha * hb * hc.H).random_array()
>>> (y.space, y.inv().space)
(|a,b><c|, |c><a,b|)
>>> (y * y.inv() - (ha * hb).eye()).norm() < 1e-13
True
>>> (y.inv() * y - hc.eye()).norm() < 1e-13
True
>>> z = (ha * hc * hb.H).random_array()
>>> (z.space, z.inv(hc).space)
(|a,c><b|, |b><a,c|)
>>> ((z * z.inv(hc)).trace(ha) - hc.eye()).norm() < 1e-14
True
>>> (z.inv(hc).tensordot(z, contraction_spaces=hc) - (ha*hb).O.eye()).norm() < 1e-14
True
"""
cdef object xxx = self.space.base_field # FIXME - need to forget type to avoid Cython error
return self.np_matrix_transform( \
xxx.mat_inverse, \
transpose_dims=True,
row_space=row_space)
def pinv(self, rcond=1e-15):
"""
Returns the Moore-Penrose pseudoinverse of this array.
:param rcond: cutoff for small singular values (see numpy.linalg.pinv
docs for more info)
:type rcond: float; default 1e-15
>>> from qitensor import qubit, qudit
>>> ha = qubit('a')
>>> hb = qudit('b', 3)
>>> x = (ha * hb.H).random_array()
>>> x.as_np_matrix().shape
(2, 3)
>>> (x * x.pinv() - ha.eye()).norm() < 1e-13
True
"""
return self.np_matrix_transform( \
lambda x: self.space.base_field.mat_pinv(x, rcond), \
transpose_dims=True)
cpdef conj(self):
"""
Returns the complex conjugate of this array.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> x = ha.array([1j, 0]); x
HilbertArray(|a>,
array([ 0.+1.j, 0.+0.j]))
>>> x.conj()
HilbertArray(|a>,
array([ 0.-1.j, 0.-0.j]))
>>> y = ha.O.array([[1+2j, 3+4j], [5+6j, 7+8j]]); y
HilbertArray(|a><a|,
array([[ 1.+2.j, 3.+4.j],
[ 5.+6.j, 7.+8.j]]))
>>> y.conj()
HilbertArray(|a><a|,
array([[ 1.-2.j, 3.-4.j],
[ 5.-6.j, 7.-8.j]]))
"""
cdef object xxx = self.space.base_field # FIXME - need to forget type to avoid Cython error
return self.np_matrix_transform( \
xxx.mat_conj)
cpdef conj_by(self, U):
"""
Returns U*self*U.H.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> U = ha.random_unitary()
>>> x = ha.random_density()
>>> (U*x*U.H - x.conj_by(U)).norm() < 1e-13
True
"""
return U*self*U.H
def trace(self, axes=None):
"""
Returns the (full or partial) trace of this array.
FIXME - update docs with advanced trace features
:param axes: axes to trace over, all axes if None (in which case the bra
space must be the same as the ket space)
:type axes: HilbertSpace; default None
>>> from qitensor import qubit, qudit
>>> ha = qubit('a')
>>> hb = qudit('b', 3)
>>> hc = qubit('c')
>>> x = ha.O.random_array()
>>> y = hb.O.random_array()
>>> z = hc.random_array()
>>> abs(x.trace() - (x[0, 0] + x[1, 1])) < 1e-14
True
>>> abs(y.trace() - (y[0, 0] + y[1, 1] + y[2, 2])) < 1e-14
True
>>> abs(x.trace() * y.trace() - (x*y).trace()) < 1e-14
True
>>> n = hb.random_array().normalized()
>>> # trace of a projector
>>> abs( (n*n.H).trace() - 1 ) < 1e-14
True
>>> ( (x*y).trace(ha) - x.trace() * y ).norm() < 1e-14
True
>>> ( (x*y).trace(hb) - x * y.trace() ).norm() < 1e-14
True
>>> abs( (x*y).trace(ha*hb) - (x*y).trace() ) < 1e-14
True
>>> abs( (x*y).trace(ha).trace(hb) - (x*y).trace() ) < 1e-14
True
>>> abs( x.trace(ha) - x.trace(ha.H) ) < 1e-14
True
>>> abs( x.trace(ha) - x.trace(ha.O) ) < 1e-14
True
>>> ( (x*z).trace(ha) - x.trace() * z ).norm() < 1e-14
True
>>> ( (x*z.H).trace(ha) - x.trace() * z.H ).norm() < 1e-14
True
>>> # trace between a pair of ket spaces (map-state duality stuff)
>>> n = hb.random_array().normalized()
>>> w = n.H.relabel({ hb.H: hb.prime }) * n * z
>>> w.space
|b,b',c>
>>> w.trace({ hb: hb.prime }).space
|c>
>>> w.trace({ hb: hb.prime }).closeto(z)
True
>>> m = ha.random_array().normalized()
>>> v = w * m * m.H.relabel(ha.H, hc.H)
>>> v.space
|a,b,b',c><c|
>>> v.trace({ hb: hb.prime, hc.H: ha }).space
|c>
>>> v.trace({ hb: hb.prime, hc.H: ha }).closeto(z)
True
"""
if axes is None:
            if self.space != self.space.H:
raise HilbertError('bra space does not equal ket space; '+
'please specify axes')
# The full trace is handled specially here, for efficiency.
return np.trace( self.as_np_matrix() )
if isinstance(axes, HilbertSpace):
axes = axes.bra_ket_set
# and then process further in the next if block
if not isinstance(axes, dict):
axes_set = set()
for s in HilbertSpace._expand_list_to_atoms(list(axes)):
if not s in self.space.bra_ket_set:
raise HilbertError('not in ket set: '+repr(s))
axes_set.add(s.H if s.is_dual else s)
axes = dict((s, s.H) for s in axes_set)
assert isinstance(axes, dict)
HilbertSpace._assert_nodup_space(list(axes.keys())+list(axes.values()), "a space was listed twice")
for (k, v) in axes.items():
if not k in self.space.bra_ket_set:
raise HilbertError("not in this array's space: "+repr(k))
if not v in self.space.bra_ket_set:
raise HilbertError("not in this array's space: "+repr(v))
# The full trace is handled specially here, for efficiency.
if frozenset(list(axes.keys())+list(axes.values())) == self.space.bra_ket_set:
return np.trace( self.as_np_matrix() )
working = self
for (s1, s2) in axes.items():
axis1 = working.get_dim(s1)
axis2 = working.get_dim(s2)
arr = np.trace( working.nparray, axis1=axis1, axis2=axis2 )
out_space = working.space.bra_ket_set - frozenset((s1, s2))
if len(out_space) == 0:
# arr should be a scalar
working = arr
else:
out_space = create_space1(out_space)
working = out_space.array(arr)
return working
def tracekeep(self, keep_spc):
"""
Trace out all but the given spaces of a density operator.
Actually, a density operator is not needed, but the bra and ket spaces must be the same.
FIXME - doctests
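
        A minimal usage sketch (assuming consistency with trace(), above):

        >>> from qitensor import qubit
        >>> ha = qubit('a')
        >>> hb = qubit('b')
        >>> x = (ha*hb).O.random_array()
        >>> x.tracekeep(ha) == x.trace(hb)
        True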
"""
        if self.space != self.space.H:
raise HilbertError("self did not have equal bra and ket spaces: "+str(self.space))
if keep_spc == keep_spc.H:
keep_spc = keep_spc.ket_space()
keep_spc.assert_ket_space()
self_spc = self.space.ket_space()
if self_spc == keep_spc:
return self
if not (keep_spc.ket_set <= self_spc.ket_set):
raise MismatchedSpaceError('space not part of array: '+str(keep_spc)+' vs. '+str(self_spc))
return self.trace(self_spc / keep_spc)
def expm(self):
"""
Return the matrix exponential of this array.
It is required that the dimension of the bra space be equal to the
dimension of the ket space.
>>> import numpy
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> ((ha.X * numpy.pi * 1j).expm() + ha.eye()).norm() < 1e-12
True
"""
return self.np_matrix_transform( \
lambda x: self.space.base_field.mat_expm(x))
def logm(self):
"""
Return the matrix logarithm of this array.
It is required that the dimension of the bra space be equal to the
dimension of the ket space.
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> hb = qubit('b')
>>> op = ha.X * hb.Z
>>> (op.logm().expm() - op).norm() < 1e-14
True
"""
return self.np_matrix_transform( \
lambda x: self.space.base_field.mat_logm(x))
cpdef svd(self, full_matrices=True, inner_space=None):
"""
Return the singular value decomposition of this array.
:param full_matrices: if True, U and V are square. If False,
S is square.
:type full_matrices: bool; default True
:param inner_space: Hilbert space for S.
:type inner_space: HilbertSpace
x.svd() returns a tuple (U, S, V) such that:
* ``x == U * S * V``
* ``U.H * U`` is identity
* ``S`` is diagonal
* ``V * V.H`` is identity
If full_matrices is True:
* U and V will be square.
* If inner_space is None, the bra and ket spaces of U will be the same
as the ket space of the input and the bra and ket spaces of V will be
the same as the bra space of the input. The Hilbert space of S will be
the same as that of the input. If the input is not square (the
dimension of the bra space does not match that of the ket space) then S
will not be square.
* If inner_space is not None, it should be a HilbertSpace whose bra and
ket dimensions are the same as those of the input.
If full_matrices is False:
* S will be square. One of U or V will be square.
* If inner_space is None, the bra and ket spaces of S will be the same.
Either the bra or the ket space of the input will be used for S,
whichever is of smaller dimension. If they are of equal dimension
but are not the same spaces, then there is an ambiguity and an
exception will be raised. In this case, you must manually specify
inner_space.
* If inner_space is not None, it should be a ket space, and must be
of the same dimension as the smaller of the bra or ket spaces of the
input. The given space will be used for both the bra and the ket
space of S.
See also: :func:`singular_vals`, :func:`svd_list`
>>> from qitensor import qubit, qudit
>>> ha = qubit('a')
>>> hb = qubit('b')
>>> hc = qubit('c')
>>> x = (ha * hb.H * hc.H).random_array()
>>> x.space
|a><b,c|
>>> (U, S, V) = x.svd()
>>> [h.space for h in (U, S, V)]
[|a><a|, |a><b,c|, |b,c><b,c|]
>>> (U * S * V - x).norm() < 1e-14
True
>>> (U, S, V) = x.svd(full_matrices=False)
>>> [h.space for h in (U, S, V)]
[|a><a|, |a><a|, |a><b,c|]
>>> (U * S * V - x).norm() < 1e-14
True
>>> hS = qubit('d1') * qudit('d2', 4).H
>>> hS
|d1><d2|
>>> (U, S, V) = x.svd(full_matrices=True, inner_space=hS)
>>> [h.space for h in (U, S, V)]
[|a><d1|, |d1><d2|, |d2><b,c|]
>>> (U * S * V - x).norm() < 1e-14
True
>>> hS = qubit('d')
>>> (U, S, V) = x.svd(full_matrices=False, inner_space=hS)
>>> [h.space for h in (U, S, V)]
[|a><d|, |d><d|, |d><b,c|]
>>> (U * S * V - x).norm() < 1e-14
True
"""
hs = self.space
if inner_space is None:
if full_matrices:
inner_space = hs
else:
bs = hs.bra_space()
ks = hs.ket_space()
bra_size = _shape_product(bs.shape)
ket_size = _shape_product(ks.shape)
if ks == bs:
inner_space = ks
elif bra_size < ket_size:
inner_space = bs.H
elif ket_size < bra_size:
inner_space = ks
else:
# Ambiguity as to which space to take, force user to
# specify.
raise HilbertError('Please specify which Hilbert space to '+
'use for the singular values of this square matrix')
if not isinstance(inner_space, HilbertSpace):
raise TypeError('inner_space must be a HilbertSpace')
(u, s, v) = hs.base_field.mat_svd(self.as_np_matrix(), full_matrices)
if full_matrices:
u_space = hs.ket_space() * inner_space.ket_space().H
v_space = hs.bra_space() * inner_space.bra_space().H
U = u_space.reshaped_np_matrix(u)
V = v_space.reshaped_np_matrix(v)
dim1 = _shape_product(inner_space.ket_space().shape)
dim2 = _shape_product(inner_space.bra_space().shape)
min_dim = np.min([dim1, dim2])
Sm = np.zeros((dim1, dim2), dtype=hs.base_field.dtype)
Sm[:min_dim, :min_dim] = np.diag(s)
S = inner_space.reshaped_np_matrix(Sm)
else:
inner_space.assert_ket_space()
u_space = hs.ket_space() * inner_space.H
v_space = inner_space * hs.bra_space()
U = u_space.reshaped_np_matrix(u)
V = v_space.reshaped_np_matrix(v)
s_mat_space = inner_space * inner_space.H
S = s_mat_space.diag(s)
return (U, S, V)
cpdef svd_list(self, row_space=None, col_space=None, thresh=0):
"""
Computes a singular value decomposition or Schmidt decomposition of
this array.
x.svd_list() returns a tuple (U, S, V) such that:
* U is a list of arrays in the space defined by the ``col_space``
parameter (by default the ket space of the input)
* V is a list of arrays in the space defined by the ``row_space``
parameter (by default the bra space of the input)
* S is a 1-d numpy array of positive numbers (the singular values)
* :math:`x = \sum_i S_i U_i \otimes V_i`
* The U are orthonormal, as are the V
:param col_space: the HilbertSpace to use for U, default is the ket
space of the input array.
:type col_space: HilbertSpace, list, or tuple
:param row_space: the HilbertSpace to use for V, default is the bra
space of the input array.
:type row_space: HilbertSpace, list, or tuple
:param thresh: threshold below which singular values will be
considered to be zero and discarded (default is to keep all
singular values)
:type thresh: float
See also: :func:`singular_vals`, :func:`svd`
>>> from qitensor import qubit, qudit
>>> ha = qudit('a', 3)
>>> hb = qubit('b')
>>> hc = qubit('c')
>>> W = (ha * hb.H * hc.H).random_array()
>>> W.space
|a><b,c|
>>> # test basic properties of SVD
>>> import numpy as np
>>> (Ul, sl, Vl) = W.svd_list()
>>> (len(Ul), len(sl), len(Vl))
(3, 3, 3)
>>> (Ul[0].space, Vl[0].space)
(|a>, <b,c|)
>>> np.allclose(np.array([[x.H*y for x in Ul] for y in Ul]), np.eye(len(sl)))
True
>>> np.allclose(np.array([[x*y.H for x in Vl] for y in Vl]), np.eye(len(sl)))
True
>>> (np.sum([u*s*v for (u,s,v) in zip(Ul, sl, Vl)]) - W).norm() < 1e-14
True
>>> # take SVD across the |a><b| vs. <c| cut
>>> import numpy
>>> (Ul, sl, Vl) = W.svd_list(col_space=ha*hb.H)
>>> (len(Ul), len(sl), len(Vl))
(2, 2, 2)
>>> (Ul[0].space, Vl[0].space)
(|a><b|, <c|)
>>> np.allclose(np.array([[(x.H*y).trace() for x in Ul] for y in Ul]), np.eye(len(sl)))
True
>>> np.allclose(np.array([[x*y.H for x in Vl] for y in Vl]), np.eye(len(sl)))
True
>>> (np.sum([u*s*v for (u,s,v) in zip(Ul, sl, Vl)]) - W).norm() < 1e-14
True
>>> # as above, but with col_space given as a list
>>> (Ul, sl, Vl) = W.svd_list(col_space=[hb.H, ha])
>>> (Ul[0].space, Vl[0].space)
(|a><b|, <c|)
>>> (np.sum([u*s*v for (u,s,v) in zip(Ul, sl, Vl)]) - W).norm() < 1e-14
True
"""
hs = self.space
rowcol_kw = { 'row_space': row_space, 'col_space': col_space }
(row_space, col_space) = self._get_row_col_spaces(**rowcol_kw)
assert len(row_space) > 0
assert len(col_space) > 0
m = self.as_np_matrix(**rowcol_kw)
(u, s, v) = hs.base_field.mat_svd(m, False)
#print u.shape
#print s.shape
#print v.shape
#print row_space
#print col_space
U_list = np.array([ \
np.product(col_space).array(x, reshape=True, input_axes=col_space) \
for x in u.T])
V_list = np.array([ \
np.product(row_space).array(x, reshape=True, input_axes=row_space) \
for x in v])
if thresh > 0:
U_list = U_list[s > thresh]
V_list = V_list[s > thresh]
s = s[s > thresh]
return (U_list, s, V_list)
cpdef singular_vals(self, row_space=None, col_space=None):
"""
Returns the singular values of this array.
For the meaning of row_space and col_space, see the documentation
for :func:`svd` or :func:`svd_list`.
See also: :func:`svd`, :func:`svd_list`
>>> from qitensor import qubit, qudit
>>> import numpy
>>> ha = qubit('a')
>>> hb = qubit('b')
>>> hc = qubit('c')
>>> x = (ha * hb.H * hc.H).random_array()
>>> numpy.allclose(numpy.diag(x.svd()[1].as_np_matrix()), x.singular_vals())
True
"""
rowcol_kw = { 'row_space': row_space, 'col_space': col_space }
m = self.as_np_matrix(**rowcol_kw)
return self.space.base_field.mat_svd_vals(m)
cpdef eig(self, w_space=None, hermit=False):
"""
Return the eigenvalues and right eigenvectors of this array.
:param w_space: space for the diagonal matrix, if None the space of the
input array is used.
:type w_space: HilbertSpace; default None
:param hermit: set this to True if the input is Hermitian
:type hermit: bool; default False
NOTE: in the case of degenerate eigenvalues, with hermit=False, it may
be the case that the returned eigenvectors array is not full rank. See
the documentation for numpy.linalg.eig for details.
>>> from qitensor import qubit, qudit
>>> ha = qubit('a')
>>> hb = qudit('b', 3)
>>> hc = qudit('c', 6)
>>> epsilon = 1e-13
>>> op = (ha*hb).O.random_array()
>>> # make a normal operator
>>> op = op.H * op
>>> (W, V) = op.eig()
>>> V.space
|a,b><a,b|
>>> W.space
|a,b><a,b|
>>> (V.H * V - (ha*hb).eye()).norm() < epsilon
True
>>> (V.H * op * V - W).norm() < epsilon
True
>>> (op * V - V * W).norm() < epsilon
True
>>> # NOTE: this is not a normal operator, so V won't be unitary.
>>> op = (ha*hb).O.random_array()
>>> (W, V) = op.eig(w_space=hc)
>>> V.space
|a,b><c|
>>> W.space
|c><c|
>>> (op * V - V * W).norm() < epsilon
True
>>> vec = hb.random_array().normalized()
>>> dyad = vec * vec.H
>>> (W, V) = dyad.eig(hermit=True)
>>> (W - hb.diag([0, 0, 1])).norm() < epsilon
True
>>> (V.H * V - hb.eye()).norm() < epsilon
True
>>> vec2 = V[:, hb.dim()-1]
>>> # Correct for phase ambiguity
>>> vec2 *= (vec[0]/vec2[0]) / abs(vec[0]/vec2[0])
>>> (vec - vec2).norm() < epsilon
True
"""
if not self.space.is_symmetric():
raise HilbertError('bra space must be the same as ket space '+
'(space was '+repr(self.space)+')')
if w_space is None:
w_space = self.space.ket_space()
w_space.assert_ket_space()
(w, v) = self.space.base_field.mat_eig(self.as_np_matrix(), hermit)
# sort eigenvalues in ascending order of real component
srt = np.argsort(w)
w = w[srt]
v = v[:, srt]
W = (w_space * w_space.H).diag(w)
V = (self.space.ket_space() * w_space.H).reshaped_np_matrix(v)
return (W, V)
cpdef eigvals(self, hermit=False):
"""
Return the eigenvalues of this array, sorted in order of ascending
real component.
:param hermit: set this to True if the input is Hermitian. In this
case, the returned eigenvalues will be real.
:type hermit: bool; default False
>>> from qitensor import qubit, qudit
>>> ha = qubit('a')
>>> hb = qudit('b', 3)
>>> epsilon = 1e-13
>>> op = (ha*hb).O.random_array()
>>> # make a normal operator
>>> op = op.H * op
>>> (W1, V1) = op.eig()
>>> W2 = op.eigvals()
>>> (ha*hb).diag(W2) == W1
True
"""
if not self.space.is_symmetric():
raise HilbertError('bra space must be the same as ket space '+
'(space was '+repr(self.space)+')')
w = self.space.base_field.mat_eigvals(self.as_np_matrix(), hermit)
# sort eigenvalues in ascending order of real component
w = np.sort(w)
if hermit:
assert np.all(np.imag(w) == 0)
w = np.real(w)
return w
cpdef eigvalsh(self):
"""
Alias for eigvals(hermit=True).
"""
return self.eigvals(hermit=True)
cpdef eigproj(self, hermit=False, grouping_tol=1e-9):
"""
Decompose an operator into eigenspace projectors.
Returns a list of eigenvalues `(w_i)` and a list of projectors `(P_i)`
such that :math:`M = \sum_i w_i P_i`.
:param hermit: set this to `True` if the input is Hermitian. The returned eigenvalues
will then be real.
:param grouping_tol: threshold for grouping similar eigenvalues.
>>> from qitensor import qudit
>>> ha = qudit('a', 5)
>>> hb = qudit('b', 2)
>>> U = ha.random_unitary() * hb.eye()
>>> (wl, Pl) = U.eigproj()
>>> np.all([ (P*P-P).norm() < 1e-12 for P in Pl ])
True
>>> np.all([ (P.H-P).norm() < 1e-12 for P in Pl ])
True
>>> (U - np.sum([ w*P for (w, P) in zip(wl, Pl) ])).norm() < 1e-12
True
"""
# Tolerance for things that really should be zero (regardless of grouping_tol).
tol = 1e-12
if not self.space.is_symmetric():
raise HilbertError('bra space must be the same as ket space '+
'(space was '+repr(self.space)+')')
if (self*self.H - self.H*self).norm() > tol:
raise HilbertError('operator was not normal')
(ew_list, ev_list) = self.space.base_field.mat_eig(self.as_np_matrix(), hermit)
# sort eigenvalues in ascending order of real component
srt = np.argsort(ew_list)
ew_list = ew_list[srt]
ev_list = ev_list[:, srt]
# Make eigenvectors orthonormal. The numpy documentation claims the eigenvector
# matrix (ev_list) will be unitary if the input is normal, but this seems not to be the case.
for i in range(ev_list.shape[1]):
for j in range(i):
p = (ev_list[:,i].T * ev_list[:,j].conj()).item()
ev_list[:,i] -= p * ev_list[:,j]
ev_list[:,i] /= self.space.base_field.mat_norm(ev_list[:,i], 2)
#print np.dot(ev_list.conj().T, ev_list)
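# Group eigenvalues that agree to within grouping_tol: each eigenvalue either
# joins the closest group found so far or starts a new one.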
uniq_ew = []
indices_for_ew = defaultdict(list)
for (ew_idx, ew) in enumerate(ew_list):
u_idx = -1 if len(uniq_ew)==0 else np.argmin(np.abs(uniq_ew - ew))
if u_idx >= 0 and np.abs(uniq_ew - ew)[u_idx] > grouping_tol:
u_idx = -1
if u_idx < 0:
u_idx = len(uniq_ew)
uniq_ew.append(ew)
indices_for_ew[uniq_ew[u_idx]].append(ew_idx)
P_list = []
for ew in uniq_ew:
indices = indices_for_ew[ew]
P = np.sum([np.outer(ev_list[:,i], ev_list[:,i].conj()) for i in indices], axis=0)
P = self.space.reshaped_np_matrix(P)
assert (P*P - P).norm() < tol
assert (P.H - P).norm() < tol
P_list.append(P)
M = np.sum([ ew*P for (ew,P) in zip(uniq_ew, P_list) ], axis=0)
# Errors may have been introduced by the grouping of similar eigenvalues. Hopefully
# the following threshold is enough.
final_tol = grouping_tol * np.sqrt(self.space.ket_space().dim()) * 2.0
assert (M - self).norm() < final_tol
return (uniq_ew, P_list)
cpdef sqrt(self):
"""
Return the square root of this matrix.
>>> from qitensor import qubit, qudit
>>> ha = qubit('a')
>>> hb = qudit('b', 3)
>>> P = (ha*hb).O.random_array()
>>> # make a positive operator
>>> P = P.H * P
>>> P.space
|a,b><a,b|
>>> Q = P.sqrt()
>>> Q.space
|a,b><a,b|
>>> P.closeto(Q.H * Q)
True
>>> P.closeto(Q * Q.H)
True
"""
if not self.space.is_symmetric():
raise HilbertError('bra space must be the same as ket space '+
'(space was '+repr(self.space)+')')
if not self.closeto(self.H):
raise HilbertError('matrix was not Hermitian')
(W, V) = self.eig(hermit=True)
W = np.diag(W.as_np_matrix())
if not np.all(W >= -1e-12):
raise HilbertError('matrix was not positive')
W = self.space.diag(np.sqrt(np.where(W >= 0, W, 0)))
return V * W * V.H
cpdef entropy(self, normalize=False, checks=True):
"""
Returns the von Neumann entropy of a density operator, in bits.
:param normalize: if True, the input is automatically normalized to
trace one. If false, an exception is raised if the trace is not
one.
:type normalize: bool; default False
:param checks: if False, don't check that the input is a valid density
matrix or Hermitian. This is sometimes needed for symbolic
computations.
:type checks: bool; default True
>>> import numpy as np
>>> from qitensor import qubit, qudit
>>> ha = qubit('a')
>>> hb = qudit('b', 3)
>>> # entropy of a pure state is zero
>>> ha.ket(0).O.entropy()
0.0
>>> # a fully mixed state of dimension 2
>>> (ha.ket(0).O/2 + ha.ket(1).O/2).entropy()
1.0
>>> # a fully mixed state of dimension 3
>>> abs( (hb.eye()/3).entropy() - np.log2(3) ) < 1e-10
True
>>> # automatic normalization
>>> abs( hb.eye().entropy(normalize=True) - np.log2(3) ) < 1e-10
True
>>> # a bipartite pure state
>>> s = (ha.ket(0) * hb.array([1/np.sqrt(2),0,0]) + ha.ket(1) * hb.array([0,0.5,0.5]))
>>> np.round(s.O.entropy(), 10)
0.0
>>> # entanglement across the a-b cut
>>> (s.O.trace(hb)).entropy()
1.0
>>> # entanglement across the a-b cut is the same as across b-a cut
>>> s = (ha*hb).random_array().normalized().O
>>> abs(s.trace(ha).entropy() - s.trace(hb).entropy()) < 1e-10
True
"""
self.assert_density_matrix(
check_hermitian=checks,
check_normalized=(checks and not normalize),
# we do our own positivity check
check_positive=False,
)
schmidt = self.eigvals(hermit=True)
if normalize:
schmidt /= abs(self.trace())
if checks:
# should have been taken care of by normalization above
assert abs(sum(schmidt)-1) < 1e-9
if not np.all(schmidt >= -1e-9):
raise HilbertError('density matrix was not positive: '+str(schmidt))
return sum([-self.space.base_field.xlog2x(x) for x in schmidt])
cpdef purity(self, normalize=False, checks=True):
"""
Returns the purity of a density operator, ``(self*self).trace()``.
:param normalize: if True, the input is automatically normalized to
trace one. If false, an exception is raised if the trace is not
one.
:type normalize: bool; default False
:param checks: if False, don't check that the input is a valid density
matrix or Hermitian. This is sometimes needed for symbolic
computations.
:type checks: bool; default True
>>> import numpy as np
>>> from qitensor import qubit, qudit
>>> ha = qubit('a')
>>> # purity of a pure state is one
>>> ha.ket(0).O.purity()
1.0
>>> # a fully mixed state of dimension 2
>>> (ha.ket(0).O/2 + ha.ket(1).O/2).purity()
0.5
>>> # automatic normalization
>>> ha.eye().purity(normalize=True)
0.5
"""
self.assert_density_matrix(
check_hermitian=checks,
check_normalized=(checks and not normalize),
# positivity doesn't really matter
check_positive=False,
)
purity = (self*self).trace()
if normalize:
purity /= self.trace() ** 2
assert abs(purity.imag) < 1e-12
return purity.real
def mutual_info(self, ha, hb):
"""
Returns the quantum mutual information between subsystems of a density
operator, I(A:B) = S(A) + S(B) - S(AB) in bits, where S is the von
Neumann entropy of the reduced state on the given space. Both ha and hb
must be (or reduce to) disjoint ket spaces.
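>>> from qitensor import qudit
>>> ha = qudit('a', 2)
>>> hb = qudit('b', 3)
>>> # A sketch of the expected behavior (assumes single-space random_density
>>> # works like the bipartite version used elsewhere in this module):
>>> # a product state carries zero mutual information.
>>> rho = ha.random_density() * hb.random_density()
>>> abs(rho.mutual_info(ha, hb)) < 1e-10
True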
"""
self.assert_density_matrix()
if ha == ha.H:
ha = ha.ket_space()
if hb == hb.H:
hb = hb.ket_space()
ha.assert_ket_space()
hb.assert_ket_space()
if ha.ket_set & hb.ket_set:
raise MismatchedSpaceError('spaces are not disjoint: '+str(ha)+' vs. '+str(hb))
Sa = self.tracekeep(ha).entropy()
Sb = self.tracekeep(hb).entropy()
Sab = self.tracekeep(ha*hb).entropy()
return Sa + Sb - Sab
def relative_entropy(self, other, toler=1e-12):
"""
Returns the quantum relative entropy of two density operators, in bits:
:math:`S(\\rho \\Vert \\sigma) = \\textrm{Tr}[\\rho (\\log_2 \\rho - \\log_2 \\sigma)]`.
The result is infinity when the support of self is not contained in the
support of other; eigenvalues below ``toler`` are treated as zero.
>>> from qitensor import qudit
>>> ha = qudit('a', 3)
>>> hb = qudit('b', 4)
>>> rho = (ha*hb).random_density()
>>> rho_a = rho.trace(hb)
>>> rho_b = rho.trace(ha)
>>> abs(rho.relative_entropy(rho_a * rho_b) - rho.mutual_info(ha, hb)) < 1e-14
True
>>> sigma = (ha*hb).random_density()
>>> re1 = rho.relative_entropy(sigma)
>>> re2 = (rho * (rho.logm() - sigma.logm())).trace() / np.log(2)
>>> abs(re1 - re2) < 1e-13
True
>>> U = ha.random_unitary()
>>> r = U * ha.diag([0.3, 0.7, 0]) * U.H
>>> r.relative_entropy(r) < 1e-12
True
>>> # A little fuzz is tolerated...
>>> s = U * ha.diag([0.3, 0.7, 1e-13]) * U.H
>>> s /= s.trace()
>>> abs(s.relative_entropy(r)) < 1e-10
True
>>> #... but too much fuzz is not.
>>> t = U * ha.diag([0.3, 0.7, 1e-6]) * U.H
>>> t /= t.trace()
>>> t.relative_entropy(r)
inf
"""
self.assert_density_matrix()
other.assert_density_matrix()
self._assert_same_axes(other)
bf = self.space.base_field
(W, V) = other.eig(hermit=True)
arr1 = (V.H*self*V).diag(as_np=True)
arr2 = W.diag(as_np=True)
q = np.sum([ \
0 if x<=toler else \
-bf.infty() if y<=toler else \
x*bf.log2(y) for (x, y) in zip(arr1, arr2) \
])
ret = -q-self.entropy()
assert abs(ret.imag) < toler
ret = ret.real
assert ret > -toler*10
return 0 if ret < 0 else ret
cpdef fidelity(self, HilbertArray other):
"""
Compute the fidelity between two density operators.
The fidelity is defined as the trace norm of the product of square roots
of two operators,
:math:`\\lVert \\sqrt{\\rho} \\sqrt{\\sigma} \\rVert_{\\textrm{tr}}`.
>>> from qitensor import qudit
>>> ha = qudit('a', 4)
>>> hb = qudit('b', 4)
>>> # Create random bipartite states.
>>> psi = (ha*hb).random_array().normalized()
>>> phi = (ha*hb).random_array().normalized()
>>> # Fidelity of pure states is the overlap.
>>> abs(psi.O.fidelity(phi.O) - abs(psi.H * phi)) < 1e-12
True
>>> # Fidelity is nonincreasing under quantum operations.
>>> psi.O.trace(hb).fidelity(phi.O.trace(hb)) >= psi.O.fidelity(phi.O)
True
"""
self.assert_density_matrix()
other.assert_density_matrix()
self._assert_same_axes(other)
return (self.sqrt() * other.sqrt()).trace_norm()
cpdef QR(self, inner_space=None):
"""
Returns operators Q and R such that Q is an isometry, R is upper triangular, and self=Q*R.
The bra space of Q (and the ket space of R) is the smaller of the bra or ket spaces of
the input. This can be overridden using the inner_space parameter.
:param inner_space: bra space of Q (and the ket space of R)
:type inner_space: HilbertSpace
>>> from qitensor import qubit
>>> ha = qubit('a')
>>> hb = qubit('b')
>>> hc = qubit('c')
>>> m = (hb*hc*ha.H).random_array()
>>> (q, r) = m.QR()
>>> (q.space, r.space)
(|b,c><a|, |a><a|)
>>> (q.H * q - ha.eye()).norm() < 1e-14
True
>>> (q*r - m).norm() < 1e-14
True
>>> (q, r) = ha.O.random_array().QR(inner_space=hb)
>>> (q.space, r.space)
(|a><b|, |b><a|)
"""
hs = self.space
mat = self.as_np_matrix()
(q, r) = self.space.base_field.mat_qr(mat)
if inner_space is None:
if mat.shape[0] < mat.shape[1]:
inner_space = hs.ket_space()
else:
inner_space = hs.bra_space().H
inner_space.assert_ket_space()
Q = (hs.ket_space() * inner_space.H).reshaped_np_matrix(q)
R = (inner_space * hs.bra_space()).reshaped_np_matrix(r)
return (Q, R)
cpdef tuple measure(self, HilbertSpace spc=None, cpython.bool normalize=False):
"""
Performs a measurement of a quantum state (ket or density operator) in
the computational basis.
The result is random, with probability distribution consistent with the
laws of quantum mechanics. The return value is a tuple, with the first
element being the index corresponding to the measurement outcome and
the second element being the density operator corresponding to the
state of the remaining subsystems (or the value 1 if there are none).
FIXME - this function is under development and the usage will change.
For example, for a ket input, the "remaining subsystems" state
should be returned as a ket rather than a density operator.
FIXME - doctests
#>>> from qitensor import qudit
#>>> ha=qudit('a', 3); hb=qudit('b', 4); x = (ha*hb).random_array()
#>>> x.measure()
"""
if len(self.space.ket_set) == 0:
raise HilbertError("measure doesn't apply to a bra space")
if len(self.space.bra_set) == 0:
return self.O.measure(spc)
if self.space != self.space.H:
raise HilbertError("measure only applies to kets or density operators")
if spc is None:
spc = self.space.ket_space()
if spc.O == self.space:
reduced = self
else:
reduced = self.trace(self.space / spc.O)
prob = reduced.diag().nparray
sum_prob = np.sum(prob)
if sum_prob == 0:
raise HilbertError("state was equal to zero")
if not normalize:
if abs(sum_prob - 1) > 1e-12:
raise HilbertError("state was not normalized")
prob /= sum_prob
flatidx = np.argmax(np.cumsum(prob.flatten()) > np.random.rand())
idx = np.unravel_index(flatidx, prob.shape)
if spc.O == self.space:
remaining = 1
else:
remaining = self[dict(zip(spc.sorted_kets, idx) + zip(spc.H.sorted_bras, idx))]
remaining /= remaining.trace()
if len(idx) == 1:
idx = idx[0]
return (idx, remaining)
cpdef span(self, axes='all'):
"""
Returns a TensorSubspace for the column/row/mixed space of this array.
:param axes: the axes to take the span of
:type new_data: string ('all', 'col', 'row') or HilbertSpace
>>> from qitensor import qudit
>>> ha = qudit('a', 2)
>>> hb = qudit('b', 3)
>>> hc = qudit('c', 3)
>>> iso = (hb*ha.H).random_isometry()
>>> proj = iso * iso.H
>>> proj.span('col')
<TensorSubspace of dim 2 over space (|b>)>
>>> bigop = proj * ha.random_array() * hc.H.random_array()
>>> bigop.space
|a,b><b,c|
>>> # span of column space
>>> bigop.span('col')
<TensorSubspace of dim 2 over space (|a,b>)>
>>> # dimension 1 because it is a product operator $|b><b| \otimes |a><c|$
>>> bigop.span(hb.O)
<TensorSubspace of dim 1 over space (|b><b|)>
>>> bigop.span(hb.O).tensor_prod(bigop.span(ha)).equiv(
... bigop.span(hb.O*ha))
True
"""
if axes == 'all':
axes = self.space
elif axes == 'col':
axes = self.space.ket_space()
elif axes == 'row':
axes = self.space.bra_space()
assert isinstance(axes, HilbertSpace)
assert axes.bra_ket_set <= self.space.bra_ket_set
space_axes = [i for (i,s) in enumerate(self.axes) if s in axes.bra_ket_set]
group_axes = [i for (i,s) in enumerate(self.axes) if s not in axes.bra_ket_set]
group_dim = self.space.dim() // axes.dim()
assert len(group_axes) + len(space_axes) == len(self.axes)
v = self.nparray.transpose(group_axes+space_axes)
v = v.reshape((group_dim,) + axes.shape)
return TensorSubspace.from_span(v, hilb_space=axes)
########## stuff that only works in Sage ##########
def n(self, prec=None, digits=None):
"""
Converts symbolic values to numeric values (only useful in Sage).
sage: from qitensor import qubit
sage: ha = qubit('a', dtype=SR)
sage: v = ha.array([log(4), log(8)])
sage: v
HilbertArray(|a>,
array([log(4), log(8)], dtype=object))
sage: v.n()
HilbertArray(|a>,
array([1.38629436111989, 2.07944154167984], dtype=object))
"""
return self.np_matrix_transform( \
lambda x: self.space.base_field.mat_n(x, prec, digits))
def simplify(self):
"""
Simplifies symbolic expressions (only useful in Sage).
"""
return self.np_matrix_transform( \
lambda x: self.space.base_field.mat_simplify(x))
def simplify_full(self):
"""
Simplifies symbolic expressions (only useful in Sage).
sage: from qitensor import qubit
sage: ha = qubit('a', dtype=SR)
sage: v = ha.array([log(4), log(8)])
sage: v / log(2)
HilbertArray(|a>,
array([log(4)/log(2), log(8)/log(2)], dtype=object))
sage: (v / log(2)).simplify_full()
HilbertArray(|a>,
array([2, 3], dtype=object))
"""
return self.np_matrix_transform( \
lambda x: self.space.base_field.mat_simplify(x, True))
def _matrix_(self, R=None):
"""
Returns a Sage Matrix for this array.
This supports casting to a Matrix in Sage. You can also use the
equivalent :func:`sage_matrix` method.
sage: from qitensor import qubit
sage: ha = qubit('a', dtype=SR)
sage: v = ha.array([log(4), log(8)])
sage: Matrix(v)
[log(4)]
[log(8)]
"""
return self.sage_matrix(R)
cpdef sage_matrix(self, R=None):
"""
Returns a Sage Matrix for this array.
It is probably preferable to just do Matrix(arr).
sage: from qitensor import qubit
sage: ha = qubit('a', dtype=SR)
sage: v = ha.array([log(4), log(8)])
sage: v.sage_matrix()
[log(4)]
[log(8)]
"""
if not have_sage:
raise HilbertError('This is only available under Sage')
return self.space.base_field.matrix_np_to_sage( \
self.as_np_matrix(), R)
def _latex_(self):
"""
Returns the latex representation of this array, for Sage.
"""
return FORMATTER.array_latex_block_table(self, use_hline=False)
cpdef sage_block_matrix(self, R=None):
"""
Returns a Sage Matrix for this array, with blocks corresponding to
subsystem structure.
sage: from qitensor import qubit
sage: ha = qubit('a', dtype=SR)
sage: hb = qubit('b', dtype=SR)
sage: (ha.X * hb.Z).sage_block_matrix()
[ 0 0| 1 0]
[ 0 0| 0 -1]
[-----+-----]
[ 1 0| 0 0]
[ 0 -1| 0 0]
"""
if not have_sage:
raise HilbertError('This is only available under Sage')
hs = self.space
blocks = [self]
nrows = 1
ncols = 1
if len(hs.sorted_kets) > 1:
h = hs.sorted_kets[0]
blocks = [m[{h: i}] for m in blocks for i in h.indices]
nrows = len(h.indices)
if len(hs.sorted_bras) > 1:
h = hs.sorted_bras[0]
blocks = [m[{h: i}] for m in blocks for i in h.indices]
ncols = len(h.indices)
blocks = [x.sage_matrix(R=R) for x in blocks]
import sage.all
return sage.all.block_matrix(blocks, nrows=nrows, ncols=ncols, subdivide=True)
cpdef sage_matrix_transform(self, f, transpose_dims=False):
"""
Just like :func:`np_matrix_transform` but does operations on a Sage Matrix.
sage: from qitensor import qubit
sage: ha = qubit('a', dtype=SR)
sage: ha.Y.sage_matrix_transform(lambda m: m.transpose())
HilbertArray(|a><a|,
array([[0, I],
[-I, 0]], dtype=object))
"""
if not have_sage:
raise HilbertError('This is only available under Sage')
out_hilb = self.space
if transpose_dims:
out_hilb = out_hilb.H
m = self.sage_matrix()
m = f(m)
return out_hilb.reshaped_sage_matrix(m)
def __str__(self):
return FORMATTER.array_str(self)
def __repr__(self):
return FORMATTER.array_repr(self)
########## IPython stuff ##########
def _repr_latex_(self):
"""
Returns the latex representation, for IPython.
"""
if not FORMATTER.ipy_table_format_mode == 'latex':
return None
latex = FORMATTER.array_latex_block_table(self, use_hline=True)
return '$$'+latex+'$$'
def _repr_png_(self):
"""
Returns a PNG rendering of the latex representation, for IPython.
"""
if not FORMATTER.ipy_table_format_mode == 'png':
return None
# the following is adapted from sympyprint.py
from IPython.lib.latextools import latex_to_png
s = FORMATTER.array_latex_block_table(self, use_hline=True)
# As matplotlib does not support display style, dvipng backend is used here.
png = latex_to_png(s, backend='dvipng', wrap=True)
return png
def _repr_html_(self):
"""
Returns the HTML representation, for IPython.
"""
if not FORMATTER.ipy_table_format_mode == 'html':
return None
return FORMATTER.array_html_block_table(self)
if __name__ == "__main__":
import doctest
doctest.testmod()
<|end_of_text|># cython: language_level=3
from ariesk.utils.bloom_filter cimport BloomGrid
from ariesk.cluster cimport Cluster
from ariesk.dbs.core_db cimport CoreDB
import numpy as np
cimport numpy as npc
cdef class GridCoverDB(CoreDB):
# A too simple dict based cache
# for initial testing only as this will grow without bounds
cdef public dict cluster_cache
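# (A bounded alternative, if ever needed, could evict least-recently-used
#  entries -- e.g. a collections.OrderedDict driven by move_to_end() and
#  popitem(last=False). This is only a sketch, not part of this class.)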
cdef public npc.uint64_t[:, :] hash_functions
cdef public int sub_k
cdef public int n_hashes
cdef public int array_size
cpdef _build_tables(self)
cpdef _build_indices(self)
cpdef _drop_indices(self)
cdef npc.uint64_t[:, :] load_hash_functions(self)
cdef build_save_hash_functions(self)
cdef npc.uint8_t[:, :] get_cluster_members(self, int centroid_id)
cdef Cluster get_cluster(self, int centroid_id)
cdef store_inner_clusters(self, Cluster cluster)
cdef retrieve_inner_clusters(self, Cluster cluster)
cdef store_bloom_grid(self, Cluster cluster)
cpdef build_and_store_bloom_grid(self, int centroid_id)
cpdef BloomGrid retrieve_bloom_grid(self, int centroid_id)
cpdef load_other(self, GridCoverDB other)
<|end_of_text|>"""
Cython code restricted to scalar ODEs.
Variables are declared with types.
Functions as arguments are represented by classes and instances.
Numpy arrays are declared with 1) fixed number of dimensions,
2) element type, 3) negative indices turned off, 4) bounds checking
off, and 5) contiguous memory.
"""
import numpy as np
cimport numpy as np
cimport cython
cdef class Problem:
cpdef double rhs(self, double u, double t):
return 0
cdef class Problem1(Problem):
cpdef double rhs(self, double u, double t):
return -u +1 # u = 1-exp(-t)
cdef extern from "math.h":
double exp(double)
cdef class Problem2(Problem):
cpdef double rhs(self, double u, double t):
return - u + exp(-2*t)
ctypedef np.float64_t DT
cdef class ODEMethod:
cpdef advance(self, double u_1, int n, double t_1,
double dt, Problem p):
return 0
cdef class Method_RK2(ODEMethod):
cpdef advance(self, double u_1, int n, double t_1,
double dt, Problem p):
cdef double K1, K2, unew
K1 = dt*p.rhs(u_1, t_1)
K2 = dt*p.rhs(u_1 + 0.5*K1, t_1 + 0.5*dt)
unew = u_1 + K2
return unew
# Create names compatible with ode1.py
RK2 = Method_RK2()
problem1 = Problem1()
problem2 = Problem2()
@cython.boundscheck(False) # turn off bounds checking for this func.
def solver(Problem f,
double I,
np.ndarray[DT, ndim=1, negative_indices=False,
mode='c'] t,
ODEMethod method):
cdef int N = len(t)-1
#cdef np.ndarray[DT, ndim=1, negative_indices=False, mode='c'] u = np.zeros(N+1, dtype=np.float_t)
#Cython does not like type specification via dtype when the buffer
#declares the type
cdef np.ndarray[DT, ndim=1, negative_indices=False,
mode='c'] u = np.zeros(N+1)
u[0] = I
cdef int n
for n in range(N):
u[n+1] = method.advance(u[n], n, t[n], t[n+1]-t[n], f)
return u, t
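# Example usage (a minimal sketch, not part of the original module; assumes
# this file is compiled and importable as "ode_cy"):
#   import numpy as np
#   import ode_cy
#   t = np.linspace(0, 4, 101)
#   u, t = ode_cy.solver(ode_cy.problem1, 0.0, t, ode_cy.RK2)
#   # for problem1, u should approach 1 - exp(-t)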
<|end_of_text|>import numpy as np
cimport numpy as np
from libc.math cimport sqrt, exp, log, pi
from tqdm import tqdm
import pygame
cdef double dot(np.ndarray[double, ndim=1] v1,
np.ndarray[double, ndim=1] v2):
return v1[0]*v2[0] + v1[1]*v2[1] + v1[2]*v2[2]
cdef double norm(np.ndarray[double, ndim=1] vec):
return sqrt(dot(vec, vec))
cdef np.ndarray[double, ndim=1] normalize(np.ndarray[double, ndim=1] vec):
cdef double N = norm(vec)
if N != 0:
return vec_scale(vec, 1/N)
else:
return np.zeros(3).astype(np.float64)
cdef np.ndarray[double, ndim=1] vec_add(np.ndarray[double, ndim=1] v1,
np.ndarray[double, ndim=1] v2):
cdef np.ndarray[double, ndim=1] v_return = np.zeros(3).astype(np.float64)
v_return[0] = v1[0] + v2[0]
v_return[1] = v1[1] + v2[1]
v_return[2] = v1[2] + v2[2]
return v_return
cdef np.ndarray[double, ndim=1] vec_sub(np.ndarray[double, ndim=1] v1,
np.ndarray[double, ndim=1] v2):
cdef np.ndarray[double, ndim=1] v_return = np.zeros(3).astype(np.float64)
v_return[0] = v1[0] - v2[0]
v_return[1] = v1[1] - v2[1]
v_return[2] = v1[2] - v2[2]
return v_return
cdef np.ndarray[double, ndim=1] vec_scale(np.ndarray[double, ndim=1] vec,
double scale):
cdef np.ndarray[double, ndim=1] v_return = np.zeros(3).astype(np.float64)
v_return[0] = scale * vec[0]
v_return[1] = scale * vec[1]
v_return[2] = scale * vec[2]
return v_return
cdef vec_sum(np.ndarray[double, ndim=1] v1,
np.ndarray[double, ndim=1] v2,
np.ndarray[double, ndim=1] v3):
return vec_add(vec_add(v1, v2), v3)
cdef np.ndarray[double, ndim=1] gravity_force(np.ndarray[double, ndim=1] pos1,
np.ndarray[double, ndim=1] pos2,
double mass1, double mass2,
double G):
# Force directed from 1 to 2
cdef np.ndarray[double, ndim=1] norm_dist_vec = normalize(vec_sub(pos2, pos1))
cdef double F_magnitude = G * mass1 * mass2 / dist2(pos1, pos2)
return vec_scale(norm_dist_vec, F_magnitude)
cdef double dist2(np.ndarray[double, ndim=1] v1,
np.ndarray[double, ndim=1] v2):
return (v1[0]-v2[0])**2 + (v1[1]-v2[1])**2 + (v1[2]-v2[2])**2
cdef double kinetic_energy(np.ndarray[double, ndim=1] v,
double m):
return 0.5 * m * dot(v, v)
class body:
def __init__(self, pos, vel, mass=1.0, color='FFFFFF', R=1.0):
self.pos = pos
self.vel = vel
self.mass = mass
self.mass_ = 1/mass
self.radius = R
self.neighbors = []
self.Force = np.zeros(3).astype(np.float64)
self.acc = np.zeros(3).astype(np.float64)
self.acc2 = np.zeros(3).astype(np.float64)
R = int(color[0:2], 16)
G = int(color[2:4], 16)
B = int(color[4:6], 16)
self.color = [R, G, B]
def add_neighbors(self, group):
my_neighbors = group.copy()
my_neighbors.remove(self)
for neighbor in my_neighbors:
self.neighbors.append(neighbor)
def remove_neighbor(self, neighbor):
if neighbor in self.neighbors:
self.neighbors.remove(neighbor)
def reset_neighbors(self):
self.neighbors = []
def add_force(self, F):
self.Force = vec_add(self.Force, F)
def reset_forces(self):
self.Force = np.zeros(3).astype(np.float64)
def set_gravity(self, G=1):
for b2 in self.neighbors:
F = np.zeros(3).astype(np.float64)
F = gravity_force(self.pos, b2.pos, self.mass, b2.mass, G)
self.add_force(F)
b2.add_force(-F)
self.remove_neighbor(b2)
def move1(self, dt):
self.acc = vec_scale(self.Force, self.mass_)
self.pos = vec_sum(self.pos, vec_scale(self.vel, dt), vec_scale(self.acc, 0.5*dt**2))
self.reset_forces()
def move2(self, dt, G=1):
self.set_gravity(G)
self.acc2 = vec_scale(self.Force, self.mass_)
# velocity-Verlet update: v += 0.5*(a1 + a2)*dt
self.vel = vec_add(self.vel, vec_scale(vec_add(self.acc, self.acc2), 0.5*dt))
def temp_move(self, dt, G=1):
self.reset_forces()
self.set_gravity(G)
self.acc = vec_scale(self.Force, self.mass_)
dv = np.zeros(3).astype(np.float64)
dv = vec_scale(self.acc, dt)
self.vel = vec_add(self.vel, dv)
dx = np.zeros(3).astype(np.float64)
dx = vec_scale(self.vel, dt)
self.pos = vec_add(self.pos, dx)
def Ek(self):
return kinetic_energy(self.vel, self.mass)
def draw(self, canvas, ref=np.zeros(3), r=1):
pos = (self.pos+ref)[0:2]
pygame.draw.circle(canvas, self.color, pos.astype(int)[:2], int(self.radius))
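# Example usage (a minimal sketch, not part of the original module; assumes
# this file is compiled and importable as "nbody" and that the caller owns
# the pygame setup):
#   import numpy as np, nbody
#   b1 = nbody.body(np.zeros(3), np.zeros(3), mass=1000.0)
#   b2 = nbody.body(np.array([100.0, 0.0, 0.0]), np.array([0.0, 3.0, 0.0]))
#   group = [b1, b2]
#   for step in range(1000):
#       for b in group:
#           b.reset_neighbors()
#           b.add_neighbors(group)   # set_gravity consumes neighbors each step
#       for b in group:
#           b.temp_move(0.01, G=1.0)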
<|end_of_text|>from numpy import empty, zeros
cimport numpy as np
cdef int mandelbrot_escape(float complex c, int n):
cdef float complex z
cdef int i
z = 0
for i in range(n):
z = z*z + c
if z.real*z.real + z.imag*z.imag > 4.0:
break
else:
i = 0
return i
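# Example usage of generate_mandelbrot below (a sketch; assumes the module is
# compiled and importable as "mandel"):
#   import numpy as np, mandel
#   xs = np.linspace(-2.0, 1.0, 600).astype(np.float32)
#   ys = np.linspace(-1.5, 1.5, 600).astype(np.float32)
#   d = mandel.generate_mandelbrot(xs, ys, 100)  # 2-D array of escape counts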
def generate_mandelbrot(np.ndarray[float, ndim=1] xs, np.ndarray[float, ndim=1] ys, int n):
cdef unsigned int i,j
cdef unsigned int N = len(xs)
cdef unsigned int M = len(ys)
cdef float complex z
cdef np.ndarray[int, ndim=2] d = empty(dtype='i', shape=(M, N))
for j in range(M):
for i in range(N):
z = xs[i] + ys[j]*1j
d[j,i] = mandelbrot_escape(z, n)
return d<|end_of_text|># Wrapper for xtcio.h: I/O for xtc files.
from cpython.array cimport array
import cython
from array import array
from xdrlib import Unpacker
import numpy as np
cimport numpy as np
import os
from utility cimport *
from math cimport *
from fileio cimport *
from .gromacs.reader import INDEX_MAGIC, SubscriptableReader, XTCFrame
from .errors import InvalidIndexException, InvalidMagicException, XTCError
cdef extern from "gromacs/fileio/xtcio.h":
t_fileio *open_xtc(const char *filename, const char *mode) nogil
void close_xtc(t_fileio *fio) nogil
int read_first_xtc(t_fileio *fio,
int *natoms, gmx_int64_t *step, real *time,
matrix box, rvec **x, real *prec, gmx_bool *_bOK)
int read_next_xtc(t_fileio *fio,
int natoms, gmx_int64_t *step, real *time,
matrix box, rvec *x, real *prec, gmx_bool *_bOK) nogil
int write_xtc(t_fileio *fio, int natoms, gmx_int64_t step, real time, rvec *box, rvec *x, real prec)
if sizeof(real) == 4:
np_real = np.float32
else:
np_real = np.float
#cdef array get_xtc_index_by_frames(t_fileio *fio, int length, int natoms):
# cdef:
#gmx_bool _bOK
## int frame = 1
# cdef array cache = array('L')
# gmx_fio_rewind(fio)
# cache.append(gmx_fio_ftell(fio))
# while xtc_seek_frame(fio, frame*500, natoms) == 0:
# cache.append(gmx_fio_ftell(fio))
# #print(frame, cache[-1])
# frame += 1
# if frame == length:
# break
# return cache
cdef array get_xtc_index(t_fileio *fio):
cdef:
gmx_bool _bOK
int natoms, frame, state = 1
gmx_int64_t step
real time, prec
matrix box
rvec *x
cdef array cache = array('L')
gmx_fio_rewind(fio)
cache.append(gmx_fio_ftell(fio))
read_first_xtc(fio, &natoms, &step, &time, box, &x, &prec, &_bOK)
cache.append(gmx_fio_ftell(fio))
while read_next_xtc(fio, natoms, &step, &time, box, x, &prec, &_bOK):
cache.append(gmx_fio_ftell(fio))
# the last index is invalid
return cache[:-1]
def read_xtcframe(fname, pos, natoms):
cdef t_fileio *fio = open_xtc(fname.encode(), 'r')
gmx_fio_seek(fio, pos)
cdef:
matrix box
gmx_bool _bOK
real time, prec
gmx_int64_t cur_step
np.ndarray[real, ndim=2] coords = np.empty((natoms, 3), dtype=np_real)
int nratms = natoms
with nogil:
read_next_xtc(fio, nratms, &cur_step, &time, box, <rvec *>coords.data, &prec, &_bOK)
close_xtc(fio)
if _bOK:
frame = XTCFrame()
frame._coordinates = coords
frame.index = cur_step
frame.time = time
frame.box = box
return frame
else:
raise
cdef class XTCReader:
cdef:
t_fileio *fio
int natoms
gmx_int64_t cur_step
real start_time, timestep, prec, cur_time
bint has_cache, has_times
array _cache, _times
public str filename
@property
def cache(self):
if self.has_cache:
return self._cache
@cache.setter
def cache(self, indices):
self._cache = array('L')
for i in indices:
self._cache.append(i)
self.has_cache = True
@property
def times(self):
if self.has_times:
return self._times
@times.setter
def times(self, times):
self._times = array('f')
for t in times:
self._times.append(t)
self.has_times = True
# @property
# def filename(self):
# return self._filename.decode()
def seek(self, int frame):
if self.has_cache:
gmx_fio_seek(self.fio, self.cache[frame])
def make_cache(self):
self._cache = get_xtc_index(self.fio)
self.has_cache = True
def load_cache(self, indexfile, ignore_time=False):
xtc_stat = os.stat(self.filename)
c_time = int(xtc_stat.st_ctime)
m_time = int(xtc_stat.st_mtime)
size = xtc_stat.st_size
with open(indexfile, 'rb') as fd:
unpacker = Unpacker(fd.read())
# first 4 * 8 bytes are used for checks, followed by N * (8 + 4) bytes of data
length = int((len(unpacker.get_buffer()) - 32) / 12)
if length < 0:
raise InvalidIndexException
if unpacker.unpack_hyper() != INDEX_MAGIC:
raise InvalidMagicException
if unpacker.unpack_hyper() != c_time and not ignore_time:
raise InvalidIndexException
if unpacker.unpack_hyper() != m_time and not ignore_time:
raise InvalidIndexException
if unpacker.unpack_hyper() != size:
raise InvalidIndexException
self._cache = array('L')
self._times = array('f')
try:
while True:
self._cache.append(unpacker.unpack_hyper())
self._times.append(unpacker.unpack_float())
except EOFError:
pass
self.has_cache = True
self.has_times = True
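# A compatible index file could be produced with xdrlib.Packer (a sketch
# mirroring the unpacking above, not an API of this module):
#   p = Packer()
#   p.pack_hyper(INDEX_MAGIC); p.pack_hyper(c_time)
#   p.pack_hyper(m_time); p.pack_hyper(size)
#   for offset, time in zip(cache, times):
#       p.pack_hyper(offset); p.pack_float(time)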
def __cinit__(self):
self.has_cache = False
self.has_times = False
def __init__(self, filename, indexfile=None, make_cache=False, ignore_timestamps=False):
if isinstance(filename, str):
self.filename = filename
filename = filename.encode()
else:
self.filename = filename.decode()
cdef:
gmx_int64_t step
matrix box
gmx_bool _bOK
real time, prec
rvec *x
if not os.path.exists(filename):
raise OSError('File not found: {}'.format(filename))
if filename.decode().split('.')[-1] != 'xtc':
raise XTCError('File is not of xtc type: {}'.format(filename))
self.fio = open_xtc(filename, b'r')
read_first_xtc(self.fio, &self.natoms, &step, &time, box, &x, &prec, &_bOK)
if indexfile is not None:
try:
self.load_cache(indexfile, ignore_time=ignore_timestamps)
except InvalidIndexException:
if make_cache:
pass
else:
raise
if make_cache:
self.make_cache()
def __len__(self):
if self.has_cache:
return len(self.cache)
def __getitem__(self, frame):
cdef matrix box
cdef gmx_bool _bOK
cdef real time
cdef np.ndarray[real, ndim=2] coords = np.empty((self.natoms, 3), dtype=np_real)
if frame < len(self):
self.seek(frame)
read_next_xtc(self.fio, self.natoms, &self.cur_step, &time, box,
<rvec *>coords.data, &self.prec, &_bOK)
if _bOK:
frame = XTCFrame()
frame._coordinates = coords
frame.index = self.cur_step
frame.time = time
frame.box = box
return frame
else:
raise
else:
raise IndexError('Frame {} is out of range for trajectory of length {}.'.format(frame, len(self)))
def __getstate__(self):
# state = self.__dict__.copy()
state = {}
state['natoms'] = self.natoms
state['cur_step'] = self.cur_step
state['start_time'] = self.start_time
state['timestep'] = self.timestep
state['prec'] = self.prec
state['cur_time'] = self.cur_time
state['has_cache'] = self.has_cache
state['has_times'] = self.has_times
state['_cache'] = self._cache
state['filename'] = self.filename
return state
def __setstate__(self, state):
self.natoms = state.pop('natoms')
self.cur_step = state.pop('cur_step')
self.start_time = state.pop('start_time')
self.timestep = state.pop('timestep')
self.prec = state.pop('prec')
self.cur_time = state.pop('cur_time')
self.has_cache = state.pop('has_cache')
self.has_times = state.pop('has_times')
self._cache = state.pop('_cache')
self.filename = state.pop('filename')
self.fio = open_xtc(self.filename.encode(), b'r')
#self.__dict__.update(state)
@cython.binding(True)
def append_xtcfile(filename, step, time, box, coords, prec):
if isinstance(filename, str):
filename = filename.encode()
cdef np.ndarray[real, ndim=2] b = np.asarray(box, dtype=np.float32)
cdef np.ndarray[real, ndim=2] x = np.asarray(coords, dtype=np.float32)
cdef t_fileio *fio = open_xtc(filename, b'a')
write_xtc(fio, len(coords), step, time, <rvec *>b.data, <rvec *>x.data, <real> prec)
close_xtc(fio)
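# Example usage (a sketch; assumes a trajectory file "traj.xtc" exists):
#   reader = XTCReader('traj.xtc', make_cache=True)
#   frame = reader[0]   # XTCFrame with .time, .index, .box and coordinates
#   print(len(reader), frame.time)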
<|end_of_text|>###cython: boundscheck=False, wraparound=False, nonecheck=False, optimize.use_switch=True
# COMPILATION
# C:\>python setup_project.py build_ext --inplace
# EXECUTABLE
# C:\>pip install pyinstaller
# C:\>pyinstaller --onefile fire_demo.spec
# CYTHON IS REQUIRED
try:
cimport cython
from cython.parallel cimport prange
except ImportError:
raise ImportError("\n<cython> library is missing on your system."
"\nTry: \n C:\\pip install cython on a window command prompt.")
try:
import numpy
from numpy import zeros, asarray, ndarray, uint32, uint8, float32
except ImportError:
raise ImportError("\nNumpy library is missing on your system."
"\nTry: \n C:\\pip install numpy on a window command prompt.")
# PYGAME IS REQUIRED
try:
import pygame
from pygame import Color, Surface, SRCALPHA, RLEACCEL, BufferProxy
from pygame.surfarray import pixels3d, array_alpha, pixels_alpha, array3d
from pygame.image import frombuffer
except ImportError:
raise ImportError("\n<Pygame> library is missing on your system."
"\nTry: \n C:\\pip install pygame on a window command prompt.")
try:
from rand import randrange, randrangefloat
except ImportError:
raise ImportError("\n<rand> library is missing on your system or rand.pyx is not cynthonized.")
try:
from hsl cimport struct_hsl_to_rgb, rgb, rgba
except ImportError:
raise ImportError("\n<hsl> library is missing on your system or hsl.pyx is not cynthonized.")
from libc.stdio cimport printf
from libc.stdlib cimport rand
# ---------------------- INTERFACE -----------------------------
# FUNCTION BELOW CAN BE ACCESS DIRECTLY FROM PYTHON CODE
cpdef make_palette(size: int, height: int, fh: float=0.25, fs: float=255.0, fl: float=2.0):
return make_palette_c(size, height, fh, fs, fl)
# --------------------------------------------------------------
DEF OPENMP = True
if OPENMP == True:
DEF THREAD_NUMBER = 8
else:
DEF THREAD_NUMBER = 1
DEF SCHEDULE = 'static'
DEF ONE_360 = 1.0 / 360.0
DEF ONE_255 = 1.0 / 255.0
# Load C code
cdef extern from 'randnumber.c':
float randRangeFloat(float lower, float upper)nogil
int randRange(int lower, int upper)nogil
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
@cython.cdivision(True)
cdef inline unsigned int rgb_to_int(int red, int green, int blue)nogil:
"""
CONVERT RGB MODEL INTO A PYTHON INTEGER EQUIVALENT TO THE FUNCTION PYGAME MAP_RGB()
:param red : Red color value, must be in range [0..255]
:param green : Green color value, must be in range [0..255]
:param blue : Blue color, must be in range [0.255]
:return : returns a positive python integer representing the RGB values(int32)
"""
return 65536 * red + 256 * green + blue
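# Round-trip example (sketch): rgb_to_int(255, 16, 1) == 0xFF1001 == 16715777,
# and int_to_rgb(16715777) recovers r=255, g=16, b=1.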
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
@cython.cdivision(True)
cdef inline rgb int_to_rgb(unsigned int n)nogil:
"""
CONVERT A PYTHON INTEGER INTO A RGB COLOUR MODEL (UNSIGNED CHAR VALUES [0..255]).
EQUIVALENT TO PYGAME UNMAP_RGB()
:param n : positive integer value to convert
:return : return a C structure rgb containing RGB values
"""
cdef:
rgb rgb_
rgb_.r = n >> 16 & 255 # red int32
rgb_.g = n >> 8 & 255 # green int32
rgb_.b = n & 255 # blue int32
return rgb_
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
@cython.cdivision(True)
cdef inline rgba int_to_rgba(int n)nogil:
"""
Type Capacity
Int16 -- (-32,768 to +32,767)
Int32 -- (-2,147,483,648 to +2,147,483,647)
Int64 -- (-9,223,372,036,854,775,808 to +9,223,372,036,854,775,807)
CONVERT A PYTHON INTEGER INTO A RGBA COLOUR MODEL.
EQUIVALENT TO PYGAME UNMAP_RGB()
:param n : strictly positive integer value to convert (c int32)
:return : return a C structure containing RGBA values
Integer value is unmapped into RGBA values (unsigned char type, [0... 255]
"""
cdef:
rgba rgba_
rgba_.a = (n >> 24) & 255 # alpha
rgba_.r = (n >> 16) & 255 # red
rgba_.g = (n >> 8) & 255 # green
rgba_.b = n & 255 # blue
return rgba_
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
@cython.cdivision(True)
cdef inline int rgba_to_int(unsigned char red, unsigned char green, unsigned char blue, unsigned char alpha)nogil:
"""
Int16 -- (-32,768 to +32,767)
Int32 -- (-2,147,483,648 to +2,147,483,647)
Int64 -- (-9,223,372,036,854,775,808 to +9,223,372,036,854,775,807)
CONVERT RGBA MODEL INTO A MAPPED PYTHON INTEGER (INT32) EQUIVALENT TO PYGAME MAP_RGB()
OUTPUT INTEGER VALUE BETWEEN (-2,147,483,648 TO +2,147,483,647).
:param red : unsigned char; Red color must be in range[0...255]
:param green : unsigned char; Green color value must be in range[0... 255]
:param blue : unsigned char; Blue color must be in range[0...255]
:param alpha : unsigned char; Alpha must be in range [0...255]
:return: returns a python integer (int32, see above for description) representing the RGBA values
"""
return (alpha << 24) + (red << 16) + (green << 8) + blue
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
@cython.cdivision(True)
cdef make_palette_c(int width, int height, float fh, float fs, float fl):
"""
CREATE A PALETTE OF RGB COLORS (WIDTH X HEIGHT)
e.g:
# below: palette of 256 colors & surface (width=256, height=50).
# hue * 6, saturation = 255.0, lightness * 2.0
palette, surf = make_palette(256, 50, 6, 255, 2)
palette, surf = make_palette(256, 50, 4, 255, 2)
:param width : integer, Palette width
:param height : integer, palette height
:param fh : float, hue factor
:param fs : float, saturation factor
:param fl : float, lightness factor
:return : Return a tuple ndarray type uint32 and pygame.Surface (width, height)
"""
assert width > 0, "Argument width should be > 0, got %s " % width
assert height > 0, "Argument height should be > 0, got %s " % height
cdef:
unsigned int [:] palette = ndarray(width, uint32)
unsigned char [:, :, :] pal = ndarray((width, height, 3), dtype=uint8)
int x, y
float h, s, l
rgb rgb_
int ii = 0
with nogil:
for x in prange(width):
h, s, l = <float>x * fh, min(fs, 255.0), min(<float>x * fl, 255.0)
rgb_ = struct_hsl_to_rgb(h * ONE_360, s * ONE_255, l * ONE_255)
# build the palette (1d buffer int values)
palette[x] = rgb_to_int(<int>(rgb_.r * 255.0),
<int>(rgb_.g * 255.0),
<int>(rgb_.b * 255.0 * 0.5))
# Create a 3d array containing rgb values
for x in range(width):
rgb_ = int_to_rgb(palette[ii])
for y in prange(height):
pal[x, y, 0] = <unsigned char>rgb_.r
pal[x, y, 1] = <unsigned char>rgb_.g
pal[x, y, 2] = <unsigned char>rgb_.b
ii += 1
return asarray(palette), pygame.surfarray.make_surface(asarray(pal))
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
@cython.cdivision(True)
cpdef fire_texture24(int width, int height, int frame, float factor, pal, mask):
"""
CREATE AN ANIMATED FLAME EFFECT OF SIZES (WIDTH, HEIGHT).
THE FLAME EFFECT DOES NOT CONTAIN ALPHA TRANSPARENCY (24-BIT SURFACES)
e.g:
width = 200
height = 200
palette, surf = make_palette(256, 50, 4, 255, 2)
mask = numpy.full((width, height), 255, dtype=numpy.uint8)
buff = fire_texture24(width, height, 1000, 3.95, palette, mask)
:param width : integer; max width of the effect
:param height : integer; max height of the effect
:param frame : integer; number of frames for the animation
:param factor : float; change the flame height, default is 3.95
:param pal : define a color palette e.g make_palette(256, 50, 6, 255, 2)
:param mask : Ideally a black and white texture transformed into a 2d array shapes (w, h)
black pixel will cancel the effect.
The mask should have the exact same sizes as the passed arguments (width, height)
:return: Return a python list containing all the 24-bit surfaces.
"""
assert isinstance(width, int), \
"Argument width should be a python int, got %s " % type(width)
assert isinstance(height, int), \
"Argument height should be a python int, got %s " % type(height)
assert isinstance(frame, int), \
"Argument frame should be a python int, got %s " % type(frame)
assert isinstance(factor, float), \
"Argument factor should be a python float, got %s " % type(factor)
assert isinstance(mask, ndarray), \
"Argument mask should be a numpy.ndarray, got %s " % type(mask)
if not frame > 0:
raise ValueError('Argument frame should be > 0, %s'% frame)
if width == 0 or height == 0:
raise ValueError('Image with incorrect dimensions '
'(width>0, height>0) got (width:%s, height:%s)' % (width, height))
cdef:
int w, h
try:
w, h = mask.shape[:2]
except (ValueError, pygame.error) as e:
raise ValueError('\nArray shape not understood.')
if width != w or height != h:
raise ValueError('Incorrect mask dimensions '
'mask should be (width=%s, height=%s), '
'got (width=%s, height=%s)' %(width, height, w, h))
cdef:
float [:, ::1] fire = zeros((height, width), dtype=float32)
# flame opacity palette
unsigned int [::1] alpha = make_palette(256, 1, 1, 0, 2)[0]
unsigned int [:, :, ::1] out = zeros((height, width, 3), dtype=uint32)
unsigned int [::1] palette = pal
unsigned char [:, :] mask_ = mask
int x = 0, y = 0, i = 0, f
float d
rgb rgb_
list_ = []
for f in range(frame):
for x in range(width):
fire[height-1, x] = randrange(1, 255)
with nogil:
for y in prange(0, height - 1):
for x in range(0, width - 1):
if mask_[x, y] != 0:
d = (fire[(y + 1) % height, (x - 1 + width) % width]
+ fire[(y + 1) % height, x % width]
+ fire[(y + 1) % height, (x + 1) % width]
+ fire[(y + 2) % height, x % width]) / factor
d -= rand() * 0.0001
if d > 255.0:
d = 255.0
if d < 0:
d = 0
fire[y, x] = d
rgb_ = int_to_rgb(palette[<unsigned int>d])
out[y, x, 0], out[y, x, 1], out[y, x, 2] = \
<unsigned char>rgb_.r, <unsigned char>rgb_.g, <unsigned char>rgb_.b
else:
out[y, x, 0], out[y, x, 1], out[y, x, 2] = 0, 0, 0
surface = pygame.image.frombuffer(asarray(out, dtype=uint8), (width, height), 'RGB')
list_.append(surface)
return list_
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
@cython.cdivision(True)
cpdef fire_texture32(int width, int height, int frame, float factor, pal):
"""
CREATE AN ANIMATED FLAME EFFECT OF SIZES (WIDTH, HEIGHT).
THE FLAME EFFECT CONTAINS PER-PIXEL TRANSPARENCY
e.g:
width, height = 200, 200
image = pygame.image.load("LOAD YOUR IMAGE")
image = pygame.transform.smoothscale(image, (width, height))
buff = fire_texture32(width, height, 1000, 3.95, palette)
:param width: integer; max width of the effect
:param height: integer; max height of the effect
:param frame: integer; number of frames for the animation
:param factor: float; change the flame height, default is 3.95
:param pal: define a color palette e.g make_palette(256, 50, 6, 255, 2)
:return: Return a python list containing all the per-pixel surfaces.
"""
assert isinstance(width, int), \
"Argument width should be a python int, got %s " % type(width)
assert isinstance(height, int), \
"Argument height should be a python int, got %s " % type(height)
assert isinstance(frame, int), \
"Argument frame should be a python int, got %s " % type(frame)
assert isinstance(factor, float), \
"Argument factor should be a python float, got %s " % type(factor)
if not frame > 0:
raise ValueError('Argument frame should be > 0, %s'% frame)
if width == 0 or height == 0:
raise ValueError('Image with incorrect dimensions '
'(width>0, height>0) got (width:%s, height:%s)' % (width, height))
cdef:
float [:, ::1] fire = zeros((height, width), dtype=float32)
# flame opacity palette
unsigned int [::1] alpha = make_palette(256, 1, 1, 0, 2)[0]
unsigned int [:, :, ::1] out = zeros((height, width, 4), dtype=uint32)
unsigned int [::1] palette = pal
int x = 0, y = 0, i = 0, f
float d
rgb rgb_
list_ = []
for f in range(frame):
for x in range(width):
fire[height-1, x] = randrange(1, 255)
with nogil:
for y in prange(0, height - 1):
for x in range(0, width - 1):
d = (fire[(y + 1) % height, (x - 1 + width) % width]
+ fire[(y + 1) % height, x % width]
+ fire[(y + 1) % height, (x + 1) % width]
+ fire[(y + 2) % height, x % width]) / factor
d -= rand() * 0.0001
if d > 255.0:
d = 255.0
if d < 0:
d = 0
fire[y, x] = d
rgb_ = int_to_rgb(palette[<unsigned int>d])
out[y, x, 0], out[y, x, 1], \
out[y, x, 2], out[y, x, 3] = <unsigned char>rgb_.r, \
<unsigned char>rgb_.g, <unsigned char>rgb_.b, alpha[<unsigned int>d]
surface = pygame.image.frombuffer(asarray(out, dtype=uint8), (width, height), 'RGBA')
list_.append(surface)
return list_
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
@cython.cdivision(True)
cpdef fire_surface24(int width, int height, float factor, pal, float [:, ::1] fire):
"""
:param width : integer; max width of the effect
:param height: integer; max height of the effect
:param factor: float; factor to reduce the flame effect
:param pal : ndarray; Color palette 1d numpy array (colors buffer unsigned int values)
:param fire : ndarray; 2d array (x, y) (contiguous) containing float values
:return :
"""
cdef:
# flame opacity palette
unsigned int [:, :, ::1] out = zeros((height, width, 3), dtype=uint32)
unsigned int [::1] palette = pal
int x = 0, y = 0
float d
unsigned char r=0, g=0, b=0
unsigned int ii=0
unsigned c1 = 0, c2 = 0
with nogil:
for x in prange(width):
fire[height - 1, x] = randRange(0, 255)
for y in range(0, height - 1):
for x in prange(0, width - 1):
c1 = (y + 1) % height
c2 = x % width
d = (fire[c1, (x - 1 + width) % width]
+ fire[c1, c2]
+ fire[c1, (x + 1) % width]
+ fire[(y + 2) % height, c2]) * factor
d -= rand() * 0.0001
# Cap the values
if d > 255.0:
d = 255.0
if d < 0:
d = 0
fire[y, x] = d
ii = palette[<unsigned int>d]
r = (ii >> 16) & 255 # red int32
g = (ii >> 8) & 255 # green int32
b = ii & 255 # blue int32
out[y, x, 0], out[y, x, 1], out[y, x, 2] = r, g, b
return asarray(out).transpose(1, 0, 2), fire
<|end_of_text|>import numpy as _N
cimport numpy as _N
#import kfcomMPmv_ram as _kfcom
import kfcomMPmv as _kfcom
import time as _tm
cimport cython
import warnings
warnings.filterwarnings("error")
dDTYPE = _N.double
ctypedef _N.double_t dDTYPE_t
"""
c functions
"""
cdef extern from "math.h":
double exp(double)
double sqrt(double)
double log(double)
double abs(double)
"""
p AR order
Ftrgt Ftrgt[0] noise amp. Ftrgt[1:] AR(p) coeffs
f freq.
f0 bandpass
f1
zr amp. at band stop
"""
######################## FFBS
#def armdl_FFBS_1itrMP(y, Rv, F, q2, N, k, fx00, fV00): # approximation
@cython.boundscheck(False)
@cython.wraparound(False)
def armdl_FFBS_1itrMP(args): # approximation
"""
For multiprocessing, arguments need to be put into a list.
"""
y = args[0]
Rv = args[1]
F = args[2]
q2 = args[3]
N = args[4]
cdef int k = args[5]
fx00 = args[6]
fV00 = args[7]
fx = _N.empty((N + 1, k, 1))
fV = _N.empty((N + 1, k, k))
fx[0] = fx00
fV[0] = fV00
GQGT = _N.zeros((k, k))
GQGT[0, 0] = q2
########## FF
#t1 = _tm.time()
FFdv(y, Rv, N, k, F, GQGT, fx, fV)
#t2 = _tm.time()
########## BS
smXN = _N.random.multivariate_normal(fx[N,:,0], fV[N], size=1)
#t1 = _tm.time()
#smpls = _kfcom.BSvec(F, N, k, GQGT, fx, fV, smXN)
smpls = _kfcom.BSvec_orig(F, N, k, GQGT, fx, fV, smXN)
#t2 = _tm.time()
#print (t2-t1)
return [smpls, fx, fV]
@cython.cdivision(True)
@cython.boundscheck(False)
@cython.wraparound(False)
def FFdv(double[::1] y, double[::1] Rv, N, long k, F, GQGT, fx, fV): # approximate KF # k==1,dynamic variance
#print "FFdv"
# do this until p_V has settled into stable values
H = _N.zeros((1, k)) # row vector
H[0, 0] = 1
cdef double q2 = GQGT[0, 0]
Ik = _N.identity(k)
px = _N.empty((N + 1, k, 1)) # naive and analytic calculated same way
fx_ram = _N.empty((N+1, k, 1))
pV = _N.empty((N + 1, k, k))
pV_ram = _N.empty((N+1, k, k))
fV_ram = _N.empty((N+1, k, k))
cdef double* p_y = &y[0]
cdef double* p_Rv = &Rv[0]
K = _N.empty((N + 1, k, 1))
K_ram = _N.empty((N + 1, k, 1))
cdef double[:, :, ::1] K_rammv = K_ram # forward filter
cdef double* p_K_ram = &K_rammv[0, 0, 0]
"""
temporary storage
"""
IKH = _N.eye(k) # only contents of first column modified
VFT = _N.empty((k, k))
FVFT = _N.empty((k, k))
KyHpx = _N.empty((k, 1))
# need memory views for these
# F, fx, px need memory views
# K, KH
# IKH
cdef double[:, ::1] Fmv = F
cdef double* p_F = &Fmv[0, 0]
cdef double[:, :, ::1] fxmv = fx # forward filter
cdef double* p_fx = &fxmv[0, 0, 0]
cdef double[:, :, ::1] fVmv = fV # forward filter
cdef double* p_fV = &fVmv[0, 0, 0]
cdef double[:, :, ::1] pxmv = px
cdef double* p_px = &pxmv[0, 0, 0]
cdef double[:, :, ::1] pVmv = pV
cdef double* p_pV = &pVmv[0, 0, 0]
cdef double[:, :, ::1] fx_ram_mv = fx_ram
cdef double* p_fx_ram = &fx_ram_mv[0, 0, 0]
cdef double[:, :, ::1] pV_ram_mv = pV_ram
cdef double* p_pV_ram = &pV_ram_mv[0, 0, 0]
cdef double[:, :, ::1] fV_ram_mv = fV_ram
cdef double* p_fV_ram = &fV_ram_mv[0, 0, 0]
cdef double[:, :, ::1] Kmv = K
cdef double[:, ::1] IKHmv = IKH
cdef int n, i, j, ii, jj, nKK, nK, ik, n_m1_KK, i_m1_K, iik
cdef double dd = 0, val, Kfac
for n from 1 <= n < N + 1:
t2t1 = 0
t3t2 = 0
t1 = _tm.time()
nKK = n * k * k
nK = n*k
n_m1_KK = (n-1) * k * k
dd = 0
# prediction mean (naive and analytic method are the same)
for i in xrange(1, k):# use same loop to copy and do dot product
ik = i*k
dd += p_F[i]*p_fx[n_m1_KK + ik]
p_px[nKK + ik] = p_fx[n_m1_KK + (i-1)*k] # shift older state
p_px[nKK] = dd + p_F[0]*p_fx[n_m1_KK] # 1-step prediction
##### covariance, 1-step prediction
#### upper 1x1
val = 0
for ii in xrange(k):
iik = ii*k
val += p_F[ii]*p_F[ii]*p_fV[n_m1_KK + iik + ii]
for jj in xrange(ii+1, k):
val += 2*p_F[ii]*p_F[jj]*p_fV[n_m1_KK + iik+jj]
p_pV_ram[nKK] = val + q2
#### lower k-1 x k-1
for ii in xrange(1, k):
for jj in xrange(ii, k):
p_pV_ram[nKK+ ii*k+ jj] = p_pV_ram[nKK+ jj*k+ ii] = p_fV[n_m1_KK + (ii-1)*k + jj-1]
# = p_fV[n_m1_KK + (ii-1)*k + jj]
#### (1 x k-1) and (k-1 x 1)
for j in xrange(1, k):
val = 0
for ii in xrange(k):
val += p_F[ii]*p_fV[n_m1_KK+ ii*k + j-1]
p_pV_ram[nKK + j] = val
p_pV_ram[nKK + j*k] = val
t2 = _tm.time()
# naive method
_N.dot(fV[n - 1], F.T, out=VFT)
_N.dot(F, VFT, out=pV[n]) # prediction
pVmv[n, 0, 0] += q2
t3 = _tm.time()
t2t1 += t2-t1
t3t2 += t3-t2
# print "----------%%%%%%%%%%%%%%%%%%%%%%%%"
# print (t2-t1)
# print (t3-t2)
# print pV_ram[n]
# print pV[n]
# print "----------"
t1 = _tm.time()
################################################ ANALYTIC
###### Kalman gain
Kfac = 1. / (p_pV_ram[nKK] + p_Rv[n]) # scalar
for i in xrange(k):
p_K_ram[nK + i] = p_pV_ram[nKK + i*k] * Kfac
################# filter mean
for i in xrange(k):
p_fx_ram[nK+i] = p_px[nK+ i] + p_K_ram[nK+ i]*(p_y[n] - p_px[nK])
for j in xrange(i, k):
p_fV_ram[nKK+i*k+ j] = p_pV_ram[nKK+ i*k+ j] - p_pV_ram[nKK+j]*p_K_ram[nK+i]
p_fV_ram[nKK+j*k + i] = p_fV_ram[nKK+i*k+ j]
############################################### NAIVE
t2 = _tm.time()
###### Kalman gain
mat = 1 / (pVmv[n, 0, 0] + Rv[n]) # scalar
K[n, :, 0] = pV[n, :, 0] * mat
################# filter mean
_N.multiply(K[n], y[n] - pxmv[n, 0, 0], out=KyHpx)
_N.add(px[n], KyHpx, out=fx[n])
# (I - KH), KH is zeros except first column
IKHmv[0, 0] = 1 - Kmv[n, 0, 0]
for i in xrange(1, k):
IKHmv[i, 0] = -Kmv[n, i, 0]
# (I - KH)
################# filter covariance naive
_N.dot(IKH, pV[n], out=fV[n])
t3 = _tm.time()
t2t1 += t2-t1
t3t2 += t3-t2
print "!!!!!!!!!!!!!!!!!----------------"
print "t2t1 %.3e" % t2t1
print "t3t2 %.3e" % t3t2
print fx[n]
print fx_ram[n]
print fV[n]
print fV_ram[n]
def FFdv_orig(double[::1] y, Rv, N, k, F, GQGT, fx, fV): # approximate KF # k==1,dynamic variance
#print "FFdv"
# do this until p_V has settled into stable values
H = _N.zeros((1, k)) # row vector
H[0, 0] = 1
cdef double q2 = GQGT[0, 0]
Ik = _N.identity(k)
px = _N.empty((N + 1, k, 1))
pV = _N.empty((N + 1, k, k))
pV_ram = _N.empty((N+1, k, k))
cdef double[:, :, ::1] pV_ram_mv = pV_ram
cdef double* p_pV_ram = &pV_ram_mv[0, 0, 0]
K = _N.empty((N + 1, k, 1))
"""
temporary storage
"""
IKH = _N.eye(k) # only contents of first column modified
VFT = _N.empty((k, k))
FVFT = _N.empty((k, k))
    KyHpx = _N.empty((k, 1))
# need memory views for these
# F, fx, px need memory views
# K, KH
# IKH
cdef double[:, ::1] Fmv = F
cdef double[:, :, ::1] fxmv = fx # forward filter
cdef double[:, :, ::1] pxmv = px
cdef double[:, :, ::1] pVmv = pV
#cdef double[::1] Rvmv = Rv
cdef double[:, :, ::1] Kmv = K
cdef double[:, ::1] IKHmv = IKH
cdef _N.intp_t n, i
cdef double dd = 0
for n from 1 <= n < N + 1:
dd = 0
# prediction mean
for i in xrange(1, k):# use same loop to copy and do dot product
dd += Fmv[0, i]*fxmv[n-1, i, 0]
pxmv[n, i, 0] = fxmv[n-1, i-1, 0] # shift older state
pxmv[n, 0, 0] = dd + Fmv[0, 0]*fxmv[n-1, 0, 0] # 1-step prediction
# covariance, 1-step prediction
#### upper 1x1
pV_ram[n, 0, 0] = 0
for ii in xrange(k):
pV_ram[n,0,0] += F[0,ii]*F[0,ii]*fV[n-1,ii,ii]
for jj in xrange(ii+1, k):
pV_ram[n,0,0] += 2*F[0,ii]*F[0,jj]*fV[n-1,ii,jj]
pV_ram[n,0,0] += q2
#### lower k-1 x k-1
for ii in xrange(1, k):
for jj in xrange(ii, k):
pV_ram[n, ii, jj] = fV[n-1,ii-1,jj-1]
pV_ram[n, jj, ii] = fV[n-1,ii-1,jj-1]
#### (1 x k-1) and (k-1 x 1)
for j in xrange(1, k):
val = 0
for ii in xrange(k):
val += F[0, ii]*fV[n-1, ii, j-1]
pV_ram[n, 0, j] = val
pV_ram[n, j, 0] = val
# naive method
_N.dot(fV[n - 1], F.T, out=VFT)
_N.dot(F, VFT, out=pV[n]) # prediction
pVmv[n, 0, 0] += q2
print "----------"
print pV_ram[n]
print pV[n]
print "----------"
###### Kalman gain
mat = 1 / (pVmv[n, 0, 0] + Rv[n]) # scalar
K[n, :, 0] = pV[n, :, 0] * mat
################# filter mean
_N.multiply(K[n], y[n] - pxmv[n, 0, 0], out=KyHpx)
_N.add(px[n], KyHpx, out=fx[n])
# (I - KH), KH is zeros except first column
IKHmv[0, 0] = 1 - Kmv[n, 0, 0]
for i in xrange(1, k):
IKHmv[i, 0] = -Kmv[n, i, 0]
# (I - KH)
################# filter covariance
_N.dot(IKH, pV[n], out=fV[n])
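# A minimal driver sketch for FFdv_orig (hedged: toy sizes and values, and it
# assumes _N is numpy as in the rest of this module):
#
#   N, k = 200, 3
#   F = _N.zeros((k, k))
#   F[0] = [0.5, 0.2, 0.1]            # AR coefficients in the first row
#   F[1:, :-1] = _N.eye(k - 1)        # shift block of the companion matrix
#   GQGT = _N.array([[0.1]])          # state-noise variance q2
#   y = _N.random.randn(N + 1)        # observations, y[0] unused
#   Rv = _N.full(N + 1, 0.5)          # per-step observation noise variance
#   fx = _N.zeros((N + 1, k, 1))      # fx[0] holds the prior mean
#   fV = _N.tile(_N.eye(k), (N + 1, 1, 1))  # fV[0] holds the prior covariance
#   FFdv_orig(y, Rv, N, k, F, GQGT, fx, fV)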
def FF1dv(_d, offset=0): # approximate KF # k==1,dynamic variance
GQGT = _d.G[0,0]*_d.G[0, 0] * _d.Q
k = _d.k
px = _d.p_x
pV = _d.p_V
fx = _d.f_x
fV = _d.f_V
Rv = _d.Rv
K = _d.K
# do this until p_V has settled into stable values
for n from 1 <= n < _d.N + 1:
px[n,0,0] = _d.F[0,0] * fx[n - 1,0,0]
# pV[n,0,0] = _d.F[0,0] * fV[n - 1,0,0] * _d.F.T[0,0] + GQGT
pV[n,0,0] = _d.F[0,0] * fV[n - 1,0,0] * _d.F[0,0] + GQGT
#_d.p_Vi[n,0,0] = 1/pV[n,0,0]
# mat = 1 / (_d.H[0,0]*pV[n,0,0]*_d.H[0,0] + Rv[n])
mat = 1 / (pV[n,0,0] + Rv[n])
# K[n,0,0] = pV[n]*_d.H[0,0]*mat
K[n,0,0] = pV[n,0,0]*mat
# fx[n,0,0] = px[n,0,0] + K[n,0,0]*(_d.y[n] - offset[n] - _d.H[0,0]* px[n,0,0])
# fx[n,0,0] = px[n,0,0] + K[n,0,0]*(_d.y[n] - _d.H[0,0]* px[n,0,0])
fx[n,0,0] = px[n,0,0] + K[n,0,0]*(_d.y[n] - px[n,0,0])
# fV[n,0,0] = (1 - K[n,0,0]* _d.H[0,0])* pV[n,0,0]
fV[n,0,0] = (1 - K[n,0,0])* pV[n,0,0]
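# For k == 1 this reduces to the textbook scalar Kalman filter with H = 1
# (the commented-out lines above keep the general-H forms for reference):
#   x_p = F x_f;  P_p = F^2 P_f + GQG'
#   K = P_p / (P_p + R);  x_f = x_p + K (y - x_p);  P_f = (1 - K) P_p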
<|end_of_text|>from cpython cimport bool
from sanpera cimport c_api
cdef class Vector:
cdef int _x
cdef int _y
cdef class Size(Vector):
cdef _fit(self, other, minmax, bool upscale, bool downscale)
cdef class Rectangle:
cdef int _x1
cdef int _x2
cdef int _y1
cdef int _y2
cdef c_api.RectangleInfo to_rect_info(self)
<|end_of_text|>from __future__ import print_function
import sys
import numpy as np
cimport numpy as np
from numpy.linalg import det
DTYPE = np.double
ctypedef np.double_t DTYPE_t
def get_best_channel_scaling(np.ndarray shad, np.ndarray matte_r):
"""
    Find scaling factors for the green and blue channels of the matte that
    give the best unshadowed image. The factors are found by brute-force
    2D grid search.
"""
assert shad.dtype == DTYPE and matte_r.dtype == DTYPE
cdef int h = shad.shape[0]
cdef int w = shad.shape[1]
scaling_range = np.arange(0.9, 1.2, 0.01)
cdef double min_error = 1000000.0
cdef double error
cdef double s_g_best = 1.0
cdef double s_b_best = 1.0
# declare vars
cdef int index_b
cdef int index_g
cdef double s_g
cdef double s_b
cdef int x
cdef int y
cdef int n_steps = len(scaling_range)
cdef np.ndarray matte = np.dstack([matte_r, matte_r, matte_r])
cdef np.ndarray unshad = np.array(shad, dtype=DTYPE)
cdef np.ndarray colors = np.zeros([3, shad.shape[0] * shad.shape[1]], dtype=DTYPE)
for index_b in xrange(n_steps):
s_b = scaling_range[index_b]
# modify the blue channel of the matte using s_b
matte[:,:,2] = (matte_r - 1.0) / s_b + 1.0
for index_g in xrange(n_steps):
s_g = scaling_range[index_g]
# modify the green channel of the matte using s_g
            matte[:,:,1] = (matte_r - 1.0) / s_g + 1.0
# get unshadowed image
unshad = shad / matte
# extract all the pixels from the image
for y in xrange(h):
for x in xrange(w):
colors[0, y * w + x] = unshad[y, x, 0]
colors[1, y * w + x] = unshad[y, x, 1]
colors[2, y * w + x] = unshad[y, x, 2]
# the error is proportional to the determinant of the covariance
# of all the pixel colors
error = np.log(det(np.cov(colors)))
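            # Intuition: a good matte makes formerly shadowed and lit pixels
            # agree, so the colour cloud shrinks and log|cov| drops; a poor
            # matte leaves a colour cast that inflates the covariance volume.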
if error < min_error:
min_error = error
s_g_best = s_g
s_b_best = s_b
return s_g_best, s_b_best
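# Hypothetical usage sketch (plt.imread and estimate_matte are stand-ins, not
# part of this module):
#
#   shad = plt.imread("shadowed.png").astype(np.double)   # H x W x 3
#   matte_r = estimate_matte(shad)                        # H x W red matte
#   s_g, s_b = get_best_channel_scaling(shad, matte_r)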
<|end_of_text|>import collections
import math
from SplitRoute cimport convert_tsp_to_vrp
from TwoOpt cimport TwoOpt
from LocalSearch cimport move, move_2_reverse, swap_1_1, swap_2_2, swap_3_3_reversed, swap_3_3
from utils cimport calculate_route_cost
import bisect
cdef class TabuSearch:
"""
A simple Tabu Search class
Attributes:
initial_solution: initial solution for the tabu method
        reduced_costs_arcs: arcs of the transport problem, ordered by reduced cost
        reduced_costs_costs: the matching reduced costs, sorted ascending
iterations: max number of iterations
        tenure: maximum number of iterations a move stays in the tabu list
costs: cost dict
q: demand at nodes
Q: capacity vehicles
N: nodes without deposit
"""
def __init__(self, initial_solution, reduced_costs_arcs, reduced_costs_costs, iterations, tenure, costs, q, Q, N):
"""
Construct a Tabu Search Object
Parameters:
initial_solution: initial solution for the tabu method
reduced_costs_arcs: arcs ordered by reduced cost of transport problem
            reduced_costs_costs: reduced costs, sorted ascending (parallel to reduced_costs_arcs)
iterations: max number of iterations
            tenure: maximum number of iterations a move stays in the tabu list
costs: cost dict
q: demand at nodes
Q: capacity vehicles
N: nodes without deposit
"""
self.initial_solution = initial_solution
self.reduced_costs_arcs = reduced_costs_arcs
self.reduced_costs_costs = reduced_costs_costs
self.iterations = iterations
self.tenure = tenure
self.costs = costs
self.q = q
self.Q = Q
self.N = N
self.initial_tenure = tenure
self.A = set()
# add deposit arcs
for node in N:
self.A.add((0, node))
self.A.add((node, 0))
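    # Hypothetical construction sketch (toy values; argument types are assumed
    # from how they are used -- the two reduced-cost sequences are parallel and
    # sorted ascending by reduced cost):
    #
    #   ts = TabuSearch(initial_solution=[0, 1, 2, 3, 0],
    #                   reduced_costs_arcs=[(1, 3), (3, 2)],
    #                   reduced_costs_costs=[-4.0, -1.5],
    #                   iterations=100, tenure=10,
    #                   costs={(i, j): 1.0 for i in range(4) for j in range(4) if i != j},
    #                   q={1: 1, 2: 1, 3: 1}, Q=10, N=[1, 2, 3])
    #   best = ts.start(initial_cost=12.0)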
cdef set granular(self, list N, float max_cost):
"""
Parameters:
N: nodes without deposit
max_cost: max reduced cost to consider
"""
cdef set A = set(self.A)
index_last_reduced_cost = bisect.bisect_right(self.reduced_costs_costs, max_cost)
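        # e.g. reduced_costs_costs = [-5.0, -2.0, 0.0, 3.0] with max_cost = 0.0
        # gives bisect_right(...) == 3, so the three arcs with reduced cost
        # <= max_cost are added below.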
# add arcs with reduced costs included in the threshold
A.update(self.reduced_costs_arcs[0:index_last_reduced_cost])
return A
cdef tuple diversification(self, float *max_cost, float percentage_increment, dict best_valid_neighborhood, int *best_count, tabu_list, int tenure_increment):
"""
Diversification step
"""
        # after iteration_number_max iterations without a new best solution,
        # raise the reduced-cost threshold so the granular arc set grows
max_cost[0] = max_cost[0] + abs(max_cost[0]*percentage_increment)
# if the number is near 0 we can't increment it using percentages
if max_cost[0] > -100 and max_cost[0] <= 0:
max_cost[0] = 1000
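        # e.g. max_cost = -10.0 with a 10% increment only creeps towards zero
        # (-10.0 -> -9.0 -> -8.1 -> ...) and never crosses it, hence the jump
        # to a large positive threshold once it enters (-100, 0].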
A = self.granular(self.N, max_cost[0])
best_count[0] = 0
self.tenure += tenure_increment
tabu_list = collections.deque(tabu_list, maxlen=self.tenure)
best_valid_neighborhood = move_2_reverse(best_valid_neighborhood, self.q, self.Q, self.costs)
best_valid_neighborhood = swap_3_3_reversed(best_valid_neighborhood, self.q, self.Q, self.costs)
best_valid_neighborhood = swap_3_3(best_valid_neighborhood, self.q, self.Q, self.costs)
best_valid_neighborhood = swap_2_2(best_valid_neighborhood, self.q, self.Q, self.costs)
best_valid_neighborhood = swap_1_1(best_valid_neighborhood, self.q, self.Q, self.costs)
best_valid_neighborhood = move(best_valid_neighborhood, self.q, self.Q, self.costs)
return best_valid_neighborhood, A, tabu_list
cpdef dict start(self, float initial_cost):
"""
Start the tabu search
Parameters:
initial_cost: initial solution cost
"""
cdef int iteration_number_max = 18
cdef int tenure_increment = 7
cdef float percentage_increment = 0.1
cdef float percentage_decrement = 0.2
cdef list trip
tabu_list = collections.deque(maxlen=self.tenure)
cdef list route = self.initial_solution
cdef dict best_route = {"route": self.initial_solution, "cost": initial_cost}
cdef dict best_valid_neighborhood = {"move": None, "route": self.initial_solution, "cost": initial_cost, "last_nodes": None}
cdef dict best_tsp_route = best_valid_neighborhood
cdef int it_count = 0
cdef int best_count = 0
cdef float min_cost = self.reduced_costs_costs[0]
cdef float max_cost = min_cost
cdef float max_reduced_cost = self.reduced_costs_costs[-1]
cdef set A = self.granular(self.N, max_cost)
opt = TwoOpt(self.costs)
cdef list solutions = [{"iteration": -1, "f obj": initial_cost, "best": True}]
cdef list two_opt_neighborhoods
cdef list vrp_route
cdef float vrp_cost
# stop condition
while self.iterations-it_count > 0:
if max_cost >= max_reduced_cost:
break
if best_count >= iteration_number_max:
best_valid_neighborhood, A, tabu_list = self.diversification(&max_cost, percentage_increment, best_valid_neighborhood, &best_count, tabu_list, tenure_increment)
two_opt_neighborhoods = [best_valid_neighborhood]
else:
two_opt_neighborhoods = opt.start(route, A, tabu_list, self.q, self.Q)
if len(two_opt_neighborhoods) > 0:
best_valid_neighborhood = two_opt_neighborhoods[0]
tabu_list.append(best_valid_neighborhood["move"])
# create a feasible route for VRP
vrp_route = convert_tsp_to_vrp(best_valid_neighborhood["route"], self.q, len(best_valid_neighborhood["route"]), self.Q, self.costs)
vrp_route = list(filter(None, vrp_route))
vrp_cost = 0
for trip in vrp_route:
vrp_cost += calculate_route_cost(self.costs, trip)
if vrp_cost > best_route["cost"]:
break
solutions.append({"iteration": it_count, "f obj": vrp_cost, "best": False})
if vrp_cost < best_route["cost"]:
# New best solution found
solutions[-1]["best"] = True
best_route["route"] = vrp_route
best_route["cost"] = vrp_cost
route = best_valid_neighborhood["route"]
best_tsp_route = best_valid_neighborhood
best_count = 0
# decrease granularity
max_cost = max_cost - abs(max_cost*percentage_decrement)
A = self.granular(self.N, max_cost)
best_count = 0
# add best solution arcs to granular
for node, next_node in zip(best_valid_neighborhood["route"], best_valid_neighborhood["route"][1:]):
A.add((node, next_node))
self.tenure -= tenure_increment
if self.tenure <= 0:
self.tenure = self.initial_tenure
tabu_list = collections.deque(tabu_list, maxlen=self.tenure)
else:
best_count += 1
else:
                # if no valid neighborhood is found, diversify
best_count = iteration_number_max
it_count += 1
return best_route
#, solutions<|end_of_text|>
# invalid syntax (not handled by the parser)
def syntax1():
a = b = c = d = e = f = g = h = i = 1 # prevent undefined names
*a
*1
*"abc"
*a*b
[*a, *b]
(a, b, *c, d, e, f, *g, h, i)
def syntax2():
list_of_sequences = [[1,2], [3,4]]
| Cython |