# encoding: utf-8
# cython: cdivision=True
# cython: boundscheck=False
# cython: wraparound=False
cimport numpy as np
ctypedef np.uint32_t UINT32_t
cpdef void _get_weights(double[:] w, int[:] subset, long[:] counter, long batch_size,
double learning_rate, double offset)
cpdef double _get_simple_weights(int[:] subset, long[:] counter, long batch_size,
double learning_rate, double offset)
cpdef long _update_code_sparse_batch(double[:] X_data,
int[:] X_indices,
int[:] X_indptr,
int n_rows,
int n_cols,
long[:] row_batch,
long[:] sample_subset,
double alpha,
double learning_rate,
double offset,
long var_red,
long projection,
double[::1, :] D_,
double[:, ::1] code_,
double[::1, :] A_,
double[::1, :] B_,
double[::1, :] G_,
double[::1, :] beta_,
double[:] multiplier_,
long[:] counter_,
long[:] row_counter_,
double[::1, :] D_subset,
double[::1, :] code_temp,
double[::1, :] G_temp,
double[::1, :] _this_X,
double[:] w_temp,
char[:] subset_mask,
int[:] dict_subset,
int[:] dict_subset_lim,
double[::1, :] dummy_2d_float,
) except *
cpdef void _update_code(double[::1, :] this_X,
int[:] subset,
long[:] sample_subset,
double alpha,
double learning_rate,
double offset,
long var_red,
long projection,
double[::1, :] D_,
double[:, ::1] code_,
double[::1, :] A_,
double[::1, :] B_,
double[::1, :] G_,
double[::1, :] beta_,
double[:] multiplier_,
long[:] counter_,
long[:] row_counter_,
double[::1, :] full_X,
double[::1, :] D_subset,
double[::1, :] code_temp,
double[::1, :] G_temp,
double[:] w_temp) except *
cpdef void _update_dict(double[::1, :] D_,
int[:] dict_subset,
bint fit_intercept,
double l1_ratio,
long projection,
long var_red,
double[::1, :] A_,
double[::1, :] B_,
double[::1, :] G_,
long[:] _D_range,
double[::1, :] _R,
double[::1, :] _D_subset,
double[:] _norm_temp,
double[:] _proj_temp)
cpdef void _predict(double[:] X_data,
int[:] X_indices,
int[:] X_indptr,
double[:, ::1] P,
double[::1, :] Q)
cpdef void _update_subset(bint replacement,
long _len_subset,
int[:] _subset_range,
int[:] _subset_lim,
int[:] _temp_subset,
                          UINT32_t random_seed)
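
# Hedged usage sketch (not from the original source): the declarations above
# suggest an online dictionary-learning loop in which, per mini-batch, sample
# weights are computed, the code (loadings) is updated, and the dictionary
# atoms are refreshed. The call order below is an illustrative assumption:
#
#     w = _get_simple_weights(subset, counter, batch_size,
#                             learning_rate, offset)
#     _update_code(this_X, subset, sample_subset, alpha, learning_rate,
#                  offset, var_red, projection, D_, code_, A_, B_, G_,
#                  beta_, multiplier_, counter_, row_counter_, full_X,
#                  D_subset, code_temp, G_temp, w_temp)
#     _update_dict(D_, dict_subset, fit_intercept, l1_ratio, projection,
#                  var_red, A_, B_, G_, _D_range, _R, _D_subset,
#                  _norm_temp, _proj_temp)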
# distutils: language = c++
from pycalphad.core.phase_rec cimport PhaseRecord
cdef public class CompositionSet(object)[type CompositionSetType, object CompositionSetObject]:
cdef public PhaseRecord phase_record
cdef readonly double[::1] dof, X
cdef double[:,::1] _X_2d_view
cdef public double NP
cdef public bint fixed
cdef readonly double energy
cdef double[::1] _energy_2d_view
cpdef void update(self, double[::1] site_fracs, double phase_amt, double[::1] state_variables)
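
# Hedged usage sketch (an assumption, not pycalphad's documented API): a
# CompositionSet wraps a PhaseRecord and is refreshed in place as the solver
# iterates, e.g.
#
#     compset.update(site_fracs, phase_amt, state_variables)
#
# where ``site_fracs``, ``phase_amt`` and ``state_variables`` are
# illustrative names for the solver's current degrees of freedom.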
"""Export definitions from engine.h"""
from libcpp.string cimport string
from libcpp cimport bool as cpp_bool
from libcpp.set cimport set
from libcpp.vector cimport vector
from libcpp.utility cimport pair
from .IkConceptProximity cimport ProximityPairVector_t
cdef extern from "../../engine/src/engine.h" namespace "iknowdata" nogil:
enum Attribute:
Negation = 1
DateTime = 2
PositiveSentiment = 5
NegativeSentiment = 6
EntityVector = 7
Frequency = 9
Duration = 10
Measurement = 11
Certainty = 12
Generic1 = 13
Generic2 = 14
Generic3 = 15
cdef extern from "../../engine/src/engine.h" namespace "iknowdata::Entity" nogil:
const size_t kNoConcept = <size_t>(-1)
enum eType:
NonRelevant = 0,
Concept
Relation
PathRelevant
cdef extern from "../../engine/src/engine.h" namespace "iknowdata" nogil:
ctypedef unsigned short Entity_Ref
ctypedef unsigned short Attribute_Ref
cdef extern from "../../engine/src/engine.h" namespace "iknowdata::Sent_Attribute" nogil:
ctypedef vector[pair[string, string]] Sent_Attribute_Parameters
cdef extern from "../../engine/src/engine.h" namespace "iknowdata::Sentence" nogil:
ctypedef vector[Entity] Entities
ctypedef vector[Sent_Attribute] Sent_Attributes
ctypedef vector[Entity_Ref] Path
ctypedef vector[Path_Attribute] Path_Attributes
cdef extern from "../../engine/src/engine.h" namespace "iknowdata::Text_Source" nogil:
ctypedef ProximityPairVector_t Proximity
ctypedef vector[Sentence] Sentences
cdef extern from "../../engine/src/engine.h" namespace "iknowdata" nogil:
struct Entity:
eType type "type_"
size_t offset_start "offset_start_", offset_stop "offset_stop_"
string index "index_"
double dominance_value "dominance_value_"
size_t entity_id "entity_id_"
struct Sent_Attribute:
Attribute type "type_"
size_t offset_start "offset_start_", offset_stop "offset_stop_"
string marker "marker_"
Sent_Attribute_Parameters parameters "parameters_"
Entity_Ref entity_ref
Path entity_vector
struct Path_Attribute:
Attribute type
unsigned short pos
unsigned short span
struct Sentence:
Entities entities
Sent_Attributes sent_attributes
Path path
Path_Attributes path_attributes
struct Text_Source:
Sentences sentences
Proximity proximity
cdef extern from "../../engine/src/engine.h" nogil:
cdef cppclass CPPUserDictionary "UserDictionary":
CPPUserDictionary() except +
void clear() except +
int addLabel(const string& literal, const char* UdctLabel) except +
void addSEndCondition(const string& literal, cpp_bool b_end) except +
void addConceptTerm(const string& literal) except +
void addRelationTerm(const string& literal) except +
void addNonrelevantTerm(const string& literal) except +
void addNegationTerm(const string& literal) except +
void addPositiveSentimentTerm(const string& literal) except +
void addNegativeSentimentTerm(const string& literal) except +
void addUnitTerm(const string& literal) except +
void addNumberTerm(const string& literal) except +
void addTimeTerm(const string& literal) except +
int addCertaintyLevel(const string& literal, int level) except +
cdef extern from "../../engine/src/engine.h" nogil:
cdef cppclass CPPiKnowEngine "iKnowEngine":
Text_Source m_index
vector[string] m_traces
CPPiKnowEngine() except +
void index(const string& text_source, const string& language, cpp_bool traces) except +
void loadUserDictionary(CPPUserDictionary& udct) except +
void unloadUserDictionary() except +
@staticmethod
const set[string]& GetLanguagesSet() except +
@staticmethod
string NormalizeText(const string& text_source, const string& language, cpp_bool bUserDct, cpp_bool bLowerCase, cpp_bool bStripPunct) except +
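
# Hedged usage sketch (illustrative only; everything except the declarations
# above is an assumption). From a .pyx module the engine might be driven as:
#
#     cdef CPPiKnowEngine engine = CPPiKnowEngine()
#     engine.index(text_utf8, language_utf8, False)  # both args UTF-8 bytes
#     for sentence in engine.m_index.sentences:
#         for entity in sentence.entities:
#             ...  # entity.type, entity.index, entity.offset_start, etc.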
# cython: profile=False
# cython: cdivision=True
# cython: boundscheck=False
# cython: wraparound=False
import cython
cimport cython
from cpython cimport array
import numpy as np
cimport numpy as np
# from libc.stdlib cimport free
from libc.math cimport atan, sqrt, sin, cos, floor, ceil
# from libc.math cimport isnan as npy_isnan
# from libc.math cimport isinf as npy_isinf
# from cython.parallel import parallel, prange
# from libc.math cimport isnan, isinf
# OpenCV
try:
import cv2
except ImportError:
raise ImportError('OpenCV did not load')
# Scikit-image
try:
    from skimage.feature import hog as HOG
    from skimage.feature import local_binary_pattern as LBP
    # from skimage.feature import greycomatrix, greycoprops
    from skimage.transform import probabilistic_hough_line as PHL
except ImportError:
    raise ImportError('scikit-image (skimage.feature) did not load')
# import pyximport
# pyximport.install(setup_args={'include_dirs': [np.get_include()]})
old_settings = np.seterr(all='ignore')
DTYPE_int = np.int_  # np.int was removed in NumPy 1.24; np.int_ is the equivalent
ctypedef np.int_t DTYPE_int_t
DTYPE_intp = np.intp
ctypedef np.intp_t DTYPE_intp_t
DTYPE_int64 = np.int64
ctypedef np.int64_t DTYPE_int64_t
DTYPE_uint8 = np.uint8
ctypedef np.uint8_t DTYPE_uint8_t
DTYPE_uint16 = np.uint16
ctypedef np.uint16_t DTYPE_uint16_t
DTYPE_uint32 = np.uint32
ctypedef np.uint32_t DTYPE_uint32_t
DTYPE_uint64 = np.uint64
ctypedef np.uint64_t DTYPE_uint64_t
DTYPE_float32 = np.float32
ctypedef np.float32_t DTYPE_float32_t
DTYPE_float64 = np.float64
ctypedef np.float64_t DTYPE_float64_t
# cdef npceil = np.ceil
# cdef extern from 'numpy/npy_math.h':
# DTYPE_float32_t npy_ceil(DTYPE_float32_t x) nogil
cdef extern from 'numpy/npy_math.h':
bint npy_isnan(DTYPE_float32_t x) nogil
cdef extern from 'numpy/npy_math.h':
bint npy_isinf(DTYPE_float32_t x) nogil
# cdef extern from'math.h':
# DTYPE_float32_t ceil(DTYPE_float32_t x)
# cdef extern from 'numpy/npy_math.h':
# DTYPE_float32_t npy_floor(DTYPE_float32_t x)
# Define a function pointer to a metric.
ctypedef DTYPE_float32_t[:, :, :, ::1] (*metric_ptr)(DTYPE_float32_t[:, :, :, ::1], DTYPE_float32_t[:, :], DTYPE_float32_t[:], DTYPE_float32_t[:], int, DTYPE_float32_t[:, :, :, ::1]) nogil
cdef inline DTYPE_float32_t roundd(DTYPE_float32_t val) nogil:
return floor(val + 0.5)
cdef inline DTYPE_float32_t sqrt_f(DTYPE_float32_t sx) nogil:
    return sqrt(sx)
cdef inline DTYPE_float32_t abs_f(DTYPE_float32_t sx) nogil:
return sx * -1.0 if sx < 0 else sx
cdef inline DTYPE_uint8_t abs_ui(DTYPE_uint8_t sx) nogil:
return sx * -1 if sx < 0 else sx
cdef inline Py_ssize_t abs_s(Py_ssize_t sx) nogil:
return sx * -1 if sx < 0 else sx
cdef inline int n_rows_cols(int pixel_index, int rows_cols, int block_size) nogil:
return rows_cols if pixel_index + rows_cols < block_size else block_size - pixel_index
cdef inline DTYPE_float32_t pow2(DTYPE_float32_t sx) nogil:
return sx * sx
cdef inline DTYPE_float32_t pow3(DTYPE_float32_t sx) nogil:
return sx * sx * sx
cdef inline DTYPE_float32_t pow4(DTYPE_float32_t sx) nogil:
return sx * sx * sx * sx
cdef inline int _get_min_sample_i(int s1, int s2) nogil:
return s2 if s2 < s1 else s1
cdef inline DTYPE_float32_t _get_min_sample(DTYPE_float32_t s1, DTYPE_float32_t s2) nogil:
return s2 if s2 < s1 else s1
cdef inline DTYPE_uint8_t _get_min_sample_int(DTYPE_uint8_t s1, DTYPE_uint8_t s2) nogil:
return s2 if s2 < s1 else s1
cdef inline DTYPE_float32_t _get_max_sample(DTYPE_float32_t s1, DTYPE_float32_t s2) nogil:
return s2 if s2 > s1 else s1
cdef inline DTYPE_uint8_t _get_max_sample_int(DTYPE_uint8_t s1, DTYPE_uint8_t s2) nogil:
return s2 if s2 > s1 else s1
cdef inline DTYPE_float32_t _euclidean_distance(DTYPE_float32_t x1, DTYPE_float32_t y1, DTYPE_float32_t x2, DTYPE_float32_t y2) nogil:
return (((x1 - x2)**2.) + ((y1 - y2)**2.))**0.5
cdef inline DTYPE_float32_t _get_line_length(DTYPE_float32_t y1, DTYPE_float32_t x1, DTYPE_float32_t y2, DTYPE_float32_t x2) nogil:
    # Euclidean length between (y1, x1) and (y2, x2)
    return ((y1 - y2)**2. + (x1 - x2)**2.)**0.5
cdef unsigned int _get_output_length(int rows,
int cols,
int scales_block,
int block_size,
int scale_length,
int n_features):
cdef:
Py_ssize_t i, j, ki
unsigned int out_len = 0
for i from 0 <= i < rows-scales_block by block_size:
for j from 0 <= j < cols-scales_block by block_size:
for ki in range(0, scale_length):
out_len += n_features
return out_len
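
# A worked example of the block-count arithmetic above (illustrative numbers,
# not from the original code): with rows = cols = 64, block_size = 4,
# scales_block = 28, scale_length = 2 and n_features = 5, each outer loop
# visits i, j = 0, 4, ..., 32 (9 positions), giving
# out_len = 9 * 9 * 2 * 5 = 810.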
cdef DTYPE_uint8_t _get_min(DTYPE_uint8_t[:, :] block, int rs, int cs) nogil:
cdef:
Py_ssize_t bi, bj
DTYPE_uint8_t m = 255
for bi in range(0, rs):
for bj in range(0, cs):
m = _get_min_sample_int(m, block[bi, bj])
return m
cdef DTYPE_float32_t _get_max_f2d(DTYPE_float32_t[:, :] block, int rs, int cs) nogil:
cdef:
Py_ssize_t bi, bj
DTYPE_float32_t m = -9999999.
for bi in range(0, rs):
for bj in range(0, cs):
m = _get_max_sample(m, block[bi, bj])
return m
cdef int _get_max(DTYPE_uint8_t[:, :] block, Py_ssize_t rs, Py_ssize_t cs) nogil:
cdef:
Py_ssize_t bi, bj
int m = -255
for bi in range(0, rs):
for bj in range(0, cs):
m = _get_max_sample_int(m, block[bi, bj])
return m
cdef DTYPE_float32_t _get_max_f(DTYPE_float32_t[:] in_row, int cols) nogil:
cdef:
Py_ssize_t a
DTYPE_float32_t m = in_row[0]
for a in range(1, cols):
m = _get_max_sample(m, in_row[a])
return m
cdef DTYPE_float32_t _get_sum_uint8(DTYPE_uint8_t[:, :] block, int rs, int cs) nogil:
cdef:
Py_ssize_t bi, bj
DTYPE_float32_t block_sum = 0.
for bi in range(0, rs):
for bj in range(0, cs):
block_sum += float(block[bi, bj])
return block_sum
cdef DTYPE_float32_t _get_sum(DTYPE_float32_t[:, :] block, int rs, int cs) nogil:
cdef:
Py_ssize_t bi, bj
DTYPE_float32_t block_sum = 0.
for bi in range(0, rs):
for bj in range(0, cs):
block_sum += block[bi, bj]
return block_sum
cdef DTYPE_float32_t _get_mean(DTYPE_float32_t[:, :] block, int rs, int cs) nogil:
cdef:
DTYPE_float32_t n_samps = float(rs*cs)
return _get_sum(block, rs, cs) / n_samps
cdef void _get_mean_var(DTYPE_float32_t[:, :] block,
int rs,
int cs,
DTYPE_float32_t[:] out_values_) nogil:
cdef:
Py_ssize_t bi, bj
DTYPE_float32_t n_samps = float(rs*cs)
DTYPE_float32_t mu = _get_mean(block, rs, cs)
DTYPE_float32_t block_var = 0.
for bi in range(0, rs):
for bj in range(0, cs):
block_var += pow2(float(block[bi, bj]) - mu)
out_values_[0] = mu
out_values_[1] = block_var / n_samps
cdef DTYPE_float32_t _get_weighted_sum(DTYPE_float32_t[:, ::1] block,
DTYPE_float32_t[:, ::1] weights,
int rs,
int cs) nogil:
cdef:
Py_ssize_t bi, bj
DTYPE_float32_t block_sum = 0.
DTYPE_float32_t dv
for bi in range(0, rs):
        for bj in range(0, cs):
dv = block[bi, bj] / weights[bi, bj]
if not npy_isnan(dv) and not npy_isinf(dv):
block_sum += dv
return block_sum
cdef DTYPE_float32_t _get_weighted_sum_byte(DTYPE_uint8_t[:, :] block, DTYPE_float32_t[:, :] weights, int rs, int cs) nogil:
cdef:
Py_ssize_t bi, bj
DTYPE_float32_t block_sum = 0.
DTYPE_float32_t dv
for bi in range(0, rs):
for bj in range(0, cs):
dv = float(block[bi, bj]) / weights[bi, bj]
if not npy_isnan(dv) and not npy_isinf(dv):
block_sum += dv
return block_sum
cdef DTYPE_float32_t _get_weighted_mean(DTYPE_float32_t[:, ::1] block,
DTYPE_float32_t[:, ::1] weights,
int rs,
int cs) nogil:
cdef:
DTYPE_float32_t n_samps = float(rs*cs)
return _get_weighted_sum(block, weights, rs, cs) / n_samps
cdef DTYPE_float32_t _get_weighted_mean_byte(DTYPE_uint8_t[:, :] block, DTYPE_float32_t[:, :] weights, int rs, int cs) nogil:
cdef:
DTYPE_float32_t n_samps = float(rs*cs)
return _get_weighted_sum_byte(block, weights, rs, cs) / n_samps
cdef void _get_weighted_mean_var(DTYPE_float32_t[:, ::1] block,
DTYPE_float32_t[:, ::1] weights,
int rs,
int cs,
DTYPE_float32_t[::1] out_values_) nogil:
cdef:
Py_ssize_t bi, bj
DTYPE_float32_t n_samps = float(rs*cs)
DTYPE_float32_t mu = _get_weighted_mean(block, weights, rs, cs)
DTYPE_float32_t block_var = 0.
for bi in range(0, rs):
for bj in range(0, cs):
block_var += pow2(float(block[bi, bj]) - mu)
out_values_[0] = mu
out_values_[1] = block_var / n_samps
cdef void _get_weighted_mean_var_byte(DTYPE_uint8_t[:, ::1] block,
DTYPE_float32_t[:, ::1] weights,
int rs,
int cs,
DTYPE_float32_t[::1] out_values_) nogil:
cdef:
Py_ssize_t bi, bj
DTYPE_float32_t n_samps = float(rs*cs)
DTYPE_float32_t block_mu = _get_weighted_mean_byte(block, weights, rs, cs)
DTYPE_float32_t block_var = 0.
for bi in range(0, rs):
for bj in range(0, cs):
block_var += pow2(float(block[bi, bj]) - block_mu)
out_values_[0] = block_mu
out_values_[1] = block_var / n_samps
# cdef void _get_directional_weighted_mean_var(DTYPE_float32_t[:, :] block,
# int rs,
# int cs,
# DTYPE_float32_t[:] out_values_) nogil:
#
# cdef:
# int rs_half = <int>(rs / 2.)
# int cs_half = <int>(cs / 2.)
# int rs_qu = <int>(rs_half / 2.)
# int cs_qu = <int>(cs_half / 2.)
#
# # Upper left box
# out_values_[0] = _get_mean(block[:rs_half, :cs_half], rs_half, cs_half)
#
# # Upper center box
# out_values_[1] = _get_mean(block[:rs_half, cs_qu:cs_qu+cs_half], rs_half, cs_half)
#
# # Upper right box
# out_values_[2] = _get_mean(block[:rs_half, cs_half:], rs_half, cs_half)
#
# # Left box
# out_values_[3] = _get_mean(block[rs_qu:rs_qu+rs_half, :cs_half], rs_half, cs_half)
#
# # Center box
# out_values_[4] = _get_mean(block[rs_qu:rs_qu+rs_half, cs_qu:cs_qu+cs_half], rs_half, cs_half)
#
# # Right box
# out_values_[5] = _get_mean(block[rs_qu:rs_qu+rs_half, cs_half:], rs_half, cs_half)
#
# # Lower left box
# out_values_[6] = _get_mean(block[rs_half:, :cs_half], rs_half, cs_half)
#
# # Lower center box
# out_values_[7] = _get_mean(block[rs_half:, cs_qu:cs_qu+cs_half], rs_half, cs_half)
#
# # Lower right box
# out_values_[8] = _get_mean(block[rs_half:, cs_half:], rs_half, cs_half)
# cdef void _get_angle_stats(DTYPE_float32_t[:, :] block,
# int rs,
# int cs,
# DTYPE_float32_t[:] out_values_) nogil:
#
# cdef:
# Py_ssize_t i, j, jc
# int rsh = <int>(rs / 2.)
# int csh = <int>(cs / 2.)
# DTYPE_float32_t line_sum
#
# # Center horizontal
# line_sum = 0.
# for j in range(0, cs):
# line_sum += block[rsh, j]
#
# out_values_[0] = line_sum / cs
#
# # Center vertical
# line_sum = 0.
# for i in range(0, rs):
# line_sum += block[i, csh]
#
# out_values_[1] = line_sum / rs
#
# # Top left to bottom right
# line_sum = 0.
# for i in range(0, rs):
# line_sum += block[i, i]
#
# out_values_[2] = line_sum / rs
#
# # Top right to bottom left
# line_sum = 0.
# jc = cs - 1
# for i in range(0, rs):
# line_sum += block[i, jc]
# jc -= 1
#
# out_values_[3] = line_sum / rs
cdef DTYPE_float32_t _get_mean_uint8(DTYPE_uint8_t[:, :] block, int rs, int cs) nogil:
cdef:
DTYPE_float32_t n_samps = float(rs*cs)
return _get_sum_uint8(block, rs, cs) / n_samps
cdef DTYPE_float32_t _get_std_1d(DTYPE_float32_t[:] block_line, int cs, DTYPE_float32_t psi) nogil:
cdef:
Py_ssize_t bj
DTYPE_float32_t block_std = 0.
for bj in range(0, cs):
block_std += pow2(block_line[bj] - psi)
return sqrt(block_std / cs)
cdef DTYPE_float32_t _get_std_1d_uint16(DTYPE_uint16_t[:] block, int cs) nogil:
cdef:
Py_ssize_t bj
DTYPE_float32_t mu = _get_mean_1d_uint16(block, cs)
DTYPE_float32_t block_var = 0.
for bj in range(0, cs):
block_var += pow2(float(block[bj]) - mu)
return sqrt_f(block_var / cs)
cdef DTYPE_float32_t _get_var(DTYPE_float32_t[:, :] block, int rs, int cs, DTYPE_float32_t ddof=1.) nogil:
cdef:
Py_ssize_t bi, bj
DTYPE_float32_t mu = _get_mean(block, rs, cs)
DTYPE_float32_t block_var = 0.
for bi in range(0, rs):
for bj in range(0, cs):
block_var += pow2(float(block[bi, bj]) - mu)
return block_var / ((rs*cs) - ddof)
cdef DTYPE_float32_t _get_var_uint8(DTYPE_uint8_t[:, :] block, int rs, int cs, DTYPE_float32_t ddof=1.) nogil:
cdef:
Py_ssize_t bi, bj
DTYPE_float32_t mu = _get_mean_uint8(block, rs, cs)
DTYPE_float32_t block_var = 0.
for bi in range(0, rs):
for bj in range(0, cs):
block_var += pow2(float(block[bi, bj]) - mu)
return block_var / ((rs*cs) - ddof)
cdef DTYPE_float32_t _get_sum1d(DTYPE_float32_t[:] block, int cs) nogil:
cdef:
Py_ssize_t bj
        DTYPE_float32_t block_sum = block[0]
# for bj in prange(0, cs, nogil=True, num_threads=cs, schedule='static'):
for bj in range(1, cs):
block_sum += block[bj]
return block_sum
cdef DTYPE_float32_t _get_mean_1d(DTYPE_float32_t[:] block, int cs) nogil:
return _get_sum1d(block, cs) / cs
cdef DTYPE_float32_t _get_var_1d(DTYPE_float32_t[:] block, int cs, DTYPE_float32_t mu, DTYPE_float32_t ddof=1.) nogil:
cdef:
Py_ssize_t bj
DTYPE_float32_t block_var = 0.
for bj in range(0, cs):
block_var += pow2(float(block[bj]) - mu)
return block_var / (cs - ddof)
cdef DTYPE_float32_t _get_sum1d_uint16(DTYPE_uint16_t[:] block, int cs) nogil:
cdef:
Py_ssize_t bj
DTYPE_uint16_t block_sum = block[0]
for bj in range(1, cs):
block_sum += block[bj]
return float(block_sum)
cdef DTYPE_float32_t _get_mean_1d_uint16(DTYPE_uint16_t[:] block, int cs) nogil:
return _get_sum1d_uint16(block, cs) / cs
cdef void draw_line(Py_ssize_t y0, Py_ssize_t x0, Py_ssize_t y1, Py_ssize_t x1, DTYPE_uint16_t[:, :] rc_) nogil:
"""
*Graciously adapted from the Scikit-image team @ https://github.com/scikit-image/scikit-image/blob/master/skimage/draw/_draw.pyx
Generate line pixel coordinates.
    Parameters
    ----------
    y0, x0 : int
        Starting position (row, column).
    y1, x1 : int
        End position (row, column).
    rc_ : 2-d array
        Output buffer, filled in place.

    Returns
    -------
    None
        Row 0 of ``rc_`` receives the row indices, row 1 the column
        indices, and ``rc_[2, 0]`` stores the true line length.
See Also
--------
line_aa : Anti-aliased line generator
Examples
--------
>>> from skimage.draw import line
>>> img = np.zeros((10, 10), dtype=np.uint8)
>>> rr, cc = line(1, 1, 8, 8)
>>> img[rr, cc] = 1
>>> img
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
"""
cdef:
char steep = 0
Py_ssize_t x = x0
Py_ssize_t y = y0
Py_ssize_t dx = abs_s(x1 - x0)
Py_ssize_t dy = abs_s(y1 - y0)
Py_ssize_t sx, sy, d, i
if (x1 - x) > 0:
sx = 1
else:
sx = -1
if (y1 - y) > 0:
sy = 1
else:
sy = -1
if dy > dx:
steep = 1
x, y = y, x
dx, dy = dy, dx
sx, sy = sy, sx
d = (2 * dy) - dx
# rc = np.empty((2, dx+1), dtype='uint16')
#rc = <double * >malloc((n ** 2) * sizeof(double))
# cc = rr.copy()
# rr = clone(template, int(dx)+1, True)
# cc = clone(template, int(dx)+1, True)
for i in range(0, dx):
if steep:
rc_[0, i] = x
rc_[1, i] = y
else:
rc_[0, i] = y
rc_[1, i] = x
while d >= 0:
y += sy
d -= 2 * dx
x += sx
d += 2 * dy
rc_[0, dx] = y1
rc_[1, dx] = x1
# Store the real line length
rc_[2, 0] = dx + 1
cdef void _get_stats(DTYPE_float32_t[:] block, int samps, DTYPE_float32_t[:] output_array) nogil:
"""Calculate the central moments 1-4"""
cdef:
Py_ssize_t idx
DTYPE_float32_t the_max = _get_max_f(block, samps)
DTYPE_float32_t m1 = _get_mean_1d(block, samps) # 1st moment (mean)
DTYPE_float32_t m2 = _get_var_1d(block, samps, m1) # 2nd moment (variance)
DTYPE_float32_t stdev = sqrt_f(m2) # standard deviation
DTYPE_float32_t bx = block[0]
DTYPE_float32_t val_dev = bx - m1
        DTYPE_float32_t m3 = pow3(val_dev)  # 3rd central moment accumulator (for skewness)
        DTYPE_float32_t m4 = pow4(val_dev)  # 4th central moment accumulator (for kurtosis)
for idx in range(1, samps):
bx = block[idx]
val_dev = bx - m1
m3 += pow3(val_dev)
m4 += pow4(val_dev)
m3 /= samps
m4 /= samps
output_array[0] = the_max # max
output_array[1] = m1 # mean
output_array[2] = m2 # variance
output_array[3] = m3 / pow3(stdev) # skewness: ratio of 3rd moment and standard dev. cubed
output_array[4] = m4 / pow4(stdev) # kurtosis
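
# For reference, ``_get_stats`` follows the standard moment definitions: with
# mean m1 over n samples,
#
#     m_k = (1/n) * sum((x_i - m1)**k)    for k = 3, 4
#     skewness = m3 / stdev**3
#     kurtosis = m4 / stdev**4
#
# where stdev = sqrt(m2) and m2 is the variance from ``_get_var_1d``
# (ddof=1 by default, so a sample variance).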
cdef void _get_moments(DTYPE_float32_t[::1] img_arr, DTYPE_float32_t[::1] output) nogil:
"""Get the moments for 1d array"""
cdef:
int img_arr_cols = img_arr.shape[0]
    if _get_max_f(img_arr, img_arr_cols) != 0:
_get_stats(img_arr, img_arr_cols, output)
# Gabor filter bank
cdef void _convolution(DTYPE_float32_t[:, :] block2convolve,
DTYPE_float32_t[:, :] gkernel,
int br, int bc,
int knr, int knc,
int knrh, int knch,
DTYPE_float32_t[:, :] out_convolved) nogil:
""""2d convolution of a Gabor kernel over a local window"""
cdef:
Py_ssize_t bi, bj, bki, bkj
DTYPE_float32_t kernel_sum
# Move the kernel
for bi in range(0, br-knr):
for bj in range(0, bc-knc):
kernel_sum = 0.
# Process the kernel
for bki in range(0, knr):
for bkj in range(0, knc):
kernel_sum += block2convolve[bi+bki, bj+bkj] * gkernel[bki, bkj]
out_convolved[bi+knrh, bj+knch] = kernel_sum
cdef void _feature_gabor(DTYPE_float32_t[:, :, ::1] ch_bdka,
int blk,
DTYPE_uint16_t[::1] scs,
int out_len,
int scales_half,
int scales_block,
int n_kernels,
int rows,
int cols,
                         int scale_length,
int end_scale,
DTYPE_float32_t[::1] out_list_):
"""
Returns at each scale at each kernel
1: Mean
2: Variance
"""
cdef:
Py_ssize_t i, j, ki, kl, pi, scale_kernel
DTYPE_uint16_t k
unsigned int rs, cs, k_half
DTYPE_float32_t[:, ::1] ch_bd
# list ch_bd_k = []
# np.ndarray[DTYPE_float32_t, ndim=3] ch_bdka_array = np.zeros((n_kernels, rows, cols), dtype='float32')
# DTYPE_float32_t[:, :, :] ch_bdka = np.zeros((n_kernels, rows, cols), dtype='float32')
# DTYPE_float32_t[:, :] ch_bd_gabor
# DTYPE_float32_t[:] sts
# list st
int pix_ctr = 0
# int knr = kernels[0].shape[0]
# int knc = kernels[0].shape[1]
# int knrh = <int>(knr / 2.)
# int knch = <int>(knc / 2.)
# DTYPE_float32_t[:, :] gkernel
# DTYPE_float32_t[:] out_values
DTYPE_float32_t[:, :, ::1] dist_weights_stack = np.zeros((scale_length*n_kernels,
end_scale*2,
end_scale*2), dtype='float32')
int bcr, bcc
DTYPE_float32_t[::1] in_zs = np.zeros(2, dtype='float32')
DTYPE_float32_t[:, ::1] dist_weights, dw
scale_kernel = 0
for ki in range(0, scale_length):
k = scs[ki]
k_half = <int>(k / 2.)
rs = (scales_half - k_half + k) - (scales_half - k_half)
cs = (scales_half - k_half + k) - (scales_half - k_half)
dist_weights = np.zeros((rs, cs), dtype='float32')
for kl in range(0, n_kernels):
dist_weights_stack[scale_kernel, :rs, :cs] = _create_weights(dist_weights, rs, cs)
scale_kernel += 1
with nogil:
for i from 0 <= i < rows-scales_block by blk:
for j from 0 <= j < cols-scales_block by blk:
scale_kernel = 0
for ki in range(0, scale_length):
k = scs[ki]
k_half = <int>(k / 2.)
rs = (scales_half - k_half + k) - (scales_half - k_half)
cs = (scales_half - k_half + k) - (scales_half - k_half)
for kl in range(0, n_kernels):
ch_bd = ch_bdka[scale_kernel,
i+scales_half-k_half:i+scales_half-k_half+k,
j+scales_half-k_half:j+scales_half-k_half+k]
bcr = ch_bd.shape[0]
bcc = ch_bd.shape[1]
dw = dist_weights_stack[scale_kernel, :rs, :cs]
# _convolution(ch_bd, gkernel, bcr, bcc, knr, knc, knrh, knch, ch_bd_gabor)
_get_weighted_mean_var(ch_bd, dw, bcr, bcc, in_zs)
# _get_angle_stats(ch_bd_gabor, bcr, bcc, in_zs)
# _get_directional_weighted_mean_var(ch_bd_gabor, bcr, bcc, in_zs)
# _get_moments(in_zs, sts)
for pi in range(0, 2):
out_list_[pix_ctr] = in_zs[pi]
pix_ctr += 1
scale_kernel += 1
def feature_gabor(DTYPE_float32_t[:, :, ::1] chbd, int blk, list scs, int end_scale, int n_kernels=8):
cdef:
Py_ssize_t i, j, ki, kl
int scales_half = <int>(end_scale / 2.)
int scales_block = end_scale - blk
int rows = chbd.shape[1]
int cols = chbd.shape[2]
DTYPE_uint16_t[::1] scales_array = np.array(scs, dtype='uint16')
int scale_length = scales_array.shape[0]
DTYPE_float32_t[::1] out_list
unsigned int out_len = 0
for i from 0 <= i < rows-scales_block by blk:
for j from 0 <= j < cols-scales_block by blk:
for ki in range(0, scale_length):
for kl in range(0, n_kernels):
out_len += 2
out_list = np.zeros(out_len, dtype='float32')
_feature_gabor(chbd,
blk,
scales_array,
out_len,
scales_half,
scales_block,
n_kernels,
rows,
cols,
scale_length,
end_scale,
out_list)
return np.float32(out_list)
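
# Hedged usage sketch for ``feature_gabor`` (shapes and values below are
# illustrative assumptions, not from the original code). Judging from the
# indexing in ``_feature_gabor``, the 3-d input stacks one pre-filtered plane
# per (scale, kernel) pair:
#
#     import numpy as np
#     chbd = np.zeros((16, 64, 64), dtype='float32')  # 2 scales * 8 kernels
#     feas = feature_gabor(chbd, 4, [16, 32], 32, n_kernels=8)
#     # feas holds a weighted mean and variance per block/scale/kernel.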
# Histogram of Oriented Gradients
# cdef np.ndarray[DTYPE_float32_t, ndim=1] calc_hog(np.ndarray[DTYPE_float32_t, ndim=2] mag_chunk,
# np.ndarray[DTYPE_float32_t, ndim=2] ang_chunk,
# DTYPE_float32_t pi2, int bin_n,
# Py_ssize_t block_rows, Py_ssize_t block_cols):
#
# # Quantizing bin values
# cdef np.ndarray[DTYPE_uint16_t, ndim=2] bins = np.uint16(bin_n * ang_chunk / pi2)
#
# return np.float32(np.bincount(np.array(bins).ravel(), weights=mag_chunk.ravel(), minlength=bin_n))
#
#
cdef void _feature_hog(DTYPE_float32_t[:, ::1] chbd,
int blk,
DTYPE_uint16_t[:] scs,
int end_scale,
int scales_half,
int scales_block,
int out_len,
int rows,
int cols,
int scale_length,
DTYPE_float32_t[::1] out_list_):
"""
Computes the Histogram of Oriented Gradients
At each scale, returns:
1: Mean
2: Variance
3: Skew
4: Kurtosis
"""
cdef:
Py_ssize_t i, j, ki, sti, block_rows, block_cols
DTYPE_uint16_t k, k_half
DTYPE_float32_t[:, ::1] ch_bd
int pix_ctr = 0
        int bin_n = 9
DTYPE_float32_t[::1] sts = np.zeros(5, dtype='float32')
DTYPE_float32_t[::1] sts_ = sts.copy()
DTYPE_float32_t[::1] hog_results
for i from 0 <= i < rows-scales_block by blk:
for j from 0 <= j < cols-scales_block by blk:
for ki in range(0, scale_length):
k = scs[ki]
k_half = <int>(k / 2.)
ch_bd = chbd[i+scales_half-k_half:i+scales_half-k_half+k,
j+scales_half-k_half:j+scales_half-k_half+k]
block_rows = ch_bd.shape[0]
block_cols = ch_bd.shape[1]
if _get_max_f2d(ch_bd, block_rows, block_cols) > 0:
hog_results = np.float32(np.ascontiguousarray(HOG(np.float32(ch_bd),
pixels_per_cell=(block_rows, block_cols),
cells_per_block=(1, 1))))
sts_[...] = sts
_get_moments(hog_results, sts_)
for sti in range(0, 5):
out_list_[pix_ctr] = sts_[sti]
pix_ctr += 1
else:
pix_ctr += 5
def feature_hog(DTYPE_float32_t[:, ::1] chbd, int blk, list scs, int end_scale):
cdef:
Py_ssize_t i, j, ki
int scales_half = <int>(end_scale / 2.0)
int scales_block = end_scale - blk
int rows = chbd.shape[0]
int cols = chbd.shape[1]
DTYPE_uint16_t[:] scales_array = np.array(scs, dtype='uint16')
int scale_length = scales_array.shape[0]
unsigned int out_len = _get_output_length(rows, cols, scales_block, blk, scale_length, 5)
DTYPE_float32_t[::1] out_list = np.zeros(out_len, dtype='float32')
_feature_hog(chbd,
blk,
scales_array,
end_scale,
scales_half,
scales_block,
out_len,
rows,
cols,
scale_length,
out_list)
return np.float32(out_list)
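
# Hedged usage sketch for ``feature_hog`` (illustrative values only):
#
#     import numpy as np
#     chbd = np.zeros((64, 64), dtype='float32')
#     feas = feature_hog(chbd, 4, [16, 32], 32)
#     # Five statistics (max, mean, variance, skew, kurtosis) per block/scale.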
cdef void _add_dmps(DTYPE_float32_t[:, ::1] ch_bd_array,
int block_rows,
int block_cols,
DTYPE_float32_t[::1] dmp_vector_array) nogil:
cdef:
Py_ssize_t ri, cj
for ri in range(0, block_rows):
for cj in range(0, block_cols):
dmp_vector_array[0] += ch_bd_array[ri, cj]
cdef void _feature_dmp(DTYPE_float32_t[:, ::1] chbd,
int blk,
DTYPE_uint16_t[::1] scs,
int end_scale,
int scales_half,
int scales_block,
int out_len,
int rows,
int cols,
int scale_length,
DTYPE_float32_t[::1] out_list_):
cdef:
Py_ssize_t i, j, ki, sti, block_rows, block_cols
DTYPE_uint16_t k, k_half
unsigned int rc_start, rc_end, rc
DTYPE_float32_t[:, ::1] ch_bd
int pix_ctr = 0
#DTYPE_float32_t[::1] sts = np.zeros(5, dtype='float32')
#DTYPE_float32_t[::1] sts_ = sts.copy()
#DTYPE_float32_t[::1] dmp_vector = np.zeros(1, dtype='float32')
#DTYPE_float32_t[::1] dmp_vector_ = dmp_vector.copy()
DTYPE_float32_t[:, ::1] dist_weights, dw
DTYPE_float32_t[:, :, ::1] dist_weights_stack = np.zeros((scale_length, end_scale*2, end_scale*2), dtype='float32')
DTYPE_float32_t[::1] in_zs = np.zeros(2, dtype='float32')
for ki in range(0, scale_length):
k = scs[ki]
k_half = <int>(k / 2.0)
rc_start = scales_half - k_half
rc_end = scales_half - k_half + k
rc = rc_end - rc_start
dist_weights = np.zeros((rc, rc), dtype='float32')
dist_weights_stack[ki, :rc, :rc] = _create_weights(dist_weights, rc, rc)
with nogil:
for i from 0 <= i < rows-scales_block by blk:
for j from 0 <= j < cols-scales_block by blk:
for ki in range(0, scale_length):
k = scs[ki]
k_half = <int>(k / 2.0)
# Get the DMPS.
ch_bd = chbd[i+scales_half-k_half:i+scales_half-k_half+k,
j+scales_half-k_half:j+scales_half-k_half+k]
block_rows = ch_bd.shape[0]
block_cols = ch_bd.shape[1]
# Add the DMPs for the local scale.
#dmp_vector_[...] = dmp_vector
#_add_dmps(ch_bd, block_rows, block_cols, dmp_vector_)
dw = dist_weights_stack[ki, :block_rows, :block_cols]
_get_weighted_mean_var(ch_bd, dw, block_rows, block_cols, in_zs)
#sts_[...] = sts
# Get the DMP vector
# central moments.
#_get_moments(dmp_vector_, sts_)
# Fill the output.
for sti in range(0, 2):
out_list_[pix_ctr] = in_zs[sti]
pix_ctr += 1
def feature_dmp(DTYPE_float32_t[:, ::1] chbd, int blk, list scs, int end_scale):
cdef:
int scales_half = <int>(end_scale / 2.0)
int scales_block = end_scale - blk
int rows = chbd.shape[0]
int cols = chbd.shape[1]
DTYPE_uint16_t[::1] scales_array = np.array(scs, dtype='uint16')
int scale_length = scales_array.shape[0]
unsigned int out_len = _get_output_length(rows, cols, scales_block, blk, scale_length, 2)
DTYPE_float32_t[::1] out_list = np.zeros(out_len, dtype='float32')
_feature_dmp(chbd,
blk,
scales_array,
end_scale,
scales_half,
scales_block,
out_len,
rows,
cols,
scale_length,
out_list)
return np.float32(out_list)
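
# Hedged usage sketch for ``feature_dmp`` (illustrative values only):
#
#     import numpy as np
#     chbd = np.zeros((64, 64), dtype='float32')
#     feas = feature_dmp(chbd, 4, [16, 32], 32)
#     # Two outputs per block/scale: a distance-weighted mean and variance.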
cdef void _extract_values(DTYPE_uint8_t[:, :] block,
DTYPE_uint16_t[:] values,
DTYPE_uint16_t[:, :] rc_,
int fl) nogil:
cdef:
Py_ssize_t fi, fi_, fj_
for fi in range(0, fl):
fi_ = rc_[0, fi]
fj_ = rc_[1, fi]
values[fi] = block[fi_, fj_]
cdef void _get_direction(DTYPE_uint8_t[:, ::1] chunk,
int chunk_shape,
int rows_half,
int cols_half,
DTYPE_float32_t center_mean,
DTYPE_float32_t thresh_hom,
DTYPE_float32_t[::1] values_,
Py_ssize_t t_value,
bint is_row,
DTYPE_float32_t[::1] hist_,
Py_ssize_t hist_counter,
int skip_factor,
DTYPE_uint16_t[:, ::1] rc) nogil:
cdef:
Py_ssize_t ija, lni, lni_f, rc_shape
DTYPE_float32_t ph_i, line_sd
        DTYPE_float32_t alpha_ = 0.1
DTYPE_float32_t sfs_max, sfs_min, d_i, sfs_w_mean
DTYPE_uint16_t[::1] line_values
# Iterate over every other angle
for ija from 0 <= ija < chunk_shape by skip_factor:
ph_i = 0.
# Draw a line between the two endpoints.
if is_row:
draw_line(rows_half, cols_half, ija, t_value, rc)
else:
draw_line(rows_half, cols_half, t_value, ija, rc)
# rc_shape = rc.shape[1]
rc_shape = rc[2, 0] # the real line length
# line_values = rc[0].copy()
line_values = rc[3, :rc_shape] # row of zeros, up to the line length
# Extract the values along the line.
_extract_values(chunk, line_values, rc, rc_shape)
# Iterate over line values.
lni_f = 0
for lni in range(0, rc_shape):
if ph_i < thresh_hom:
# Pixel homogeneity
ph_i += abs_f(center_mean - float(line_values[lni]))
lni_f += 1
else:
break
# Get the line length
d_i = _get_line_length(float(rows_half), float(cols_half), float(rc[0, lni_f]), float(rc[1, lni_f]))
# Get the standard deviation along the line.
line_sd = _get_std_1d_uint16(line_values[:lni_f], lni_f)
# Get the line statistics
sfs_max = _get_max_sample(values_[0], d_i)
sfs_min = _get_min_sample(values_[1], d_i)
sfs_w_mean = (alpha_ * (d_i - 1.)) / line_sd
# Update the histogram with
# the line length.
if not npy_isnan(d_i) and not npy_isinf(d_i):
hist_[hist_counter] = d_i
hist_counter += 1
if not npy_isnan(sfs_max) and not npy_isinf(sfs_max):
values_[0] = sfs_max
        if (sfs_min != 0) and not npy_isnan(sfs_min) and not npy_isinf(sfs_min):
values_[1] = sfs_min
if not npy_isnan(d_i) and not npy_isinf(d_i):
values_[2] += d_i
if not npy_isnan(sfs_w_mean) and not npy_isinf(sfs_w_mean):
values_[3] += sfs_w_mean
cdef void _get_directions(DTYPE_uint8_t[:, ::1] chunk,
int chunk_rws,
int chunk_cls,
int rows_half,
int cols_half,
DTYPE_float32_t center_mean,
DTYPE_float32_t thresh_hom,
DTYPE_float32_t[::1] values,
int skip_factor,
DTYPE_uint16_t[:, ::1] rcc_,
DTYPE_float32_t[::1] hist) nogil:
"""
Returns:
1: Length (maximum line length)
2: Width (minimum line length)
3: Mean
4: w-mean
5: Standard deviation
6: Maximum ratio of orthogonal angles
Not currently implemented:
7: Minimum ratio of orthogonal angles
"""
cdef:
        Py_ssize_t i_, j_, iia_, ija_, ia, ja, rr_shape, lni
DTYPE_float32_t ph_i
DTYPE_float32_t total_count
Py_ssize_t hist_length = 0
Py_ssize_t hist_counter = 0
Py_ssize_t hist_counter_, ofc
DTYPE_float32_t max_diff, orthog_diff
# Get the histogram and row and column skip lengths.
for i_ in range(0, 2):
for iia_ from 0 <= iia_ < chunk_rws by skip_factor:
hist_length += 1
for j_ in range(0, 2):
for ija_ from 0 <= ija_ < chunk_cls by skip_factor:
hist_length += 1
values[1] = 999999.
# Fill the histogram
# Rows, 1st column
_get_direction(chunk, chunk_rws, rows_half, cols_half,
center_mean, thresh_hom, values, 0, True,
hist, hist_counter, skip_factor, rcc_)
# Rows, last column
_get_direction(chunk, chunk_rws, rows_half, cols_half,
center_mean, thresh_hom, values, chunk_cls-1, True,
hist, hist_counter, skip_factor, rcc_)
# Columns, 1st row
_get_direction(chunk, chunk_cls, rows_half, cols_half,
center_mean, thresh_hom, values, 0, False,
hist, hist_counter, skip_factor, rcc_)
# Columns, last row
_get_direction(chunk, chunk_cls, rows_half, cols_half,
center_mean, thresh_hom, values, chunk_rws-1, False,
hist, hist_counter, skip_factor, rcc_)
values[2] /= float(hist_length) # mean
values[3] /= float(hist_length) # w-mean
# Calculate the standard deviation
# of the histogram.
values[4] = _get_std_1d(hist, hist_length, values[2])
# total_count = _get_sum1d(hist, hist_length)
# values[3] /= total_count # H w-mean
# Calculate the standard deviation
# of the histogram.
# values[4] = _get_std_1d(hist, hist_length)
# Calculate the min orthogonal ratio.
max_diff = 0.
hist_counter_ = 0
ofc = (chunk_rws * 2) - 1
for iia_ from 0 <= iia_ < chunk_rws by skip_factor:
# Ratio of orthogonal angles
orthog_diff = abs_f(hist[iia_] - float((ofc - hist_counter_)))
max_diff = _get_max_sample(max_diff, orthog_diff)
hist_counter_ += 1
ofc = (chunk_cls * 2) - 1
for ija_ from 0 <= ija_ < chunk_cls by skip_factor:
# Ratio of orthogonal angles
orthog_diff = abs_f(hist[ija_] - float((ofc - hist_counter_)))
max_diff = _get_max_sample(max_diff, orthog_diff)
hist_counter_ += 1
values[5] = max_diff
cdef void _sfs_feas(DTYPE_uint8_t[:, ::1] chunk,
unsigned int block_size,
DTYPE_float32_t thresh_hom,
unsigned int skip_factor,
DTYPE_uint16_t[:, ::1] rcc_,
DTYPE_float32_t[::1] hist_,
DTYPE_float32_t[::1] sfs_values) nogil:
"""
Reference:
Zhang, Liangpei et al. 2006. "A Pixel Shape Index Coupled With Spectral Information for Classification of High
Spatial Resolution Remotely Sensed Imagery." IEEE Transactions on Geoscience and Remote Sensing, V. 44, No. 10.
Huang, Xin et al. 2007. "Classification and Extraction of Spatial Features in Urban Areas Using High-Resolution
Multispectral Imagery." IEEE Transactions on Geoscience and Remote Sensing, V. 4, No. 2.
Returns:
Directional lengths (length=8)
PSI
"""
cdef:
unsigned int chunk_rws, chunk_cls, rows_half, cols_half, block_half
DTYPE_float32_t ctr_blk_mean, sfs_value
DTYPE_uint8_t[:, ::1] chunk_block
Py_ssize_t cbr, cbc
# get chunk size
chunk_rws = chunk.shape[0]
chunk_cls = chunk.shape[1]
rows_half = <int>(chunk_rws / 2.0)
cols_half = <int>(chunk_cls / 2.0)
block_half = <int>(block_size / 2.0)
# Get the current window chunk
chunk_block = chunk[rows_half-block_half:rows_half+block_half,
cols_half-block_half:cols_half+block_half]
# Get the center block average
if block_size > 1:
cbr = chunk_block.shape[0]
cbc = chunk_block.shape[1]
ctr_blk_mean = _get_mean_uint8(chunk_block, cbr, cbc)
else:
ctr_blk_mean = chunk_block[rows_half, cols_half]
_get_directions(chunk,
chunk_rws,
chunk_cls,
rows_half,
cols_half,
ctr_blk_mean,
thresh_hom,
sfs_values,
skip_factor,
rcc_,
hist_)
cdef void _feature_sfs(DTYPE_uint8_t[:, ::1] ch_bd,
unsigned int block_size,
DTYPE_uint16_t[::1] scales_array,
int n_scales,
DTYPE_float32_t thresh_hom,
int scales_half,
int scales_block,
int out_len,
int rows,
int cols,
int skip_factor,
DTYPE_uint16_t[:, ::1] rcc_,
DTYPE_float32_t[::1] hist_,
DTYPE_float32_t[::1] out_list_):
cdef:
Py_ssize_t i, j, ki, k_half, st_
DTYPE_uint16_t k
DTYPE_uint8_t[:, ::1] ch_bd_
DTYPE_float32_t[::1] sts = np.zeros(6, dtype='float32')
DTYPE_float32_t[::1] sts_ = sts.copy()
int pix_ctr = 0
with nogil:
if block_size > 1:
for i from 0 <= i < rows-scales_block by block_size:
for j from 0 <= j < cols-scales_block by block_size:
for ki in range(0, n_scales):
k = scales_array[ki]
k_half = <int>(k / 2.0)
ch_bd_ = ch_bd[i+scales_half-k_half:i+scales_half-k_half+k,
j+scales_half-k_half:j+scales_half-k_half+k]
sts_[...] = sts
_sfs_feas(ch_bd_, block_size, thresh_hom, skip_factor, rcc_, hist_, sts_)
for st_ in range(0, 6):
out_list_[pix_ctr] = sts_[st_]
pix_ctr += 1
else:
for i in range(0, rows-scales_block):
for j in range(0, cols-scales_block):
for ki in range(0, n_scales):
k = scales_array[ki]
k_half = <int>(k / 2.0)
ch_bd_ = ch_bd[i+scales_half-k_half:i+scales_half-k_half+k,
j+scales_half-k_half:j+scales_half-k_half+k]
sts_[...] = sts
_sfs_feas(ch_bd_, block_size, thresh_hom, skip_factor, rcc_, hist_, sts_)
for st_ in range(0, 6):
out_list_[pix_ctr] = sts_[st_]
pix_ctr += 1
def feature_sfs(DTYPE_uint8_t[:, ::1] chbd,
unsigned int block_size,
list scales,
unsigned int end_scale,
DTYPE_float32_t thresh_hom,
unsigned int skip_factor=4):
cdef:
Py_ssize_t i, j, ki, k
int scales_half = <int>(end_scale / 2.0)
int scales_block = end_scale - block_size
int rows = chbd.shape[0]
int cols = chbd.shape[1]
DTYPE_uint16_t[::1] scales_array = np.array(scales, dtype='uint16')
int scale_length = scales_array.shape[0]
DTYPE_uint16_t[:, ::1] rcc = np.zeros((4, end_scale), dtype='uint16')
DTYPE_float32_t[::1] histogram = np.zeros(end_scale, dtype='float32')
DTYPE_float32_t[::1] out_list
unsigned int out_len = _get_output_length(rows, cols, scales_block, block_size, scale_length, 6)
out_list = np.zeros(out_len, dtype='float32')
_feature_sfs(chbd,
block_size,
                 scales_array,
scale_length,
thresh_hom,
scales_half,
scales_block,
out_len,
rows,
cols,
skip_factor,
rcc,
histogram,
out_list)
return np.float32(out_list)
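
# Hedged usage sketch for ``feature_sfs`` (illustrative values only):
#
#     import numpy as np
#     chbd = np.zeros((128, 128), dtype='uint8')
#     feas = feature_sfs(chbd, 4, [16, 32], 32, 20., skip_factor=4)
#     # Six statistics per block/scale: length, width, mean, w-mean,
#     # standard deviation, and the maximum ratio of orthogonal angles.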
# cdef list _feature_surf(np.ndarray[DTYPE_uint8_t, ndim=2] surf_arr, k_pts, int j, int i, int k, list scs):
#
# """
# Get the moments
# """
#
# cdef int start_y = i+(scs[-1]/2)-(k/2)
# cdef int start_x = j+(scs[-1]/2)-(k/2)
#
# if surf_arr.max() == 0:
# return [0., 0., 0., 0.]
# else:
# if k_pts:
# # return desc_stats[m](pyramid_hist_sift(surfArr, kPts, start_x, start_y).sp_hist)
# return get_moments(pyramid_hist_sift(surf_arr, k_pts, start_x, start_y).sp_hist)
# else:
# return [0., 0., 0., 0.]
#
#
# def feature_surf(np.ndarray[DTYPE_uint8_t, ndim=2] chBd, int blk, list scs, int end_scale):
#
# cdef:
# Py_ssize_t i, j, ki, k, k_half
# int rows = chBd.shape[0]
# int cols = chBd.shape[1]
# list sts
# DTYPE_float64_t st
# int scales_half = end_scale / 2
# int scales_block = end_scale - blk
# np.ndarray[DTYPE_float64_t, ndim=1] out_list
# int out_len = 0
# int pix_ctr = 0
# int n_scales = np.array(scs).shape[0]
#
# for i from 0 <= i < rows-scales_block by blk:
# for j from 0 <= j < cols-scales_block by blk:
# for ki in range(0, n_scales):
# out_len += 4
#
# # set the output list
# out_list = np.zeros(out_len).astype(np.float64)
#
# # compute SURF features
# kPts, descrip = cv2.SURF(50).detectAndCompute(chBd, None)
#
# for i from 0 <= i < rows-scales_block by blk:
# for j from 0 <= j < cols-scales_block by blk:
# for ki in range(0, n_scales):
#
# sts = _feature_surf(chBd[i+scales_half-k_half:i+scales_half-k_half+k,
# j+scales_half-k_half:j+scales_half-k_half+k], kPts, j, i, k, scs)
#
# for st in range(0, 4):
#
# out_list[pix_ctr] = sts[st]
#
# pix_ctr += 1
#
# return out_list
# ORB keypoints
# Ethan Rublee, Vincent Rabaud, Kurt Konolige, Gary R. Bradski:
# ORB: An efficient alternative to SIFT or SURF. ICCV 2011: 2564-2571.
def fill_key_points(DTYPE_float32_t[:, ::1] in_block, list key_point_list):
cdef:
Py_ssize_t key_point_index
int n_key_points = len(key_point_list)
int brows = in_block.shape[0]
int bcols = in_block.shape[1]
double key_y, key_x
int key_y_idx, key_x_idx
DTYPE_uint8_t[:, ::1] key_point_array = np.zeros((brows, bcols), dtype='uint8')
for key_point_index in range(0, n_key_points):
key_x, key_y = key_point_list[key_point_index].pt
key_y_idx = <int>(floor(key_y))
key_x_idx = <int>(floor(key_x))
key_point_array[key_y_idx, key_x_idx] = 1
return np.uint8(key_point_array)
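
# Hedged usage sketch for ``fill_key_points``. The ORB constructor below is
# the OpenCV 3+/4 API and is an assumption about how the keypoints are
# produced; only ``fill_key_points`` itself comes from this module:
#
#     import cv2
#     import numpy as np
#     block = np.zeros((64, 64), dtype='float32')
#     orb = cv2.ORB_create(nfeatures=500)
#     key_points = orb.detect(np.uint8(block), None)
#     kp_grid = fill_key_points(block, list(key_points))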
cdef DTYPE_float32_t[::1] _pyramid_hist_sift(DTYPE_uint8_t[:, ::1] key_point_array,
DTYPE_float32_t[::1] levels,
int orb_rows,
int orb_cols,
DTYPE_float32_t[::1] hist_) nogil:
cdef:
Py_ssize_t lv, ki, kj, grid_counter
int rr_rows, cc_cols, y_tiles, x_tiles
DTYPE_uint8_t[:, ::1] kblock
grid_counter = 0
# Iterate over each level
for lv in range(0, 3):
y_tiles = <int>(floor(orb_rows / levels[lv]))
x_tiles = <int>(floor(orb_cols / levels[lv]))
if (y_tiles > 1) and (x_tiles > 1):
for ki from 0 <= ki < orb_rows-1 by y_tiles:
rr_rows = n_rows_cols(ki, y_tiles, orb_rows)
if rr_rows > 1:
for kj from 0 <= kj < orb_cols-1 by x_tiles:
cc_cols = n_rows_cols(kj, x_tiles, orb_cols)
if cc_cols > 1:
# Get the keypoint block
kblock = key_point_array[ki:ki+rr_rows, kj:kj+cc_cols]
# Enter the keypoint sum into the histogram
hist_[grid_counter] += _get_sum_uint8(kblock, rr_rows, cc_cols)
grid_counter += 1
return hist_[:grid_counter]
cdef void _feature_orb(DTYPE_uint8_t[:, ::1] ch_bd,
int blk,
DTYPE_uint16_t[::1] scales_array,
int scales_half,
int scales_block,
int scale_length,
int out_len,
int rows,
int cols,
int scales_length,
int end_scale,
DTYPE_float32_t[::1] out_list_):
cdef:
Py_ssize_t i, j, ki, st
DTYPE_uint16_t k
int k_half
DTYPE_float32_t[::1] levels = np.array([2, 4, 8], dtype='float32')
Py_ssize_t pix_ctr = 0
int block_rows, block_cols
DTYPE_uint8_t[:, ::1] ch_bd_sub
DTYPE_float32_t[::1] sts = np.zeros(5, dtype='float32')
DTYPE_float32_t[::1] sts_ = sts.copy()
DTYPE_float32_t[::1] hist = np.zeros(end_scale*end_scale*4, dtype='float32')
DTYPE_float32_t[::1] hist_ = hist.copy()
with nogil:
for i from 0 <= i < rows-scales_block by blk:
for j from 0 <= j < cols-scales_block by blk:
for ki in range(0, scale_length):
k = scales_array[ki]
k_half = <int>(k / 2.)
ch_bd_sub = ch_bd[i+scales_half-k_half:i+scales_half-k_half+k,
j+scales_half-k_half:j+scales_half-k_half+k]
block_rows = ch_bd_sub.shape[0]
block_cols = ch_bd_sub.shape[1]
if _get_max(ch_bd_sub, block_rows, block_cols) > 0:
sts_[...] = sts
hist_[...] = hist
_get_moments(_pyramid_hist_sift(ch_bd_sub, levels, block_rows, block_cols, hist_), sts_)
for st in range(0, 5):
out_list_[pix_ctr] = sts_[st]
pix_ctr += 1
else:
pix_ctr += 5
def feature_orb(DTYPE_uint8_t[:, ::1] chbd,
int blk,
list scs,
int end_scale):
cdef:
Py_ssize_t i, j, ki
int scales_half = <int>(end_scale / 2.)
int scales_block = end_scale - blk
int rows = chbd.shape[0]
int cols = chbd.shape[1]
DTYPE_uint16_t[::1] scales_array = np.array(scs, dtype='uint16')
int scale_length = scales_array.shape[0]
unsigned int out_len = _get_output_length(rows, cols, scales_block, blk, scale_length, 5)
DTYPE_float32_t[::1] out_list = np.zeros(out_len, dtype='float32')
_feature_orb(chbd,
blk,
scales_array,
scales_half,
scales_block,
scale_length,
out_len,
rows,
cols,
scale_length,
end_scale,
out_list)
return np.float32(out_list)
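
# Hedged usage sketch for ``feature_orb`` (illustrative values only). The
# input is expected to be a rasterized keypoint grid such as the one built
# by ``fill_key_points`` above:
#
#     feas = feature_orb(kp_grid, 4, [16, 32], 32)
#     # Five statistics of the pyramid keypoint histogram per block/scale.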
cdef DTYPE_uint8_t[:, :, :] _set_lbp(DTYPE_uint8_t[:, ::1] chbd,
int rows,
int cols,
DTYPE_uint8_t[::1] p_range,
dict rdict):
"""
| Cython |
Get the Local Binary Patterns
"""
cdef:
Py_ssize_t scsc
unsigned int p_len = 3
DTYPE_uint8_t[:, :, :] lbp_bd = np.zeros((p_len, rows, cols), dtype='uint8')
DTYPE_uint8_t[:, :] lbp_array
# Run LBP for each scale
for scsc in range(0, p_len):
lbp_array = np.uint8(LBP(np.uint8(chbd),
int(p_range[scsc]),
int(rdict[int(p_range[scsc])]),
method='uniform'))
lbp_bd[scsc, :, :] = lbp_array[...]
return lbp_bd
cdef np.ndarray[DTYPE_float32_t, ndim=1] _feature_lbp(DTYPE_uint8_t[:, ::1] chbd,
int blk,
list scs,
int end_scale):
cdef:
Py_ssize_t i, j, ki, sti
unsigned int pc, pr_bin_count, k_half
int rows = chbd.shape[0]
int cols = chbd.shape[1]
DTYPE_uint8_t[:, :, ::1] lbp_bd
DTYPE_uint8_t[:, :, ::1] ch_bd
unsigned int scales_half = <int>(end_scale / 2.)
unsigned int scales_block = end_scale - blk
DTYPE_float32_t[::1] sts, out_list
unsigned int pix_ctr = 0
DTYPE_uint16_t k
DTYPE_uint16_t[:] scales_array = np.array(scs, dtype='uint16')
int scale_length = scales_array.shape[0]
np.ndarray[DTYPE_float32_t, ndim=1] out_list_a
unsigned int out_len
DTYPE_uint8_t[::1] p_range = np.array([8, 16, 32], dtype='uint8')
dict rdict = {4: 1, 8: 1, 16: 2, 32: 4, 64: 8, 128: 16}
# get the LBP images
lbp_bd = np.ascontiguousarray(_set_lbp(chbd, rows, cols, p_range, rdict))
# count of bins for all p,r LBP pairs
pr_bin_count = np.sum([pr+2 for pr in p_range])
out_len = _get_output_length(rows, cols, scales_block, blk, scale_length, pr_bin_count)
# set the output list
out_list = np.empty(out_len, dtype='float32')
for i from 0 <= i < rows-scales_block by blk:
for j from 0 <= j < cols-scales_block by blk:
for ki in range(0, scale_length):
k = scales_array[ki]
k_half = <int>(k / 2.0)
ch_bd = np.ascontiguousarray(np.uint8(lbp_bd[:,
i+scales_half-k_half:i+scales_half-k_half+k,
j+scales_half-k_half:j+scales_half-k_half+k]))
# get histograms and concatenate
                sts = np.float32(np.ascontiguousarray(np.concatenate([np.bincount(np.uint8(ch_bd[pc]).flat,
                                                                                  minlength=int(p_range[pc]) + 2)
                                                                      for pc in range(0, 3)])))
                for sti in range(0, pr_bin_count):
out_list[pix_ctr] = sts[sti]
pix_ctr += 1
out_list_a = np.float32(out_list)
out_list_a[np.isnan(out_list_a) | np.isinf(out_list_a)] = 0
return out_list_a
def feature_lbp(np.ndarray[DTYPE_uint8_t, ndim=2] chbd, int blk, list scs, int end_scale):
return _feature_lbp(chbd, blk, scs, end_scale)
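
# Hedged usage sketch for ``feature_lbp`` (illustrative values only):
#
#     import numpy as np
#     chbd = np.zeros((64, 64), dtype='uint8')
#     feas = feature_lbp(chbd, 4, [16, 32], 32)
#     # Concatenated uniform-LBP histograms (P = 8, 16, 32) per block/scale.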
cdef void _feature_lbpm(DTYPE_uint8_t[:, ::1] chBd,
int blk,
DTYPE_uint16_t[:] scs,
int end_scale,
int scales_half,
int scales_block,
int out_len,
int rows,
int cols,
int scale_length,
DTYPE_float32_t[::1] out_list_):
"""
At each scale, returns:
1: Mean
2: Variance
3: Skew
4: Kurtosis
"""
cdef:
Py_ssize_t i, j, sti, ki
unsigned int pc, k_half, k
DTYPE_uint8_t[:, :, ::1] lbp_bd
DTYPE_uint8_t[:, :, ::1] ch_bd
int pix_ctr = 0
DTYPE_float32_t[::1] sts = np.zeros(5, dtype='float32')
DTYPE_float32_t[::1] sts_ = sts.copy()
DTYPE_float32_t[::1] lbp_results
DTYPE_uint8_t[::1] p_range = np.array([8, 16, 32], dtype='uint8')
dict rdict = {4: 1, 8: 1, 16: 2, 32: 4, 64: 8, 128: 16}
# get the LBP images
lbp_bd = np.ascontiguousarray(_set_lbp(chBd, rows, cols, p_range, rdict))
for i from 0 <= i < rows-scales_block by blk:
for j from 0 <= j < cols-scales_block by blk:
for ki in range(0, scale_length):
k = scs[ki]
k_half = <int>(k / 2.0)
ch_bd = np.ascontiguousarray(np.uint8(lbp_bd[:,
i+scales_half-k_half:i+scales_half-k_half+k,
j+scales_half-k_half:j+scales_half-k_half+k]))
# get histograms and concatenate
                lbp_results = np.float32(np.ascontiguousarray(np.concatenate([np.bincount(np.uint8(ch_bd[pc]).flat,
                                                                                          minlength=int(p_range[pc]) + 2)
                                                                              for pc in range(0, 3)])))
sts_[...] = sts
_get_moments(lbp_results, sts_)
for sti in range(0, 5):
out_list_[pix_ctr] = sts_[sti]
pix_ctr += 1
def feature_lbpm(np.ndarray[DTYPE_uint8_t, ndim=2] chbd, int blk, list scs, int end_scale):
cdef:
Py_ssize_t i, j, ki
int scales_half = <int>(end_scale / 2.)
int scales_block = end_scale - blk
int rows = chbd.shape[0]
int cols = chbd.shape[1]
DTYPE_uint16_t[:] scales_array = np.array(scs, dtype='uint16')
int scale_length = scales_array.shape[0]
unsigned int out_len = _get_output_length(rows, cols, scales_block, blk, scale_length, 5)
DTYPE_float32_t[::1] out_list = np.zeros(out_len, dtype='float32')
_feature_lbpm(chbd,
blk,
scales_array,
end_scale,
scales_half,
scales_block,
out_len,
rows,
cols,
scale_length,
out_list)
return np.float32(out_list)
cdef inline DTYPE_float32_t _get_distance(tuple line):
return sqrt(pow((line[0][0] - line[1][0]), 2.) + pow((line[0][1] - line[1][1]), 2.))
cdef inline DTYPE_float64_t get_slope(tuple line):
return np.degrees(atan(float((line[0][1] - line[1][1])) / float((line[1][0] - line[0][0]))))
def houghFunc_1(np.ndarray[DTYPE_uint8_t, ndim=2] edgeArr, int houghIndex, int minLen, np.ndarray[long, ndim=2] checkList1_2, np.ndarray[long, ndim=2] checkList2_2,
list scs, int i, int j):
cdef np.ndarray[int, ndim=1] line
cdef DTYPE_float32_t pi = 3.14159265
cdef DTYPE_float32_t pi2 = 3.14159265 / 2.
if houghIndex == 0:
if checkList1_2[i, j] == 1:
return np.mean([ _get_distance(line) for line in cv2.HoughLinesP(edgeArr, 1, pi, minLen, minLineLength=minLen, maxLineGap=2)[0] ]) # average line length
else:
return 0.
if houghIndex == 1:
if checkList2_2[i, j] == 1:
return np.mean([ _get_distance(line) for line in cv2.HoughLinesP(edgeArr, 1, pi2, minLen, minLineLength=minLen, maxLineGap=2)[0] ]) # average line length
else:
return 0.
    elif houghIndex == 2:
        if checkList1_2[i, j] == 1 and checkList2_2[i, j] == 1:
return np.vstack((cv2.HoughLinesP(edgeArr, 1, pi, minLen, minLineLength=minLen, maxLineGap=2)[0],
cv2.HoughLinesP(edgeArr, 1, pi2, minLen, minLineLength=minLen, maxLineGap=2)[0])).shape[0] # number of lines
        elif checkList1_2[i, j] == 1 and checkList2_2[i, j] != 1:
            return cv2.HoughLinesP(edgeArr, 1, pi, minLen, minLineLength=minLen, maxLineGap=2)[0].shape[0]
        elif checkList1_2[i, j] != 1 and checkList2_2[i, j] == 1:
return cv2.HoughLinesP(edgeArr, 1, pi2, minLen, minLineLength=minLen, maxLineGap=2)[0].shape[0]
else:
return 0.
else:
return (float(np.argwhere(edgeArr==255).shape[0]) / float(edgeArr.shape[0]*edgeArr.shape[1])) * 100. # edge density
def houghFunc_2(np.ndarray[DTYPE_uint8_t, ndim=2] edgeArr, int houghIndex, int minLen, np.ndarray[long, ndim=3] checkList1_3, np.ndarray[long, ndim=3] checkList2_3,
list scs, int i, int j, int k):
cdef np.ndarray[int, ndim=1] line
cdef DTYPE_float32_t pi = 3.14159265
cdef DTYPE_float32_t pi2 = 3.14159265 / 2.
if houghIndex == 0:
if checkList1_3[scs.index(k), i, j] == 1:
return np.mean([ _get_distance(line) for line in cv2.HoughLinesP(edgeArr, 1, pi, minLen, minLineLength=minLen, maxLineGap=2)[0] ]) # average vertical line length
else:
return 0.
if houghIndex == 1:
if checkList2_3[scs.index(k), i, j] == 1:
return np.mean([ _get_distance(line) for line in cv2.HoughLinesP(edgeArr, 1, pi2, minLen, minLineLength=minLen, maxLineGap=2)[0] ]) # average horizontal line length
else:
return 0.
elif houghIndex == 2:
if checkList1_3[scs.index(k), i, j] == 1 and checkList2_3[scs.index(k), i, j] == 1:
return np.vstack((cv2.HoughLinesP(edgeArr, 1, pi, minLen, minLineLength=minLen, maxLineGap=2)[0],
cv2.HoughLinesP(edgeArr, 1, pi2, minLen, minLineLength=minLen, maxLineGap=2)[0])).shape[0] # number of lines
        elif checkList1_3[scs.index(k), i, j] == 1 and checkList2_3[scs.index(k), i, j] != 1:
            return cv2.HoughLinesP(edgeArr, 1, pi, minLen, minLineLength=minLen, maxLineGap=2)[0].shape[0]
        elif checkList1_3[scs.index(k), i, j] != 1 and checkList2_3[scs.index(k), i, j] == 1:
return cv2.HoughLinesP(edgeArr, 1, pi2, minLen, minLineLength=minLen, maxLineGap=2)[0].shape[0]
else:
return 0.
else:
return (float(np.argwhere(edgeArr==255).shape[0]) / float(edgeArr.shape[0]*edgeArr.shape[1])) * 100. # edge density
cdef list _hough_function(list lines_list, np.ndarray[DTYPE_uint8_t, ndim=2] edge_arr, int large_rws, int large_cls):
cdef:
int small_rws = edge_arr.shape[0]
int small_cls = edge_arr.shape[1]
list lines_list_chunk = []
list line_seg
DTYPE_float32_t rws_cls = float(small_rws * small_cls)
tuple line
DTYPE_float32_t mean_len, num_lines, edge_dens, std_slope
int x_min = (large_cls - small_cls) / 2
int x_max = x_min + small_cls
int y_min = (large_rws - small_rws) / 2
int y_max = y_min + small_rws
for line_seg in lines_list:
for line in line_seg:
if (line[0][0] >= x_min) and (line[1][0] <= x_max) and (line[0][1] >= y_min) and (line[1][1] <= y_max):
lines_list_chunk.append(line)
# average line length
# mean_len = np.mean([ _get_distance(line) if (len(line) > 0) else 0. for line in lines ])
mean_len = np.mean([_get_distance(line) for line in lines_list_chunk])
# number of lines
# num_lines = float(len([ _get_distance(line) if (len(line) > 0) else 0. for line in lines ]))
num_lines = float(len([_get_distance(line) for line in lines_list_chunk]))
# edge density
# edge_dens = (float(np.argwhere(edge_arr == 1).shape[0]) / rws_cls) * 100.
edge_dens = (edge_arr.sum() / rws_cls) * 100.
# standard deviation of line angles
# slopes = [get_slope(line) if (len(line) > 0) else 0. for line in lines]
std_slope = np.asarray([get_slope(line) for line in lines_list_chunk]).std()
# angle bins
# slope_hist, bins = np.histogram(np.searchsorted([0, 90, 180, 270, 360], slopes), bins=4, range=(1, 4))
if edge_arr.max() == 0:
return [0., 0., 0., 0.]
else:
# return list([mean_len, num_lines, edge_dens, mean_slope, slope_hist[0], slope_hist[1], slope_hist[2], slope_hist[3]])
return list([mean_len, num_lines, edge_dens, std_slope])
cdef np.ndarray[DTYPE_float32_t, ndim=1] _feature_hough(np.ndarray[DTYPE_uint8_t, ndim=2] chBd, int blk,
DTYPE_uint16_t[:] scales_array,
int scales_half, int scales_block,
int scale_length,
int out_len, int rows, int cols, int threshold,
int min_len, int line_gap, int end_scale):
cdef:
Py_ssize_t i, j, ki
DTYPE_uint16_t k, k_half
int k_half_end = end_scale / 2
DTYPE_float32_t pi = 3.14159
np.ndarray[DTYPE_uint8_t, ndim=2] ch_bd, large_scale
int pix_ctr = 0
np.ndarray[DTYPE_float32_t, ndim=1] out_list = np.zeros(out_len, dtype='float32')
int large_scale_rws, large_scale_cls
list lines_list
list angles = [np.array([np.radians(22.5)]), np.array([np.radians(45)]),
np.array([np.radians(67.5)]), np.array([np.radians(90)]),
np.array([np.radians(112.5)]), np.array([np.radians(135)]),
np.array([np.radians(157.5)]), np.array([np.radians(180)])]
np.ndarray[DTYPE_float64_t, ndim=1] angle
pix_ctr = 0
for i from 0 <= i < rows-scales_block by blk:
for j from 0 <= j < cols-scales_block by blk:
# get the angles at the largest scale
lines_list = []
# get the largest scale array
large_scale = chBd[i+scales_half-k_half_end:i+scales_half-k_half_end+end_scale,
j+scales_half-k_half_end:j+scales_half-k_half_end+end_scale]
# get the dimensions for the largest scale
large_scale_rws = large_scale.shape[0]
large_scale_cls = large_scale.shape[1]
# compute the PHL at various angles
# and add to a list
lines_list = [PHL(large_scale, threshold=threshold, line_length=min_len,
line_gap=line_gap, theta=angle) for angle in angles]
# get the matching dimensions at each scale
            # and get line statistics
for ki in range(0, scale_length):
k = scales_array[ki]
k_half = k / 2
# get the current scale array
ch_bd = chBd[i+scales_half - k_half:i+scales_half - k_half + k,
j+scales_half - k_half:j+scales_half - k_half + k]
# get line statistics
sts = _hough_function(lines_list, ch_bd, large_scale_rws, large_scale_cls)
for st in sts:
out_list[pix_ctr] = st
pix_ctr += 1
return out_list
def feature_hough(np.ndarray[DTYPE_uint8_t, ndim=2] chBd, int blk, list scs, int end_scale, int threshold,
int min_len, int line_gap):
cdef:
Py_ssize_t i, j, k
int rows = chBd.shape[0]
int cols = chBd.shape[1]
list sts
DTYPE_float64_t st
int scales_half = end_scale / 2
int scales_block = end_scale - blk
DTYPE_uint16_t[:] scales_array = np.array(scs, dtype='uint16')
int scale_length = scales_array.shape[0]
unsigned int out_len = _get_output_length(rows, cols, scales_block, blk, scale_length, 4)
return _feature_hough(chBd, blk, scales_array, scales_half, scales_block, scale_length,
out_len, rows, cols, threshold, min_len, line_gap, end_scale)
# PanTex
cdef void _glcm_loop(DTYPE_uint8_t[:, :] image,
DTYPE_float32_t[:] distances,
DTYPE_float32_t[:] angles,
int levels,
DTYPE_float32_t[:, :, :, ::1] out,
DTYPE_float32_t[:, :] out_sums,
Py_ssize_t rows,
Py_ssize_t cols) nogil:
cdef:
Py_ssize_t a_idx, d_idx, r, c, row, col
Py_ssize_t angles_, distances_
DTYPE_uint8_t i, j
DTYPE_float32_t angle, distance
angles_ = angles.shape[0]
distances_ = distances.shape[0]
for a_idx in range(0, angles_):
angle = angles[a_idx]
for d_idx in range(0, distances_):
distance = distances[d_idx]
# Iterate over the image to get
# the grey-level pairs.
for r in range(0, rows):
for c in range(0, cols):
# Current row pixel value
i = image[r, c]
# compute the location of the offset pixel
row = r + <int>(roundd(sin(angle) * distance))
col = c + <int>(roundd(cos(angle) * distance))
# row = r + int(round(sin(angle) * distance))
# col = c + int(round(cos(angle) * distance))
# row = r + int(round(sin(angle) * distance))
# col = c + int(round(cos(angle) * distance))
# make sure the offset is within bounds
if (0 <= row < rows) and (0 <= col < cols):
# Current column pixel value
j = image[row, col]
if (0 <= i < levels) and (0 <= j < levels):
# Fill the co-occurrence matrix.
out[i, j, d_idx, a_idx] += 1
# Fill the co-occurrence matrix
# for the symmetric pair.
out[j, i, d_idx, a_idx] += 1
# Add 2 for the symmetric sums
out_sums[d_idx, a_idx] += 2
cdef DTYPE_float32_t[:, :, :, ::1] _norm_glcm(DTYPE_float32_t[:, :, :, ::1] Pt,
DTYPE_float32_t[:, :] Pt_sums,
DTYPE_float32_t[:] distances,
DTYPE_float32_t[:] angles, int levels,
DTYPE_float32_t[:, :, :, ::1] glcm_normed_) nogil:
cdef:
Py_ssize_t a_idx, d_idx, r, c
Py_ssize_t angles_, distances_
DTYPE_float32_t angle_dist_sum
angles_ = angles.shape[0]
distances_ = distances.shape[0]
# Get the sums
for a_idx in range(0, angles_):
for d_idx in range(0, distances_):
# angle_dist_sum = 0.
# Iterate over the co-occurrence array
# and normalize.
for r in range(0, levels):
for c in range(0, levels):
glcm_normed_[r, c, d_idx, a_idx] += (Pt[r, c, d_idx, a_idx] / Pt_sums[d_idx, a_idx])
return glcm_normed_
# cdef _check_nans(DTYPE_float32_t[:, :, :, :] glcm_mat_nan,
# DTYPE_float32_t[:] distances,
# DTYPE_float32_t[:] angles,
# int levels) nogil:
#
# cdef:
# Py_ssize_t a_idx, d_idx, r, c
# Py_ssize_t angles_, distances_
# DTYPE_float32_t value2check
#
# angles_ = angles.shape[0]
# distances_ = distances.shape[0]
#
# # Get the sums
# for a_idx in range(0, angles_):
#
# for d_idx in range(0, distances_):
#
# # angle_dist_sum = 0.
#
# # Iterate over the image
# for r in range(0, levels):
#
# for c in range(0, levels):
#
# value2check = glcm_mat_nan[r, c, d_idx, a_idx]
#
# if npy_isnan(value2check) or npy_isinf(value2check):
# glcm_mat_nan[r, c, d_idx, a_idx] = 0
cdef DTYPE_float32_t[:, :, :, ::1] _greycomatrix(DTYPE_uint8_t[:, :] image,
DTYPE_float32_t[:] distances,
DTYPE_float32_t[:] angles,
Py_ssize_t levels, Py_ssize_t rows, Py_ssize_t cols,
DTYPE_float32_t[:, :, :, ::1] P,
DTYPE_float32_t[:, :] angle_dist_sums,
DTYPE_float32_t[:, :, :, ::1] glcm_normed) nogil:
cdef:
metric_ptr metric_norm
metric_norm = &_norm_glcm
# count co-occurences
_glcm_loop(image, distances, angles, levels, P, angle_dist_sums, rows, cols)
# Normalize the matrix
return metric_norm(P, angle_dist_sums, distances, angles, levels, glcm_normed)
cdef DTYPE_float32_t _glcm_contrast(DTYPE_float32_t[:, :, :, ::1] P,
DTYPE_float32_t[:] distances,
DTYPE_float32_t[:] angles,
Py_ssize_t levels,
DTYPE_float32_t[:, ::1] contrast_array) nogil:
cdef:
Py_ssize_t a_idx, d_idx, r, c
Py_ssize_t angles_, distances_
DTYPE_float32_t min_contrast = 1000000.
DTYPE_float32_t contrast_sum
angles_ = angles.shape[0]
distances_ = distances.shape[0]
for a_idx in range(0, angles_):
for d_idx in range(0, distances_):
# Sum the contrast for the current angle/distance pair.
contrast_sum = 0.
# Iterate over the co-occurrence matrix
# and get the contrast.
for r in range(0, levels):
for c in range(0, levels):
contrast_sum += contrast_array[r, c] * P[r, c, d_idx, a_idx]
# Get the minimum contrast over all angle/distance pairs.
min_contrast = _get_min_sample(min_contrast, contrast_sum)
return min_contrast
cdef DTYPE_float32_t[:, ::1] _set_contrast_weights(int levels):
cdef:
Py_ssize_t li, lj
DTYPE_float32_t[:, ::1] contrast_array = np.zeros((levels, levels), dtype='float32')
for li in range(0, levels):
for lj in range(0, levels):
contrast_array[li, lj] = pow2(float(li-lj))
return contrast_array
cdef void _feature_pantex(DTYPE_uint8_t[:, ::1] chBd,
int blk,
DTYPE_uint16_t[::1] scs,
int scales_half,
int scales_block,
int out_len,
bint weighted,
int rows,
int cols,
int scale_length,
int levels,
DTYPE_float32_t[::1] out_list_):
"""
Calculates the Anisotropic Built-up Presence Index (PanTex)
The GLCM code was adapted from the Scikit-image team
@ https://github.com/scikit-image/scikit-image/blob/master/skimage/feature/_texture.pyx
"""
cdef:
Py_ssize_t i, j, ki, block_rows, block_cols
DTYPE_uint16_t k
int k_half
DTYPE_uint8_t[:, ::1] ch_bd
DTYPE_float32_t pi = 3.14159265
DTYPE_float32_t[:, :, :, ::1] glcm_mat
DTYPE_float32_t con_min
Py_ssize_t pix_ctr = 0
# displacement angles: 8 directions from 0 to 5pi/6 radians
DTYPE_float32_t[:] disp_vect = np.array([0., pi / 6., pi / 4., pi / 3., pi / 2., (2. * pi) / 3.,
(3. * pi) / 4., (5. * pi) / 6.], dtype='float32')
DTYPE_float32_t[:] dists = np.array([1, 2], dtype='float32')
DTYPE_float32_t[:, ::1] contrast_weights = _set_contrast_weights(levels)
DTYPE_float32_t[:, :, :, ::1] P_ = np.zeros((levels, levels, dists.shape[0], disp_vect.shape[0]),
dtype='float32')
DTYPE_float32_t[:, :] angle_dist_sums_ = np.zeros((dists.shape[0], disp_vect.shape[0]),
dtype='float32')
DTYPE_float32_t[:, :, :, ::1] glcm_normed_ = P_.copy()
DTYPE_float32_t[:, :, :, ::1] P_c = P_.copy()
DTYPE_float32_t[:, :] angle_dist_sums_c = angle_dist_sums_.copy()
DTYPE_float32_t[:] mean_var_values
DTYPE_float32_t[:, ::1] kernel_weight
DTYPE_float32_t[::1] in_zs = np.zeros(2, dtype='float32')
DTYPE_float32_t[:, ::1] dist_weights
list dist_weights_m = []
if weighted:
for ki in range(0, scale_length):
k = scs[ki]
k_half = <int>(k / 2.)
rs = (scales_half - k_half + k) - (scales_half - k_half)
cs = (scales_half - k_half + k) - (scales_half - k_half)
dist_weights = np.empty((rs, cs), dtype='float32')
dist_weights_m.append(_create_weights(dist_weights, rs, cs))
with nogil:
for i from 0 <= i < rows-scales_block by blk:
for j from 0 <= j < cols-scales_block by blk:
for ki in range(0, scale_length):
k = scs[ki]
k_half = <int>(k / 2.)
ch_bd = chBd[i+scales_half-k_half:i+scales_half-k_half+k,
j+scales_half-k_half:j+scales_half-k_half+k]
block_rows = ch_bd.shape[0]
block_cols = ch_bd.shape[1]
if _get_max(ch_bd, block_rows, block_cols) == 0:
con_min = 0.
else:
P_c[...] = P_
angle_dist_sums_c[...] = angle_dist_sums_
glcm_normed_[...] = P_
glcm_mat = _greycomatrix(ch_bd,
dists,
disp_vect,
levels,
block_rows,
block_cols,
P_c,
angle_dist_sums_c,
glcm_normed_)
con_min = _glcm_contrast(glcm_mat,
dists,
disp_vect,
levels,
contrast_weights)
with gil:
kernel_weight = dist_weights_m[ki]
_get_weighted_mean_var_byte(ch_bd, kernel_weight, block_rows, block_cols, in_zs)
if not npy_isnan(con_min) and not npy_isinf(con_min):
out_list_[pix_ctr] = con_min * in_zs[0]
pix_ctr += 1
else:
with nogil:
for i from 0 <= i < rows-scales_block by blk:
for j from 0 <= j < cols-scales_block by blk:
for ki in range(0, scale_length):
k = scs[ki]
k_half = <int>(k / 2.)
ch_bd = chBd[i+scales_half-k_half:i+scales_half-k_half+k,
j+scales_half-k_half:j+scales_half-k_half+k]
block_rows = ch_bd.shape[0]
block_cols = ch_bd.shape[1]
if _get_max(ch_bd, block_rows, block_cols) == 0:
con_min = 0.
else:
P_c[...] = P_
angle_dist_sums_c[...] = angle_dist_sums_
glcm_normed_[...] = P_
glcm_mat = _greycomatrix(ch_bd,
dists,
disp_vect,
levels,
block_rows,
block_cols,
P_c,
angle_dist_sums_c,
glcm_normed_)
con_min = _glcm_contrast(glcm_mat, dists, disp_vect, levels, contrast_weights)
if not npy_isnan(con_min) and not npy_isinf(con_min):
out_list_[pix_ctr] = con_min
pix_ctr += 1
def feature_pantex(DTYPE_uint8_t[:, ::1] chbd, int blk, list scs, int end_scale, bint weighted, int levels=32):
cdef:
Py_ssize_t i, j, ki
int scales_half = <int>(end_scale / 2.)
int scales_block = end_scale - blk
int rows = chbd.shape[0]
int cols = chbd.shape[1]
DTYPE_uint16_t[::1] scales_array = np.array(scs, dtype='uint16')
int scale_length = scales_array.shape[0]
unsigned int out_len = _get_output_length(rows, cols, scales_block, blk, scale_length, 1)
DTYPE_float32_t[::1] out_list = np.zeros(out_len, dtype='float32')
_feature_pantex(chbd,
blk,
scales_array,
scales_half,
scales_block,
out_len,
weighted,
rows,
cols,
scale_length,
levels,
out_list)
return np.float32(out_list)
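# Usage sketch (hypothetical inputs): `chbd` must be a C-contiguous uint8
# band quantized to [0, levels); here block size 4 with scales 16 and 32:
# import numpy as np
# chbd = (np.random.rand(128, 128) * 32).astype('uint8')
# feas = feature_pantex(np.ascontiguousarray(chbd), 4, [16, 32], 32, False)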
cdef DTYPE_float32_t[:, ::1] _create_weights(DTYPE_float32_t[:, ::1] dist_weights, int rs, int cs) nogil:
cdef:
Py_ssize_t ri, rj
DTYPE_float32_t rm = rs / 2.
DTYPE_float32_t cm = cs / 2.
for ri in range(0, rs):
for rj in range(0, cs):
dist_weights[ri, rj] = _euclidean_distance(cm, rm, float(rj), float(ri))
return dist_weights
cdef void feature_mean_float32(DTYPE_float32_t[:, ::1] ch_bd,
unsigned int blk,
DTYPE_uint16_t[::1] scs,
unsigned int scales_half,
unsigned int scales_block,
unsigned int scale_length,
DTYPE_float32_t[:, :, ::1] dist_weights_stack,
DTYPE_float32_t[::1] in_zs,
DTYPE_float32_t[::1] out_list_):
cdef:
Py_ssize_t i, j, ki, pix_ctr, pi
DTYPE_uint16_t k
unsigned int k_half, r_size, c_size
unsigned int rows = ch_bd.shape[0]
unsigned int cols = ch_bd.shape[1]
DTYPE_float32_t[:, ::1] block_chunk, dw
pix_ctr = 0
with nogil:
for i from 0 <= i < rows-scales_block by blk:
for j from 0 <= j < cols-scales_block by blk:
for ki in range(0, scale_length):
k = scs[ki]
k_half = <int>(k / 2.)
#rc_start = scales_half - k_half
#rc_end = scales_half - k_half + k
#r_size = (i + rc_end) - (i + rc_start) if (i + rc_end) - (i + rc_start) <= rows else rows - (i + rc_start)
#c_size = (j + rc_end) - (j + rc_start) if (j + rc_end) - (j + rc_start) <= cols else cols - (j + rc_start)
block_chunk = ch_bd[i+scales_half-k_half:i+scales_half-k_half+k,
j+scales_half-k_half:j+scales_half-k_half+k]
r_size = block_chunk.shape[0]
c_size = block_chunk.shape[1]
dw = dist_weights_stack[ki, :r_size, :c_size]
_get_weighted_mean_var(block_chunk, dw, r_size, c_size, in_zs)
for pi in range(0, 2):
out_list_[pix_ctr] = in_zs[pi]
pix_ctr += 1
def feature_mean(DTYPE_float32_t[:, ::1] ch_bd, int blk, list scs, int end_scale):
cdef:
Py_ssize_t i, j, ki
unsigned int scales_half = <int>(end_scale / 2.)
unsigned int scales_block = end_scale - blk
unsigned int rows = ch_bd.shape[0]
unsigned int cols = ch_bd.shape[1]
DTYPE_uint16_t[::1] scales_array = np.array(scs, dtype='uint16')
unsigned int scale_length = scales_array.shape[0]
unsigned int k, k_half, rc_start, rc_end, rc
DTYPE_float32_t[:, :, ::1] dist_weights_stack = np.zeros((scale_length, end_scale*2, end_scale*2), dtype='float32')
DTYPE_float32_t[:, ::1] dist_weights
DTYPE_float32_t[::1] in_zs = np.zeros(2, dtype='float32')
unsigned int out_len = _get_output_length(rows, cols, scales_block, blk, scale_length, 2)
DTYPE_float32_t[::1] out_list = np.zeros(out_len, dtype='float32')
for ki in range(0, scale_length):
k = scales_array[ki]
k_half = <int>(k / 2.)
rc_start = scales_half - k_half
rc_end = scales_half - k_half + k
rc = rc_end - rc_start
dist_weights = np.zeros((rc, rc), dtype='float32')
dist_weights_stack[ki, :rc, :rc] = _create_weights(dist_weights, rc, rc)
feature_mean_float32(ch_bd,
blk,
scales_array,
scales_half,
scales_block,
scale_length,
dist_weights_stack,
in_zs,
out_list)
return np.float32(out_list)
# def feaCtrFloat64(np.ndarray[DTYPE_float64_t, ndim=2] chBd, int blk, list scs, int rows, int cols):
#
# cdef int i, j, k
#
# return [ chBd[i+scs[-1]/2, j+scs[-1]/2] for k in scs for i in range(0, rows-(scs[-1]-blk), blk) for j in range(0, cols-(scs[-1]-blk), blk) ]
#
#
# def feaCtrFloat32(np.ndarray[DTYPE_float32_t, ndim=2] chBd, int blk, list scs, int rows, int cols):
#
# cdef int i, j, k
#
# return [ chBd[i+scs[-1]/2, j+scs[-1]/2] for k in scs for i in range(0, rows-(scs[-1]-blk), blk) for j in range(0, cols-(scs[-1]-blk), blk) ]
#
#
# def feaCtr_uint16(np.ndarray[unsigned short, ndim=2] chBd, int blk, list scs, int rows, int cols):
#
# cdef int i, j, k
#
# return [ chBd[i+scs[-1]/2, j+scs[-1]/2] for k in scs for i in range(0, rows-(scs[-1]-blk), blk) for j in range(0, cols-(scs[-1]-blk), blk) ]
#
#
# def feaCtr_uint8(np.ndarray[unsigned char, ndim=2] chBd, int blk, list scs, int rows, int cols):
#
# cdef int i, j, k
#
# return [ chBd[i+scs[-1]/2, j+scs[-1]/2] for k in scs for i in range(0, rows-(scs[-1]-blk), blk) for j in range(0, cols-(scs[-1]-blk), blk) ]
#
#
# def feaCtr_uint(np.ndarray[int, ndim=2] chBd, int blk, list scs, int rows, int cols):
#
# cdef int i, j, k
#
# return [ chBd[i+scs[-1]/2, j+scs[-1]/2] for k in scs for i in range(0, rows-(scs[-1]-blk), blk) for j in range(0, cols-(scs[-1]-blk), blk) ]
#
#
# def feaCtr(np.ndarray chBd, int blk, list scs):
#
# cdef int rows = chBd.shape[0]
# cdef int cols = chBd.shape[1]
#
# if chBd.dtype == 'float64':
# return feaCtrFloat64(chBd, blk, scs, rows, cols)
# elif chBd.dtype == 'float32':
# return feaCtrFloat32(chBd, blk, scs, rows, cols)
# elif chBd.dtype == 'uint16':
# return feaCtr_uint16(chBd, blk, scs, rows, cols)
# elif chBd.dtype == 'uint8':
# try:
# return feaCtr_uint8(chBd, blk, scs, rows, cols)
# except:
# return feaCtr_uint(chBd, blk, scs, rows, cols)
# Lacunarity
cdef int max_box_number(DTYPE_uint8_t[:, :] w, int rr_rows, int rr_cols) nogil:
cdef:
int maxi = _get_max(w, rr_rows, rr_cols)
int mini = _get_min(w, rr_rows, rr_cols)
int boxes_max = <int>(ceil(float(maxi) / _get_min_sample_i(rr_rows, rr_cols)))
int boxes_min = <int>(ceil(float(mini) / _get_min_sample_i(rr_rows, rr_cols)))
return <int>((boxes_max - boxes_min) + 1)
cdef void _div1d(DTYPE_float32_t[:] array1d, int cs, DTYPE_float32_t div_value) nogil:
cdef:
Py_ssize_t js
for js in range(0, cs):
array1d[js] /= div_value
cdef DTYPE_float32_t _lacunarity(DTYPE_uint8_t[:, ::1] chunk_sub,
int r,
DTYPE_float32_t[::1] zs) nogil:
cdef:
int rows_ = chunk_sub.shape[0]
int cols_ = chunk_sub.shape[1]
# Get max for probability.
int maxw = (_get_max(chunk_sub, rows_, cols_) - _get_min(chunk_sub, rows_, cols_)) + 1
# Get the maximum number of boxes in each block
# ceiling is needed for uneven rows or columns.
int n_rows_ = <int>(ceil(float(rows_) / r))
int n_cols_ = <int>(ceil(float(cols_) / r))
int ns = <int>(float(n_rows_) * float(n_cols_))
# Create array of zeros
DTYPE_float32_t[::1] nsr = zs[:ns]
int maxww = maxw + 1
DTYPE_float32_t[::1] nqr = zs[:maxww]
int nn = 0
Py_ssize_t mm, n, dd
int rr_rows, rr_cols
DTYPE_uint8_t[:, ::1] w
int m
# DTYPE_float32_t[:] nqr_sum
DTYPE_float32_t smn, l2_sum
DTYPE_float32_t[::1] l1 = zs[:ns]
DTYPE_float32_t[::1] l2 = zs[:ns]
DTYPE_float32_t ns_rp
for mm from 0 <= mm < rows_ by r:
rr_rows = n_rows_cols(mm, r, rows_)
for n from 0 <= n < cols_ by r:
rr_cols = n_rows_cols(n, r, cols_)
# Differential Box Counting
# Return max. box value minus min. box value for r x r window
w = chunk_sub[mm:mm+rr_rows, n:n+rr_cols]
m = max_box_number(w, rr_rows, rr_cols)
# Append the mass to the temporary MASS array.
# The length of array equals number of boxes in the block.
nsr[nn] = m
# Append MASS counts for probability.
# The length of array equals max. number of MASS possibilites (+1).
nqr[m] += 1
nn += 1
# Get probability for each MASS count.
# MASS counts divided by total number of boxes in k x k window.
# nqr_sum = np.divide(np.asarray(nqr), ns).astype(np.float32)
_div1d(nqr, maxww, float(ns))
# Calculate moments
# L1 = MASS squared times the probability
# (both are arrays of length equal to the number of boxes in k x k window).
for dd in range(0, ns):
ns_rp = nsr[dd]
l1[dd] = pow2(ns_rp) * nqr[<int>ns_rp]
l2[dd] = ns_rp * nqr[<int>ns_rp]
# Sum the L2 array and square the result
smn = _get_sum1d(l2, ns)
l2_sum = pow2(smn)
# Lacunarity for k x k block
if l2_sum != 0:
return _get_sum1d(l1, ns) / l2_sum
else:
return 0.
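# A compact NumPy reference for the same differential box counting scheme
# (an illustrative cross-check only; the cdef version above reuses a single
# scratch buffer instead of these temporaries):
def _lacunarity_numpy_sketch(chunk, r):
    import numpy as np
    rows, cols = chunk.shape
    masses = []
    for mm in range(0, rows, r):
        for nn in range(0, cols, r):
            w = chunk[mm:mm+r, nn:nn+r]
            side = float(min(w.shape))
            masses.append(int(np.ceil(w.max() / side)) - int(np.ceil(w.min() / side)) + 1)
    masses = np.asarray(masses)
    probs = np.bincount(masses) / float(masses.size)
    l1 = (masses.astype('f8') ** 2 * probs[masses]).sum()
    l2 = (masses * probs[masses]).sum() ** 2
    return l1 / l2 if l2 != 0 else 0.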
cdef void _feature_lacunarity(DTYPE_uint8_t[:, ::1] chunk_block,
int blk,
DTYPE_uint16_t[::1] scales,
int scales_half,
int scales_block,
int rows,
int cols,
int r,
int out_len,
int scale_length,
DTYPE_float32_t[::1] zs,
DTYPE_float32_t[::1] out_list_):
cdef:
Py_ssize_t i, j, ki, cr, cc
unsigned int k, k_half
Py_ssize_t pixel_counter = 0
DTYPE_uint8_t[:, ::1] ch_bd
with nogil:
for i from 0 <= i < rows-scales_block by blk:
for j from 0 <= j < cols-scales_block by blk:
for ki in range(0, scale_length):
k = scales[ki]
k_half = <int>(k / 2.)
ch_bd = chunk_block[i+scales_half-k_half:i+scales_half-k_half+k,
j+scales_half-k_half:j+scales_half-k_half+k]
cr = ch_bd.shape[0]
cc = ch_bd.shape[1]
if _get_max(ch_bd, cr, cc) == 0:
out_list_[pixel_counter] = 0
else:
out_list_[pixel_counter] = _lacunarity(ch_bd, r, zs)
pixel_counter += 1
def feature_lacunarity(DTYPE_uint8_t[:, ::1] chunk_block, int blk, list scales, int end_scale, int r=2):
cdef:
Py_ssize_t i, j, ki
int rows = chunk_block.shape[0]
int cols = chunk_block.shape[1]
int scales_half = end_scale / 2
int scales_block = end_scale - blk
DTYPE_uint16_t[::1] scale_array = np.array(scales, dtype='uint16')
int scale_length = scale_array.shape[0]
DTYPE_float32_t[::1] zs = np.zeros((end_scale*2)*(end_scale*2), dtype='float32')
unsigned int out_len = _get_output_length(rows, cols, scales_block, blk, scale_length, 1)
DTYPE_float32_t[::1] out_list = np.zeros(out_len, dtype='float32')
_feature_lacunarity(chunk_block,
blk,
scale_array,
scales_half,
scales_block,
rows,
cols,
r,
out_len,
scale_length,
zs,
out_list)
return np.float32(out_list)
# cdef azimuthal_avg(image, center=None):
#
# """
# Calculate the azimuthally averaged radial profile.
#
# image - The 2D image
# center - The [x,y] pixel coordinates used as the center. The default is
# None, which then uses the center of the image (including
# fractional pixels).
# """
#
# cdef:
# np.ndarray[DTYPE_int64_t, ndim=2] y, x
#
# # Calculate the indices from the image
# y, x = np.indices(image.shape)
#
# if not center:
# center = np.array([(x.max() - x.min()) / 2., (y.max() - y.min()) / 2.])
#
# # get hypotenuse
# r = np.hypot(np.subtract(x, center[0]), np.subtract(y, center[1]))
#
# # get sorted radii indices
# ind = np.argsort(r.flat)
# rSorted = r.flat[ind]
#
# # get image values from index positions
# iSorted = image.flat[ind]
#
# # Get the integer part of the radii (bin size = 1)
# rInt = rSorted.astype(int)
#
# # Find all pixels that fall within each radial bin.
# deltar = np.subtract(rInt[1:], rInt[:-1]) # Assumes all radii represented
# rind = np.where(deltar)[0] # location of changed radius
# nr = np.subtract(rind[1:], rind[:-1]) # number of radius bin
#
# # Cumulative sum to figure out sums for each radius bin
# csim = np.cumsum(iSorted, dtype=float)
# tbin = np.subtract(csim[rind[1:]], csim[rind[:-1]])
#
# radialProf = np.divide(tbin, nr)
#
# return radialProf
#
#
# cdef DTYPE_float32_t[:, ::1] _fourier_transform(DTYPE_uint8_t[:, ::1] chunk_block):
#
# cdef:
# DTYPE_float32_t[:, :] dft = cv2.dft(np.float32(chunk_block), flags=cv2.DFT_COMPLEX_OUTPUT)
# DTYPE_float32_t[:, :, :] dft_shift = np.fft.fftshift(dft)
#
# # get the Power Spectrum
# return np.float32(azimuthal_avg(20. * np.log(cv2.magnitude(dft_shift[:, :, 0], dft_shift[:, :, 1]))))
#
#
# cdef np.ndarray[DTYPE_float32_t, ndim=1] _feature_fourier(DTYPE_uint8_t[:, ::1] chunk_block,
# int blk,
# DTYPE_uint16_t[::1] scales,
# int scales_half,
# int scales_block,
# int rows,
# int cols,
# int out_len,
# int scale_length):
#
# cdef:
# Py_ssize_t i, j, ki, cr, cc
# int k, k_half
# Py_ssize_t pixel_counter = 0
# DTYPE_uint8_t[:, ::1] ch_bd
# DTYPE_float32_t[::1] out_list = np.zeros(out_len, dtype='float32')
#
# for i from 0 <= i < rows-scales_block by blk:
# for j from 0 <= j < cols-scales_block by blk:
# for ki in range(0, scale_length):
#
# k = scales[ki]
# k_half = <int>(k / 2.)
#
# ch_bd = chunk_block[i+scales_half-k_half:i+scales_half-k_half+k,
# j+scales_half-k_half:j+scales_half-k_half+k]
#
# bcr = ch_bd.shape[0]
# bcc = ch_bd.shape[1]
#
# ch_bd_transform = _fourier_transform(ch_bd)
#
# out_list[pixel_counter] = _get_mean(ch_bd_transform, bcr, bcc)
# pixel_counter += 1
#
# out_list[pixel_counter] = _get_var(ch_bd_transform, bcr, bcc)
# pixel_counter += 1
#
# return np.float32(out_list)
#
#
# def feature_fourier(np.ndarray chunk_block, int blk, list scales, int end_scale):
#
# cdef:
# Py_ssize_t i, j, ki
# int rows = chunk_block.shape[0]
# int cols = chunk_block.shape[1]
# int scales_half = <int>(end_scale / 2.)
# int scales_block = end_scale - blk
# DTYPE_uint16_t[::1] scale_array = np.array(scales, dtype='uint16')
# int scale_length = scale_array.shape[0]
# int out_len = _get_output_length(rows, cols, scales_block, blk, scale_length, 1)
#
# return _feature_fourier(np.uint8(chunk_block),
# blk,
# scale_array,
# scales_half,
# scales_block,
# rows,
# cols,
# out_len,
# scale_length)
cdef void _fill_labels(DTYPE_uint64_t[:, ::1] im,
DTYPE_uint64_t[:, ::1] area_im,
DTYPE_uint64_t[::1] props,
DTYPE_uint64_t[::1] unique_labels,
unsigned int n_unique,
unsigned int rows,
unsigned int cols):
cdef:
Py_ssize_t uli, i, j
int unq, uli_area
with nogil:
for uli in range(0, n_unique):
unq = unique_labels[uli]
if unq > 0:
uli_area = props[uli]
for i in range(0, rows):
for j in range(0, cols):
if im[i, j] == unq:
area_im[i, j] = uli_area
def fill_labels(DTYPE_uint64_t[:, ::1] im, DTYPE_uint64_t[::1] props):
cdef:
unsigned int rows = im.shape[0]
unsigned int cols = im.shape[1]
DTYPE_uint64_t[:, ::1] area_im = np.zeros((rows, cols), dtype='uint64')
DTYPE_uint64_t[::1] unique_labels = np.uint64(np.unique(im))
unsigned int n_unique = unique_labels.shape[0]
_fill_labels(im, area_im, props, unique_labels, n_unique, rows, cols)
return np.uint64(area_im)
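# Example (hypothetical arrays): labels index into `props` in the order
# returned by np.unique, and label 0 (background) is left untouched.
# im = np.array([[0, 1], [2, 2]], dtype='uint64')
# props = np.array([0, 4, 9], dtype='uint64')  # per-label value, np.unique order
# fill_labels(im, props)                       # -> [[0, 4], [9, 9]]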
<|end_of_text|>
include "../barst_defines.pxi"
include "../inline_funcs.pxi"
from pybarst.core.server cimport BarstChannel, BarstServer
cdef class MCDAQChannel(BarstChannel):
cdef SChanInitMCDAQ daq_init
cdef int reading
cdef HANDLE read_pipe
cdef public object direction
'''
Whether this channel can read, write, or do both. A single MC DAQ device
can have both read and write ports. This attribute indicates whether the
device has an output port, an input port, or both. `'w'` means it only has
an output port, `'r'` means it only has an input port, and `'rw'` or `'wr'`
means that it has both an output and an input port.
'''
cdef public unsigned short init_val
'''
What values (high/low) the output pins (if it supports output) will be set
to when the channel is initially created on the server.
'''
cdef public int continuous
'''
Whether, when reading, the server should continuously read and send data
back to the client. This is only used for an input device
(:attr:`direction` contains `'r'`). When `True`, a single call to
:meth:`read` after the channel is opened will start the server reading the
device continuously and sending the data back to this client. This will
result in a high sampling rate of the device. If it is `False`, each call to
:meth:`read` triggers a new read, resulting in a possibly slower
sampling rate.
'''
cpdef object write(MCDAQChannel self, unsigned short mask,
unsigned short value)
cpdef object read(MCDAQChannel self)
cdef inline object _send_trigger(MCDAQChannel self)
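# Client-side usage sketch. The construction arguments below are illustrative
# (see the pybarst documentation for the real parameters); write/read match
# the cpdef declarations above.
# server = BarstServer(barst_path=r'C:\Barst.exe', pipe_name=r'\\.\pipe\TestPipe')
# daq = MCDAQChannel(chan=0, server=server)
# daq.open_channel()
# daq.write(mask=0x00FF, value=0x0001)  # drive pin 0 of the output port high
# port_state = daq.read()               # sample the input port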
<|end_of_text|># Copyright (C) 2012~2012 by Yichao Yu
# [email protected]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
cdef extern from "cpufreq.h":
struct cpufreq_policy:
unsigned long min
unsigned long max
char *governor
struct cpufreq_available_governors:
char *governor
cpufreq_available_governors *next
cpufreq_available_governors *first
struct cpufreq_available_frequencies:
unsigned long frequency
cpufreq_available_frequencies *next
cpufreq_available_frequencies *first
struct cpufreq_affected_cpus:
unsigned int cpu
cpufreq_affected_cpus *next
cpufreq_affected_cpus *first
struct cpufreq_stats:
unsigned long frequency
unsigned long long time_in_state
cpufreq_stats *next
cpufreq_stats *first
int cpufreq_cpu_exists(unsigned int cpu)
unsigned long cpufreq_get_freq_kernel(unsigned int cpu)
unsigned long cpufreq_get_freq_hardware(unsigned int cpu)
unsigned long cpufreq_get_transition_latency(unsigned int cpu)
int cpufreq_get_hardware_limits(unsigned int cpu,
unsigned long *min,
unsigned long *max)
char *cpufreq_get_driver(unsigned int cpu)
void cpufreq_put_driver(char *ptr)
cpufreq_policy *cpufreq_get_policy(unsigned int cpu)
void cpufreq_put_policy(cpufreq_policy *policy)
cpufreq_available_governors *cpufreq_get_available_governors(
unsigned int cpu)
void cpufreq_put_available_governors(
cpufreq_available_governors *first)
cpufreq_available_frequencies *cpufreq_get_available_frequencies(
unsigned int cpu)
void cpufreq_put_available_frequencies(
cpufreq_available_frequencies *first)
cpufreq_affected_cpus *cpufreq_get_affected_cpus(unsigned int cpu)
void cpufreq_put_affected_cpus(cpufreq_affected_cpus *first)
cpufreq_affected_cpus *cpufreq_get_related_cpus(unsigned int cpu)
void cpufreq_put_related_cpus(cpufreq_affected_cpus *first)
cpufreq_stats *cpufreq_get_stats(unsigned int cpu,
unsigned long long *total_time)
void cpufreq_put_stats(cpufreq_stats *stats)
unsigned long cpufreq_get_transitions(unsigned int cpu)
int cpufreq_set_policy(unsigned int cpu, cpufreq_policy *policy)
int cpufreq_modify_policy_min(unsigned int cpu, unsigned long min_freq)
int cpufreq_modify_policy_max(unsigned int cpu, unsigned long max_freq)
int cpufreq_modify_policy_governor(unsigned int cpu, char *governor)
int cpufreq_set_frequency(unsigned int cpu,
unsigned long target_frequency)
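# A minimal wrapper sketch showing how these externs are typically consumed
# from Python (the extern names above are the libcpufreq API; this def
# function itself is illustrative):
def get_policy(unsigned int cpu):
    cdef cpufreq_policy *policy = cpufreq_get_policy(cpu)
    if policy == NULL:
        raise OSError("no cpufreq policy available for cpu %d" % cpu)
    try:
        # copy the fields out before releasing the C allocation
        return (policy.min, policy.max, <bytes>policy.governor)
    finally:
        cpufreq_put_policy(policy)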
<|end_of_text|>cdef struct Writer:
boolean (*reserve)(Writer &writer, size_t amount) except False
boolean (*append_c)(Writer &writer, char datum) except False
boolean (*append_s)(Writer &writer, const char *s, Py_ssize_t length) except False
PyObject *options
ctypedef Writer &WriterRef
<|end_of_text|>import hmmlearn
import talib
import numpy as np
import pandas as pd
from hmmlearn import hmm
from sklearn.metrics import mean_squared_error
import Library
import FirebaseClient
import hmmlib
class hmm1_nautilus():
"""
Using a hidden markov model to trade ETHUSD
"""
def __init__(self, balance, symbol, horizon):
self.balance = balance
self.position = 0
self.symbol = symbol
self.horizon = horizon
########## VARIABLES ##############
self.regime_count = 4
self.compression = 240
self.step_range = range(30, 151, 10)
self.windows = range(50, 251, 10)
####################################
self.counter = self.windows[-1] * self.compression + 1
self.candles = self.getCandles()
self.run()
def run(self):
for step in self.step_range:
for window in self.windows:
self.hmm = hmmlib.HMM(self.regime_count, step, self.candles[:self.windows[-1]*self.compression], self.compression, window)
self.simulate("ETHUSD" + str(step) + ":" + str(window))
def getCandles(self):
data = Library.get_all_binance(self.symbol, self.horizon, save = True)
close = np.array(data.iloc[:, 3].astype(float), dtype=np.float64)[-279564:]  # Jan 1 2018 - Present
return close
def simulate(self, name):
# Hard Reset
self.position = 0
self.balance = 10000
values = []
positions = []
prices = []
for i in range(self.windows[-1] * self.compression + 1, len(self.candles)):
if self.hmm.add(self.candles[i]):
positions.append(self.position)
values.append(self.position * self.candles[i] + self.balance)
prices.append(self.candles[i])
self.forecast(self.candles[i])
FirebaseClient.log(name, "Position", positions)
FirebaseClient.log(name, "Value", values)
FirebaseClient.log(name, "Price", prices)
def forecast(self, price):
next_state = self.hmm.predict()  # avoid shadowing the builtin next()
transmat = self.hmm.transmat()
regime_returns = self.hmm.getRegimeReturns()
# Calculating Win Rate, Win/Loss amount ratio, and expected value
win_rate = 0
win_amount = 0
loss_amount = 0
expected_value = 0
transarr = transmat[next_state]
for i in range(0, self.regime_count):
expected_value += (regime_returns[i] * transarr[i])
if regime_returns[i] > 0:
win_rate += transarr[i]
win_amount += regime_returns[i]
else:
loss_amount -= regime_returns[i]
percentage = Library.kelly(win_rate, win_amount, loss_amount)
#Low-Pass Filter
if abs(expected_value) > 0.003:
# Buy or sell here
diff = percentage - (self.position * price) / (self.position * price + self.balance)
if diff > 0 and self.balance > diff * (self.position * price + self.balance):
old_balance = self.balance
self.balance -= diff * (self.position * price + self.balance)
self.position += (diff * (self.position * price + old_balance)) / price
elif diff < 0 and self.position + (diff*(self.position*price + self.balance))/price > -1:
old_balance = self.balance
self.balance += -diff * (self.position * price + self.balance)
self.position += (diff * (self.position * price + old_balance)) / price
strategy = hmm1_nautilus(10000, "ETHUSDT", "5m")
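# Library.kelly is an external helper not defined in this file; a plausible
# stand-in assuming the standard Kelly criterion, where win_rate is the win
# probability and win_amount/loss_amount set the payoff ratio (illustrative):
def _kelly_sketch(win_rate, win_amount, loss_amount):
    if win_amount <= 0 or loss_amount <= 0:
        return 0.0
    b = win_amount / loss_amount  # payoff ratio
    # f* = p - (1 - p) / b: fraction of capital to commit
    return win_rate - (1.0 - win_rate) / b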
<|end_of_text|>#cython: boundscheck=False, wraparound=False, language_level=3
import numpy as np
import cython
cimport cython
cimport numpy as np
from libcpp.vector cimport vector
from libc.math cimport M_PI, M_PI_2
np.import_array()
ctypedef fused T:
double
complex
ctypedef void (*funv)(T* const, T*, int, int, void* const)
cdef IterAllButAxis(funv f, np.ndarray[T, ndim=1] input_array, np.ndarray[T, ndim=1] output_array, int st, int N, int axis, long int[::1] shapein, long int[::1] shapeout, void* const data):
cdef:
np.flatiter ita = np.PyArray_IterAllButAxis(input_array.reshape(shapein), &axis)
np.flatiter ito = np.PyArray_IterAllButAxis(output_array.reshape(shapeout), &axis)
while np.PyArray_ITER_NOTDONE(ita):
f(<T* const>np.PyArray_ITER_DATA(ita), <T*>np.PyArray_ITER_DATA(ito), st, N, data)
np.PyArray_ITER_NEXT(ita)
np.PyArray_ITER_NEXT(ito)
def imult(T[:, :, ::1] array, double scale):
cdef int i, j, k
for i in range(array.shape[0]):
for j in range(array.shape[1]):
for k in range(array.shape[2]):
array[i, j, k] *= scale
return array
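# The fused type T lets `imult` specialize for both real and complex arrays;
# a quick usage sketch (assumes C-contiguous 3-D inputs, per the signature):
# import numpy as np
# u = np.ones((2, 3, 4))                 # float64 -> double specialization
# w = np.ones((2, 3, 4), dtype=complex)  # complex128 specialization
# imult(u, 2.0); imult(w, 0.5)           # both scale in place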
ctypedef struct CDN:
double* ld
double* ud
int N
cdef void CDN_matvec_ptr(T* const v,
T* b,
int st,
int N,
void* const data):
cdef:
int i
CDN* c0 = <CDN*>data
double* ld = c0.ld
double* ud = c0.ud
int Nd = c0.N
b[0] = ud[0]*v[st]
b[(Nd-1)*st] = ld[Nd-2]*v[(Nd-2)*st]
for i in range(1, Nd-1):
b[i*st] = ud[i]*v[(i+1)*st] + ld[i-1]*v[(i-1)*st]
cpdef CDN_matvec(np.ndarray v, np.ndarray b, int axis, double[::1] ld, double[::1] ud):
cdef:
CDN c0 = CDN(&ld[0], &ud[0], ud.shape[0]+1)
np.ndarray[long int, ndim=1] shape = np.array(np.shape(v), dtype=int)
int N = v.shape[axis]
int st = v.strides[axis]//v.itemsize
if v.dtype.char in 'fdg':
IterAllButAxis[double](CDN_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, &c0)
else:
IterAllButAxis[complex](CDN_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, &c0)
return b
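# Cross-check sketch (assumption drawn from the kernel above: CDN holds one
# sub-diagonal ld at offset -1 and one super-diagonal ud at offset +1, with
# an empty main diagonal):
# import numpy as np
# N = 8
# ld, ud = np.random.rand(N - 1), np.random.rand(N - 1)
# A = np.diag(ld, -1) + np.diag(ud, 1)
# v, b = np.random.rand(N), np.zeros(N)
# assert np.allclose(CDN_matvec(v, b, 0, ld, ud), A @ v)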
ctypedef struct BDN:
double* ld
double* dd
double* ud
int N
cdef void BDN_matvec_ptr(T* const v,
T* b,
int st,
int N,
void* const data):
cdef:
int i, j, k
BDN* c0 = <BDN*>data
double* ld = c0.ld
double* dd = c0.dd
double ud = c0.ud[0]
int M = c0.N
b[0] = ud*v[2*st] + dd[0]*v[0]
b[st] = ud*v[3*st] + dd[1]*v[st]
b[(M-2)*st] = ld[M-4]*v[(M-4)*st] + dd[M-2]*v[(M-2)*st]
b[(M-1)*st] = ld[M-3]*v[(M-3)*st] + dd[M-1]*v[(M-1)*st]
for i in range(2, M-2):
b[i*st] = ud*v[(i+2)*st] + dd[i]*v[i*st] + ld[i-2]*v[(i-2)*st]
cpdef BDN_matvec(np.ndarray v, np.ndarray b, int axis, double[::1] ld, double[::1] dd, double ud):
cdef:
BDN c0 = BDN(&ld[0], &dd[0], &ud, dd.shape[0])
np.ndarray[long int, ndim=1] shape = np.array(np.shape(v), dtype=int)
int N = v.shape[axis]
int st = v.strides[axis]//v.itemsize
if v.dtype.char in 'fdg':
IterAllButAxis[double](BDN_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, &c0)
else:
IterAllButAxis[complex](BDN_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, &c0)
return b
ctypedef struct CDD0:
double* ld
double* ud
int Nd
cdef void CDD_matvec_ptr(T* const v,
T* b,
int st,
int N,
void* const data):
cdef:
int i
CDD0* c0 = <CDD0*>data
double* ld = c0.ld
double* ud = c0.ud
int Nd = c0.Nd
b[0] = ud[0]*v[st]
b[(Nd-1)*st] = ld[Nd-2]*v[(Nd-2)*st]
for i in range(1, Nd-1):
b[i*st] = ud[i]*v[(i+1)*st] + ld[i-1]*v[(i-1)*st]
cpdef CDD_matvec(np.ndarray v, np.ndarray b, int axis, double[::1] ld, double[::1] ud):
cdef:
CDD0 c0 = CDD0(&ld[0], &ud[0], ud.shape[0]+1)
np.ndarray[long int, ndim=1] shape = np.array(np.shape(v), dtype=int)
int N = v.shape[axis]
int st = v.strides[axis]//v.itemsize
if v.dtype.char in 'fdg':
IterAllButAxis[double](CDD_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, &c0)
else:
IterAllButAxis[complex](CDD_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, &c0)
return b
ctypedef struct SBB:
double* dd
int N
cdef void SBB_matvec_ptr(T* const v,
T* b,
int st,
int N,
void* const data):
cdef:
int i, j, k
SBB* c0 = <SBB*>data
double* dd = c0.dd
int M = c0.N
double p, r
T d, s1, s2, o1, o2
j = M-1
s1 = 0.0
s2 = 0.0
o1 = 0.0
o2 = 0.0
b[j*st] = dd[j]*v[j*st]
b[(j-1)*st] = dd[j-1]*v[(j-1)*st]
for k in range(M-3, -1, -1):
j = k+2
p = k*dd[k]/(k+1)
r = 24*(k+1)*(k+2)*M_PI
d = v[j*st]/(j+3.)
if k % 2 == 0:
s1 += d
s2 += (j+2)*(j+2)*d
b[k*st] = dd[k]*v[k*st] + p*s1 + r*s2
else:
o1 += d
o2 += (j+2)*(j+2)*d
b[k*st] = dd[k]*v[k*st] + p*o1 + r*o2
cpdef SBB_matvec(np.ndarray v, np.ndarray b, int axis, double[::1] dd):
cdef:
SBB c0 = SBB(&dd[0], dd.shape[0])
np.ndarray[long int, ndim=1] shape = np.array(np.shape(v), dtype=int)
int N = v.shape[axis]
int st = v.strides[axis]//v.itemsize
if v.dtype.char in 'fdg':
IterAllButAxis[double](SBB_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, &c0)
else:
IterAllButAxis[complex](SBB_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, &c0)
return b
ctypedef struct ADD:
double* dd
int N
cdef void ADD_matvec_ptr(T* const v,
T* b,
int st,
int N,
void* const data):
cdef:
int i, j, k
ADD* c0 = <ADD*>data
double* dd = c0.dd
int M = c0.N
double p
T s1 = 0.0
T s2 = 0.0
T d
k = M-1
b[k*st] = dd[k]*v[k*st]
b[(k-1)*st] = dd[k-1]*v[(k-1)*st]
for k in range(M-3, -1, -1):
j = k+2
p = -4*(k+1)*M_PI
if j % 2 == 0:
s1 += v[j*st]
b[k*st] = dd[k]*v[k*st] + p*s1
else:
s2 += v[j*st]
b[k*st] = dd[k]*v[k*st] + p*s2
cpdef ADD_matvec(np.ndarray v, np.ndarray b, int axis, double[::1] dd):
cdef:
ADD c0 = ADD(&dd[0], dd.shape[0])
np.ndarray[long int, ndim=1] shape = np.array(np.shape(v), dtype=int)
int N = v.shape[axis]
int st = v.strides[axis]//v.itemsize
if v.dtype.char in 'fdg':
IterAllButAxis[double](ADD_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, &c0)
else:
IterAllButAxis[complex](ADD_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, &c0)
return b
cdef void ATT_matvec_ptr(T* const v,
T* b,
int st,
int N,
void* const data):
cdef:
int i, j, k
double p0, p1
T s1 = 0.0
T s2 = 0.0
T s3 = 0.0
T s4 = 0.0
T d
k = N-1
b[(N-1)*st] = 0
b[(N-2)*st] = 0
for k in range(N-3, -1, -1):
j = k+2
p0 = M_PI/2
p1 = M_PI/2*k**2
if j % 2 == 0:
s1 += j*v[j*st]
s3 += j**3*v[j*st]
b[k*st] = p0*s3 - p1*s1
else:
s2 += j*v[j*st]
s4 += j**3*v[j*st]
b[k*st] = p0*s4 - p1*s2
cpdef ATT_matvec(np.ndarray v, np.ndarray b, int axis):
cdef:
np.ndarray[long int, ndim=1] shape = np.array(np.shape(v), dtype=int)
int N = v.shape[axis]
int st = v.strides[axis]//v.itemsize
if v.dtype.char in 'fdg':
IterAllButAxis[double](ATT_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, NULL)
else:
IterAllButAxis[complex](ATT_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, NULL)
return b
cdef void GLL_matvec_ptr(T* const v,
T* b,
int st,
int N,
void* const data):
cdef:
int i, j, k
double p0, p1
T s1 = 0.0
T s2 = 0.0
T s3 = 0.0
T s4 = 0.0
T d
k = N-1
b[(N-1)*st] = 0
b[(N-2)*st] = 0
for k in range(N-3, -1, -1):
j = k+2
p0 = 2*(k+0.5)/(2*k+1)
p1 = p0*k*(k+1)
if j % 2 == 0:
s1 += j*(j+1)*v[j*st]
s3 += v[j*st]
b[k*st] = p0*s1 - p1*s3
else:
s2 += j*(j+1)*v[j*st]
s4 += v[j*st]
b[k*st] = p0*s2 - p1*s4
cpdef GLL_matvec(np.ndarray v, np.ndarray b, int axis):
cdef:
np.ndarray[long int, ndim=1] shape = np.array(np.shape(v), dtype=int)
int N = v.shape[axis]
int st = v.strides[axis]//v.itemsize
if v.dtype.char in 'fdg':
IterAllButAxis[double](GLL_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, NULL)
else:
IterAllButAxis[complex](GLL_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, NULL)
return b
cdef void CLL_matvec_ptr(T* const v,
T* b,
int st,
int N,
void* const data):
cdef:
int i, j, k
double p
T s1 = 0.0
T s2 = 0.0
b[(N-1)*st] = 0
for k in range(N-2, -1, -1):
j = k+1
if j % 2 == 0:
s1 += v[j*st]
b[k*st] = 2*s1
else:
s2 += v[j*st]
b[k*st] = 2*s2
cpdef CLL_matvec(np.ndarray v, np.ndarray b, int axis):
cdef:
np.ndarray[long int, ndim=1] shape = np.array(np.shape(v), dtype=int)
int N = v.shape[axis]
int st = v.strides[axis]//v.itemsize
if v.dtype.char in 'fdg':
IterAllButAxis[double](CLL_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, NULL)
else:
IterAllButAxis[complex](CLL_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, NULL)
return b
cdef void CTSD_matvec_ptr(T* const v,
T* b,
int st,
int N,
void* const data):
cdef:
int i, ii
T sum_u0, sum_u1
double pi = np.pi
double pi2 = 2*np.pi
sum_u0 = 0.0
sum_u1 = 0.0
b[(N-1)*st] = 0.0
b[(N-2)*st] = -(N-2+1)*pi*v[(N-3)*st]
b[(N-3)*st] = -(N-3+1)*pi*v[(N-4)*st]
for i in range(N-4, -1, -1):
ii = i*st
if i > 0:
b[ii] = -(i+1)*pi*v[(i-1)*st]
else:
b[ii] = 0
if i % 2 == 0:
sum_u0 = sum_u0 + v[(i+1)*st]
b[ii] -= sum_u0*pi2
else:
sum_u1 = sum_u1 + v[(i+1)*st]
b[ii] -= sum_u1*pi2
cpdef CTSD_matvec(np.ndarray v, np.ndarray b, int axis):
cdef:
np.ndarray[long int, ndim=1] shape = np.array(np.shape(v), dtype=int)
int N = v.shape[axis]
int st = v.strides[axis]//v.itemsize
if v.dtype.char in 'fdg':
IterAllButAxis[double](CTSD_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, NULL)
else:
IterAllButAxis[complex](CTSD_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, NULL)
return b
cdef void CTT_matvec_ptr(T* const v,
T* b,
int st,
int N,
void* const data):
cdef:
int i, j, k
double p
T s1 = 0.0
T s2 = 0.0
b[(N-1)*st] = 0
for k in range(N-2, -1, -1):
j = k+1
if j % 2 == 0:
s1 += (k+1)*v[j*st]
b[k*st] = M_PI*s1
else:
s2 += (k+1)*v[j*st]
b[k*st] = M_PI*s2
cpdef CTT_matvec(np.ndarray v, np.ndarray b, int axis):
cdef:
np.ndarray[long int, ndim=1] shape = np.array(np.shape(v), dtype=int)
int N = v.shape[axis]
int st = v.strides[axis]//v.itemsize
if v.dtype.char in 'fdg':
IterAllButAxis[double](CTT_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, NULL)
else:
IterAllButAxis[complex](CTT_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, NULL)
return b
ctypedef struct TD:
double* ld
double* dd
double* ud
int N
cdef void Tridiagonal_matvec_ptr(T* const v,
T* b,
int st,
int N,
void* const data):
cdef:
int i
TD* c0 = <TD*>data
double* ld = c0.ld
double* dd = c0.dd
double* ud = c0.ud
int M = c0.N
b[0] = dd[0]*v[0] + ud[0]*v[2*st]
b[st] = dd[1]*v[st] + ud[1]*v[3*st]
for i in range(2, M-2):
b[i*st] = ld[i-2]*v[(i-2)*st] + dd[i]*v[i*st] + ud[i]*v[(i+2)*st]
i = M-2
b[i*st] = ld[i-2]*v[(i-2)*st] + dd[i]*v[i*st]
i = M-1
b[i*st] = ld[i-2]* v[(i-2)*st] + dd[i]*v[i*st]
cpdef Tridiagonal_matvec(np.ndarray v, np.ndarray b, int axis, double[::1] ld, double[::1] dd, double[::1] ud):
cdef:
TD c0 = TD(&ld[0], &dd[0], &ud[0], dd.shape[0])
np.ndarray[long int, ndim=1] shape = np.array(np.shape(v), dtype=int)
int N = v.shape[axis]
int st = v.strides[axis]//v.itemsize
if v.dtype.char in 'fdg':
IterAllButAxis[double](Tridiagonal_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, &c0)
else:
IterAllButAxis[complex](Tridiagonal_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, &c0)
return b
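# Naming note and cross-check sketch: despite the name, the nonzero diagonals
# here sit at offsets 0 and +/-2 (typical of Chebyshev composite bases), not +/-1:
# import numpy as np
# M = 10
# ld, dd, ud = np.random.rand(M - 2), np.random.rand(M), np.random.rand(M - 2)
# A = np.diag(ld, -2) + np.diag(dd) + np.diag(ud, 2)
# v, b = np.random.rand(M), np.zeros(M)
# assert np.allclose(Tridiagonal_matvec(v, b, 0, ld, dd, ud), A @ v)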
ctypedef struct PD:
double* ldd
double* ld
double* dd
double* ud
double* udd
int N
cdef void Pentadiagonal_matvec_ptr(T* const v,
T* b,
int st,
int N,
void* const data):
cdef:
int i
PD* c0 = <PD*>data
double* ldd = c0.ldd
double* ld = c0.ld
double* dd = c0.dd
double* ud = c0.ud
double* udd = c0.udd
int M = c0.N
b[0] = dd[0]*v[0] + ud[0]*v[2*st] + udd[0]*v[4*st]
b[1*st] = dd[1]*v[1*st] + ud[1]*v[3*st] + udd[1]*v[5*st]
b[2*st] = ld[0]*v[0] + dd[2]*v[2*st] + ud[2]*v[4*st] + udd[2]*v[6*st]
b[3*st] = ld[1]*v[1*st] + dd[3]*v[3*st] + ud[3]*v[5*st] + udd[3]*v[7*st]
for i in range(4, M-4):
b[i*st] = ldd[i-4]*v[(i-4)*st] + ld[i-2]*v[(i-2)*st] + dd[i]*v[i*st] + ud[i]*v[(i+2)*st] + udd[i]*v[(i+4)*st]
i = M-4
b[i*st] = ldd[i-4]*v[(i-4)*st] + ld[i-2]*v[(i-2)*st] + dd[i]*v[i*st] + ud[i]*v[(i+2)*st]
i = M-3
b[i*st] = ldd[i-4]*v[(i-4)*st] + ld[i-2]*v[(i-2)*st] + dd[i]*v[i*st] + ud[i]*v[(i+2)*st]
i = M-2
b[i*st] = ldd[i-4]*v[(i-4)*st] + ld[i-2]*v[(i-2)*st] + dd[i]*v[i*st]
i = M-1
b[i*st] = ldd[i-4]*v[(i-4)*st] + ld[i-2]*v[(i-2)*st] + dd[i]*v[i*st]
cpdef Pentadiagonal_matvec(np.ndarray v, np.ndarray b, int axis, double[::1] ldd, double[::1] ld, double[::1] dd, double[::1] ud, double[::1] udd):
cdef:
PD c0 = PD(&ldd[0], &ld[0], &dd[0], &ud[0], &udd[0], dd.shape[0])
np.ndarray[long int, ndim=1] shape = np.array(np.shape(v), dtype=int)
int N = v.shape[axis]
int st = v.strides[axis]//v.itemsize
if v.dtype.char in 'fdg':
IterAllButAxis[double](Pentadiagonal_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, &c0)
else:
IterAllButAxis[complex](Pentadiagonal_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, &c0)
return b
ctypedef struct CBD:
double* ld
double* ud
double* udd
int N
cdef void CBD_matvec_ptr(T* const v,
T* b,
int st,
int N,
void* const data):
cdef:
int i
CBD* c0 = <CBD*>data
double* ld = c0.ld
double* ud = c0.ud
double* udd = c0.udd
int M = c0.N
b[0] = ud[0]*v[1*st] + udd[0]*v[3*st]
for i in range(1, M):
b[i*st] = ld[i-1]*v[(i-1)*st] + ud[i]*v[(i+1)*st] + udd[i]*v[(i+3)*st]
i = M
b[i*st] = ld[i-1]*v[(i-1)*st] + ud[i]*v[(i+1)*st]
cpdef CBD_matvec(np.ndarray v, np.ndarray b, int axis, double[::1] ld, double[::1] ud, double[::1] udd):
cdef:
CBD c0 = CBD(&ld[0], &ud[0], &udd[0], udd.shape[0])
np.ndarray[long int, ndim=1] shape = np.array(np.shape(v), dtype=int)
int N = v.shape[axis]
int st = v.strides[axis]//v.itemsize
if v.dtype.char in 'fdg':
IterAllButAxis[double](CBD_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, &c0)
else:
IterAllButAxis[complex](CBD_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, &c0)
return b
ctypedef struct CDB:
double* lld
double* ld
double* ud
int N
cdef void CDB_matvec_ptr(T* const v,
T* b,
int st,
int N,
void* const data):
cdef:
int i, j, k
CDB* c0 = <CDB*>data
double* lld = c0.lld
double* ld = c0.ld
double* ud = c0.ud
int M = c0.N
b[0] = ud[0]*v[st]
for k in range(1, 3):
b[k*st] = ld[k-1]*v[(k-1)*st] + ud[k]*v[(k+1)*st]
for k in range(3, M):
b[k*st] = lld[k-3]*v[(k-3)*st] + ld[k-1]*v[(k-1)*st] + ud[k]*v[(k+1)*st]
for k in range(M, M+2):
b[k*st] = lld[k-3]*v[(k-3)*st] + ld[k-1]* v[(k-1)*st]
b[(M+2)*st] = lld[M-1]*v[(M-1)*st]
cpdef CDB_matvec(np.ndarray v, np.ndarray b, int axis, double[::1] lld, double[::1] ld, double[::1] ud):
cdef:
CDB c0 = CDB(&lld[0], &ld[0], &ud[0], ud.shape[0])
np.ndarray[long int, ndim=1] shape = np.array(np.shape(v), dtype=int)
int N = v.shape[axis]
int st = v.strides[axis]//v.itemsize
if v.dtype.char in 'fdg':
IterAllButAxis[double](CDB_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, &c0)
else:
IterAllButAxis[complex](CDB_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, &c0)
return b
ctypedef struct BBD:
double* ld
double* dd
double* ud
double* uud
int N
cdef void BBD_matvec_ptr(T* const v,
T* b,
int st,
int N,
void* const data):
cdef:
int k
BBD* c0 = <BBD*>data
double* ld = c0.ld
double* dd = c0.dd
double* ud = c0.ud
double* uud = c0.uud
int M = c0.N
b[0] = dd[0]*v[0] + ud[0]*v[2*st] + uud[0]*v[4*st]
b[st] = dd[1]*v[st] + ud[1]*v[3*st] + uud[1]*v[5*st]
for k in range(2, M):
b[k*st] = ld[0]*v[(k-2)*st] + dd[k]*v[k*st] + ud[k]*v[(k+2)*st] + uud[k]*v[(k+4)*st]
for k in range(M, M+2):
b[k*st] = ld[0]*v[(k-2)*st] + dd[k]*v[k*st] + ud[k]*v[(k+2)*st]
cpdef BBD_matvec(np.ndarray v, np.ndarray b, int axis, double ld, double[::1] dd, double[::1] ud, double[::1] uud):
cdef:
BBD c0 = BBD(&ld, &dd[0], &ud[0], &uud[0], uud.shape[0])
np.ndarray[long int, ndim=1] shape = np.array(np.shape(v), dtype=int)
int N = v.shape[axis]
int st = v.strides[axis]//v.itemsize
if v.dtype.char in 'fdg':
IterAllButAxis[double](BBD_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, &c0)
else:
IterAllButAxis[complex](BBD_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), st, N, axis, shape, shape, &c0)
return b
# Helmholtz solver has nonconstant coefficients alfa and beta, so need special iterator
ctypedef void (*funH)(T* const, T*, double* const, double* const, int, int, void* const)
cdef ABIterAllButAxis(funH f, np.ndarray[T, ndim=1] input_array, np.ndarray[T, ndim=1] output_array,
np.ndarray[double, ndim=1] alfa, np.ndarray[double, ndim=1] beta, int st, int N,
int axis, long int[::1] shape, long int[::1] ashape, void* const data):
cdef:
np.flatiter ita = np.PyArray_IterAllButAxis(input_array.reshape(shape), &axis)
np.flatiter ito = np.PyArray_IterAllButAxis(output_array.reshape(shape), &axis)
np.flatiter alfai = np.PyArray_IterAllButAxis(alfa.reshape(ashape), &axis)
np.flatiter betai = np.PyArray_IterAllButAxis(beta.reshape(ashape), &axis)
while np.PyArray_ITER_NOTDONE(ita):
f(<T* const>np.PyArray_ITER_DATA(ita), <T*>np.PyArray_ITER_DATA(ito), <double*>np.PyArray_ITER_DATA(alfai), <double*>np.PyArray_ITER_DATA(betai), st, N, data)
np.PyArray_ITER_NEXT(ita)
np.PyArray_ITER_NEXT(ito)
np.PyArray_ITER_NEXT(alfai)
np.PyArray_ITER_NEXT(betai)
ctypedef struct HH:
double* dd
double* ud
double* bd
int N
cdef void Helmholtz_matvec_ptr(T* const v,
T* b,
double* const alfa,
double* const beta,
int st,
int N,
void* const data):
# b = (alfa*A + beta*B)*v
# For B matrix ld = ud = -pi/2
cdef:
int i, j, k
HH* c0 = <HH*>data
double* dd = c0.dd
double* ud = c0.ud
double* bd = c0.bd
int M = c0.N
T s1 = 0.0
T s2 = 0.0
double p
double alf = alfa[0]
double bet = beta[0]
k = M-1
b[k*st] = (dd[k]*alf + bd[k]*bet)*v[k*st] - M_PI_2*bet*v[(k-2)*st]
b[(k-1)*st] = (dd[k-1]*alf + bd[k-1]*bet)*v[(k-1)*st] - M_PI_2*bet*v[(k-3)*st]
for k in range(M-3, 1, -1):
p = ud[k]*alf
if k % 2 == 0:
s2 += v[(k+2)*st]
b[k*st] = (dd[k]*alf + bd[k]*bet)*v[k*st] - M_PI_2*bet*(v[(k-2)*st] + v[(k+2)*st]) + p*s2
else:
s1 += v[(k+2)*st]
b[k*st] = (dd[k]*alf + bd[k]*bet)*v[k*st] - M_PI_2*bet*(v[(k-2)*st] + v[(k+2)*st]) + p*s1
k = 1
s1 += v[(k+2)*st]
s2 += v[(k+1)*st]
b[k*st] = (dd[k]*alf + bd[k]*bet)*v[k*st] - M_PI_2*bet*v[(k+2)*st] + ud[k]*alf*s1
b[(k-1)*st] = (dd[k-1]*alf + bd[k-1]*bet)*v[(k-1)*st] - M_PI_2*bet*v[(k+1)*st] + ud[k-1]*alf*s2
cpdef Helmholtz_matvec(np.ndarray v, np.ndarray b, np.ndarray alfa, np.ndarray beta, A, B, int axis):
cdef:
double[::1] dd = A[0]
double[::1] ud = A[2]
double[::1] bd = B[0]
HH c0 = HH(&dd[0], &ud[0], &bd[0], A[0].shape[0])
np.ndarray[long int, ndim=1] shape = np.array(np.shape(v), dtype=int)
np.ndarray[long int, ndim=1] ashape = np.array(np.shape(alfa), dtype=int)
int N = v.shape[axis]
int st = v.strides[axis]//v.itemsize
if v.dtype.char in 'fdg':
ABIterAllButAxis[double](Helmholtz_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), np.PyArray_Ravel(alfa, np.NPY_CORDER), np.PyArray_Ravel(beta, np.NPY_CORDER), st, N, axis, shape, ashape, &c0)
else:
ABIterAllButAxis[complex](Helmholtz_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), np.PyArray_Ravel(alfa, np.NPY_CORDER), np.PyArray_Ravel(beta, np.NPY_CORDER), st, N, axis, shape, ashape, &c0)
return b
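# Call-shape note (a sketch of assumptions, not the library's spec): the
# wrapper above reads A[0]/A[2] as the main and second-upper diagonals of A
# and B[0] as the main diagonal of B, while B's +/-2 off-diagonals are the
# constant -pi/2 hard-coded in Helmholtz_matvec_ptr, so b = (alfa*A + beta*B) @ v.
# Any mapping from diagonal offset to a 1-D double array works, e.g. A = {0: dd, 2: ud}.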
ctypedef struct HN:
double* dd
double* ud
double* bl
double* bd
double* bu
int N
cdef void Helmholtz_Neumann_matvec_ptr(T* const v,
T* b,
double* const alfa,
double* const beta,
int st,
int N,
void* const data):
# b = (alfa*A + beta*B)*v
# A matrix has diagonal dd and upper second diagonal at ud
# B matrix has diagonal bd and second upper and lower diagonals bu and bl
cdef:
int i, j, k, j2
HN* c0 = <HN*>data
double* dd = c0.dd
double* ud = c0.ud
double* bl = c0.bl
double* bd = c0.bd
double* bu = c0.bu
int M = c0.N
T s1 = 0.0
T s2 = 0.0
double p
double alf = alfa[0]
double bet = beta[0]
for k in (M-1, M-2):
j2 = k*k
b[k*st] = (dd[k]*alf + bd[k]*bet)*v[k*st]*j2 + bl[k-2]*bet*v[(k-2)*st]*j2
for k in range(M-3, 1, -1):
p = ud[k]*alf
if k % 2 == 0:
s2 += v[(k+2)*st]*(k+2)**2
b[k*st] = (dd[k]*alf + bd[k]*bet)*v[k*st]*k**2 + bet*(bl[k-2]*v[(k-2)*st]*(k-2)**2 + bu[k]*v[(k+2)*st]*(k+2)**2) + p*s2
else:
s1 += v[(k+2)*st]*(k+2)**2
b[k*st] = (dd[k]*alf + bd[k]*bet)*v[k*st]*k**2 + bet*(bl[k-2]*v[(k-2)*st]*(k-2)**2 + bu[k]*v[(k+2)*st]*(k+2)**2) + p*s1
k = 1
s1 += v[(k+2)*st]*(k+2)**2
b[k*st] = (dd[k]*alf + bd[k]*bet)*v[k*st]*k**2 + bet*(bu[k]*v[(k+2)*st]*(k+2)**2) + ud[k]*alf*s1
k = 0
s2 += v[(k+2)*st]*(k+2)**2
b[k*st] = (dd[k]*alf + bd[k]*bet)*v[k*st]*k**2 + bet*(bu[k]*v[(k+2)*st]*(k+2)**2) + ud[k]*alf*s2
b[0] += bd[0]*v[0]*bet
b[2*st] += bl[0]*v[0]*bet
cpdef Helmholtz_Neumann_matvec(np.ndarray v, np.ndarray b, np.ndarray alfa, np.ndarray beta, A, B, int axis):
cdef:
HN c0
np.ndarray[long int, ndim=1] shape = np.array(np.shape(v), dtype=int)
np.ndarray[long int, ndim=1] ashape = np.array(np.shape(alfa), dtype=int)
int N = v.shape[axis]
int st = v.strides[axis]//v.itemsize
int M = A[0].shape[0]
np.ndarray[double, ndim=1] j2 = np.arange(M, dtype='d')**2  # float, so the 1/j**2 scaling below is not truncated
double[::1] dd = np.zeros_like(A[0])
double[::1] ud = np.zeros_like(A[2])
double[::1] bl = np.zeros_like(B[-2])
double[::1] bd = np.zeros_like(B[0])
double[::1] bu = np.zeros_like(B[2])
j2[0] = 1
j2[:] = 1/j2
j2[0] = 0
dd[:] = A[0]*j2
ud[:] = A[2]*j2[2:]
j2[0] = 1
bd[:] = B[0]*j2
bu[:] = B[2]*j2[2:]
bl[:] = B[-2]*j2[:-2]
c0 = HN(&dd[0], &ud[0], &bl[0], &bd[0], &bu[0], A[0].shape[0])
if v.dtype.char in 'fdg':
ABIterAllButAxis[double](Helmholtz_Neumann_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), np.PyArray_Ravel(alfa, np.NPY_CORDER), np.PyArray_Ravel(beta, np.NPY_CORDER), st, N, axis, shape, ashape, &c0)
else:
ABIterAllButAxis[complex](Helmholtz_Neumann_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), np.PyArray_Ravel(alfa, np.NPY_CORDER), np.PyArray_Ravel(beta, np.NPY_CORDER), st, N, axis, shape, ashape, &c0)
return b
ctypedef struct Bi:
double* a0
# 3 upper diagonals of SBB
double* sii
double* siu
double* siuu
# All 3 diagonals of ABB
double* ail
double* aii
double* aiu
# All 5 diagonals of BBB
double* bill
double* bil
double* bii
double* biu
double* biuu
int N
cdef void Biharmonic_matvec_ptr(T* const v,
T* b,
double* const alfa,
double* const beta,
int st,
int N,
void* const data):
cdef:
int i, j, k
vector[double] ldd, ld, dd, ud, udd
double p, r
T d, s1, s2, o1, o2
Bi* c0 = <Bi*>data
double* sii = c0.sii
double* siu = c0.siu
double* siuu = c0.siuu
double* ail = c0.ail
double* aii = c0.aii
double* aiu = c0.aiu
double* bill = c0.bill
double* bil = c0.bil
double* bii = c0.bii
double* biu = c0.biu
double* biuu = c0.biuu
double a0 = c0.a0[0]
int M = c0.N
double alf = alfa[0]
double bet = beta[0]
dd.resize(M)
ld.resize(M)
ldd.resize(M)
ud.resize(M)
udd.resize(M)
for i in range(M):
dd[i] = a0*sii[i] + alf*aii[i] + bet*bii[i]
for i in range(M-2):
ld[i] = alf*ail[i] + bet*bil[i]
for i in range(M-4):
ldd[i] = bet*bill[i]
for i in range(M-2):
ud[i] = a0*siu[i] + alf*aiu[i] + bet*biu[i]
for i in range(M-4):
udd[i] = a0*siuu[i] + bet*biuu[i]
i = M-1
b[i*st] = ldd[i-4]*v[(i-4)*st]+ ld[i-2]* v[(i-2)*st] + dd[i]*v[i*st]
i = M-2
b[i*st] = ldd[i-4]*v[(i-4)*st]+ ld[i-2]* v[(i-2)*st] + dd[i]*v[i*st]
i = M-3
b[i*st] = ldd[i-4]*v[(i-4)*st]+ ld[i-2]* v[(i-2)*st] + dd[i]*v[i*st] + ud[i]*v[(i+2)*st]
i = M-4
b[i*st] = ldd[i-4]*v[(i-4)*st]+ ld[i-2]* v[(i-2)*st] + dd[i]*v[i*st] + ud[i]*v[(i+2)*st]
i = M-5
b[i*st] = ldd[i-4]*v[(i-4)*st]+ ld[i-2]* v[(i-2)*st] + dd[i]*v[i*st] + ud[i]*v[(i+2)*st] + udd[i]*v[(i+4)*st]
i = M-6
b[i*st] = ldd[i-4]*v[(i-4)*st]+ ld[i-2]* v[(i-2)*st] + dd[i]*v[i*st] + ud[i]*v[(i+2)*st] + udd[i]*v[(i+4)*st]
s1 = 0.0
s2 = 0.0
o1 = 0.0
o2 = 0.0
for k in range(M-7, -1, -1):
j = k+6
p = k*sii[k]/(k+1.)
r = 24*(k+1)*(k+2)*M_PI
d = v[j*st]/(j+3.)
if k % 2 == 0:
s1 += d
s2 += (j+2)*(j+2)*d
b[k*st] = (p*s1 + r*s2)*a0
else:
o1 += d
o2 += (j+2)*(j+2)*d
b[k*st] = (p*o1 + r*o2)*a0
if k > 3:
b[k*st] += ldd[k-4]*v[(k-4)*st]+ ld[k-2]* v[(k-2)*st] + dd[k]*v[k*st] + ud[k]*v[(k+2)*st] + udd[k]*v[(k+4)*st]
elif k > 1:
b[k*st] += ld[k-2]* v[(k-2)*st] + dd[k]*v[k*st] + ud[k]*v[(k+2)*st] + udd[k]*v[(k+4)*st]
else:
b[k*st] += dd[k]*v[k*st] + ud[k]*v[(k+2)*st] + udd[k]*v[(k+4)*st]
cpdef Biharmonic_matvec(np.ndarray v, np.ndarray b, double a0, np.ndarray alfa, np.ndarray beta,
double[::1] sii, double[::1] siu, double[::1] siuu,
double[::1] ail, double[::1] aii, double[::1] aiu,
double[::1] bill, double[::1] bil, double[::1] bii, double[::1] biu, double[::1] biuu, int axis=0):
cdef:
Bi c0 = Bi(&a0, &sii[0], &siu[0], &siuu[0], &ail[0], &aii[0], &aiu[0], &bill[0], &bil[0], &bii[0], &biu[0], &biuu[0], sii.shape[0])
np.ndarray[long int, ndim=1] shape = np.array(np.shape(v), dtype=int)
np.ndarray[long int, ndim=1] ashape = np.array(np.shape(alfa), dtype=int)
int N = v.shape[axis]
int st = v.strides[axis]//v.itemsize
if v.dtype.char in 'fdg':
ABIterAllButAxis[double](Biharmonic_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), np.PyArray_Ravel(alfa, np.NPY_CORDER), np.PyArray_Ravel(beta, np.NPY_CORDER), st, N, axis, shape, ashape, &c0)
else:
ABIterAllButAxis[complex](Biharmonic_matvec_ptr, np.PyArray_Ravel(v, np.NPY_CORDER), np.PyArray_Ravel(b, np.NPY_CORDER), np.PyArray_Ravel(alfa, np.NPY_CORDER), np.PyArray_Ravel(beta, np.NPY_CORDER), st, N, axis, shape, ashape, &c0)
return b
<|end_of_text|>def approx_e(int n=40, display=False):
cdef double sum = 0.
cdef double factorial = 1.
cdef int k
for k in range(1, n + 1):
factorial *= k
sum += 1/factorial
if display:
print(1 + sum)
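# Usage sketch: the loop accumulates sum_{k=1}^{n} 1/k!, so 1 + that total is
# the Taylor series for e; with the default n=40 it matches e to double precision.
# approx_e(display=True)   # prints 2.718281828459045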
<|end_of_text|>"""
Copyright (C) 2011, Enthought Inc
Copyright (C) 2011, Patrick Henaff
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the license for more details.
"""
include '../types.pxi'
from quantlib.handle cimport Handle, shared_ptr
from quantlib.termstructures.yields._flat_forward cimport YieldTermStructure
cimport quantlib._quote as _qt
from quantlib._stochastic_process cimport StochasticProcess
cdef extern from 'ql/processes/hestonprocess.hpp' namespace 'QuantLib':
cdef cppclass HestonProcess(StochasticProcess):
HestonProcess() # fake empty constructor for Cython
# fixme: implement the discrization version of the constructor
enum Discretization:
PartialTruncation
FullTruncation
Reflection
NonCentralChiSquareVariance
QuadraticExponential
QuadraticExponentialMartingale
BroadieKayaExactSchemeLobatto
BroadieKayaExactSchemeLaguerre
BroadieKayaExactSchemeTrapezoidal
HestonProcess(
Handle[YieldTermStructure]& riskFreeRate,
Handle[YieldTermStructure]& dividendYield,
Handle[_qt.Quote]& s0,
Real v0, Real kappa,
Real theta, Real sigma, Real rho, HestonProcess.Discretization d) except +
Size size() except +
Real v0() except +
Real rho() except +
Real kappa() except +
Real theta() except +
Real sigma() except +
Handle[_qt.Quote] s0()
Handle[YieldTermStructure] dividendYield()
Handle[YieldTermStructure] riskFreeRate()
cdef extern from 'ql/processes/batesprocess.hpp' namespace 'QuantLib':
cdef cppclass BatesProcess(HestonProcess):
BatesProcess(
Handle[YieldTermStructure]& riskFreeRate,
Handle[YieldTermStructure]& dividendYield,
Handle[_qt.Quote]& s0,
Real v0, Real kappa,
Real theta, Real sigma, Real rho,
Real lambda_, Real nu, Real delta, HestonProcess.Discretization d) except +
Real Lambda 'lambda'() except +
Real nu() except +
Real delta() except +
<|end_of_text|>#cython: boundscheck=False
#cython: wraparound=False
import numpy as np
cimport numpy as np
def foreground_groups(
np.ndarray[np.int32_t, ndim=2] foreground,
np.ndarray[np.int32_t, ndim=2] groups,
np.ndarray[np.int32_t, ndim=1] group_size,
int num_groups
):
cdef int d0 = foreground.shape[0]
cdef int d1 = foreground.shape[1]
cdef np.ndarray[np.int32_t, ndim=1] fg_groups = np.zeros( ( num_groups,), np.int32 )
cdef np.ndarray[np.int32_t, ndim=2] mask = np.zeros( ( d0, d1 ), np.int32 )
cdef np.ndarray[np.int32_t, ndim=1] votes = np.zeros( ( num_groups,), np.int32 )
cdef int i, j
# Tally foreground pixels as votes for the group they belong to
for i in range(d0):
    for j in range(d1):
        if foreground[i, j] == 1:
            votes[groups[i, j]] += 1
# Mark a group as foreground if it is large enough and mostly foreground
for i in range(num_groups):
    if group_size[i] > 10 and 1.0 * votes[i] / group_size[i] > 0.6:
        fg_groups[i] = 1
# Rasterize the selected groups back into a pixel mask
for i in range(d0):
    for j in range(d1):
        if fg_groups[groups[i, j]] == 1:
            mask[i, j] = 1
return fg_groups, mask
<|end_of_text|>
from libc.math cimport log, log1p, expm1
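# Numerically stable Box-Cox transforms: (x**lmbda - 1)/lmbda computed via expm1(lmbda*log(x)), with the lmbda == 0 limit handled explicitly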
cdef inline double boxcox(double x, double lmbda) nogil:
if lmbda == 0:
return log(x)
else:
return expm1(lmbda * log(x)) / lmbda
cdef inline double boxcox1p(double x, double lmbda) nogil:
if lmbda == 0:
return log1p(x)
else:
return expm1(lmbda * log1p(x)) / lmbda
<|end_of_text|># distutils: language = c++
# distutils: sources =../libs/Zeo++/networkio.cc
from cavd.netstorage cimport ATOM_NETWORK, VORONOI_NETWORK
from libcpp.string cimport string
#Added at 20180704
from libcpp.vector cimport vector
from cavd.voronoicell cimport VOR_CELL, BASIC_VCELL
from cavd.channel cimport CHANNEL
cdef extern from '../libs/Zeo++/networkio.h':
cdef void parseFilename(const char* filename, char* name, char* extension)
cdef bint checkInputFile(char* filename)
cdef bint readCIFFile(char *filename, ATOM_NETWORK *cell, bint radial)
cdef bint readARCFile(char *filename, ATOM_NETWORK *cell, bint radial)
cdef bint readCUCFile(char *filename, ATOM_NETWORK *cell, bint radial)
cdef bint readCSSRFile(char *filename, ATOM_NETWORK *cell, bint radial)
cdef bint readV1File(char *filename, ATOM_NETWORK *cell, bint radial)
cdef bint writeToCSSR(char *filename, ATOM_NETWORK *cell)
cdef bint writeToCIF(char *filename, ATOM_NETWORK *cell)
cdef bint writeToV1(char * filename, ATOM_NETWORK *cell)
cdef bint writeToNt2(char *filename, VORONOI_NETWORK *vornet, double minRad)
cdef bint writeToNt2(char *filename, VORONOI_NETWORK *vornet)
cdef bint writeToXYZ(char *filename, ATOM_NETWORK *cell, bint is_supercell,
bint is_duplicate_perimeter_atoms)
cdef bint writeToVTK(char *filename, ATOM_NETWORK *cell)
cdef bint writeToMOPAC(char *filename, ATOM_NETWORK *cell, bint is_supercell)
cdef bint writeVornetToXYZ "writeToXYZ"(char *filename, VORONOI_NETWORK*, double)
# remove migrant ion added at 20180408
cdef bint readRemoveMigrantCif(char *filename, ATOM_NETWORK *cell, const char *migrant, bint radial)
# writeToVasp
#cdef bint writeToVasp(char *filename, ATOM_NETWORK *cell, VORONOI_NETWORK *vornet, bint storeRadius, double minRad)
#edited at 20180530
cdef bint writeToVasp(char *filename, ATOM_NETWORK *cell, VORONOI_NETWORK *vornet, double minRad, double maxRad)
cdef bint writeToVasp(char *filename, ATOM_NETWORK *cell, VORONOI_NETWORK *vornet)
cdef bint writeAtmntToVasp(char *filename, ATOM_NETWORK *cell)
#add at 20190518
cdef bint writeToNET(char *filename, ATOM_NETWORK *cell, VORONOI_NETWORK *vornet, double minRad, double maxRad)
cdef bint writeToNET(char *filename, ATOM_NETWORK *cell, VORONOI_NETWORK *vornet)
# At present the return value of performVoronoiDecomp is void*
# Compile it after void* is changed to bool in the original source file
cdef extern from "../libs/Zeo++/network.h":
cdef bint performVoronoiDecomp(bint, ATOM_NETWORK*, VORONOI_NETWORK*,
vector[VOR_CELL]*, bint, vector[BASIC_VCELL]*)<|end_of_text|># distutils: sources = c_funcs.c
cimport _c_funcs_h
ctypedef unsigned short unscaled
cpdef scale(v, scaling_type):
cdef (float (*)(_c_funcs_h.unscaled)) * SCALE_FUNCS = [
_c_funcs_h.scale_a, _c_funcs_h.scale_b]
return SCALE_FUNCS[scaling_type](v)
print(scale(1, 0))
print(scale(1, 1))
<|end_of_text|>from libc.stdint cimport uint32_t, uint64_t
cdef extern from "portable_endian.h":
cdef uint32_t htole32(uint32_t x);
cdef uint64_t htole64(uint64_t x);
<|end_of_text|># distutils: language = c++
from libcpp.string cimport string
from..Topology cimport _Topology, Topology
from..ArgList cimport _ArgList, ArgList
from..Frame cimport _Frame, Frame
from..AtomMask cimport _AtomMask, AtomMask
from..core.FileName cimport _FileName, FileName
from.DataSet_Coords cimport _DataSet_Coords, DataSet_Coords
from.DataSet cimport _DataSet, DataSet
from.DataSet_1D cimport _DataSet_1D, DataSet_1D
cdef extern from "DataSet_Coords_REF.h":
cdef cppclass _DataSet_Coords_REF "DataSet_Coords_REF" (_DataSet_Coords):
_DataSet_Coords_REF()
# turn off those methods since they are in parent class
@staticmethod
_DataSet * Alloc()
size_t Size() const
#int Sync()
#void Info() const
#void Add(size_t, const void *)
#int AllocateCoords(size_t)
#inline void AddFrame(const _Frame& fIn)
#inline void GetFrame(int idx, _Frame& fIn)
#inline void GetFrame(int idx, _Frame& fIn, const _AtomMask& mIn)
#inline void SetCRD(int idx, const _Frame& fIn)
int LoadRef(const string&, const _Topology&, int)
int SetupRef_Frame(const string&, const string&, const _Topology&, _ArgList&, int)
int SetupRef_Frame(_DataSet_Coords *, const string&, int, int)
int StripRef(const string&)
int StripRef(const _AtomMask&)
const _Frame& RefFrame() const
const _FileName& _FrameFilename() const
int RefIndex() const
cdef class DataSet_Coords_REF (DataSet_Coords):
cdef _DataSet_Coords_REF* thisptr
<|end_of_text|>###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2018 Prof. William H. Green ([email protected]), #
# Prof. Richard H. West ([email protected]) and the RMG Team ([email protected]) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
Contains functionality for generating the master equation matrix for a given
pressure-dependent reaction network.
"""
import numpy
cimport numpy
from libc.math cimport exp
import rmgpy.constants as constants
################################################################################
cpdef generateFullMEMatrix(network, bint products=True):
"""
Generate the full master equation matrix for the network.
"""
cdef numpy.ndarray[numpy.int_t,ndim=1] Jlist
cdef numpy.ndarray[numpy.int_t,ndim=3] indices
cdef numpy.ndarray[numpy.float64_t,ndim=1] Elist
cdef numpy.ndarray[numpy.float64_t,ndim=2] M
cdef numpy.ndarray[numpy.float64_t,ndim=3] densStates
cdef numpy.ndarray[numpy.float64_t,ndim=4] Kij, Gnj, Fim
cdef numpy.ndarray[numpy.float64_t,ndim=5] Mcoll
cdef double T, P, beta, val
cdef int Nisom, Nreac, Nprod, Ngrains, NJ
cdef int i, n, r, s, u, v
T = network.T
P = network.P
Elist = network.Elist
Jlist = network.Jlist
densStates = network.densStates
Mcoll = network.Mcoll
Kij = network.Kij
Fim = network.Fim
Gnj = network.Gnj
Nisom = network.Nisom
Nreac = network.Nreac
Nprod = network.Nprod
Ngrains = network.Ngrains
NJ = network.NJ
beta = 1. / (constants.R * T)
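# beta = 1/(R*T): the Boltzmann factor exp(-E*beta) is applied to the association terms below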
# Construct accounting matrix
indices = -numpy.ones((Nisom,Ngrains,NJ), numpy.int_)
Nrows = 0
for r in range(Ngrains):
for s in range(NJ):
for i in range(Nisom):
if densStates[i,r,s] > 0:
indices[i,r,s] = Nrows
Nrows += 1
Nrows += Nreac
if products:
Nrows += Nprod
# Construct full ME matrix
M = numpy.zeros([Nrows,Nrows], numpy.float64)
# Collision terms
for i in range(Nisom):
for r in range(Ngrains):
for s in range(NJ):
if indices[i,r,s] > -1:
for u in range(r, Ngrains):
for v in range(s, NJ):
M[indices[i,r,s], indices[i,u,v]] = Mcoll[i,r,s,u,v]
M[indices[i,u,v], indices[i,r,s]] = Mcoll[i,u,v,r,s]
# Isomerization terms
for i in range(Nisom):
for j in range(i):
if Kij[i,j,Ngrains-1,0] > 0 or Kij[j,i,Ngrains-1,0] > 0:
for r in range(Ngrains):
for s in range(NJ):
u = indices[i,r,s]; v = indices[j,r,s]
if u > -1 and v > -1:
M[v,u] = Kij[j,i,r,s]
M[u,u] -= Kij[j,i,r,s]
M[u,v] = Kij[i,j,r,s]
M[v,v] -= Kij[i,j,r,s]
# Association/dissociation terms
for i in range(Nisom):
for n in range(Nreac+Nprod):
if Gnj[n,i,Ngrains-1,0] > 0:
for r in range(Ngrains):
for s in range(NJ):
u = indices[i,r,s]
if products:
v = Nrows - Nreac - Nprod + n
else:
v = Nrows - Nreac + n
if u > -1:
M[u,u] -= Gnj[n,i,r,s]
if n < Nreac or products:
M[v,u] = Gnj[n,i,r,s]
if n < Nreac:
val = Fim[i,n,r,s] * densStates[n+Nisom,r,s] * (2*Jlist[s]+1) * exp(-Elist[r] * beta)
M[u,v] = val
M[v,v] -= val
return M, indices
<|end_of_text|>import numpy as np
from syri.bin.func.myUsefulFunctions import *
import sys
import time
from igraph import *
from collections import Counter, deque, defaultdict
from scipy.stats import *
from datetime import datetime, date
import pandas as pd
from multiprocessing import Pool
from functools import partial
import os
from gc import collect
from Bio.SeqIO import parse
import logging
import psutil
from syri.pyxFiles.synsearchFunctions import apply_TS, alignmentBlock
from syri.pyxFiles.function cimport getAllLongestPaths, getConnectivityGraph
cimport numpy as np
cimport cython
np.random.seed(1)
cpdef getInvBlocks(invTree, invertedCoordsOri):
cdef int nrow, i, child
nrow = invTree.shape[0]
invBlocks = [alignmentBlock(i, np.where(invTree.iloc[i,] == True)[0], invertedCoordsOri.iloc[i]) for i in range(nrow)]
for block in invBlocks:
i = 0
while(i < len(block.children)):
block.children = list(set(block.children) - set(invBlocks[block.children[i]].children))
i+=1
block.children.sort()
for child in block.children:
invBlocks[child].addParent(block.id)
return(invBlocks)
cpdef list getShortest(invBlocks):
cdef:
shortest = deque()
int i
list j = list(range(len(invBlocks)))
invG = getConnectivityGraph(invBlocks)
source = np.array(invG.es['source'], dtype = np.int32)
target = np.array(invG.es['target'], dtype = np.int32)
weight = np.array(invG.es['weight'], dtype = np.float32)
n = len(invG.vs.indices)
for i in j:
shortest.append(getAllLongestPaths(n,i,j,source,target,weight))
short = [list(s) for s in shortest]
return short
cpdef list getRevenue(invBlocks, shortest, np.ndarray aStart, np.ndarray aEnd, np.ndarray bStart, np.ndarray bEnd, np.ndarray iDen):
cdef:
list revenue,i, values, startA, endA, startB, endB, iden
np.ndarray[np.int32_t] j
np.int32_t k
Py_ssize_t l
revenue = []
for i in shortest:
values = []
for j in i:
if len(j) == 1:
values.append(invBlocks[j[0]].score)
else:
score = 0
startA = [aStart[j[0]]]
endA = [aEnd[j[0]]]
startB = [bEnd[j[0]]]
endB = [bStart[j[0]]]
iden = [iDen[j[0]]]
for k in j[1:]:
isMore = True if iDen[k] > iden[-1] else False
if aStart[k] < endA[-1]:
if isMore:
endA[-1] = aStart[k]
startA.append(aStart[k])
endA.append(aEnd[k])
else:
startA.append(endA[-1])
endA.append(aEnd[k])
else:
startA.append(aStart[k])
endA.append(aEnd[k])
if bStart[k] > startB[-1]:
if isMore:
startB[-1] = bStart[k]
startB.append(bEnd[k])
endB.append(bStart[k])
else:
endB.append(startB[-1])
startB.append(bEnd[k])
else:
startB.append(bEnd[k])
endB.append(bStart[k])
iden.append(iDen[k])
if len(startA) == len(endA) == len(startB) == len(endB) == len(iden):
for l in range(len(iden)):
score += iden[l]*((endA[l] - startA[l]) + (endB[l] - startB[l]))
values.append(score)
revenue = revenue + [values]
return(revenue)
cpdef dict getNeighbourSyn(np.ndarray aStartInv, np.ndarray aEndInv, np.ndarray bStartInv, np.ndarray bEndInv, np.ndarray indexInv, np.ndarray bDirInv, np.ndarray aStartSyn, np.ndarray aEndSyn, np.ndarray bStartSyn, np.ndarray bEndSyn, np.ndarray indexSyn, np.ndarray bDirSyn, int threshold):
cdef:
Py_ssize_t i, j, index
dict neighbourSyn = dict()
int upBlock, downBlock
list upSyn, downSyn
for i in range(len(indexInv)):
index = indexInv[i]
upSyn = np.where(indexSyn < index)[0].tolist()
downSyn = np.where(indexSyn > index)[0].tolist()
upBlock = -1
downBlock = len(indexSyn)
for j in upSyn[::-1]:
if bDirSyn[j] == bDirInv[i]:
if (aStartInv[i] - aStartSyn[j]) > threshold and (aEndInv[i] - aEndSyn[j]) > threshold and (bStartInv[i] - bStartSyn[j]) > threshold and (bEndInv[i] - bEndSyn[j]) > threshold:
upBlock = j
break
else:
if (aStartInv[i] - aStartSyn[j]) > threshold and (aEndInv[i] - aEndSyn[j]) > threshold and (bEndInv[i] - bStartSyn[j]) > threshold and (bStartInv[i] - bEndSyn[j]) > threshold:
upBlock = j
break
for j in downSyn:
if bDirSyn[j] == bDirInv[i]:
if (aStartSyn[j] - aStartInv[i]) > threshold and (aEndSyn[j] - aEndInv[i]) > threshold and (bStartSyn[j] - bStartInv[i]) > threshold and (bEndSyn[j] - bEndInv[i]) > threshold:
downBlock = j
break
else:
if (aStartSyn[j] - aStartInv[i]) > threshold and (aEndSyn[j] - aEndInv[i]) > threshold and (bStartSyn[j] - bEndInv[i]) > threshold and (bEndSyn[j] - bStartInv[i]) > threshold:
downBlock = j
break
neighbourSyn[i] = [upBlock, downBlock]
return(neighbourSyn)
cpdef list getCost(list synPath, list shortest, dict neighbourSyn, list synBlockScore, synData, invertedCoordsOri):
cdef:
list cost, i, values
int leftSyn, rightSyn, leftEnd, rightEnd, overlapLength
double synCost
np.ndarray[np.int32_t] j
cost = []
synLength = len(synPath)
for i in shortest:
values = []
for j in i:
leftSyn, rightSyn = getNeighbours(neighbourSyn, j)
synCost = sum([synBlockScore[synIndex] for synIndex in range(leftSyn+1,rightSyn)])
leftEnd = synData.iat[leftSyn, 1] if leftSyn > -1 else 0
rightEnd = synData.iat[rightSyn,0] if rightSyn < synLength else invertedCoordsOri.iat[j[-1],1]
if rightEnd - leftEnd > 1000:
values.append(synCost)
else:
overlapLength = (leftEnd - invertedCoordsOri.iat[j[0], 0]) + (invertedCoordsOri.iat[j[-1],1] - rightEnd)
if overlapLength > ((rightEnd - leftEnd)/2):
values.append(synCost + 10000000000000)
else:
values.append(synCost)
cost = cost + [values]
return(cost)
def getNeighbours(neighbourSyn, j):
return(min(neighbourSyn[j[0]]+neighbourSyn[j[-1]]), max(neighbourSyn[j[0]]+neighbourSyn[j[-1]]))
def getInversions(coords,chromo, threshold, synData, synPath):
logger = logging.getLogger("getinversion."+chromo)
class inversion:
def __init__(self, cost, revenue, neighbours, invPos):
self.cost = cost
self.revenue = revenue
self.profit = revenue - cost
self.neighbours = list(neighbours)
self.invPos = invPos
invertedCoordsOri = coords.loc[(coords.aChr == chromo) & (coords.bChr == chromo) & (coords.bDir == -1)]
if len(invertedCoordsOri) == 0:
return(invertedCoordsOri, [],[],invertedCoordsOri,[],[])
invertedCoords = invertedCoordsOri.copy()
maxCoords = np.max(np.max(invertedCoords[["bStart","bEnd"]]))
invertedCoords.bStart = maxCoords + 1 - invertedCoords.bStart
invertedCoords.bEnd = maxCoords + 1 - invertedCoords.bEnd
nrow = pd.Series(range(invertedCoords.shape[0]))
if len(invertedCoordsOri) > 0:
invTree = pd.DataFrame(apply_TS(invertedCoords.aStart.values,invertedCoords.aEnd.values,invertedCoords.bStart.values,invertedCoords.bEnd.values, threshold), index = range(len(invertedCoords)), columns = invertedCoords.index.values)
else:
invTree = pd.DataFrame([], index = range(len(invertedCoords)), columns = invertedCoords.index.values)
logger.debug("found inv Tree " + chromo)
#######################################################################
###### Create list of inverted alignments
#######################################################################
invblocks = getInvBlocks(invTree, invertedCoordsOri)
logger.debug("found inv blocks " + chromo)
#########################################################################
###### Finding profitable inversions (group of inverted blocks)
#########################################################################
shortest = getShortest(invblocks)
logger.debug("found shortest " + chromo )
# revenue = getRevenue(invBlocks, shortest, invertedCoordsOri)
revenue = getRevenue(invblocks, shortest, invertedCoordsOri.aStart.values, invertedCoordsOri.aEnd.values, invertedCoordsOri.bStart.values, invertedCoordsOri.bEnd.values, invertedCoordsOri.iden.values)
logger.debug("found revenue " + chromo)
## Get syntenic neighbouring blocks of inversions
# neighbourSyn = getNeighbourSyn(invertedCoordsOri, synData, threshold)
neighbourSyn = getNeighbourSyn(invertedCoordsOri.aStart.values, invertedCoordsOri.aEnd.values, invertedCoordsOri.bStart.values, invertedCoordsOri.bEnd.values, invertedCoordsOri.index.values, invertedCoordsOri.bDir.values, synData.aStart.values, synData.aEnd.values, synData.bStart.values, synData.bEnd.values, synData.index.values, synData.bDir.values, threshold)
logger.debug("found neighbours " + chromo)
synBlockScore = [(i.aLen + i.bLen)*i.iden for index, i in synData.iterrows()]
## Calculate the cost of adding an inversion, i.e. the sum of all synblocks which need to be removed to accommodate the inversion
cost = getCost(synPath, shortest, neighbourSyn, synBlockScore, synData, invertedCoordsOri)
logger.debug("found cost " + chromo)
## Calculate profit (or loss) associated with the addition of an inversion
profit = []
for i in range(len(revenue)):
profit = profit + [[revenue[i][j] - cost[i][j] for j in range(len(revenue[i]))]]
logger.debug("found profit " + chromo)
## Create list of all profitable inversions
##invPos are 0-indexed positions of inverted alignments in the invertedCoordsOri object
profitable = [inversion(cost[i][j], revenue[i][j],
getNeighbours(neighbourSyn, shortest[i][j]),shortest[i][j])
for i in range(len(profit)) for j in range(len(profit[i]))\
if profit[i][j] > (0.1*cost[i][j])] ##Select only those inversions for which the profit is more than 10% of the cost
logger.debug("found profitable " + chromo)
del(invblocks, revenue, neighbourSyn, shortest, synBlockScore)
collect()
#####################################################################
#### Find optimal set of inversions from all profitable inversions
#####################################################################
profitInvs = [p.profit for p in profitable]
if len(profitInvs) > 0:
lp = len(profitable)
iAStart = deque()
iAEnd = deque()
iBStart = deque()
iBEnd = deque()
for i in profitable:
iAStart.append(invertedCoordsOri.iat[i.invPos[0], 0])
iAEnd.append(invertedCoordsOri.iat[i.invPos[-1], 1])
iBStart.append(invertedCoordsOri.iat[i.invPos[-1], 3])
iBEnd.append(invertedCoordsOri.iat[i.invPos[0], 2])
iAStart = np.array(iAStart)
iAEnd = np.array(iAEnd)
iBStart = np.array(iBStart)
iBEnd = np.array(iBEnd)
scores = np.array([i.profit for i in profitable], dtype= int)
parents = np.array([-1]*lp, dtype = int)
totscore = scores.copy()
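# Dynamic programming over mutually compatible (non-overlapping) inversions: totscore[i] is the best chain profit ending at i, parents[] allows backtracking the optimal chain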
for i in range(lp):
nonOverlapA = np.where(iAStart > (iAEnd[i] - threshold))[0]
nonOverlapB = np.where(iBStart > (iBEnd[i] - threshold))[0]
childNodes = np.intersect1d(nonOverlapA, nonOverlapB, assume_unique=True) #.astype("uint32") + 1 ## two inversions can co-exist only if the overlap between them is less than threshold on both genomes
chIndex = np.where(scores[childNodes] + totscore[i] > totscore[childNodes])[0]
totscore[childNodes[chIndex]] = scores[childNodes[chIndex]] + totscore[i]
parents[childNodes[chIndex]] = i
maxid = totscore.argmax()
bestInvPath = deque([maxid])
i = maxid
while parents[i] != -1:
    bestInvPath.append(parents[i])
    i = parents[i]
bestInvPath = list(bestInvPath)[::-1]
else:
bestInvPath = []
logger.debug("found bestInvPath " + chromo)
invBlocksIndex = unlist([profitable[_i].invPos for _i in bestInvPath])
invData = invertedCoordsOri.iloc[invBlocksIndex]
badSyn = []
synInInv = []
for _i in bestInvPath:
invNeighbour = profitable[_i].neighbours
# synInInv = list(range(invNeighbour[0]+1, invNeighbour[1]))
invPos = profitable[_i].invPos
invCoord = [invertedCoordsOri.iat[invPos[0],0],invertedCoordsOri.iat[invPos[-1],1],invertedCoordsOri.iat[invPos[-1],3],invertedCoordsOri.iat[invPos[0],2]]
for _j in range(invNeighbour[0]+1, invNeighbour[1]):
sd = synData.iloc[_j][["aStart","aEnd","bStart","bEnd"]]
if (invCoord[0] - sd[0] < threshold) and (sd[1] - invCoord[1] < threshold) and (invCoord[2] - sd[2] < threshold) and (sd[3] - invCoord[3] < threshold):
synInInv.append(_j)
else:
badSyn.append(_j)
return(invertedCoordsOri, profitable, bestInvPath,invData, synInInv, badSyn)
<|end_of_text|>import cython
if not cython.compiled:
raise ImportError(f"{__file__} has not been compiled")
<|end_of_text|>cdef api double get_e():
print('Getting e...')
return 2.718281828
<|end_of_text|>from libc.stdint cimport uint32_t, uint64_t
cdef extern from "SFMT.h":
ctypedef void sfmt_t
cdef uint32_t gen_rand32(sfmt_t *ctx)
cdef uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit)
cdef uint64_t gen_rand64(sfmt_t *ctx)
cdef uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit)
cdef void fill_array32(sfmt_t *ctx, uint32_t *array, int size)
cdef void fill_array64(sfmt_t *ctx, uint64_t *array, int size)
cdef sfmt_t *init_gen_rand(uint32_t seed)
cdef sfmt_t *init_by_array(uint32_t *init_key, int key_length)
cdef void fini_gen_rand(sfmt_t *ctx)
cdef char *get_idstring()
cdef int get_min_array_size32()
cdef int get_min_array_size64()
cdef inline double to_real1(uint32_t v)
cdef inline double genrand_real1(sfmt_t *ctx)
cdef inline double to_real2(uint32_t v)
cdef inline double genrand_real2(sfmt_t *ctx)
cdef inline double to_real3(uint32_t v)
cdef inline double genrand_real3(sfmt_t *ctx)
cdef inline double to_res53(uint64_t v)
cdef inline double to_res53_mix(uint32_t x, uint32_t y)
cdef inline double genrand_res53(sfmt_t *ctx)
cdef inline double genrand_res53_mix(sfmt_t *ctx)
<|end_of_text|>#cython: boundscheck=False
#cython: wraparound=False
#cython: cdivision=True
"""
Code is based on the following excellent paper by Lawrence R. Rabiner:
https://web.ece.ucsb.edu/Faculty/Rabiner/ece259/Reprints/tutorial%20on%20hmm%20and%20applications.pdf
"""
from.utils cimport logsum_pair
cimport numpy as np
from joblib import Parallel
from joblib import delayed
import numpy as np
import time
DEF NEGINF = float("-inf")
DEF INF = float("inf")
cdef double[:,::1] forward_logprob(const long[::1] observation_sequence,
const double[:,::1] loga,
const double[:,::1] logb,
const double[::1] logpi):
"""
:param observation_sequence: observation sequence
:param loga: log of transition matrix
:param logb: log of observation matrix
:param logpi: log of initial state probabilities
:return: return the logarithm of the forward variable (alpha)
"""
T = observation_sequence.shape[0]
N = loga.shape[0]
cdef double[:,::1] log_alpha = np.empty((T, N), dtype=np.float64)
_forward_logprob(observation_sequence, loga, logb, logpi, log_alpha)
return log_alpha
cdef void _forward_logprob(const long[::1] observation_sequence,
const double[:,::1] loga,
const double[:,::1] logb,
const double[::1] logpi,
double[:,::1] log_alpha) nogil:
cdef int i, j, T, N, t
cdef double tmp
T = observation_sequence.shape[0]
N = loga.shape[0]
for j in range(N):
log_alpha[0, j] = logpi[j] + logb[j, observation_sequence[0]]
for t in range(1, T):
for j in range(N):
tmp = NEGINF # log probability of transition from any hidden at t-1 to state to j at t
for i in range(N):
tmp = logsum_pair(tmp, log_alpha[t - 1, i] + loga[i, j])
log_alpha[t, j] = tmp + logb[j, observation_sequence[t]]
cdef double _estimate_observation_logprob(const long[::1] observation_sequence,
const double[:,::1] loga,
const double[:,::1] logb,
const double[::1] logpi):
"""
estimate the log probability of an observation given model parameters
:param observation_sequence:
:param loga: log of transition matrix
:param logb: log of observation matrix
:param logpi: log of initial matrix
:return: log probability
"""
cdef double[:,::1] log_alpha = forward_logprob(observation_sequence,
loga, logb, logpi)
cdef int i, T, N
T = observation_sequence.shape[0]
N = loga.shape[0]
res = NEGINF
for i in range(N):
res = logsum_pair(res, log_alpha[T-1, i])
return res
cdef double[:,::1] backward_logprob(const long[::1] observation_sequence,
const double[:,::1] loga,
const double[:,::1] logb,
const double[::1] logpi):
"""
:param observation_sequence:
:param loga: log of transition matrix
:param logb: log of observation matrix
:param logpi: log of initial state probabilities
:return:
"""
cdef int T = observation_sequence.shape[0]
cdef int N = loga.shape[0]
cdef double[:,::1] log_beta = np.empty((T, N), dtype=np.float64)
_backward_logprob(observation_sequence, loga, logb, logpi, log_beta)
return log_beta
cdef void _backward_logprob(const long[::1] observation_sequence,
const double[:,::1] loga,
const double[:,::1] logb,
const double[::1] logpi,
double[:,::1] log_beta):
cdef int T = observation_sequence.shape[0]
cdef int N = loga.shape[0]
cdef int i,t,j
#log_beta[-1,:] = 0 #log(1) = 0
for i in range(N):
log_beta[T - 1, i] = 0
for t in range(T - 2, -1, -1):
for i in range(N):
tmp = NEGINF
for j in range(N):
tmp = logsum_pair(tmp,
log_beta[t + 1, j] +
loga[i, j] + logb[j, observation_sequence[t + 1]])
log_beta[t, i] = tmp
cpdef double[:,::1] state_logprob(const double[:,::1] log_alpha,
const double[:,::1] log_beta):
"""
log probability of being at state s at time t
:param log_alpha: log forward variable of a given observation sequence
:param log_beta: log backward variable of the same observation sequence
:return:
"""
cdef int T, N
T = log_alpha.shape[0]
N = log_alpha.shape[1]
cdef double[:,::1] log_gamma = np.empty((T, N), dtype=np.float64)
_state_logprob(log_alpha, log_beta, log_gamma)
return log_gamma
cdef void _state_logprob(const double[:,::1] log_alpha,
const double[:,::1] log_beta,
double[:,::1] log_gamma):
cdef int T, N, t, i, j
T = log_alpha.shape[0]
N = log_alpha.shape[1]
for t in range(T):
for i in range(N):
log_gamma[t, i] = log_alpha[t, i] + log_beta[t, i]
tmp = NEGINF
for j in range(N):
tmp = logsum_pair(tmp, log_alpha[t, j] + log_beta[t, j])
log_gamma[t, i] -= tmp
cpdef double[:,:,::1] double_state_prob(const long[::1] observation_sequence,
const double[:,::1] log_alpha,
const double[:,::1] log_beta,
const double[:,::1] loga,
const double[:,::1] logb):
"""
log probability from transition to two states at time t
:param observation_sequence:
:param log_alpha:
:param log_beta:
:param loga: log of transition matrix
:param logb: log of observation matrix
:return:
"""
cdef int N, T
N = loga.shape[0]
T = observation_sequence.shape[0]
cdef double[:,:,::1] ksi = np.empty((T - 1, N, N), dtype=np.float64)
_double_state_prob(observation_sequence, log_alpha, log_beta, loga, logb, ksi)
return ksi
cdef void _double_state_prob(const long[::1] observation_sequence,
const double[:,::1] log_alpha,
const double[:,::1] log_beta,
const double[:,::1] loga,
const double[:,::1] logb,
double[:,:,::1] ksi):
"""
:param observation_sequence:
:param log_alpha:
:param log_beta:
:param loga: log of transition matrix
:param logb: log of observation matrix
:param ksi:
:return:
"""
cdef int N, T, t, i, j
N = loga.shape[0]
T = observation_sequence.shape[0]
cdef double tmp
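# tmp accumulates the log normalising constant over all state pairs (i, j) at time t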
for t in range(T - 1):
tmp = NEGINF
for i in range(N):
for j in range(N):
tmp = logsum_pair(tmp, log_alpha[t, i] + loga[i, j] + logb[j, observation_sequence[t + 1]] + log_beta[t + 1, j])
ksi[t, i, j] = log_alpha[t, i] + loga[i, j] + logb[j, observation_sequence[t + 1]] + log_beta[t + 1, j]
for i in range(N):
for j in range(N):
ksi[t, i, j] -= tmp
cdef class BaumWelchBatch:
cdef public double[::1] log_pi_sum
#expected number of transitions from State Si
cdef public double[::1] log_gamma_sum
#expected number of times in state j
cdef public double[::1] log_gamma_full_sum
#expected number of transitions from State Si to State Sj
cdef public double[:,::1] log_ksi_sum
#expected number of times in state j and observing symbol v_k
cdef public double[:,::1] log_obs_sum
#
cdef public int stat_size
cdef double[:,::1] loga
cdef double[:,::1] logb
cdef double[::1] logpi
def __init__(self,
double[:,::1] loga,
double[:,::1] logb,
double[::1] logpi
):
cdef int N = loga.shape[0]
cdef int M = logb.shape[1]
self.loga = loga
self.logb = logb
self.logpi = logpi
self.log_pi_sum = np.full(N, NEGINF, dtype=np.float64)
self.log_ksi_sum = np.full((N,N), NEGINF, dtype=np.float64)
self.log_gamma_sum = np.empty(N, dtype=np.float64)
self.log_obs_sum = np.full((N, M), NEGINF, dtype=np.float64)
self.log_gamma_full_sum = np.full(N, NEGINF, dtype=np.float64)
self.stat_size = 0
def __getstate__(self):
state= {}
state['loga'] = np.asarray(self.loga)
state['logb'] = np.asarray(self.logb)
state['logpi'] = np.asarray(self.logpi)
state['log_pi_sum'] = np.asarray(self.log_pi_sum)
state['log_gamma_sum'] = np.asarray(self.log_gamma_sum)
state['log_gamma_full_sum'] = np.asarray(self.log_gamma_full_sum)
state['log_ksi_sum'] = np.asarray(self.log_ksi_sum)
state['log_obs_sum'] = np.asarray(self.log_obs_sum)
state['batch_size'] = self.stat_size
return state
def __setstate__(self, state):
self.logpi = state['logpi']
self.loga = state['loga']
self.logb = state['logb']
self.log_pi_sum = state['log_pi_sum']
self.log_gamma_sum = state['log_gamma_sum']
self.log_gamma_full_sum = state['log_gamma_full_sum']
self.log_ksi_sum = state['log_ksi_sum']
self.log_obs_sum = state['log_obs_sum']
self.stat_size = state['batch_size']
def __reduce__(self):
state=self.__getstate__()
return self.__class__, (state['loga'], state['logb'], state['logpi'],), state
cpdef void fit_sequence(self, const long[::1] observation_sequence):
"""
fits an observation sequence and accumulates the log statistics pi_sum,
gamma_sum, gamma_full_sum, ksi_sum, obs_sum
mainly used for parallel processing
:param observation_sequence:
:return:
"""
#_forward_logprob(data[row_index], self.loga, self.logb, self.logpi, log_alpha)
cdef double[:,::1] log_alpha = forward_logprob(observation_sequence, self.loga, self.logb, self.logpi)
cdef double[:, ::1] log_beta = backward_logprob(observation_sequence, self.loga, self.logb, self.logpi)
#_backward_logprob(data[row_index], self.loga, self.logb, self.logpi, log_beta)
cdef double[:, ::1] log_gamma = state_logprob(log_alpha, log_beta)
#_state_logprob(log_alpha, log_beta, log_gamma)
cdef double[:,:,::1] log_ksi = double_state_prob(observation_sequence,log_alpha, log_beta, self.loga, self.logb)
#_double_state_prob(data[row_index], log_alpha, log_beta, self.loga, self.logb, log_ksi)
cdef int T = observation_sequence.shape[0]
cdef int N = self.loga.shape[0]
cdef int M = self.logb.shape[1]
for i in range(N):
self.log_pi_sum[i] = logsum_pair(self.log_pi_sum[i], log_gamma[0,i])
#expected number of transitions from State Si to State Sj
for i in range(N):
for j in range(N):
tmp = NEGINF
for t in range(T-1):
tmp = logsum_pair(tmp, log_ksi[t, i,j])
self.log_ksi_sum[i, j] = logsum_pair(tmp, self.log_ksi_sum[i, j])
#expected number of transitions from State Si
for i in range(N):
tmp = NEGINF
for t in range(T-1):
tmp = logsum_pair(tmp, log_gamma[t, i])
self.log_gamma_sum[i] = tmp
#expected number of times in state j and observing symbol v_k
for t in range(T):
for j in range(N):
#data[row_index][t] is k in paper
self.log_obs_sum[j, observation_sequence[t]] = logsum_pair(self.log_obs_sum[j, observation_sequence[t]],
log_gamma[t, j])
#expected number of times in state j
for i in range(N):
tmp = NEGINF
for t in range(T):
tmp = logsum_pair(tmp, log_gamma[t, i])
self.log_gamma_full_sum[i] = tmp
self.stat_size+= 1
cdef BaumWelchBatch combine_run_pair(BaumWelchBatch brun1,
BaumWelchBatch brun2):
"""
combines two Baum-Welch runs (brun1 and brun2)
into a new BaumWelchBatch instance
:param brun1: BaumWelchBatch instance
:param brun2: BaumWelchBatch instance
:return: BaumWelchBatch instance that combines brun1 and brun2
"""
cdef BaumWelchBatch out = BaumWelchBatch(brun1.loga, brun1.logb, brun1.logpi)
cdef int N = brun1.log_pi_sum.shape[0]
cdef int M = brun1.log_obs_sum.shape[1]
for i in range(N):
out.log_pi_sum[i] = logsum_pair(brun1.log_pi_sum[i],
brun2.log_pi_sum[i])
out.log_gamma_sum[i] = logsum_pair(brun1.log_gamma_sum[i],
brun2.log_gamma_sum[i])
out.log_gamma_full_sum[i] = logsum_pair(brun1.log_gamma_full_sum[i],
brun2.log_gamma_full_sum[i])
for i in range(N):
for j in range(M):
out.log_obs_sum[i][j] = logsum_pair(brun1.log_obs_sum[i][j],
brun2.log_obs_sum[i][j])
for j in range(N):
out.log_ksi_sum[i][j] = logsum_pair(brun1.log_ksi_sum[i][j],
brun2.log_ksi_sum[i][j])
out.stat_size = brun1.stat_size + brun2.stat_size
return out
cdef HiddenMarkovModel combine_batches(BaumWelchBatch[:] baum_welch_batches):
cdef int i, j
cdef BaumWelchBatch res = baum_welch_batches[0]
# start at index 1: batch 0 is already in res
for i in range(1, baum_welch_batches.shape[0]):
res = combine_run_pair(res, baum_welch_batches[i])
cdef int N = res.log_pi_sum.shape[0]
cdef int M = res.log_obs_sum.shape[1]
cdef double[::1] logpi = np.empty(N, dtype=np.float64)
cdef double[:,::1] loga = np.empty((N, N), dtype=np.float64)
cdef double[:,::1] logb = np.empty((N, M), dtype=np.float64)
for i in range(N):
logpi[i] = res.log_pi_sum[i] - np.log(res.stat_size)
for j in range(N):
loga[i,j] = res.log_ksi_sum[i,j] - res.log_gamma_sum[i]
for j in range(M):
logb[i,j] = res.log_obs_sum[i,j] - res.log_gamma_full_sum[i]
return HiddenMarkovModel(np.exp(np.array(loga)),
np.exp(np.array(logb)),
np.exp(np.array(logpi)))
cdef class HiddenMarkovModel:
cdef double[:,::1] _loga
cdef double[:,::1] _logb
cdef double[::1] _logpi
@property
def start_probabilities(self):
return np.exp(self._logpi)
@property
def transition_probabilities(self):
return np.exp(self._loga)
@property
def observation_probabilities(self):
return np.exp(self._logb)
@property
def loga(self):
return self._loga
@property
def logb(self):
return self._logb
@property
def logpi(self):
return self._logpi
def __init__(self, transition_probabilities, observation_probabilities,
start_probabilities, states=None, symbols=None):
self._loga = np.log(transition_probabilities)
self._logb = np.log(observation_probabilities)
self._logpi = np.log(start_probabilities)
def __getstate__(self):
state = {}
state['loga'] = np.asarray(self._loga)
state['logb'] = np.asarray(self._logb)
state['logpi'] = np.asarray(self._logpi)
return state
def __setstate__(self, state):
self._logpi = state['logpi']
self._loga = state['loga']
self._logb = state['logb']
def __reduce__(self):
state = self.__getstate__()
return self.__class__, (np.exp(state['loga']), np.exp(state['logb']), np.exp(state['logpi']),), state
def forward(self, observation_sequence):
return np.exp(forward_logprob(observation_sequence, self._loga, self._logb, self._logpi))
def backward(self, observation_sequence):
return np.exp(backward_logprob(observation_sequence, self._loga, self._logb, self._logpi))
cpdef double observation_log_probability(self, long[::1] observation_sequence):
return _estimate_observation_logprob(observation_sequence, self._loga, self._logb, self._logpi)
@classmethod
def from_batches(cls, batches):
"""
returns a HiddenMarkovModel that is the combination of multiple BaumWelchBatch runs that
are usually run in parallel
:param batches: list of BaumWelchBatch instances
:return: HiddenMarkovModel instance
"""
return combine_batches(np.array(batches))
def sample(self, n=None, length=None, random_state=None):
"""
returns hidden states and sequences generated from the model
:param n: number of samples, default is 1
:param length: length of each sequence
:param random_state: random state used for generating sequences
:return: hidden states, sequences
"""
if random_state is None:
random_state = np.random.get_state()[1][0]
np.random.seed(random_state)
if length is None:
length = int(np.random.choice(range(5, 30)))
if n is None:
return self._generate_sequence(length, random_state)
else:
return [self._generate_sequence(length, random_state) for _ in range(n)]
cdef tuple _generate_sequence(self, int length, long random_state):
#np.random.seed(random_state)
cdef np.ndarray out_states = np.empty(length, dtype=np.int_)
cdef np.ndarray out_sequence = np.empty(length, dtype=np.int_)
cdef int N = self._loga.shape[0]
cdef int M = self._logb.shape[1]
cdef np.ndarray A = np.exp(self._loga)
cdef np.ndarray B = np.exp(self._logb)
cdef np.ndarray pi = np.exp(self._logpi)
cdef int current_state = np.random.choice(N, p=pi)
for i in range(length):
out_states[i] = current_state
out_sequence[i] = np.random.choice(M, p=B[current_state,:].flatten())
current_state = np.random.choice(N, p=A[current_state,:].flatten())
return out_states, out_sequence
def fit(self, data, jobs=-1, batch_size=1000, stop_threshold=1E-9, min_iterations=0, max_iterations=1000):
"""
estimate model parameters using the Baum-Welch algorithm
:param data: iterable of integer-encoded observation sequences
:param jobs: number of parallel joblib jobs (-1 uses all available cores)
:param batch_size: number of sequences per worker batch
:param stop_threshold: minimum log-likelihood improvement required to keep iterating
:param min_iterations: always run at least this many iterations
:param max_iterations: hard cap on the number of iterations
:return: None; model parameters are re-estimated in place
"""
def generate_batches(data, batch_size):
start, end = 0, batch_size
while start < len(data):
yield data[start:end]
start += batch_size
end += batch_size
def fit_worker(data):
batch = BaumWelchBatch(self._loga, self._logb, self._logpi)
for d in data:
batch.fit_sequence(d)
return batch
with Parallel(n_jobs=jobs, backend='threading') as parallel:
f2 = delayed(lambda x: sum(map(self.observation_log_probability, x)))
log_prob_sum = sum(parallel(f2(batch) for batch in generate_batches(data, batch_size)))
iteration = 0
improvement = INF
while improvement > stop_threshold or iteration < min_iterations + 1:
print(f'iteration {iteration +1}')
s = time.time()
with Parallel(n_jobs=jobs, backend='threading') as parallel:
    f = delayed(fit_worker)
    baum_welch_runs = parallel(f(batch) for batch in generate_batches(data, batch_size))
    new_model = HiddenMarkovModel.from_batches(baum_welch_runs)
    # adopt the re-estimated parameters so the next iteration improves on them
    self._loga = new_model.loga
    self._logb = new_model.logb
    self._logpi = new_model.logpi
    f2 = delayed(lambda x: sum(map(new_model.observation_log_probability, x)))
    new_logprob_sum = sum(parallel(f2(batch) for batch in generate_batches(data, batch_size)))
improvement = new_logprob_sum - log_prob_sum
print(f'improvement = {improvement:.18f}')
log_prob_sum = new_logprob_sum
if iteration >= max_iterations:
break
iteration += 1
e = time.time()
print(f'took {e-s}s')
<|end_of_text|>#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# distutils: language = c++
import numpy as np
import cuml.internals
from cuml.common.array import CumlArray
from cuml.raft.common.handle cimport handle_t
from cuml.common import input_to_cuml_array
from cuml.common.opg_data_utils_mg cimport *
from cuml.common.opg_data_utils_mg import _build_part_inputs
import rmm
from libc.stdlib cimport calloc, malloc, free
from cython.operator cimport dereference as deref
from libc.stdint cimport uintptr_t
from libcpp cimport bool
from libcpp.memory cimport shared_ptr
from cuml.neighbors.nearest_neighbors_mg import NearestNeighbors
from cudf.core import DataFrame as cudfDataFrame
class KNeighborsMG(NearestNeighbors):
def __init__(self, batch_size=1024, **kwargs):
super(KNeighborsMG, self).__init__(**kwargs)
self.batch_size = batch_size
def get_out_type(self, data, query):
if len(data) > 0:
self._set_base_attributes(output_type=data[0])
out_type = self.output_type
if len(query) > 0:
out_type = self._get_output_type(query[0])
cuml.internals.set_api_output_type(out_type)
return out_type
def gen_local_input(self, data, data_parts_to_ranks, data_nrows,
query, query_parts_to_ranks, query_nrows,
ncols, rank, convert_dtype):
data_dask = [d[0] for d in data]
self.n_dims = ncols
data_cai, data_local_parts, data_desc = \
_build_part_inputs(data_dask, data_parts_to_ranks, data_nrows,
ncols, rank, convert_dtype)
query_cai, query_local_parts, query_desc = \
_build_part_inputs(query, query_parts_to_ranks, query_nrows,
ncols, rank, convert_dtype)
return {
'data': {
'local_parts': <uintptr_t>data_local_parts,
'desc': <uintptr_t>data_desc
},
'query': {
'local_parts': <uintptr_t>query_local_parts,
'desc': <uintptr_t>query_desc
},
'cais': {
'data': data_cai,
'query': query_cai
},
}
def gen_local_output(self, data, convert_dtype, dtype):
cdef vector[int_ptr_vector] *out_local_parts_i32
cdef vector[float_ptr_vector] *out_local_parts_f32
outputs = [d[1] for d in data]
n_out = len(outputs)
if dtype == 'int32':
out_local_parts_i32 = new vector[int_ptr_vector](<int>n_out)
elif dtype == 'float32':
out_local_parts_f32 = new vector[float_ptr_vector](<int>n_out)
else:
raise ValueError('Wrong dtype')
outputs_cai = []
for i, arr in enumerate(outputs):
for j in range(arr.shape[1]):
if isinstance(arr, cudfDataFrame):
col = arr.iloc[:, j]
else:
col = arr[:, j]
out_ai, _, _, _ = \
input_to_cuml_array(col, order="F",
convert_to_dtype=(dtype
if convert_dtype
else None),
check_dtype=[dtype])
outputs_cai.append(out_ai)
if dtype == 'int32':
out_local_parts_i32.at(i).push_back(<int*><uintptr_t>
out_ai.ptr)
else:
out_local_parts_f32.at(i).push_back(<float*><uintptr_t>
out_ai.ptr)
return {
'outputs':
<uintptr_t>out_local_parts_i32 if dtype == 'int32'
else <uintptr_t>out_local_parts_f32,
'cais': [outputs_cai]
}
def alloc_local_output(self, local_query_rows):
cdef vector[int64Data_t*] *indices_local_parts \
= new vector[int64Data_t*]()
cdef vector[floatData_t*] *dist_local_parts \
= new vector[floatData_t*]()
indices_cai = []
dist_cai = []
for n_rows in local_query_rows:
i_cai = CumlArray.zeros(shape=(n_rows, self.n_neighbors),
order="C", dtype=np.int64)
d_cai = CumlArray.zeros(shape=(n_rows, self.n_neighbors),
order="C", dtype=np.float32)
indices_cai.append(i_cai)
dist_cai.append(d_cai)
indices_local_parts.push_back(new int64Data_t(
<int64_t*><uintptr_t>i_cai.ptr, n_rows * self.n_neighbors))
dist_local_parts.push_back(new floatData_t(
<float*><uintptr_t>d_cai.ptr, n_rows * self.n_neighbors))
return {
'indices': <uintptr_t>indices_local_parts,
'distances': <uintptr_t>dist_local_parts,
'cais': {
'indices': indices_cai,
'distances': dist_cai
}
}
def free_mem(self, input, result=None):
cdef floatData_t *f_ptr
cdef vector[floatData_t*] *f_lp
for input_type in ['data', 'query']:
ilp = input[input_type]['local_parts']
f_lp = <vector[floatData_t *]*><uintptr_t>ilp
for i in range(f_lp.size()):
f_ptr = f_lp.at(i)
free(<void*>f_ptr)
free(<void*><uintptr_t>f_lp)
free(<void*><uintptr_t>input[input_type]['desc'])
cdef int64Data_t *i64_ptr
cdef vector[int64Data_t*] *i64_lp
if result:
f_lp = <vector[floatData_t *]*><uintptr_t>result['distances']
for i in range(f_lp.size()):
f_ptr = f_lp.at(i)
free(<void*>f_ptr)
free(<void*><uintptr_t>f_lp)
i64_lp = <vector[int64Data_t *]*><uintptr_t>result['indices']
for i in range(i64_lp.size()):
i64_ptr = i64_lp.at(i)
free(<void*>i64_ptr)
free(<void*><uintptr_t>i64_lp)
<|end_of_text|># Code below is autogenerated by pyx2pxd - https://github.com/HenriquesLab/pyx2pxd
cdef float[:, :, :] _calculate_ccm(float[:, :, :] img_stack, int ref)
cdef float[:, :, :] _calculate_ccm_from_ref(float[:, :, :] img_stack, float[:, :] img_ref)
cdef float[:, :] _calculate_slice_ccm(float[:, :] img_ref, float[:, :] img_slice)
cdef void _normalize_ccm(float[:, :] img_ref, float[:, :] img_slice, float[:, :] ccm_slice) nogil
cdef float[:,:,:] _calculate_rccm(float[:, :] img_slice, float[:, :] img_ref)
cdef float[:,:] _calculate_ccm_cartesian(float[:, :] img_slice, float[:, :] img_ref)
cdef float[:,:] _calculate_ccm_polar(float[:, :] img_slice, float[:, :] img_ref)
cdef float[:,:] _calculate_ccm_logpolar(float[:, :] img_slice, float[:, :] img_ref)
<|end_of_text|># -*- coding: utf-8 -*-
# Copyright 1999-2022 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libc.stdint cimport *
from libc.string cimport *
cdef class Encoder:
cdef int _pos
cdef int _buf_len
cdef int _last_error
cdef char *_buf_ptr
cdef init(self, char *buf_ptr, int buf_len)
cdef int position(self) nogil
cdef int get_last_error(self) nogil
cdef void set_last_error(self, int errno) nogil
cdef int append_tag(self, int field_num, int wire_type) nogil
cdef int append_sint32(self, int32_t value) nogil
cdef int append_uint32(self, uint32_t value) nogil
cdef int append_sint64(self, int64_t value) nogil
cdef int append_uint64(self, uint64_t value) nogil
cdef int append_bool(self, bint value) nogil
cdef int append_float(self, float value) nogil
cdef int append_double(self, double value) nogil
cdef int append_string(self, const char *ptr, int value_len) nogil
cdef int _set_varint32(self, int32_t varint) nogil
cdef int _set_varint64(self, int64_t varint) nogil
cdef int _set_signed_varint32(self, int32_t varint) nogil
cdef int _set_signed_varint64(self, int64_t varint) nogil
<|end_of_text|># Copyright (c) 2013 The University of Edinburgh.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Python bindings of SCOREP library
# For MPI profiling to work, this MUST be imported before MPI.
cimport scorep_user as user
cimport scorep_internals as internals
from libc.stdint cimport uintptr_t
import atexit
cdef dlopen_libscorep(name):
"""
Ensure the library NAME is dlopen'd and its symbols resolved.
Necessary to intercept MPI profiling interface at the right time.
"""
cdef void *handle = NULL
cdef int mode = internals.RTLD_NOW | internals.RTLD_GLOBAL
cdef char *libname = name
handle = internals.dlopen(libname, mode)
if handle is NULL:
raise RuntimeError(
"Unable to force loading of %s is it in LD_LIBRARY_PATH?" % name)
def initialize():
"""Initialize the measurement system"""
dlopen_libscorep("libscorep_mpi.so")
internals.SCOREP_InitMeasurement()
@atexit.register
def finalize():
"""Finalize the measurement system"""
internals.SCOREP_FinalizeMeasurement()
# Cache of user-defined region handles.
_region_handles = dict()
cdef user.SCOREP_SourceFileHandle last_file_handle = 0
cdef char *last_file_name = NULL
def region_begin(name):
"""Start recording a region marked by NAME"""
cdef user.SCOREP_User_RegionHandle handle
cdef char *c_name = name
cdef user.SCOREP_User_RegionType c_type
# We should take an argument that decides what type to use
c_type = user.SCOREP_USER_REGION_TYPE_COMMON
h = _region_handles.get(name)
if h:
# Convert the cached value into a handle
handle = <user.SCOREP_User_RegionHandle><uintptr_t>h
else:
handle = user.SCOREP_USER_INVALID_REGION
# Actually register the region beginning
user.SCOREP_User_RegionBegin(&handle, &last_file_name,
&last_file_handle,
c_name, c_type,
"test.py", 1)
# Map the handle back into something we can cache
_region_handles[name] = <uintptr_t>handle
def region_end(name):
"""Finish recording the region denoted by NAME"""
cdef user.SCOREP_User_RegionHandle handle
h = _region_handles.get(name)
if h:
# Handle should be in the cache
handle = <user.SCOREP_User_RegionHandle><uintptr_t>h
else:
# region_end without matching region_begin
raise RuntimeError("Could not find handle for region %s" % name)
user.SCOREP_User_RegionEnd(handle)
def region_init(name):
"""Register a region marked by NAME"""
cdef user.SCOREP_User_RegionHandle handle
cdef char *c_name = name
cdef user.SCOREP_User_RegionType c_type
c_type = user.SCOREP_USER_REGION_TYPE_COMMON
h = _region_handles.get(name)
if h:
# Convert the cached value into a handle
handle = <user.SCOREP_User_RegionHandle><uintptr_t>h
else:
handle = user.SCOREP_USER_INVALID_REGION
# Register the region
user.SCOREP_User_RegionInit(&handle, &last_file_name,
&last_file_handle,
c_name, c_type,
"test.py", 1)
# Cache
_region_handles[name] = <uintptr_t>handle
def region_enter(name):
"""Generate an enter event for the specified region by NAME"""
cdef user.SCOREP_User_RegionHandle handle
h = _region_handles.get(name)
if h:
# Handle should be in the cache
handle = <user.SCOREP_User_RegionHandle><uintptr_t>h
else:
# region_end without matching region_init
raise RuntimeError("Could not find handle for region %s" % name)
user.SCOREP_User_RegionEnter(handle)
def rewind_region_enter(name):
"""Generate an enter event for the specified rewind region by NAME"""
cdef user.SCOREP_User_RegionHandle handle
h = _region_handles.get(name)
if h:
# Handle should be in the cache
handle = <user.SCOREP_User_RegionHandle><uintptr_t>h
else:
# region_end without matching region_init
raise RuntimeError("Could not find handle for region %s" % name)
user.SCOREP_User_RewindRegionEnter(handle)
def enable_recording():
"""Enables Recording"""
user.SCOREP_User_EnableRecording()
def disable_recording():
"""Disables Recording"""
user.SCOREP_User_DisableRecording()
def recording_enabled():
"""Check if recording is enabled"""
return user.SCOREP_User_RecordingEnabled()
# Run initialization routine on module import
initialize()
<|end_of_text|>import csv
from itertools import chain
from radix import Radix
from utils import File2
cdef list PRIVATE4 = ['0.0.0.0/8', '10.0.0.0/8', '100.64.0.0/10', '127.0.0.0/8', '169.254.0.0/16', '172.16.0.0/12',
'192.0.0.0/24', '192.0.2.0/24', '192.31.196.0/24', '192.52.193.0/24', '192.88.99.0/24', '192.168.0.0/16',
'192.175.48.0/24', '198.18.0.0/15', '198.51.100.0/24', '203.0.113.0/24', '240.0.0.0/4',
'255.255.255.255/32']
cdef list PRIVATE6 = ['::1/128', '::/128', '::ffff:0:0/96', '64:ff9b::/96', '100::/64', '2001::/23', '2001::/32', '2001:1::1/128',
'2001:2::/48', '2001:3::/32', '2001:4:112::/48', '2001:5::/32', '2001:10::/28', '2001:20::/28',
'2001:db8::/32', '2002::/16', '2620:4f:8000::/48', 'fc00::/7', 'fe80::/10']
cdef str MULTICAST4 = '224.0.0.0/3'
cdef str MULTICAST6 = 'FF00::/8'
class RoutingTable(Radix):
@classmethod
def private(cls, inet='both'):
rt = cls()
rt.add_private(inet=inet, remove=False)
rt.add_default()
return rt
@classmethod
def ip2as(cls, filename):
rt = cls()
with File2(filename) as f:
f.readline()
for prefix, asn in csv.reader(f):
try:
rt.add_prefix(int(asn), prefix)
except TypeError:
print(asn, prefix)
raise
return rt
def __init__(self):
super().__init__()
def __getitem__(self, item):
return self.search_best(item).data['asn']
def __setitem__(self, key, value):
self.add(key).data['asn'] = value
def add_default(self):
self.add_prefix(0, '0.0.0.0/0')
def add_ixp(self, str network=None, masklen=None, packed=None, remove=True):
if remove:
covered = self.search_covered(network, masklen) if network and masklen else self.search_covered(network)
for node in covered:
try:
self.delete(node.prefix)
except KeyError:
pass
self.add_prefix(-1, network)
def add_prefix(self, int asn, *args, **kwargs):
node = self.add(*args, **kwargs)
node.data['asn'] = asn
def add_multicast(self, str inet='both', bint remove=True):
prefixes = []
if inet in ('ipv4', 'both'):
    prefixes.append(MULTICAST4)
if inet in ('ipv6', 'both'):
    prefixes.append(MULTICAST6)
for prefix in prefixes:
if remove:
for node in self.search_covered(prefix):
self.delete(node.prefix)
self.add_prefix(-3, prefix)
def add_private(self, str inet='both', bint remove=True):
if inet == 'both':
prefixes = chain(PRIVATE4, PRIVATE6)
elif inet == 'ipv4':
prefixes = PRIVATE4
elif inet == 'ipv6':
prefixes = PRIVATE6
else:
raise Exception('Unknown INET {}'.format(inet))
for prefix in prefixes:
if remove:
nodes = self.search_covered(prefix)
for node in nodes:
self.delete(node.prefix)
self.add_prefix(-2, prefix)
def add_rir(self, rir, ixp_asns):
rirrows = []
for address, prefixlen, asn in rir:
if asn not in ixp_asns:
if not self.search_covering(address, prefixlen):
rirrows.append((address, prefixlen, asn))
for address, prefixlen, asn in rirrows:
self.add_prefix(asn, address, prefixlen)
def isglobal(self, str address):
return self[address] >= -1
cpdef bint valid(long asn) except -1:
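    # public ASNs only: AS_TRANS (23456), reserved/private 16-bit ASNs (>= 64496) and reserved/private 32-bit ASNs (>= 4200000000) are invalid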
return (asn != 23456 and 0 < asn < 64496) or 131071 < asn < 4200000000
include "overflow_check.pxi"
<|end_of_text|>#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
cimport c_amqp_definitions
cimport c_message
cimport c_link
cimport c_amqpvalue
cdef extern from "azure_uamqp_c/message_receiver.h":
cdef enum MESSAGE_RECEIVER_STATE_TAG:
MESSAGE_RECEIVER_STATE_IDLE,
MESSAGE_RECEIVER_STATE_OPENING,
MESSAGE_RECEIVER_STATE_OPEN,
MESSAGE_RECEIVER_STATE_CLOSING,
MESSAGE_RECEIVER_STATE_ERROR
ctypedef struct MESSAGE_RECEIVER_HANDLE:
pass
ctypedef c_amqpvalue.AMQP_VALUE (*ON_MESSAGE_RECEIVED)(const void* context, c_message.MESSAGE_HANDLE message)
ctypedef void (*ON_MESSAGE_RECEIVER_STATE_CHANGED)(const void* context, MESSAGE_RECEIVER_STATE_TAG new_state, MESSAGE_RECEIVER_STATE_TAG previous_state)
MESSAGE_RECEIVER_HANDLE messagereceiver_create(c_link.LINK_HANDLE link, ON_MESSAGE_RECEIVER_STATE_CHANGED on_message_receiver_state_changed, void* context)
void messagereceiver_destroy(MESSAGE_RECEIVER_HANDLE message_receiver)
int messagereceiver_open(MESSAGE_RECEIVER_HANDLE message_receiver, ON_MESSAGE_RECEIVED on_message_received, void* callback_context)
int messagereceiver_close(MESSAGE_RECEIVER_HANDLE message_receiver)
int messagereceiver_get_link_name(MESSAGE_RECEIVER_HANDLE message_receiver, const char** link_name)
int messagereceiver_get_received_message_id(MESSAGE_RECEIVER_HANDLE message_receiver, c_amqp_definitions.delivery_number* message_number)
int messagereceiver_send_message_disposition(MESSAGE_RECEIVER_HANDLE message_receiver, const char* link_name, c_amqp_definitions.delivery_number message_number, c_amqpvalue.AMQP_VALUE delivery_state)
void messagereceiver_set_trace(MESSAGE_RECEIVER_HANDLE message_receiver, bint trace_on)
<|end_of_text|>from libc.stdlib cimport malloc, qsort, free
from libc.string cimport memcpy
from libc.math cimport fabs
cdef int n, m, k
cdef double **matrix
cdef double **new_matrix
cdef double **old_matrix
cdef int i, j, l, h
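# Iterative stencil filter: each sweep subtracts weighted sums over concentric square rings from every cell, stopping when the per-sweep update norm drops below eps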
with open("input.txt", "r") as fin:
n, m, k = map(int, fin.readline().split())
matrix = <double**> malloc(n*sizeof(double *))
new_matrix = <double**> malloc(n*sizeof(double *))
old_matrix = <double**> malloc(n*sizeof(double *))
for i in range(n):
matrix[i] = <double *> malloc(m * sizeof(double))
new_matrix[i] = <double *> malloc(m * sizeof(double))
old_matrix[i] = <double *> malloc(m * sizeof(double))
for j, fff in enumerate(fin.readline().split()):
matrix[i][j] = float(fff)
new_matrix[i][j] = 0
old_matrix[i][j] = 0
cdef double *coefficients = <double*> malloc(k*sizeof(double))
cdef int power = 1
cdef int side = 1
cdef int N
for l in range(k):
N = side * 4 + 4
power *= 2
side += 2
    coefficients[l] = 1.0 / power / N
cdef double eps = 0.00000001
cdef int step_max = 100
cdef double norm_sum = 0
cdef double amount
cdef int x1, x2, y1, y2, x, y
for l in range(step_max):
    norm_sum = 0  # reset the per-sweep residual so the stopping test below is meaningful
for i in range(n):
for j in range(m):
new_matrix[i][j] = matrix[i][j]
for h in range(k):
amount = 0
x1 = i - h - 1
x2 = i + h + 1
for y in range(j - h - 1, j + h + 2):
                    if 0 <= x1 < n and 0 <= y < m:
amount += new_matrix[x1][y]
if 0 <= x2 < n and 0 <= y < m:
amount += new_matrix[x2][y]
y1 = j - h - 1
y2 = j + h + 1
for x in range(i - h, i + h + 1):
if 0 <= y1 < m and 0 <= x < n:
amount += new_matrix[x][y1]
if 0 <= y2 < m and 0 <= x < n:
amount += new_matrix[x][y2]
                new_matrix[i][j] -= coefficients[h] * amount
sub = fabs(new_matrix[i][j] - old_matrix[i][j])
norm_sum = max(sub, norm_sum)
if norm_sum <= eps:
break
for i in range(n):
for j in range(m):
old_matrix[i][j] = new_matrix[i][j]
with open("output.txt", "w") as fout:
for i in range(n):
for j in range(m):
fout.write(" %.5f " % new_matrix[i][j])
fout.write("\n")
free(coefficients)
# free each row buffer before the row-pointer arrays to avoid leaking them
for i in range(n):
    free(matrix[i])
    free(old_matrix[i])
    free(new_matrix[i])
free(matrix)
free(old_matrix)
free(new_matrix)<|end_of_text|>import numpy as np
cimport numpy as np
class GradientDescent:
def __init__(self):
# --- BackProp parameters ---
# learning rate (0.1-0.001, down to 1e-7 for RNNs)
self.alpha = 0.1
# alpha decay (0.999; 1.0 = disabled)
self.alphadecay = 1.0
# momentum parameters (0.1 or 0.9)
self.momentum = 0.0
self.momentumvector = None
# --- RProp parameters ---
self.rprop = False
# maximum step width (1 - 20)
self.deltamax = 5.0
# minimum step width (0.01 - 1e-6)
self.deltamin = 0.01
# the remaining parameters do not normally need to be changed
self.deltanull = 0.1
self.etaplus = 1.2
self.etaminus = 0.5
self.lastgradient = None
def init(self, values):
assert isinstance(values, np.ndarray)
self.values = values.copy()
if self.rprop:
self.lastgradient = np.zeros(len(values), dtype='float64')
self.rprop_theta = self.lastgradient + self.deltanull
self.momentumvector = None
else:
self.lastgradient = None
self.momentumvector = np.zeros(len(values))
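# Minimal usage sketch (hypothetical parameter vector; the actual gradient
# step logic lives elsewhere in the original module):
# gd = GradientDescent()
# gd.rprop = True
# gd.init(np.zeros(10))
# assert gd.rprop_theta.shape == (10,)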
<|end_of_text|>"""Cython definitions for the quickhull library."""
from libcpp.vector cimport vector
cdef extern from "Structs/Vector3.hpp" namespace "quickhull":
cdef cppclass Vector3[T]:
Vector3()
Vector3(T x, T y, T z)
T x
T y
T z
cdef extern from "Structs/VertexDataSource.hpp" namespace "quickhull":
cdef cppclass VertexDataSource[T]:
VertexDataSource()
VertexDataSource(const Vector3[T] * ptr, size_t count)
VertexDataSource(const vector[Vector3[T]]& vec)
size_t size()
const Vector3[T]& operator[](size_t index)
const Vector3[T] * begin()
const Vector3[T] * end()
cdef extern from "ConvexHull.hpp" namespace "quickhull":
cdef cppclass ConvexHull[T]:
ConvexHull()
vector[size_t]& getIndexBuffer()
VertexDataSource[T]& getVertexBuffer()
cdef extern from "QuickHull.hpp" namespace "quickhull":
cdef cppclass QuickHull[FloatType]:
QuickHull()
ConvexHull[FloatType] getConvexHull(
const vector[Vector3[FloatType]]& pointCloud,
bint CCW,
bint useOriginalIndices
)
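# Minimal usage sketch from a .pyx module (hypothetical point cloud; these are
# declaration-only bindings, so the calls below belong in implementation code):
# cdef vector[Vector3[float]] pts
# pts.push_back(Vector3[float](0.0, 0.0, 0.0))
# pts.push_back(Vector3[float](1.0, 0.0, 0.0))
# pts.push_back(Vector3[float](0.0, 1.0, 0.0))
# pts.push_back(Vector3[float](0.0, 0.0, 1.0))
# cdef QuickHull[float] qh
# cdef ConvexHull[float] hull = qh.getConvexHull(pts, True, False)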
<|end_of_text|># Copyright Lars Buitinck / University of Amsterdam 2013
cimport cython
cimport numpy as np
import numpy as np
np.import_array()
cdef np.float64_t NEGINF = -np.inf
@cython.boundscheck(False)
@cython.wraparound(False)
def bestfirst(np.ndarray[ndim=2, dtype=np.float64_t] score,
trans_score,
np.ndarray[ndim=2, dtype=np.float64_t] trans,
np.ndarray[ndim=1, dtype=np.float64_t, mode="c"] init,
np.ndarray[ndim=1, dtype=np.float64_t, mode="c"] final):
"""First-order heuristic best-first decoder.
See viterbi for the arguments. score may be overwritten.
trans_score is not supported yet.
"""
cdef:
np.ndarray[ndim=1, dtype=np.npy_intp, mode="c"] path
np.float64_t candidate, maxval
np.npy_intp i, j, maxind, n_samples, n_states
if trans_score is not None:
raise NotImplementedError("No transition scores for bestfirst yet.")
n_samples, n_states = score.shape[0], score.shape[1]
path = np.empty(n_samples, dtype=np.intp)
score[0] += init
score[n_samples - 1] += final
path[0] = np.argmax(score[0])
for i in range(1, n_samples):
maxind = 0
maxval = NEGINF
for j in range(n_states):
candidate = trans[path[i - 1], j] + score[i, j]
if candidate > maxval:
maxind = j
maxval = candidate
path[i] = maxind
return path
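# Minimal usage sketch (hypothetical random inputs; note that `score` may be
# overwritten in place, as the docstring warns):
# n_samples, n_states = 5, 3
# score = np.random.rand(n_samples, n_states)
# trans = np.random.rand(n_states, n_states)
# init = np.zeros(n_states)
# final = np.zeros(n_states)
# path = bestfirst(score, None, trans, init, final)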
<|end_of_text|># Copyright (c) 2020-2022, NVIDIA CORPORATION.
from cudf._lib.column cimport Column
from cudf._lib.scalar import as_device_scalar
from cudf._lib.scalar cimport DeviceScalar
from cudf._lib.types import SUPPORTED_NUMPY_TO_LIBCUDF_TYPES
from libcpp.memory cimport unique_ptr
from libcpp.string cimport string
from libcpp.utility cimport move
from cudf._lib.cpp.column.column cimport column
from cudf._lib.cpp.column.column_view cimport column_view
from cudf._lib.cpp.scalar.scalar cimport string_scalar
from cudf._lib.cpp.strings.convert.convert_booleans cimport (
from_booleans as cpp_from_booleans,
to_booleans as cpp_to_booleans,
)
from cudf._lib.cpp.strings.convert.convert_datetime cimport (
from_timestamps as cpp_from_timestamps,
is_timestamp as cpp_is_timestamp,
to_timestamps as cpp_to_timestamps,
)
from cudf._lib.cpp.strings.convert.convert_durations cimport (
from_durations as cpp_from_durations,
to_durations as cpp_to_durations,
)
from cudf._lib.cpp.strings.convert.convert_floats cimport (
from_floats as cpp_from_floats,
to_floats as cpp_to_floats,
)
from cudf._lib.cpp.strings.convert.convert_integers cimport (
from_integers as cpp_from_integers,
hex_to_integers as cpp_hex_to_integers,
integers_to_hex as cpp_integers_to_hex,
is_hex as cpp_is_hex,
to_integers as cpp_to_integers,
)
from cudf._lib.cpp.strings.convert.convert_ipv4 cimport (
integers_to_ipv4 as cpp_integers_to_ipv4,
ipv4_to_integers as cpp_ipv4_to_integers,
is_ipv4 as cpp_is_ipv4,
)
from cudf._lib.cpp.types cimport data_type, type_id
from cudf._lib.types cimport underlying_type_t_type_id
import cudf
def floating_to_string(Column input_col):
cdef column_view input_column_view = input_col.view()
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_from_floats(
input_column_view))
return Column.from_unique_ptr(move(c_result))
def string_to_floating(Column input_col, object out_type):
cdef column_view input_column_view = input_col.view()
cdef unique_ptr[column] c_result
cdef type_id tid = <type_id> (
<underlying_type_t_type_id> (
SUPPORTED_NUMPY_TO_LIBCUDF_TYPES[out_type]
)
)
cdef data_type c_out_type = data_type(tid)
with nogil:
c_result = move(
cpp_to_floats(
input_column_view,
c_out_type))
return Column.from_unique_ptr(move(c_result))
def dtos(Column input_col):
"""
Converting/Casting input column of type double to string column
Parameters
----------
input_col : input column of type double
Returns
-------
A Column with double values cast to string
"""
return floating_to_string(input_col)
def stod(Column input_col, **kwargs):
"""
Converting/Casting input column of type string to double
Parameters
----------
input_col : input column of type string
Returns
-------
A Column with strings cast to double
"""
    return string_to_floating(input_col, cudf.dtype("float64"))
def ftos(Column input_col):
"""
Converting/Casting input column of type float to string column
Parameters
----------
input_col : input column of type double
Returns
-------
A Column with float values cast to string
"""
return floating_to_string(input_col)
def stof(Column input_col, **kwargs):
"""
Converting/Casting input column of type string to float
Parameters
----------
input_col : input column of type string
Returns
-------
A Column with strings cast to float
"""
return string_to_floating(input_col, cudf.dtype("float32"))
def integer_to_string(Column input_col):
cdef column_view input_column_view = input_col.view()
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_from_integers(
input_column_view))
return Column.from_unique_ptr(move(c_result))
def string_to_integer(Column input_col, object out_type):
cdef column_view input_column_view = input_col.view()
cdef unique_ptr[column] c_result
cdef type_id tid = <type_id> (
<underlying_type_t_type_id> (
SUPPORTED_NUMPY_TO_LIBCUDF_TYPES[out_type]
)
)
cdef data_type c_out_type = data_type(tid)
with nogil:
c_result = move(
cpp_to_integers(
input_column_view,
c_out_type))
return Column.from_unique_ptr(move(c_result))
def i8tos(Column input_col):
"""
Converting/Casting input column of type int8 to string column
Parameters
----------
input_col : input column of type int8
Returns
-------
A Column with int8 values cast to string
"""
return integer_to_string(input_col)
def stoi8(Column input_col, **kwargs):
"""
Converting/Casting input column of type string to int8
Parameters
----------
input_col : input column of type string
Returns
-------
A Column with strings cast to int8
"""
return string_to_integer(input_col, cudf.dtype("int8"))
def i16tos(Column input_col):
"""
Converting/Casting input column of type int16 to string column
Parameters
----------
input_col : input column of type int16
Returns
-------
A Column with int16 values cast to string
"""
return integer_to_string(input_col)
def stoi16(Column input_col):
"""
Converting/Casting input column of type string to int16
Parameters
----------
input_col : input column of type string
Returns
-------
A Column with strings cast to int16
"""
return string_to_integer(input_col, cudf.dtype("int16"))
def itos(Column input_col):
"""
Converting/Casting input column of type int32 to string column
Parameters
----------
input_col : input column of type int32
Returns
-------
A Column with int32 values cast to string
"""
return integer_to_string(input_col)
def stoi(Column input_col):
"""
Converting/Casting input column of type string to int32
Parameters
----------
input_col : input column of type string
Returns
-------
A Column with strings cast to int32
"""
return string_to_integer(input_col, cudf.dtype("int32"))
def ltos(Column input_col):
"""
Converting/Casting input column of type int64 to string column
Parameters
----------
input_col : input column of type int64
Returns
-------
A Column with int64 values cast to string
"""
return integer_to_string(input_col)
def stol(Column input_col, **kwargs):
"""
Converting/Casting input column of type string to int64
Parameters
----------
input_col : input column of type string
Returns
-------
A Column with strings cast to int64
"""
return string_to_integer(input_col, cudf.dtype("int64"))
def ui8tos(Column input_col):
"""
Converting/Casting input column of type uint8 to string column
Parameters
----------
input_col : input column of type uint8
Returns
-------
A Column with uint8 values cast to string
"""
return integer_to_string(input_col)
def stoui8(Column input_col, **kwargs):
"""
Converting/Casting input column of type string to uint8
Parameters
----------
input_col : input column of type string
Returns
-------
A Column with strings cast to uint8
"""
return string_to_integer(input_col, cudf.dtype("uint8"))
def ui16tos(Column input_col):
"""
Converting/Casting input column of type uint16 to string column
Parameters
----------
input_col : input column of type uint16
Returns
-------
A Column with uint16 values cast to string
"""
return integer_to_string(input_col)
def stoui16(Column input_col, **kwargs):
"""
Converting/Casting input column of type string to uint16
Parameters
----------
input_col : input column of type string
Returns
-------
A Column with strings cast to uint16
"""
return string_to_integer(input_col, cudf.dtype("uint16"))
def uitos(Column input_col):
"""
Converting/Casting input column of type uint32 to string column
Parameters
----------
input_col : input column of type uint32
Returns
-------
A Column with uint32 values cast to string
"""
return integer_to_string(input_col)
def stoui(Column input_col, **kwargs):
"""
Converting/Casting input column of type string to uint32
Parameters
----------
input_col : input column of type string
Returns
-------
A Column with strings cast to uint32
"""
return string_to_integer(input_col, cudf.dtype("uint32"))
def ultos(Column input_col):
"""
Converting/Casting input column of type uint64 to string column
Parameters
----------
input_col : input column of type uint64
Returns
-------
A Column with uint64 values cast to string
"""
return integer_to_string(input_col)
def stoul(Column input_col, **kwargs):
"""
Converting/Casting input column of type string to uint64
Parameters
----------
input_col : input column of type string
Returns
-------
A Column with strings cast to uint64
"""
return string_to_integer(input_col, cudf.dtype("uint64"))
def _to_booleans(Column input_col, object string_true="True"):
"""
Converting/Casting input column of type string to boolean column
Parameters
----------
input_col : input column of type string
string_true : string that represents True
Returns
-------
A Column with string values cast to boolean
"""
cdef DeviceScalar str_true = as_device_scalar(string_true)
cdef column_view input_column_view = input_col.view()
cdef const string_scalar* string_scalar_true = <const string_scalar*>(
str_true.get_raw_ptr())
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_to_booleans(
input_column_view,
string_scalar_true[0]))
return Column.from_unique_ptr(move(c_result))
def to_booleans(Column input_col, **kwargs):
return _to_booleans(input_col)
def _from_booleans(
Column input_col,
object string_true="True",
object string_false="False"):
"""
Converting/Casting input column of type boolean to string column
Parameters
----------
input_col : input column of type boolean
string_true : string that represents True
string_false : string that represents False
Returns
-------
A Column with boolean values cast to string
"""
cdef DeviceScalar str_true = as_device_scalar(string_true)
cdef DeviceScalar str_false = as_device_scalar(string_false)
cdef column_view input_column_view = input_col.view()
cdef const string_scalar* string_scalar_true = <const string_scalar*>(
str_true.get_raw_ptr())
cdef const string_scalar* string_scalar_false = <const string_scalar*>(
str_false.get_raw_ptr())
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_from_booleans(
input_column_view,
string_scalar_true[0],
string_scalar_false[0]))
return Column.from_unique_ptr(move(c_result))
def from_booleans(Column input_col):
return _from_booleans(input_col)
def int2timestamp(
Column input_col,
str format,
Column names):
"""
Converting/Casting input date-time column to string
column with specified format
Parameters
----------
input_col : input column of type timestamp in integer format
format : The string specifying output format
names : The string names to use for weekdays ("%a", "%A") and
months ("%b", "%B")
Returns
-------
A Column with date-time represented in string format
"""
cdef column_view input_column_view = input_col.view()
cdef string c_timestamp_format = format.encode("UTF-8")
    cdef column_view input_strings_names = names.view()
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_from_timestamps(
input_column_view,
c_timestamp_format,
input_strings_names))
return Column.from_unique_ptr(move(c_result))
def timestamp2int(Column input_col, dtype, format):
"""
Converting/Casting input string column to date-time column with specified
timestamp_format
Parameters
----------
input_col : input column of type string
Returns
-------
A Column with string represented in date-time format
"""
cdef column_view input_column_view = input_col.view()
cdef type_id tid = <type_id> (
<underlying_type_t_type_id> (
SUPPORTED_NUMPY_TO_LIBCUDF_TYPES[dtype]
)
)
cdef data_type out_type = data_type(tid)
cdef string c_timestamp_format = format.encode('UTF-8')
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_to_timestamps(
input_column_view,
out_type,
c_timestamp_format))
return Column.from_unique_ptr(move(c_result))
def istimestamp(
Column input_col,
object format,
**kwargs):
"""
Check input string column matches the specified timestamp format
Parameters
----------
input_col : input column of type string
format : format string of timestamp specifiers
Returns
-------
A Column of boolean values identifying strings that matched the format.
"""
if input_col.size == 0:
return cudf.core.column.as_column([], dtype=kwargs.get('dtype'))
cdef column_view input_column_view = input_col.view()
cdef string c_timestamp_format = <string>str(format).encode('UTF-8')
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_is_timestamp(
input_column_view,
c_timestamp_format))
return Column.from_unique_ptr(move(c_result))
def timedelta2int(Column input_col, dtype, format):
"""
Converting/Casting input string column to TimeDelta column with specified
format
Parameters
----------
input_col : input column of type string
Returns
-------
A Column with string represented in TimeDelta format
"""
cdef column_view input_column_view = input_col.view()
cdef type_id tid = <type_id> (
<underlying_type_t_type_id> (
SUPPORTED_NUMPY_TO_LIBCUDF_TYPES[dtype]
)
)
cdef data_type out_type = data_type(tid)
cdef string c_duration_format = format.encode('UTF-8')
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_to_durations(
input_column_view,
out_type,
c_duration_format))
return Column.from_unique_ptr(move(c_result))
def int2timedelta(
Column input_col,
**kwargs):
"""
Converting/Casting input Timedelta column to string
column with specified format
Parameters
----------
input_col : input column of type Timedelta in integer format
Returns
-------
A Column with Timedelta represented in string format
"""
cdef column_view input_column_view = input_col.view()
cdef string c_duration_format = kwargs.get(
'format', "%D days %H:%M:%S").encode('UTF-8')
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_from_durations(
input_column_view,
c_duration_format))
return Column.from_unique_ptr(move(c_result))
def int2ip(Column input_col, **kwargs):
"""
Converting/Casting integer column to string column in ipv4 format
Parameters
----------
input_col : input integer column
Returns
-------
A Column with integer represented in string ipv4 format
"""
cdef column_view input_column_view = input_col.view()
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_integers_to_ipv4(input_column_view))
return Column.from_unique_ptr(move(c_result))
def ip2int(Column input_col, **kwargs):
"""
Converting string ipv4 column to integer column
Parameters
----------
input_col : input string column
Returns
-------
A Column with ipv4 represented as integer
"""
cdef column_view input_column_view = input_col.view()
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_ipv4_to_integers(input_column_view))
return Column.from_unique_ptr(move(c_result))
def is_ipv4(Column source_strings):
"""
Returns a Column of boolean values with True for `source_strings`
that have strings in IPv4 format. This format is nnn.nnn.nnn.nnn
where nnn is integer digits in [0,255].
"""
cdef unique_ptr[column] c_result
cdef column_view source_view = source_strings.view()
with nogil:
c_result = move(cpp_is_ipv4(
source_view
))
return Column.from_unique_ptr(move(c_result))
def htoi(Column input_col, **kwargs):
"""
Converting input column of type string having hex values
to integer of out_type
Parameters
----------
input_col : input column of type string
out_type : The type of integer column expected
Returns
-------
A Column of integers parsed from hexadecimal string values.
"""
cdef column_view input_column_view = input_col.view()
cdef type_id tid = <type_id> (
<underlying_type_t_type_id> (
SUPPORTED_NUMPY_TO_LIBCUDF_TYPES[
kwargs.get('dtype', cudf.dtype("int64"))
]
)
)
cdef data_type c_out_type = data_type(tid)
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_hex_to_integers(input_column_view,
c_out_type))
return Column.from_unique_ptr(move(c_result))
def is_hex(Column source_strings):
"""
Returns a Column of boolean values with True for `source_strings`
that have hex characters.
"""
cdef unique_ptr[column] c_result
cdef column_view source_view = source_strings.view()
with nogil:
c_result = move(cpp_is_hex(
source_view
))
return Column.from_unique_ptr(move(c_result))
def itoh(Column input_col):
"""
Converting input column of type integer to a string
column with hexadecimal character digits.
Parameters
----------
input_col : input column of type integer
Returns
-------
A Column of strings with hexadecimal characters.
"""
cdef column_view input_column_view = input_col.view()
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_integers_to_hex(input_column_view))
return Column.from_unique_ptr(move(c_result))
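# Minimal usage sketch (hypothetical; these are internal cudf bindings that are
# normally reached through the public cudf API rather than called directly):
# col = cudf.core.column.as_column(["1.5", "2.5"])
# floats = stod(col)   # string -> float64 column
# back = dtos(floats)  # float64 -> string column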
<|end_of_text|>
cdef extern from "filter_audio/ns/include/noise_suppression_x.h":
ctypedef struct NsxHandle:
pass
bint WebRtcNsx_Create(NsxHandle** NS_inst)
    bint WebRtcNsx_Free(NsxHandle* NS_inst)
bint WebRtcNsx_Init(NsxHandle*NS_inst, int fs)
bint WebRtcNsx_set_policy(NsxHandle* Ns_inst, int mode)
bint WebRtcNsx_Process(NsxHandle* NS_inst,
short* speechFrame,
short* speechFrameHB,
short* outframe,
short* outFrameHB)
<|end_of_text|>def tell(message):
if not isinstance(message, str):
return
length = len(message)
if length == 0:
return 5
elif length == 1:
return 1
elif length == 2:
return 'Hello!'
else:
return 0
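# Behavior sketch, derived directly from the branches above:
# tell(123)   -> None (non-string input)
# tell("")    -> 5
# tell("a")   -> 1
# tell("ab")  -> 'Hello!'
# tell("abc") -> 0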
<|end_of_text|>cimport cython
from libc.stdint cimport (int8_t, uint8_t, int16_t, uint16_t,
uint32_t, int32_t, int64_t, uint64_t, INT64_MAX)
from libc.string cimport memcpy, memset
from cpython.string cimport PyString_FromStringAndSize
cdef extern from "Python.h":
int PyByteArray_Resize(object o, Py_ssize_t len)
char* PyByteArray_AS_STRING(object o)
cdef long round_to_word(long pos):
return (pos + (8 - 1)) & -8 # Round up to 8-byte boundary
cdef class SegmentBuilder(object):
def __cinit__(self, long length=512):
self.length = length
self.buf = bytearray(self.length)
self.cbuf = PyByteArray_AS_STRING(self.buf)
self.end = 0
cdef void _resize(self, Py_ssize_t minlen):
# exponential growth of the buffer. By using this formula, we grow
# faster at the beginning (where the constant plays a major role) and
# slower when the buffer it's already big (where length >> 1 plays a
# major role)
        cdef long newlen = self.length + (self.length >> 1) + 512
newlen = max(minlen, newlen)
newlen = round_to_word(newlen)
        cdef long curlen = self.length
PyByteArray_Resize(self.buf, newlen)
cdef char* oldbuf = self.cbuf
self.cbuf = PyByteArray_AS_STRING(self.buf)
## if oldbuf!= self.cbuf:
## print 'REALLOC %s --> %s' % (curlen, newlen)
## else:
## print' %s --> %s' % (curlen, newlen)
memset(self.cbuf + curlen, 0, newlen - curlen)
self.length = newlen
cpdef as_string(self):
return PyString_FromStringAndSize(self.cbuf, self.end)
cpdef void write_int64(self, Py_ssize_t i, int64_t value):
(<int64_t*>(self.cbuf+i))[0] = value
cdef void memcpy_from(self, Py_ssize_t i, const char* src, Py_ssize_t n):
cdef void* dst = self.cbuf + i
memcpy(dst, src, n)
cpdef Py_ssize_t allocate(self, Py_ssize_t length):
"""
Allocate ``length`` bytes of memory inside the buffer. Return the start
position of the newly allocated space.
"""
cdef Py_ssize_t result = self.end
self.end += length
if self.end > self.length:
self._resize(self.end)
return result
cpdef Py_ssize_t alloc_struct(self, Py_ssize_t pos, long data_size, long ptrs_size):
"""
Allocate a new struct of the given size, and write the resulting pointer
at position i. Return the newly allocated position.
"""
cdef long length = (data_size+ptrs_size) * 8
cdef Py_ssize_t result = self.allocate(length)
        cdef long offset = result - (pos + 8)
        cdef long p = ptr.new_struct(offset / 8, data_size, ptrs_size)
self.write_int64(pos, p)
return result
cpdef Py_ssize_t alloc_list(self, Py_ssize_t pos, long size_tag, long item_count,
long body_length):
"""
Allocate a new list of the given size, and write the resulting pointer
at position i. Return the newly allocated position.
"""
body_length = round_to_word(body_length)
cdef Py_ssize_t result = self.allocate(body_length)
        cdef long offset = result - (pos + 8)
        cdef long p = ptr.new_list(offset / 8, size_tag, item_count)
self.write_int64(pos, p)
return result
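# Minimal usage sketch (Python 2 era API, matching PyString_FromStringAndSize
# above; the `ptr` helper used by alloc_struct/alloc_list is imported elsewhere
# in the original package):
# builder = SegmentBuilder(64)
# pos = builder.allocate(8)
# builder.write_int64(pos, 42)
# data = builder.as_string()  # 8 bytes containing the raw int64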
<|end_of_text|>
# import nims_py.pxd
from nims_py cimport get_next_message, get_next_message_timed, create_message_queue
from nims_py cimport Track, TracksMessage, sizeof_tracks_message, sizeof_track
from nims_py cimport nims_checkin
import math
import sys
cdef class PyTrack:
cdef Track _trk
_allkeys = ('id','size_sq_m','speed_mps', 'target_strength', \
'min_range_m','max_range_m','min_bearing_deg','max_bearing_deg', \
'min_elevation_deg','max_elevation_deg', 'first_detect', 'pings_visible',\
'last_pos_range', 'last_pos_bearing', 'last_pos_elevation', \
'last_vel_range', 'last_vel_bearing', 'last_vel_elevation', \
'width', 'length', 'height')
# could initialize all members, but...
def __cinit__(self):
self._trk.id = 0
property id:
def __get__(self):
return self._trk.id
property size_sq_m:
def __get__(self):
return self._trk.size_sq_m
property speed_mps:
def __get__(self):
return self._trk.speed_mps
property target_strength:
def __get__(self):
return self._trk.target_strength
property min_range_m:
def __get__(self):
return self._trk.min_range_m
property max_range_m:
def __get__(self):
return self._trk.max_range_m
property min_bearing_deg:
def __get__(self):
return self._trk.min_bearing_deg
property max_bearing_deg:
def __get__(self):
return self._trk.max_bearing_deg
property min_elevation_deg:
def __get__(self):
return self._trk.min_elevation_deg
property max_elevation_deg:
def __get__(self):
return self._trk.max_elevation_deg
property first_detect:
def __get__(self):
return self._trk.first_detect
property pings_visible:
def __get__(self):
return self._trk.pings_visible
property last_pos_range:
def __get__(self):
return self._trk.last_pos_range
property last_pos_bearing:
def __get__(self):
return self._trk.last_pos_bearing
property last_pos_elevation:
def __get__(self):
return self._trk.last_pos_elevation
property last_vel_range:
def __get__(self):
return self._trk.last_vel_range
property last_vel_bearing:
def __get__(self):
return self._trk.last_vel_bearing
property last_vel_elevation:
def __get__(self):
return self._trk.last_vel_elevation
property width:
def __get__(self):
return self._trk.width
property length:
def __get__(self):
return self._trk.length
property height:
def __get__(self):
return self._trk.height
def __repr__(self):
return repr(self.dict_value())
def __str__(self):
my_class = self.__class__
s = "<%s.%s (0x%x)>" % (my_class.__module__, my_class.__name__, id(self))
s += " = {\n"
kl = [len(k) for k in self._allkeys]
klmax = max(kl)
for k in self._allkeys:
s += "%s = %s\n" % (k.rjust(klmax + 4), getattr(self, k))
s += "}"
return s
def dict_value(self, ensure_finite=False):
ret = dict()
for k in self._allkeys:
v = getattr(self, k)
# Tracker is stuffing nan/inf/-inf values in, and
# the python JSON conversion changes them to a
# string representation that is not valid.
#
# Cleaner to change here rather than force the clients
# to iterate the nested dictionaries.
def _is_finite_value(v):
try:
if math.isnan(v):
return False
if math.isinf(v):
return False
except Exception, e:
pass
return True
if ensure_finite and _is_finite_value(v) == False:
v = None
ret[k] = v
return ret
cdef class PyTracksMessage:
cdef TracksMessage _msg
def __cinit__(self):
self._msg.frame_num = 0
self._msg.ping_num_sonar = 0
self._msg.ping_time = 0.0
self._msg.num_tracks = 0
property frame_num:
def __get__(self):
return self._msg.frame_num
property ping_num_sonar:
def __get__(self):
return self._msg.ping_num_sonar
property num_tracks:
def __get__(self):
return self._msg.num_tracks
property ping_time:
def __get__(self):
return self._msg.ping_time
def tracks(self):
# returns PyTrack objects
all_tracks = []
for idx in xrange(self.num_tracks):
ptrk = PyTrack()
#!!! fixme wtf
ptrk._trk = self._msg.tracks[idx]
all_tracks.append(ptrk)
return all_tracks
def __len__(self):
return self.num_tracks
def __getitem__(self, key):
assert isinstance(key, int), "Only integer indexing is supported"
assert key >= 0 and key < self.num_tracks, "Index out of range"
obj = self._msg.tracks[key]
return obj
def __iter__(self):
for pytrk in self.tracks():
yield pytrk
def __str__(self):
return str(self.dict_value())
def dict_value(self, ensure_finite=False):
ret = dict()
ret["frame_num"] = self.frame_num
ret["ping_num_sonar"] = self.ping_num_sonar
ret["ping_time"] = self.ping_time
ret["num_tracks"] = self.num_tracks
ret["tracks"] = [ptrk.dict_value(ensure_finite=ensure_finite) for ptrk in self.tracks()]
return ret
cpdef int nims_checkin_py():
ret = nims_checkin()
return ret
cpdef int open_tracks_message_queue_py(char *queue_name):
msg_size = sizeof_tracks_message()
mq = create_message_queue(msg_size, queue_name)
return mq
cpdef object get_next_tracks_message_py(int mq):
cdef TracksMessage msg
msg_size = sizeof_tracks_message()
sz = get_next_message(mq, &msg, msg_size)
pmsg = PyTracksMessage()
if sz > 0:
pmsg._msg = msg
else:
sys.stderr.write("get_next_tracks_message_py: error getting next message\n")
return pmsg
cpdef object get_next_tracks_message_timed_py(int mq, float timeout_secs):
cdef TracksMessage msg
msg_size = sizeof_tracks_message()
secs = int(timeout_secs)
nsecs = int((timeout_secs - secs) * 1e9)
sz = get_next_message_timed(mq, &msg, msg_size, secs, nsecs)
pmsg = PyTracksMessage()
if sz > 0:
pmsg._msg = msg
elif -1 == sz:
sys.stderr.write("get_next_tracks_message_timed_py: error getting next message\n")
else:
# timeout is not an error
pass
return pmsg
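# Minimal usage sketch (hypothetical queue name; requires the native NIMS
# message-queue layer to be running):
# mq = open_tracks_message_queue_py(b"/nims_tracks")
# msg = get_next_tracks_message_timed_py(mq, 0.5)
# for track in msg:
#     print(track.id, track.speed_mps)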
<|end_of_text|># cimport PrognosticVariables as PrognosticVariables
# # cimport DiagnosticVariables as DiagnosticVariables
# cimport Grid as Grid
# # cimport Restart
cdef class TimeStepping:
cdef:
public double dt
public double t
public double t_max
public Py_ssize_t nstep
# double [:,:] value_copies
# double [:,:] tendency_copies
# public Py_ssize_t ts_type
public double plot_freq
# cpdef initialize(self, namelist, PrognosticVariables.PrognosticVariables PV)
cpdef initialize(self, namelist)
# cpdef update(self,Grid.Grid Gr, PrognosticVariables.PrognosticVariables PV)
cpdef update(self)
# cpdef adjust_timestep(self,Grid.Grid Gr, PrognosticVariables.PrognosticVariables PV, DiagnosticVariables.DiagnosticVariables DV)
# cdef void compute_cfl_max(self,Grid.Grid Gr, PrognosticVariables.PrognosticVariables PV, DiagnosticVariables.DiagnosticVariables DV)
# cpdef restart(self, Restart.Restart Re)
# cdef inline double cfl_time_step(self)<|end_of_text|>import numpy as np
cimport numpy as np
np.import_array()
cdef extern from "median.h":
void c_median_filter(const unsigned char * src, unsigned char * dst, int channelStride, int h, int matrix_radius)
def c_median_filter_func(np.ndarray[char, ndim=1, mode="c"] in_array not None,
np.ndarray[char, ndim=1, mode="c"] out_array not None,
channel_strides, h, matrix_radius):
c_median_filter(<unsigned char*> np.PyArray_DATA(in_array),
<unsigned char*> np.PyArray_DATA(out_array),
channel_strides, h, matrix_radius)
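# Minimal usage sketch (hypothetical dimensions; assumes the extension was
# compiled against median.c):
# import numpy as np
# src = np.zeros(3 * 16 * 16, dtype=np.int8)  # 3 channels of 16x16
# dst = np.empty_like(src)
# c_median_filter_func(src, dst, 16 * 16, 16, 1)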
<|end_of_text|># cython: profile=True
"""Calculates the force acting on the satellite from relativistic effects
Description:
Calculates the force acting on the satellite from relativistic effects, following [1].
References:
[1] Petit, G. and Luzum, B. (eds.), IERS Conventions (2010), IERS Technical Note No. 36, BKG (2010)
"""
# External library imports
import numpy as np
# Midgard imports
from midgard.math.constant import constant
from midgard.dev import log
# Where imports
from where import apriori
cdef double GM_earth, GM_sun, c, J
cdef int num_param
cdef double [:, :, :] g2i
def register_entry_point():
"""Register entry points for setup and later calls."""
return dict(setup=relativistic_setup, call=relativistic)
def relativistic_setup(
rundate, force_parameters, sat_name, time_grid, epochs, body_pos_gcrs, body_pos_itrs, bodies, gcrs2itrs
):
"""Set up module variables used later during calculation.
Args:
rundate: Time of integration start.
force_parameters: Dict of parameters to be estimated.
sat_name: Name of satellite.
time_grid: Table of times in seconds since rundate, in utc.
epochs: time_grid converted to Time objects, in utc.
body_pos_gcrs: The positions of the bodies in the solar system in GCRS.
body_pos_itrs: The positions of the bodies in the solar system in ITRS.
bodies: List of bodies.
gcrs2itrs: List of transformation matrices, one for each time in epochs.
"""
global GM_earth
global GM_sun
global c, num_param
global g2i
global earth_pos
global earth_vel
global J
# Set gravitational constants and speed of light
GM_earth = constant.get("GM", source="egm_2008")
GM_sun = constant.get("GM_sun")
c = constant.get("c")
# Number of parameters to be estimated
num_param = len(force_parameters)
# Position and velocity of the Earth with respect to the Sun
eph = apriori.get("ephemerides")
earth_pos = eph.pos_gcrs("earth", time = epochs) - eph.pos_gcrs("sun", time=epochs)
earth_vel = eph.vel_gcrs("earth", time = epochs) - eph.vel_gcrs("sun", time=epochs)
# Earth's angular momentum per unit mass
J = constant.get("J")
# Transformation matrices
g2i = gcrs2itrs
def relativistic(sat_pos_gcrs, sat_vel_gcrs, int current_step, **_not_used):
"""Compute relativistic gravitational force on satellite
Args:
sat_pos_gcrs: Satellite position in GCRS.
sat_vel_gcrs: Satellite velocity in GCRS.
current_step: Int, step number of current step of integrator.
_not_used: Unused variables.
Returns:
Acceleration and equation for state transition matrix due to relativistic effects.
"""
gamma = 1
beta = 1
gcrs2itrs = g2i[current_step]
r = np.linalg.norm(sat_pos_gcrs)
v2 = np.dot(sat_vel_gcrs, sat_vel_gcrs)
rv = np.dot(sat_pos_gcrs, sat_vel_gcrs)
rxv = np.cross(sat_pos_gcrs, sat_vel_gcrs)
# TODO: Not sure if this is the correct treatment of J?
J_itrs = np.array([0, 0, J])
J_gcrs = np.dot(gcrs2itrs.T, J_itrs)
rJ = np.dot(sat_pos_gcrs, J_gcrs)
vxJ = np.cross(sat_vel_gcrs, J_gcrs)
R = earth_pos[current_step, :]
R_dot = earth_vel[current_step, :]
# Equation 10.12 from [1]:
# Schwarzschild terms:
acc1 = GM_earth / (c**2 * r**3) * ((2 * (beta + gamma) * GM_earth / r - gamma * v2) * sat_pos_gcrs + 2 * (1 + gamma) * rv * sat_vel_gcrs)
# Lense-Thirring precession:
acc2 = (1 + gamma) * GM_earth / (c**2 * r**3) * (3 / r**2 * rxv * rJ + vxJ)
# de Sitter precession:
acc3 = (1 + 2 * gamma) * np.cross(np.cross(R_dot, (-GM_sun * R / (c**2 * np.linalg.norm(R)**3 ))), sat_vel_gcrs)
# Sum of relativistic effects:
acc = acc1 + acc2 + acc3
# Assume negligible state transition matrix
trans = np.zeros((3, 6))
# No parameters to be estimated
sens = np.zeros((3, num_param))
return (acc, trans, sens)
<|end_of_text|>import parabam
import time
import sys
import os
import copy
import numpy as np
from multiprocessing import Queue
from abc import ABCMeta, abstractmethod
class StatCore(object):
def __init__(self):
self._counts = {}
self._local_structures = {}
self._system = {}
def __pre_run_routine__(self,iterator,**kwargs):
super(StatCore,self).__pre_run_routine__(iterator)
for name,structure in self._constants.structures.items():
self._local_structures[name] = structure.empty_clone()
self._counts[name] = 0
def __get_results__(self):
results = {}
results["structures"] = \
self.__unpack_structures__(self._local_structures)
results["counts"] = self._counts
results["system"] = self._system
return results
def __post_run_routine__(self,**kwargs):
super(StatCore,self).__post_run_routine__()
pass
    def __handle_rule_output__(self,rule_output,read):
if rule_output:#Allows return False
local_structures = self._local_structures
for name,result in rule_output.items():
self._counts[name] += 1
local_structures[name].add(result)
def __unpack_structures__(self,structures):
unpacked = []
for name,struc in structures.items():
unpacked.append( (name,struc.data) )
return unpacked
class Task(StatCore,parabam.command.Task):
def __init__(self,parent_bam,inqu,outqu,statusqu,task_size,constants):
parabam.command.Task.__init__(self,parent_bam=parent_bam,
inqu=inqu,
outqu=outqu,
statusqu=statusqu,
task_size=task_size,
constants=constants)
StatCore.__init__(self)
class PairTask(StatCore,parabam.command.PairTask):
def __init__(self,parent_bam,inqu,outqu,statusqu,task_size,constants):
parabam.command.PairTask.__init__(self,parent_bam=parent_bam,
inqu=inqu,
outqu=outqu,
statusqu=statusqu,
task_size=task_size,
constants=constants)
StatCore.__init__(self)
class Handler(parabam.command.Handler):
def __init__(self,object parent_bam, object output_paths,object inqu,
object constants,object pause_qus,dict out_qu_dict,object report=True):
super(Handler,self).__init__(parent_bam = parent_bam,output_paths = output_paths,
inqu=inqu,constants=constants,pause_qus=pause_qus,
out_qu_dict=out_qu_dict)
self._final_structures = {}
for name,struc in self._constants.structures.items():
self._final_structures[struc.name] = struc.empty_clone()
def __new_package_action__(self,new_package,**kwargs):
super(Handler,self).__new_package_action__(new_package)
results = new_package.results
for name,data in results["structures"]:
final_struc = self._final_structures[name]
final_struc.merge(data)
def __handler_exit__(self):
super(Handler,self).__handler_exit__()
constants = self._constants
#Append to global csv
if constants.numeric_names: #Check that there are global analyses
data_str = \
self.__get_data_str_from_names__(constants.numeric_names,
self._final_structures)
with open(self._output_paths["global"]["stats"],"a") as out_object:
out_object.write("%s%s\n" % \
(self._parent_bam.filename.decode(),data_str))
#Output non global data
for name,structure in self._final_structures.items():
if structure.struc_type == np.ndarray or \
structure.struc_type == dict:
structure.write_to_csv(\
self._output_paths[self._parent_bam.filename.decode()][name])
def __get_data_str_from_names__(self,names,structures):
data_str = ""
for name in names:
if structures[name].struc_type == np.ndarray:
continue
cur_data = structures[name].data
data_str += ",%.5f" % (cur_data,)
return data_str
class UserStructure(object):
__metaclass__ = ABCMeta
def __init__(self,name,struc_type,store_method,data):
self.struc_type = struc_type
self.store_method = store_method
self.data = data
self.org_data = copy.copy(data)
self.name = name
if store_method == "max":
self.add = self.add_max
self.merge = self.merge_max
elif store_method == "min":
self.add = self.add_min
self.merge = self.merge_min
else:
self.add = self.add_cumu
self.merge = self.merge_cumu
def max_decision(self,result,existing):
return max(result,existing)
def min_decision(self,result,exisiting):
return min(result,exisiting)
@abstractmethod
def empty_clone(self):
pass
@abstractmethod
def add_max(self,result):
pass
@abstractmethod
def add_min(self,result):
pass
@abstractmethod
def add_cumu(self,result):
pass
@abstractmethod
def merge_max(self,result):
pass
@abstractmethod
def merge_min(self,result):
pass
@abstractmethod
def merge_cumu(self,result):
pass
class NumericStructure(UserStructure):
def __init__(self,name,struc_type,store_method,data,log_scaling=False):
super(NumericStructure,self).__init__(name,struc_type,store_method,data)
self.log_scaling = log_scaling
if store_method == "min":
self.data = float('inf')
self.org_data = copy.copy(float('inf'))
def empty_clone(self):
return NumericStructure(self.name,
self.struc_type,
self.store_method,
self.org_data)
def add_cumu(self,result):
self.data += result
def add_max(self,result):
self.data = self.max_decision(result,self.data)
def add_min(self,result):
self.data = self.min_decision(result,self.data)
def merge_max(self,result):
self.data = self.max_decision(self.data,result)
del result
def merge_min(self,result):
self.data = self.min_decision(self.data,result)
del result
def merge_cumu(self,result):
self.data += result
del result
#TODO: This mode doesn't work at all. Probably something to do with
# creating an empty clone. Counts are inflated.
class CounterStructure(UserStructure):
def __init__(self,name,struc_type,store_method,data):
super(CounterStructure,self).__init__(name,struc_type,store_method,data)
def empty_clone(self):
return CounterStructure(self.name,self.struc_type,self.store_method,self.org_data)
def add_cumu(self,result):
for key,value in result.items():
try:
self.data[key] += value
except KeyError:
self.data[key] = value
def add_max(self,result):
for key,value in result.items():
try:
self.data[key] = max([self.data[key],value])
except KeyError:
self.data[key] = value
def add_min(self,result):
for key,value in result.items():
try:
self.data[key] = min([self.data[key],value])
except KeyError:
self.data[key] = value
def merge_cumu(self,result):
self.add_cumu(result)
del result
def merge_max(self,result):
self.add_max(result)
del result
def merge_min(self,result):
self.add_min(result)
del result
def write_to_csv(self,out_path):
with open(out_path,"w") as out_file:
for key,value in self.data.items():
out_str = "%s,%.5f\n" % (key,value,)
out_file.write(out_str)
class ArrayStructure(UserStructure):
def __init__(self,name,struc_type,store_method,data):
super(ArrayStructure,self).__init__(name,struc_type,store_method,data)
if self.store_method == "vstack":
self.seen = 0
self.add = self.add_vstack
self.merge = self.merge_vstack
def empty_clone(self):
return ArrayStructure(self.name,self.struc_type,self.store_method,self.org_data)
def add_max(self,result,coords):
existing = self.data[coords]
self.data[coords] = self.max_decision(result,existing)
def add_min(self,result,coords):
existing = self.data[coords]
self.data[coords] = self.min_decision(result,existing)
def add_cumu(self,result):
self.data = np.add(self.data,result)
def add_vstack(self,result):
if self.seen == 0:
self.data = result
self.seen += 1
else:
self.data = np.vstack((self.data,result))
def merge_max(self,result):
self.data = np.maximum(self.data,result)
del result
def merge_min(self,result):
self.data = np.minimum(self.data,result)
del result
def merge_cumu(self,result):
self.data = np.add(self.data,result)
del result
def merge_vstack(self,result):
self.add_vstack(result)
del result
def write_to_csv(self,out_path):
format = []
for x in self.data[0,:]:
type_of_x = type(x)
if type_of_x == str or type_of_x == np.string_:
format.append("%s")
else:
format.append("%.5f")
np.savetxt(out_path,self.data,fmt=",".join(format),delimiter=",")
class Stat(parabam.command.Interface):
def __init__(self,**kwargs):
super(Stat,self).__init__(instance_name = "parabam stat", **kwargs)
def __setup_other_cmd_args__(self):
# Must implement this method to satisfy
# inheritance
pass
def run_cmd(self):
module,rule,constants = \
self.__get_module_and_vitals__(self.cmd_args.rule)
struc_blueprint = {}
module.get_blueprints(struc_blueprint)
self.run(input_paths=self.cmd_args.input,
constants = constants,
rule = rule,
                 struc_blueprint = struc_blueprint,
fetch_region = self.cmd_args.region,
specified_outpath = self.cmd_args.output)
#TODO: The specified_output currently doesn't lead anywhere
def run(self,input_paths,
constants,
rule,
struc_blueprint,
specified_outpath=None,
fetch_region=None,
**kwargs):
        """Run the stat engine over the given inputs using the supplied rule and structures."""
args = dict(locals())
del args["self"]
#Prepare state structures and insert to args
#kwargs are later used to construct the Constant file
#passed to all the fileprocessors and handlers
structures = self.__create_structures__(struc_blueprint)
numeric_names = self.__get_numeric_names__(structures)
args["structures"] = structures
args["numeric_names"] = numeric_names
results = super(Stat,self).run(**args)
return results
def __get_destroy_handler_order__(self):
return [Handler]
def __get_queue_names__(self,**kwargs):
return ["main"]
def __get_handler_bundle__(self,**kwargs):
handler_bundle = { Handler: {"inqu":"main","out_qu_dict":[]}}
return handler_bundle
def __instalise_final_output__(self,numeric_names,
specified_outpath,
**kwargs):
final_output = {}
if len(numeric_names) > 0:
global_filename = \
self.__get_global_output_path__(specified_outpath)
self.__create_global_output_file__(global_filename,numeric_names)
final_output["global"] = {"stats": global_filename}
return final_output
def __get_global_output_path__(self,specified_outpath):
if specified_outpath is None:
global_filename = \
os.path.join(self.temp_dir,"parabam_stat_%d_%d.csv"\
% (time.time(),os.getpid()))
else:
global_filename = specified_outpath
return global_filename
def __get_output_paths__(self,input_path,
final_output_paths,
structures,
**kwargs):
output_paths = {input_path:{}}
for name,structure in structures.items():
if not issubclass(structure.__class__,NumericStructure):
path_id,ext = os.path.splitext(os.path.basename(input_path))
csv_path = "%s_%s.csv" % (path_id,name,)
output_paths[input_path][name] =\
os.path.join(".",self.temp_dir,csv_path)
if "global" in final_output_paths.keys():
output_paths["global"] = final_output_paths["global"]
return output_paths
def __get_task_class__(self,**kwargs):
if self.pair_process:
return PairTask
else:
return Task
def __get_queues__(self,object constants,**kwargs):
queues = {"main":Queue()}
return queues
def __create_global_output_file__(self,output_path,numeric_names):
header = "Sample,%s\n" % (",".join(numeric_names),)
with open(output_path,"w") as out_obj:
out_obj.write(header)
def __get_numeric_names__(self,structures):
numeric_analysis = []
for name,structure in structures.items():
if issubclass(structure.__class__,NumericStructure):
numeric_analysis.append(name)
return sorted(numeric_analysis)
def __create_structures__(self,struc_blueprint):
structures = {}
class_to_type_map = {int:NumericStructure,
float:NumericStructure,
np.ndarray:ArrayStructure,
dict:CounterStructure}
for name,definition in struc_blueprint.items():
definition["name"] = name
definition["struc_type"] = type(definition["data"])
structures[name] = \
class_to_type_map[definition["struc_type"]](**definition)
return structures
def get_parser(self):
        # argparse is imported in ./interface/parabam
parser = self.default_parser()
parser.add_argument('--output','-o',
metavar='OUTPUT',
nargs='?',
required=False
,help="Specify a name for the output CSV file. If this argument is \n"\
"not supplied, the output will take the following form:\n"\
"parabam_stat_[UNIX_TIME].csv")
return parser
#...happily ever after
<|end_of_text|>from cpython cimport array
import array
cdef extern from "bubblesort.c":
void sort(int* arr, int size)
cpdef c_sort(array.array arr, int size):
sort(arr.data.as_ints, size)<|end_of_text|>import collections
import operator
import os
import sys
import time
def copy2dList(L1):
len_gf = len(L1)
L2 = [[] for _ in range(len_gf)]
for h, g in enumerate(L2):
g.extend(L1[h])
return L2
def copy1dList(L1):
L2 = []
L2.extend(L1)
return L2
def readfile(name):
with open(name) as f:
content = f.readlines()
formula = []
unsat = False
for line in content:
line = line.split()
        if line == ['0']:
unsat = True
elif len(line) == 0:
pass
elif line[0] == 'c' or line[0] == '0' or line[0] == "%" or line[0] == 'p':
pass
else:
formula.append([int(x) for x in line[0:-1]])
return formula, unsat
def simplifyunit(formula, var):
newformula = []
for clause in formula:
if var in clause:
pass
elif -var in clause:
new = copy1dList(clause)
new.remove(-var)
newformula.append(new)
else:
newformula.append(clause)
return newformula
def minimal(sez):
minVal = len(sez[-1])
minList = []
for a in sez:
mapVal = len(a)
if mapVal > minVal:
continue
if mapVal < minVal:
minVal = mapVal
minList = [a]
else: # mapVal == minVal
minList.append(a)
return minList, minVal
def keywithmaxval1(d):
v = list(d.values())
k = list(d.keys())
return k[v.index(max(v))]
def keywithmaxval(d):
""" a) create a list of the dict's keys and values;
b) return the key with the max value"""
return max(d.items(), key=operator.itemgetter(1))[0]
def najpojavitve(formula):
pon = {}
for clause in formula:
for var in clause:
pon[abs(var)] = pon.get(abs(var), 0) + 1
k = keywithmaxval(pon)
return k
# unit clauses are also searched for one at a time
def findvar(formula):
if len(formula) > 0:
minclauses, minlen = minimal(formula)
        # if the formula is a contradiction
if minlen == 0:
return "cont", None
        # if a unit clause exists
elif minlen == 1:
return True, (minclauses[0])[0]
        # if there is no unit clause
else:
return False, najpojavitve(minclauses)
    # if the formula is a tautology
else:
return "tavt", None
def flatten(l, ltypes=collections.Sequence):
l = list(l)
while l:
while l and isinstance(l[0], ltypes):
l[0:1] = l[0]
if l: yield l.pop(0)
def DPLL(form):
formula = copy2dList(form)
sat = None
val = []
guessformula = []
guessformulas = []
guessval = []
guesses = 0
while sat is None:
        # solving without guessing
if len(guessval) == 0:
unit, var = findvar(formula)
            # find all unit clauses
while unit is True:
formula = simplifyunit(formula, var)
val.append(var)
unit, var = findvar(formula)
if unit == "tavt":
sat = True
out = val
elif unit == "cont":
sat = False
out = []
            # pick a literal from the shortest clause; it may later be replaced by its negation
else:
guessformula = simplifyunit(formula, var)
guessval = [val, var]
guesses = 1
        # guessing
else:
unit, var = findvar(guessformula)
while unit is True:
guessformula = simplifyunit(guessformula, var)
guessval.append(var)
unit, var = findvar(guessformula)
            # a genuine solution was found
if unit == "tavt":
sat = True
out = list(flatten(guessval))
            # the guess was wrong
elif unit == "cont":
if guesses == 1:
wrong = guessval[1]
val.append(-wrong)
guessval = []
formula = simplifyunit(formula, -wrong)
else:
                    wrong = guessval[1]
guessval = guessval[0]
guessval.append(-wrong)
guessformula = guessformulas.pop()
guessformula = simplifyunit(guessformula, -wrong)
guesses -= 1
else:
guessformulas.append(guessformula)
guessformula = simplifyunit(guessformula, var)
guessval = [guessval, var]
guesses += 1
return sat, out
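# Minimal usage sketch (hypothetical "input.cnf" in DIMACS form):
# formula, unsat = readfile("input.cnf")
# if not unsat:
#     sat, assignment = DPLL(formula)
#     print(sat, assignment)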
<|end_of_text|>#
# Copyright (c) 2018-2020 by Kristoffer Paulsson <[email protected]>.
#
# This software is available under the terms of the MIT license. Parts are licensed under
# different terms if stated. The legal terms are attached to the LICENSE file and are
# made available on:
#
# https://opensource.org/licenses/MIT
#
# SPDX-License-Identifier: MIT
#
# Contributors:
# Kristoffer Paulsson - initial implementation
#
"""Archive implementation."""
import datetime
import functools
import os
import re
import struct
import time
import uuid
from pathlib import Path, PurePosixPath
from typing import Union
from angelos.archive7.fs import Delete, InvalidPath, EntryRecord, FileObject
from angelos.archive7.fs import FileSystemStreamManager, TYPE_DIR, TYPE_LINK, TYPE_FILE, \
HierarchyTraverser
from angelos.common.misc import SharedResourceMixin
from angelos.common.utils import Util
class Archive7Error(RuntimeError):
"""Errors related to Archive7."""
INVALID_FORMAT = ("Invalid format", 120)
AR7_NOT_FOUND = ("Archive file not found", 121)
AR7_OPERAND_INVALID = ("Invalid or unsupported operand", 122)
class Header:
"""Header for the Archive 7 format."""
__slots__ = ["major", "minor", "type", "role", "use", "id", "owner", "domain", "node", "created", "title"]
FORMAT = "!8scHHbbb16s16s16s16sQ256s"
def __init__(
self, owner: uuid.UUID, identity: uuid.UUID = None, node: uuid.UUID = None, domain: uuid.UUID = None,
title: Union[bytes, bytearray] = None, type_: int = None, role: int = None, use: int = None,
major: int = 2, minor: int = 0, created: datetime.datetime = None
):
self.major = major
self.minor = minor
self.type = type_
self.role = role
self.use = use
self.id = identity
self.owner = owner
self.domain = domain
self.node = node
self.created = created if created else datetime.datetime.now()
self.title = title
def __bytes__(self):
return struct.pack(
Header.FORMAT,
b"archive7",
b"a",
2,
0,
self.type if self.type else 0,
self.role if self.role else 0,
self.use if self.use else 0,
self.id.bytes if isinstance(self.id, uuid.UUID) else uuid.uuid4().bytes,
self.owner.bytes if isinstance(self.owner, uuid.UUID) else bytes(16),
self.domain.bytes if isinstance(self.domain, uuid.UUID) else bytes(16),
self.node.bytes if isinstance(self.node, uuid.UUID) else bytes(16),
int(
time.mktime(self.created.timetuple())
if isinstance(self.created, datetime.datetime)
else time.mktime(datetime.datetime.now().timetuple())
),
self.title[:256] if isinstance(self.title, (bytes, bytearray)) else bytes(256)
)
@staticmethod
def meta_unpack(data: Union[bytes, bytearray]) -> "Header":
metadata = struct.unpack(Header.FORMAT, data)
        if metadata[0] != b"archive7" or metadata[1] != b"a":
raise Archive7Error(*Archive7Error.INVALID_FORMAT)
return Header(
type_=metadata[4],
role=metadata[5],
use=metadata[6],
identity=uuid.UUID(bytes=metadata[7]),
owner=uuid.UUID(bytes=metadata[8]),
domain=uuid.UUID(bytes=metadata[9]),
node=uuid.UUID(bytes=metadata[10]),
created=datetime.datetime.fromtimestamp(metadata[11]),
title=metadata[12].strip(b"\x00"),
major=metadata[2],
minor=metadata[3],
)
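# Minimal round-trip sketch for Header (hypothetical values):
# h = Header(owner=uuid.uuid4(), title=b"example")
# h2 = Header.meta_unpack(bytes(h))
# assert h2.owner == h.owner and h2.title == b"example"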
class Archive7(SharedResourceMixin):
"""Archive main class and high level API."""
def __init__(self, filename: Path, secret: bytes, delete: int = Delete.ERASE):
"""Init archive using a file object and set delete mode."""
SharedResourceMixin.__init__(self)
self.__closed = False
self.__delete = delete
self.__manager = FileSystemStreamManager(filename, secret)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
@staticmethod
def setup(filename: Path, secret: bytes, owner: uuid.UUID = None, node: uuid.UUID = None, title: str = None,
domain: uuid.UUID = None, type_: int = None, role: int = None, use: int = None):
"""Create a new archive.
Args:
filename (str):
Path and filename to archive
secret (bytes):
Encryption key
owner (uuid.UUID):
Angelos owner UUID
node (uuid.UUID):
Angelos node UUID
title (str):
Title or name of the archive
domain (uuid.UUID):
Angelos domain UUID
type_ (int):
Facade type
role (int):
Node role
use (int):
Archive usage
Returns (Archive7):
Initialized Archive7 instance
"""
header = Header(
owner=owner, node=node, title=title.encode() if title else title,
domain=domain, type_=type_, role=role, use=use
)
archive = Archive7(filename, secret)
archive._Archive7__manager.meta = bytes(header)
archive._Archive7__manager.save_meta()
return archive
@staticmethod
def open(filename: Path, secret: bytes, delete: int = 3):
"""Open an archive with a symmetric encryption key.
Args:
filename (str):
Path and filename to archive
secret (bytes):
Encryption key
delete (int):
Delete methodology
Returns (Archive7):
Opened Archive7 instance
"""
if not os.path.isfile(filename):
raise Archive7Error(*Archive7Error.AR7_NOT_FOUND, {"path": filename})
return Archive7(filename, secret, delete)
@property
def closed(self):
"""Archive closed status."""
return self.__closed
def close(self):
"""Close archive."""
if not self.__closed:
self.__manager.close()
self.__closed = True
def stats(self):
"""Archive stats."""
size = struct.calcsize(Header.FORMAT)
return Header.meta_unpack(self.__manager.meta[:size])
async def info(self, *args, **kwargs):
return await self._run(functools.partial(self.__info, *args, **kwargs))
def __info(self, filename: PurePosixPath) -> EntryRecord:
"""Information about a file.
Args:
filename (str):
Path and name of file
Returns (ArchiveEntry):
File entry from registry
"""
try:
return self.__manager.search_entry(self.__manager.resolve_path(filename))
except InvalidPath:
raise Archive7Error(*Archive7Error.AR7_NOT_FOUND, {"path": filename})
async def glob(
self,
name: str = "*",
id: uuid.UUID = None,
parent: uuid.UUID = None,
owner: uuid.UUID = None,
created: datetime.datetime = None,
modified: datetime.datetime = None,
deleted: bool = False,
user: str = None,
group: str = None
) -> set:
"""Glob the file system in the archive."""
sq = Archive7.Query(pattern=name)
if id:
sq.id(id)
if parent:
sq.parent(parent)
if owner:
sq.owner(owner)
if created:
sq.created(created)
if modified:
sq.modified(modified)
if deleted:
sq.deleted(deleted)
if user:
sq.user(user)
if group:
sq.group(group)
files = set()
async for entry, path in self.search(sq):
files.add(path)
return files
async def move(self, *args, **kwargs):
return await self._run(functools.partial(self.__move, *args, **kwargs))
def __move(self, filename: PurePosixPath, dirname: PurePosixPath):
"""Move file/dir to another directory."""
try:
identity = self.__manager.resolve_path(filename)
parent = self.__manager.resolve_path(dirname)
self.__manager.change_parent(identity, parent)
except InvalidPath:
raise Archive7Error(*Archive7Error.AR7_NOT_FOUND, {"filename": filename, "dirname": dirname})
async def chmod(self, *args, **kwargs):
return await self._run(functools.partial(self.__chmod, *args, **kwargs))
def __chmod(
self,
filename: PurePosixPath,
# id: uuid.UUID = None,
owner: uuid.UUID = None,
deleted: bool = None,
user: str = None,
group: str = None,
perms: int = None,
):
"""Update ID/owner or deleted status for an entry."""
try:
self.__manager.update_entry(
self.__manager.resolve_path(filename),
owner=owner, deleted=deleted, user=user, group=group, perms=perms
)
except InvalidPath:
raise Archive7Error(*Archive7Error.AR7_NOT_FOUND, {"path": filename})
async def remove(self, *args, **kwargs):
return await self._run(functools.partial(self.__remove, *args, **kwargs))
def __remove(self, filename: PurePosixPath, mode: int = None):
"""Remove file or dir."""
try:
self.__manager.delete_entry(
self.__manager.resolve_path(filename), mode if mode else self.__delete)
except InvalidPath:
raise Archive7Error(*Archive7Error.AR7_NOT_FOUND, {"path": filename})
async def rename(self, *args, **kwargs):
return await self._run(functools.partial(self.__rename, *args, **kwargs))
def __rename(self, filename: PurePosixPath, dest: str):
"""Rename file or directory."""
try:
self.__manager.change_name(self.__manager.resolve_path(filename), dest)
except InvalidPath:
raise Archive7Error(*Archive7Error.AR7_NOT_FOUND, {"path": filename})
async def isdir(self, *args, **kwargs):
return await self._run(functools.partial(self.__isdir, *args, **kwargs))
def __isdir(self, dirname: PurePosixPath) -> bool:
"""Check if a path is a known directory."""
try:
return self.__manager.search_entry(self.__manager.resolve_path(dirname, True)).type == TYPE_DIR
except InvalidPath:
return False
async def isfile(self, *args, **kwargs):
return await self._run(functools.partial(self.__isfile, *args, **kwargs))
def __isfile(self, filename: PurePosixPath) -> bool:
"""Check if a path is a known file."""
try:
return self.__manager.search_entry(self.__manager.resolve_path(filename, True)).type == TYPE_FILE
except InvalidPath:
return False
async def islink(self, *args, **kwargs):
return await self._run(functools.partial(self.__islink, *args, **kwargs))
def __islink(self, filename: PurePosixPath) -> bool:
"""Check if a path is a known link."""
try:
return self.__manager.search_entry(self.__manager.resolve_path(filename, False)).type == TYPE_LINK
except InvalidPath:
return False
async def mkdir(self, *args, **kwargs):
return await self._run(functools.partial(self.__mkdir, *args, **kwargs))
def __mkdir(
self,
dirname: PurePosixPath,
user: str = None,
group: str = None,
perms: int = None
) -> uuid.UUID:
"""
Make a new directory and super directories if missing.
name The full path and name of new directory
returns the entry ID
"""
try:
return self.__manager.create_entry(
TYPE_DIR, dirname.parts[-1],
self.__manager.resolve_path(dirname.parent),
user=user, group=group, perms=perms
)
except InvalidPath:
raise Archive7Error(*Archive7Error.AR7_NOT_FOUND, {"path": dirname.parent})
async def mkfile(self, *args, **kwargs):
return await self._run(functools.partial(self.__mkfile, *args, **kwargs))
def __mkfile(
self,
filename: PurePosixPath,
data: bytes,
created: datetime.datetime = None,
modified: datetime.datetime = None,
owner: uuid.UUID = None,
parent: uuid.UUID = None,
id: uuid.UUID = None,
user: str = None,
group: str = None,
perms: int = None
) -> uuid.UUID:
"""Create a new file."""
try:
parent = self.__manager.resolve_path(filename.parent)
except InvalidPath:
raise Archive7Error(*Archive7Error.AR7_NOT_FOUND, {"parent": filename.parent})
identity = self.__manager.create_entry(
type_=TYPE_FILE,
name=filename.parts[-1],
stream=uuid.UUID(int=0),
parent=parent,
identity=id,
owner=owner,
created=created,
modified=modified,
user=user,
group=group,
perms=perms,
)
vfd = self.__manager.open(identity, "wb")
vfd.write(data)
length = vfd.stream.length()
vfd.close()
self.__manager.update_entry(identity, length=length)
return identity
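# Example: create a file and read it back (sketch using the async wrappers):
#
#     file_id = await archive.mkfile(PurePosixPath("/docs/note.txt"), b"hello")
#     data = await archive.load(PurePosixPath("/docs/note.txt"))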
async def link(self, *args, **kwargs):
return await self._run(functools.partial(self.__link, *args, **kwargs))
def __link(
self,
filename: PurePosixPath,
target: PurePosixPath,
created: datetime.datetime = None,
modified: datetime.datetime = None,
user: str = None,
group: str = None,
perms: int = None
) -> uuid.UUID:
"""Create a new link to file or directory."""
try:
parent = self.__manager.resolve_path(filename.parent)
except InvalidPath:
raise Archive7Error(*Archive7Error.AR7_NOT_FOUND, {"path": filename.parent})
try:
owner = self.__manager.resolve_path(target)
except InvalidPath:
raise Archive7Error(*Archive7Error.AR7_NOT_FOUND, {"target": target})
return self.__manager.create_entry(
type_=TYPE_LINK,
name=filename.parts[-1],
parent=parent,
owner=owner,
created=created,
modified=modified,
user=user,
group=group,
perms=perms,
)
async def save(self, *args, **kwargs) -> uuid.UUID:
return await self._run(functools.partial(self.__save, *args, **kwargs))
def __save(self, filename: PurePosixPath, data: bytes, modified: datetime.datetime = None):
"""Update a file with new data."""
if not modified:
modified = datetime.datetime.now()
try:
identity = self.__manager.resolve_path(filename, True)
except InvalidPath:
raise Archive7Error(*Archive7Error.AR7_NOT_FOUND, {"path": filename})
vfd = self.__manager.open(identity, "wb")
vfd.write(data)
vfd.truncate()
length = vfd.stream.length()
vfd.close()
self.__manager.update_entry(identity, modified=modified, length=length)
return identity
async def load(self, *args, **kwargs):
return await self._run(functools.partial(self.__load, *args, **kwargs))
def __load(self, filename: PurePosixPath, fd: bool = False, readonly: bool = True) -> Union[bytes, FileObject]:
"""Load data from a file."""
try:
if fd:
return self.__manager.open(self.__manager.resolve_path(filename, True), "rb" if readonly else "wb")
else:
vfd = self.__manager.open(self.__manager.resolve_path(filename, True), "rb")
data = vfd.read()
vfd.close()
return data
except InvalidPath:
raise Archive7Error(*Archive7Error.AR7_NOT_FOUND, {"path": filename})
async def search(self, query: "Archive7.Query"):
"""Search is an async generator that iterates over the file system hierarchy.
Use accordingly:
query = Archive.Query()
async for entry, path in archive.search(query):
pass
"""
evaluator = query.build()
traverser = self.__manager.traverse_hierarchy(uuid.UUID(int=0))
while True:
entry, path = await self._wild(functools.partial(self.__search, traverser=traverser))
if not entry:
break
if evaluator(entry, str(path)):
yield entry, path
def __search(self, traverser: HierarchyTraverser) -> tuple:
"""Load data from a file."""
try:
return next(traverser)
except StopIteration:
return None, None
class Query:
"""Low level query API."""
EQ = "=" # b'e'
NE = "≠" # b'n'
GT = ">" # b'g'
LT = "<" # b'l'
def __init__(self, pattern: str = "*"):
"""Init a query."""
self.__type = (TYPE_FILE, TYPE_DIR, TYPE_LINK)
self.__path_regex = None
if pattern != "*":
path = re.escape(pattern).replace(r"\*", ".*").replace(r"\?", ".")
self.__path_regex = re.compile(path)
self.__id = None
self.__parent = None
self.__owner = None
self.__created = None
self.__modified = None
self.__deleted = False
self.__user = None
self.__group = None
@property
def types(self):
"""File system entry types."""
return self.__type
def type(self, _type=None, operand="="):
"""Search for an entry type."""
Util.is_type(_type, (tuple, bytes, type(None)))
if isinstance(_type, tuple):
self.__type = _type
elif isinstance(_type, bytes):
self.__type = (_type,)
return self
def id(self, identity=None):
"""Search for ID."""
Util.is_type(identity, (uuid.UUID, type(None)))
self.__id = identity
return self
def parent(self, parent, operand="="):
"""Search with directory ID."""
Util.is_type(parent, (uuid.UUID, tuple, type(None)))
if operand not in ["=", "≠"]:
raise Archive7Error(*Archive7Error.AR7_OPERAND_INVALID, {"operand": operand})
if isinstance(parent, uuid.UUID):
self.__parent = ([parent.int], operand)
elif isinstance(parent, tuple):
ints = []
for i in parent:
ints.append(i.int)
self.__parent = (ints, operand)
return self
def owner(self, owner, operand="="):
"""Search with owner."""
Util.is_type(owner, (uuid.UUID, tuple, type(None)))
if operand not in ["=", "≠"]:
raise Archive7Error(*Archive7Error.AR7_OPERAND_INVALID, {"operand": operand})
if isinstance(owner, uuid.UUID):
self.__owner = ([owner.int], operand)
elif isinstance(owner, tuple):
ints = []
for i in owner:
ints.append(i.int)
self.__owner = (ints, operand)
return self
def created(self, created, operand="<"):
"""Search with creation date."""
Util.is_type(created, (int, str, datetime.datetime))
if operand not in ["=", ">", "<"]:
raise Archive7Error(*Archive7Error.AR7_OPERAND_INVALID, {"operand": operand})
if isinstance(created, int):
created = datetime.datetime.fromtimestamp(created)
elif isinstance(created, str):
created = datetime.datetime.fromisoformat(created)
self.__created = (created, operand)
return self
def modified(self, modified, operand="<"):
"""Search with modified date."""
Util.is_type(modified, (int, str, datetime.datetime))
if operand not in ["=", ">", "<"]:
raise Archive7Error(*Archive7Error.AR7_OPERAND_INVALID, {"operand": operand})
if isinstance(modified, int):
modified = datetime.datetime.fromtimestamp(modified)
elif isinstance(modified, str):
modified = datetime.datetime.fromisoformat(modified)
self.__modified = (modified, operand)
return self
def deleted(self, deleted):
"""Search for deleted."""
Util.is_type(deleted, (bool, type(None)))
self.__deleted = deleted
return self
def user(self, user, operand="="):
"""Search with unix username."""
Util.is_type(user, (str, tuple, type(None)))
if operand not in ["=", "≠"]:
raise Archive7Error(*Archive7Error.AR7_OPERAND_INVALID, {"operand": operand})
if isinstance(user, str):
self.__user = ([user.encode("utf-8")], operand)
elif isinstance(user, tuple):
ints = []
for i in user:
ints.append(i.encode("utf-8"))
self.__user = (ints, operand)
return self
def group(self, group, operand="="):
"""Search with unix group."""
Util.is_type(group, (str, tuple, type(None)))
if operand not in ["=", "≠"]:
raise Archive7Error(*Archive7Error.AR7_OPERAND_INVALID, {"operand": operand})
if isinstance(group, str):
self.__group = ([group.encode("utf-8")], operand)
elif isinstance(group, tuple):
ints = []
for i in group:
ints.append(i.encode("utf-8"))
self.__group = (ints, operand)
return self
def build(self, paths=None):
"""Generate the search query function."""
def _type_in(x):
return x.type in self.__type
def _id_is(x):
return self.__id.int == x.id.int
def _parent_is(x):
return x.parent.int in self.__parent[0]
def _parent_not(x):
return x.parent.int not in self.__parent[0]
def _owner_is(x):
return x.owner.int in self.__owner[0]
def _owner_not(x):
return x.owner.int not in self.__owner[0]
def _created_eq(x):
return x.created == self.__created[0]
def _created_lt(x):
return x.created > self.__created[0]
def _created_gt(x):
return x.created < self.__created[0]
def _modified_eq(x):
return x.modified == self.__modified[0]
def _modified_lt(x):
return x.modified > self.__modified[0]
def _modified_gt(x):
return x.modified < self.__modified[0]
def _deleted_is(x):
return x.deleted is True
def _deleted_not(x):
return x.deleted is False
def _deleted_any(x):
return True
def _user_is(x):
return x.user in self.__user[0]
def _user_not(x):
return x.user not in self.__user[0]
def _group_is(x):
return x.group in self.__group[0]
def _group_not(x):
return x.group not in self.__group[0]
qualifiers = [_type_in]
if self.__id:
qualifiers.append(_id_is)
if self.__parent:
if self.__parent[1] == "=":
qualifiers.append(_parent_is)
elif self.__parent[1] == "≠":
qualifiers.append(_parent_not)
if self.__owner:
if self.__owner[1] == "=":
qualifiers.append(_owner_is)
elif self.__owner[1] == "≠":
qualifiers.append(_owner_not)
if self.__created:
if self.__created[1] == "=":
qualifiers.append(_created_eq)
elif self.__created[1] == "<":
qualifiers.append(_created_lt)
elif self.__created[1] == ">":
qualifiers.append(_created_gt)
if self.__modified:
if self.__modified[1] == "=":
qualifiers.append(_modified_eq)
elif self.__modified[1] == "<":
qualifiers.append(_modified_lt)
elif self.__modified[1] == ">":
qualifiers.append(_modified_gt)
if isinstance(self.__deleted, bool):
if self.__deleted:
qualifiers.append(_deleted_is)
else:
qualifiers.append(_deleted_not)
elif isinstance(self.__deleted, type(None)):
qualifiers.append(_deleted_any)
if self.__user:
if self.__user[1] == "=":
qualifiers.append(_user_is)
elif self.__user[1] == "≠":
qualifiers.append(_user_not)
if self.__group:
if self.__group[1] == "=":
qualifiers.append(_group_is)
elif self.__group[1] == "≠":
qualifiers.append(_group_not)
def query(rec, path):
"""Evaluate entry and path against criteria."""
if self.__path_regex:
if not bool(self.__path_regex.match(path)):
return False
for q in qualifiers:
if not q(rec):
return False
return True
return query
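# Example of composing and running a query (sketch; 'some_uuid' is a
# placeholder uuid.UUID):
#
#     q = Archive7.Query(pattern="/logs/*").owner(some_uuid).deleted(None)
#     async for entry, path in archive.search(q):
#         print(path)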
<|end_of_text|># -*- coding: utf-8 -*-
"""
pygments.lexers.c_like
~~~~~~~~~~~~~~~~~~~~~~
Lexers for other C-like languages.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, Include, bygroups, inherit, words, \
default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
from pygments.lexers.c_cpp import CLexer, CppLexer
from pygments.lexers import _mql_builtins
__all__ = ['PikeLexer', 'NesCLexer', 'ClayLexer', 'ECLexer', 'ValaLexer',
'CudaLexer', 'SwigLexer', 'MqlLexer', 'ArduinoLexer']
class PikeLexer(CppLexer):
"""
For `Pike <http://pike.lysator.liu.se/>`_ source code.
.. versionadded:: 2.0
"""
name = 'Pike'
aliases = ['pike']
filenames = ['*.pike', '*.pmod']
mimetypes = ['text/x-pike']
tokens = {
'statements': [
(words((
'catch', 'new', 'private', 'protected', 'public', 'gauge',
'throw', 'throws', 'class', 'interface', 'implement', 'abstract', 'extends', 'from',
'this', 'super', 'constant', 'final', 'static', 'import', 'use', 'extern',
'inline', 'proto', 'break', 'continue', 'if', 'else', 'for',
'while', 'do', 'switch', 'case', 'as', 'in', 'version', 'return', 'true', 'false', 'null',
'__VERSION__', '__MAJOR__', '__MINOR__', '__BUILD__', '__REAL_VERSION__',
'__REAL_MAJOR__', '__REAL_MINOR__', '__REAL_BUILD__', '__DATE__', '__TIME__',
'__FILE__', '__DIR__', '__LINE__', '__AUTO_BIGNUM__', '__NT__', '__PIKE__',
'__amigaos__', '_Pragma', 'static_assert', 'defined', 'sscanf'), suffix=r'\b'),
Keyword),
(r'(bool|int|long|float|short|double|char|string|object|void|mapping|'
r'array|multiset|program|function|lambda|mixed|'
r'[a-z_][a-z0-9_]*_t)\b',
Keyword.Type),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'[~!%^&*+=|?:<>/@-]', Operator),
inherit,
],
'classname': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
# template specification
(r'\s*(?=>)', Text, '#pop'),
],
}
class NesCLexer(CLexer):
"""
For `nesC <https://github.com/tinyos/nesc>`_ source code with preprocessor
directives.
.. versionadded:: 2.0
"""
name = 'nesC'
aliases = ['nesc']
filenames = ['*.nc']
mimetypes = ['text/x-nescsrc']
tokens = {
'statements': [
(words((
'abstract', 'as', 'async', 'atomic', 'call', 'command', 'component',
'components', 'configuration', 'event', 'extends', 'generic',
'implementation', 'includes', 'interface', 'module', 'new', 'norace',
'post', 'provides', 'signal', 'task', 'uses'), suffix=r'\b'),
Keyword),
(words(('nx_struct', 'nx_union', 'nx_int8_t', 'nx_int16_t', 'nx_int32_t',
'nx_int64_t', 'nx_uint8_t', 'nx_uint16_t', 'nx_uint32_t',
'nx_uint64_t'), suffix=r'\b'),
Keyword.Type),
inherit,
],
}
class ClayLexer(RegexLexer):
"""
For `Clay <http://claylabs.com/clay/>`_ source.
.. versionadded:: 2.0
"""
name = 'Clay'
filenames = ['*.clay']
aliases = ['clay']
mimetypes = ['text/x-clay']
tokens = {
'root': [
(r'\s', Text),
(r'//.*?$', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'\b(public|private|import|as|record|variant|instance'
r'|define|overload|default|external|alias'
r'|rvalue|ref|forward|inline|noinline|forceinline'
r'|enum|var|and|or|not|if|else|goto|return|while'
r'|switch|case|break|continue|for|in|true|false|try|catch|throw'
r'|finally|onerror|staticassert|eval|when|newtype'
r'|__FILE__|__LINE__|__COLUMN__|__ARG__'
r')\b', Keyword),
(r'[~!%^&*+=|:<>/-]', Operator),
(r'[#(){}\[\],;.]', Punctuation),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'\d+[LlUu]*', Number.Integer),
(r'\b(true|false)\b', Name.Builtin),
(r'(?i)[a-z_?][\w?]*', Name),
(r'"""', String, 'tdqs'),
(r'"', String, 'dqs'),
],
'strings': [
(r'(?i)\\(x[0-9a-f]{2}|.)', String.Escape),
(r'.', String),
],
'nl': [
(r'\n', String),
],
'dqs': [
(r'"', String, '#pop'),
Include('strings'),
],
'tdqs': [
(r'"""', String, '#pop'),
Include('strings'),
Include('nl'),
],
}
class ECLexer(CLexer):
"""
For eC source code with preprocessor directives.
.. versionadded:: 1.5
"""
name = 'eC'
aliases = ['ec']
filenames = ['*.ec', '*.eh']
mimetypes = ['text/x-echdr', 'text/x-ecsrc']
tokens = {
'statements': [
(words((
'virtual', 'class', 'private', 'public', 'property', 'import',
'delete', 'new', 'new0', 'renew', 'renew0', 'define', 'get',
'set', 'remote', 'dllexport', 'dllimport', 'stdcall', 'subclass',
'__on_register_module', 'namespace', 'using', 'typed_object',
'any_object', 'incref', 'register', 'watch', 'stopwatching', 'firewatchers',
'watchable', 'class_designer', 'class_fixed', 'class_no_expansion', 'isset',
'class_default_property', 'property_category', 'class_data',
'class_property', 'thisclass', 'dbtable', 'dbindex',
'database_open', 'dbfield'), suffix=r'\b'), Keyword),
(words(('uint', 'uint16', 'uint32', 'uint64', 'bool', 'byte',
'unichar', 'int64'), suffix=r'\b'),
Keyword.Type),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'(null|value|this)\b', Name.Builtin),
inherit,
],
'classname': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
# template specification
(r'\s*(?=>)', Text, '#pop'),
],
}
class ValaLexer(RegexLexer):
"""
For Vala source code with preprocessor directives.
.. versionadded:: 1.1
"""
name = 'Vala'
aliases = ['vala', 'vapi']
filenames = ['*.vala', '*.vapi']
mimetypes = ['text/x-vala']
tokens = {
'whitespace': [
(r'^\s*#if\s+0', Comment.Preproc, 'if0'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
],
'statements': [
(r'[L@]?"', String,'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
String.Char),
(r'(?s)""".*?"""', String), # verbatim strings
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'0[0-7]+[Ll]?', Number.Oct),
(r'\d+[Ll]?', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'(\[)(Compact|Immutable|(?:Boolean|Simple)Type)(\])',
bygroups(Punctuation, Name.Decorator, Punctuation)),
# TODO: "correctly" parse complex code attributes
(r'(\[)(CCode|(?:Integer|Floating)Type)',
bygroups(Punctuation, Name.Decorator)),
(r'[()\[\],.]', Punctuation),
(words((
'as', 'base', 'break', 'case', 'catch', 'construct', 'continue',
'default', 'delete', 'do', 'else', 'enum', 'finally', 'for',
'foreach', 'get', 'if', 'in', 'is', 'lock', 'new', 'out', 'params',
'return', 'set', 'sizeof', 'switch', 'this', 'throw', 'try',
'typeof', 'while', 'yield'), suffix=r'\b'),
Keyword),
(words((
'abstract', 'const', 'delegate', 'dynamic', 'ensures', 'extern',
'inline', 'internal', 'override', 'owned', 'private', 'protected',
'public', 'ref', 'requires', 'signal', 'static', 'throws', 'unowned',
'var', 'virtual', 'volatile', 'weak', 'yields'), suffix=r'\b'),
Keyword.Declaration),
(r'(namespace|using)(\s+)', bygroups(Keyword.Namespace, Text),
'namespace'),
(r'(class|errordomain|interface|struct)(\s+)',
bygroups(Keyword.Declaration, Text), 'class'),
(r'(\.)([a-zA-Z_]\w*)',
bygroups(Operator, Name.Attribute)),
# void is an actual keyword, others are in glib-2.0.vapi
(words((
'void', 'bool', 'char', 'double', 'float', 'int', 'int8', 'int16',
'int32', 'int64', 'long', 'short', 'size_t', 'ssize_t', 'string',
'time_t', 'uchar', 'uint', 'uint8', 'uint16', 'uint32', 'uint64',
'ulong', 'unichar', 'ushort'), suffix=r'\b'),
Keyword.Type),
(r'(true|false|null)\b', Name.Builtin),
(r'[a-zA-Z_]\w*', Name),
],
'root': [
Include('whitespace'),
default('statement'),
],
'statement': [
Include('whitespace'),
Include('statements'),
('[{}]', Punctuation),
(';', Punctuation, '#pop'),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'.*?\n', Comment),
],
'class': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'namespace': [
(r'[a-zA-Z_][\w.]*', Name.Namespace, '#pop')
],
}
class CudaLexer(CLexer):
"""
For NVIDIA `CUDA™ <http://developer.nvidia.com/category/zone/cuda-zone>`_
source.
.. versionadded:: 1.6
"""
name = 'CUDA'
filenames = ['*.cu', '*.cuh']
aliases = ['cuda', 'cu']
mimetypes = ['text/x-cuda']
function_qualifiers = set(('__device__', '__global__', '__host__',
'__noinline__', '__forceinline__'))
variable_qualifiers = set(('__device__', '__constant__', '__shared__',
'__restrict__'))
vector_types = set(('char1', 'uchar1', 'char2', 'uchar2', 'char3', 'uchar3',
'char4', 'uchar4','short1', 'ushort1','short2', 'ushort2',
'short3', 'ushort3','short4', 'ushort4', 'int1', 'uint1',
'int2', 'uint2', 'int3', 'uint3', 'int4', 'uint4', 'long1',
'ulong1', 'long2', 'ulong2', 'long3', 'ulong3', 'long4',
'ulong4', 'longlong1', 'ulonglong1', 'longlong2',
'ulonglong2', 'float1', 'float2', 'float3', 'float4',
'double1', 'double2', 'dim3'))
variables = set(('gridDim', 'blockIdx', 'blockDim', 'threadIdx', 'warpSize'))
functions = set(('__threadfence_block', '__threadfence', '__threadfence_system',
'__syncthreads', '__syncthreads_count', '__syncthreads_and',
'__syncthreads_or'))
execution_confs = set(('<<<', '>>>'))
def get_tokens_unprocessed(self, text):
for index, token, value in CLexer.get_tokens_unprocessed(self, text):
if token is Name:
if value in self.variable_qualifiers:
token = Keyword.Type
elif value in self.vector_types:
token = Keyword.Type
elif value in self.variables:
token = Name.Builtin
elif value in self.execution_confs:
token = Keyword.Pseudo
elif value in self.function_qualifiers:
token = Keyword.Reserved
elif value in self.functions:
token = Name.Function
yield index, token, value
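# Sketch of driving one of these lexers through the standard pygments API
# (assumes pygments is installed; works the same for any lexer in this module):
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     print(highlight('__global__ void k() {}', CudaLexer(), TerminalFormatter()))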
class SwigLexer(CppLexer):
"""
For `SWIG <http://www.swig.org/>`_ source code.
.. versionadded:: 2.0
"""
name = 'SWIG'
aliases = ['swig']
filenames = ['*.swg', '*.i']
mimetypes = ['text/swig']
priority = 0.04 # Lower than C/C++ and Objective C/C++
tokens = {
'statements': [
# SWIG directives
(r'(%[a-z_][a-z0-9_]*)', Name.Function),
# Special variables
(r'\$\**\&?\w+', Name),
# Stringification / additional preprocessor directives
(r'##*[a-zA-Z_]\w*', Comment.Preproc),
inherit,
],
}
# This is a far from complete set of SWIG directives
swig_directives = set((
# Most common directives
'%apply', '%define', '%director', '%enddef', '%exception', '%extend',
'%feature', '%fragment', '%ignore', '%immutable', '%import', '%include',
'%inline', '%insert', '%module', '%newobject', '%nspace', '%pragma',
'%rename', '%shared_ptr', '%template', '%typecheck', '%typemap',
# Less common directives
'%arg', '%attribute', '%bang', '%begin', '%callback', '%catches', '%clear',
'%constant', '%copyctor', '%csconst', '%csconstvalue', '%csenum',
'%csmethodmodifiers', '%csnothrowexception', '%default', '%defaultctor',
'%defaultdtor', '%defined', '%delete', '%delobject', '%descriptor',
'%exceptionclass', '%exceptionvar', '%extend_smart_pointer', '%fragments',
'%header', '%ifcplusplus', '%ignorewarn', '%implicit', '%implicitconv',
'%init', '%javaconst', '%javaconstvalue', '%javaenum', '%javaexception',
'%javamethodmodifiers', '%kwargs', '%luacode', '%mutable', '%naturalvar',
'%nestedworkaround', '%perlcode', '%pythonabc', '%pythonappend',
'%pythoncallback', '%pythoncode', '%pythondynamic', '%pythonmaybecall',
'%pythonnondynamic', '%pythonprepend', '%refobject', '%shadow', '%sizeof',
'%trackobjects', '%types', '%unrefobject', '%varargs', '%warn',
'%warnfilter'))
def analyse_text(text):
rv = 0
# Search for SWIG directives, which are conventionally at the beginning of
# a line. The probability of them being within a line is low, so let another
# lexer win in this case.
matches = re.findall(r'^\s*(%[a-z_][a-z0-9_]*)', text, re.M)
for m in matches:
if m in SwigLexer.swig_directives:
rv = 0.98
break
else:
rv = 0.91 # Fraction higher than MatlabLexer
return rv
class MqlLexer(CppLexer):
"""
For `MQL4 <http://docs.mql4.com/>`_ and
`MQL5 <http://www.mql5.com/en/docs>`_ source code.
.. versionadded:: 2.0
"""
name = 'MQL'
aliases = ['mql', 'mq4', 'mq5', 'mql4', 'mql5']
filenames = ['*.mq4', '*.mq5', '*.mqh']
mimetypes = ['text/x-mql']
tokens = {
'statements': [
(words(_mql_builtins.keywords, suffix=r'\b'), Keyword),
(words(_mql_builtins.c_types, suffix=r'\b'), Keyword.Type),
(words(_mql_builtins.types, suffix=r'\b'), Name.Function),
(words(_mql_builtins.constants, suffix=r'\b'), Name.Constant),
(words(_mql_builtins.colors, prefix='(clr)?', suffix=r'\b'),
Name.Constant),
inherit,
],
}
class ArduinoLexer(CppLexer):
"""
For `Arduino(tm) <https://arduino.cc/>`_ source.
This is an extension of the CppLexer, as the Arduino® Language is a superset
of C++
.. versionadded:: 2.1
"""
name = 'Arduino'
aliases = ['arduino']
filenames = ['*.ino']
mimetypes = ['text/x-arduino']
# Language sketch main structure functions
structure = set(('setup', 'loop'))
# Language operators
operators = set(('not', 'or', 'and', 'xor'))
# Language 'variables'
variables = set((
'DIGITAL_MESSAGE', 'FIRMATA_STRING', 'ANALOG_MESSAGE', 'REPORT_DIGITAL',
'REPORT_ANALOG', 'INPUT_PULLUP', 'SET_PIN_MODE', 'INTERNAL2V56', 'SYSTEM_RESET',
'LED_BUILTIN', 'INTERNAL1V1', 'SYSEX_START', 'INTERNAL', 'EXTERNAL', 'HIGH',
'LOW', 'INPUT', 'OUTPUT', 'INPUT_PULLUP', 'LED_BUILTIN', 'true', 'false',
'void', 'boolean', 'char', 'unsigned char', 'byte', 'int', 'unsigned int',
'word', 'long', 'unsigned long','short', 'float', 'double','string', 'String',
'array','static', 'volatile', 'const', 'boolean', 'byte', 'word','string',
'String', 'array', 'int', 'float', 'private', 'char', 'virtual', 'operator',
'sizeof', 'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t', 'int8_t', 'int16_t',
'int32_t', 'int64_t', 'dynamic_cast', 'typedef', 'const_cast', 'const',
'struct','static_cast', 'union', 'unsigned', 'long', 'volatile','static',
'protected', 'bool', 'public', 'friend', 'auto', 'void', 'enum', 'extern',
'class','short','reinterpret_cast', 'double','register', 'explicit',
'signed', 'inline', 'delete', '_Bool', 'complex', '_Complex', '_Imaginary',
'atomic_bool', 'atomic_char', 'atomic_schar', 'atomic_uchar', 'atomic_short',
'atomic_ushort', 'atomic_int', 'atomic_uint', 'atomic_long', 'atomic_ulong',
'atomic_llong', 'atomic_ullong', 'PROGMEM'))
# Language shipped functions and classes
functions = set((
'KeyboardController', 'MouseController', 'SoftwareSerial', 'EthernetServer',
'EthernetClient', 'LiquidCrystal', 'RobotControl', 'GSMVoiceCall',
'EthernetUDP', 'EsploraTFT', 'HttpClient', 'RobotMotor', 'WiFiClient',
'GSMScanner', 'FileSystem', 'Scheduler', 'GSMServer', 'YunClient', 'YunServer',
'IPAddress', 'GSMClient', 'GSMModem', 'Keyboard', 'Ethernet', 'Console',
'GSMBand', 'Esplora', 'Stepper', 'Process', 'WiFiUDP', 'GSM_SMS', 'Mailbox',
'USBHost', 'Firmata', 'PImage', 'Client', 'Server', 'GSMPIN', 'FileIO',
'Bridge', 'Serial', 'EEPROM', 'Stream', 'Mouse', 'Audio', 'Servo', 'File',
'Task', 'GPRS', 'WiFi', 'Wire', 'TFT', 'GSM', 'SPI', 'SD',
'runShellCommandAsynchronously', 'analogWriteResolution',
'retrieveCallingNumber', 'printFirmwareVersion', 'analogReadResolution',
'sendDigitalPortPair', 'noListenOnLocalhost','readJoystickButton',
'setFirmwareVersion','readJoystickSwitch','scrollDisplayRight',
'getVoiceCallStatus','scrollDisplayLeft', 'writeMicroseconds',
'delayMicroseconds', 'beginTransmission', 'getSignalStrength',
'runAsynchronously', 'getAsynchronously', 'listenOnLocalhost',
'getCurrentCarrier','readAccelerometer','messageAvailable',
'sendDigitalPorts', 'lineFollowConfig', 'countryNameWrite', 'runShellCommand',
'readStringUntil','rewindDirectory','readTemperature','setClockDivider',
'readLightSensor', 'endTransmission', 'analogReference', 'detachInterrupt',
'countryNameRead', 'attachInterrupt', 'encryptionType','readBytesUntil',
'robotNameWrite','readMicrophone', 'robotNameRead', 'cityNameWrite',
'userNameWrite','readJoystickY','readJoystickX','mouseReleased',
'openNextFile','scanNetworks', 'noInterrupts', 'digitalWrite', 'beginSpeaker',
'mousePressed', 'isActionDone','mouseDragged', 'displayLogos', 'noAutoscroll',
'addParameter','remoteNumber', 'getModifiers', 'keyboardRead', 'userNameRead',
'waitContinue', 'processInput', 'parseCommand', 'printVersion','readNetworks',
'writeMessage', 'blinkVersion', 'cityNameRead','readMessage','setDataMode',
'parsePacket', 'isListening','setBitOrder', 'beginPacket', 'isDirectory',
'motorsWrite', 'drawCompass', 'digitalRead', 'clearScreen','serialEvent',
'rightToLeft','setTextSize', 'leftToRight','requestFrom', 'keyReleased',
'compassRead', 'analogWrite', 'interrupts', 'WiFiServer', 'disconnect',
'playMelody', 'parseFloat', 'autoscroll', 'getPINUsed','setPINUsed',
'setTimeout','sendAnalog','readSlider', 'analogRead', 'beginWrite',
'createChar','motorsStop', 'keyPressed', 'tempoWrite','readButton',
'subnetMask', 'debugPrint','macAddress', 'writeGreen', 'randomSeed',
'attachGPRS','readString','sendString','remotePort','releaseAll',
'mouseMoved', 'background', 'getXChange', 'getYChange', 'answerCall',
'getResult', 'voiceCall', 'endPacket', 'constrain', 'getSocket', 'writeJSON',
'getButton', 'available', 'connected', 'findUntil','readBytes', 'exitValue',
'readGreen', 'writeBlue','startLoop', 'IPAddress', 'isPressed','sendSysex',
'pauseMode', 'gatewayIP','setCursor', 'getOemKey', 'tuneWrite', 'noDisplay',
'loadImage','switchPIN', 'onRequest', 'onReceive', 'changePIN', 'playFile',
'noBuffer', 'parseInt', 'overflow', 'checkPIN', 'knobRead', 'beginTFT',
'bitClear', 'updateIR', 'bitWrite', 'position', 'writeRGB', 'highByte',
'writeRed','setSpeed','readBlue', 'noStroke','remoteIP', 'transfer',
'shutdown', 'hangCall', 'beginSMS', 'endWrite', 'attached','maintain',
'noCursor', 'checkReg', 'checkPUK','shiftOut', 'isValid','shiftIn', 'pulseIn',
'connect', 'println', 'localIP', 'pinMode', 'getIMEI', 'display', 'noBlink',
'process', 'getBand', 'running', 'beginSD', 'drawBMP', 'lowByte','setBand',
'release', 'bitRead', 'prepare', 'pointTo','readRed','setMode', 'noFill',
'remove', 'listen','stroke', 'detach', 'attach', 'noTone', 'exists', 'buffer',
'height', 'bitSet', 'circle', 'config', 'cursor', 'random', 'IRread','setDNS',
'endSMS', 'getKey','micros','millis', 'begin', 'print', 'write','ready',
'flush', 'width', 'isPIN', 'blink', 'clear', 'press','mkdir', 'rmdir', 'close',
'point', 'yield', 'image', 'BSSID', 'click', 'delay','read', 'text','move',
'peek', 'beep','rect', 'line', 'open','seek', 'fill','size', 'turn','stop',
'home', 'find','step', 'tone','sqrt', 'RSSI', 'SSID', 'end', 'bit', 'tan',
'cos','sin', 'pow','map', 'abs','max','min', 'get', 'run', 'put',
'isAlphaNumeric', 'isAlpha', 'isAscii', 'isWhitespace', 'isControl', 'isDigit',
'isGraph', 'isLowerCase', 'isPrintable', 'isPunct', 'isSpace', 'isUpperCase',
'isHexadecimalDigit'))
# do not highlight
suppress_highlight = set((
'namespace', 'template','mutable', 'using', 'asm', 'typeid',
'typename', 'this', 'alignof', 'constexpr', 'decltype', 'noexcept',
'static_assert', 'thread_local','restrict'))
def get_tokens_unprocessed(self, text):
for index, token, value in CppLexer.get_tokens_unprocessed(self, text):
if value in self.structure:
yield index, Name.Builtin, value
elif value in self.operators:
yield index, Operator, value
elif value in self.variables:
yield index, Keyword.Reserved, value
elif value in self.suppress_highlight:
yield index, Name, value
elif value in self.functions:
yield index, Name.Function, value
else:
yield index, token, value
<|end_of_text|>cdef extern from * nogil:
ctypedef const char* PetscSNESType "SNESType"
PetscSNESType SNESNEWTONLS
PetscSNESType SNESNEWTONTR
#PetscSNESType SNESPYTHON
PetscSNESType SNESNRICHARDSON
PetscSNESType SNESKSPONLY
PetscSNESType SNESKSPTRANSPOSEONLY
PetscSNESType SNESVINEWTONRSLS
PetscSNESType SNESVINEWTONSSLS
PetscSNESType SNESNGMRES
PetscSNESType SNESQN
PetscSNESType SNESSHELL
PetscSNESType SNESNGS
PetscSNESType SNESNCG
PetscSNESType SNESFAS
PetscSNESType SNESMS
PetscSNESType SNESNASM
PetscSNESType SNESANDERSON
PetscSNESType SNESASPIN
PetscSNESType SNESCOMPOSITE
PetscSNESType SNESPATCH
ctypedef enum PetscSNESNormSchedule "SNESNormSchedule":
SNES_NORM_DEFAULT
SNES_NORM_NONE
SNES_NORM_ALWAYS
SNES_NORM_INITIAL_ONLY
SNES_NORM_FINAL_ONLY
SNES_NORM_INITIAL_FINAL_ONLY
ctypedef enum PetscSNESConvergedReason "SNESConvergedReason":
# iterating
SNES_CONVERGED_ITERATING
# converged
SNES_CONVERGED_FNORM_ABS
SNES_CONVERGED_FNORM_RELATIVE
SNES_CONVERGED_SNORM_RELATIVE
SNES_CONVERGED_ITS
# diverged
SNES_DIVERGED_FUNCTION_DOMAIN
SNES_DIVERGED_FUNCTION_COUNT
SNES_DIVERGED_LINEAR_SOLVE
SNES_DIVERGED_FNORM_NAN
SNES_DIVERGED_MAX_IT
SNES_DIVERGED_LINE_SEARCH
SNES_DIVERGED_INNER
SNES_DIVERGED_LOCAL_MIN
SNES_DIVERGED_DTOL
SNES_DIVERGED_JACOBIAN_DOMAIN
SNES_DIVERGED_TR_DELTA
ctypedef int (*PetscSNESCtxDel)(void*)
ctypedef int (*PetscSNESInitialGuessFunction)(PetscSNES,
PetscVec,
void*) except PETSC_ERR_PYTHON
ctypedef int (*PetscSNESFunctionFunction)(PetscSNES,
PetscVec,
PetscVec,
void*) except PETSC_ERR_PYTHON
ctypedef int (*PetscSNESUpdateFunction)(PetscSNES,
PetscInt) except PETSC_ERR_PYTHON
ctypedef int (*PetscSNESJacobianFunction)(PetscSNES,
PetscVec,
PetscMat,
PetscMat,
void*) except PETSC_ERR_PYTHON
ctypedef int (*PetscSNESObjectiveFunction)(PetscSNES,
PetscVec,
PetscReal*,
void*) except PETSC_ERR_PYTHON
ctypedef int (*PetscSNESConvergedFunction)(PetscSNES,
PetscInt,
PetscReal,
PetscReal,
PetscReal,
PetscSNESConvergedReason*,
void*) except PETSC_ERR_PYTHON
ctypedef int (*PetscSNESMonitorFunction)(PetscSNES,
PetscInt,
PetscReal,
void*) except PETSC_ERR_PYTHON
int SNESCreate(MPI_Comm,PetscSNES*)
int SNESDestroy(PetscSNES*)
int SNESView(PetscSNES,PetscViewer)
int SNESSetType(PetscSNES,PetscSNESType)
int SNESGetType(PetscSNES,PetscSNESType*)
int SNESSetOptionsPrefix(PetscSNES,char[])
int SNESAppendOptionsPrefix(PetscSNES,char[])
int SNESGetOptionsPrefix(PetscSNES,char*[])
int SNESSetFromOptions(PetscSNES)
int SNESGetKSP(PetscSNES,PetscKSP*)
int SNESSetKSP(PetscSNES,PetscKSP)
int SNESGetDM(PetscSNES,PetscDM*)
int SNESSetDM(PetscSNES,PetscDM)
# --- FAS ---
int SNESFASSetInterpolation(PetscSNES,PetscInt,PetscMat)
int SNESFASGetInterpolation(PetscSNES,PetscInt,PetscMat*)
int SNESFASSetRestriction(PetscSNES,PetscInt,PetscMat)
int SNESFASGetRestriction(PetscSNES,PetscInt,PetscMat*)
int SNESFASSetInjection(PetscSNES,PetscInt,PetscMat)
int SNESFASGetInjection(PetscSNES,PetscInt,PetscMat*)
int SNESFASSetRScale(PetscSNES,PetscInt,PetscVec)
int SNESFASSetLevels(PetscSNES,PetscInt,MPI_Comm[])
int SNESFASGetLevels(PetscSNES,PetscInt*)
int SNESFASGetCycleSNES(PetscSNES,PetscInt,PetscSNES*)
int SNESFASGetCoarseSolve(PetscSNES,PetscSNES*)
int SNESFASGetSmoother(PetscSNES,PetscInt,PetscSNES*)
int SNESFASGetSmootherDown(PetscSNES,PetscInt,PetscSNES*)
int SNESFASGetSmootherUp(PetscSNES,PetscInt,PetscSNES*)
int SNESGetNPC(PetscSNES,PetscSNES*)
int SNESHasNPC(PetscSNES,PetscBool*)
int SNESSetNPC(PetscSNES,PetscSNES)
int SNESGetRhs(PetscSNES,PetscVec*)
int SNESGetSolution(PetscSNES,PetscVec*)
int SNESSetSolution(PetscSNES,PetscVec)
int SNESGetSolutionUpdate(PetscSNES,PetscVec*)
int SNESSetInitialGuess"SNESSetComputeInitialGuess"(PetscSNES,PetscSNESInitialGuessFunction,void*)
int SNESSetFunction(PetscSNES,PetscVec,PetscSNESFunctionFunction,void*)
int SNESGetFunction(PetscSNES,PetscVec*,void*,void**)
int SNESSetUpdate(PetscSNES,PetscSNESUpdateFunction)
int SNESSetJacobian(PetscSNES,PetscMat,PetscMat,PetscSNESJacobianFunction,void*)
int SNESGetJacobian(PetscSNES,PetscMat*,PetscMat*,PetscSNESJacobianFunction*,void**)
int SNESSetObjective(PetscSNES,PetscSNESObjectiveFunction,void*)
int SNESGetObjective(PetscSNES,PetscSNESObjectiveFunction*,void**)
int SNESComputeFunction(PetscSNES,PetscVec,PetscVec)
int SNESComputeJacobian(PetscSNES,PetscVec,PetscMat,PetscMat)
int SNESComputeObjective(PetscSNES,PetscVec,PetscReal*)
ctypedef int (*PetscSNESNGSFunction)(PetscSNES,
PetscVec,
PetscVec,
void*) except PETSC_ERR_PYTHON
int SNESSetNGS(PetscSNES,PetscSNESNGSFunction,void*)
int SNESGetNGS(PetscSNES,PetscSNESNGSFunction*,void**)
int SNESComputeNGS(PetscSNES,PetscVec,PetscVec)
int SNESSetNormSchedule(PetscSNES,PetscSNESNormSchedule)
int SNESGetNormSchedule(PetscSNES,PetscSNESNormSchedule*)
int SNESSetTolerances(PetscSNES,PetscReal,PetscReal,PetscReal,PetscInt,PetscInt)
int SNESGetTolerances(PetscSNES,PetscReal*,PetscReal*,PetscReal*,PetscInt*,PetscInt*)
int SNESSetConvergenceTest(PetscSNES,PetscSNESConvergedFunction,void*,PetscSNESCtxDel*)
int SNESConvergedDefault(PetscSNES,PetscInt,PetscReal,PetscReal,PetscReal,
PetscSNESConvergedReason*,void*) except PETSC_ERR_PYTHON
int SNESConvergedSkip(PetscSNES,PetscInt,PetscReal,PetscReal,PetscReal,
PetscSNESConvergedReason*,void*) except PETSC_ERR_PYTHON
int SNESSetConvergenceHistory(PetscSNES,PetscReal[],PetscInt[],PetscInt,PetscBool)
int SNESGetConvergenceHistory(PetscSNES,PetscReal*[],PetscInt*[],PetscInt*)
int SNESLogConvergenceHistory(PetscSNES,PetscReal,PetscInt)
int SNESMonitorSet(PetscSNES,PetscSNESMonitorFunction,void*,PetscSNESCtxDel)
int SNESMonitorCancel(PetscSNES)
int SNESMonitor(PetscSNES,PetscInt,PetscReal)
int SNESSetUp(PetscSNES)
int SNESReset(PetscSNES)
int SNESSolve(PetscSNES,PetscVec,PetscVec)
int SNESSetConvergedReason(PetscSNES,PetscSNESConvergedReason)
int SNESGetConvergedReason(PetscSNES,PetscSNESConvergedReason*)
int SNESSetIterationNumber(PetscSNES,PetscInt)
int SNESGetIterationNumber(PetscSNES,PetscInt*)
int SNESSetFunctionNorm(PetscSNES,PetscReal)
int SNESGetFunctionNorm(PetscSNES,PetscReal*)
int SNESGetLinearSolveIterations(PetscSNES,PetscInt*)
int SNESSetCountersReset(PetscSNES,PetscBool)
int SNESGetNumberFunctionEvals(PetscSNES,PetscInt*)
int SNESSetMaxNonlinearStepFailures(PetscSNES,PetscInt)
int SNESGetMaxNonlinearStepFailures(PetscSNES,PetscInt*)
int SNESGetNonlinearStepFailures(PetscSNES,PetscInt*)
int SNESSetMaxLinearSolveFailures(PetscSNES,PetscInt)
int SNESGetMaxLinearSolveFailures(PetscSNES,PetscInt*)
int SNESGetLinearSolveFailures(PetscSNES,PetscInt*)
int SNESKSPSetUseEW(PetscSNES,PetscBool)
int SNESKSPGetUseEW(PetscSNES,PetscBool*)
int SNESKSPSetParametersEW(PetscSNES,PetscInt,PetscReal,PetscReal,
PetscReal,PetscReal,PetscReal,PetscReal)
int SNESKSPGetParametersEW(PetscSNES,PetscInt*,PetscReal*,PetscReal*,
PetscReal*,PetscReal*,PetscReal*,PetscReal*)
int SNESVISetVariableBounds(PetscSNES,PetscVec,PetscVec)
#ctypedef int (*PetscSNESVariableBoundsFunction)(PetscSNES,PetscVec,PetscVec)
#int SNESVISetComputeVariableBounds(PetscSNES,PetscSNESVariableBoundsFunction)
int SNESVIGetInactiveSet(PetscSNES, PetscIS*)
int SNESCompositeGetSNES(PetscSNES,PetscInt,PetscSNES*)
int SNESCompositeGetNumber(PetscSNES,PetscInt*)
int SNESNASMGetSNES(PetscSNES,PetscInt,PetscSNES*)
int SNESNASMGetNumber(PetscSNES,PetscInt*)
int SNESPatchSetCellNumbering(PetscSNES, PetscSection)
int SNESPatchSetDiscretisationInfo(PetscSNES, PetscInt, PetscDM*, PetscInt*, PetscInt*, const PetscInt**, const PetscInt*, PetscInt, const PetscInt*, PetscInt, const PetscInt*)
int SNESPatchSetComputeOperator(PetscSNES, PetscPCPatchComputeOperator, void*)
int SNESPatchSetComputeFunction(PetscSNES, PetscPCPatchComputeFunction, void*)
int SNESPatchSetConstructType(PetscSNES, PetscPCPatchConstructType, PetscPCPatchConstructOperator, void*)
cdef extern from "custom.h" nogil:
int SNESSetUseMFFD(PetscSNES,PetscBool)
int SNESGetUseMFFD(PetscSNES,PetscBool*)
int SNESSetUseFDColoring(PetscSNES,PetscBool)
int SNESGetUseFDColoring(PetscSNES,PetscBool*)
int SNESConvergenceTestCall(PetscSNES,PetscInt,
PetscReal,PetscReal,PetscReal,
PetscSNESConvergedReason*)
ctypedef const char* PetscSNESLineSearchType "SNESLineSearchType"
PetscSNESLineSearchType SNESLINESEARCHBT
PetscSNESLineSearchType SNESLINESEARCHNLEQERR
PetscSNESLineSearchType SNESLINESEARCHBASIC
PetscSNESLineSearchType SNESLINESEARCHL2
PetscSNESLineSearchType SNESLINESEARCHCP
PetscSNESLineSearchType SNESLINESEARCHSHELL
PetscSNESLineSearchType SNESLINESEARCHNCGLINEAR
int SNESGetLineSearch(PetscSNES,PetscSNESLineSearch*)
int SNESLineSearchSetFromOptions(PetscSNESLineSearch)
int SNESLineSearchApply(PetscSNESLineSearch,PetscVec,PetscVec,PetscReal*,PetscVec)
int SNESLineSearchDestroy(PetscSNESLineSearch*)
ctypedef int (*PetscSNESPreCheckFunction)(PetscSNESLineSearch,
PetscVec,PetscVec,
PetscBool*,
void*) except PETSC_ERR_PYTHON
int SNESLineSearchSetPreCheck(PetscSNESLineSearch,PetscSNESPreCheckFunction,void*)
int SNESLineSearchGetSNES(PetscSNESLineSearch,PetscSNES*)
cdef extern from "libpetsc4py.h":
PetscSNESType SNESPYTHON
int SNESPythonSetContext(PetscSNES,void*)
int SNESPythonGetContext(PetscSNES,void**)
int SNESPythonSetType(PetscSNES,char[])
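# Rough sketch of how these declarations surface at the Python level via
# petsc4py (assumes a user-supplied residual callback and a sized Vec 'x'):
#
#     from petsc4py import PETSc
#     snes = PETSc.SNES().create()
#     f = x.duplicate()
#     snes.setFunction(residual, f)
#     snes.setFromOptions()
#     snes.solve(None, x)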
# -----------------------------------------------------------------------------
cdef inline SNES ref_SNES(PetscSNES snes):
cdef SNES ob = <SNES> SNES()
ob.snes = snes
PetscINCREF(ob.obj)
return ob
# -----------------------------------------------------------------------------
cdef int SNES_InitialGuess(
PetscSNES snes,
PetscVec x,
void* ctx,
) except PETSC_ERR_PYTHON with gil:
cdef SNES Snes = ref_SNES(snes)
cdef Vec Xvec = ref_Vec(x)
cdef object context = Snes.get_attr('__initialguess__')
if context is None and ctx != NULL: context = <object>ctx
assert context is not None and type(context) is tuple # sanity check
(initialguess, args, kargs) = context
initialguess(Snes, Xvec, *args, **kargs)
return 0
# -----------------------------------------------------------------------------
cdef int SNES_PreCheck(
PetscSNESLineSearch linesearch,
PetscVec x,
PetscVec y,
PetscBool *changed,
void* ctx
) except PETSC_ERR_PYTHON with gil:
cdef PetscSNES snes = NULL
CHKERR(SNESLineSearchGetSNES(linesearch, &snes))
cdef object b = False
cdef SNES Snes = ref_SNES(snes)
cdef Vec Xvec = ref_Vec(x)
cdef Vec Yvec = ref_Vec(y)
cdef object context = Snes.get_attr('__precheck__')
if context is None and ctx != NULL: context = <object>ctx
assert context is not None and type(context) is tuple # sanity check
(precheck, args, kargs) = context
b = precheck(Xvec, Yvec, *args, **kargs)
changed[0] = asBool(b)
return 0
# -----------------------------------------------------------------------------
cdef int SNES_Function(
PetscSNES snes,
PetscVec x,
PetscVec f,
void* ctx,
) except PETSC_ERR_PYTHON with gil:
cdef SNES Snes = ref_SNES(snes)
cdef Vec Xvec = ref_Vec(x)
cdef Vec Fvec = ref_Vec(f)
cdef object context = Snes.get_attr('__function__')
if context is None and ctx != NULL: context = <object>ctx
assert context is not None and type(context) is tuple # sanity check
(function, args, kargs) = context
function(Snes, Xvec, Fvec, *args, **kargs)
return 0
# -----------------------------------------------------------------------------
cdef int SNES_Update(
PetscSNES snes,
PetscInt its,
) except PETSC_ERR_PYTHON with gil:
cdef SNES Snes = ref_SNES(snes)
cdef object context = Snes.get_attr('__update__')
assert context is not None and type(context) is tuple # sanity check
(update, args, kargs) = context
update(Snes, toInt(its), *args, **kargs)
return 0
# -----------------------------------------------------------------------------
cdef int SNES_Jacobian(
PetscSNES snes,
PetscVec x,
PetscMat J,
PetscMat P,
void* ctx,
) except PETSC_ERR_PYTHON with gil:
cdef SNES Snes = ref_SNES(snes)
cdef Vec Xvec = ref_Vec(x)
cdef Mat Jmat = ref_Mat(J)
cdef Mat Pmat = ref_Mat(P)
cdef object context = Snes.get_attr('__jacobian__')
if context is None and ctx != NULL: context = <object>ctx
assert context is not None and type(context) is tuple # sanity check
(jacobian, args, kargs) = context
jacobian(Snes, Xvec, Jmat, Pmat, *args, **kargs)
return 0
# -----------------------------------------------------------------------------
cdef int SNES_Objective(
PetscSNES snes,
PetscVec x,
PetscReal *o,
void* ctx,
) except PETSC_ERR_PYTHON with gil:
cdef SNES Snes = ref_SNES(snes)
cdef Vec Xvec = ref_Vec(x)
cdef object context = Snes.get_attr('__objective__')
if context is None and ctx != NULL: context = <object>ctx
assert context is not None and type(context) is tuple # sanity check
(objective, args, kargs) = context
obj = objective(Snes, Xvec, *args, **kargs)
o[0] = asReal(obj)
return 0
# -----------------------------------------------------------------------------
cdef int SNES_NGS(
PetscSNES snes,
PetscVec x,
PetscVec b,
void* ctx,
) except PETSC_ERR_PYTHON with gil:
cdef SNES Snes = ref_SNES(snes)
cdef Vec Xvec = ref_Vec(x)
cdef Vec Bvec = ref_Vec(b)
cdef object context = Snes.get_attr('__ngs__')
if context is None and ctx != NULL: context = <object>ctx
assert context is not None and type(context) is tuple # sanity check
(ngs, args, kargs) = context
ngs(Snes, Xvec, Bvec, *args, **kargs)
return 0
# -----------------------------------------------------------------------------
cdef int SNES_Converged(
PetscSNES snes,
PetscInt iters,
PetscReal xnorm,
PetscReal gnorm,
PetscReal fnorm,
PetscSNESConvergedReason *r,
void* ctx,
) except PETSC_ERR_PYTHON with gil:
cdef SNES Snes = ref_SNES(snes)
cdef object it = toInt(iters)
cdef object xn = toReal(xnorm)
cdef object gn = toReal(gnorm)
cdef object fn = toReal(fnorm)
cdef object context = Snes.get_attr('__converged__')
if context is None and ctx != NULL: context = <object>ctx
assert context is not None and type(context) is tuple # sanity check
(converged, args, kargs) = context
reason = converged(Snes, it, (xn, gn, fn), *args, **kargs)
if reason is None: r[0] = SNES_CONVERGED_ITERATING
elif reason is False: r[0] = SNES_CONVERGED_ITERATING
elif reason is True: r[0] = SNES_CONVERGED_ITS # XXX?
else: r[0] = reason
return 0
# -----------------------------------------------------------------------------
cdef int SNES_Monitor(
PetscSNES snes,
PetscInt iters,
PetscReal rnorm,
void* ctx,
) except PETSC_ERR_PYTHON with gil:
cdef SNES Snes = ref_SNES(snes)
cdef object monitorlist = Snes.get_attr('__monitor__')
if monitorlist is None: return 0
cdef object it = toInt(iters)
cdef object rn = toReal(rnorm)
for (monitor, args, kargs) in monitorlist:
monitor(Snes, it, rn, *args, **kargs)
return 0
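# SNES_Monitor fans out to every (monitor, args, kargs) tuple stored on the
# SNES object. A Python-level registration sketch (petsc4py public API):
#
#     def my_monitor(snes, its, rnorm):
#         print(its, rnorm)
#     snes.setMonitor(my_monitor)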
# -----------------------------------------------------------------------------
<|end_of_text|>from __future__ import print_function
from libcpp cimport bool
from cpython.mem cimport PyMem_Malloc, PyMem_Realloc, PyMem_Free
def get_neighbors(nb_list, n, l):
"""
Returns the nodes that can be reached within 'l' steps
:param nb_list: neighbor (adjacency) list of the graph
:param n: number of nodes
:param l: maximum number of steps
:return:
"""
# cdef declarations must sit at function scope, not inside a branch
cdef int *visited
if l == 1:
pass
else:
visited = <int *> PyMem_Malloc(n * sizeof(int))
if visited == NULL:
raise MemoryError()
for i in range(n):
visited[i] = 0
# index n - 1 is always in bounds, unlike a hard-coded 5
print(visited[n - 1])
PyMem_Free(visited)
def fib(n):
"""Print the Fibonacci series up to n."""
a, b = 0, 1
while b < n:
print(b, end=' ')
a, b = b, a + b
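# Example: fib(100) prints "1 1 2 3 5 8 13 21 34 55 89"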
<|end_of_text|>import asyncio
import json
import logging
from async_timeout import timeout
from datetime import datetime, timedelta
from decimal import Decimal
from typing import Any, AsyncIterable, Dict, List, Optional, TYPE_CHECKING
import aiohttp
from aiohttp.client_exceptions import ContentTypeError
from libc.stdint cimport int64_t
from hummingbot.connector.exchange.beaxy.beaxy_api_order_book_data_source import BeaxyAPIOrderBookDataSource
from hummingbot.connector.exchange.beaxy.beaxy_auth import BeaxyAuth
from hummingbot.connector.exchange.beaxy.beaxy_constants import BeaxyConstants
from hummingbot.connector.exchange.beaxy.beaxy_in_flight_order import BeaxyInFlightOrder
from hummingbot.connector.exchange.beaxy.beaxy_misc import BeaxyIOError
from hummingbot.connector.exchange.beaxy.beaxy_order_book_tracker import BeaxyOrderBookTracker
from hummingbot.connector.exchange.beaxy.beaxy_user_stream_tracker import BeaxyUserStreamTracker
from hummingbot.connector.exchange_base cimport ExchangeBase
from hummingbot.connector.trading_rule cimport TradingRule
from hummingbot.core.clock cimport Clock
from hummingbot.core.data_type.cancellation_result import CancellationResult
from hummingbot.core.data_type.limit_order import LimitOrder
from hummingbot.core.data_type.order_book cimport OrderBook
from hummingbot.core.data_type.trade_fee import AddedToCostTradeFee
# needed by the cdef subclass below (assumed standard hummingbot location)
from hummingbot.core.data_type.transaction_tracker cimport TransactionTracker
from hummingbot.core.event.events import (
BuyOrderCompletedEvent,
BuyOrderCreatedEvent,
MarketEvent,
MarketOrderFailureEvent,
MarketTransactionFailureEvent,
OrderCancelledEvent,
OrderExpiredEvent,
OrderFilledEvent,
SellOrderCompletedEvent,
SellOrderCreatedEvent,
)
from hummingbot.core.data_type.common import OrderType, TradeType
from hummingbot.core.network_iterator import NetworkStatus
from hummingbot.core.utils.async_utils import safe_ensure_future, safe_gather
from hummingbot.core.utils.estimate_fee import estimate_fee
from hummingbot.core.utils.tracking_nonce import get_tracking_nonce
from hummingbot.logger import HummingbotLogger
if TYPE_CHECKING:
from hummingbot.client.config.config_helpers import ClientConfigAdapter
s_logger = None
s_decimal_0 = Decimal('0.0')
s_decimal_NaN = Decimal('NaN')
cdef class BeaxyExchangeTransactionTracker(TransactionTracker):
cdef:
BeaxyExchange _owner
def __init__(self, owner: BeaxyExchange):
super().__init__()
self._owner = owner
cdef c_did_timeout_tx(self, str tx_id):
TransactionTracker.c_did_timeout_tx(self, tx_id)
self._owner.c_did_timeout_tx(tx_id)
cdef class BeaxyExchange(ExchangeBase):
MARKET_BUY_ORDER_COMPLETED_EVENT_TAG = MarketEvent.BuyOrderCompleted.value
MARKET_SELL_ORDER_COMPLETED_EVENT_TAG = MarketEvent.SellOrderCompleted.value
MARKET_ORDER_CANCELED_EVENT_TAG = MarketEvent.OrderCancelled.value
MARKET_ORDER_FAILURE_EVENT_TAG = MarketEvent.OrderFailure.value
MARKET_ORDER_EXPIRED_EVENT_TAG = MarketEvent.OrderExpired.value
MARKET_ORDER_FILLED_EVENT_TAG = MarketEvent.OrderFilled.value
MARKET_BUY_ORDER_CREATED_EVENT_TAG = MarketEvent.BuyOrderCreated.value
MARKET_SELL_ORDER_CREATED_EVENT_TAG = MarketEvent.SellOrderCreated.value
API_CALL_TIMEOUT = 60.0
UPDATE_ORDERS_INTERVAL = 15.0
UPDATE_FEE_PERCENTAGE_INTERVAL = 60.0
ORDER_NOT_EXIST_CONFIRMATION_COUNT = 3
@classmethod
def logger(cls) -> HummingbotLogger:
global s_logger
if s_logger is None:
s_logger = logging.getLogger(__name__)
return s_logger
def __init__(
self,
client_config_map: "ClientConfigAdapter",
beaxy_api_key: str,
beaxy_secret_key: str,
poll_interval: float = 5.0,  # interval at which the class polls status from the REST API
trading_pairs: Optional[List[str]] = None,
trading_required: bool = True
):
super().__init__(client_config_map)
self._trading_required = trading_required
self._beaxy_auth = BeaxyAuth(beaxy_api_key, beaxy_secret_key)
self._set_order_book_tracker(BeaxyOrderBookTracker(trading_pairs=trading_pairs))
self._order_not_found_records = {}
self._user_stream_tracker = BeaxyUserStreamTracker(beaxy_auth=self._beaxy_auth)
self._ev_loop = asyncio.get_event_loop()
self._poll_notifier = asyncio.Event()
self._last_timestamp = 0
self._last_order_update_timestamp = 0
self._last_fee_percentage_update_timestamp = 0
self._poll_interval = poll_interval
self._in_flight_orders: Dict[str, BeaxyInFlightOrder] = {}
self._tx_tracker = BeaxyExchangeTransactionTracker(self)
self._trading_rules = {}
self._auth_polling_task = None
self._status_polling_task = None
self._user_stream_tracker_task = None
self._user_stream_event_listener_task = None
self._trading_rules_polling_task = None
self._shared_client = None
self._maker_fee_percentage = {}
self._taker_fee_percentage = {}
@staticmethod
async def convert_from_exchange_trading_pair(exchange_trading_pair: str) -> Optional[str]:
trading_pair = await BeaxyAPIOrderBookDataSource.trading_pair_associated_to_exchange_symbol(exchange_trading_pair)
return trading_pair
@staticmethod
async def convert_to_exchange_trading_pair(hb_trading_pair: str) -> str:
return await BeaxyAPIOrderBookDataSource.exchange_symbol_associated_to_pair(hb_trading_pair)
@property
def name(self) -> str:
"""
*required
:return: A lowercase name / id for the market. Must stay consistent with market name in global settings.
"""
return 'beaxy'
@property
def order_books(self) -> Dict[str, OrderBook]:
"""
*required
Get mapping of all the order books that are being tracked.
:return: Dict[trading_pair : OrderBook]
"""
return self.order_book_tracker.order_books
@property
def beaxy_auth(self) -> BeaxyAuth:
"""
:return: BeaxyAuth class
"""
return self._beaxy_auth
@property
def trading_rules(self) -> Dict[str, Any]:
return self._trading_rules
@property
def status_dict(self) -> Dict[str, bool]:
"""
*required
:return: a dictionary of relevant status checks.
This is used by `ready` method below to determine if a market is ready for trading.
"""
return {
'order_books_initialized': self.order_book_tracker.ready,
'account_balance': len(self._account_balances) > 0 if self._trading_required else True,
'trading_rule_initialized': len(self._trading_rules) > 0 if self._trading_required else True
}
@property
def ready(self) -> bool:
"""
*required
:return: a boolean value that indicates if the market is ready for trading
"""
return all(self.status_dict.values())
@property
def limit_orders(self) -> List[LimitOrder]:
"""
*required
:return: list of active limit orders
"""
return [
in_flight_order.to_limit_order()
for in_flight_order in self._in_flight_orders.values()
]
@property
def in_flight_orders(self) -> Dict[str, BeaxyInFlightOrder]:
return self._in_flight_orders
@property
def tracking_states(self) -> Dict[str, any]:
"""
*required
:return: Dict[client_order_id: InFlightOrder]
This is used by the MarketsRecorder class to orchestrate market classes at a higher level.
"""
return {
key: value.to_json()
for key, value in self._in_flight_orders.items()
}
def restore_tracking_states(self, saved_states: Dict[str, any]):
"""
*required
Updates inflight order statuses from API results
This is used by the MarketsRecorder class to orchestrate market classes at a higher level.
"""
self._in_flight_orders.update({
key: BeaxyInFlightOrder.from_json(value)
for key, value in saved_states.items()
})
cdef c_start(self, Clock clock, double timestamp):
"""
*required
c_start function used by top level Clock to orchestrate components of the bot
"""
self._tx_tracker.c_start(clock, timestamp)
ExchangeBase.c_start(self, clock, timestamp)
async def start_network(self):
"""
*required
Async function used by NetworkBase class to handle when a single market goes online
"""
self.logger().debug(f'Starting beaxy network. Trading required is {self._trading_required}')
self._stop_network()
self.order_book_tracker.start()
self.logger().debug('OrderBookTracker started, starting polling tasks.')
if self._trading_required:
self._auth_polling_task = safe_ensure_future(self._beaxy_auth._auth_token_polling_loop())
self._status_polling_task = safe_ensure_future(self._status_polling_loop())
self._trading_rules_polling_task = safe_ensure_future(self._trading_rules_polling_loop())
self._user_stream_tracker_task = safe_ensure_future(self._user_stream_tracker.start())
self._user_stream_event_listener_task = safe_ensure_future(self._user_stream_event_listener())
async def check_network(self) -> NetworkStatus:
try:
res = await self._api_request(http_method='GET', path_url=BeaxyConstants.TradingApi.HEALTH_ENDPOINT, is_auth_required=False)
if res['trading_server'] != 200 and res['historical_data_server'] != 200:
return NetworkStatus.STOPPED
except asyncio.CancelledError:
raise
except Exception:
self.logger().network('Error fetching Beaxy network status.', exc_info=True)
return NetworkStatus.NOT_CONNECTED
return NetworkStatus.CONNECTED
cdef c_tick(self, double timestamp):
"""
*required
Used by top level Clock to orchestrate components of the bot.
This function is called frequently with every clock tick
"""
cdef:
int64_t last_tick = <int64_t>(self._last_timestamp / self._poll_interval)
int64_t current_tick = <int64_t>(timestamp / self._poll_interval)
ExchangeBase.c_tick(self, timestamp)
if current_tick > last_tick:
if not self._poll_notifier.is_set():
self._poll_notifier.set()
self._last_timestamp = timestamp
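# Example of the bucketing above with poll_interval = 5.0: a previous
# timestamp of 12.0 falls in tick 2 and a new timestamp of 15.1 in tick 3,
# so the poll notifier fires; 13.9 would stay in tick 2 and not fire.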
def _stop_network(self):
"""
Synchronous function that handles when a single market goes offline
"""
self.order_book_tracker.stop()
if self._status_polling_task is not None:
self._status_polling_task.cancel()
if self._user_stream_tracker_task is not None:
self._user_stream_tracker_task.cancel()
if self._user_stream_event_listener_task is not None:
self._user_stream_event_listener_task.cancel()
if self._trading_rules_polling_task is not None:
self._trading_rules_polling_task.cancel()
self._status_polling_task = self._user_stream_tracker_task = \
self._user_stream_event_listener_task = None
async def list_orders(self) -> List[Any]:
"""
        Gets the user's open and recently closed orders via the rest API
        :returns: a pair of json responses: [open_orders, closed_orders]
"""
if self._in_flight_orders:
timestamp = min(order.creation_timestamp for order in self._in_flight_orders.values())
from_date = datetime.utcfromtimestamp(timestamp)
else:
from_date = datetime.utcnow() - timedelta(minutes=5)
result = await safe_gather(
self._api_request('get', path_url=BeaxyConstants.TradingApi.OPEN_ORDERS_ENDPOINT),
self._api_request('get', path_url=BeaxyConstants.TradingApi.CLOSED_ORDERS_ENDPOINT.format(
from_date=from_date.strftime('%Y-%m-%dT%H:%M:%SZ')
)),
)
return result
async def _update_order_status(self):
"""
        Polls the rest API for the latest order statuses and updates the local order statuses.
"""
cdef:
double current_timestamp = self._current_timestamp
if current_timestamp - self._last_order_update_timestamp <= self.UPDATE_ORDERS_INTERVAL:
return
tracked_orders = list(self._in_flight_orders.values())
open_orders, closed_orders = await self.list_orders()
open_order_dict = {entry['order_id']: entry for entry in open_orders}
close_order_dict = {entry['order_id']: entry for entry in closed_orders}
for tracked_order in tracked_orders:
client_order_id = tracked_order.client_order_id
# Do nothing, if the order has already been cancelled or has failed
if client_order_id not in self._in_flight_orders:
continue
# get last exchange_order_id with no blocking
exchange_order_id = self._in_flight_orders[client_order_id].exchange_order_id
if exchange_order_id is None:
continue
open_order = open_order_dict.get(exchange_order_id)
closed_order = close_order_dict.get(exchange_order_id)
order_update = closed_order or open_order
if not open_order and not closed_order:
self._order_not_found_records[client_order_id] = self._order_not_found_records.get(client_order_id, 0) + 1
if self._order_not_found_records[client_order_id] < self.ORDER_NOT_EXIST_CONFIRMATION_COUNT:
                    # Wait until the order-not-found error has repeated a few times before actually treating the order as gone
continue
self.logger().info(
                    f'The tracked order {client_order_id} has not been found on Beaxy for the last day. '
                    f'(retried {self._order_not_found_records[client_order_id]} times.) Removing from tracking.'
)
tracked_order.last_state = 'CLOSED'
self.c_trigger_event(
self.MARKET_ORDER_CANCELED_EVENT_TAG,
OrderCancelledEvent(self._current_timestamp, client_order_id)
)
self.c_stop_tracking_order(client_order_id)
del self._order_not_found_records[client_order_id]
continue
# Update the tracked order
tracked_order.last_state = order_update['order_status']
if order_update['filled_size']:
execute_price = Decimal(str(order_update['limit_price'] if order_update['limit_price'] else order_update['average_price']))
execute_amount_diff = Decimal(str(order_update['filled_size'])) - tracked_order.executed_amount_base
# Emit event if executed amount is greater than 0.
if execute_amount_diff > s_decimal_0:
                    tracked_order.executed_amount_base += execute_amount_diff
tracked_order.executed_amount_quote += execute_amount_diff * execute_price
order_type_description = tracked_order.order_type_description
order_filled_event = OrderFilledEvent(
self._current_timestamp,
tracked_order.client_order_id,
tracked_order.trading_pair,
tracked_order.trade_type,
tracked_order.order_type,
execute_price,
execute_amount_diff,
self.c_get_fee(
tracked_order.base_asset,
tracked_order.quote_asset,
tracked_order.order_type,
tracked_order.trade_type,
execute_price,
execute_amount_diff,
),
exchange_trade_id=str(int(self._time() * 1e6)),
)
self.logger().info(f'Filled {execute_amount_diff} out of {tracked_order.amount} of the '
f'{order_type_description} order {client_order_id}.')
self.c_trigger_event(self.MARKET_ORDER_FILLED_EVENT_TAG, order_filled_event)
if tracked_order.is_done:
if not tracked_order.is_failure and not tracked_order.is_cancelled:
new_confirmed_amount = Decimal(str(order_update['size']))
execute_amount_diff = new_confirmed_amount - tracked_order.executed_amount_base
execute_price = Decimal(str(order_update['limit_price'] if order_update['limit_price'] else order_update['average_price']))
# Emit event if executed amount is greater than 0.
if execute_amount_diff > s_decimal_0:
                        tracked_order.executed_amount_base = new_confirmed_amount
tracked_order.executed_amount_quote += execute_amount_diff * execute_price
order_type_description = tracked_order.order_type_description
order_filled_event = OrderFilledEvent(
self._current_timestamp,
tracked_order.client_order_id,
tracked_order.trading_pair,
tracked_order.trade_type,
tracked_order.order_type,
execute_price,
execute_amount_diff,
self.c_get_fee(
tracked_order.base_asset,
tracked_order.quote_asset,
tracked_order.order_type,
tracked_order.trade_type,
execute_price,
execute_amount_diff,
),
exchange_trade_id=str(int(self._time() * 1e6)),
)
self.logger().info(f'Filled {execute_amount_diff} out of {tracked_order.amount} of the '
f'{order_type_description} order {client_order_id}.')
self.c_trigger_event(self.MARKET_ORDER_FILLED_EVENT_TAG, order_filled_event)
if tracked_order.trade_type == TradeType.BUY:
self.logger().info(f'The market buy order {tracked_order.client_order_id} has completed '
f'according to order status API.')
self.c_trigger_event(self.MARKET_BUY_ORDER_COMPLETED_EVENT_TAG,
BuyOrderCompletedEvent(self._current_timestamp,
tracked_order.client_order_id,
tracked_order.base_asset,
tracked_order.quote_asset,
tracked_order.executed_amount_base,
tracked_order.executed_amount_quote,
tracked_order.order_type))
else:
self.logger().info(f'The market sell order {tracked_order.client_order_id} has completed '
f'according to order status API.')
self.c_trigger_event(self.MARKET_SELL_ORDER_COMPLETED_EVENT_TAG,
SellOrderCompletedEvent(self._current_timestamp,
tracked_order.client_order_id,
tracked_order.base_asset,
tracked_order.quote_asset,
tracked_order.executed_amount_base,
tracked_order.executed_amount_quote,
tracked_order.order_type))
else:
self.logger().info(f'The market order {tracked_order.client_order_id} has failed/been cancelled '
f'according to order status API.')
tracked_order.last_state = 'cancelled'
self.c_trigger_event(self.MARKET_ORDER_CANCELED_EVENT_TAG,
OrderCancelledEvent(
self._current_timestamp,
tracked_order.client_order_id
))
self.c_stop_tracking_order(tracked_order.client_order_id)
self._last_order_update_timestamp = current_timestamp
async def place_order(self, order_id: str, trading_pair: str, amount: Decimal, is_buy: bool, order_type: OrderType,
price: Decimal):
"""
Async wrapper for placing orders through the rest API.
:returns: json response from the API
"""
path_url = BeaxyConstants.TradingApi.CREATE_ORDER_ENDPOINT
trading_pair = await self.convert_to_exchange_trading_pair(trading_pair)
is_limit_type = order_type.is_limit_type()
data = {
'comment': order_id,
'symbol': trading_pair,
            'order_type': 'limit' if is_limit_type else 'market',
            'side': 'buy' if is_buy else 'sell',
'size': f'{amount:f}',
'destination': 'MAXI',
}
if is_limit_type:
data['price'] = f'{price:f}'
order_result = await self._api_request('POST', path_url=path_url, data=data)
self.logger().debug(f'Set order result {order_result}')
return order_result
cdef object c_get_fee(
self,
str base_currency,
str quote_currency,
object order_type,
object order_side,
object amount,
object price,
object is_maker = None,
):
"""
*required
function to calculate fees for a particular order
:returns: TradeFee class that includes fee percentage and flat fees
"""
cdef:
object maker_fee = self._maker_fee_percentage
object taker_fee = self._taker_fee_percentage
        if is_maker is None:
            is_maker = order_type is OrderType.LIMIT_MAKER
pair = f'{base_currency}-{quote_currency}'
fees = maker_fee if is_maker else taker_fee
if pair not in fees:
self.logger().info(f'Fee for {pair} is not in fee cache')
return estimate_fee('beaxy', is_maker)
return AddedToCostTradeFee(percent=fees[pair] / Decimal(100))
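    # Illustrative example: with a cached maker fee of Decimal('0.1') percent for
    # 'BTC-USDT', a LIMIT_MAKER order returns
    # AddedToCostTradeFee(percent=Decimal('0.001')); pairs missing from the fee
    # cache fall back to estimate_fee('beaxy', is_maker).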
async def execute_buy(
self,
order_id: str,
trading_pair: str,
amount: Decimal,
order_type: OrderType,
price: Optional[Decimal] = s_decimal_0
):
"""
Function that takes strategy inputs, auto corrects itself with trading rule,
and submit an API request to place a buy order
"""
cdef:
TradingRule trading_rule = self._trading_rules[trading_pair]
decimal_amount = self.quantize_order_amount(trading_pair, amount)
decimal_price = self.quantize_order_price(trading_pair, price)
if decimal_amount < trading_rule.min_order_size:
raise ValueError(f'Buy order amount {decimal_amount} is lower than the minimum order size '
f'{trading_rule.min_order_size}.')
try:
self.c_start_tracking_order(order_id, trading_pair, order_type, TradeType.BUY, decimal_price, decimal_amount)
order_result = await self.place_order(order_id, trading_pair, decimal_amount, True, order_type, decimal_price)
exchange_order_id = order_result['order_id']
tracked_order = self._in_flight_orders.get(order_id)
if tracked_order is not None:
self.logger().info(f'Created {order_type} buy order {order_id} for {decimal_amount} {trading_pair}.')
tracked_order.update_exchange_order_id(exchange_order_id)
self.c_trigger_event(self.MARKET_BUY_ORDER_CREATED_EVENT_TAG,
BuyOrderCreatedEvent(self._current_timestamp,
order_type,
trading_pair,
decimal_amount,
decimal_price,
order_id,
tracked_order.creation_timestamp))
except asyncio.CancelledError:
raise
except Exception:
            tracked_order = self._in_flight_orders.get(order_id)
            if tracked_order is not None:
                tracked_order.last_state = 'FAILURE'
self.c_stop_tracking_order(order_id)
order_type_str = order_type.name.lower()
self.logger().network(
f'Error submitting buy {order_type_str} order to Beaxy for '
f'{decimal_amount} {trading_pair} '
f'{decimal_price}.',
exc_info=True,
app_warning_msg='Failed to submit buy order to Beaxy. Check API key and network connection.'
)
self.c_trigger_event(self.MARKET_ORDER_FAILURE_EVENT_TAG,
MarketOrderFailureEvent(
self._current_timestamp,
order_id,
order_type
))
cdef str c_buy(self, str trading_pair, object amount, object order_type=OrderType.MARKET, object price=s_decimal_0,
dict kwargs={}):
"""
*required
Synchronous wrapper that generates a client-side order ID and schedules the buy order.
"""
cdef:
int64_t tracking_nonce = <int64_t> get_tracking_nonce()
str order_id = str(f'HBOT-buy-{trading_pair}-{tracking_nonce}')
safe_ensure_future(self.execute_buy(order_id, trading_pair, amount, order_type, price))
return order_id
async def execute_sell(
self,
order_id: str,
trading_pair: str,
amount: Decimal,
order_type: OrderType,
price: Optional[Decimal] = s_decimal_0
):
"""
Function that takes strategy inputs, auto corrects itself with trading rule,
and submit an API request to place a sell order
"""
cdef:
TradingRule trading_rule = self._trading_rules[trading_pair]
decimal_amount = self.quantize_order_amount(trading_pair, amount)
decimal_price = self.quantize_order_price(trading_pair, price)
if decimal_amount < trading_rule.min_order_size:
raise ValueError(f'Sell order amount {decimal_amount} is lower than the minimum order size '
f'{trading_rule.min_order_size}.')
try:
self.c_start_tracking_order(order_id, trading_pair, order_type, TradeType.SELL, decimal_price, decimal_amount)
order_result = await self.place_order(order_id, trading_pair, decimal_amount, False, order_type, decimal_price)
exchange_order_id = order_result['order_id']
tracked_order = self._in_flight_orders.get(order_id)
if tracked_order is not None:
self.logger().info(f'Created {order_type} sell order {order_id} for {decimal_amount} {trading_pair}.')
tracked_order.update_exchange_order_id(exchange_order_id)
self.c_trigger_event(self.MARKET_SELL_ORDER_CREATED_EVENT_TAG,
SellOrderCreatedEvent(self._current_timestamp,
order_type,
trading_pair,
decimal_amount,
decimal_price,
order_id,
tracked_order.creation_timestamp))
except asyncio.CancelledError:
raise
except Exception:
            tracked_order = self._in_flight_orders.get(order_id)
            if tracked_order is not None:
                tracked_order.last_state = 'FAILURE'
self.c_stop_tracking_order(order_id)
order_type_str = order_type.name.lower()
self.logger().network(
f'Error submitting sell {order_type_str} order to Beaxy for '
f'{decimal_amount} {trading_pair} '
f'{decimal_price if order_type is OrderType.LIMIT else ""}.',
exc_info=True,
app_warning_msg='Failed to submit sell order to Beaxy. Check API key and network connection.'
)
self.c_trigger_event(self.MARKET_ORDER_FAILURE_EVENT_TAG,
MarketOrderFailureEvent(self._current_timestamp, order_id, order_type))
cdef str c_sell(
self,
str trading_pair,
object amount,
object order_type=OrderType.MARKET,
object price=s_decimal_0,
dict kwargs={}
):
"""
*required
Synchronous wrapper that generates a client-side order ID and schedules the sell order.
"""
cdef:
int64_t tracking_nonce = <int64_t> get_tracking_nonce()
str order_id = str(f'HBOT-sell-{trading_pair}-{tracking_nonce}')
safe_ensure_future(self.execute_sell(order_id, trading_pair, amount, order_type, price))
return order_id
async def execute_cancel(self, trading_pair: str, order_id: str):
"""
Function that makes API request to cancel an active order
"""
try:
tracked_order = self._in_flight_orders.get(order_id)
if tracked_order is None:
raise ValueError(f'Failed to cancel order - {order_id}. Order not found.')
path_url = BeaxyConstants.TradingApi.DELETE_ORDER_ENDPOINT.format(id=tracked_order.exchange_order_id)
cancel_result = await self._api_request('delete', path_url=path_url)
return order_id
except asyncio.CancelledError:
raise
except BeaxyIOError as e:
if e.result and 'Active order not found or already cancelled.' in e.result['items']:
# The order was never there to begin with. So cancelling it is a no-op but semantically successful.
self.c_stop_tracking_order(order_id)
self.c_trigger_event(self.MARKET_ORDER_CANCELED_EVENT_TAG,
OrderCancelledEvent(self._current_timestamp, order_id))
return order_id
except IOError as ioe:
self.logger().warning(ioe)
except Exception as e:
self.logger().network(
f'Failed to cancel order {order_id}: ',
exc_info=True,
app_warning_msg=f'Failed to cancel the order {order_id} on Beaxy. '
f'Check API key and network connection.'
)
return None
cdef c_cancel(self, str trading_pair, str order_id):
"""
*required
Synchronous wrapper that schedules cancelling an order.
"""
safe_ensure_future(self.execute_cancel(trading_pair, order_id))
return order_id
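    # Illustrative flow: `oid = market.c_buy('BTC-USDT', amount)` followed later
    # by `market.c_cancel('BTC-USDT', oid)` schedules execute_cancel on the event
    # loop; MARKET_ORDER_CANCELED fires once the REST call (or the
    # order-not-found fallback) confirms the cancellation.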
async def cancel_all(self, timeout_seconds: float) -> List[CancellationResult]:
"""
*required
Async function that cancels all active orders.
Used by bot's top level stop and exit commands (cancelling outstanding orders on exit)
:returns: List of CancellationResult which indicates whether each order is successfully cancelled.
"""
incomplete_orders = [o for o in self._in_flight_orders.values() if not o.is_done]
tasks = [self.execute_cancel(o.trading_pair, o.client_order_id) for o in incomplete_orders]
order_id_set = set([o.client_order_id for o in incomplete_orders])
successful_cancellations = []
try:
async with timeout(timeout_seconds):
results = await safe_gather(*tasks, return_exceptions=True)
                for client_order_id in results:
                    # safe_gather(return_exceptions=True) may yield exceptions;
                    # only plain order ids count as successful cancellations
                    if isinstance(client_order_id, str):
                        order_id_set.remove(client_order_id)
                        successful_cancellations.append(CancellationResult(client_order_id, True))
except Exception as e:
self.logger().network(
'Unexpected error cancelling orders.',
exc_info=True,
app_warning_msg='Failed to cancel order on Beaxy exchange. Check API key and network connection.'
)
failed_cancellations = [CancellationResult(oid, False) for oid in order_id_set]
return successful_cancellations + failed_cancellations
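    # Illustrative usage: `results = await market.cancel_all(10.0)` yields one
    # CancellationResult per in-flight order; any order id still unresolved when
    # the timeout expires is reported with a False success flag.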
async def _update_trade_fees(self):
cdef:
double current_timestamp = self._current_timestamp
if current_timestamp - self._last_fee_percentage_update_timestamp <= self.UPDATE_FEE_PERCENTAGE_INTERVAL:
return
try:
res = await self._api_request('get', BeaxyConstants.TradingApi.TRADE_SETTINGS_ENDPOINT)
for symbol_data in res['symbols']:
trading_pair = f"{symbol_data['base']}-{symbol_data['term']}"
self._maker_fee_percentage[trading_pair] = Decimal(str(symbol_data['maker_fee']))
self._taker_fee_percentage[trading_pair] = Decimal(str(symbol_data['taker_fee']))
self._last_fee_percentage_update_timestamp = current_timestamp
except asyncio.CancelledError:
self.logger().warning('Got cancelled error fetching beaxy fees.')
raise
except Exception:
self.logger().network('Error fetching Beaxy trade fees.', exc_info=True,
app_warning_msg='Could not fetch Beaxy trading fees. '
'Check network connection.')
raise
async def _update_balances(self):
cdef:
dict account_info
list balances
str asset_name
set local_asset_names = set(self._account_balances.keys())
set remote_asset_names = set()
set asset_names_to_remove
account_balances = await self._api_request('get', path_url=BeaxyConstants.TradingApi.WALLETS_ENDPOINT)
for balance_entry in account_balances:
asset_name = balance_entry['currency']
available_balance = Decimal(str(balance_entry['available_balance']))
total_balance = Decimal(str(balance_entry['total_balance']))
self._account_available_balances[asset_name] = available_balance
self._account_balances[asset_name] = total_balance
remote_asset_names.add(asset_name)
asset_names_to_remove = local_asset_names.difference(remote_asset_names)
for asset_name in asset_names_to_remove:
del self._account_available_balances[asset_name]
del self._account_balances[asset_name]
async def _update_trading_rules(self):
"""
Pulls the API for trading rules (min / max order size, etc)
"""
cdef:
int64_t last_tick = <int64_t>(self._last_timestamp / 60.0)
            int64_t current_tick = <int64_t>(self._current_timestamp / 60.0)
try:
if current_tick > last_tick or len(self._trading_rules) <= 0:
product_info = await self._api_request(http_method='get', url=BeaxyConstants.PublicApi.SYMBOLS_URL, is_auth_required=False)
trading_rules_list = await self._format_trading_rules(product_info)
self._trading_rules.clear()
for trading_rule in trading_rules_list:
                    # on Beaxy all pairs are listed without a separator, so we need to convert them
trading_pair = '{}-{}'.format(*BeaxyExchange.split_trading_pair(trading_rule.trading_pair))
self._trading_rules[trading_pair] = trading_rule
except Exception:
self.logger().warning('Got exception while updating trading rules.', exc_info=True)
async def _format_trading_rules(self, market_dict: Dict[str, Any]) -> List[TradingRule]:
"""
Turns json data from API into TradingRule instances
:returns: List of TradingRule
"""
cdef:
list retval = []
for rule in market_dict:
try:
trading_pair = await self.convert_from_exchange_trading_pair(rule.get('symbol'))
# Parsing from string doesn't mess up the precision
retval.append(TradingRule(trading_pair,
min_price_increment=Decimal(str(rule.get('tickSize'))),
min_order_size=Decimal(str(rule.get('minimumQuantity'))),
max_order_size=Decimal(str(rule.get('maximumQuantity'))),
min_base_amount_increment=Decimal(str(rule.get('quantityIncrement'))),
min_quote_amount_increment=Decimal(str(rule.get('quantityIncrement'))),
max_price_significant_digits=Decimal(str(rule.get('pricePrecision')))))
except Exception:
self.logger().error(f'Error parsing the trading_pair rule {rule}. Skipping.', exc_info=True)
return retval
async def _iter_user_event_queue(self) -> AsyncIterable[Dict[str, Any]]:
while True:
try:
yield await self._user_stream_tracker.user_stream.get()
except asyncio.CancelledError:
raise
except Exception:
self.logger().network(
'Unknown error. Retrying after 1 seconds.',
exc_info=True,
app_warning_msg='Could not fetch user events from Beaxy. Check API key and network connection.'
)
await asyncio.sleep(1.0)
async def _user_stream_event_listener(self):
async for msg_type, event_message in self._iter_user_event_queue():
try:
if msg_type == BeaxyConstants.UserStream.BALANCE_MESSAGE:
if event_message['type'] == 'update':
msgs = [event_message['data']]
                    elif event_message['type'] == 'snapshot':
msgs = event_message['data']
for msg in msgs:
asset_name = msg['currency']
available_balance = Decimal(str(msg['available_balance']))
total_balance = Decimal(str(msg['total_balance']))
self._account_available_balances[asset_name] = available_balance
self._account_balances[asset_name] = total_balance
elif msg_type == BeaxyConstants.UserStream.ORDER_MESSAGE:
order = event_message['data']
exchange_order_id = order['order_id']
client_order_id = order['comment']
order_status = order['order_status']
if client_order_id is None:
continue
tracked_order = self._in_flight_orders.get(client_order_id)
if tracked_order is None:
                        self.logger().debug(f'Did not find order with id {client_order_id}')
continue
if not tracked_order.exchange_order_id:
tracked_order.exchange_order_id = exchange_order_id
execute_price = s_decimal_0
execute_amount_diff = s_decimal_0
if order_status == 'partially_filled':
order_filled_size = Decimal(str(order['trade_size']))
execute_price = Decimal(str(order['trade_price']))
execute_amount_diff = order_filled_size - tracked_order.executed_amount_base
if execute_amount_diff > s_decimal_0:
tracked_order.executed_amount_base = order_filled_size
tracked_order.executed_amount_quote += Decimal(execute_amount_diff * execute_price)
self.logger().info(f'Filled {execute_amount_diff} out of {tracked_order.amount} of the '
f'{tracked_order.order_type_description} order {tracked_order.client_order_id}')
self.c_trigger_event(self.MARKET_ORDER_FILLED_EVENT_TAG,
OrderFilledEvent(
self._current_timestamp,
tracked_order.client_order_id,
tracked_order.trading_pair,
tracked_order.trade_type,
tracked_order.order_type,
execute_price,
execute_amount_diff,
self.c_get_fee(
tracked_order.base_asset,
tracked_order.quote_asset,
tracked_order.order_type,
tracked_order.trade_type,
execute_price,
execute_amount_diff,
),
exchange_trade_id=str(int(self._time() * 1e6)),
))
elif order_status == 'completely_filled':
new_confirmed_amount = Decimal(str(order['size']))
execute_amount_diff = new_confirmed_amount - tracked_order.executed_amount_base
execute_price = Decimal(str(order['limit_price'] if order['limit_price'] else order['average_price']))
# Emit event if executed amount is greater than 0.
if execute_amount_diff > s_decimal_0:
                            tracked_order.executed_amount_base = new_confirmed_amount
tracked_order.executed_amount_quote += execute_amount_diff * execute_price
order_type_description = tracked_order.order_type_description
order_filled_event = OrderFilledEvent(
self._current_timestamp,
tracked_order.client_order_id,
tracked_order.trading_pair,
tracked_order.trade_type,
tracked_order.order_type,
execute_price,
execute_amount_diff,
self.c_get_fee(
tracked_order.base_asset,
tracked_order.quote_asset,
tracked_order.order_type,
tracked_order.trade_type,
execute_price,
execute_amount_diff,
),
exchange_trade_id=str(int(self._time() * 1e6)),
)
self.logger().info(f'Filled {execute_amount_diff} out of {tracked_order.amount} of the '
f'{order_type_description} order {client_order_id}.')
self.c_trigger_event(self.MARKET_ORDER_FILLED_EVENT_TAG, order_filled_event)
if tracked_order.trade_type == TradeType.BUY:
self.logger().info(f'The market buy order {tracked_order.client_order_id} has completed '
f'according to Beaxy user stream.')
self.c_trigger_event(self.MARKET_BUY_ORDER_COMPLETED_EVENT_TAG,
BuyOrderCompletedEvent(self._current_timestamp,
tracked_order.client_order_id,
tracked_order.base_asset,
tracked_order.quote_asset,
tracked_order.executed_amount_base,
tracked_order.executed_amount_quote,
tracked_order.order_type))
else:
self.logger().info(f'The market sell order {tracked_order.client_order_id} has completed '
f'according to Beaxy user stream.')
self.c_trigger_event(self.MARKET_SELL_ORDER_COMPLETED_EVENT_TAG,
SellOrderCompletedEvent(self._current_timestamp,
tracked_order.client_order_id,
tracked_order.base_asset,
tracked_order.quote_asset,
tracked_order.executed_amount_base,
tracked_order.executed_amount_quote,
tracked_order.order_type))
self.c_stop_tracking_order(tracked_order.client_order_id)
elif order_status == 'canceled':
tracked_order.last_state = 'canceled'
self.c_trigger_event(self.MARKET_ORDER_CANCELED_EVENT_TAG,
OrderCancelledEvent(self._current_timestamp, tracked_order.client_order_id))
self.c_stop_tracking_order(tracked_order.client_order_id)
                    elif order_status in ['rejected', 'replaced', 'suspended']:
tracked_order.last_state = order_status
self.c_trigger_event(self.MARKET_ORDER_FAILURE_EVENT_TAG,
MarketOrderFailureEvent(self._current_timestamp, tracked_order.client_order_id, tracked_order.order_type))
self.c_stop_tracking_order(tracked_order.client_order_id)
elif order_status == 'expired':
tracked_order.last_state = 'expired'
self.c_trigger_event(self.MARKET_ORDER_EXPIRED_EVENT_TAG,
OrderExpiredEvent(self._current_timestamp, tracked_order.client_order_id))
self.c_stop_tracking_order(tracked_order.client_order_id)
except Exception:
self.logger().error('Unexpected error in user stream listener loop.', exc_info=True)
await asyncio.sleep(5.0)
async def _http_client(self) -> aiohttp.ClientSession:
"""
:returns: Shared client session instance
"""
if self._shared_client is None:
self._shared_client = aiohttp.ClientSession()
return self._shared_client
async def _api_request(
self,
http_method: str,
path_url: str = None,
url: str = None,
is_auth_required: bool = True,
data: Optional[Dict[str, Any]] = None,
        custom_headers: Optional[Dict[str, str]] = None
) -> Dict[str, Any]:
"""
A wrapper for submitting API requests to Beaxy
:returns: json data from the endpoints
"""
try:
assert path_url is not None or url is not None
url = f'{BeaxyConstants.TradingApi.BASE_URL}{path_url}' if url is None else url
data_str = '' if data is None else json.dumps(data, separators=(',', ':'))
if is_auth_required:
headers = await self.beaxy_auth.generate_auth_dict(http_method, path_url, data_str)
else:
headers = {'Content-Type': 'application/json'}
if custom_headers:
                headers.update(custom_headers)  # merge caller-supplied headers (rest of this method truncated in source)
<|end_of_text|>
from MSSpectrum cimport *
from MSChromatogram cimport *
from ExperimentalSettings cimport *
from DataProcessing cimport *
from PeakFileOptions cimport *
cdef extern from "<OpenMS/FORMAT/DATAACCESS/MSDataWritingConsumer.h>" namespace "OpenMS":
cdef cppclass PlainMSDataWritingConsumer:
PlainMSDataWritingConsumer(String filename) except + nogil
# copy constructor of 'PlainMSDataWritingConsumer' is implicitly deleted because base class 'OpenMS::MSDataWritingConsumer' has a deleted copy constructor (see XMLHandler)
PlainMSDataWritingConsumer(PlainMSDataWritingConsumer &) except + nogil # wrap-ignore
void consumeSpectrum(MSSpectrum & s) except + nogil
void consumeChromatogram(MSChromatogram & c) except + nogil
void setExperimentalSettings(ExperimentalSettings& exp) except + nogil
# wrap-doc:
# Set experimental settings for the whole file
#
#
# :param exp: Experimental settings to be used for this file (from this and the first spectrum/chromatogram, the class will deduce most of the header of the mzML file)
void setExpectedSize(Size expectedSpectra, Size expectedChromatograms) except + nogil
# wrap-doc:
# Set expected size of spectra and chromatograms to be written
#
# These numbers will be written in the spectrumList and chromatogramList
# tag in the mzML file. Therefore, these will contain wrong numbers if
# the expected size is not set correctly
#
#
# :param expectedSpectra: Number of spectra expected
# :param expectedChromatograms: Number of chromatograms expected
void addDataProcessing(DataProcessing d) except + nogil
# wrap-doc:
# Optionally add a data processing method to each chromatogram and spectrum
#
# The provided DataProcessing object will be added to each chromatogram
# and spectrum written to to the mzML file
#
#
# :param d: The DataProcessing object to be added
Size getNrSpectraWritten() except + nogil # wrap-doc:Returns the number of spectra written
Size getNrChromatogramsWritten() except + nogil # wrap-doc:Returns the number of chromatograms written
void setOptions(PeakFileOptions opt) except + nogil
PeakFileOptions getOptions() except + nogil
cdef cppclass NoopMSDataWritingConsumer:
# wrap-doc:
# Consumer class that perform no operation
#
# This is sometimes necessary to fulfill the requirement of passing an
# valid MSDataWritingConsumer object or pointer but no operation is
# required
NoopMSDataWritingConsumer(String filename) except + nogil
# copy constructor of 'NoopMSDataWritingConsumer' is implicitly deleted because base class 'OpenMS::MSDataWritingConsumer' has a deleted copy constructor (see XMLHandler)
NoopMSDataWritingConsumer(NoopMSDataWritingConsumer &) except + nogil # wrap-ignore
void consumeSpectrum(MSSpectrum & s) except + nogil
void consumeChromatogram(MSChromatogram & c) except + nogil
void setExperimentalSettings(ExperimentalSettings& exp) except + nogil
void setExpectedSize(Size expectedSpectra, Size expectedChromatograms) except + nogil
void addDataProcessing(DataProcessing d) except + nogil
Size getNrSpectraWritten() except + nogil
Size getNrChromatogramsWritten() except + nogil
<|end_of_text|># distutils: extra_compile_args = -fopenmp
# distutils: extra_link_args = -fopenmp
cimport cython
cimport numpy as np
from cython.parallel cimport prange
import numpy as np
ctypedef np.float32_t dtype_t
def initialize(u):
    u[0, :] = 100  # top row
    u[:, 0] = 75  # left column
    u[:, u.shape[1] - 1] = 50  # right column (shape[1], not shape[0], for non-square grids)
@cython.boundscheck(False)
@cython.wraparound(False)
cdef bint convergence_test(dtype_t[:, ::1] u_new, dtype_t[:, ::1] u) nogil:
    cdef double eps = .0001
cdef Py_ssize_t i, j
for i in xrange(u.shape[0]):
for j in xrange(u.shape[1]):
if u_new[i, j] - u[i, j] > eps:
return True
return False
def solver(int dim):
array = np.zeros((dim, dim), dtype=np.float32)
initialize(array)
cdef dtype_t[:, ::1] u = array
cdef dtype_t[:, ::1] u_new = array.copy()
cdef bint cont = True
cdef int iteration = 0
with nogil:
while cont:
if iteration % 2 == 0: #even
solve(u, u_new, dim, dim)
else:
solve(u_new, u, dim, dim)
#test for convergence
if iteration % 200 == 0:
cont = convergence_test(u_new, u)
iteration = iteration + 1
# return the original u_new numpy array object, not a cython.memoryview
return u_new.base
@cython.boundscheck(False)
@cython.wraparound(False)
cdef void solve(dtype_t[:, ::1] u, dtype_t[:, ::1] u_new,
int dimy, int dimx) nogil:
cdef int y, x
for y in prange(1, dimy - 1):
for x in range(1, dimx - 1):
u_new[y,x] = (u[y + 1,x] + u[y - 1,x] + u[y,x + 1] + u[y,x - 1]) / 4
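# Illustrative usage (hypothetical grid size): iterate until the periodic
# convergence check passes, then inspect the steady-state field.
#
#     grid = solver(256)              # (256, 256) float32 numpy array
#     interior_mean = grid[1:-1, 1:-1].mean()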
<|end_of_text|># Present for backwards compatibility
from cpython.module cimport *
<|end_of_text|>import numpy
import time
import subprocess as sb
from orbithelpers import *
import collider
class PSystem(list):
global savefolder
def __init__(self, List, name=None, folderlocation = None):
self.savefolder = folderlocation
self.name = name
for planetary_object in List:
self.append(planetary_object)
self.getcentral()
self.marked_for_deletion = []
#self.central = [pobject for pobject in self if pobject.mass == max([plobject.mass for plobject in self])]
def getcentral(self):
# attribute currently not used
self.central = [pobject for pobject in self if pobject.mass == max([plobject.mass for plobject in self])][0]
def update(self, time, granularity, iterations=1):
for iteration in xrange(iterations):
for planetary_object in self:
#print [otherplanet.position for otherplanet in self if not otherplanet is planetary_object]
planetary_object.updatevelocity([otherplanet for otherplanet in self if not otherplanet is planetary_object], granularity)
for planetary_object in self:
planetary_object.updateposition(time, granularity)
def addplanet(self, pplanet):
self.append(pplanet)
if pplanet.color == None:
pplanet.color = hexstring(random.randrange(16**6))
self.getcentral()
def makevelocitydataagearray(self, granularity):
# dictionaries with key=planet.idnumber, entry=list
# squared distances and relative velocities are easier to calculate than plain ones
# that's why these are stored
self.velocitydataages = {item.idnumber:[granularity for otheritem in self] for item in self}
self.relvelocitiessquared = {thisobject.idnumber:[sum(abs(thisobject.velocity-otherobject.velocity)) for otherobject in self] for thisobject in self}
self.distancessquared = {thisobject.idnumber:[sum(abs(thisobject.position-otherobject.position)) for otherobject in self] for thisobject in self}
def updatearray(self, granularity, initialising=0, iterations=1):
try: a = self.accelarray
except AttributeError:
#list version - might be reactivated if it turns out dict() is slower
#self.accelarray = [[numpy.array([0,0,0]) for planobject in self] for planobject in self]
#dict version
self.accelarray = self.acceldict = {item.idnumber:[numpy.array([0,0,0]) for otherobject in self] for item in self}
# only first dimension is implemented as a dictionary; we could make a dict
# of dicts, but then we lose the ability to take sums
#print self.accelarray
for i in xrange(iterations):
            if self.marked_for_deletion:
                for entry in self.marked_for_deletion:
                    # entry is ((index, otherindex), newobject): replace the first
                    # collided object with the merged result and shrink the second
                    self[entry[0][0]] = entry[1]
                    self[entry[0][1]].mass = self[entry[0][1]].mass / 1e4
                self.marked_for_deletion = []
for index in range(len(self)):
planetary_object = self[index]
                for otherindex in range(index+1, len(self)):
otherobject = self[otherindex]
# complex condition- true if the relative speed of the two objects is such
# that they've moved more than 1/250 their distance since the last update
# if False, the last calculated value for their accelerations is re-used
# this is supposed to save computations at a low cost in terms of accuracy
# I'm unsure it actually saves much since it comes at the cost of massive
# list/dict lookup
if distance(planetary_object.position, otherobject.position) < min((distance(planetary_object.velocity, otherobject.velocity) * granularity * 5), (planetary_object.radius + otherobject.radius) * 2):
close_pass = collider.Collision(planetary_object, otherobject)
close_pass.calculate_close_pass()
if close_pass.hascollided == True:
#self[index] = close_pass.newobject
                        self.marked_for_deletion.append(((index, otherindex), close_pass.newobject))
planetary_object.velocity, otherobject.velocity = [close_pass.newobject.velocity] * 2
# do something to make sure the
else:
pass # make sure at the end, during.updatevelocityfromarray, the more precise result of close_pass is used
# however that's supposed to work
complex_condition = (# relative velocity * granularity * 10000 < distance, i.e.
self.relvelocitiessquared[planetary_object.idnumber][otherindex]
##* (granularity **2)
* (250**2)
* ((self.velocitydataages[planetary_object.idnumber][otherindex]) ** 2)
) > self.distancessquared[planetary_object.idnumber][otherindex]
#complex_condition = True
if initialising==1 or complex_condition==True:
mutualaccel = gravityaccelerationmutual(planetary_object, otherobject)
self.accelarray[planetary_object.idnumber][otherindex] = mutualaccel[0]
self.accelarray[otherobject.idnumber][index] = mutualaccel[1]
self.velocitydataages[planetary_object.idnumber][otherindex] = granularity
self.velocitydataages[otherobject.idnumber][index] = granularity
self.relvelocitiessquared[planetary_object.idnumber][otherindex] += sum(abs(planetary_object.velocity-otherobject.velocity))
self.distancessquared[planetary_object.idnumber][otherindex] += sum(abs(planetary_object.position-otherobject.position))
else:
#print "entering else branch for objects", planetary_object.name, otherobject.name
self.velocitydataages[planetary_object.idnumber][otherindex] += granularity
self.velocitydataages[otherobject.idnumber][index] += granularity
for index,planobject in enumerate(self):
self[index].updatevelocityfromarray(granularity, self.accelarray[planobject.idnumber])
self[index]
for index in range(len(self)):
currentplanet = self[index]
currentplanet.renewposition(granularity * currentplanet.velocity)
#print [(someplanet.name, distance1(someplanet.position-self.central.position) / au) for someplanet in self]
def savestatus(self, filename=None):
if filename == None:
filename = str("data_"+self.name+".txt")
self.outfilename = filename
print self.outfilename
        sb.call('touch "%s"' % (self.savefolder + "/" + self.outfilename), shell=True)
outfile = file(self.savefolder+"/"+self.outfilename, "w")
#print outfile
for planetary_object in self:
for eigenschaft in [planetary_object.name, planetary_object.mass, planetary_object.position, planetary_object.velocity]:
outfile.write(str(eigenschaft))
outfile.write("\t")
outfile.write("\n")
outfile.close()<|end_of_text|># distutils: language=c++
# distutils: include_dirs=[/home/contestant/.local/lib/python3.8/site-packages/numpy/core/include, /opt/atcoder-stl]
# cython: boundscheck=False
# cython: wraparound=False
from libcpp cimport bool
from libcpp.utility cimport pair
from cython.operator cimport preincrement
from cython.operator cimport predecrement
from cython.operator cimport dereference
cdef extern from "<set>" namespace "std" nogil:
cdef cppclass multiset[T]:
ctypedef T value_type
cppclass iterator:
T& operator*()
iterator operator++()
iterator operator--()
bint operator==(iterator)
bint operator!=(iterator)
cppclass reverse_iterator:
T& operator*()
iterator operator++()
iterator operator--()
bint operator==(reverse_iterator)
bint operator!=(reverse_iterator)
cppclass const_iterator(iterator):
pass
cppclass const_reverse_iterator(reverse_iterator):
pass
multiset() except +
multiset(multiset&) except +
iterator begin()
reverse_iterator rbegin()
size_t count(const T&)
bint empty()
iterator end()
reverse_iterator rend()
iterator find(T&)
size_t size()
iterator upper_bound(const T&)
iterator lower_bound(T&)
pair[iterator, bint] insert(const T&) except +
iterator erase(iterator)
cdef extern from *:
ctypedef long long ll "long long"
cdef class MultiSet:
cdef multiset[ll] *_thisptr
def __cinit__(self):
self._thisptr = new multiset[ll]()
cpdef int size(self):
return self._thisptr.size()
cpdef bool empty(self):
return self._thisptr.empty()
cpdef void add(self, ll x):
self._thisptr.insert(x)
cpdef void remove(self, ll x):
self._thisptr.erase(self._thisptr.find(x))
cpdef int count(self, ll x):
return self._thisptr.count(x)
cpdef ll min(self):
return dereference(self._thisptr.begin())
cpdef ll max(self):
return dereference(self._thisptr.rbegin())
def lower_bound(self, x):
cdef multiset[ll].iterator itr = self._thisptr.lower_bound(x)
if itr == self._thisptr.end():
return None
else:
return dereference(itr)
def upper_bound(self, ll x):
cdef multiset[ll].iterator itr = self._thisptr.upper_bound(x)
if itr == self._thisptr.end():
return None
else:
return dereference(itr)
def next(self, x):
if x >= self.max():
return None
cdef multiset[ll].iterator itr = self._thisptr.find(x)
cdef int c = self._thisptr.count(x)
for i in range(c):
preincrement(itr)
return dereference(itr)
cpdef prev(self, x):
if x <= self.min():
return None
cdef multiset[ll].iterator itr = self._thisptr.find(x)
predecrement(itr)
return dereference(itr)
cpdef ll pop_min(self):
cdef multiset[ll].iterator itr = self._thisptr.begin()
cdef ll ret = dereference(itr)
self._thisptr.erase(itr)
return ret
cpdef ll pop_max(self):
cdef multiset[ll].reverse_iterator itr = self._thisptr.rbegin()
cdef ll ret = dereference(itr)
self._thisptr.erase(self._thisptr.find(ret))
return ret
def __contains__(self, x):
        if self._thisptr.find(x) == self._thisptr.end():
return False
else:
return True<|end_of_text|>cdef PairedDataStruct createPDS(zStructPairedData *zpds):
"""Creates paired-data struct
Returns
-------
# PairedDataStruct class
Usage
-----
# Available only in extension scripts
"""
pd_st = PairedDataStruct()
    if zpds:
        pd_st.zpds = zpds
    # nothing to free or assign when zpds is NULL
return pd_st
cdef class PairedDataStruct:
"""Paired-Data Struct class
"""
cdef:
zStructPairedData *zpds
def __cinit__(self,*arg,**kwargs):
self.zpds=NULL
def __dealloc__(self):
if self.zpds:
zstructFree(self.zpds)
#self.zpds=NULL
cpdef tuple _shape(self):
cdef:
int data_no = 0
int curve_no = 0
if self.zpds:
curve_no = self.zpds[0].numberCurves
data_no = self.zpds[0].numberOrdinates
return (data_no,curve_no)
cpdef tuple shape(self):
return (self.data_no(),self.curve_no())
cpdef int curve_no(self):
"""
Returns
------
int: the total number of curves i.e. no of rows
"""
cdef:
int num = 0
int start_curve,end_curve
if self.zpds:
num = self.zpds[0].numberCurves
start_curve = self.zpds[0].startingCurve
            if start_curve:
                end_curve = self.zpds[0].endingCurve
                num = end_curve - start_curve + 1
return num
cpdef int data_no(self):
"""
        Returns
        -------
int: the total number of data per curve i.e. no of columns
"""
cdef:
int num = 0
int start_ord,end_ord
if self.zpds:
num = self.zpds[0].numberOrdinates
start_ord = self.zpds[0].startingOrdinate
if start_ord:
end_ord = self.zpds[0].endingOrdinate
num = end_ord - start_ord + 1
return num
def get_data(self):
"""Get paired data values
Returns
-------
x,curves and labels_list
x: cython array of x axis values which is common for all the curves
curves: multidimension cython array with each row representing a curve
labels_list: list containing names of the curves
        Notes
        -----
        * The row-column order of the underlying C array is the reverse of the
          row-column relationship used in pydsstools: in the C array, each
          row contains the data for one curve.
        * The paired-data convention is that the first row is row 1 and the
          first column is column 1, not row 0 or column 0.
        """
cdef:
int rows = self.curve_no()
int cols = self.data_no()
view.array ca_view_x = view.array(shape=(1,cols),
itemsize=sizeof(float),format='f',
allocate_buffer=False)
view.array ca_view_curves = view.array(shape=(rows,cols),
itemsize=sizeof(float),format='f',
allocate_buffer=False)
ca_view_x.data = <char *>(self.zpds[0].floatOrdinates)
ca_view_curves.data = <char *>(self.zpds[0].floatValues)
labels_list = self.labels
return ca_view_x,ca_view_curves,labels_list
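    # Illustrative usage (hypothetical PairedDataStruct instance `pd_st`,
    # numpy imported as np):
    #   x, curves, labels = pd_st.get_data()
    #   first_curve = np.asarray(curves[0])  # C-array row 0 holds curve 1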
@property
def labels(self):
cdef:
int labelsLength
bytes labels
if self.zpds:
labelsLength = self.zpds[0].labelsLength
labels = <bytes>self.zpds[0].labels[:labelsLength]
labels_list = labels.split(b"\x00")
labels_list = [x.decode() for x in labels_list if x]
return labels_list
@property
def labels_list(self):
if self.labels:
return self.labels
@property
def dataType(self):
if self.zpds:
return self.zpds[0].dataType
@property
def independent_units(self):
if self.zpds:
if self.zpds[0].unitsIndependent:
return self.zpds[0].unitsIndependent
return ''
@property
def independent_type(self):
if self.zpds:
if self.zpds[0].typeIndependent:
return self.zpds[0].typeIndependent
return ''
@property
def dependent_units(self):
if self.zpds:
if self.zpds[0].unitsDependent:
return self.zpds[0].unitsDependent
return ''
@property
def dependent_type(self):
if self.zpds:
if self.zpds[0].typeDependent:
return self.zpds[0].typeDependent
return ''
cdef class PairedDataContainer:
cdef:
public str pathname
public int curve_no
public int data_no
public str independent_units
public str independent_type
public str dependent_units
public str dependent_type
public list labels_list
public object curves
public object independent_axis
int storageFlag # 10 or 11
float [:] independent_axis_mv
float [:,::1] curves_mv # delete this after saving to dss
float *curves_ptr
readonly bytearray labels
int labelsLength
#public bytes null_separated_bytes
def __init__(self,**kwargs):
_pathname = kwargs.get('pathname','')
_curve_no = kwargs.get('curve_no',0)
_labels_list = kwargs.get('labels_list',[])
_independent_units = kwargs.get('independent_units','feet')
_independent_type = kwargs.get('independent_type','linear')
_dependent_units = kwargs.get('dependent_units','feet')
_dependent_type = kwargs.get('dependent_type','linear')
self.pathname = _pathname
self.curve_no = _curve_no
self.labels_list = _labels_list
self.independent_units = _independent_units
self.independent_type = _independent_type
self.dependent_units =_dependent_units
self.dependent_type =_dependent_type
cdef int setFloatData(self) except *:
"""
"""
#logging.debug("Setting floatValues")
if isinstance(self.curves,array.array):
if self.curves.typecode == 'f':
#self.floatValues=&self.curves_mv[0]
#self.curves_ca =
raise "Python array input not implemented for Curve Array"
else:
raise "Invalid Curve Array Values (must be 32 bit float)"
elif isinstance(self.curves,np.ndarray):
if self.curves.ndim == 2:
self.curves_mv = np.ascontiguousarray(self.curves,dtype=np.float32)
'''
if self.curves.dtype==np.float32:
self.curves_mv = self.curves
else:
self.curves_mv = self.curves.astype(np.float32)
'''
self.curves_ptr = &self.curves_mv[0,0]
else:
raise BaseException("Curves data must be 2 dimensional numpy array")
else:
raise BaseException("Invalid Curve Data")
cdef int setIndependentAxisValues(self) except *:
if isinstance(self.independent_axis,array.array):
self.independent_axis_mv = np.asarray(self.independent_axis,np.float32)
elif isinstance(self.independent_axis,np.ndarray):
self.independent_axis_mv = np.ascontiguousarray(self.independent_axis,dtype=np.float32)
'''
if self.independent_axis.dtype==np.float32:
self.independent_axis_mv = self.independent_axis
else:
self.independent_axis_mv = self.independent_axis_mv.astype(np.float32)
'''
elif isinstance(self.independent_axis,(list,tuple)):
self.independent_axis_mv = np.array(self.independent_axis,np.float32)
else:
raise "Invalid Independent axis data container"
cdef int setLabels(self,int mode = -1,int label_size = 0) except *:
# label_size is necessary for preallocation of pd only
# 0 means default allocation
# positive value gives length of label characters per curve
cdef:
int curve_no = self.curve_no # total number of pd curves
int total_label_size # length of character array allocated for pd
int lists_list_length
labels_list_length = len(self.labels_list) # not necessary for mode = 0
if mode == 0:
# preallocate mode
# allocate the character array length for the labels!!
byte_labels = []
if labels_list_length == 0:
                label_name = ' ' * label_size
label_name = label_name.encode('ascii')
for i in range(curve_no):
byte_labels.append(label_name)
else:
for i in range(curve_no):
label_name = self.labels_list[i]
label_name = '{0:<{1:d}s}'.format(label_name,label_size)[0:label_size]
byte_labels.append(label_name.encode('ascii'))
null_separated_bytes = b"\x00".join(byte_labels)+b"\x00"
# +1 for null byte separating the labels
#self.labels = bytearray([0]*total_label_size)
self.labels = bytearray(null_separated_bytes)
self.labelsLength = len(self.labels)
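            # e.g. labels ['a', 'bc'] with label_size=4 pack into
            # b'a   \x00bc  \x00' (each label padded or truncated to 4 chars,
            # null-separated, with a trailing null).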
elif mode == 1:
# single curve to preallocated pd
if labels_list_length:
label_name = self.labels_list[0]
x = '{0:<{1:d}s}'.format(label_name,label_size)[0:label_size]
if x:
if isinstance(x,str):
_x = x.encode('ascii')
elif isinstance(x,bytes):
_x=x
else:
__x = str(x)
_x = __x.encode('ascii')
null_separated_bytes = _x+b"\x00"
byte_array = bytearray(null_separated_bytes)
self.labels = byte_array
self.labelsLength = len(byte_array)
else:
                # Assigning labels does not make sense; setting labelsLength = 0 should retain the current label
self.labels = bytearray(' '.encode('ascii')+b"\x00")
self.labelsLength = 0
else:
#normal mode
byte_labels = []
if curve_no == labels_list_length:
for x in self.labels_list:
if isinstance(x,str):
_x = x.encode('ascii')
elif isinstance(x,bytes):
_x=x
else:
__x = str(x)
_x = __x.encode('ascii')
byte_labels.append(_x)
                null_separated_bytes = b"\x00".join(byte_labels) + b"\x00"
byte_array = bytearray(null_separated_bytes)
self.labels = byte_array
self.labelsLength = len(byte_array)
else:
print("WARN: number of labels does not match total curves")
cdef int setValues(self,int mode = -1, int label_size = 0) except *:
"""
mode
-----
preallocate = 0,
one curve to save in already preallocated pd = 1,
normal = any integer except the above values
"""
if mode == 0:
# preallocate pd to store curves later
# Requirements:
# pathname
# curve_no
# data_no
# independent_axis_mv
# independent_units
# independent_type
# dependent_units
# dependent_type
assert self.curve_no >=1 and self.data_no >=1, "curve_no and data_no must be > 0"
self.setLabels(mode=0, label_size = label_size)
elif mode == 1:
# Save one curve on the preallocated/normal dataset
self.setFloatData()
self.setLabels(mode=1,label_size = label_size)
else:
# normal pd
assert self.curve_no >=1 and self.data_no >=1, "curve_no and data_no must be > 0"
self.setFloatData()
assert (self.curve_no * self.data_no) == self.curves_mv.size
self.setLabels(mode=-1)
if not mode == 1:
# Except for mode ==1, set the independent axis C array
self.setIndependentAxisValues()
assert len(self.independent_axis_mv) == self.data_no
return 0
cdef PairedDataStruct preallocNewPairedData(PairedDataContainer pdc):
cdef:
zStructPairedData *zpds=NULL
PairedDataStruct pd_st
char *pathname = pdc.pathname
float *independent_axis = &pdc.independent_axis_mv[0]
int data_no = pdc.data_no
int curve_no = pdc.curve_no
char *independent_units = pdc.independent_units
char *independent_type = pdc.independent_type
char *dependent_units = pdc.dependent_units
char *dependent_type = pdc.dependent_type
zpds = zstructPdNew(pathname)
zpds[0].numberCurves = curve_no
zpds[0].numberOrdinates = data_no
zpds[0].floatOrdinates = independent_axis
zpds[0].doubleOrdinates = NULL
zpds[0].unitsIndependent = independent_units
zpds[0].typeIndependent = independent_type
zpds[0].unitsDependent = dependent_units
zpds[0].typeDependent = dependent_type
# additional check
if pdc.labelsLength>0:
zpds[0].labelsLength = pdc.labelsLength
zpds[0].labels = <char *>pdc.labels
pd_st = createPDS(zpds)
return pd_st
cdef PairedDataStruct createOnePairedData(long long *ifltab,PairedDataContainer pdc,int curve_index,int start_ord_index=0,int end_ord_index=0):
cdef:
zStructPairedData *zpds=NULL
PairedDataStruct pd_st
char *pathname = pdc.pathname
float *curves = pdc.curves_ptr
zpds = zstructPdNew(pathname)
zpds[0].startingCurve = curve_index
zpds[0].endingCurve = curve_index
if start_ord_index:
zpds[0].startingOrdinate = start_ord_index
zpds[0].endingOrdinate = end_ord_index
zpds[0].floatValues = curves
zpds[0].floatOrdinates = NULL
zpds[0].doubleOrdinates = NULL
zpds[0].doubleValues = NULL
if pdc.labelsLength>0:
zpds[0].labels=<char *>pdc.labels
zpds[0].labelsLength = pdc.labelsLength
pd_st = createPDS(zpds)
return pd_st
cdef PairedDataStruct createNewFloatPairedData(PairedDataContainer pdc):
cdef:
zStructPairedData *zpds=NULL
PairedDataStruct pd_st
char *pathname = pdc.pathname
float *independent_axis = &pdc.independent_axis_mv[0]
float *curves = pdc.curves_ptr
int data_no = pdc.data_no
int curve_no = pdc.curve_no
char *independent_units = pdc.independent_units
char *independent_type = pdc.independent_type
char *dependent_units = pdc.dependent_units
char *dependent_type = pdc.dependent_type
zpds = zstructPdNewFloats(pathname, independent_axis, curves, data_no,
curve_no, independent_units, independent_type,
dependent_units, dependent_type)
# additional check
if pdc.labelsLength>0:
zpds[0].labelsLength = pdc.labelsLength
zpds[0].labels = <char *>pdc.labels
pd_st = createPDS(zpds)
return pd_st
<|end_of_text|>from libcpp cimport bool
from cython.operator cimport dereference as deref
cdef extern from 'boost/shared_ptr.hpp' namespace 'boost':
cdef cppclass shared_ptr[T]:
shared_ptr()
shared_ptr(T*)
shared_ptr(shared_ptr[T]&)
T* get()
long use_count()
#void reset(shared_ptr[T]&)
cdef extern from 'boost/regex.hpp' namespace 'boost':
cdef cppclass match_results[T]:
match_results(T&)
# size
long size()
bool empty()
# element access:
        T& operator[](int n)  # simplified return type; boost returns const sub_match&
<|end_of_text|>
from __future__ import (absolute_import, division, print_function,
unicode_literals)
cimport numpy as np # noqa
import numpy as np
from six.moves import range
from algo_base import AlgoBase
class NMF(AlgoBase):
def __init__(self, n_factors=15, n_epochs=50, biased=False, reg_pu=.06,
reg_qi=.06, reg_bu=.02, reg_bi=.02, lr_bu=.005, lr_bi=.005,
init_low=0, init_high=1, random_state=None, verbose=False, sim_options={}):
#print('NMF init')
self.n_factors = n_factors
self.n_epochs = n_epochs
self.biased = biased
self.reg_pu = reg_pu
self.reg_qi = reg_qi
self.lr_bu = lr_bu
self.lr_bi = lr_bi
self.reg_bu = reg_bu
self.reg_bi = reg_bi
self.init_low = init_low
self.init_high = init_high
self.random_state = random_state
self.verbose = verbose
self.sim_options = sim_options
if self.init_low < 0:
raise ValueError('init_low should be greater than zero')
AlgoBase.__init__(self)
def fit(self, trainset):
AlgoBase.fit(self, trainset)
self.sgd(trainset)
return self
def sgd(self, trainset):
# user and item factors
cdef np.ndarray[np.double_t, ndim=2] pu
cdef np.ndarray[np.double_t, ndim=2] qi
# user and item biases
cdef np.ndarray[np.double_t] bu
cdef np.ndarray[np.double_t] bi
# auxiliary matrices used in optimization process
cdef np.ndarray[np.double_t, ndim=2] user_num
cdef np.ndarray[np.double_t, ndim=2] user_denom
cdef np.ndarray[np.double_t, ndim=2] item_num
cdef np.ndarray[np.double_t, ndim=2] item_denom
cdef int u, i, f
cdef double r, est, l, dot, err
cdef double reg_pu = self.reg_pu
cdef double reg_qi = self.reg_qi
cdef double reg_bu = self.reg_bu
cdef double reg_bi = self.reg_bi
cdef double lr_bu = self.lr_bu
cdef double lr_bi = self.lr_bi
cdef double global_mean = self.trainset.global_mean
# Randomly initialize user and item factors
        # Respect random_state if provided; fall back to the global RNG otherwise
        rng = np.random.RandomState(self.random_state) if self.random_state is not None else np.random.mtrand._rand
pu = rng.uniform(self.init_low, self.init_high,
size=(trainset.n_users, self.n_factors))
qi = rng.uniform(self.init_low, self.init_high,
size=(trainset.n_items, self.n_factors))
bu = np.zeros(trainset.n_users, np.double)
bi = np.zeros(trainset.n_items, np.double)
if not self.biased:
global_mean = 0
for current_epoch in range(self.n_epochs):
if self.verbose:
print("Processing epoch {}".format(current_epoch))
# (re)initialize nums and denoms to zero
user_num = np.zeros((trainset.n_users, self.n_factors))
user_denom = np.zeros((trainset.n_users, self.n_factors))
item_num = np.zeros((trainset.n_items, self.n_factors))
item_denom = np.zeros((trainset.n_items, self.n_factors))
# Compute numerators and denominators for users and items factors
for u, i, r in trainset.all_ratings():
# compute current estimation and error
dot = 0 # <q_i, p_u>
for f in range(self.n_factors):
dot += qi[i, f] * pu[u, f]
est = global_mean + bu[u] + bi[i] + dot
err = r - est
# update biases
if self.biased:
bu[u] += lr_bu * (err - reg_bu * bu[u])
bi[i] += lr_bi * (err - reg_bi * bi[i])
# compute numerators and denominators
for f in range(self.n_factors):
user_num[u, f] += qi[i, f] * r
user_denom[u, f] += qi[i, f] * est
item_num[i, f] += pu[u, f] * r
item_denom[i, f] += pu[u, f] * est
# Update user factors
for u in trainset.all_users():
n_ratings = len(trainset.ur[u])
for f in range(self.n_factors):
user_denom[u, f] += n_ratings * reg_pu * pu[u, f]
pu[u, f] *= user_num[u, f] / user_denom[u, f]
# Update item factors
for i in trainset.all_items():
n_ratings = len(trainset.ir[i])
for f in range(self.n_factors):
item_denom[i, f] += n_ratings * reg_qi * qi[i, f]
qi[i, f] *= item_num[i, f] / item_denom[i, f]
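        # Each epoch applies a Lee and Seung style multiplicative update, e.g.
        #   p_uf <- p_uf * num_uf / (denom_uf + |I_u| * reg_pu * p_uf)
        # with num_uf = sum_i q_if * r_ui and denom_uf = sum_i q_if * est_ui,
        # so factors stay non-negative as long as they start non-negative.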
self.bu = bu
self.bi = bi
self.pu = pu
self.qi = qi
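    # Illustrative usage (assumes a surprise-style `trainset` exposing inner ids):
    #   algo = NMF(n_factors=10, n_epochs=20, verbose=True)
    #   algo.fit(trainset)
    #   algo.estimate(inner_uid, inner_iid)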
def estimate(self, u, i):
#print('estimate func called')
known_user = self.trainset.knows_user(u)
known_item = self.trainset.knows_item(i)
if self.biased:
est = self.trainset.global_mean
if known_user:
est += self.bu[u]
if known_item:
est += self.bi[i]
if known_user and known_item:
est += np.dot(self.qi[i], self.pu[u])
else:
if known_user and known_item:
est = np.dot(self.qi[i], self.pu[u])
            else:
                print('User and item are unknown.')
                # fall back to the global mean so `est` is always defined
                est = self.trainset.global_mean
return est<|end_of_text|>"""Contains numerical functions written in Cython."""
def comm_success(
int cost,
int ambiguous_reference_cost,
int success_points,
double player_ambiguity_probability,
double partner_ambiguity_probability,
double partner_sum):
if partner_ambiguity_probability > 0.0:
return (
(ambiguous_reference_cost * player_ambiguity_probability) + # Times using ambiguous term
(success_points * player_ambiguity_probability * (partner_ambiguity_probability / partner_sum)) + # Times ambiguous term is understood
((cost + success_points) * (1.0 - player_ambiguity_probability)) # Times using unambiguous term (always understood)
)
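    # Worked example (illustrative): cost=-1, ambiguous_reference_cost=-1,
    # success_points=2, both ambiguity probabilities 0.5, partner_sum=1.0 gives
    # (-1 * 0.5) + (2 * 0.5 * 0.5) + ((-1 + 2) * 0.5) = 0.5 expected payoff.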
return (
(ambiguous_reference_cost * player_ambiguity_probability) + # Times using ambiguous term (cost only - partner unreceptive)
((cost + success_points) * (1.0 - player_ambiguity_probability)) # Times using unambiguous term
)<|end_of_text|>cdef extern from "objvector.h":
ctypedef struct objvector_t:
pass
ctypedef struct PyObject:
pass
objvector_t *objvector_create()
void objvector_free(objvector_t *v)
void objvector_push(objvector_t *v, object obj)
size_t objvector_size(objvector_t *v)
PyObject **objvector_items(objvector_t *v, size_t *len)
void objvector_clear(objvector_t *v)
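# Illustrative usage from a .pyx module (assumes objvector.h is on the include
# path and this .pxd is cimported):
#   cdef objvector_t *v = objvector_create()
#   objvector_push(v, some_python_object)
#   cdef size_t n = objvector_size(v)
#   objvector_free(v)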
<|end_of_text|># distutils: language=c++
"""Cython implementation of Q-iteration."""
import cython
import numpy as np
import scipy.misc
from scipy.misc import logsumexp as lse
from cython.operator cimport dereference, preincrement
from libcpp.map cimport map
from libc.math cimport fmax, fabs, exp, log
from rlutil.envs.tabular_cy cimport tabular_env
from rlutil.envs.tabular_cy.q_iteration_py import compute_value_function as compute_value_function_py
@cython.boundscheck(False)
@cython.cdivision(True)
cdef compute_value_function(double[:, :] q_values, double[:] values, int ds, int da, double ent_wt):
r"""Computes the value function by maxing over the q-values.
Args:
q_values: A dS x dA array of q-values.
values: A dS array where the result will be stored
ds: Number of states
da: Number of actions
ent_wt: Entropy weight. Default 0.
"""
cdef int s, a
cdef double max_val, total
if ent_wt > 0:
for s in range(ds):
max_val = q_values[s, 0]
for a in range(da):
max_val = fmax(max_val, q_values[s,a])
total = 0
for a in range(da):
total += exp((q_values[s, a] - max_val)/ent_wt)
values[s] = max_val + ent_wt * log(total)
else:
for s in range(ds):
max_val = q_values[s, 0]
for a in range(da):
max_val = fmax(max_val, q_values[s,a])
values[s] = max_val
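# Reference sketch (added for clarity, not in the original module): the loop above
# is a numerically stabilized, temperature-scaled logsumexp, i.e. for ent_wt > 0
#   V(s) = ent_wt * log sum_a exp(Q(s, a) / ent_wt)
# and a plain max over actions when ent_wt == 0. A NumPy equivalent for testing:
def soft_value_reference(q_values_np, double ent_wt=0.0):
    shifted = q_values_np - q_values_np.max(axis=1, keepdims=True)
    if ent_wt > 0:
        return q_values_np.max(axis=1) + ent_wt * np.log(np.exp(shifted / ent_wt).sum(axis=1))
    return q_values_np.max(axis=1)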
@cython.boundscheck(False)
@cython.cdivision(True)
cdef compute_value_function_evaluation(double[:, :] q_values, double[:] values, double[:,:] policy, int ds, int da, double ent_wt):
r"""Computes the (policy evaluation) value function
Args:
q_values: A dS x dA array of q-values.
values: A dS array where the result will be stored
ds: Number of states
da: Number of actions
ent_wt: Entropy weight. Default 0.
"""
cdef int s, a
cdef double max_val, total
if ent_wt > 0:
for s in range(ds):
total = 0
for a in range(da):
                total += policy[s, a] * (q_values[s, a] - ent_wt * log(policy[s, a] + 1e-8))
values[s] = total
else:
for s in range(ds):
exp_val = 0
for a in range(da):
exp_val += policy[s, a] * q_values[s,a]
values[s] = exp_val
@cython.boundscheck(False)
cdef double max_abs_error(double[:, :] q1, double[:, :] q2, int ds, int da):
"""Compute max absolute error between two q values for early stopping."""
cdef double max_error
max_error = 0.0
for s in range(ds):
for a in range(da):
max_error = fmax(max_error, fabs(q1[s,a]-q2[s,a]))
return max_error
@cython.boundscheck(False)
cpdef q_iteration_sparse_python(tabular_env,
reward_fn=None,
warmstart_q=None,
int num_itrs=100,
double ent_wt=0.0,
double discount=0.99,
double atol=1e-8):
"""Computes q-values using sparse q-iteration.
Args:
tabular_env: A python TabularEnv environment.
reward_fn: A scalar-valued reward function f(s, a, ns) -> reward
warmstart_q: A dS x dA array of initial q-values.
num_itrs: Number of iterations to run.
ent_wt: Entropy weight. Default 0.
discount: Discount factor.
atol: Absolute error tolerance for early stopping.
Returns:
A dS x dA array of Q-values
"""
cdef int ds, da, s, a, i, ns_idx, ns
ds = tabular_env.num_states
da = tabular_env.num_actions
if reward_fn is None:
reward_fn = tabular_env.reward
q_values_np = np.zeros((ds, da), dtype=np.float64)
if warmstart_q is not None:
q_values_np[:, :] = warmstart_q
cdef double[:, :] q_values = q_values_np
new_q_values_np = np.zeros((ds, da), dtype=np.float64)
cdef double[:, :] new_q_values = new_q_values_np
r_sa_np = np.zeros((ds, da), dtype=np.float64)
    cdef double[:, :] r_sa = r_sa_np
for s in range(ds):
for a in range(da):
r_sa[s, a] = reward_fn(s, a, 0)
v_fn_np = np.zeros((ds), dtype=np.float64)
cdef double[:] v_fn = v_fn_np
for i in range(num_itrs):
compute_value_function(q_values, v_fn, ds, da, ent_wt)
new_q_values[:, :] = 0.0
for s in range(ds):
for a in range(da):
transitions_py = tabular_env.transitions(s, a)
for ns in transitions_py:
new_q_values[s, a] += transitions_py[ns] * \
(r_sa[s, a] + discount * v_fn[ns])
if atol > 0:
diff = max_abs_error(new_q_values, q_values, ds, da)
if diff < atol:
break
q_values[:, :] = new_q_values[:, :]
return q_values_np
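# Usage sketch (``env`` is a hypothetical python TabularEnv instance):
#     q = q_iteration_sparse_python(env, num_itrs=500, discount=0.99, atol=1e-8)
#     v = q.max(axis=1)  # greedy state values once the fixed point is reached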
@cython.boundscheck(False)
cpdef softq_iteration(tabular_env.TabularEnv tabular_env,
warmstart_q=None,
int num_itrs=100,
double ent_wt=0.0,
double discount=0.99,
double atol=1e-8):
"""Computes q-values using sparse q-iteration.
Args:
tabular_env: A cython TabularEnv environment.
warmstart_q: A dS x dA array of initial q-values.
num_itrs: Number of iterations to run.
ent_wt: Entropy weight. Default 0.
discount: Discount factor.
atol: Absolute error tolerance for early stopping.
If atol < 0, this will always run for num_itrs iterations.
Returns:
A dS x dA array of Q-values
"""
cdef int ds, da, s, a, i, ns_idx, ns
ds = tabular_env.num_states
da = tabular_env.num_actions
q_values_np = np.zeros((ds, da), dtype=np.float64)
if warmstart_q is not None:
q_values_np[:, :] = warmstart_q
cdef double[:, :] q_values = q_values_np
new_q_values_np = np.zeros((ds, da), dtype=np.float64)
cdef double[:, :] new_q_values = new_q_values_np
v_fn_np = np.zeros((ds), dtype=np.float64)
cdef double[:] v_fn = v_fn_np
for i in range(num_itrs):
compute_value_function(q_values, v_fn, ds, da, ent_wt)
new_q_values[:, :] = 0.0
for s in range(ds):
for a in range(da):
transitions = tabular_env.transitions_cy(s, a)
transitions_end = transitions.end()
transitions_it = transitions.begin()
reward = tabular_env.reward(s, a, 0)
while transitions_it!= transitions_end:
ns = dereference(transitions_it).first
p = dereference(transitions_it).second
new_q_values[s, a] += p * (reward + discount * v_fn[ns])
preincrement(transitions_it)
if atol > 0:
diff = max_abs_error(new_q_values, q_values, ds, da)
if diff < atol:
break
q_values[:, :] = new_q_values[:, :]
return q_values_np
@cython.boundscheck(False)
cpdef softq_evaluation(tabular_env.TabularEnv tabular_env,
eval_qvalues,
warmstart_q=None,
int num_itrs=100,
double ent_wt=0.0,
double discount=0.99,
double atol=1e-8):
"""Computes q-values using sparse q-iteration.
Args:
tabular_env: A cython TabularEnv environment.
warmstart_q: A dS x dA array of initial q-values.
num_itrs: Number of iterations to run.
ent_wt: Entropy weight. Default 0.
discount: Discount factor.
atol: Absolute error tolerance for early stopping.
If atol < 0, this will always run for num_itrs iterations.
Returns:
A dS x dA array of Q-values
"""
cdef int ds, da, s, a, i, ns_idx, ns
ds = tabular_env.num_states
da = tabular_env.num_actions
q_values_np = np.zeros((ds, da), dtype=np.float64)
if warmstart_q is not None:
q_values_np[:, :] = warmstart_q
cdef double[:, :] q_values = q_values_np
new_q_values_np = np.zeros((ds, da), dtype=np.float64)
cdef double[:, :] new_q_values = new_q_values_np
v_fn_np = np.zeros((ds), dtype=np.float64)
cdef double[:] v_fn = v_fn_np
if ent_wt > 0:
compute_value_function(eval_qvalues, v_fn_np, ds, da, ent_wt)
        # Soft policy: pi = exp((Q - V) / ent_wt); renormalized below for safety.
        policy_np = np.exp((eval_qvalues - np.expand_dims(v_fn_np, axis=-1)) / ent_wt)
policy_np = policy_np / np.sum(policy_np, axis=1, keepdims=True)
else:
        # Greedy one-hot policy over the evaluation q-values.
        policy_np = np.eye(da)[np.argmax(eval_qvalues, axis=1)]
cdef double[:,:] policy = policy_np
for i in range(num_itrs):
compute_value_function_evaluation(q_values, v_fn, policy, ds, da, ent_wt)
new_q_values[:, :] = 0.0
for s in range(ds):
for a in range(da):
transitions = tabular_env.transitions_cy(s, a)
transitions_end = transitions.end()
transitions_it = transitions.begin()
reward = tabular_env.reward(s, a, 0)
while transitions_it!= transitions_end:
ns = dereference(transitions_it).first
p = dereference(transitions_it).second
new_q_values[s, a] += p * (reward + discount * v_fn[ns])
preincrement(transitions_it)
if atol > 0:
diff = max_abs_error(new_q_values, q_values, ds, da)
if diff < atol:
break
q_values[:, :] = new_q_values[:, :]
compute_value_function_evaluation(q_values, v_fn, policy, ds, da, ent_wt)
exp_returns = 0
for state in tabular_env.initial_state_distribution:
exp_returns += v_fn[state]
return exp_returns, q_values_np
@cython.boundscheck(False)
cpdef softq_iteration_custom_reward(tabular_env.TabularEnv tabular_env,
reward,
warmstart_q=None,
int num_itrs=100,
double ent_wt=0.0,
double discount=0.99,
double atol=1e-8):
"""Computes q-values using sparse q-iteration.
Args:
tabular_env: A cython TabularEnv environment.
reward: A dS x dA reward matrix
warmstart_q: A dS x dA array of initial q-values.
num_itrs: Number of iterations to run.
ent_wt: Entropy weight. Default 0.
discount: Discount factor.
atol: Absolute error tolerance for early stopping.
If atol < 0, this will always run for num_itrs iterations.
Returns:
A dS x dA array of Q-values
"""
cdef int ds, da, s, a, i, ns_idx, ns
ds = tabular_env.num_states
da = tabular_env.num_actions
q_values_np = np.zeros((ds, da), dtype=np.float64)
if warmstart_q is not None:
q_values_np[:, :] = warmstart_q
cdef double[:, :] q_values = q_values_np
cdef double[:,:] custom_reward = reward
new_q_values_np = np.zeros((ds, da), dtype=np.float64)
cdef double[:, :] new_q_values = new_q_values_np
v_fn_np = np.zeros((ds), dtype=np.float64)
cdef double[:] v_fn = v_fn_np
for i in range(num_itrs):
compute_value_function(q_values, v_fn, ds, da, ent_wt)
new_q_values[:, :] = 0.0
for s in range(ds):
for a in range(da):
transitions = tabular_env.transitions_cy(s, a)
transitions_end = transitions.end()
transitions_it = transitions.begin()
r = custom_reward[s, a]
while transitions_it!= transitions_end:
ns = dereference(transitions_it).first
p = dereference(transitions_it).second
new_q_values[s, a] += p * (r + discount * v_fn[ns])
preincrement(transitions_it)
if atol > 0:
diff = max_abs_error(new_q_values, q_values, ds, da)
if diff < atol:
break
q_values[:, :] = new_q_values[:, :]
return q_values_np
#@cython.cdivision(True)
cpdef get_policy(q_fn, double ent_wt=1.0):
"""Return a policy by normalizing a Q-function."""
cdef double inverse_ent = 1.0/ent_wt
value_fn = ent_wt * lse(inverse_ent * q_fn, axis=1)
adv_rew = q_fn - np.expand_dims(value_fn, axis=1)
pol_probs = np.exp(inverse_ent * adv_rew)
return pol_probs
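# Sanity sketch (illustrative values, not from the source): get_policy is the
# row-wise softmax of q_fn / ent_wt, since pol = exp((q - v) / ent_wt) with
# v = ent_wt * lse(q / ent_wt), so every row sums to one.
def _check_get_policy():
    q = np.array([[1.0, 2.0], [0.5, 0.5]])
    pi = get_policy(q, ent_wt=1.0)
    assert np.allclose(pi.sum(axis=1), 1.0)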
<|end_of_text|>## @package Keithley 2010 Control Program
# Python class for controlling Keithley 2010
import visa
import time
class Keithley2010:
## The constructor for opening one GPIB address
def __init__(self, address):
rm = visa.ResourceManager()
self.inst = rm.open_resource("GPIB0::%i::INSTR" %address)
self.inst.write("*IDN?")
print("Machine Type: %s" %self.inst.read())
def DataTaking(self, delay):
numOfReadings = 5
self.inst.write("trigger:delay %f" %(delay/numOfReadings))
self.inst.write("trace:points %d" %numOfReadings)
self.inst.write("trace:feed sense1; feed:control next")
time.sleep(delay)
self.inst.write("trace:data?")
data = self.inst.read()
item = data.split(",")
readings = []
for i in range(len(item)):
readings.append(float(item[i]))
readings.remove(min(readings))
readings.remove(max(readings))
#print(readings)
sumOfReadings = sum(readings)
maxOfReadings = max(readings)
self.average = sumOfReadings / (numOfReadings-2)
self.maxSigma = maxOfReadings - self.average
self.minSigma = self.average - min(readings)
def SetScanner(self, currentChannel):
#self.inst.write("trigger:delay 1")
self.inst.write("route:scan:internal (@%i)" %currentChannel)
#self.inst.write("route:scan:internal?")
#print(self.inst.read())
def GetValue(self):
return self.average
def GetMinSigma(self):
return self.minSigma
def GetMaxSigma(self):
return self.maxSigma
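# Usage sketch (GPIB address and channel are assumptions, not from the source):
if __name__ == "__main__":
    dmm = Keithley2010(16)   # opens GPIB0::16::INSTR and prints the *IDN? reply
    dmm.SetScanner(1)        # route channel 1 through the internal scanner card
    dmm.DataTaking(5.0)      # buffer 5 readings over ~5 s, drop the min and max
    print("%g (+%g / -%g)" % (dmm.GetValue(), dmm.GetMaxSigma(), dmm.GetMinSigma()))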
<|end_of_text|># distutils: language=c++
include "includes/grpc.pxi"
include "timespec.pxd"
include "socket.pxd"
include "callbackglue.pxd"
include "unarycall.pxd"
include "channel.pxd"
include "timer.pxd"
include "resolver.pxd"
<|end_of_text|>#cython: embedsignature=True
#cython: cdivision=True
#cython: always_allow_keywords=True
#cimport cython
include "MPI/MPI.pyx"
<|end_of_text|>cdef extern from "opencv2/core/core.hpp" namespace "cv":
cdef cppclass CvPoint2i "cv::Point2i":
Point2i() except +
        Point2i(int x, int y) except +
int x
int y
cdef cppclass CvPoint "cv::Point":
Point() except +
Point(double x, double y) except +
double x
double y
cdef cppclass CvRect "cv::Rect":
Rect() except +
Rect(int x, int y, int width, int height) except +
int x
int y
int width
int height
cdef cppclass CvMat "cv::Mat":
Mat() except +
Mat(int rows, int cols, int type) except +
void create(int rows, int cols, int type)
int rows
int cols
int dims
void* data
cdef class Point2i:
cdef CvPoint2i *this_ptr
cdef class Point:
cdef CvPoint *this_ptr
cdef class Rect:
cdef CvRect *this_ptr
cdef class Mat:
cdef CvMat *this_ptr
<|end_of_text|>def testcython(int n):
return 3.1415**n
print testcython(10)
from libc.string cimport strlen
def get_len(char *msg):
return strlen(msg)
print get_len("asdfasfd\0asdfasfd")
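# Note: strlen stops at the first embedded NUL, so the call above prints 8
# even though the Python string itself has length 17.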
import numpy as np
def testmemview(double[:,::1] mv):
''' Tests roundtrip: python object -> cython typed memoryview -> python object '''
print np.sum(mv)
from numpy import ones
testmemview(ones((10, 10)))
<|end_of_text|># cython: boundscheck=False, nonecheck=False, wraparound=False, cdivision=True
from __future__ import division
import math
import os
import random
import sys
import time
import cv2
from scipy import integrate
import libMORPH as morph
import numpy as np
from libc.math cimport ceil
cimport numpy as np
cimport cython
# from cython.parallel import prange
cdef float pi = np.pi
def MH1D(myfunc, float xmin, float xmax, int p, type):
cdef float xold, num, den, A, x1, u, xstar, stdevx
cdef long long count
cdef np.ndarray[np.float64_t, ndim = 1] X = np.zeros(p, dtype=np.float64)
cdef np.ndarray[np.int64_t, ndim = 1] XX = np.zeros(p, dtype=np.int64)
count = 0
while True:
xold = np.random.uniform(xmin, xmax)
if myfunc(xold) > 0:
break
X[count] = xold
stdevx = xmax - xmin
while count < p - 1:
while True:
xstar = np.random.normal(xold, stdevx)
if xmin <= xstar <= xmax:
break
num = myfunc(xstar)
den = myfunc(xold)
A = min(1, num / den)
x1 = xstar
u = np.random.rand()
if u <= A:
xold = xstar
count = count + 1
X[count] = x1
    if type == 1:
        XX = np.array(X, dtype=np.int64)  # cast the accepted samples X, not the zero buffer
        return XX
else:
return X
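# Usage sketch (toy target chosen for illustration): draw 10000 float samples from
# an unnormalized truncated Gaussian with the 1-D Metropolis-Hastings sampler above.
def _mh1d_demo():
    target = lambda x: np.exp(-0.5 * x * x)   # unnormalized density on [-3, 3]
    return MH1D(target, -3.0, 3.0, 10000, 0)  # type=0 -> float samples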
def MH2D(myfunc, float xmin, float xmax, float ymin, float ymax, int p, int type):
cdef float xold, yold, num, den, A, x1, y1, u, xstar, ystar, stdevx, stdevy
cdef long long count
cdef np.ndarray[np.float64_t, ndim = 1] X = np.zeros(p, dtype=np.float64)
cdef np.ndarray[np.float64_t, ndim = 1] Y = np.zeros(p, dtype=np.float64)
cdef np.ndarray[np.int64_t, ndim = 1] XX = np.zeros(p, dtype=np.int64)
cdef np.ndarray[np.int64_t, ndim = 1] YY = np.zeros(p, dtype=np.int64)
count = 0
while True:
xold = np.random.uniform(xmin, xmax)
yold = np.random.uniform(ymin, ymax)
if myfunc(xold, yold) > 0:
break
X[count] = xold
Y[count] = yold
stdevx = xmax - xmin
stdevy = ymax - ymin
while count < p - 1:
while True:
xstar = np.random.normal(xold, stdevx)
ystar = np.random.normal(yold, stdevy)
if xmin <= xstar <= xmax and ymin <= ystar <= ymax:
break
num = myfunc(xstar, ystar)
den = myfunc(xold, yold)
A = min(1, num / den)
x1, y1 = xstar, ystar
u = np.random.rand()
if u <= A:
xold, yold = xstar, ystar
count = count + 1
X[count] = x1
Y[count] = y1
if type == 1: # 1 means int
XX = np.array(X, dtype=np.int64)
YY = np.array(Y, dtype=np.int64)
return XX, YY
else: # else means float
return X, Y
def MH3D(myfunc, float xmin, float xmax, float ymin, float ymax, float zmin, float zmax, int p, type):
cdef float xold, yold, zold, num, den, A, x1, y1, z1, u, xstar, ystar, zstar, stdevx, stdevy, stdevz
cdef long long count
cdef np.ndarray[np.float64_t, ndim = 1] X = np.zeros(p, dtype=np.float64)
cdef np.ndarray[np.float64_t, ndim = 1] Y = np.zeros(p, dtype=np.float64)
cdef np.ndarray[np.float64_t, ndim = 1] Z = np.zeros(p, dtype=np.float64)
cdef np.ndarray[np.int64_t, ndim = 1] XX = np.zeros(p, dtype=np.int64)
    cdef np.ndarray[np.int64_t, ndim = 1] YY = np.zeros(p, dtype=np.int64)
cdef np.ndarray[np.int64_t, ndim = 1] ZZ = np.zeros(p, dtype=np.int64)
count = 0
while True:
xold = np.random.uniform(xmin, xmax)
yold = np.random.uniform(ymin, ymax)
zold = np.random.uniform(zmin, zmax)
if myfunc(xold, yold, zold) > 0:
break
X[count] = xold
Y[count] = yold
Z[count] = zold
stdevx = xmax - xmin
stdevy = ymax - ymin
stdevz = zmax - zmin
while count < p - 1:
while True:
xstar = np.random.normal(xold, stdevx)
ystar = np.random.normal(yold, stdevy)
zstar = np.random.normal(zold, stdevz)
if xmin <= xstar <= xmax and ymin <= ystar <= ymax\
and zmin <= zstar <= zmax:
break
num = myfunc(xstar, ystar, zstar)
den = myfunc(xold, yold, zold)
A = min(1, num / den)
x1, y1, z1 = xstar, ystar, zstar
u = np.random.rand()
if u <= A:
xold, yold, zold = xstar, ystar, zstar
count = count + 1
X[count] = x1
Y[count] = y1
Z[count] = z1
if type == 1:
XX = np.array(X, dtype=np.int64)
YY = np.array(Y, dtype=np.int64)
ZZ = np.array(Z, dtype=np.int64)
return XX, YY, ZZ
else:
return X, Y, Z
def INVCDF1D(myfunc, float xmin, float xmax, int p):
cdef int i, j
cdef float g, u, v1, v2, g1, g2, N, MIN, m, dx, sum
cdef int num = 1000
cdef np.ndarray[np.float64_t, ndim = 1] GRID = np.zeros(num, dtype=np.float64)
cdef np.ndarray[np.float64_t, ndim = 1] VALUE = np.zeros(num, dtype=np.float64)
cdef np.ndarray[np.float64_t, ndim = 1] Myfuncval = np.zeros(num, dtype=np.float64)
cdef np.ndarray[np.float64_t, ndim = 1] U = np.zeros(p, dtype=np.float64)
cdef np.ndarray[np.float64_t, ndim = 1] X = np.zeros(p, dtype=np.float64)
if xmin == xmax:
return np.ones(p) * xmin
if xmin > xmax:
xmin, xmax = xmax, xmin
U = np.random.uniform(0, 1, p)
GRID = np.linspace(xmin, xmax, num)
dx = GRID[1] - GRID[0]
N = integrate.quad(myfunc, xmin, xmax)[0]
Myfunc = np.vectorize(myfunc)
Myfuncval = Myfunc(GRID) / N
VALUE[0] = 0
for i in range(1, num):
VALUE[i] = VALUE[i - 1] + (Myfuncval[i] + Myfuncval[i - 1]) * 0.5 * dx
for j in range(p):
u = U[j]
for i in range(num - 1):
if u > VALUE[i] and u < VALUE[i + 1]:
g1 = GRID[i]
g2 = GRID[i + 1]
v1 = VALUE[i]
v2 = VALUE[i + 1]
m = (v2 - v1) / (g2 - g1)
X[j] = g1 + (u - v1) / m
break
return X
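# Comparison sketch (same hypothetical target as the MH1D demo above): the
# inverse-CDF sampler is deterministic given U and needs no burn-in, at the cost
# of tabulating a 1000-point trapezoid-rule CDF that is inverted by linear
# interpolation.
def _invcdf1d_demo():
    target = lambda x: np.exp(-0.5 * x * x)
    return INVCDF1D(target, -3.0, 3.0, 10000)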
<|end_of_text|># Copyright (c) 2016 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, unicode_literals, print_function
def fields_eq(fields):
"""Generates an ``__eq__`` method.
:param list fields:
List of fields of the object to be compared.
"""
def __eq__(self, other):
return isinstance(other, self.__class__) and all(
getattr(self, name) == getattr(other, name) for name in fields
)
return __eq__
def fields_str(cls_name, field_list, include_none=True):
"""Generates a ``__str__`` method.
:param cls_name:
Name of the class for which the method is being generated.
:param list field_list:
List of field_list of the class to be included in the output.
:param bool include_none:
Whether None attributes should be included in the output.
"""
def __str__(self):
fields = {}
for name in field_list:
# TODO use to_primitive?
value = getattr(self, name)
if include_none or value is not None:
fields[name] = value
return "%s(%r)" % (cls_name, fields)
return __str__
def to_primitive_method(type_spec):
"""Generates the ``to_primitive`` method for types given the TypeSpec."""
def to_primitive(self):
return type_spec.to_primitive(self)
return to_primitive
def from_primitive_classmethod():
"""Generates the ``from_primitive`` classmethod for types."""
@classmethod
def from_primitive(cls, prim_value):
return cls.type_spec.from_primitive(prim_value)
return from_primitive
def struct_hasher(spec):
"""Generates a ``__hash__`` method.
:param list fields:
List of fields of the object to be hashed.
"""
def __hash__(self):
return hash(
tuple(
getattr(self, field.name) for field in spec.fields
)
)
return __hash__
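# Usage sketch (hypothetical ``Point`` class, not part of this module): the
# factories above are meant to be attached as class attributes.
def _example_usage():
    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y
    Point.__eq__ = fields_eq(['x', 'y'])
    Point.__str__ = fields_str('Point', ['x', 'y'])
    assert Point(1, 2) == Point(1, 2)
    assert str(Point(1, 2)).startswith('Point(')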
<|end_of_text|>import logging
import sys
import time
import rtmidi
import numpy as np
import threading
n_pulses = 100000
def main():
midiout = rtmidi.MidiOut()
available_ports = midiout.get_ports()
if available_ports:
midiout.open_port(2)
else:
midiout.open_virtual_port("My virtual output")
def send_pulse():
midiout.send_message([248])
def send_start():
midiout.send_message([250])
def send_midi_clock(tempo, double start_time):
cdef float period
cdef double now, next_pulse_time
#start_time = time.time() # maybe change to absolute time later
started = False
period = (60.0/tempo)/24.0
lookahead = 0.004
while True:
now = time.time()
if not started:
if start_time - now < lookahead:
threading.Timer(start_time - time.time(), send_start, ()).start()
started = True
next_pulse_time = start_time + (np.floor((now-start_time)/period) + 1) * period
if next_pulse_time - now < lookahead:
threading.Timer(next_pulse_time - time.time(), send_pulse, ()).start()
time.sleep(lookahead*2)
else:
time.sleep(0.0005)
def start_on_measure(int tempo):
cdef double now, next_measure_time
threshold = 0.001
period = (60.0/tempo) * 4
now = time.time()
next_measure_time = (np.floor(now/period) + 1) * period
send_midi_clock(tempo,next_measure_time)
start_on_measure(120)
if __name__=="__main__":
main()
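# Note: 248 (0xF8) is the MIDI Timing Clock byte and 250 (0xFA) is Start; the MIDI
# clock runs at 24 pulses per quarter note, hence period = (60.0 / tempo) / 24.0.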
<|end_of_text|>import os
import sys
from itertools import takewhile
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError
from django.db.migrations import Migration
from cy_migrations.upstream.autodetector import MigrationAutodetector
from cy_migrations.upstream.loader import MigrationLoader
from cy_migrations.upstream.questioner import (
InteractiveMigrationQuestioner, MigrationQuestioner,
    NonInteractiveMigrationQuestioner,
)
from cy_migrations.upstream.state_ import ProjectState
from cy_migrations.upstream.writer import MigrationWriter
from django.utils.six import iteritems
from django.utils.six.moves import zip
class Command(BaseCommand):
help = "Creates new migration(s) for apps."
def add_arguments(self, parser):
parser.add_argument('args', metavar='app_label', nargs='*',
help='Specify the app label(s) to create migrations for.')
parser.add_argument('--dry-run', action='store_true', dest='dry_run', default=False,
help="Just show what migrations would be made; don't actually write them.")
parser.add_argument('--merge', action='store_true', dest='merge', default=False,
help="Enable fixing of migration conflicts.")
parser.add_argument('--empty', action='store_true', dest='empty', default=False,
help="Create an empty migration.")
parser.add_argument('--noinput', '--no-input',
action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_argument('-n', '--name', action='store', dest='name', default=None,
help="Use this name for migration file(s).")
parser.add_argument('-e', '--exit', action='store_true', dest='exit_code', default=False,
help='Exit with error code 1 if no changes needing migrations are found.')
def handle(self, *app_labels, **options):
self.verbosity = options.get('verbosity')
self.interactive = options.get('interactive')
self.dry_run = options.get('dry_run', False)
self.merge = options.get('merge', False)
self.empty = options.get('empty', False)
self.migration_name = options.get('name')
self.exit_code = options.get('exit_code', False)
# Make sure the app they asked for exists
app_labels = set(app_labels)
bad_app_labels = set()
for app_label in app_labels:
try:
apps.get_app_config(app_label)
except LookupError:
bad_app_labels.add(app_label)
if bad_app_labels:
for app_label in bad_app_labels:
self.stderr.write("App '%s' could not be found. Is it in INSTALLED_APPS?" % app_label)
sys.exit(2)
# Load the current graph state. Pass in None for the connection so
# the loader doesn't try to resolve replaced migrations from DB.
loader = MigrationLoader(None, ignore_no_migrations=True)
# Before anything else, see if there's conflicting apps and drop out
# hard if there are any and they don't want to merge
conflicts = loader.detect_conflicts()
# If app_labels is specified, filter out conflicting migrations for unspecified apps
if app_labels:
conflicts = {
app_label: conflict for app_label, conflict in iteritems(conflicts)
if app_label in app_labels
}
if conflicts and not self.merge:
name_str = "; ".join(
"%s in %s" % (", ".join(names), app)
for app, names in conflicts.items()
)
raise CommandError(
"Conflicting migrations detected; multiple leaf nodes in the "
"migration graph: (%s).\nTo fix them run "
"'python manage.py makemigrations --merge'" % name_str
)
# If they want to merge and there's nothing to merge, then politely exit
if self.merge and not conflicts:
self.stdout.write("No conflicts detected to merge.")
return
# If they want to merge and there is something to merge, then
# divert into the merge code
if self.merge and conflicts:
return self.handle_merge(loader, conflicts)
if self.interactive:
questioner = InteractiveMigrationQuestioner(specified_apps=app_labels, dry_run=self.dry_run)
else:
questioner = NonInteractiveMigrationQuestioner(specified_apps=app_labels, dry_run=self.dry_run)
# Set up autodetector
autodetector = MigrationAutodetector(
loader.project_state(),
ProjectState.from_apps(apps),
questioner,
)
# If they want to make an empty migration, make one for each app
if self.empty:
if not app_labels:
raise CommandError("You must supply at least one app label when using --empty.")
# Make a fake changes() result we can pass to arrange_for_graph
changes = {
app: [Migration("custom", app)]
for app in app_labels
}
changes = autodetector.arrange_for_graph(
changes=changes,
graph=loader.graph,
migration_name=self.migration_name,
)
self.write_migration_files(changes)
return
# Detect changes
changes = autodetector.changes(
graph=loader.graph,
trim_to_apps=app_labels or None,
convert_apps=app_labels or None,
migration_name=self.migration_name,
)
if not changes:
# No changes? Tell them.
if self.verbosity >= 1:
if len(app_labels) == 1:
self.stdout.write("No changes detected in app '%s'" % app_labels.pop())
elif len(app_labels) > 1:
self.stdout.write("No changes detected in apps '%s'" % ("', '".join(app_labels)))
else:
self.stdout.write("No changes detected")
if self.exit_code:
sys.exit(1)
else:
return
self.write_migration_files(changes)
def write_migration_files(self, changes):
"""
Takes a changes dict and writes them out as migration files.
"""
directory_created = {}
for app_label, app_migrations in changes.items():
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Migrations for '%s':" % app_label) + "\n")
for migration in app_migrations:
# Describe the migration
writer = MigrationWriter(migration)
if self.verbosity >= 1:
self.stdout.write(" %s:\n" % (self.style.MIGRATE_LABEL(writer.filename),))
for operation in migration.operations:
self.stdout.write(" - %s\n" % operation.describe())
if not self.dry_run:
# Write the migrations file to the disk.
migrations_directory = os.path.dirname(writer.path)
if not directory_created.get(app_label):
if not os.path.isdir(migrations_directory):
os.mkdir(migrations_directory)
init_path = os.path.join(migrations_directory, "__init__.py")
if not os.path.isfile(init_path):
open(init_path, "w").close()
# We just do this once per app
directory_created[app_label] = True
migration_string = writer.as_string()
with open(writer.path, "wb") as fh:
fh.write(migration_string)
elif self.verbosity == 3:
# Alternatively, makemigrations --dry-run --verbosity 3
# will output the migrations to stdout rather than saving
# the file to the disk.
self.stdout.write(self.style.MIGRATE_HEADING(
"Full migrations file '%s':" % writer.filename) + "\n"
)
self.stdout.write("%s\n" % writer.as_string())
def handle_merge(self, loader, conflicts):
"""
Handles merging together conflicted migrations interactively,
if it's safe; otherwise, advises on how to fix it.
"""
if self.interactive:
questioner = InteractiveMigrationQuestioner()
else:
questioner = MigrationQuestioner(defaults={'ask_merge': True})
for app_label, migration_names in conflicts.items():
# Grab out the migrations in question, and work out their
# common ancestor.
merge_migrations = []
for migration_name in migration_names:
migration = loader.get_migration(app_label, migration_name)
migration.ancestry = [
mig for mig in loader.graph.forwards_plan((app_label, migration_name))
if mig[0] == migration.app_label
]
merge_migrations.append(migration)
all_items_equal = lambda seq: all(item == seq[0] for item in seq[1:])
merge_migrations_generations = zip(*[m.ancestry for m in merge_migrations])
common_ancestor_count = sum(1 for common_ancestor_generation
in takewhile(all_items_equal, merge_migrations_generations))
if not common_ancestor_count:
raise ValueError("Could not find common ancestor of %s" % migration_names)
# Now work out the operations along each divergent branch
for migration in merge_migrations:
migration.branch = migration.ancestry[common_ancestor_count:]
migrations_ops = (loader.get_migration(node_app, node_name).operations
for node_app, node_name in migration.branch)
migration.merged_operations = sum(migrations_ops, [])
# In future, this could use some of the Optimizer code
# (can_optimize_through) to automatically see if they're
# mergeable. For now, we always just prompt the user.
if self.verbosity > 0:
self.stdout.write(self.style.MIGRATE_HEADING("Merging %s" % app_label))
for migration in merge_migrations:
self.stdout.write(self.style.MIGRATE_LABEL(" Branch %s" % migration.name))
for operation in migration.merged_operations:
self.stdout.write(" - %s\n" % operation.describe | Cython |
())
if questioner.ask_merge(app_label):
# If they still want to merge it, then write out an empty
# file depending on the migrations needing merging.
numbers = [
MigrationAutodetector.parse_number(migration.name)
for migration in merge_migrations
]
try:
biggest_number = max(x for x in numbers if x is not None)
except ValueError:
biggest_number = 1
subclass = type("Migration", (Migration, ), {
"dependencies": [(app_label, migration.name) for migration in merge_migrations],
})
new_migration = subclass("%04i_merge" % (biggest_number + 1), app_label)
writer = MigrationWriter(new_migration)
if not self.dry_run:
# Write the merge migrations file to the disk
with open(writer.path, "wb") as fh:
fh.write(writer.as_string())
if self.verbosity > 0:
self.stdout.write("\nCreated new merge migration %s" % writer.path)
elif self.verbosity == 3:
# Alternatively, makemigrations --merge --dry-run --verbosity 3
# will output the merge migrations to stdout rather than saving
# the file to the disk.
self.stdout.write(self.style.MIGRATE_HEADING(
"Full merge migrations file '%s':" % writer.filename) + "\n"
)
self.stdout.write("%s\n" % writer.as_string())
<|end_of_text|>cdef class UnaryOp:
def __init__(self):
raise TypeError('UnaryOp may not be initialized')
def __repr__(self):
return f'UnaryOp({self.name})'
@staticmethod
cdef UnaryOp _new(str name):
cdef UnaryOp obj = UnaryOp.__new__(UnaryOp)
obj.name = name
return obj
<|end_of_text|>cimport numpy as nm
import numpy as nm
nm.import_array()
cimport stdlib as stdlib
cimport stdio as stdio
cdef extern from "errorlist.h":
ctypedef struct error:
pass
void stringError(char* str, error *err)
int getErrorValue(error* err)
int isError(error *err)
void purgeError(error **err)
void printError(void* flog,error* err)
class CError(Exception):
def __init__(self,val,str):
self.val=val
self.comment=str
def __str__(self):
return self.comment.strip()
cdef doError(error **err):
cdef char estr[10000]
if (isError(err[0])):
stringError(estr,err[0])
er=CError(getErrorValue(err[0]),estr)
purgeError(err)
return er
return None
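# Usage pattern: the C routines below receive ``err`` (an ``error**``); after each
# call the wrapper turns any pending error into a Python ``CError`` via ``doError``
# and raises it, as the ``egfs`` class does.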
cdef extern from "clik_egfs.h":
ctypedef char egfs_parstr[256]
ctypedef char parname[256]
ctypedef struct c_egfs "egfs":
int nfr,nell
c_egfs *egfs_init(int nvar, char **keyvars, int ndefaults, char** keys, char** values, int lmin, int lmax, double* cib_clustering,double *patchy_ksz, double *homogenous_ksz,double *tsz, double* cib_decor_clust, double * cib_decor_poisson,error **err)
void egfs_compute(c_egfs *self, double *pars, double *rq, double *drq, error **err)
void egfs_free(void **pelf)
cdef class egfs:
cdef c_egfs* celf
cdef int ndim,nfr,nell
cdef object ids
def __init__(self,parnames,pardef,lmin,lmax,freqs,inorm,cib_clustering=None,patchy_ksz=None,homogenous_ksz=None,tsz=None,cib_decor_clust=None,cib_decor_poisson=None,flter=None):
cdef error *_err,**err
cdef char *keys[100], *values[100], *keyvars[50]
cdef double *_cib_clustering,*_patchy_ksz,*_homogenous_ksz,*_tsz,*_cib_decor_poisson,*_cib_decor_clust
self.celf=NULL
models = ["cib_clustering","cib_poisson","radio_poisson","tsz","ksz"]
mardef = dict(pardef)
frqstr = " ".join(["%s"%v for v in freqs])
mardef.update(dict(zip(["eff_fr_"+v for v in models],[frqstr]*5)))
mardef.update(dict(zip(["norm_fr_"+v for v in models],["%s"%"143"]*5)))
mardef.update({"nfr":"%d"%len(freqs)})
mkeys = mardef.keys()
for i from 0<=i<len(mardef):
keys[i] = mkeys[i]
values[i] = mardef[mkeys[i]]
self.celf=NULL
_err = NULL
err = &_err
_cib_clustering = NULL
_patchy_ksz = NULL
_homogenous_ksz = NULL
_tsz = NULL
if cib_clustering!=None:
cib_clustering_proxy=nm.PyArray_ContiguousFromAny(cib_clustering,nm.NPY_DOUBLE,1,1)
patchy_ksz_proxy=nm.PyArray_ContiguousFromAny(patchy_ksz,nm.NPY_DOUBLE,1,1)
homogenous_ksz_proxy=nm.PyArray_ContiguousFromAny(homogenous_ksz,nm.NPY_DOUBLE,1,1)
tsz_proxy=nm.PyArray_ContiguousFromAny(tsz,nm.NPY_DOUBLE,1,1)
_cib_clustering = <double*> nm.PyArray_DATA(cib_clustering_proxy)
_patchy_ksz = <double*> nm.PyArray_DATA(patchy_ksz_proxy)
_homogenous_ksz = <double*> nm.PyArray_DATA(homogenous_ksz_proxy)
_tsz = <double*> nm.PyArray_DATA(tsz_proxy)
_cib_decor_clust = NULL
if cib_decor_clust!=None:
cib_decor_clust_proxy=nm.PyArray_ContiguousFromAny(cib_decor_clust,nm.NPY_DOUBLE,2,2)
assert cib_decor_clust_proxy.shape[0]==len(freqs)
assert cib_decor_clust_proxy.shape[1]==len(freqs)
_cib_decor_clust = <double*> nm.PyArray_DATA(cib_decor_clust_proxy)
_cib_decor_poisson = NULL
if cib_decor_poisson!=None:
cib_decor_poisson_proxy=nm.PyArray_ContiguousFromAny(cib_decor_poisson,nm.NPY_DOUBLE,2,2)
assert cib_decor_poisson_proxy.shape[0]==len(freqs)
assert cib_decor_poisson_proxy.shape[1]==len(freqs)
_cib_decor_poisson = <double*> nm.PyArray_DATA(cib_decor_poisson_proxy)
if flter:
farnames = [p for p,v in zip(parnames,flter) if v!=None]
self.ids = [i for i,v in enumerate(flter) if v!=None]
else:
farnames = parnames
self.ids = None
for i from 0<=i<len(farnames):
keyvars[i] = farnames[i]
self.celf = egfs_init(len(farnames),keyvars,len(mardef),keys,values,lmin,lmax,_cib_clustering,_patchy_ksz,_homogenous_ksz,_tsz,_cib_decor_clust,_cib_decor_poisson,err)
er=doError(err)
if er:
raise er
self.ndim = len(parnames)
self.nfr = self.celf.nfr
self.nell = self.celf.nell
def __call__(self,pars):
cdef error *_err,**err
cdef double *_drq,*_rq
if len(pars)!=self.ndim:
raise Exception("Bad shape (expecting (%d) got (%d))"%(self.ndim,len(pars)))
rq = nm.zeros((self.nfr,self.nfr,self.nell),dtype=nm.double)
sars = pars
drqa = nm.zeros((self.nfr,self.nfr,self.nell,self.ndim),dtype=nm.double)
drq = drqa
if self.ids!=None:
drqb = nm.zeros((self.nfr,self.nfr,self.nell,len(self.ids)),dtype=nm.double)
drq = drqb
sars =nm.array([pars[i] for i in self.ids])
pars_proxy=nm.PyArray_ContiguousFromAny(sars,nm.NPY_DOUBLE,1,1)
_err = NULL
err = &_err
egfs_compute(self.celf, <double*> nm.PyArray_DATA(pars_proxy), <double*> nm.PyArray_DATA(rq),<double*> nm.PyArray_DATA(drq), err);
        er=doError(err)
if er:
raise er
if self.ids!=None:
drqa[:,:,:,self.ids] = drqb
return rq,drqa
def __dealloc__(self):
if self.celf!=NULL:
egfs_free(<void**>&(self.celf))
def default_models(defmodels=[],varmodels=[],varpars=[],defvalues={},dnofail=True,reduce=False):
prs = """
#-> cib clustering
alpha_dg_cl = 3.8
tilt_dg_cl = 0.8
norm_dg_cl = 6
#-> cib poisson
alpha_dg_po = 3.8
sigma_dg_po = 0.4
norm_dg_po = 9
fpol_dg_po = 0.01
#-> radio poisson
# Updated default values for best match with Paoletti et al. (De
# Zotti et al.) model. Commented values are original Millea et
# al. values.
# alpha_rg = -0.5
# sigma_rg = 0.1
# norm_rg = 133
alpha_rg = -0.36
sigma_rg = 0.64
norm_rg = 78.5
gamma_rg = -0.8
fpol_rg = 0.05
#-> tsz
tsz_pca1 = 0
tsz_pca2 = 0
tsz_mean_scale = 1
#-> ksz
norm_ov = 1
norm_patchy = 1
shift_patchy = 1
"""
lprs = prs.split("\n")
iii = [i for i,vv in enumerate(lprs) if vv.strip() and vv.strip()[:3]=="#->"]
pfs = {}
for i,j in zip(iii,iii[1:]+[-1]):
nmod = lprs[i].replace("#->","").strip()
pn = [vv.split("=")[0].strip() for vv in lprs[i:j] if vv.strip() and vv.strip()[0]!="#"]
pfs[nmod]=pn
pv = [float(vv.split("=")[1].strip()) for vv in lprs if vv.strip() and vv.strip()[0]!="#"]
pn = [vv.split("=")[0].strip() for vv in lprs if vv.strip() and vv.strip()[0]!="#"]
aps = dict(zip(pn,pv))
if varpars==[]:
if varmodels==[]:
varmodels = pfs.keys()
varpars = []
for mm in varmodels:
varpars = varpars + pfs[mm]
varmodels = set(varmodels)
for vp in varpars:
for kk in pfs.keys():
if vp in pfs[kk]:
varmodels.add(kk)
dmm = set(defmodels)
dmm.update(varmodels)
for vp in defvalues.keys():
for kk in pfs.keys():
if vp in pfs[kk]:
dmm.add(kk)
defs = {}
for dm in dmm:
for pn in pfs[dm]:
if pn not in varpars:
defs[pn] = str(aps[pn])
defs.update(defvalues)
if dnofail or reduce:
rv = [aps.get(pn,None) for pn in varpars]
else:
rv = [aps[pn] for pn in varpars]
if reduce:
varpars = [pn for pn,pv in zip(varpars,rv) if pv!=None]
rv = [pv for pv in rv if pv!=None]
return defs,varpars,rv
def init_defaults(datapath,defmodels=[],varmodels=[],varpars=[],defvalues={},dnofail=False,reduce=True):
import os.path as osp
defs = {}
defs["template_cib_clustering"]=osp.join(datapath,"clustered_1108.4614.dat")
defs["template_patchy_ksz"]=osp.join(datapath,"ksz_patchy.dat")
defs["template_homogenous_ksz"]=osp.join(datapath,"ksz_ov.dat")
defs["template_tsz"]=osp.join(datapath,"tsz.dat")
defs["rg_flux_cut"]="330"
defs["norm_rg_flux_cut"]="330"
oefs,varpars,pv = default_models(defmodels,varmodels,varpars,defvalues,dnofail,reduce)
defs.update(oefs)
return defs,varpars,pv
def simple_egfs(lmin,lmax,freq,norm_freq,varpars=[],varmodels=[],defmodels=[],datapath="./",defs={}):
oefs,pn,pv = init_defaults(datapath,defmodels,varmodels,varpars)
oefs.update(defs)
megfs = egfs(pn,oefs,lmin,lmax,freq,freq.index(norm_freq))
return megfs,pv
def testme(datapath="./"):
megfs,pv = simple_egfs(0,4000,[100,143,217,353,545,857],143,datapath=datapath)
return megfs,pv
try:
def add_xxx(agrp,vpars,defaults,values,lmin,lmax,template_names,tpls,cib_decor_clustering):
import parobject as php
agrp.attrs["ndim"] = len(vpars)
agrp.attrs["keys"] = php.pack256(*vpars)
agrp.attrs["ndef"] = len(defaults)
agrp.attrs["defaults"] = php.pack256(*defaults)
agrp.attrs["values"] = php.pack256(*values)
agrp.attrs["lmin"] = lmin
agrp.attrs["lmax"] = lmax
for nnm,vvv in zip(template_names,tpls):
agrp.create_dataset(nnm, data=vvv.flat[:])
if cib_decor_clustering!=None:
agrp.attrs["cib_decor_clustering"] = cib_decor_clustering.flat[:]
return agrp
except Exception:
pass
def build_decor_step(frqs,step):
ll = len(frqs)
import itertools as itt
chl = [0]+[ii+1 for ii,l1,l2 in zip(itt.count(),frqs,frqs[1:]) if l1!=l2]+[len(frqs)]
#make block
import numpy as nm
mat = nm.zeros((ll,ll))
for ipp0 in range(len(chl)-1):
p0,p1=chl[ipp0],chl[ipp0+1]
for ipp2 in range(len(chl)-1):
p2,p3=chl[ipp2],chl[ipp2+1]
away = ipp0-ipp2
mat[p0:p1,p2:p3] = step**away
mat[p2:p3,p0:p1] = step**away
return mat
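# Worked example (illustrative numbers, not from the source): four channels in
# three frequency blocks decorrelate geometrically with block distance, so
#     build_decor_step([100, 100, 143, 217], 0.9)
# has 1.0 on the diagonal blocks, 0.9 one block away and 0.81 two blocks away.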
<|end_of_text|>from cpython.ref cimport PyObject
from cpython.mem cimport PyMem_Malloc, PyMem_Free
from libc.string cimport memcpy
cdef inline bytes _bytes(s):
if isinstance(s, str):
# encode to the specific encoding used inside of the module
return (<str>s).encode('utf8')
else:
return s
cdef inline str _str(s):
if isinstance(s, bytes):
# encode to the specific encoding used inside of the module
return (<bytes>s).decode('utf8')
else:
return s
cdef extern from "Python.h":
cdef bint PyBytes_Check(object)
cdef int PyBytes_AsStringAndSize(object, char **, Py_ssize_t *)
cdef object PyBytes_FromString(const char*)
cdef object PyBytes_FromStringAndSize(const char *, Py_ssize_t)
cdef char* PyBytes_AsString(object)
cdef char* PyUnicode_AsUTF8AndSize(object, Py_ssize_t *)
cdef object PyUnicode_FromString(const char *)
cdef object PyUnicode_FromStringAndSize(const char *, Py_ssize_t)
cdef char* PyUnicode_AsUTF8(object)
"""
The following functions take a
o1 - python string object
length - a pointer that will store the length of the string
c_str2 - a pointer to the c string of the copy of o1
returns:
o2 - a python string object
For Python 3 it can take a bytes object or a unicode object
This is unsafe for single characters in cpython due to object reuse
https://github.com/python/cpython/blob/master/Objects/unicodeobject.c#L4688
"""
cdef inline object copy_obj_to_cstr_unsafe( object o1,
Py_ssize_t *length,
char** c_str2):
cdef object o2
cdef char* c_str1
cdef size_t b_length
if PyBytes_Check(o1):
if PyBytes_AsStringAndSize(o1, &(c_str1), length) == -1:
raise TypeError("copy_obj_to_cstr:")
b_length = length[0]
o2 = PyBytes_FromStringAndSize(c_str1, b_length)
#if o2 == NULL:
# raise OSError("copy_obj_to_cstr:")
c_str2[0] = PyBytes_AsString(o2)
else:
c_str1 = PyUnicode_AsUTF8AndSize(o1, length)
if c_str1 == NULL:
raise OSError("copy_obj_to_cstr:")
b_length = length[0]
o2 = PyUnicode_FromStringAndSize(<const char *>c_str1, b_length)
#if o2 == NULL:
# raise OSError("copy_obj_to_cstr:")
c_str2[0] = PyUnicode_AsUTF8(o2)
return o2
# end def
"""
The following functions take a
o1 - python string object
length - a pointer that will store the length of the string
c_str2 - a pointer to the c string of the copy of o1
returns:
obj_type - type a python string object for Python 3
1 == bytes
0 == str (unicode)
For Python 3 it can take a bytes object or a unicode object
"""
cdef inline int copy_obj_to_cstr(object o1,
Py_ssize_t *length,
char** c_str2) except -1:
cdef char* c_str1
cdef char* temp = NULL
cdef int obj_type
cdef size_t b_length
if PyBytes_Check(o1):
obj_type = 1
if PyBytes_AsStringAndSize(o1, &(c_str1), length) == -1:
#raise TypeError("copy_obj_to_cstr:")
return -1
else:
obj_type = 0
c_str1 = PyUnicode_AsUTF8AndSize(o1, length)
if c_str1 == NULL:
#raise OSError("copy_obj_to_cstr:")
return -1
b_length = length[0]+1
temp = <char *> PyMem_Malloc(b_length*sizeof(char))
if temp == NULL:
return -1
memcpy(temp, c_str1, b_length)
c_str2[0] = temp
return obj_type
# end def
"""
The following functions take a
c_str - a pointer to the c string of the copy of o1
length - the length of the string
obj_type - for Python 3
1 == bytes
0 == str (unicode)
returns:
obj - a python string object and frees the memory associated with c_str
"""
cdef inline cstr_to_obj(char* c_str,
Py_ssize_t length,
int obj_type):
cdef object obj
if obj_type:
obj = PyBytes_FromStringAndSize(c_str, length)
else:
obj = PyUnicode_FromStringAndSize(<const char *>c_str, length)
PyMem_Free(c_str)
return obj
# end def
cdef inline cstr_to_obj_nofree(char* c_str,
Py_ssize_t length,
int obj_type):
cdef object obj
if obj_type:
obj = PyBytes_FromStringAndSize(c_str, length)
else:
obj = PyUnicode_FromStringAndSize(<const char *>c_str, length)
return obj
# end def
cdef inline cstr_to_obj_nolength(char* c_str,
int obj_type):
cdef object obj
if obj_type:
obj = PyBytes_FromString(c_str)
else:
obj = PyUnicode_FromString(<const char *>c_str)
return obj
# end def
"""
The following functions take a
o1 - python string object
length - a pointer that will store the length of the string
returns:
c_str1: string pointer to the internal string of the o1
For Python 3 it can take a bytes object or a unicode object
"""
cdef inline char* obj_to_cstr(object o1):
cdef char* c_str1
cdef Py_ssize_t length
if PyBytes_Check(o1):
if PyBytes_AsStringAndSize(o1, &(c_str1), &length) == -1:
raise TypeError("obj_to_cstr:")
return c_str1
else:
c_str1 = PyUnicode_AsUTF8AndSize(o1, &length)
if c_str1 == NULL:
raise OSError("obj_to_cstr:")
return c_str1
# end def
"""
Same as above but fetches the string length too
"""
cdef inline char* obj_to_cstr_len(object o1, Py_ssize_t *length):
cdef char* c_str1
if PyBytes_Check(o1):
if PyBytes_AsStringAndSize(o1, &(c_str1), length) == -1:
raise TypeError("obj_to_cstr: PyBytes_AsStringAndSize error")
return c_str1
else:
c_str1 = PyUnicode_AsUTF8AndSize(o1, length)
if c_str1 == NULL:
raise OSError("obj_to_cstr: PyUnicode_AsUTF8AndSize error")
return c_str1
# end def
cdef inline char* copy_string(char* in_str, int length):
cdef int i
cdef char * out_str = <char *> PyMem_Malloc((length+1)*sizeof(char))
if out_str == NULL:
raise OSError("Could not allocate memory for sequence.")
memcpy(out_str, in_str, length+1)
return out_str
cdef inline void copy_string_buffer(char* in_str, char* out_str, int length):
memcpy(out_str, in_str, length+1)
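# Usage note: ``copy_string`` allocates with PyMem_Malloc, so the caller owns the
# returned buffer and must release it with PyMem_Free, e.g. (sketch):
#     cdef char* dup = copy_string(src, length)
#     ...  # use dup
#     PyMem_Free(dup)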
<|end_of_text|># cython: language_level=3
# cython: boundscheck=False
# cython: wraparound=False
# cython: nonecheck=False
# cython: overflowcheck=False
# cython: initializedcheck=False
# cython: cdivision=True
# cython: auto_pickle=True
from libc.math cimport sqrt
import numpy as np
import cython
np.seterr(all='raise')
def rebuild_node(children, a, cpuct, e, q, n, p, player):
childs = []
for child_state in children:
rebuild, args = child_state[0], child_state[1:]
child = rebuild(*args)
childs.append(child)
node = Node(a, cpuct)
node._children = childs
node.a = a
node.cpuct = cpuct
node.e = e
node.q = q
node.n = n
node.p = p
node.player = player
return node
@cython.auto_pickle(True)
cdef class Node:
cdef public list _children
cdef public int a
cdef public float cpuct
cdef public (bint, int) e
cdef public float q
cdef public int n
cdef public float p
cdef public int player
def __init__(self, int action, float cpuct):
self._children = []
self.a = action
self.cpuct = cpuct
self.e = (False, 0)
self.q = 0
self.n = 0
self.p = 0
self.player = 0
def __reduce__(self):
return rebuild_node, ([n.__reduce__() for n in self._children], self.a, self.cpuct, self.e, self.q, self.n, self.p, self.player)
cdef add_children(self, int[:] v):
cdef Py_ssize_t a
for a in range(len(v)):
if v[a] == 1:
self._children.append(Node(a, self.cpuct))
# shuffle children
np.random.shuffle(self._children)
cdef update_policy(self, float[:] pi):
cdef Node c
for c in self._children:
c.p = pi[c.a]
cdef uct(self, float sqrtParentN):
uct = self.q + self.cpuct * self.p * sqrtParentN/(1+self.n)
return uct
cdef best_child(self):
child = None
curBest = -float('inf')
sqrtN = sqrt(self.n)
cdef Node c
for c in self._children:
uct = c.uct(sqrtN)
if uct > curBest:
curBest = uct
child = c
return child
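# Note: ``Node.uct`` above implements the PUCT selection rule
#     U(a) = Q(a) + cpuct * P(a) * sqrt(N_parent) / (1 + N(a)),
# so an unvisited child (n == 0) is ranked purely by its prior until expanded.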
def rebuild_mcts(cpuct, root, curnode, path):
mcts = MCTS()
mcts.cpuct = cpuct
mcts._root = root
mcts._curnode = curnode
mcts.path = path
return mcts
@cython.auto_pickle(True)
cdef class MCTS:
cdef public float cpuct
cdef public Node _root
cdef public Node _curnode
cdef public list path
def __init__(self, float cpuct=2.0):
self.cpuct = cpuct
self._root = Node(-1, cpuct)
self._curnode = self._root
self.path = []
def __reduce__(self):
return rebuild_mcts, (self.cpuct, self._root, self._curnode, self.path)
cpdef search(self, gs, nn, sims):
cdef float v
cdef float[:] p
for _ in range(sims):
leaf = self.find_leaf(gs)
p, v = nn(leaf.observation())
self.process_results(leaf, v, p)
cpdef update_root(self, gs, int a):
if self._root._children == []:
self._root.add_children(gs.valid_moves())
cdef Node c
for c in self._root._children:
if c.a == a:
self._root = c
return
        raise ValueError(f'Invalid action while updating root: {a}')
cpdef find_leaf(self, gs):
self._curnode = self._root
gs = gs.clone()
while self._curnode.n > 0 and not self._curnode.e[0]:
self.path.append(self._curnode)
self._curnode = self._curnode.best_child()
gs.play_action(self._curnode.a)
if self._curnode.n == 0:
ws = gs.win_state()
self._curnode.player = gs.current_player()
self._curnode.e = (ws[0], ws[1]*self._curnode.player)
self._curnode.add_children(gs.valid_moves())
return gs
cpdef process_results(self, gs, float value, float[:] pi):
if self._curnode.e[0]:
value = self._curnode.e[1]
else:
self._curnode.update_policy(pi)
player = gs.current_player()
while self.path:
parent = self.path.pop()
v = value if parent.player == player else -value
self._curnode.q = (self._curnode.q * self._curnode.n + v) / (self._curnode.n + 1)
self._curnode.n += 1
self._curnode = parent
self._root.n += 1
cpdef counts(self, gs):
cdef int[:] counts = np.zeros(gs.action_size(), dtype=np.intc)
cdef Node c
for c in self._root._children:
counts[c.a] = c.n
return counts
cpdef probs(self, gs, temp=1):
counts = np.zeros(gs.action_size())
cdef Node c
for c in self._root._children:
counts[c.a] = c.n
if temp == 0:
bestA = np.argmax(counts)
probs = np.zeros_like(counts)
probs[bestA] = 1
return probs
try:
probs = counts ** (1.0/temp)
probs /= np.sum(probs)
return probs
except OverflowError:
bestA = np.argmax(counts)
probs = np.zeros_like(counts)
probs[bestA] = 1
return probs
cpdef value(self):
value = None
cdef Node c
for c in self._root._children:
if value == None or c.q > value:
value = c.q
return value
<|end_of_text|>cimport numpy as np
from libcpp.vector cimport vector
from libcpp.pair cimport pair
from libcpp cimport bool
from libc.stdint cimport uint32_t, uint64_t, int64_t, int32_t
cdef extern from "c_tools.hpp":
bool intersect_sph_box(uint32_t ndim, double *c, double r, double *le, double *re) nogil
bool arg_tLT[I](I *cells, uint32_t *idx_verts, uint32_t ndim, uint64_t i1, uint64_t i2) nogil
bool tEQ[I](I *cells, uint32_t ndim, int64_t i1, int64_t i2) nogil
bool tGT[I](I *cells, uint32_t ndim, int64_t i1, int64_t i2) nogil
bool tLT[I](I *cells, uint32_t ndim, int64_t i1, int64_t i2) nogil
int64_t arg_partition[I](I *arr, I *idx, uint32_t ndim,
int64_t l, int64_t r, int64_t p) nogil
void arg_quickSort[I](I *arr, I *idx, uint32_t ndim,
int64_t l, int64_t r) nogil
int64_t partition_tess[I](I *cells, I *neigh, I *idx, uint32_t ndim,
int64_t l, int64_t r, int64_t p) nogil
void quickSort_tess[I](I *cells, I *neigh, I *idx, uint32_t ndim,
int64_t l, int64_t r) nogil
void sortCellVerts[I](I *cells, I *neigh, uint64_t ncells, uint32_t ndim) nogil
void sortSerializedTess[I](I *cells, I *neigh,
uint64_t ncells, uint32_t ndim) nogil
int64_t arg_partition_tess[I](I *cells, uint32_t *idx_verts, uint64_t *idx_cells,
uint32_t ndim, int64_t l, int64_t r, int64_t p) nogil
void arg_quickSort_tess[I](I *cells, uint32_t *idx_verts, uint64_t *idx_cells,
uint32_t ndim, int64_t l, int64_t r) nogil
void arg_sortCellVerts[I](I *cells, uint32_t *idx_verts, uint64_t ncells, uint32_t ndim) nogil
void arg_sortSerializedTess[I](I *cells, uint64_t ncells, uint32_t ndim,
uint32_t *idx_verts, uint64_t *idx_cells) nogil
void swap_cells[I](I *verts, I *neigh, uint32_t ndim, uint64_t i1, uint64_t i2) nogil
cdef cppclass SerializedLeaf[I] nogil:
SerializedLeaf() except +
SerializedLeaf(int _id, uint32_t _ndim, int64_t _ncells, I _idx_inf,
I *_verts, I *_neigh,
uint32_t *_sort_verts, uint64_t *_sort_cells,
uint64_t idx_start, uint64_t idx_stop) except +
int id
uint32_t ndim
int64_t ncells
uint64_t idx_start
uint64_t idx_stop
I idx_inf
I *verts
I *neigh
uint32_t *sort_verts
uint64_t *sort_cells
bool init_from_file
void write_to_file(const char* filename)
int64_t read_from_file(const char* filename)
void cleanup()
cdef cppclass ConsolidatedLeaves[I] nogil:
ConsolidatedLeaves() except +
ConsolidatedLeaves(uint32_t _ndim, I _idx_inf, int64_t _max_ncells,
I *_verts, I *_neigh) except +
ConsolidatedLeaves(uint32_t _ndim, int64_t _ncells, I _idx_inf,
int64_t _max_ncells, I *_verts, I *_neigh) except +
ConsolidatedLeaves(uint32_t _ndim, int64_t _ncells, I _idx_inf,
int64_t _max_ncells, I *_verts, I *_neigh,
uint64_t n_split_map, I *key_split_map, uint64_t *val_split_map,
uint64_t n_inf_map, I *key_inf_map, uint64_t *val_inf_map) except +
int64_t ncells
int64_t max_ncells
I *allverts
I *allneigh
uint64_t size_split_map()
uint64_t size_inf_map()
void get_split_map(I *keys, uint64_t *vals)
void get_inf_map(I *keys, uint64_t *vals)
void cleanup()
void add_leaf[leafI](SerializedLeaf[leafI] leaf)
void add_leaf_fromfile(const char *filename)
int64_t count_inf()
void add_inf()
ctypedef SerializedLeaf[uint32_t] sLeaf32
ctypedef SerializedLeaf[uint64_t] sLeaf64
ctypedef vector[sLeaf32] sLeaves32
ctypedef vector[sLeaf64] sLeaves64
<|end_of_text|>cimport cpython
cdef class _UnaryCall:
cdef:
Channel channel
CallbackGlue watcher_call
grpc_completion_queue * cq
grpc_experimental_completion_queue_functor functor
object _waiter_call
@staticmethod
cdef void functor_run(grpc_experimental_completion_queue_functor* functor, int succeed)
@staticmethod
cdef void watcher_call_functor_run(grpc_experimental_completion_queue_functor* functor, int succeed)
<|end_of_text|>"""Module containing cython functions, which generate first order Redfield kernel.
For docstrings see documentation of module neumann1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import itertools
from...mytypes import doublenp
from...mytypes import complexnp
from...aprclass import Approach
from.c_neumann1 import generate_phi1fct
from.c_pauli import generate_norm_vec
cimport numpy as np
cimport cython
ctypedef np.uint8_t bool_t
ctypedef np.int_t int_t
ctypedef np.int64_t long_t
ctypedef np.float64_t double_t
ctypedef np.complex128_t complex_t
# ---------------------------------------------------------------------------------------------------
# Redfield approach
# ---------------------------------------------------------------------------------------------------
@cython.boundscheck(False)
def generate_kern_redfield(self):
cdef np.ndarray[double_t, ndim=1] E = self.qd.Ea
cdef np.ndarray[complex_t, ndim=3] Tba = self.leads.Tba
cdef np.ndarray[complex_t, ndim=3] phi1fct = self.phi1fct
si = self.si
#
cdef bool_t bbp_bool, bbpi_bool
cdef int_t charge, acharge, bcharge, ccharge, l, nleads, \
aap_sgn, bppbp_sgn, bbpp_sgn, ccp_sgn
cdef long_t b, bp, bbp, bbpi, bb, \
a, ap, aap, aapi, \
bpp, bppbp, bppbpi, bbpp, bbppi, \
c, cp, ccp, ccpi, \
bpap, ba, bppa, cbpp, cpbp, cb
cdef long_t ndm0, ndm0r, npauli,
cdef complex_t fct_aap, fct_bppbp, fct_bbpp, fct_ccp
#
cdef np.ndarray[long_t, ndim=1] lenlst = si.lenlst
cdef np.ndarray[long_t, ndim=1] dictdm = si.dictdm
cdef np.ndarray[long_t, ndim=1] shiftlst0 = si.shiftlst0
cdef np.ndarray[long_t, ndim=1] shiftlst1 = si.shiftlst1
cdef np.ndarray[long_t, ndim=1] mapdm0 = si.mapdm0
cdef np.ndarray[bool_t, ndim=1] booldm0 = si.booldm0
cdef np.ndarray[bool_t, ndim=1] conjdm0 = si.conjdm0
#
ndm0r, ndm0, npauli, nleads = si.ndm0r, si.ndm0, si.npauli, si.nleads
self.kern_ext = np.zeros((ndm0r+1, ndm0r), dtype=doublenp)
self.kern = self.kern_ext[0:-1, :]
generate_norm_vec(self, ndm0r)
cdef np.ndarray[double_t, ndim=2] kern = self.kern
for charge in range(si.ncharge):
acharge = charge-1
bcharge = charge
ccharge = charge+1
for b, bp in itertools.combinations_with_replacement(si.statesdm[bcharge], 2):
bbp = mapdm0[lenlst[bcharge]*dictdm[b] + dictdm[bp] + shiftlst0[bcharge]]
bbp_bool = booldm0[lenlst[bcharge]*dictdm[b] + dictdm[bp] + shiftlst0[bcharge]]
if bbp!= -1 and bbp_bool:
bbpi = ndm0 + bbp - npauli
bbpi_bool = True if bbpi >= ndm0 else False
if bbpi_bool:
kern[bbp, bbpi] = kern[bbp, bbpi] + E[b]-E[bp]
kern[bbpi, bbp] = kern[bbpi, bbp] + E[bp]-E[b]
# --------------------------------------------------
for a, ap in itertools.product(si.statesdm[acharge], si.statesdm[acharge]):
aap = mapdm0[lenlst[acharge]*dictdm[a] + dictdm[ap] + shiftlst0[acharge]]
if aap!= -1:
bpap = lenlst[acharge]*dictdm[bp] + dictdm[ap] + shiftlst1[acharge]
ba = lenlst[acharge]*dictdm[b] + dictdm[a] + shiftlst1[acharge]
fct_aap = 0
for l in range(nleads):
fct_aap += (+ Tba[l, b, a]*Tba[l, ap, bp]*phi1fct[l, bpap, 0].conjugate()
- Tba[l, b, a]*Tba[l, ap, bp]*phi1fct[l, ba, 0])
aapi = ndm0 + aap - npauli
aap_sgn = +1 if conjdm0[lenlst[acharge]*dictdm[a] + dictdm[ap] + shiftlst0[acharge]] else -1
kern[bbp, aap] = kern[bbp, aap] + fct_aap.imag
if aapi >= ndm0:
kern[bbp, aapi] = kern[bbp, aapi] + fct_aap.real*aap_sgn
if bbpi_bool:
kern[bbpi, aapi] = kern[bbpi, aapi] + fct_aap.imag*aap_sgn
if bbpi_bool:
kern[bbpi, aap] = kern[bbpi, aap] - fct_aap.real
# --------------------------------------------------
for bpp in si.statesdm[bcharge]:
bppbp = mapdm0[lenlst[bcharge]*dictdm[bpp] + dictdm[bp] + shiftlst0[bcharge]]
if bppbp!= -1:
fct_bppbp = 0
for a in si.statesdm[acharge]:
bppa = lenlst[acharge]*dictdm[bpp] + dictdm[a] + shiftlst1[acharge]
for l in range(nleads):
fct_bppbp += +Tba[l, b, a]*Tba[l, a, bpp]*phi1fct[l, bppa, 1].conjugate()
for c in si.statesdm[ccharge]:
cbpp = lenlst[bcharge]*dictdm[c] + dictdm[bpp] + shiftlst1[bcharge]
for l in range(nleads):
fct_bppbp += +Tba[l, b, c]*Tba[l, c, bpp]*phi1fct[l, cbpp, 0]
bppbpi = ndm0 + bppbp - npauli
bppbp_sgn = +1 if conjdm0[lenlst[bcharge]*dictdm[bpp] + dictdm[bp] + shiftlst0[bcharge]] else -1
kern[bbp, bppbp] = kern[bbp, bppbp] + fct_bppbp.imag
if bppbpi >= ndm0:
kern[bbp, bppbpi] = kern[bbp, bppbpi] + fct_bppbp.real*bppbp_sgn
if bbpi_bool:
kern[bbpi, bppbpi] = kern[bbpi, bppbpi] + fct_bppbp.imag*bppbp_sgn
if bbpi_bool:
kern[bbpi, bppbp] = kern[bbpi, bppbp] - fct_bppbp.real
# --------------------------------------------------
bbpp = mapdm0[lenlst[bcharge]*dictdm[b] + dictdm[bpp] + shiftlst0[bcharge]]
                    if bbpp != -1:
fct_bbpp = 0
for a in si.statesdm[acharge]:
bppa = lenlst[acharge]*dictdm[bpp] + dictdm[a] + shiftlst1[acharge]
for l in range(nleads):
fct_bbpp += -Tba[l, bpp, a]*Tba[l, a, bp]*phi1fct[l, bppa, 1]
for c in si.statesdm[ccharge]:
cbpp = lenlst[bcharge]*dictdm[c] + dictdm[bpp] + shiftlst1[bcharge]
for l in range(nleads):
fct_bbpp += -Tba[l, bpp, c]*Tba[l, c, bp]*phi1fct[l, cbpp, 0].conjugate()
bbppi = ndm0 + bbpp - npauli
bbpp_sgn = +1 if conjdm0[lenlst[bcharge]*dictdm[b] + dictdm[bpp] + shiftlst0[bcharge]] else -1
kern[bbp, bbpp] = kern[bbp, bbpp] + fct_bbpp.imag
if bbppi >= ndm0:
                            kern[bbp, bbppi] = kern[bbp, bbppi] + fct_bbpp.real*bbpp_sgn
if bbpi_bool:
kern[bbpi, bbppi] = kern[bbpi, bbppi] + fct_bbpp.imag*bbpp_sgn
if bbpi_bool:
kern[bbpi, bbpp] = kern[bbpi, bbpp] - fct_bbpp.real
# --------------------------------------------------
for c, cp in itertools.product(si.statesdm[ccharge], si.statesdm[ccharge]):
ccp = mapdm0[lenlst[ccharge]*dictdm[c] + dictdm[cp] + shiftlst0[ccharge]]
                    if ccp != -1:
cpbp = lenlst[bcharge]*dictdm[cp] + dictdm[bp] + shiftlst1[bcharge]
cb = lenlst[bcharge]*dictdm[c] + dictdm[b] + shiftlst1[bcharge]
fct_ccp = 0
for l in range(nleads):
fct_ccp += (+ Tba[l, b, c]*Tba[l, cp, bp]*phi1fct[l, cpbp, 1]
- Tba[l, b, c]*Tba[l, cp, bp]*phi1fct[l, cb, 1].conjugate())
ccpi = ndm0 + ccp - npauli
ccp_sgn = +1 if conjdm0[lenlst[ccharge]*dictdm[c] + dictdm[cp] + shiftlst0[ccharge]] else -1
kern[bbp, ccp] = kern[bbp, ccp] + fct_ccp.imag
if ccpi >= ndm0:
kern[bbp, ccpi] = kern[bbp, ccpi] + fct_ccp.real*ccp_sgn
if bbpi_bool:
kern[bbpi, ccpi] = kern[bbpi, ccpi] + fct_ccp.imag*ccp_sgn
if bbpi_bool:
kern[bbpi, ccp] = kern[bbpi, ccp] - fct_ccp.real
# --------------------------------------------------
return 0
@cython.boundscheck(False)
def generate_current_redfield(self):
cdef np.ndarray[double_t, ndim=1] phi0p = self.phi0
cdef np.ndarray[double_t, ndim=1] E = self.qd.Ea
cdef np.ndarray[complex_t, ndim=3] Tba = self.leads.Tba
cdef np.ndarray[complex_t, ndim=3] phi1fct = self.phi1fct
cdef np.ndarray[complex_t, ndim=3] phi1fct_energy = self.phi1fct_energy
si = self.si
#
cdef bool_t bpb_conj, ccp_conj
    cdef int_t bcharge, ccharge, charge, l, nleads
cdef long_t c, b, cb, bp, bpb, cp, ccp, cbp, cpb
cdef long_t ndm0, ndm1, npauli
cdef complex_t fct1, fct2, fct1h, fct2h, phi0bpb, phi0ccp
ndm0, ndm1, npauli, nleads = si.ndm0, si.ndm1, si.npauli, si.nleads
#
cdef np.ndarray[long_t, ndim=1] lenlst = si.lenlst
cdef np.ndarray[long_t, ndim=1] dictdm = si.dictdm
cdef np.ndarray[long_t, ndim=1] shiftlst0 = si.shiftlst0
cdef np.ndarray[long_t, ndim=1] shiftlst1 = si.shiftlst1
cdef np.ndarray[long_t, ndim=1] mapdm0 = si.mapdm0
cdef np.ndarray[bool_t, ndim=1] booldm0 = si.booldm0
cdef np.ndarray[bool_t, ndim=1] conjdm0 = si.conjdm0
#
cdef np.ndarray[complex_t, ndim=2] phi1 = np.zeros((nleads, ndm1), dtype=complexnp)
cdef np.ndarray[complex_t, ndim=1] current = np.zeros(nleads, dtype=complexnp)
cdef np.ndarray[complex_t, ndim=1] energy_current = np.zeros(nleads, dtype=complexnp)
cdef np.ndarray[complex_t, ndim=1] phi0 = np.zeros(ndm0, dtype=complexnp)
#
phi0[0:npauli] = phi0p[0:npauli]
phi0[npauli:ndm0] = phi0p[npauli:ndm0] + 1j*phi0p[ndm0:]
#
for charge in range(si.ncharge-1):
ccharge = charge+1
bcharge = charge
for c, b in itertools.product(si.statesdm[ccharge], si.statesdm[bcharge]):
cb = lenlst[bcharge]*dictdm[c] + dictdm[b] + shiftlst1[bcharge]
for l in range(nleads):
for bp in si.statesdm[bcharge]:
bpb = mapdm0[lenlst[bcharge]*dictdm[bp] + dictdm[b] + shiftlst0[bcharge]]
                    if bpb != -1:
cbp = lenlst[bcharge]*dictdm[c] + dictdm[bp] + shiftlst1[bcharge]
fct1 = phi1fct[l, cbp, 0]
fct1h = phi1fct_energy[l, cbp, 0]
bpb_conj = conjdm0[lenlst[bcharge]*dictdm[bp] + dictdm[b] + shiftlst0[bcharge]]
phi0bpb = phi0[bpb] if bpb_conj else phi0[bpb].conjugate()
phi1[l, cb] = phi1[l, cb] + Tba[l, c, bp]*phi0bpb*fct1
current[l] = current[l] + Tba[l, b, c]*Tba[l, c, bp]*phi0bpb*fct1
energy_current[l] = energy_current[l] + Tba[l, b, c]*Tba[l, c, bp]*phi0bpb*fct1h
for cp in si.statesdm[ccharge]:
ccp = mapdm0[lenlst[ccharge]*dictdm[c] + dictdm[cp] + shiftlst0[ccharge]]
                    if ccp != -1:
cpb = lenlst[bcharge]*dictdm[cp] + dictdm[b] + shiftlst1[bcharge]
fct2 = phi1fct[l, cpb, 1]
fct2h = phi1fct_energy[l, cpb, 1]
ccp_conj = conjdm0[lenlst[ccharge]*dictdm[c] + dictdm[cp] + shiftlst0[ccharge]]
phi0ccp = phi0[ccp] if ccp_conj else phi0[ccp].conjugate()
phi1[l, cb] = phi1[l, cb] + Tba[l, cp, b]*phi0ccp*fct2
current[l] = current[l] + Tba[l, b, c]*phi0ccp*Tba[l, cp, b]*fct2
energy_current[l] = energy_current[l] + Tba[l, b, c]*phi0ccp*Tba[l, cp, b]*fct2h
self.phi1 = phi1
self.current = np.array(-2*current.imag, dtype=doublenp)
self.energy_current = np.array(-2*energy_current.imag, dtype=doublenp)
self.heat_current = self.energy_current - self.current*self.leads.mulst
return 0
@cython.boundscheck(False)
def generate_vec_redfield(np.ndarray[double_t, ndim=1] phi0p, self):
cdef np.ndarray[double_t, ndim=1] E = self.qd.Ea
cdef np.ndarray[complex_t, ndim=3] Tba = self.leads.Tba
cdef np.ndarray[complex_t, ndim=3] phi1fct = self.phi1fct
si = self.si
cdef long_t norm_row = self.funcp.norm_row
#
cdef bool_t bbp_bool
cdef int_t charge, acharge, bcharge, ccharge, l, nleads, \
aap_sgn, bppbp_sgn, bbpp_sgn, ccp_sgn
cdef long_t b, bp, bbp, bb, \
a, ap, aap, \
bpp, bppbp, bbpp, \
c, cp, ccp, \
bpap, ba, bppa, cbpp, cpbp, cb
cdef long_t ndm0, npauli
cdef complex_t fct_aap, fct_bppbp, fct_bbpp, fct_ccp, norm
    cdef complex_t phi0aap, phi0bppbp, phi0bbpp, phi0ccp
ndm0, npauli, nleads = si.ndm0, si.npauli, si.nleads
#
cdef np.ndarray[long_t, ndim=1] lenlst = si.lenlst
cdef np.ndarray[long_t, ndim=1] dictdm = si.dictdm
cdef np.ndarray[long_t, ndim=1] shiftlst0 = si.shiftlst0
cdef np.ndarray[long_t, ndim=1] shiftlst1 = si.shiftlst1
cdef np.ndarray[long_t, ndim=1] mapdm0 = si.mapdm0
cdef np.ndarray[bool_t, ndim=1] booldm0 = si.booldm0
cdef np.ndarray[bool_t, ndim=1] conjdm0 = si.conjdm0
#
cdef np.ndarray[complex_t, ndim=1] phi0 = np.zeros(ndm0, dtype=complexnp)
cdef np.ndarray[complex_t, ndim=1] i_dphi0_dt = np.zeros(ndm0, dtype=complexnp)
#
phi0[0:npauli] = phi0p[0:npauli]
phi0[npauli:ndm0] = phi0p[npauli:ndm0] + 1j*phi0p[ndm0:]
norm = 0
for charge in range(si.ncharge):
acharge = charge-1
bcharge = charge
ccharge = charge+1
for b, bp in itertools.combinations_with_replacement(si.statesdm[charge], 2):
bbp = mapdm0[lenlst[bcharge]*dictdm[b] + dictdm[bp] + shiftlst0[bcharge]]
            if bbp != -1:
if b == bp: norm = norm + phi0[bbp]
bbp_bool = booldm0[lenlst[bcharge]*dictdm[b] + dictdm[bp] + shiftlst0[bcharge]]
if bbp_bool:
i_dphi0_dt[bbp] = i_dphi0_dt[bbp] + (E[b]-E[bp])*phi0[bbp]
# --------------------------------------------------
for a, ap in itertools.product(si.statesdm[charge-1], si.statesdm[charge-1]):
aap = mapdm0[lenlst[acharge]*dictdm[a] + dictdm[ap] + shiftlst0[acharge]]
                    if aap != -1:
bpap = lenlst[acharge]*dictdm[bp] + dictdm[ap] + shiftlst1[acharge]
ba = lenlst[acharge]*dictdm[b] + dictdm[a] + shiftlst1[acharge]
fct_aap = 0
for l in range(nleads):
fct_aap += (+ Tba[l, b, a]*Tba[l, ap, bp]*phi1fct[l, bpap, 0].conjugate()
- Tba[l, b, a]*Tba[l, ap, bp]*phi1fct[l, ba, 0])
phi0aap = phi0[aap] if conjdm0[lenlst[acharge]*dictdm[a] + dictdm[ap] + shiftlst0[acharge]] else phi0[aap].conjugate()
i_dphi0_dt[bbp] = i_dphi0_dt[bbp] + fct_aap*phi0aap
# --------------------------------------------------
for bpp in si.statesdm[charge]:
bppbp = mapdm0[lenlst[bcharge]*dictdm[bpp] + dictdm[bp] + shiftlst0[bcharge]]
                    if bppbp != -1:
fct_bppbp = 0
for a in si.statesdm[charge-1]:
bppa = lenlst[acharge]*dictdm[bpp] + dictdm[a] + shiftlst1[acharge]
for l in range(nleads):
fct_bppbp += +Tba[l, b, a]*Tba[l, a, bpp]*phi1fct[l, bppa, 1].conjugate()
for c in si.statesdm[charge+1]:
cbpp = lenlst[bcharge]*dictdm[c] + dictdm[bpp] + shiftlst1[bcharge]
for l in range(nleads):
fct_bppbp += +Tba[l, b, c]*Tba[l, c, bpp]*phi1fct[l, cbpp, 0]
phi0bppbp = phi0[bppbp] if conjdm0[lenlst[bcharge]*dictdm[bpp] + dictdm[bp] + shiftlst0[bcharge]] else phi0[bppbp].conjugate()
i_dphi0_dt[bbp] = i_dphi0_dt[bbp] + fct_bppbp*phi0bppbp
# --------------------------------------------------
bbpp = mapdm0[lenlst[bcharge]*dictdm[b] + dictdm[bpp] + shiftlst0[bcharge]]
                    if bbpp != -1:
fct_bbpp = 0
for a in si.statesdm[charge-1]:
bppa = lenlst[acharge]*dictdm[bpp] + dictdm[a] + shiftlst1[acharge]
for l in range(nleads):
fct_bbpp += -Tba[l, bpp, a]*Tba[l, a, bp]*phi1fct[l, bppa, 1]
for c in si.statesdm[charge+1]:
cbpp = lenlst[bcharge]*dictdm[c] + dictdm[bpp] + shiftlst1[bcharge]
for l in range(nleads):
fct_bbpp += -Tba[l, bpp, c]*Tba[l, c, bp]*phi1fct[l, cbpp, 0].conjugate()
phi0bbpp = phi0[bbpp] if conjdm0[lenlst[bcharge]*dictdm[b] + dictdm[bpp] + shiftlst0[bcharge]] else phi0[bbpp].conjugate()
i_dphi0_dt[bbp] = i_dphi0_dt[bbp] + fct_bbpp*phi0bbpp
# --------------------------------------------------
for c, cp in itertools.product(si.statesdm[charge+1], si.statesdm[charge+1]):
ccp = mapdm0[lenlst[ccharge]*dictdm[c] + dictdm[cp] + shiftlst0[ccharge]]
                    if ccp != -1:
cpbp = lenlst[bcharge]*dictdm[cp] + dictdm[bp] + shiftlst1[bcharge]
cb = lenlst[bcharge]*dictdm[c] + dictdm[b] + shiftlst1[bcharge]
fct_ccp = 0
for l in range(nleads):
fct_ccp += (+ Tba[l, b, c]*Tba[l, cp, bp]*phi1fct[l, cpbp, 1]
- Tba[l, b, c]*Tba[l, cp, bp]*phi1fct[l, cb, 1].conjugate())
phi0ccp = phi0[ccp] if conjdm0[lenlst[ccharge]*dictdm[c] + dictdm[cp] + shiftlst0[ccharge]] else phi0[ccp].conjugate()
i_dphi0_dt[bbp] = i_dphi0_dt[bbp] + fct_ccp*phi0ccp
# --------------------------------------------------
i_dphi0_dt[norm_row] = 1j*(norm-1)
return np.concatenate((i_dphi0_dt.imag, i_dphi0_dt[npauli:ndm0].real))
class ApproachRedfield(Approach):
kerntype = 'Redfield'
generate_fct = generate_phi1fct
generate_kern = generate_kern_redfield
generate_current = generate_current_redfield
generate_vec = generate_vec_redfield
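    # Note: the module-level generate_* callables above are attached as plain
    # class attributes, and the Approach driver (see aprclass) is expected to
    # invoke them with the approach instance. generate_vec_redfield takes
    # phi0p first and self second, matching a right-hand-side signature for
    # iterative solvers.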
# ---------------------------------------------------------------------------------------------------
<|end_of_text|># -*- coding: utf-8 -*-
import logging  # level constants used below; the Plog* names and C helpers come from the including module
cpdef set_logger(int level=logging.DEBUG):
cdef PlogSeverity plog_level = PlogDebug
if level == logging.DEBUG:
plog_level = PlogDebug
elif level == logging.INFO:
plog_level = PlogInfo
elif level == logging.WARNING:
plog_level = PlogWarning
elif level == logging.ERROR:
plog_level = PlogError
elif level == logging.CRITICAL:
plog_level = PlogFatal
elif level == logging.NOTSET:
plog_level = PlogNone
set_console_logger(plog_level)
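# Hedged usage sketch (assumes the Plog* severities and set_console_logger are
# provided by the module that includes this file):
#   import logging
#   set_logger(logging.WARNING)   # console output at warning severity and above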
cpdef set_python_logger(name_or_logger_obj):
if isinstance(name_or_logger_obj, unicode):
name_or_logger_obj = make_utf8(name_or_logger_obj)
if isinstance(name_or_logger_obj, bytes):
set_py_logger(tocbstring(name_or_logger_obj))
else:
set_py_logger(name_or_logger_obj)
<|end_of_text|>cdef extern from 'dynd/config.hpp' namespace 'dynd':
char[] dynd_version_string
char[] dynd_git_sha1
<|end_of_text|>cdef extern from "wkhtmltox/pdf.h":
struct wkhtmltopdf_converter:
pass
    struct wkhtmltopdf_object_settings:
pass
struct wkhtmltopdf_global_settings:
pass
bint wkhtmltopdf_init(int use_graphics)
bint wkhtmltopdf_deinit()
char *wkhtmltopdf_version()
wkhtmltopdf_global_settings *wkhtmltopdf_create_global_settings()
wkhtmltopdf_object_settings *wkhtmltopdf_create_object_settings()
bint wkhtmltopdf_set_global_setting(wkhtmltopdf_global_settings *settings, char *name, char *value)
bint wkhtmltopdf_get_global_setting(wkhtmltopdf_global_settings *settings, char *name, char *value, int vs)
bint wkhtmltopdf_set_object_setting(wkhtmltopdf_object_settings *settings, char *name, char *value)
bint wkhtmltopdf_get_object_setting(wkhtmltopdf_object_settings *settings, char *name, char *value, int vs)
wkhtmltopdf_converter *wkhtmltopdf_create_converter(wkhtmltopdf_global_settings *settings)
void wkhtmltopdf_destroy_converter(wkhtmltopdf_converter *converter)
bint wkhtmltopdf_convert(wkhtmltopdf_converter *converter)
void wkhtmltopdf_add_object(wkhtmltopdf_converter *converter, wkhtmltopdf_object_settings *setting, char *data)
int wkhtmltopdf_http_error_code(wkhtmltopdf_converter *converter)
cdef extern from "wkhtmltox/image.h":
struct wkhtmltoimage_global_settings:
pass
struct wkhtmltoimage_converter:
pass
bint wkhtmltoimage_init(int use_graphics)
bint wkhtmltoimage_deinit()
char *wkhtmltoimage_version()
wkhtmltoimage_global_settings *wkhtmltoimage_create_global_settings()
bint wkhtmltoimage_set_global_setting(wkhtmltoimage_global_settings *settings, char *name, char *value)
bint wkhtmltoimage_get_global_setting(wkhtmltoimage_global_settings *settings, char *name, char *value, int vs)
wkhtmltoimage_converter *wkhtmltoimage_create_converter(wkhtmltoimage_global_settings *settings, char *data)
void wkhtmltoimage_destroy_converter(wkhtmltoimage_converter *converter)
bint wkhtmltoimage_convert(wkhtmltoimage_converter *converter)
int wkhtmltoimage_http_error_code(wkhtmltoimage_converter *converter)
cdef int wkhtmltopdf_is_init = 0
cdef int wkhtmltoimage_is_init = 0
cdef class _Pdf:
cdef wkhtmltopdf_global_settings *_c_global_settings
cdef bint last_http_error_code
def __cinit__(self):
global wkhtmltopdf_is_init
if not wkhtmltopdf_is_init:
wkhtmltopdf_is_init = wkhtmltopdf_init(0)
self._c_global_settings = wkhtmltopdf_create_global_settings()
def __dealloc__(self):
pass
# wkhtmltopdf_deinit()
def version(self):
return wkhtmltopdf_version()
def set_global_setting(self, char *name, char *value):
return wkhtmltopdf_set_global_setting(self._c_global_settings, name, value)
def convert(self, pages):
cdef wkhtmltopdf_converter *c
cdef wkhtmltopdf_object_settings *os
if not len(pages):
return False
c = wkhtmltopdf_create_converter(self._c_global_settings)
for page in pages:
os = wkhtmltopdf_create_object_settings()
for k, v in page.items():
wkhtmltopdf_set_object_setting(os, k, v)
wkhtmltopdf_add_object(c, os, NULL)
ret = wkhtmltopdf_convert(c)
self.last_http_error_code = wkhtmltopdf_http_error_code(c)
wkhtmltopdf_destroy_converter(c)
return ret
def http_error_code(self):
return self.last_http_error_code
class Pdf:
def __init__(self):
self._pdf = _Pdf()
self.pages = []
def add_page(self, settings):
self.pages.append(settings)
def convert(self):
        return self._pdf.convert(self.pages)
def __getattr__(self, name):
return getattr(self._pdf, name)
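# A minimal usage sketch, assuming wkhtmltox is available; the setting names
# "out" and "page" follow the wkhtmltopdf documentation and are assumptions
# here, not defined by this wrapper:
#   pdf = Pdf()
#   pdf.set_global_setting(b"out", b"/tmp/example.pdf")
#   pdf.add_page({b"page": b"https://example.org"})
#   pdf.convert()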
cdef class Image:
cdef wkhtmltoimage_global_settings *_c_global_settings
cdef bint last_http_error_code
def __cinit__(self):
global wkhtmltoimage_is_init
if not wkhtmltoimage_is_init:
wkhtmltoimage_is_init = wkhtmltoimage_init(0)
self._c_global_settings = wkhtmltoimage_create_global_settings()
def __dealloc__(self):
pass
# wkhtmltoimage_deinit()
def version(self):
        return wkhtmltoimage_version()
def set_global_setting(self, char *name, char *value):
return wkhtmltoimage_set_global_setting(self._c_global_settings, name, value)
def convert(self):
cdef wkhtmltoimage_converter *c
c = wkhtmltoimage_create_converter(self._c_global_settings, NULL)
ret = wkhtmltoimage_convert(c)
self.last_http_error_code = wkhtmltoimage_http_error_code(c)
wkhtmltoimage_destroy_converter(c)
return ret
def http_error_code(self):
return self.last_http_error_code
<|end_of_text|>from .order_filter_delegate cimport OrderFilterDelegate
from libc.stdint cimport int64_t
cdef class PassThroughFilterDelegate(OrderFilterDelegate):
pass
<|end_of_text|>from sage.rings.ring cimport CommutativeRing
from sage.rings.number_field.number_field_element_quadratic cimport NumberFieldElement_quadratic
# Residue element
ctypedef long residue_element[2]
cdef class ResidueRingElement
cdef class ResidueRing_abstract(CommutativeRing):
cdef object P, F
cdef public object element_class, _residue_field
cdef long n0, n1, p, e, _cardinality
cdef long im_gen0
cdef object element_to_residue_field(self, residue_element x)
cdef void add(self, residue_element rop, residue_element op0, residue_element op1)
cdef void sub(self, residue_element rop, residue_element op0, residue_element op1)
cdef void mul(self, residue_element rop, residue_element op0, residue_element op1)
cdef int inv(self, residue_element rop, residue_element op) except -1
cdef bint is_unit(self, residue_element op)
cdef void neg(self, residue_element rop, residue_element op)
cdef ResidueRingElement new_element(self)
cdef int coefficients(self, long* v0, long* v1, NumberFieldElement_quadratic x) except -1
cdef int coerce_from_nf(self, residue_element r, NumberFieldElement_quadratic x) except -1
cdef bint element_is_1(self, residue_element op)
cdef bint element_is_0(self, residue_element op)
cdef void set_element_to_1(self, residue_element op)
cdef void set_element_to_0(self, residue_element op)
cdef void set_element(self, residue_element rop, residue_element op)
cdef int set_element_from_tuple(self, residue_element rop, x) except -1
cdef int cmp_element(self, residue_element left, residue_element right)
cdef int pow(self, residue_element rop, residue_element op, long e) except -1
cdef bint is_square(self, residue_element op) except -2
cdef int sqrt(self, residue_element rop, residue_element op) except -1
cdef int ith_element(self, residue_element rop, long i) except -1
cpdef long cardinality(self)
cdef void unsafe_ith_element(self, residue_element rop, long i)
cdef int next_element(self, residue_element rop, residue_element op) except -1
cdef bint is_last_element(self, residue_element op)
cdef long index_of_element(self, residue_element op) except -1
cdef long index_of_element_in_P(self, residue_element op) except -1
cdef int next_element_in_P(self, residue_element rop, residue_element op) except -1
cdef bint is_last_element_in_P(self, residue_element op)
cdef element_to_str(self, residue_element op)
cdef class ResidueRingElement:
cdef residue_element x
cdef ResidueRing_abstract _parent
cpdef long index(self)
cpdef parent(self)
cdef new_element(self)
cpdef bint is_unit(self)
cpdef bint is_square(self)
cpdef sqrt(self)
<|end_of_text|>#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
cdef class DurationStruct_Builder(thrift.py3.builder.StructBuilder):
_struct_type = _apache_thrift_type_standard_types.DurationStruct
def __iter__(self):
yield "seconds", self.seconds
yield "nanos", self.nanos
cdef class TimeStruct_Builder(thrift.py3.builder.StructBuilder):
_struct_type = _apache_thrift_type_standard_types.TimeStruct
def __iter__(self):
yield "seconds", self.seconds
yield "nanos", self.nanos
cdef class FractionStruct_Builder(thrift.py3.builder.StructBuilder):
_struct_type = _apache_thrift_type_standard_types.FractionStruct
def __iter__(self):
yield "numerator", self.numerator
yield "denominator", self.denominator
cdef class UriStruct_Builder(thrift.py3.builder.StructBuilder):
_struct_type = _apache_thrift_type_standard_types.UriStruct
def __iter__(self):
yield "scheme", self.scheme
yield "domain", self.domain
yield "path", self.path
yield "query", self.query
yield "fragment", self.fragment
cdef class TypeUri_Builder(thrift.py3.builder.StructBuilder):
_struct_type = _apache_thrift_type_standard_types.TypeUri
def __iter__(self):
yield "uri", self.uri
yield "typeHashPrefixSha2_256", self.typeHashPrefixSha2_256
cdef class TypeName_Builder(thrift.py3.builder.StructBuilder):
_struct_type = _apache_thrift_type_standard_types.TypeName
def __iter__(self):
yield "boolType", self.boolType
yield "byteType", self.byteType
yield "i16Type", self.i16Type
yield "i32Type", self.i32Type
yield "i64Type", self.i64Type
yield "floatType", self.floatType
yield "doubleType", self.doubleType
yield "stringType", self.stringType
yield "binaryType", self.binaryType
yield "enumType", self.enumType
yield "typedefType", self.typedefType
yield "structType", self.structType
yield "unionType", self.unionType
yield "exceptionType", self.exceptionType
yield "listType", self.listType
yield "setType", self.setType
yield "mapType", self.mapType
<|end_of_text|>cdef class NumberIterator:
cdef unsigned long current
cdef unsigned long stop
cdef class Numbers(dict):
cpdef NumberIterator count(self, unsigned long start=*, unsigned long stop=*)
cdef class Number(int):
cdef Number _inv
cdef unsigned long _order
cpdef unsigned long order(self)
cpdef inline unsigned long nimsum(unsigned long a, unsigned long b)
cpdef unsigned long nimproduct(unsigned long a, unsigned long b)
cpdef unsigned long nimpower(unsigned long a, long n)
cpdef unsigned long niminvert(unsigned long a)
cpdef unsigned long nimorder(unsigned long a)
<|end_of_text|># cython: language_level=3
import numpy as np
cimport numpy as np
def calculate_optimal_solution(int W, items, int n):
    cdef:
        int i, x, take_item, dont_take_item
    A = np.zeros((n+1, W+1), dtype=int)
for i in range(1, n+1):
if i % 50 == 0:
print(i, n)
for x in range(0, W+1):
dont_take_item = A[i-1, x]
if x - items[i-1][1] >= 0:
take_item = A[i-1, x-items[i-1][1]] + items[i - 1][0]
else:
take_item = 0 # Fallback if can't fit item in sack
A[i, x] = max([dont_take_item, take_item])
    return A
<|end_of_text|>set terminal eps
set output 'ionmove_v1.eps'
set linestyle 1 points pointtype 6 colour blue
set linestyle 2 points pointtype 8 colour red
set key top left
set width 11
set xlabel 'Voltage difference between electrodes \#1 and \#7 (V)'
set ylabel 'Measured ion position ($\mu$m)'
set font 1.7
set xtics -4,2,8
set mxtics -5,2,8
set ytics -10,5,20
plot [-5.5:8.5][-10:20] 'ionmove.dat' t 'Horizontal' using 1:2 w ls 1,\
'ionmove.dat' t 'Vertical' using 1:3 w ls 2
<|end_of_text|>cdef extern from "rlib.h":
double rust_double(double x)
def call_rust_double(double x):
return rust_double(x)
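# Hedged usage sketch: assuming the extension is linked against a Rust library
# exposing the rust_double symbol declared in rlib.h above,
#   call_rust_double(2.5)
# forwards the value through the C ABI and returns whatever rust_double computes.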
<|end_of_text|>'''
@organization: Lancaster University & University of Leeds
@version: 1.0
Created on 11/12/2014
@author: Victor Padilla
@contact: [email protected]
Contains an implementation of the Needleman Wunsch algorithm.
for two dimension arrays. It is a Cython file and should be
compiled using the file setupCython.py
python setupCython.py build
The output will be in the build folder
MultipleOMR\\build\\lib.win32-2.7\\MultipleOMR
# if isFast==False, calculate the difference between the strings using the Needleman-Wunsch in C.
# The score value will be between +1 and -1
# if isFast==True, just compare the strings: equal (score=1) or different (score=-1)
# 'isFast=True' means that the difference between foo00 and foo000 is -1
# alignment are the sequences aligned
# finalValue is the last value of the matrix
# finalScore is the similarity of both sequences
usage:
seq1=["foo00","abc","123"]
seq2=["foo000","abc","1234"]
faa=FastAlignmentArrays()
alignment,finalValue,finalScore=faa.needleman_wunsch(seq1, seq2,isFast=True)
# finalScore=-0.333333343267
alignment,finalValue,finalScore=faa.needleman_wunsch(seq1, seq2,isFast=False)
# finalScore=0.722222208977
'''
import numpy as np
cimport numpy as np
import NWunsch
# -*- coding: utf-8 -*-
from cpython cimport bool
# the three directions you can go in the traceback:
cdef int DIAG = 0
cdef int UP = 1
cdef int LEFT = 2
cdef float score=0
class FastAlignmentArrays:
def __needleman_wunsch_matrix(self,seq1,seq2,isFast):
"""
fill in the DP matrix according to the Needleman-Wunsch algorithm.
Returns the matrix of scores and the matrix of pointers
        if isFast==False, calculate the difference between the strings using the Needleman-Wunsch in C.
        The score value will be between +1 and -1
        if isFast==True, just compare the strings: equal (score=1) or different (score=-1)
"""
indel = -1 # indel penalty
cdef int n = len(seq1)
cdef int m = len(seq2)
cdef np.ndarray s = np.zeros( (n+1, m+1) ) # DP matrix
cdef np.ndarray ptr = np.zeros( (n+1, m+1), dtype=int ) # matrix of pointers
##### INITIALIZE SCORING MATRIX (base case) #####
cdef int i
cdef int j
for i in range(1, n+1) :
s[i,0] = indel * i
for j in range(1, m+1):
s[0,j] = indel * j
########## INITIALIZE TRACEBACK MATRIX ##########
# Tag first row by LEFT, indicating initial "-"s
ptr[0,1:] = LEFT
# Tag first column by UP, indicating initial "-"s
ptr[1:,0] = UP
#####################################################
cdef int p
cdef int q
diagonalRange=350
for i in range(1,n+1):
p=i-diagonalRange
q=i+diagonalRange
if(p<1):
p=1
if(q>m+1):
q=m+1
for j in range(p,q):
# match
myseq1=seq1[i-1]
myseq2=seq2[j-1]
if isinstance(myseq1,list):
myseq1=myseq1[0]
if isinstance(myseq2,list):
myseq2=myseq2[0]
if(myseq1== myseq2):
score=1
else:
score=-1
if len(myseq1)==0 or len(myseq2)==0:
score=0
#####For double alignment###
if isFast==False:
if len(myseq1)==0 or len(myseq2)==0:
score=0
else:
score=NWunsch.NWunsch_getSimilarity(myseq1,myseq2)
############################
s[i,j] = s[i-1,j-1]+ score
# indel penalty
if s[i-1,j] + indel > s[i,j] :
s[i,j] = s[i-1,j] + indel
                    ptr[i,j] = UP
# indel penalty
if s[i, j-1] + indel > s[i,j]:
s[i,j] = s[i, j-1] + indel
ptr[i,j] = LEFT
return s, ptr
def __needleman_wunsch_trace(self,seq1, seq2,np.ndarray s, np.ndarray ptr) :
"""
Function that traces back the best path to get alignment
"""
#### TRACE BEST PATH TO GET ALIGNMENT ####
align1 = []
align2 = []
align1_gap = []
align2_gap = []
gap1=[]
gap2=[]
cdef int n,m
n, m = (len(seq1), len(seq2))
cdef int i,j,curr
i = n
j = m
curr = ptr[i, j]
while (i > 0 or j > 0):
ptr[i,j] += 3
if curr == DIAG :
align1.append(seq1[i-1])
align2.append(seq2[j-1])
align1_gap.append(seq1[i-1])
align2_gap.append(seq2[j-1])
i -= 1
j -= 1
elif curr == LEFT:
align1.append("*")
align2.append(seq2[j-1])
align1_gap.append("[GAP]")
align2_gap.append(seq2[j-1])
j -= 1
elif curr == UP:
align1.append(seq1[i-1])
align2.append("*")
align1_gap.append(seq1[i-1])
align2_gap.append("[GAP]")
i -= 1
curr = ptr[i,j]
align1.reverse()
align2.reverse()
align1_gap.reverse()
align2_gap.reverse()
#gaps
for index in range(len(align1_gap)):
            if align1_gap[index] == "[GAP]":
gap1.append(index)
for index in range(len(align2_gap)):
            if align2_gap[index] == "[GAP]":
gap2.append(index)
return align1, align2,gap1,gap2
def needleman_wunsch(self,seq1, seq2,isFast=True) :
"""
Computes an optimal global alignment of two sequences using the Needleman-Wunsch
algorithm
returns the alignment and its score
        # if isFast==False, calculate the difference between the strings using the Needleman-Wunsch in C.
        # The score value will be between +1 and -1
        # if isFast==True, just compare the strings: equal (score=1) or different (score=-1)
        # 'isFast=True' means that the difference between foo00 and foo000 is -1
# alignment are the sequences aligned
# finalValue is the last value of the matrix
# finalScore is the similarity of both sequences
usage:
seq1=["foo00","abc","123"]
seq2=["foo000","abc","1234"]
        faa=FastAlignmentArrays()
alignment,finalValue,finalScore=faa.needleman_wunsch(seq1, seq2,isFast=True)
# finalScore=-0.333333343267
alignment,finalValue,finalScore=faa.needleman_wunsch(seq1, seq2,isFast=False)
# finalScore=0.722222208977
"""
s,ptr = self.__needleman_wunsch_matrix(seq1, seq2,isFast)
alignment = self.__needleman_wunsch_trace(seq1, seq2, s, ptr)
cdef int maxlen=len(seq1)
if len(seq2)>len(seq1):
maxlen=len(seq2)
cdef float finalscore=s[len(seq1), len(seq2)]/maxlen
return alignment, s[len(seq1), len(seq2)],finalscore
<|end_of_text|>from libcpp.string cimport string
from libcpp.map cimport map
cdef extern from "rpc/common_types.h" namespace "Plow":
ctypedef string Guid
ctypedef long Timestamp
ctypedef map[string, string] Attrs
<|end_of_text|># cython: language_level=3
# Copyright (c) 2014-2023, Dr Alex Meakins, Raysect Project
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Raysect Project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
cimport cython
@cython.freelist(256)
cdef class Intersection:
"""
Describes the result of a ray-primitive intersection.
Users are unlikely to instance this class themselves, in most cases they will
need to inspect this object as the result of a Ray-Primitive intersection
encountered in the world.hit() method
(:meth:`raysect.core.scenegraph.world.World.hit <raysect.core.scenegraph.world.World.hit>`).
The inside and outside points are launch points for rays emitted from the hit point on the surface. Rays cannot be
launched from the hit point directly as they risk re-intersecting the same surface due to numerical accuracy. The
inside and outside points are slightly displaced from the primitive surface at a sufficient distance to prevent
re-intersection due to numerical accuracy issues. The inside_point is shifted backwards into the surface relative to
the surface normal. The outside_point is equivalently shifted away from the surface in the direction of the surface
normal.
:param Ray ray: The incident ray object (world space).
:param double ray_distance: The distance of the intersection along the ray path.
:param Primitive primitive: The intersected primitive object.
:param Point3D hit_point: The point of intersection between the ray and the primitive (primitive local space).
:param Point3D inside_point: The interior ray launch point (primitive local space).
:param Point3D outside_point: The exterior ray launch point (primitive local space).
:param Normal3D normal: The surface normal (primitive local space)
:param bool exiting: True if the ray is exiting the surface, False otherwise.
:param AffineMatrix3D world_to_primitive: A world to primitive local transform matrix.
:param AffineMatrix3D primitive_to_world: A primitive local to world transform matrix.
:ivar bool exiting: True if the ray is exiting the surface, False otherwise.
:ivar Point3D hit_point: The point of intersection between the ray and the primitive
(primitive local space).
:ivar Point3D inside_point: The interior ray launch point (primitive local space).
:ivar Normal3D normal: The surface normal (primitive local space).
:ivar Point3D outside_point: The exterior ray launch point (primitive local space).
:ivar Primitive primitive: The primitive object that was intersected by the Ray.
:ivar AffineMatrix3D primitive_to_world: The primitive's local to world transform matrix.
:ivar Ray ray: The incident ray object (world space).
:ivar double ray_distance: The distance of the intersection along the ray path.
:ivar AffineMatrix3D world_to_primitive: A world to primitive local transform matrix.
.. code-block:: pycon
>>> from raysect.core import Point3D, Vector3D
>>> from raysect.core.ray import Ray as CoreRay
>>>
>>> intersection = world.hit(CoreRay(Point3D(0, 0, 0,), Vector3D(1, 0, 0)))
>>> if intersection is not None:
>>> hit_point = intersection.hit_point.transform(intersection.primitive_to_world)
>>> # do something with the hit point...
"""
def __init__(self, Ray ray, double ray_distance, Primitive primitive,
Point3D hit_point, Point3D inside_point, Point3D outside_point,
Normal3D normal, bint exiting, AffineMatrix3D world_to_primitive, AffineMatrix3D primitive_to_world):
        self._construct(ray, ray_distance, primitive, hit_point, inside_point, outside_point, normal,
                        exiting, world_to_primitive, primitive_to_world)
cdef void _construct(self, Ray ray, double ray_distance, Primitive primitive,
Point3D hit_point, Point3D inside_point, Point3D outside_point,
Normal3D normal, bint exiting, AffineMatrix3D world_to_primitive,
AffineMatrix3D primitive_to_world):
self.ray = ray
self.ray_distance = ray_distance
self.exiting = exiting
self.primitive = primitive
self.hit_point = hit_point
self.inside_point = inside_point
self.outside_point = outside_point
self.normal = normal
self.world_to_primitive = world_to_primitive
self.primitive_to_world = primitive_to_world
def __repr__(self):
return "Intersection({}, {}, {}, {}, {}, {}, {}, {}, {}, {})".format(
self.ray, self.ray_distance, self.primitive,
self.hit_point, self.inside_point, self.outside_point,
self.normal, self.exiting,
self.world_to_primitive, self.primitive_to_world)
<|end_of_text|>ctypedef struct mycpx:
float real
float imag
ctypedef union uu:
int a
short b, c
cdef mycpx zz
zz.real = 3.1415
zz.imag = -1.0
print('zz.real = {0}\n'.format(zz.real))
print('zz.imag = {0} \n'.format(zz.imag))<|end_of_text|>* File: ALU_1Bit.pex.netlist.ALU_1BIT.pxi
* Created: Sat Nov 9 15:04:36 2019
*
x_PM_ALU_1BIT_X_IX5.A N_X_IX5.A_X_ix5.MN4_g N_X_IX5.A_X_ix5.MP4_g
+ N_X_IX5.A_X_ix5.MN2_d N_X_IX5.A_X_ix5.MP1_d N_X_IX5.A_X_ix5.MP2_d
+ PM_ALU_1BIT_X_IX5.A
x_PM_ALU_1BIT_NX0 N_NX0_X_ix5.MP1_g N_NX0_X_ix5.MN2_g N_NX0_X_ix1.MN6_d
+ N_NX0_X_ix1.MP1_d PM_ALU_1BIT_NX0
x_PM_ALU_1BIT_VSS N_VSS_X_ix80.MN1_s N_VSS_X_ix80.MN1_b N_VSS_X_ix82.MN1_s
+ N_VSS_X_ix1.MN6_s N_VSS_X_ix5.MN1_s N_VSS_X_ix1.MN3_s N_VSS_X_ix86.MN2_s
+ N_VSS_X_ix86.MN1_s N_VSS_X_ix84.MN5_s N_VSS_X_ix21.MN4_s N_VSS_X_ix84.MN3_s
+ VSS PM_ALU_1BIT_VSS
x_PM_ALU_1BIT_X_IX1.N$4 N_X_IX1.N$4_X_ix1.MN6_g N_X_IX1.N$4_X_ix1.MP1_g
+ N_X_IX1.N$4_X_ix1.MN1_d N_X_IX1.N$4_X_ix1.MP6_d PM_ALU_1BIT_X_IX1.N$4
x_PM_ALU_1BIT_VDD N_VDD_X_ix80.MP1_b N_VDD_X_ix80.MP1_s N_VDD_X_ix82.MP1_b
+ N_VDD_X_ix82.MP1_s N_VDD_X_ix84.MP4_b N_VDD_X_ix5.MP1_s N_VDD_X_ix1.MP1_s
+ N_VDD_X_ix5.MP3_s N_VDD_X_ix1.MP2_s N_VDD_X_ix1.MP3_s N_VDD_X_ix86.MP1_s
+ N_VDD_X_ix21.MP1_s N_VDD_X_ix84.MP4_s N_VDD_X_ix84.MP5_s N_VDD_X_ix21.MP4_s
+ N_VDD_X_ix84.MP2_s VDD PM_ALU_1BIT_VDD
x_PM_ALU_1BIT_CB N_CB_X_ix5.MN4_d N_CB_X_ix5.MP4_d CB PM_ALU_1BIT_CB
x_PM_ALU_1BIT_X_IX1.N$7 N_X_IX1.N$7_X_ix1.MP2_g N_X_IX1.N$7_X_ix1.MN3_g
+ N_X_IX1.N$7_X_ix1.MN4_d N_X_IX1.N$7_X_ix1.MP4_d PM_ALU_1BIT_X_IX1.N$7
x_PM_ALU_1BIT_NX85 N_NX85_X_ix21.MN4_g N_NX85_X_ix21.MP5_g N_NX85_X_ix86.MN2_d
+ N_NX85_X_ix86.MP2_d PM_ALU_1BIT_NX85
x_PM_ALU_1BIT_Y N_Y_X_ix21.MN1_d N_Y_X_ix21.MN2_d N_Y_X_ix21.MP3_d Y
+ PM_ALU_1BIT_Y
x_PM_ALU_1BIT_X_IX84.N$9 N_X_IX84.N$9_X_ix84.MP2_g N_X_IX84.N$9_X_ix84.MN3_g
+ N_X_IX84.N$9_X_ix84.MN4_d N_X_IX84.N$9_X_ix84.MP4_d PM_ALU_1BIT_X_IX84.N$9
x_PM_ALU_1BIT_NX83 N_NX83_X_ix21.MP4_g N_NX83_X_ix21.MN3_g N_NX83_X_ix84.MN2_d
+ N_NX83_X_ix84.MP3_d PM_ALU_1BIT_NX83
x_PM_ALU_1BIT_A N_A_X_ix80.MN1_g N_A_X_ix80.MP1_g N_A_X_ix1.MP6_g
+ N_A_X_ix1.MN2_g N_A_X_ix1.MP4_g N_A_X_ix1.MN5_g N_A_X_ix84.MN4_g
+ N_A_X_ix84.MP4_g N_A_X_ix84.MP1_g N_A_X_ix84.MN1_g A PM_ALU_1BIT_A
x_PM_ALU_1BIT_NX79 N_NX79_X_ix80.MN1_d N_NX79_X_ix80.MP1_d N_NX79_X_ix21.MN1_g
+ N_NX79_X_ix21.MP1_g PM_ALU_1BIT_NX79
x_PM_ALU_1BIT_CONTROL[1] N_CONTROL[1]_X_ix5.MN3_g N_CONTROL[1]_X_ix5.MP2_g
+ N_CONTROL[1]_X_ix86.MN1_g N_CONTROL[1]_X_ix86.MP2_g N_CONTROL[1]_X_ix21.MN5_g
+ N_CONTROL[1]_X_ix21.MP3_g CONTROL[1] PM_ALU_1BIT_CONTROL[1]
x_PM_ALU_1BIT_B N_B_X_ix82.MN1_g N_B_X_ix82.MP1_g N_B_X_ix5.MN1_g
+ N_B_X_ix5.MP3_g N_B_X_ix84.MN5_g N_B_X_ix84.MP5_g N_B_X_ix84.MN2_g
+ N_B_X_ix84.MP3_g B PM_ALU_1BIT_B
x_PM_ALU_1BIT_CONTROL[0] N_CONTROL[0]_X_ix1.MP5_g N_CONTROL[0]_X_ix1.MN1_g
+ N_CONTROL[0]_X_ix1.MP3_g N_CONTROL[0]_X_ix1.MN4_g N_CONTROL[0]_X_ix86.MN2_g
+ N_CONTROL[0]_X_ix86.MP1_g CONTROL[0] PM_ALU_1BIT_CONTROL[0]
x_PM_ALU_1BIT_NX81 N_NX81_X_ix82.MN1_d N_NX81_X_ix82.MP1_d N_NX81_X_ix21.MN2_g
+ N_NX81_X_ix21.MP2_g PM_ALU_1BIT_NX81
<|end_of_text|>from libc cimport stdlib
import locale
import os
cimport numpy as cnp
import numpy as np
from gnome import basic_types
from type_defs cimport Seconds, DateTimeRec
cimport utils
cdef class CyDateTime:
def __dealloc__(self):
"""
No python or cython objects need to be deallocated.
Tried deleting seconds, tSeconds, dateVal here, but compiler
throws errors
"""
pass
def DateToSeconds(self, cnp.ndarray[DateTimeRec, ndim=1] date):
cdef Seconds seconds
utils.DateToSeconds(&date[0], &seconds)
return seconds
def SecondsToDate(self, Seconds secs):
cdef cnp.ndarray[DateTimeRec, ndim = 1] daterec
        daterec = np.empty((1, ), dtype=basic_types.date_rec)
utils.SecondsToDate(secs, &daterec[0])
return daterec[:][0]
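# Hedged round-trip sketch (the date_rec field layout comes from
# gnome.basic_types):
#   cdt = CyDateTime()
#   rec = np.zeros(1, dtype=basic_types.date_rec)   # fill fields as needed
#   cdt.SecondsToDate(cdt.DateToSeconds(rec))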
def srand(seed):
"""
Resets C random seed
"""
stdlib.srand(seed)
def rand():
"""
Calls the C stdlib.rand() function
Only implemented for testing that the srand was set correctly
"""
return stdlib.rand()
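# Example: after srand(42), successive rand() calls return the C library's
# deterministic sequence, which lets tests confirm the seed reached stdlib.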
cdef bytes to_bytes(unicode ucode):
"""
Encode a string to its unicode type to default file system encoding for
the OS.
It uses locale.getpreferredencoding() to get the filesystem encoding
For the mac it encodes it as utf-8.
For windows this appears to be cp1252.
The C++ expects char * so either of these encodings appear to work. If the
getpreferredencoding returns a type of encoding that is incompatible with
a char * C++ input, then things will fail.
"""
cdef bytes byte_string
try:
byte_string = ucode.encode(locale.getpreferredencoding())
except Exception as err:
raise err
return byte_string
def filename_as_bytes(filename):
'''
filename is a python basestring (either string or unicode).
make it a unicode, then call to_bytes to encode correctly and return
a byte string
'''
cdef bytes file_
filename = os.path.normpath(filename)
file_ = to_bytes(unicode(filename))
return file_
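# Example (platform dependent): filename_as_bytes(u"data/résumé.txt") returns
# the os.path.normpath'd path encoded with locale.getpreferredencoding().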
<|end_of_text|># Copyright 2021 Janek Bevendorff
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from resiliparse_inc.lexbor cimport lxb_html_document_t, lxb_dom_node_t, lxb_dom_collection_t, \
lxb_css_parser_t, lxb_selectors_t, lxb_css_selectors_t
cdef lxb_dom_node_t* get_element_by_id_impl(lxb_dom_node_t* node, bytes id_value, bint case_insensitive=*)
cdef lxb_dom_collection_t* get_elements_by_attr_impl(lxb_dom_node_t* node, bytes attr_name, bytes attr_value,
size_t init_size=*, bint case_insensitive=*)
cdef lxb_dom_collection_t* get_elements_by_class_name_impl(lxb_dom_node_t* node, bytes class_name, size_t init_size=*)
cdef lxb_dom_collection_t* get_elements_by_tag_name_impl(lxb_dom_node_t* node, bytes tag_name)
cdef lxb_dom_node_t* query_selector_impl(lxb_dom_node_t* node, HTMLTree tree, bytes selector)
cdef lxb_dom_collection_t* query_selector_all_impl(lxb_dom_node_t* node, HTMLTree tree, bytes selector,
size_t init_size=*)
cdef bint matches_impl(lxb_dom_node_t* node, HTMLTree tree, bytes selector)
cdef class DOMElementClassList:
cdef DOMNode node
cdef list _create_list(self)
cdef inline bytes _class_name_bytes(self)
cpdef void add(self, str class_name)
cpdef void remove(self, str class_name)
cdef class DOMNode:
cdef HTMLTree tree
cdef lxb_dom_node_t* node
cdef DOMElementClassList class_list_singleton
cpdef bint hasattr(self, str attr_name)
cdef str _getattr_impl(self, bytes attr_name)
cpdef str getattr(self, str attr_name, str default_value=*)
cdef bint _setattr_impl(self, bytes attr_name, bytes attr_value) except -1
cpdef void setattr(self, str attr_name, str attr_value)
cdef bint _delattr_impl(self, bytes attr_name) except -1
cpdef void delattr(self, str attr_name)
cpdef DOMNode get_element_by_id(self, str element_id, bint case_insensitive=*)
cpdef DOMCollection get_elements_by_attr(self, str attr_name, str attr_value, bint case_insensitive=*)
cpdef DOMCollection get_elements_by_class_name(self, str class_name, bint case_insensitive=*)
cpdef DOMCollection get_elements_by_tag_name(self, str tag_name)
cpdef DOMNode query_selector(self, str selector)
cpdef DOMCollection query_selector_all(self, str selector)
cpdef bint matches(self, str selector)
cpdef DOMNode append_child(self, DOMNode node)
cpdef DOMNode insert_before(self, DOMNode node, DOMNode reference)
cpdef DOMNode replace_child(self, DOMNode new_child, DOMNode old_child)
cpdef DOMNode remove_child(self, DOMNode node)
cpdef void decompose(self)
cdef class DOMCollection:
cdef HTMLTree tree
cdef lxb_dom_collection_t* coll
cdef inline size_t _wrap_idx(self, ssize_t idx)
cdef DOMCollection _forward_collection_match(self, bytes func, attrs)
cpdef DOMNode get_element_by_id(self, str element_id, bint case_insensitive=*)
cpdef DOMCollection get_elements_by_attr(self, str attr_name, str attr_value, bint case_insensitive=*)
cpdef DOMCollection get_elements_by_class_name(self, str class_name, bint case_insensitive=*)
cpdef DOMCollection get_elements_by_tag_name(self, str tag_name)
cpdef DOMNode query_selector(self, str selector)
cpdef DOMCollection query_selector_all(self, str selector)
cpdef bint matches(self, str selector)
# noinspection DuplicatedCode
cpdef enum NodeType:
ELEMENT = 0x01,
ATTRIBUTE = 0x02,
TEXT = 0x03,
CDATA_SECTION = 0x04,
ENTITY_REFERENCE = 0x05,
ENTITY = 0x06,
PROCESSING_INSTRUCTION = 0x07,
COMMENT = 0x08,
DOCUMENT = 0x09,
DOCUMENT_TYPE = 0x0A,
DOCUMENT_FRAGMENT = 0x0B,
NOTATION = 0x0C,
LAST_ENTRY = 0x0D
cdef HTMLTree create_html_tree(bytes document, bint reencode=*, str encoding=*, str errors=*)
cdef class HTMLTree:
cdef lxb_html_document_t* dom_document
cdef str encoding
cdef lxb_css_parser_t* css_parser
cdef lxb_selectors_t* selectors
cdef lxb_css_selectors_t* css_selectors
cpdef DOMNode create_element(self, str tag_name)
cpdef DOMNode create_text_node(self, str text)
cdef void init_css_parser(self)
<|end_of_text|># Copyright 2017,2018,2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from _variable cimport Variable
from _nd_array cimport NdArray
cdef object pos(object self):
"""
This function simply returns itself.
Implements the unary plus operator, ``+A``.
Returns: :class:`nnabla.Variable` or :class:`nnabla.NdArray`
"""
return self
cdef object neg(object self):
"""
Element-wise negation.
Implements the unary negation expression ``-A``.
Returns: :class:`nnabla.Variable` or :class:`nnabla.NdArray`
"""
import nnabla.functions as F
return F.mul_scalar(self, -1)
cdef object add(object x, object y):
"""
Element-wise addition.
Implements the addition operator expression ``x + y``.
When both of ``x`` and ``y`` are either :obj:`~nnabla.Variable` or
:obj:`~nnabla.NdArray`, :func:`~nnabla.functions.add2`` is
internally called.
When one of ``x`` and ``y`` is a scalar,
:func:`~nnabla.functions.add_scalar` is called.
Args:
x (float or ~nnabla.Variable or ~nnabla.NdArray): Left operand.
y (float or ~nnabla.Variable or ~nnabla.NdArray): Right operand.
Returns: :class:`~nnabla.Variable` or :class:`~nnabla.NdArray`.
"""
import nnabla.functions as F
if isinstance(x, (NdArray, Variable)):
if isinstance(y, (NdArray, Variable)):
            return F.add2(x, y)
else:
return F.add_scalar(x, y)
else:
if isinstance(y, (NdArray, Variable)):
return F.add_scalar(y, x)
else:
return x + y
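# A hedged usage sketch for the dispatch above (from_numpy_array is shown in
# the matmul docstring below; values are illustrative):
#   import numpy as np
#   import nnabla as nn
#   a = nn.NdArray.from_numpy_array(np.ones((2, 2), dtype=np.float32))
#   b = a + 1.0   # scalar operand -> F.add_scalar(a, 1.0)
#   c = a + a     # two NdArrays   -> F.add2(a, a)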
cdef object sub(object x, object y):
"""
Element-wise subtraction.
Implements the subtraction operator expression ``x - y``.
When both of ``x`` and ``y`` are either :obj:`~nnabla.Variable` or
:obj:`~nnabla.NdArray`, :func:`~nnabla.functions.sub2`` is
internally called.
When ``y`` is a scalar, :func:`~nnabla.functions.add_scalar`(x, -y) is
called. When ``x`` is a scalar,
:func:`~nnabla.functions.r_sub_scalar`(y, x) is called.
Args:
x (float or ~nnabla.Variable or ~nnabla.NdArray): Left operand.
y (float or ~nnabla.Variable or ~nnabla.NdArray): Right operand.
Returns: :class:`~nnabla.Variable` or :class:`~nnabla.NdArray`.
"""
import nnabla.functions as F
if isinstance(x, (NdArray, Variable)):
if isinstance(y, (NdArray, Variable)):
return F.sub2(x, y)
else:
return F.add_scalar(x, -y)
else:
if isinstance(y, (NdArray, Variable)):
return F.r_sub_scalar(y, x)
else:
return x - y
cdef object mul(object x, object y):
"""
Element-wise multiplication.
Implements the multiplication operator expression ``x * y``.
When both of ``x`` and ``y`` are either :obj:`~nnabla.Variable` or
:obj:`~nnabla.NdArray`, :func:`~nnabla.functions.mul2`` is
internally called.
When one of ``x`` and ``y`` is a scalar,
:func:`~nnabla.functions.mul_scalar` is called.
Args:
x (float or ~nnabla.Variable or ~nnabla.NdArray): Left operand.
y (float or ~nnabla.Variable or ~nnabla.NdArray): Right operand.
Returns: :class:`~nnabla.Variable` or :class:`~nnabla.NdArray`.
"""
import nnabla.functions as F
if isinstance(x, (NdArray, Variable)):
if isinstance(y, (NdArray, Variable)):
return F.mul2(x, y)
else:
return F.mul_scalar(x, y)
else:
if isinstance(y, (NdArray, Variable)):
return F.mul_scalar(y, x)
else:
return x * y
cdef object truediv(object x, object y):
"""
Element-wise division.
Implements the division operator expression ``x / y``.
When both of ``x`` and ``y`` are either :obj:`~nnabla.Variable` or
:obj:`~nnabla.NdArray`, :func:`~nnabla.functions.div2`` is
internally called.
When ``y`` is a scalar, :func:`~nnabla.functions.div_scalar`(x, y) is
called. When ``x`` is a scalar,
:func:`~nnabla.functions.r_div_scalar`(y, x) is called.
Args:
x (float or ~nnabla.Variable or ~nnabla.NdArray): Left operand.
y (float or ~nnabla.Variable or ~nnabla.NdArray): Right operand.
Returns: :class:`~nnabla.Variable` or :class:`~nnabla.NdArray`.
"""
import nnabla.functions as F
if isinstance(x, (NdArray, Variable)):
if isinstance(y, (NdArray, Variable)):
return F.div2(x, y)
else:
return F.mul_scalar(x, 1. / y)
else:
if isinstance(y, (NdArray, Variable)):
return F.r_div_scalar(y, x)
else:
return x / y
cdef object div(object x, object y):
"""
Element-wise division.
Implements the division operator expression ``x / y``.
When both of ``x`` and ``y`` are either :obj:`~nnabla.Variable` or
:obj:`~nnabla.NdArray`, :func:`~nnabla.functions.div2`` is
internally called.
When ``y`` is a scalar, :func:`~nnabla.functions.div_scalar`(x, y) is
called. When ``x`` is a scalar,
:func:`~nnabla.functions.r_div_scalar`(y, x) is called.
Args:
x (float or ~nnabla.Variable or ~nnabla.NdArray): Left operand.
y (float or ~nnabla.Variable or ~nnabla.NdArray): Right operand.
Returns: :class:`~nnabla.Variable` or :class:`~nnabla.NdArray`.
"""
import nnabla.functions as F
if isinstance(x, (NdArray, Variable)):
if isinstance(y, (NdArray, Variable)):
return F.div2(x, y)
else:
return F.mul_scalar(x, 1. / y)
else:
if isinstance(y, (NdArray, Variable)):
return F.r_div_scalar(y, x)
else:
return x / y
cdef object pow(object x, object y, object z):
"""
Element-wise power function.
Implements the power operator expression ``x ** y``,
optionally ``x ** y % z`` (but not implemented).
When both of ``x`` and ``y`` are either :obj:`~nnabla.Variable` or
:obj:`~nnabla.NdArray`, :func:`~nnabla.functions.pow2`` is
internally called.
When ``y`` is a scalar, :func:`~nnabla.functions.pow_scalar`(x, y) is
called. When ``x`` is a scalar,
:func:`~nnabla.functions.r_pow_scalar`(y, x) is called.
Args:
x (float or ~nnabla.Variable or ~nnabla.NdArray): Left operand.
y (float or ~nnabla.Variable or ~nnabla.NdArray): Right operand.
z (float or ~nnabla.Variable or ~nnabla.NdArray): Modulo (optional).
Returns: :class:`~nnabla.Variable` or :class:`~nnabla.NdArray`.
"""
import nnabla.functions as F
if z is not None:
return NotImplemented
if isinstance(x, (NdArray, Variable)):
if isinstance(y, (NdArray, Variable)):
return F.pow2(x, y)
else:
return F.pow_scalar(x, y)
else:
if isinstance(y, (NdArray, Variable)):
return F.r_pow_scalar(y, x)
else:
return x ** y
cdef object matmul(object x, object y):
"""
Matrix multiplication
Implements the matmul operator expression ``x @ y``.
When both of ``x`` and ``y`` are either :obj:`~nnabla.Variable` or
:obj:`~nnabla.NdArray`, :func:`~nnabla.functions.affine`, :func:`~nnabla.functions.sum`,
:func:`~nnabla.functions.reshape`, :func:`~nnabla.functions.batch_matmul` are internally called.
Note:
If both arguments are 1-D, it is inner product of vectors.
If both arguments are 2-D, it is matrix multiplication.
If the first argument is 1-D, it is promoted to a matrix by prepending a 1 to the first argument's dimensions.
After matrix multiplication the prepended 1 is removed.
If the second argument is 1-D, is it promoted to a matrix by appending a 1 to the second argument's dimensions.
After matrix multiplication the appended 1 is removed.
If either arguments is N-D, N>2, it is treated as the batch matrix multiplication and broadcast accordingly.
e.g. If the first argument x is (p, j, n, k) and the second argument y is (k, m), the out is (p, j, n, m).
Args:
x (~nnabla.Variable or ~nnabla.NdArray): Left operand. Input array, scalar not allowed.
y (~nnabla.Variable or ~nnabla.NdArray): Right operand. Input array, scalar not allowed.
Returns: :class:`~nnabla.Variable` or :class:`~nnabla.NdArray`.
Examples:
.. code-block:: python
import numpy as np
import nnabla as nn
# vector * vector
arr1 = np.random.random([10]).astype(np.float32)
arr2 = np.random.random([10]).astype(np.float32)
n1 = nn.NdArray.from_numpy_array(arr1)
n2 = nn.NdArray.from_numpy_array(arr2)
v1 = nn.Variable.from_numpy_array(arr1)
v2 = nn.Variable.from_numpy_array(arr2)
ans1 = n1 @ n2
ans2 = v1 @ v2
ans2.forward()
print(ans1.shape)
            print(ans2.shape)
# ()
# ()
# matrix * vector
arr1 = np.random.random([10, 5]).astype(np.float32)
arr2 = np.random.random([5]).astype(np.float32)
n1 = nn.NdArray.from_numpy_array(arr1)
n2 = nn.NdArray.from_numpy_array(arr2)
v1 = nn.Variable.from_numpy_array(arr1)
v2 = nn.Variable.from_numpy_array(arr2)
ans1 = n1 @ n2
ans2 = v1 @ v2
ans2.forward()
print(ans1.shape)
print(ans2.shape)
# (10, )
# (10, )
# matrix * broadcasted vector
arr1 = np.random.random([10, 5, 2]).astype(np.float32)
arr2 = np.random.random([2]).astype(np.float32)
n1 = nn.NdArray.from_numpy_array(arr1)
n2 = nn.NdArray.from_numpy_array(arr2)
v1 = nn.Variable.from_numpy_array(arr1)
v2 = nn.Variable.from_numpy_array(arr2)
ans1 = n1 @ n2
ans2 = v1 @ v2
ans2.forward()
print(ans1.shape)
print(ans2.shape)
# (10, 5)
# (10, 5)
"""
import nnabla.functions as F
assert isinstance(x, (NdArray, Variable)) and isinstance(y, (NdArray, Variable)), "All inputs must be ~nnabla.Variable or ~nnabla.NdArray"
    assert x.ndim != 0, "1st input operand has 0 dimension, does not have enough dimensions (func core with signature (m?,k),(k,n?)->(m?,n?) requires 1)"
    assert y.ndim != 0, "2nd input operand has 0 dimension, does not have enough dimensions (func core with signature (m?,k),(k,n?)->(m?,n?) requires 1)"
    if x.ndim == 1 and y.ndim == 1:
        result = F.sum(x * y)
    elif x.ndim == 2 and y.ndim == 2:
result = F.affine(x, y)
elif x.ndim == 1 and y.ndim == 2:
h = F.affine(F.reshape(x, (1, -1)), y)
result = F.reshape(h, (h.shape[-1],))
elif x.ndim == 2 and y.ndim == 1:
h = F.affine(x, F.reshape(y, (-1, 1)))
result = F.reshape(h, h.shape[:-1])
elif x.ndim > 2 or y.ndim > 2:
if x.ndim == y.ndim:
result = F.batch_matmul(x, y)
else:
data_length = x.ndim - y.ndim
if data_length > 0:
shape = list(y.shape)
if y.ndim == 1:
shape.insert(1, 1)
data_length -= 1
for i in range(0, data_length):
shape.insert(0, 1)
y_ = F.reshape(y, tuple(shape))
result = F.batch_matmul(x, y_)
if y.ndim == 1:
result = F.reshape(result, result.shape[:-1])
else:
data_length = -data_length
shape = list(x.shape)
for i in range(0, data_length):
shape.insert(0, 1)
x_ = F.reshape(x, tuple(shape))
result = F.batch_matmul(x_, y)
if x.ndim == 1:
shape_ = list(result.shape)
shape_.pop(result.ndim - 2)
result = F.reshape(result, shape_)
return result
<|end_of_text|>
cdef extern from 'SFML/Config.h':
# portable boolean type
    ctypedef int Bool 'sfBool'  # bint is used instead
    cdef int FALSE 'sfFalse'
    cdef int TRUE 'sfTrue'
    # 8 bits integer types
    ctypedef signed char Int8 'sfInt8'
    ctypedef unsigned char Uint8 'sfUint8'
    # 16 bits integer types
    ctypedef signed short Int16 'sfInt16'
    ctypedef unsigned short Uint16 'sfUint16'
    # 32 bits integer types
    ctypedef signed int Int32 'sfInt32'
    ctypedef unsigned int Uint32 'sfUint32'
    # 64 bits integer types (TODO: _MSC_VER)
    ctypedef signed long long Int64 'sfInt64'
    ctypedef unsigned long long Uint64 'sfUint64'
<|end_of_text|># cython: boundscheck=False
# cython: wraparound=False
# cython: cdivision=True
"""
This module contains highly optimized utility functions that are used in
the various tracking algorithms.
Author: Travis Dick ([email protected])
"""
import cv2
import numpy as np
from scipy.linalg import expm
import pdb
cdef extern from "math.h":
double floor(double)
double ceil(double)
double sqrt(double)
# ---------- Homography Parameterizations ---------- #
cpdef double[:,:] make_hom_sl3(double[:] p) except *:
cdef double[:,:] A = np.empty([3,3], dtype=np.float64)
A[0,0] = p[0]
A[0,1] = p[1]
A[0,2] = p[2]
A[1,0] = p[3]
A[1,1] = p[4] - p[0]
A[1,2] = p[5]
A[2,0] = p[6]
A[2,1] = p[7]
A[2,2] = -p[4]
return expm(np.mat(A))
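# Sanity check for the sl(3) parameterization above: p = 0 yields the zero
# matrix A, and expm(0) is the identity homography.
#   assert np.allclose(make_hom_sl3(np.zeros(8)), np.eye(3))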
cpdef double[:,:] aff_update_backward(double[:,:] warp, double[:] update) except *:
cdef double[:,:] interm1 = np.empty([3,3], dtype=np.float64)
cdef double[:,:] interm2 = np.empty([3,3], dtype=np.float64)
cdef double[:,:] ma1, ma2
    cdef double[:,:] res  # = np.empty([1,6], dtype=np.float64)
cdef double b
cdef int i,j
ma2 = np.empty([1,6], dtype=np.float64)
ma2[0,0] = update[0]
ma2[0,1] = update[3]
ma2[0,2] = update[1]
ma2[0,3] = update[4]
ma2[0,4] = update[2]
ma2[0,5] = update[5]
    # Invert the incremental warp: b is 1/det of the 2x2 linear part (I + dA)
b = 1.0 / ((1+ma2[0,0])*(1+ma2[0,3])-ma2[0,1]*ma2[0,2])
# Construct the second term
interm2[0,0] = 1 + b*(-ma2[0,0]-ma2[0,0]*ma2[0,3]+ma2[0,1]*ma2[0,2])
interm2[1,0] = -b*ma2[0,1]
interm2[0,1] = -b*ma2[0,2]
interm2[1,1] = 1 + b*(-ma2[0,3]-ma2[0,0]*ma2[0,3]+ma2[0,1]*ma2[0,2])
interm2[0,2] = b*(-ma2[0,4]-ma2[0,3]*ma2[0,4]+ma2[0,2]*ma2[0,5])
interm2[1,2] = b*(-ma2[0,5]-ma2[0,0]*ma2[0,5]+ma2[0,1]*ma2[0,4])
interm2[2,0] = 0
interm2[2,1] = 0
interm2[2,2] = 1
# Construct the first term
ma1 = np.array(warp).reshape([2,3])
interm1[0,0] = ma1[0,0]
interm1[0,1] = ma1[0,1]
interm1[0,2] = ma1[0,2]
interm1[1,0] = ma1[1,0]
interm1[1,1] = ma1[1,1]
interm1[1,2] = ma1[1,2]
interm1[2,0] = 0
interm1[2,1] = 0
interm1[2,2] = 1
# Getting the results
temp = np.asmatrix(interm1) * interm2
    # pdb.set_trace()  # stray debug breakpoint, disabled so the update can run
res = np.array(temp[:2,:]).reshape([1,6])
return res
# ---------- Efficient Image Sampling and Conversion ---------- #
cdef double bilin_interp(double [:,:] img, double x, double y):
    cdef int h = img.shape[0]
cdef int w = img.shape[1]
cdef unsigned int lx = <int>floor(x)
cdef unsigned int ux = <int>ceil(x)
cdef unsigned int ly = <int>floor(y)
cdef unsigned int uy = <int>ceil(y)
# Need to be a bit careful here due to overflows
if not (0 <= lx < w and 0 <= ux < w and
0 <= ly < h and 0 <= uy < h): return 128
cdef double dx = x - lx
cdef double dy = y - ly
return img[ly,lx]*(1-dx)*(1-dy) + \
img[ly,ux]*dx*(1-dy) + \
img[uy,lx]*(1-dx)*dy + \
img[uy,ux]*dx*dy
cpdef double[:] sample_pts_all(double[:,:] img, int resx, int resy, double[:,:] warp, int MModel, double[:,:] tmplt_size = np.empty((0,0))) except *:
if MModel == 1:
return sample_pts(img, resx, resy, warp)
elif MModel == 5:
# Using rot with complete motion model
return sample_pts(img, resx, resy, warp)
else:
raise ValueError("Unsupported MModel for sampling: %d" % MModel)
cpdef double[:] sample_pts(double[:,:] img, int resx, int resy, double[:,:] warp) except *:
cdef int n_pts = resx * resy
cdef double[:] result = np.empty(n_pts, dtype=np.float64)
cdef int yi, xi, ri
cdef double y, x, d, wy, wx
ri = 0
for yi in range(resy):
y = <double>yi / (resy-1) - 0.5
for xi in range(resx):
x = <double>xi / (resx-1) - 0.5
d = warp[2,0]*x + warp[2,1]*y + warp[2,2]
wx = (warp[0,0]*x + warp[0,1]*y + warp[0,2]) / d
wy = (warp[1,0]*x + warp[1,1]*y + warp[1,2]) / d
result[ri] = bilin_interp(img, wx, wy)
ri += 1
return result
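# Template points are laid out on the centered unit square [-0.5, 0.5]^2;
# each point is mapped through the homography (note the perspective divide
# by d) before the warped image is sampled bilinearly.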
cpdef double [:,:] to_grayscale(unsigned char [:,:,:] img):
cdef int h = img.shape[0]
cdef int w = img.shape[1]
cdef int d = img.shape[2]
cdef double [:,:] result = np.empty((h,w), dtype=np.float64)
cdef int i,j,k
for i in range(h):
for j in range(w):
result[i,j] = 0
for k in range(d): result[i,j] += img[i,j,k]
result[i,j] /= d
return result
# ---------- Numerically estimating sampling jacobian ---------- #
cdef double[:,:] mat_mul(double[:,:] A, double [:,:] B):
cdef int h = A.shape[0]
cdef int m = A.shape[1]
cdef int w = B.shape[1]
cdef double[:,:] result = np.empty((h,w), dtype=np.float64)
cdef int i,k,j
for i in range(h):
for j in range(w):
result[i,j] = 0
for k in range(m):
result[i,j] += A[i,k]*B[k,j]
return result
cdef double[:,:] warp_update(double[:,:] old, double [:] update):
cdef double f,wx,wy,wz
cdef double[:,:] new_matrix1
cdef double[:,:] new_matrix2
cdef double[:,:] res
f = f_len
new_matrix1 = np.eye(3,dtype=np.float64)
new_matrix1[0,2] = update[0]
new_matrix1[1,2] = update[1]
new_matrix1[0,0] += update[2]
new_matrix1[1,1] += update[2]
wx = update[3]
wy = update[4]
wz = update[5]
new_matrix2 = np.eye(3,dtype=np.float64)
new_matrix2[0,1] = wz
#new_matrix2[0,2] = f*wy
new_matrix2[0,2] = wy
new_matrix2[1,0] = -wz
#new_matrix2[1,2] = f*wx
new_matrix2[1,2] = f*wx
#new_matrix2[2,0] = -wy/f
new_matrix2[2,0] = -wy
#new_matrix2[2,1] = -wx/f
new_matrix2[2,1] = -wx
res = mat_mul(new_matrix1,old)
return mat_mul(new_matrix2,res)
'''
cdef double[:] mat_min(double[:] A, double[:] B):
cdef int h = A.shape[0]:
# cdef int m = A.shape[1]
# cdef int w = B.shape[1]
cdef double[:,:] result = np.empty(h, dtype=np.float64)
cdef int i
for i in range(h):
result[i] = A[i] - B[i]
return result
'''
cdef double _eps = 1e-2 #1e-8
cdef double _epsl = 1e-8
cdef double _epsx
cdef double _epsy
cdef double _epsf = 0.8
cdef double img_dir_grad(double[:,:] img, double[:,:] warp, double x, double y, double dx, double dy):
cdef double wx1, wy1, wx2, wy2, d, ox, oy, offset_size
offset_size = sqrt(dx*dx + dy*dy)
ox = x + dx
oy = y + dy
d = warp[2,0]*ox + warp[2,1]*oy + warp[2,2]
wx1 = (warp[0,0]*ox + warp[0,1]*oy + warp[0,2]) / d
wy1 = (warp[1,0]*ox + warp[1,1]*oy + warp[1,2]) / d
ox = x - dx
oy = y - dy
d = warp[2,0]*ox + warp[2,1]*oy + warp[2,2]
wx2 = (warp[0,0]*ox + warp[0,1]*oy + warp[0,2]) / d
wy2 = (warp[1,0]*ox + warp[1,1]*oy + warp[1,2]) / d
return (bilin_interp(img, wx1, wy1) - bilin_interp(img, wx2, wy2)) / (2 * offset_size)
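# Central difference of the warped image along the (dx, dy) direction:
#   (I(w(p + delta)) - I(w(p - delta))) / (2 * |delta|),
# where w(.) applies the homography with perspective divide.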
cpdef double[:,:] sample_pts_grad_batch(double[:,:] img, int resx, int resy, double[:,:] warp, double MModel) except *:
cdef int xi, yi, i, dims
cdef double x, y, ox, oy, d, wx, wy, w2x, w2y, Ix, Iy, Ixx, Iyy
cdef double[:,:] result
if MModel/100 == 1: dims = 8
elif MModel/100 == 4: dims = 6
elif MModel/100 == 6: dims = 2
elif MModel/100 == 7: dims = 4
else: raise ValueError("No MModel match for jacobian")
result = np.empty((resx*resy, dims), dtype=np.float64)
i = 0
for yi in range(resy):
y = <double>yi / (resy-1) - 0.5
for xi in xrange(resx):
x = <double>xi / (resx-1) - 0.5
#d = warp[2,0]*x + warp[2,1]*y + warp[2,2]
#wx = (warp[0,0]*x + warp[0,1]*y + warp[0,2]) / d
#wy = (warp[1,0]*x + warp[1,1]*y + warp[1,2]) / d
Ix = img_dir_grad(img, warp, x, y, _eps, 0)
Iy = img_dir_grad(img, warp, x, y, 0, _eps)
Ixx = Ix * x
Iyy = Iy * y
if MModel/100 == 1:
result[i,0] = Ixx
result[i,1] = Ix*y
result[i,2] = Ix
result[i,3] = Iy*x
result[i,4] = Iy*y
result[i,5] = Iy
result[i,6] = -Ixx*x - Iy*x*y
result[i,7] = -Ixx*y - Iyy
elif MModel/100 == 4:
result[i,0] = Ixx
result[i,1] = Ix*y
result[i,2] = Ix
result[i,3] = Iy*x
result[i,4] = Iyy
result[i,5] = Iy
elif MModel/100 == 6:
result[i,0] = Ix
result[i,1] = Iy
elif MModel/100 == 7:
result[i,0] = Ixx + Iyy
result[i,1] = -y*Ix + x*Iy
result[i,2] = Ix
result[i,3] = Iy
i += 1
return result
cpdef double[:,:] sample_pts_grad_homo_ic(double[:,:] img, int resx, int resy, double[:,:] warp) except *:
cdef int xi, yi, i
cdef double x, y, ox, oy, d, wx, wy, w2x, w2y, Ix, Iy, Ixx, Iyy
cdef double[:,:] result = np.empty((resx*resy, 8), dtype=np.float64)
i = 0
for yi in range(resy):
y = <double>yi / (resy-1) - 0.5
for xi in xrange(resx):
x = <double>xi / (resx-1) - 0.5
# Computing the spatial image gradient
d = warp[2,0]*x + warp[2,1]*y + warp[2,2]
wx = (warp[0,0]*x + warp[0,1]*y + warp[0,2]) / d
wy = (warp[1,0]*x + warp[1,1]*y + warp[1,2]) / d
#Ix = img_dir_grad_fwd(img, wx, wy, _epsf, 0)
#Iy = img_dir_grad_fwd(img, wx, wy, 0, _epsf)
Ix = img_dir_grad(img, warp, x, y, _eps, 0)
Iy = img_dir_grad(img, warp, x, y, 0, _eps)
# Combining image gradient with jacobian of warp function
Ixx = Ix * x
Iyy = Iy * y
result[i,0] = Ixx
result[i,1] = Ix*y
result[i,2] = Ix
result[i,3] = Iy*x
result[i,4] = Iy*y
result[i,5] = Iy
result[i,6] = -Ixx*x - Iy*x*y
result[i,7] = -Ixx*y - Iyy
# Next row, please!
i += 1
return result
cpdef double[:,:] sample_pts_Jacob(double[:,:] img, int resx, int resy, double[:,:] warp, int MModel, double[:,:] tmplt_size = np.empty((0,0))) except *:
# These are all for backward methods
if MModel == 1:
return sample_pts_grad_sl3(img, resx, resy, warp)
elif MModel >= 100: # switch for revised DLKT_revise
return sample_pts_grad_batch(img, resx, resy, warp, MModel)
cdef double f_len = 1.0
cdef double img_dir_grad_fwd(double[:,:] img, double x, double y, double dx, double dy):
cdef double wx1, wy1, wx2, wy2, d, ox, oy, offset_size
offset_size = sqrt(dx*dx + dy*dy)
wx1 = x + dx
wy1 = y + dy
wx2 = x - dx
wy2 = y - dy
return (bilin_interp(img, wx1, wy1) - bilin_interp(img, wx2, wy2)) / (2.0 * offset_size)
cpdef double[:,:] sample_pts_grad_sl3(double[:,:] img, int resx, int resy, double[:,:] warp) except *:
cdef int xi, yi, i
cdef double x, y, ox, oy, d, w1x, w1y, w2x, w2y, Ix, Iy, Ixx, Iyy
cdef double[:,:] result = np.empty((resx*resy, 8), dtype=np.float64)
i = 0
for yi in range(resy):
y = <double>yi / (resy-1) - 0.5
for xi in xrange(resx):
x = <double>xi / (resx-1) - 0.5
# Computing the spatial image gradient
Ix = img_dir_grad(img, warp, x, y, _epsl, 0)
Iy = img_dir_grad(img, warp, x, y, 0, _epsl)
# Combining image gradient with jacobian of warp function
Ixx = Ix * x
Iyy = Iy * y
result[i,0] = Ixx - Iyy
result[i,1] = Ix*y
result[i,2] = Ix
result[i,3] = Iy*x
result[i,4] = Ixx + 2*Iyy
result[i,5] = Iy
result[i,6] = -Ixx*x - Iyy*x
result[i,7] = -Ixx*y - Iyy*y
# Next row, please!
i += 1
return result
cdef double[:] scv_intensity_map(double[:] src, double[:] dst):
cdef int n_pts = src.shape[0]
cdef double[:,:] P = np.zeros((256,256), dtype=np.float64)
cdef int k, i, j
for k in range(n_pts):
i = <int>src[k]
j = <int>dst[k]
P[i,j] += 1
cdef double[:] intensity_map = np.zeros(256, dtype=np.float64)
cdef double normalizer, total
for i in range(256):
normalizer = 0
total = 0
for j in range(256):
total += j * P[i,j]
normalizer += P[i,j]
if normalizer > 0: intensity_map[i] = total / normalizer
else: intensity_map[i] = i
return intensity_map
cdef double[:] scv_expected_img(double[:] img, double[:] intensity_map):
cdef int n_pts = img.shape[0]
cdef double[:] result = np.empty(n_pts, dtype=np.float64)
cdef int i
for i in range(n_pts):
result[i] = intensity_map[<int>img[i]]
return result
cdef normalize_hom(double[:,:] H):
cdef int i, j
for i in range(3):
for j in range(3):
H[i,j] = H[i,j] / H[2,2]
cpdef double[:,:] compute_homography(double[:,:] in_pts, double[:,:] out_pts):
cdef int num_pts = in_pts.shape[1]
cdef double[:,:] constraint_matrix = np.empty((num_pts*2,9), dtype=np.float64)
cdef int i, r1, r2
for i in range(num_pts):
r1 = 2*i
constraint_matrix[r1,0] = 0
constraint_matrix[r1,1] = 0
constraint_matrix[r1,2] = 0
constraint_matrix[r1,3] = -in_pts[0,i]
constraint_matrix[r1,4] = -in_pts[1,i]
constraint_matrix[r1,5] = -1
constraint_matrix[r1,6] = out_pts[1,i] * in_pts[0,i]
constraint_matrix[r1,7] = out_pts[1,i] * in_pts[1,i]
constraint_matrix[r1,8] = out_pts[1,i]
r2 = 2*i + 1
constraint_matrix[r2,0] = in_pts[0,i]
constraint_matrix[r2,1] = in_pts[1,i]
constraint_matrix[r2,2] = 1
constraint_matrix[r2,3] = 0
constraint_matrix[r2,4] = 0
constraint_matrix[r2,5] = 0
constraint_matrix[r2,6] = -out_pts[0,i] * in_pts[0,i]
constraint_matrix[r2,7] = -out_pts[0,i] * in_pts[1,i]
constraint_matrix[r2,8] = -out_pts[0,i]
U,S,V = np.linalg.svd(constraint_matrix)
cdef double[:,:] H = V[8].reshape(3,3) / V[8][8]
return H
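# Standard DLT: each point correspondence contributes two linear constraints
# on the nine entries of H; the solution is the right singular vector for the
# smallest singular value (the last row of V), normalised so that H[2,2] == 1.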
cpdef double[:,:] compute_affine(double[:,:] in_pts, double[:,:] tmplt_size, double[:,:] out_pts):
cdef int num_pts = in_pts.shape[1]
cdef double[:,:] constraint_matrix = np.empty((num_pts*2,6),dtype=np.float64)
cdef double[:,:] res_matrix = np.empty((num_pts*2,1),dtype=np.float64)
cdef int i, r1, r2
for i in range(num_pts):
r1 = 2*i
constraint_matrix[r1,0] = in_pts[0,i]
constraint_matrix[r1,1] = in_pts[1,i]
constraint_matrix[r1,2] = 1
constraint_matrix[r1,3] = 0
constraint_matrix[r1,4] = 0
constraint_matrix[r1,5] = 0
res_matrix[r1,0] = out_pts[0,i]
r2 = 2*i + 1
constraint_matrix[r2,0] = 0
constraint_matrix[r2,1] = 0
constraint_matrix[r2,2] = 0
constraint_matrix[r2,3] = in_pts[0,i]
constraint_matrix[r2,4] = in_pts[1,i]
constraint_matrix[r2,5] = 1
res_matrix[r2,0] = out_pts[1,i]
X = np.linalg.lstsq(constraint_matrix,res_matrix)[0]
cdef double[:,:] H = X.reshape(1,6)
return H
def rectangle_to_region(ul, lr):
return np.array([ul, [lr[0], ul[1]], lr, [ul[0], lr[1]]],
dtype = np.float64).T
# ---------- Legacy Codes From Previous Implementation ---------- #
# TODO: Replace these with optimized versions. The only operations
# we need are compute_homography, square_to_corners_warp, and
# some way to get the image of the centered unit square under
# a homography (this is the only current use of apply_to_pts)
def homogenize(pts):
(h,w) = pts.shape
results = np.empty((h+1,w))
results[:h] = pts
results[-1].fill(1)
return results
def dehomogenize(pts):
(h,w) = pts.shape
results = np.empty((h-1,w))
results[:h-1] = pts[:h-1]/pts[h-1]
return results
# def compute_homography(in_pts, out_pts):
# num_pts = in_pts.shape[1]
# in_pts = homogenize(in_pts)
# out_pts = homogenize(out_pts)
# constraint_matrix = np.empty((num_pts*2, 9))
# for i in xrange(num_pts):
# p = in_pts[:,i]
# q = out_pts[:,i]
# constraint_matrix[2*i,:] = np.concatenate([[0,0,0], -p, q[1]*p], axis=1)
# constraint_matrix[2*i+1,:] = np.concatenate([p, [0,0,0], -q[0]*p], axis=1)
# U,S,V = np.linalg.svd(constraint_matrix)
# homography = V[8].reshape((3,3))
# homography /= homography[2,2]
# return np.asmatrix(homography)
_square = np.array([[-.5,-.5],[.5,-.5],[.5,.5],[-.5,.5]]).T
def square_to_corners_warp(corners, MModel=1):
if MModel == 1:
return compute_homography(_square, corners)
elif MModel == 4:
return compute_affine(_square, np.zeros((1,6)), corners)
def apply_to_pts_all(warp_matrix, pts, MModel, tmplt_size=np.empty((0,0))):
if MModel == 1:
return apply_to_pts(warp_matrix, pts)
def apply_to_pts(homography, pts):
(h,w) = pts.shape
result = np.empty((h+1,w))
result[:h] = pts
result[h].fill(1)
result = np.asmatrix(homography) * result
result[:h] = result[:h] / result[h]
result[np.isnan(result)] = 0
return np.asarray(result[:h])
'''
def apply_to_pts_aff(warp_matrix, pts, tmplt_size):
result = np.empty((3,4))
result[-1].fill(1)
result[0,0] = 1
result[1,0] = 1
result[0,1] = tmplt_size[0,2] + 1
result[1,1] = 1
result[0,2] = tmplt_size[0,2] + 1
result[1,2] = tmplt_size[0,3] + 1
result[0,3] = 1
result[1,3] = tmplt_size[0,3] + 1
res = np.asmatrix(warp_matrix.reshape((2,3))) * result
return np.asarray(res)
(h,w) = pts.shape
result = np.empty((h+1,w))
result[:h] = pts
result[-1].fill(1)
res = np.asmatrix(warp_matrix.reshape((2,3))) * result
res[np.isnan(res)] = 0
return np.asarray(res)
'''
def apply_to_pts_aff(warp_matrix, pts):
(h,w) = pts.shape
result = np.empty((h+1,w))
result[:h] = pts
result[-1].fill(1)
# print warp_matrix.shape
res = np.asmatrix(warp_matrix.reshape((2,3))) * result
res[np.isnan(res)] = 0
return np.asarray(res)
def draw_region(img, corners, color, thickness=1, draw_x=False):
for i in xrange(4):
p1 = (int(corners[0,i]), int(corners[1,i]))
p2 = (int(corners[0,(i+1)%4]), int(corners[1,(i+1)%4]))
cv2.line(img, p1, p2, color, thickness)
if draw_x:
for i in xrange(4):
p1 = (int(corners[0,i]), int(corners[1,i]))
p2 = (int(corners[0,(i+2)%4]), int(corners[1,(i+2)%4]))
cv2.line(img, p1, p2, color, thickness)
def polygon_descriptors(corners):
""" Computes the area, perimeter, and center of mass of a polygon.
Parameters:
-----------
corners : (2,n) numpy array
The vertices of the polygon. Should be in clockwise or
counter-clockwise order.
Returns:
--------
A tuple (perimeter, area, (center of mass x, center of mass y)).
"""
n_points = corners.shape[1]
p, a, cx, cy = 0, 0, 0, 0
for i in xrange(n_points):
j = (i+1) % n_points
dot = corners[0,i]*corners[1,j] - corners[0,j]*corners[1,i]
a += dot
cx += (corners[0,i] + corners[0,j]) * dot
cy += (corners[1,i] + corners[1,j]) * dot
p += np.linalg.norm(corners[:,i] - corners[:,j])
a /= 2
cx /= 6*a
cy /= 6*a
a = abs(a)
return (p, a, (cx,cy))
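# Quick check (hypothetical input): the unit square
#   polygon_descriptors(np.array([[0,1,1,0],[0,0,1,1]], dtype=np.float64))
# returns (4.0, 1.0, (0.5, 0.5)) -- perimeter 4, area 1, centroid (0.5, 0.5).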
<|end_of_text|># Autogenerated file containing Cython compile-time defines
DEF HAVE_C99_CPLX = 0
<|end_of_text|>"""
vec2d_cy.pyx
Representation for two-dimensional points.
"""
import operator
from math import atan2, cos, sin, sqrt
cdef class Vec2dCy:
"""Create a two dimensional vector (or point in space)."""
__slots__ = {
"x", "y"
}
def __init__(self, float x=0, float y=0):
self.x = x
self.y = y
def __str__(self):
return f"Vec2d({self.x}, {self.y})"
def __repr__(self):
return str(self)
def __hash__(self):
return hash(str(self))
def __getitem__(self, int i):
if i == 0:
return self.x
elif i == 1:
return self.y
raise IndexError()
def __setitem__(self, int i, float value):
if i == 0:
self.x = value
elif i == 1:
self.y = value
else:
raise IndexError()
def __iter__(self):
yield self.x
yield self.y
def __len__(self):
return 2
def __eq__(self, other):
if hasattr(other, "__getitem__") and len(other) == 2:
return self.x == other[0] and self.y == other[1]
else:
return False
def __lt__(self, other):
if hasattr(other, "__getitem__") and len(other) == 2:
if self.x < other[0]:
return True
elif (self.x == other[0]) and (self.y < other[1]):
return True
else:
return False
else:
raise TypeError(f"Not possible to use '<' operator on objects {type(self)} and {type(other)}")
def __gt__(self, other):
if hasattr(other, "__getitem__") and len(other) == 2:
if self.x > other[0]:
return True
elif (self.x == other[0]) and (self.y > other[1]):
return True
else:
return False
else:
raise TypeError(f"Not possible to use '>' operator on objects {type(self)} and {type(other)}")
def __ne__(self, other):
if hasattr(other, "__getitem__") and len(other) == 2:
return self.x != other[0] or self.y != other[1]
else:
return True
def __add__(self, other):
if isinstance(other, Vec2dCy):
return Vec2dCy(self.x + other.x, self.y + other.y)
elif hasattr(other, "__getitem__"):
return Vec2dCy(self.x + other[0], self.y + other[1])
else:
return Vec2dCy(self.x + other, self.y + other)
__radd__ = __add__
def __iadd__(self, other):
if isinstance(other, Vec2dCy):
self.x += other.x
self.y += other.y
elif hasattr(other, "__getitem__"):
self.x += other[0]
self.y += other[1]
else:
self.x += other
self.y += other
return self
def __sub__(self, other):
if isinstance(other, Vec2dCy):
return Vec2dCy(self.x - other.x, self.y - other.y)
elif hasattr(other, "__getitem__"):
return Vec2dCy(self.x - other[0], self.y - other[1])
else:
return Vec2dCy(self.x - other, self.y - other)
def __rsub__(self, other):
if isinstance(other, Vec2dCy):
return Vec2dCy(other.x - self.x, other.y - self.y)
if hasattr(other, "__getitem__"):
return Vec2dCy(other[0] - self.x, other[1] - self.y)
else:
return Vec2dCy(other - self.x, other - self.y)
def __isub__(self, other):
if isinstance(other, Vec2dCy):
self.x -= other.x
self.y -= other.y
elif hasattr(other, "__getitem__"):
self.x -= other[0]
self.y -= other[1]
else:
self.x -= other
self.y -= other
return self
def __mul__(self, other):
if isinstance(other, Vec2dCy):
return Vec2dCy(self.x * other.x, self.y * other.y)
if hasattr(other, "__getitem__"):
return Vec2dCy(self.x * other[0], self.y * other[1])
else:
return Vec2dCy(self.x * other, self.y * other)
__rmul__ = __mul__
def __imul__(self, other):
if isinstance(other, Vec2dCy):
self.x *= other.x
self.y *= other.y
elif hasattr(other, "__getitem__"):
self.x *= other[0]
self.y *= other[1]
else:
self.x *= other
self.y *= other
return self
cpdef Vec2dCy _operator_handler(self, other, f):
if isinstance(other, Vec2dCy):
return Vec2dCy(f(self.x, other.x),
f(self.y, other.y))
elif hasattr(other, "__getitem__"):
return Vec2dCy(f(self.x, other[0]),
f(self.y, other[1]))
else:
return Vec2dCy(f(self.x, other),
f(self.y, other))
cpdef Vec2dCy _right_operator_handler(self, other, f):
if hasattr(other, "__getitem__"):
return Vec2dCy(f(other[0], self.x),
f(other[1], self.y))
else:
return Vec2dCy(f(other, self.x),
f(other, self.y))
cpdef Vec2dCy _inplace_operator_handler(self, other, f):
if hasattr(other, "__getitem__"):
self.x = f(self.x, other[0])
self.y = f(self.y, other[1])
else:
self.x = f(self.x, other)
self.y = f(self.y, other)
return self
def __div__(self, other):
return self._operator_handler(other, operator.div)
def __rdiv__(self, other):
return self._right_operator_handler(other, operator.div)
def __idiv__(self, other):
return self._inplace_operator_handler(other, operator.div)
def __floordiv__(self, other):
return self._operator_handler(other, operator.floordiv)
def __rfloordiv__(self, other):
return self._right_operator_handler(other, operator.floordiv)
def __ifloordiv__(self, other):
return self._inplace_operator_handler(other, operator.floordiv)
def __truediv__(self, other):
return self._operator_handler(other, operator.truediv)
def __rtruediv__(self, other):
return self._right_operator_handler(other, operator.truediv)
def __itruediv__(self, other):
return self._inplace_operator_handler(other, operator.truediv)
def __neg__(self):
return Vec2dCy(operator.neg(self.x), operator.neg(self.y))
def __pos__(self):
return Vec2dCy(operator.pos(self.x), operator.pos(self.y))
def __abs__(self):
return Vec2dCy(abs(self.x), abs(self.y))
def __invert__(self):
return Vec2dCy(-self.x, -self.y)
def __round__(self, n=0):
return Vec2dCy(round(self.x, n), round(self.y, n))
def __copy__(self):
return Vec2dCy(self.x, self.y)
cpdef float get_angle(self):
return 0 if self.get_length() == 0 else atan2(self.y, self.x)
cpdef float get_length(self):
return sqrt(self.x ** 2 + self.y ** 2)
cpdef Vec2dCy normalized(self):
return self / self.get_length() if self.get_length() != 0 else Vec2dCy(self.x, self.y)
cpdef tuple get_tuple(self, int r=0):
if r:
return round(self.x, r), round(self.y, r)
return self.x, self.y
cpdef Vec2dCy load_tuple(self, tuple t):
"""Load in a tuple to the object's parameters."""
self.x = t[0]
self.y = t[1]
return self
cpdef Vec2dCy angle_to_vec(float angle):
"""
Transform an angle to a normalized vector.
:param angle: Float
:return: Vec2dCy
"""
return Vec2dCy(cos(angle), sin(angle))
<|end_of_text|># distutils: language = c++
from libcpp.string cimport string
cdef extern from "google/protobuf/util/message_differencer.h" namespace "google::protobuf::util":
cdef cppclass MessageDifferencer:
MessageDifferencer()
bint Equals(const Message & message1, const Message & message2)
cdef extern from "google/protobuf/message.h" namespace "google::protobuf":
cdef cppclass Message:
bint ParseFromString(const string& data) except +
bint SerializeToString(string* output) const
string DebugString() const
cdef extern from "google/protobuf/stubs/status.h" namespace "google::protobuf::util":
cdef cppclass Status:
Status()
string ToString() const
cdef extern from "google/protobuf/util/json_util.h" namespace "google::protobuf::util":
cdef Status MessageToJsonString(const Message &, string* output)<|end_of_text|># distutils: language=c++
import pathlib
import networkx as nx
import time
from libcpp.vector cimport vector
from libcpp.stack cimport stack
from cython.operator cimport dereference, preincrement, postincrement
cdef extern from "<algorithm>" namespace "std":
Iter remove[Iter, T](Iter first, Iter last, const T& value)
OutputIt copy[InputIt, OutputIt](InputIt first, InputIt last, OutputIt d_first)
OutputIt copy_if[InputIt, OutputIt, Pred](InputIt first, InputIt last, OutputIt d_first, Pred pred) except +
# cdef extern from "<iterator>" namespace "std":
# back_insert_iterator[CONTAINER] back_inserter[CONTAINER](CONTAINER &)
cdef class Graph:
cdef unsigned int _size
cdef vector[int] _adj_matrix
def __init__(self, size):
self._size = size
self._adj_matrix.resize(size*size)
cdef int _position(self, int a, int b):
return (a * self._size) + b
@property
def size(self):
return self._size
cpdef void add_edge(self, int a, int b):
self._adj_matrix[self._position(a, b)] = 1
self._adj_matrix[self._position(b, a)] = 1
cpdef bint adjacent(self, int a, int b):
return self._adj_matrix[self._position(a, b)]
cpdef int degree(self, int a):
cdef int deg = 0
cdef int i
for i in range(a*self._size, (a+1)*self._size):
if self._adj_matrix[i]:
deg += 1
return deg
cdef void degree_sort(Graph graph, vector[int] & nodes):
cdef int i, node
for i, node in enumerate(sorted(range(graph.size), key=graph.degree, reverse=True)):
nodes[i] = node
cdef void expand(Graph graph, vector[vector[int]] & buckets, vector[int] & candidate, vector[int] & neighbors, vector[int] & best):
cdef vector[int] colors
colors.reserve(neighbors.size())
cdef vector[int] colored_nodes
colored_nodes.reserve(neighbors.size())
colourise(graph, buckets, neighbors, colors, colored_nodes)
cdef int i, j, n
cdef int bound, node, size
cdef vector[int] new_neighbors
new_neighbors.reserve(neighbors.size())
for i in reversed(range(colored_nodes.size())):
size = candidate.size()
bound = size + colors[i]
if bound <= best.size():
return
node = colored_nodes[i]
candidate.push_back(node)
postincrement(size)
new_neighbors.clear()
for n in neighbors:
if graph.adjacent(node, n):
new_neighbors.push_back(n)
# new_neighbors = [n for n in neighbors if graph.adjacent(node, n)]
if new_neighbors.empty():
if size > best.size():
best.resize(size)
copy(candidate.begin(), candidate.end(), best.begin())
else:
expand(graph, buckets, candidate, new_neighbors, best)
candidate.pop_back()
colored_nodes.pop_back()
colors.pop_back()
neighbors.erase(remove(neighbors.begin(), neighbors.end(), node))
cdef colourise(Graph graph, vector[vector[int]] & buckets, vector[int] & neighbors, vector[int] & colors, vector[int] & colored_nodes):
cdef vector[vector[int]].iterator it
it = buckets.begin()
while it != buckets.end():
dereference(it).clear()
preincrement(it)
cdef int node, colored_node
for node in neighbors:
it = buckets.begin()
while it != buckets.end():
for colored_node in dereference(it):
if graph.adjacent(node, colored_node):
break
else:
dereference(it).push_back(node)
break
preincrement(it)
it = buckets.begin()
while it != buckets.end():
for node in dereference(it):
colored_nodes.push_back(node)
colors.push_back(it - buckets.begin() + 1)
preincrement(it)
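# The greedy colouring above drives the pruning in expand(): a clique can use
# at most one vertex per colour class, so candidate.size() + colors[i] is an
# upper bound on any clique reachable through colored_nodes[i]; branches whose
# bound cannot beat the incumbent best are cut.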
cpdef vector[int] search_maximum_clique(Graph graph):
cdef vector[int] result
result.reserve(graph.size)
cdef vector[int] candidate
candidate.reserve(graph.size)
cdef vector[int] sorted_nodes
sorted_nodes.resize(graph.size)
degree_sort(graph, sorted_nodes)
cdef vector[vector[int]] buckets
cdef vector[vector[int]].iterator it
buckets.resize(graph.size)
it = buckets.begin()
while it != buckets.end():
dereference(it).reserve(graph.size)
preincrement(it)
expand(graph, buckets, candidate, sorted_nodes, result)
return result
def main():
graph_path = pathlib.Path.cwd()
dimacs = nx.read_edgelist(graph_path.joinpath('brock200-1.txt'), nodetype=int)
graph = Graph(dimacs.number_of_nodes())
for i, j in dimacs.edges():
graph.add_edge(i-1, j-1)
x = time.time()
result = search_maximum_clique(graph)
print(time.time() - x)
print(len([i+1 for i in result]))
print([i+1 for i in result])
if __name__ == '__main__':
main()
<|end_of_text|>#
# Copyright (c) 2018-2020 by Kristoffer Paulsson <[email protected]>.
#
# This software is available under the terms of the MIT license. Parts are licensed under
# different terms if stated. The legal terms are attached to the LICENSE file and are
# made available on:
#
# https://opensource.org/licenses/MIT
#
# SPDX-License-Identifier: MIT
#
# Contributors:
# Kristoffer Paulsson - initial implementation
#
import datetime
import uuid
from typing import Union, List
from angelos.common.policy import PolicyPerformer, PolicyMixin, policy, PolicyException
from angelos.document.envelope import Envelope
from angelos.document.messages import Instant, MESSAGE_EXPIRY_PERIOD, Note, Mail, Attachment, Share, Report
from angelos.document.utils import Helper as DocumentHelper
from angelos.lib.policy.crypto import Crypto
from angelos.portfolio.collection import PrivatePortfolio, Portfolio
from angelos.portfolio.policy import IssuePolicy
from angelos.portfolio.utils import MimeTypes, Definitions
# TODO: Make an overhaul
class MailBuilder:
"""Mail building class."""
MIME = ("application/octet-stream",)
def __init__(self, sender: PrivatePortfolio, mail: Mail):
"""Init the mail builder"""
self.__sender = sender
self.__mail = mail
def message(self, subject: str, body: str, reply: Union[Mail, uuid.UUID] = None):
"""Add mail body, subject and reply-to."""
self.__mail.subject = subject if subject else ""
self.__mail.body = body if body else ""
if isinstance(reply, Mail):
self.__mail.reply = reply.id
elif isinstance(reply, uuid.UUID):
self.__mail.reply = reply
else:
self.__mail.reply = None
return self
def add(self, name: str, data: bytes, mime: str):
"""Add an attachment to the mail."""
attachement = Attachment(nd={"name": name if name else "Unnamed", "mime": mime, "data": data})
attachement.validate()
self.__mail.attachments.append(attachement)
return self
def done(self) -> Mail:
"""Finalize the mail message."""
self.__mail._fields["signature"].redo = True
self.__mail.signature = None
self.__mail.expires = datetime.date.today() + datetime.timedelta(MESSAGE_EXPIRY_PERIOD)
self.__mail.posted = datetime.datetime.utcnow()
mail = Crypto.sign(self.__mail, self.__sender)
mail.validate()
return mail
def draft(self) -> Mail:
"""Export draft mail document"""
self.__mail._fields["signature"].redo = True
self.__mail.signature = None
self.__mail.expires = datetime.date.today() + datetime.timedelta(365)
self.__mail.posted = datetime.datetime(1, 1, 1, 1, 1, 1)
return Crypto.sign(self.__mail, self.__sender)
class ShareBuilder(MailBuilder):
def share(self, portfolio: Portfolio) -> Share:
"""Create a Share message containing documents to be shared."""
for doc in portfolio.documents():
self.add(
doc.__class__.__name__,
DocumentHelper.serialze(doc),
ShareBuilder.MIME[0]
)
return self.done()
class ReportBuilder(MailBuilder):
def report(
self, message: Union[Mail, Instant], envelope: Envelope,
claims: List[str], msg: str
) -> Report:
"""Create a Share message containing documents to be shared."""
if len(claims) < 1 or len(claims) > 3:
raise ValueError("At least 1 and most 3 claims.")
for doc in (message, envelope):
self.add(
doc.__class__.__name__,
DocumentHelper.serialze(doc),
ReportBuilder.MIME[0]
)
text = "\n".join(["{0}: {1}".format(
claim, Definitions.REPORT[claim]) for claim in claims])
self.message(
"Claims: {0}".format(", ".join(claims)),
"MESSAGE:\n{0}\n\nCLAIMS:\n{1}".format(
msg if msg else "n/a", text
),
)
return self.done()
class CreateInstant(IssuePolicy, PolicyPerformer, PolicyMixin):
def __init__(self):
super().__init__()
self._data = None
self._mime = None
self._reply = None
def _setup(self):
self._document = None
def _clean(self):
self._portfolio = None
self._owner = None
self._data = None
self._mime = None
self._reply = None
def apply(self) -> bool:
if self._mime not in list(map(str, MimeTypes)):
raise ValueError("Unsupported mime-type for instant messages.")
self._document = Instant(nd={
"owner": self._owner.entity.id,
"issuer": self._portfolio.entity.id,
"mime": self._mime,
"body": self._data if self._data else b"",
"reply": self._reply.id if self._reply else None,
"expires": datetime.date.today() + datetime.timedelta(MESSAGE_EXPIRY_PERIOD),
"posted": datetime.datetime.now(),
})
self._document = Crypto.sign(self._document, self._portfolio)
if not all([
self._check_document_issuer(),
self._check_document_expired(),
self._check_document_valid(),
self._check_document_verify(),
]):
raise PolicyException()
return True
@policy(b'I', 0, "Instant:Create")
def perform(
self, sender: PrivatePortfolio, recipient: Portfolio,
data: bytes, mime: str, reply: Instant = None
) -> Instant:
self._portfolio = sender
self._owner = recipient
self._data = data
self._mime = mime
self._reply = reply
self._applier()
return self._document
class CreateNote(IssuePolicy, PolicyPerformer, PolicyMixin):
def __init__(self):
super().__init__()
self._body = None
self._reply = None
def _setup(self):
self._document = None
def _clean(self):
self._portfolio = None
self._owner = None
self._data = None
self._reply = None
def apply(self) -> bool:
self._document = Note(nd={
"owner": self._owner.entity.id,
"issuer": self._portfolio.entity.id,
"body": self._body if self._body else "",
"reply": self._reply.id if self._reply else None,
"expires": datetime.date.today() + datetime.timedelta(MESSAGE_EXPIRY_PERIOD),
"posted": datetime.datetime.now(),
})
self._document = Crypto.sign(self._document, self._portfolio)
if not all([
self._check_document_issuer(),
self._check_document_expired(),
self._check_document_valid(),
self._check_document_verify(),
]):
raise PolicyException()
return True
@policy(b'I', 0, "Node:Create")
def perform(self, sender: PrivatePortfolio, recipient: Portfolio, body: str, reply: Note = None) -> Note:
"""Compose a mail by using a mailbuilder."""
self._portfolio = sender
self._owner = recipient
self._body = body
self._reply = reply
self._applier()
return self._document
class CreateMail(IssuePolicy, PolicyPerformer, PolicyMixin):
def __init__(self):
super().__init__()
def _setup(self):
self._document = None
def _clean(self):
self._owner = None
def apply(self) -> bool:
self._document = Mail(nd={
"owner": self._owner.entity.id,
"issuer": self._portfolio.entity.id
})
return True
@policy(b'I', 0, "Mail:Create")
def perform(self, sender: PrivatePortfolio, recipient: Portfolio) -> MailBuilder:
self._portfolio = sender
self._owner = recipient
self._applier()
return MailBuilder(self._portfolio, self._document)
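# Hypothetical usage sketch (names as defined above): perform() returns a
# MailBuilder whose fluent methods each return self, so a mail can be
# composed and signed in one chain:
#   mail = CreateMail().perform(sender, recipient) \
#       .message("Subject", "Body text") \
#       .add("blob.bin", b"...", MailBuilder.MIME[0]) \
#       .done()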
class CreateShare(IssuePolicy, PolicyPerformer, PolicyMixin):
def __init__(self):
super().__init__()
def _setup(self):
self._document = None
def _clean(self):
self._owner = None
def apply(self) -> bool:
self._document = Share(nd={
"owner": self._owner.entity.id,
"issuer": self._portfolio.entity.id
})
return True
@policy(b'I', 0, "Share:Create")
def perform(self, sender: PrivatePortfolio, recipient: Portfolio) -> ShareBuilder:
self._portfolio = sender
self._owner = recipient
self._applier()
return ShareBuilder(self._portfolio, self._document)
class CreateReport(IssuePolicy, PolicyPerformer, PolicyMixin):
def __init__(self):
super().__init__()
def _setup(self):
self._document = None
def _clean(self):
self._owner = None
def apply(self) -> bool:
self._document = Report(nd={
"owner": self._portfolio.entity.id,
"issuer": self._owner.entity.id
})
return True
@policy(b'I', 0, "Report:Create")
def perform(self, sender: PrivatePortfolio, recipient: Portfolio) -> ReportBuilder:
self._portfolio = sender
self._owner = recipient
self._applier()
return ReportBuilder(self._portfolio, self._document)<|end_of_text|>* File: and.pex.netlist.AND.pxi
* Created: Mon Apr 15 15:43:41 2019
*
x_PM_AND_VDD N_VDD_M3_b N_VDD_M3_s N_VDD_M4_d VDD PM_AND_VDD
x_PM_AND_2 N_2_M2_g N_2_M5_g N_2_M0_s N_2_M3_d N_2_c_1_p N_2_c_2_p N_2_c_3_p
+ N_2_c_5_p N_2_c_6_p N_2_c_7_p PM_AND_2
x_PM_AND_GROUND N_GROUND_M1_d N_GROUND_M0_b GROUND PM_AND_GROUND
x_PM_AND_Y N_Y_M2_d N_Y_M5_d Y PM_AND_Y
x_PM_AND_A N_A_c_9_n N_A_M3_g N_A_M0_g A PM_AND_A
x_PM_AND_B N_B_c_14_n N_B_M1_g N_B_M4_g B PM_AND_B
cc_1 N_2_c_1_p N_A_c_9_n 0.1508f
cc_2 N_2_c_2_p N_A_c_9_n 0.104f
cc_3 N_2_c_3_p N_A_c_9_n 1.2675f
cc_4 N_2_c_3_p N_A_M3_g 0.57525f
cc_5 N_2_c_5_p N_A_M0_g 0.91f
cc_6 N_2_c_6_p N_B_c_14_n 0.518f
cc_7 N_2_c_7_p N_B_c_14_n 2.0735f
cc_8 N_2_c_5_p N_B_M1_g 0.91f
<|end_of_text|>""" Simpler example of wrapping cos function from math.h using Cython. """
from libc.math cimport cos
def cos_func(arg):
return cos(arg)
<|end_of_text|>include 'types.pxi'
from .handle cimport shared_ptr
cdef extern from 'ql/patterns/observable.hpp' namespace 'QuantLib' nogil:
cdef cppclass Observable:
pass
cdef cppclass Observer:
Observer()
void registerWith(const shared_ptr[Observable]&)
Size unregisterWith(const shared_ptr[Observable]&)
cdef extern from 'cpp_layer/observable.hpp' nogil:
cdef cppclass PyObserver(Observer):
PyObserver()
PyObserver(object callback) except +
<|end_of_text|>cdef class Json:
def __init__(self, x=None):
self.from_python(x)
def from_python(self, x):
if x is None:
set_nil(self.j)
else:
raise NotImplementedError(type(x))
<|end_of_text|>from Types cimport *
from libcpp cimport bool
from libcpp.pair cimport pair as libcpp_pair
from libcpp.vector cimport vector as libcpp_vector
from MetaInfoInterface cimport *
cdef extern from "<OpenMS/METADATA/ID/IdentificationData.h>" namespace "OpenMS":
cdef cppclass IdentificationData(MetaInfoInterface):
# wrap-inherits:
# MetaInfoInterface
IdentificationData() nogil except + # wrap-doc:Representation of spectrum identification results and associated data
IdentificationData(IdentificationData &) nogil except + # wrap-doc:Copy constructor
<|end_of_text|>import re
def p2l(key):
"""
Converts the many different ways of addressing an ODS path to a list of keys ['bla',0,'bla']
:param key: ods location in some format
:return: list of keys that make the ods path
"""
if isinstance(key, list):
return key
if isinstance(key, tuple):
return list(key)
if isinstance(key, int):
return [int(key)]
if isinstance(key, str) and not ('.' in key or '[' in key):
if len(key):
try:
return [int(key)]
except ValueError:
return [key]
else:
return []
if key is None:
raise TypeError('OMAS key cannot be None')
if isinstance(key, dict):
raise TypeError('OMAS key cannot be of type dictionary')
if not isinstance(key, (list, tuple)):
key = str(key).replace('[', '.').replace(']', '').split('.')
key = [k for k in key if k]
for k, item in enumerate(key):
try:
key[k] = int(item)
except ValueError:
pass
return key
def l2i(path):
"""
Formats a list ['bla',0,'bla'] into a IMAS path 'bla[0].bla'
:param path: ODS path format
:return: IMAS path format
"""
ipath = ''
for kstep, step in enumerate(path):
if isinstance(step, int) or step == ':':
ipath += "[%s]" % step
elif kstep == 0:
ipath += '%s' % step
else:
ipath += '.%s' % step
return ipath
def l2o(path):
"""
Formats a list ['bla',0,'bla'] into an ODS path format 'bla.0.bla'
:param path: list of strings and integers
:return: ODS path format
"""
return '.'.join(filter(None, map(str, path)))
_o2u_pattern = re.compile(r'\.[0-9:]+')
_o2u_pattern_no_split = re.compile(r'^[0-9:]+')
_o2i_pattern = re.compile(r'\.([:0-9]+)')
def o2u(path):
"""
Converts an ODS path 'bla.0.bla' into a universal path 'bla.:.bla'
:param path: ODS path format
:return: universal ODS path format
"""
if '.' in path:
return re.sub(_o2u_pattern, '.:', path)
else:
return re.sub(_o2u_pattern_no_split, ':', path)
def i2o(path):
"""
Formats a IMAS path 'bla[0].bla' into an ODS path 'bla.0.bla'
:param path: IMAS path format
:return: ODS path format
"""
return path.replace(']', '').replace('[', '.')
def o2i(path):
"""
Formats a ODS path 'bla.0.bla' into an IMAS path 'bla[0].bla'
:param path: ODS path format
:return: IMAS path format
"""
return re.sub(_o2i_pattern, r'[\1]', path)
def u2o(upath, path):
"""
Replaces `:` and integers in `upath` with ':' and integers from `path`
e.g. u2o('a.:.b.:.c.1.d.1.e', 'a.5.b.7.c') becomes 'a.5.b.7.c.1.d.1.e'
:param upath: universal ODS path
:param path: ODS path
:return: filled in ODS path
"""
if upath.startswith('1...'):
return upath
ul = p2l(upath)
ol = p2l(path)
for k in range(min([len(ul), len(ol)])):
if (ul[k] == ':' or isinstance(ul[k], int)) and (ol[k] == ':' or isinstance(ol[k], int)):
ul[k] = ol[k]
elif ul[k] == ol[k]:
continue
else:
break
return l2o(ul)
def u2n(upath, list_of_n):
"""
Replaces `:` and integers in `upath` with integers provided
e.g. u2n('a.:.b.:.c.1.d.1.e', [2,3]) becomes 'a.2.b.3.c.1.d.1.e'
:param upath: universal ODS path
:param list_of_n: list of numbers
:return: filled in ODS path
"""
ul = p2l(upath)
i = 0
for k in range(len(ul)):
if ul[k] == ':' or isinstance(ul[k], int):
if ul[k] == ':' and i < len(list_of_n):
ul[k] = list_of_n[i]
i += 1
return l2o(ul)
def l2u(path):
"""
Formats a list ['bla',0,'bla'] into a universal path 'bla.:.bla'
NOTE: a universal ODS path substitutes lists indices with :
:param path: list of strings and integers
:return: universal ODS path format
"""
return o2u(l2o(path))
def trim_common_path(p1, p2):
"""
return paths in lists format trimmed of the common first path between paths p1 and p2
:param p1: ODS path
:param p2: ODS path
:return: paths in list format trimmed of common part
"""
p1 = p2l(p1)
p2 = p2l(p2)
both = [x if x[0] == x[1] else None for x in zip(p1, p2)] + [None]
return p1[both.index(None) :], p2[both.index(None) :]
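# A few hypothetical round-trips through the helpers above:
#   p2l('bla.0.bla') -> ['bla', 0, 'bla']
#   l2i(['bla', 0, 'bla']) -> 'bla[0].bla'
#   o2u('bla.0.bla') -> 'bla.:.bla'
#   u2n('a.:.b.:.c', [2, 3]) -> 'a.2.b.3.c'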
<|end_of_text|>#cython: boundscheck=False
#cython: nonecheck=False
#cython: wraparound=False
#cython: profile=False
#cython: cdivision=True
from libc.stdio cimport *
import numpy as np
cimport numpy as cnp
from operator import mul
from functools import reduce  # builtin on Python 2; explicit import keeps Python 3 builds working
cdef extern from "math.h":
double sqrt(double)
double log(double)
cnp.import_array()
cdef class WeightVector:
def __init__(self, dims, ada_grad=True, w=None):
if isinstance(dims, int):
self.n = dims
self.dims = (dims,)
elif isinstance(dims, tuple):
self.n = reduce(mul, dims)
self.dims = dims
self.shape0 = dims[0]
if w is None:
self.w = np.zeros(self.n, dtype=np.float64)
else:
assert w.size == self.n
self.w = w
self.ada_grad = int(ada_grad)
self.acc = np.zeros_like(self.w, dtype=np.float64)
self.adagrad_squares = np.ones_like(self.w, dtype=np.float64)
self.last_update = np.zeros_like(self.w, dtype=np.int32)
self.active = np.ones_like(self.w, dtype=np.float64)
def average(self):
w_copy = np.asarray(self.w)
acc_copy = np.asarray(self.acc)
last_update_copy = np.asarray(self.last_update)
acc_copy += w_copy * (self.n_updates - last_update_copy)
w_copy = acc_copy / self.n_updates
self.w = w_copy
cdef void _update_running_mean(self, double old_val, double new_val):
# Keep running mean and variance using a variant of Welford's algorithm.
# This function substitutes `old_val` for `new_val` in the set of
# numbers the mean and variance are computed over.
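# Concretely: delta = new - old shifts the mean by delta / n, and m2
# accumulates delta * ((old - old_mean) + (new - new_mean)), so variance()
# can return m2 / n later without a second pass over the weights.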
delta = new_val - old_val
d_old = old_val - self.mean
self.mean += delta / self.n
d_new = new_val - self.mean
self.m2 += delta * (d_old + d_new)
cdef void _update_ada_grad(self, int feat_i, double val):
cdef double learning_rate = 1.0
# print feat_i, self.adagrad_squares[feat_i], sqrt(self.adagrad_squares[feat_i])
val *= (learning_rate / sqrt(self.adagrad_squares[feat_i]))
self.adagrad_squares[feat_i] += val*val
cpdef update(self, int feat_i, double val):
if feat_i < 0:
raise ValueError("feature index is < 0")
val *= self.active[feat_i]
if val == 0:
return
cdef int missed_updates = self.n_updates - self.last_update[feat_i] - 1
# Perform missing updates for previous rounds
self.last_update[feat_i] = self.n_updates
self.acc[feat_i] += (missed_updates + 1) * self.w[feat_i]
self._update_running_mean(self.w[feat_i], self.w[feat_i] + val)
if self.ada_grad:
self._update_ada_grad(feat_i, val)
# New update
self.acc[feat_i] += val
self.w[feat_i] += val
cpdef double variance(self):
return self.m2 / float(self.n)
cpdef double stddev(self):
return sqrt(self.variance())
cpdef update2d(self, int i1, int i2, double val):
self.update(self.shape0 * i1 + i2, val)
cdef inline double get(self, int i1):
return self.w[i1]
cdef inline double get2d(self, int i1, int i2):
return self.w[self.shape0 * i1 + i2]
cdef double score(self, Example *example, int label, FeatMap feat_map):
cdef double e_score = 0
cdef int feat_i
for feat in example.features:
feat_i = feat_map.feat_i_for_label(feat.index, label)
e_score += self.w[feat_i] * feat.value * self.active[feat_i]
return e_score
@classmethod
def load(cls, file):
with np.load(file) as npz_file:
dims = tuple(npz_file['dims'])
w = WeightVector(dims)
w.ada_grad = npz_file['ada_grad'].sum()
w.w = npz_file['w']
w.acc = npz_file['acc']
w.adagrad_squares = npz_file['adagrad_squares']
w.last_update = npz_file['last_update']
w.active = np.ones_like(w.w, dtype=np.float64)
return w
def save(self, file):
np.savez(file,
ada_grad=self.ada_grad,
w=self.w,
acc=self.acc,
adagrad_squares=self.adagrad_squares,
last_update=self.last_update,
dims=self.dims)
def copy(self):
return WeightVector(self.dims, self.ada_grad, np.asarray(self.w).copy())<|end_of_text|>"""
Many implementations of the dot product.
See `Cython documentation <http://docs.cython.org/en/latest/>`_.
"""
from libc.stdlib cimport calloc, free
from libc.string cimport memcpy
from libc.stdio cimport printf
from libc.math cimport NAN
import numpy
from cython.parallel import prange, parallel
cimport numpy
cimport cython
cimport openmp
numpy.import_array()
cdef double _ddot_cython_array_omp(const double[::1] va, const double[::1] vb,
int chunksize, int schedule) nogil:
"""
dot product implemented with cython and C types
using :epkg:`prange` (:epkg:`openmp` in :epkg:`cython`).
:param va: first vector, dtype must be float64
:param vb: second vector, dtype must be float64
:param chunksize: see :epkg:`prange`
:param schedule: 0 no parallelization, 1 for `'static'`,
2 for `'dynamic'`
:return: dot product
"""
cdef int n = va.shape[0]
cdef Py_ssize_t i
cdef double s = 0
if schedule == 1:
for i in prange(n, schedule='static', chunksize=chunksize):
s += va[i] * vb[i]
elif schedule == 2:
for i in prange(n, schedule='dynamic', chunksize=chunksize):
s += va[i] * vb[i]
else:
for i in prange(n):
s += va[i] * vb[i]
return s
def ddot_cython_array_omp(const double[::1] va, const double[::1] vb,
cython.int chunksize=32, cython.int schedule=0):
"""
dot product implemented with cython and C types
using :epkg:`prange` (:epkg:`openmp` in :epkg:`cython`).
:param va: first vector, dtype must be float64
:param vb: second vector, dtype must be float64
:param chunksize: see :epkg:`prange`
:param schedule: 0 simple :epkg:`prange`,
1 for `'static'`, 2 for `'dynamic'`
:return: dot product
"""
if va.shape[0] != vb.shape[0]:
raise ValueError("Vectors must have same shape.")
cdef double s
with nogil:
s = _ddot_cython_array_omp(va, vb, chunksize, schedule)
return s
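# Hypothetical quick check: two float64 vectors of ones dot to their length,
# e.g. ddot_cython_array_omp(np.ones(10), np.ones(10), schedule=1) == 10.0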
cdef extern from "dot_cython_omp_.h":
cdef cython.int get_omp_max_threads_cpp() nogil
cdef double vector_ddot_openmp(const double *p1, const double *p2, cython.int size, cython.int nthreads) nogil
cdef double vector_ddot_openmp_16(const double *p1, const double *p2, cython.int size, cython.int nthreads) nogil
@cython.boundscheck(False)
@cython.wraparound(False)
def get_omp_max_threads():
"""
Returns the number of threads.
"""
cdef cython.int i
with nogil:
i = get_omp_max_threads_cpp()
return i
@cython.boundscheck(False)
@cython.wraparound(False)
def ddot_array_openmp(const double[::1] va, const double[::1] vb):
"""
dot product using :epkg:`openmp` inside C++ code.
:param va: first vector, dtype must be float64
:param vb: second vector, dtype must be float64
:return: dot product
"""
cdef double r;
with nogil:
r = vector_ddot_openmp(&va[0], &vb[0], va.shape[0], 0)
return r
@cython.boundscheck(False)
@cython.wraparound(False)
def ddot_array_openmp_16(const double[::1] va, const double[::1] vb):
"""
dot product using :epkg:`openmp` inside C++ code,
parallelizes 16x16 computation.
:param va: first vector, dtype must be float64
:param vb: second vector, dtype must be float64
:return: dot product
"""
cdef double r;
with nogil:
r = vector_ddot_openmp_16(&va[0], &vb[0], va.shape[0], 0)
return r
<|end_of_text|>from exception.custom_exception cimport raise_py_error
from libc.stdint cimport int64_t
from libcpp cimport bool
cdef extern from "GenApi/Synch.h" namespace 'GENAPI_NAMESPACE':
cdef cppclass CLock:
# Constructor
CLock() except +raise_py_error
#Destructor
void Destructor "~CLock"() except +raise_py_error
#tries to enter the critical section; returns true if successful
bool TryLock() except +raise_py_error
#enters the critical section (may block)
void Lock() except +raise_py_error
#leaves the critical section
void Unlock() except +raise_py_error<|end_of_text|># This file is part of the TIGRE Toolbox
# Copyright (c) 2015, University of Bath and
# CERN-European Organization for Nuclear Research
# All rights reserved.
# License: Open Source under BSD.
# See the full license at
# https://github.com/CERN/TIGRE/license.txt
# Contact: [email protected]
# Codes: https://github.com/CERN/TIGRE/
# --------------------------------------------------------------------------
# Coded by: Tomoyuki SADAKANE
cimport numpy as np
import numpy as np
from tigre.utilities.errors import TigreCudaCallError
from tigre.utilities.cuda_interface._gpuUtils cimport GpuIds as c_GpuIds, convert_to_c_gpuids, free_c_gpuids
np.import_array()
from libc.stdlib cimport malloc, free
cdef extern from "numpy/arrayobject.h":
void PyArray_ENABLEFLAGS(np.ndarray arr, int flags)
void PyArray_CLEARFLAGS(np.ndarray arr, int flags)
cdef extern from "RandomNumberGenerator.hpp":
cdef void poisson_1d(float* img, size_t uiLen, float* dst, c_GpuIds gpuids)
cdef void poisson_gaussian_1d(float* img, size_t uiLen, float fGaussMu, float fGaussSigma, float* dst, c_GpuIds gpuids)
def cuda_raise_errors(error_code):
if error_code:
raise TigreCudaCallError('RandomNumberGenerator::', error_code)
def add_poisson(np.ndarray[np.float32_t, ndim=3] src, gpuids=None):
# print("add_poisson()")
cdef c_GpuIds* c_gpuids = convert_to_c_gpuids(gpuids)
if not c_gpuids:
raise MemoryError()
cdef np.npy_intp size_img[3]
size_img[0]= <np.npy_intp> src.shape[0]
size_img[1]= <np.npy_intp> src.shape[1]
size_img[2]= <np.npy_intp> src.shape[2]
cdef float* c_imgout = <float*> malloc(size_img[0] *size_img[1] *size_img[2]* sizeof(float))
cdef long imgsize[3]
imgsize[0] = <long> size_img[2]
imgsize[1] = <long> size_img[1]
imgsize[2] = <long> size_img[0]
src = np.ascontiguousarray(src)
cdef float* c_src = <float*> src.data
cdef np.npy_intp c_uiSigLen = <np.npy_intp> (size_img[0] *size_img[1] *size_img[2])
cuda_raise_errors(poisson_1d(c_src, c_uiSigLen, c_imgout, c_gpuids[0]))
imgout = np.PyArray_SimpleNewFromData(3, size_img, np.NPY_FLOAT32, c_imgout)
PyArray_ENABLEFLAGS(imgout, np.NPY_OWNDATA)
return imgout
def add_noise(np.ndarray[np.float32_t, ndim=3] poisson_lambda,
np.float32_t gaussian_mu,
np.float32_t gaussian_sigma,
gpuids=None):
# print("add_noise()")
cdef c_GpuIds* c_gpuids = convert_to_c_gpuids(gpuids)
if not c_gpuids:
raise MemoryError()
cdef np.npy_intp size_img[3]
size_img[0]= <np.npy_intp> poisson_lambda.shape[0]
size_img[1]= <np.npy_intp> poisson_lambda.shape[1]
size_img[2]= <np.npy_intp> poisson_lambda.shape[2]
cdef float* c_imgout = <float*> malloc(size_img[0] *size_img[1] *size_img[2]* sizeof(float))
cdef long imgsize[3]
imgsize[0] = <long> size_img[2]
imgsize[1] = <long> size_img[1]
imgsize[2] = <long> size_img[0]
poisson_lambda = np.ascontiguousarray(poisson_lambda)
cdef float* c_src = <float*> poisson_lambda.data
cdef np.npy_intp c_uiSigLen = <np.npy_intp> (size_img[0] *size_img[1] *size_img[2])
cdef np.float32_t c_fGaussMu = gaussian_mu
cdef np.float32_t c_fGaussSigma = gaussian_sigma
cuda_raise_errors(poisson_gaussian_1d(c_src, c_uiSigLen, c_fGaussMu, c_fGaussSigma, c_imgout, c_gpuids[0]))
imgout = np.PyArray_SimpleNewFromData(3, size_img, np.NPY_FLOAT32, c_imgout)
PyArray_ENABLEFLAGS(imgout, np.NPY_OWNDATA)
return imgout
<|end_of_text|># Copyright 2016 by Nedim Sabic (RabbitStack)
# http://rabbitstack.github.io
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kstream.includes.python cimport PyTuple_SetItem, Py_BuildValue, PyTuple_New
from cpython.ref cimport PyObject
from kstream.includes.windows cimport UCHAR
cdef inline PyObject* build_ktuple(PyObject* kguid, UCHAR opcode) nogil:
cdef PyObject* ktuple = PyTuple_New(2)
cdef PyObject* oco = Py_BuildValue('b', opcode)
PyTuple_SetItem(ktuple, 0, kguid)
PyTuple_SetItem(ktuple, 1, oco)
return ktuple
<|end_of_text|>from trainer_base cimport BaseTrainer
cdef class Trainer(BaseTrainer):
cdef:
public int steps
public float change_high, change_low
<|end_of_text|># distutils: language=c++
# distutils: sources=hummingbot/core/cpp/LimitOrder.cpp
from cpython cimport PyObject
from libcpp.string cimport string
from decimal import Decimal
import pandas as pd
from typing import List
cdef class LimitOrder:
@classmethod
def to_pandas(cls, limit_orders: List[LimitOrder]) -> pd.DataFrame:
cdef:
list columns = ["Order_Id", "is_buy", "Trading_Pair", "Base_Asset", "Quote_Asset", "Price", "Quantity"]
list data = [[
limit_order.client_order_id,
limit_order.is_buy,
limit_order.trading_pair,
limit_order.base_currency,
limit_order.quote_currency,
float(limit_order.price),
float(limit_order.quantity)
] for limit_order in limit_orders]
return pd.DataFrame(data=data, columns=columns)
def __init__(self,
client_order_id: str,
trading_pair: str,
is_buy: bool,
base_currency: str,
quote_currency: str,
price: Decimal,
quantity: Decimal):
cdef:
string cpp_client_order_id = client_order_id.encode("utf8")
string cpp_trading_pair = trading_pair.encode("utf8")
string cpp_base_currency = base_currency.encode("utf8")
string cpp_quote_currency = quote_currency.encode("utf8")
self._cpp_limit_order = CPPLimitOrder(cpp_client_order_id,
cpp_trading_pair,
is_buy,
cpp_base_currency,
cpp_quote_currency,
<PyObject *> price,
<PyObject *> quantity)
@property
def client_order_id(self) -> str:
cdef:
string cpp_client_order_id = self._cpp_limit_order.getClientOrderID()
str retval = cpp_client_order_id.decode("utf8")
return retval
@property
def trading_pair(self) -> str:
cdef:
string cpp_trading_pair = self._cpp_limit_order.getTradingPair()
str retval = cpp_trading_pair.decode("utf8")
return retval
@property
def is_buy(self) -> bool:
return self._cpp_limit_order.getIsBuy()
@property
def base_currency(self) -> str:
cdef:
string cpp_base_currency = self._cpp_limit_order.getBaseCurrency()
str retval = cpp_base_currency.decode("utf8")
return retval
@property
def quote_currency(self) -> str:
cdef:
string cpp_quote_currency = self._cpp_limit_order.getQuoteCurrency()
str retval = cpp_quote_currency.decode("utf8")
return retval
@property
def price(self) -> Decimal:
return <object>(self._cpp_limit_order.getPrice())
@property
def quantity(self) -> Decimal:
return <object>(self._cpp_limit_order.getQuantity())
def __repr__(self) -> str:
return (f"LimitOrder('{self.client_order_id}', '{self.trading_pair}', {self.is_buy}, '{self.base_currency}', "
f"'{self.quote_currency}', {self.price}, {self.quantity})")
cdef LimitOrder c_create_limit_order_from_cpp_limit_order(const CPPLimitOrder cpp_limit_order):
cdef LimitOrder retval = LimitOrder.__new__(LimitOrder)
retval._cpp_limit_order = cpp_limit_order
return retval
<|end_of_text|># This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
cimport cython
from qutip.cy.spmatfuncs cimport spmvpy
from qutip.cy.openmp.parfuncs cimport spmvpy_openmp
@cython.boundscheck(False)
@cython.wraparound(False)
def _spmvpy(complex[::1] data,
int[::1] ind,
int[::1] ptr,
complex[::1] vec,
complex a,
complex[::1] out):
spmvpy(&data[0], &ind[0], &ptr[0], &vec[0], a, &out[0], vec.shape[0])
@cython.boundscheck(False)
@cython.wraparound(False)
def _spmvpy_openmp(complex[::1] data,
int[::1] ind,
int[::1] ptr,
complex[::1] vec,
complex a,
complex[::1] out,
unsigned int num_threads):
spmvpy_openmp(&data[0], &ind[0], &ptr[0], &vec[0], a, &out[0], vec.shape[0], num_threads)<|end_of_text|># -*-cython-*-
#
# Common functions required when doing complex arithmetic with Cython.
#
import cython
cimport numpy as np
cimport libc.math
cdef extern from "_complexstuff.h":
double npy_cabs(np.npy_cdouble z) nogil
double npy_carg(np.npy_cdouble z) nogil
np.npy_cdouble npy_clog(np.npy_cdouble z) nogil
np.npy_cdouble npy_cexp(np.npy_cdouble z) nogil
np.npy_cdouble npy_csin(np.npy_cdouble z) nogil
np.npy_cdouble npy_ccos(np.npy_cdouble z) nogil
np.npy_cdouble npy_csqrt(np.npy_cdouble z) nogil
np.npy_cdouble npy_cpow(np.npy_cdouble x, np.npy_cdouble y) nogil
double npy_log1p(double x) nogil
int npy_isnan(double x) nogil
int npy_isinf(double x) nogil
int npy_isfinite(double x) nogil
double inf "NPY_INFINITY"
double pi "NPY_PI"
double nan "NPY_NAN"
DEF tol = 2.220446092504131e-16
ctypedef double complex double_complex
ctypedef fused number_t:
double
double_complex
ctypedef union _complex_pun:
np.npy_cdouble npy
double_complex c99
cdef inline np.npy_cdouble npy_cdouble_from_double_complex(
double_complex x) nogil:
cdef _complex_pun z
z.c99 = x
return z.npy
cdef inline double_complex double_complex_from_npy_cdouble(
np.npy_cdouble x) nogil:
cdef _complex_pun z
z.npy = x
return z.c99
cdef inline bint zisnan(number_t x) nogil:
if number_t is double_complex:
return npy_isnan(x.real) or npy_isnan(x.imag)
else:
return npy_isnan(x)
cdef inline bint zisfinite(number_t x) nogil:
if number_t is double_complex:
return npy_isfinite(x.real) and npy_isfinite(x.imag)
else:
return npy_isfinite(x)
cdef inline bint zisinf(number_t x) nogil:
if number_t is double_complex:
return not zisnan(x) and not zisfinite(x)
else:
return npy_isinf(x)
cdef inline double zreal(number_t x) nogil:
if number_t is double_complex:
return x.real
else:
return x
cdef inline double zabs(number_t x) nogil:
if number_t is double_complex:
return npy_cabs(npy_cdouble_from_double_complex(x))
else:
return libc.math.fabs(x)
cdef inline double zarg(double complex x) nogil:
return npy_carg(npy_cdouble_from_double_complex(x))
cdef inline number_t zlog(number_t x) nogil:
cdef np.npy_cdouble r
if number_t is double_complex:
r = npy_clog(npy_cdouble_from_double_complex(x))
return double_complex_from_npy_cdouble(r)
else:
return libc.math.log(x)
cdef inline number_t zexp(number_t x) nogil:
cdef np.npy_cdouble r
if number_t is double_complex:
r = npy_cexp(npy_cdouble_from_double_complex(x))
return double_complex_from_npy_cdouble(r)
else:
return libc.math.exp(x)
cdef inline number_t zsin(number_t x) nogil:
cdef np.npy_cdouble r
if number_t is double_complex:
r = npy_csin(npy_cdouble_from_double_complex(x))
return double_complex_from_npy_cdouble(r)
else:
return libc.math.sin(x)
cdef inline number_t zcos(number_t x) nogil:
cdef np.npy_cdouble r
if number_t is double_complex:
r = npy_ccos(npy_cdouble_from_double_complex(x))
return double_complex_from_npy_cdouble(r)
else:
return libc.math.cos(x)
cdef inline number_t zsqrt(number_t x) nogil:
cdef np.npy_cdouble r
if number_t is double_complex:
r = npy_csqrt(npy_cdouble_from_double_complex(x))
return double_complex_from_npy_cdouble(r)
else:
return libc.math.sqrt(x)
cdef inline number_t zpow(number_t x, double y) nogil:
cdef np.npy_cdouble r, z
# FIXME
if number_t is double_complex:
z.real = y
z.imag = 0.0
r = npy_cpow(npy_cdouble_from_double_complex(x), z)
return double_complex_from_npy_cdouble(r)
else:
return libc.math.pow(x, y)
cdef inline double_complex zpack(double zr, double zi) nogil:
cdef np.npy_cdouble z
z.real = zr
z.imag = zi
return double_complex_from_npy_cdouble(z)
@cython.cdivision(True)
cdef inline double complex zlog1(double complex z) nogil:
"""
Compute log, paying special attention to accuracy around 1. We
implement this ourselves because some systems (most notably the
Travis CI machines) are weak in this regime.
"""
cdef:
int n
double complex coeff = -1
double complex res = 0
if zabs(z - 1) > 0.1:
return zlog(z)
z = z - 1
if z == 0:
return 0
for n in range(1, 17):
coeff *= -z
res += coeff/n
if zabs(res/coeff) < tol:
break
return res
<|end_of_text|>cimport cython
cimport numpy
import numpy
from libcpp.vector cimport vector
cdef extern from "maxflow.cc":
    vector[int] Solve2LabelProblem(double *, const int, double *, const int, int *, const int);
vector[int] solveMultiLabelProblem(const int, double*, const int, double *, const int, int *, const int);
# @cython.boundscheck(False)
def solve_mincut(numpy.ndarray[int, mode="c", ndim = 2] edges, numpy.ndarray[double, ndim = 2] unary, numpy.ndarray[double, ndim = 3] pairwise):
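    """Solve a multi-label MRF energy with the wrapped graph-cut solver.

    Expected shapes (as enforced by the assertions below):
        edges    -- int array, shape (3, n_edges)
        unary    -- double array, shape (n_labels, n_unaries)
        pairwise -- double array, shape (n_pairwise_cliques, n_labels, n_labels)
    Returns an int32 array holding one label per node.
    """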
# TODO: check if arrays are C-ordered
assert edges.shape[0] == 3
cdef int n_edges = edges.shape[1]
cdef int n_labels = unary.shape[0]
cdef int n_unaries = unary.shape[1]
cdef numpy.ndarray[int, mode="c", ndim=2] c_edges = numpy.ascontiguousarray(edges)
cdef numpy.ndarray[double, mode="c", ndim=2] c_unary = numpy.ascontiguousarray(unary)
cdef numpy.ndarray[double, mode="c", ndim=3] c_pairwise = numpy.ascontiguousarray(pairwise)
assert n_labels == pairwise.shape[1]
assert n_labels == pairwise.shape[2]
cdef int n_pairwise_cliques = pairwise.shape[0]
assert n_labels >= 2
cdef vector[int] v_labelling
    if False:
        # The dedicated 2-label path is buggy; the multi-label solver below
        # handles n_labels >= 2, so this branch is intentionally disabled.
        assert False
v_labelling = Solve2LabelProblem(<double*>c_unary.data, n_unaries, \
<double*>c_pairwise.data, n_pairwise_cliques, \
<int*>c_edges.data, n_edges)
else:
v_labelling = solveMultiLabelProblem(n_labels, <double*>c_unary.data, n_unaries, \
<double*>c_pairwise.data, n_pairwise_cliques, \
<int*>c_edges.data, n_edges)
cdef numpy.ndarray[int, mode="c", ndim=1] labelling = numpy.asarray(v_labelling).astype(numpy.int32)
return labelling<|end_of_text|>cdef class VertMesh:
cdef int _attrib_count
cdef float* _data
cdef int _vert_count
    cdef int _index_count
cdef unsigned short* _indices<|end_of_text|>from libc.stdint cimport uintptr_t
from weakref import WeakValueDictionary
# NOTE(vbkaisetsu):
# This dict holds the Python wrapper instances associated with C++ objects.
# Without it, a new Python wrapper would be created every time a C++ class
# instance is returned from a function, so users could not compare
# instances with the "is" operator.
cdef object py_primitiv_device_weak_dict = WeakValueDictionary()
cdef object py_primitiv_default_device = None
cdef class Device:
"""Interface of the Tensor provider.
"""
@staticmethod
def set_default(Device device):
"""Specifies a new default device.
:param device: Reference of the new default device.
:type device: primitiv.Device
"""
global py_primitiv_default_device
py_primitiv_default_device = device
@staticmethod
def get_default():
"""Retrieves the current default device.
:return: The current default device
:rtype: primitiv.Device
"""
if py_primitiv_default_device is None:
raise RuntimeError("Default object is null.")
return py_primitiv_default_device
def dump_description(self):
"""Prints device description to stderr.
"""
self.wrapped.dump_description()
return
def __copy__(self):
raise NotImplementedError(type(self).__name__ + " does not support `__copy__` for now.")
def __deepcopy__(self, memo):
raise NotImplementedError(type(self).__name__ + " does not support `__deepcopy__` for now.")
@staticmethod
cdef void register_wrapper(CppDevice *ptr, Device wrapper):
if <uintptr_t> ptr in py_primitiv_device_weak_dict:
raise ValueError("Attempted to register the same C++ object twice.")
py_primitiv_device_weak_dict[<uintptr_t> ptr] = wrapper
@staticmethod
cdef Device get_wrapper(CppDevice *ptr):
# NOTE(vbkaisetsu):
# Device instances should be created and be registered before this
# function is called.
return py_primitiv_device_weak_dict[<uintptr_t> ptr]
<|end_of_text|>
import datetime
import multiprocessing
import random
from tobacco.text_passages.text_passages_helper_doc_ids import get_doc_ids_and_offsets
from tobacco.text_passages.text_passages_helper_db import document_iterator, insert_passages_yearly_result
from tobacco.text_passages.text_passages_helper_process_doc import get_final_sections, get_readability
from tobacco.frequencies_preprocessing.preprocessing_docs import get_ocr_sections
def process_year_of_sections_cython(str first_token, list tokens, list search_regexes, token_vector, int year,
int passage_length, active_filters, set vocabulary, globals,
insert_result_to_db=True):
""" Processes one year of a text passage search and returns them.
"""
cdef bytearray first_token_encoded = bytearray(first_token.encode('utf-8'))
cdef list doc_ids, raw_sections
cdef str doc_id, date_str, collection, tid, title, author, general, token
cdef int total_sections, complete
cdef list tokens_with_removed_wildcards = [token.replace('*', '') for token in tokens]
doc_ids_and_offsets, total_sections = get_doc_ids_and_offsets(token_vector, year,
globals['year_parts_id_list']['sections'],
globals['section_to_doc_and_offset_arr'])
if total_sections < 2000:
complete = 1
else:
complete = 0
# if the year has no documents, insert empty list and return
if len(doc_ids_and_offsets) == 0:
insert_passages_yearly_result(tokens, active_filters, year, passage_length, complete, [])
return
doc_ids = list(doc_ids_and_offsets.keys())
output_sections = []
for row in document_iterator(doc_ids):
date = datetime.datetime.fromtimestamp(row['timestamp']) + datetime.timedelta(hours=6)
date_str = date.strftime('%Y-%m-%d')
collection = globals['collections_and_idx_dict'][int(row['collection_id'])]['name_short']
tid = row['tid']
title = row['title']
if title is None:
title = ''
if len(title) > 50:
title = title[:50] + '...'
        author = row['author']
        if author is None:
            author = ''
        if len(author) > 50:
            author = author[:50] + '...'
general = '{}<br><br><a href="https://www.industrydocumentslibrary.ucsf.edu/tobacco/docs/#id={}" target="_blank">{}</a><br><br>{}'.format(date_str, tid, tid, collection)
raw_sections = get_ocr_sections(tid, doc_ids_and_offsets[row['id']])
final_sections = get_final_sections(raw_sections, first_token_encoded, passage_length, search_regexes)
for final_section in final_sections:
for token in tokens_with_removed_wildcards:
final_section = final_section.replace(token, '<b>{}</b>'.format(token))
output_sections.append((
tid,
title,
year,
author,
date_str,
collection,
get_readability(final_section, vocabulary),
final_section,
general
))
if len(output_sections) > 5000:
output_sections = random.sample(output_sections, 5000)
if insert_result_to_db:
p = multiprocessing.Process(target=insert_passages_yearly_result, args=(tokens, active_filters, year, passage_length, complete, output_sections))
p.start()
return output_sections
# insert_passages_yearly_result(tokens, active_filters, year, passage_length, complete, output_sections)
<|end_of_text|>from libc.stdint cimport int64_t
from hummingbot.wallet.wallet_base cimport WalletBase
from hummingbot.core.event.event_listener cimport EventListener
cdef class Web3Wallet(WalletBase):
cdef:
object _local_account
list _wallet_backends
list _last_backend_network_states
object _best_backend
object _select_best_backend_task
object _chain
object _event_dedup_window
object _erc20_token
EventListener _received_asset_forwarder
EventListener _gas_used_forwarder
EventListener _token_approved_forwarder
EventListener _eth_wrapped_forwarder
EventListener _eth_unwrapped_forwarder
EventListener _transaction_failure_forwarder
EventListener _zeroex_fill_forwarder
cdef c_receive_forwarded_event(self, int64_t event_tag, object args)
<|end_of_text|># Sun Jul 10 18:29:28 EDT 2016
"""
Copyright (c) 2016 Peter R. Schmitt and Ryan J. Urbanowicz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import time as tm
import numpy as np
from math import isnan
###############################################################################
# this runs both SURF and SURF*
def runSURF(header, double[:,::1] x, double[::1] y, attr, var, distArray, options):
V = options['verbose']
algorithm = options['algorithm']
start = tm.time()
cdef:
int maxInst = var['datalen']
int numattr = var['NumAttributes']
Scores = [0] * numattr
#--------------------------------------------------------------------------
def find_nearest_neighbor(): # for SURF
NN = []
min_indicies = []
cdef int i
for i in range(maxInst):
            if(inst != i):
locator = [inst,i]
if(i > inst): locator.reverse()
d = distArray[locator[0]][locator[1]]
if(d < avgDist):
min_indicies.append(i)
for i in range(len(min_indicies)):
NN.append(min_indicies[i])
return NN
#--------------------------------------------------------------------------
def find_data_instances(): # for SURFStar
NN_near=[]; NN_far=[]
min_indices=[]; max_indices=[]
cdef int i
for i in range(maxInst):
            if(inst != i):
locator = [inst,i]
if(i > inst): locator.reverse()
d = distArray[locator[0]][locator[1]]
if(d < avgDist): min_indices.append(i)
if(d > avgDist): max_indices.append(i)
for i in range(len(min_indices)):
NN_near.append(min_indices[i])
for i in range(len(max_indices)):
NN_far.append(max_indices[i])
return NN_near, NN_far
#--------------------------------------------------------------------------
# Find number of classes in the dataset and store them into the map
    def getMultiClassMap():
mcmap = dict()
for i in range(maxInst):
if(y[i] not in mcmap):
mcmap[y[i]] = 0
else:
mcmap[y[i]] += 1
for each in var['phenoTypeList']:
mcmap[each] = mcmap[each]/float(maxInst)
return mcmap
#--------------------------------------------------------------------------
pname = var['phenoTypeName']
cdef int inst, i
# calculate avgDist
alg = options['algorithm']
sm = cnt = 0
for i in range(maxInst):
sm += sum(distArray[i])
cnt += len(distArray[i])
avgDist = sm/float(cnt)
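    # SURF defines the neighborhood with a single global threshold: every
    # other instance closer than the mean pairwise distance counts as a
    # neighbor. SURF* additionally scores the "far" instances (beyond the
    # mean) with the sign of their contribution inverted.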
    if(V): print('Average Distance = ' + str(avgDist))
#------------------------------#
    if(var['classType'] == 'multiclass'):
mcmap = getMultiClassMap()
else:
mcmap = 0
for inst in range(maxInst):
        if(algorithm == 'surf'):
NN = find_nearest_neighbor()
NN = np.array(NN, dtype=np.int32)
if(len(NN) <= 0): continue
for f in range(var['NumAttributes']):
Scores[f] += \
evaluate_SURF(x,y,header,attr,var,NN,f,inst,mcmap,alg)
        elif(algorithm == 'surfstar'):
NN_near,NN_far = find_data_instances()
NN_near = np.array(NN_near, dtype=np.int32)
NN_far = np.array(NN_far, dtype=np.int32)
for f in range(var['NumAttributes']):
if(len(NN_near) > 0):
Scores[f] += \
evaluate_SURF(x,y,header,attr,var,NN_near,f,inst,mcmap,alg)
if(len(NN_far) > 0):
Scores[f] -= \
evaluate_SURF(x,y,header,attr,var,NN_far,f,inst,mcmap,alg)
if(V): print("surf time = " + str(tm.time() - start))
return Scores
###############################################################################
# evaluates both SURF and SURF* scores
cdef double evaluate_SURF(double[:,::1] x,double[::1] y, header, attr, var,
int[::1] NN, int feature, int inst, mcmap, algorithm):
fname = header[feature]
ftype = attr[fname][0] # feature type
ctype = var['classType'] # class type
cdef:
double diff_hit = 0
double diff_miss = 0
double count_hit = 0
double count_miss = 0
double mmdiff = 1
double xNNifeature, xinstfeature, diff = 0
int i
xinstfeature = x[inst][feature]
if(ftype == 'continuous'): mmdiff = attr[fname][3]
    if(ctype == 'multiclass'):
class_Store = dict()
missClassPSum = 0 # for SURF
for each in mcmap:
            if(each != y[inst]):
class_Store[each] = [0,0]
missClassPSum += mcmap[each] # for SURF
for i in range(len(NN)):
NN[i] = int(NN[i])
xNNifeature = x[NN[i]][feature]
absvalue = abs(xinstfeature - xNNifeature)/mmdiff
if(isnan(xinstfeature) or isnan(xNNifeature)): continue
if(y[inst] == y[NN[i]]): # HIT
count_hit += 1
                if(xinstfeature != xNNifeature):
if(ftype == 'continuous'):
diff_hit -= absvalue
else: # discrete
diff_hit -= 1
else: # MISS
for missClass in class_Store:
if(y[NN[i]] == missClass):
class_Store[missClass][0] += 1
                        if(xinstfeature != xNNifeature):
if(ftype == 'continuous'):
class_Store[missClass][1] += absvalue
else: # discrete
class_Store[missClass][1] += 1
# corrects for both multiple classes as well as missing data
missSum = 0
for each in class_Store: missSum += class_Store[each][0]
missAverage = missSum/float(len(class_Store))
hit_proportion = count_hit / float(len(NN)) # Correct for NA
for each in class_Store:
            if(algorithm == 'surf'):
diff_miss += (mcmap[each] / float(missClassPSum)) * \
class_Store[each][1]
else: #surfstar
diff_miss += (class_Store[each][0]/float(missSum)) * \
class_Store[each][1]
diff = diff_miss * hit_proportion
miss_proportion = missAverage / float(len(NN))
diff += diff_hit * miss_proportion
#--------------------------------------------------------------------------
elif(ctype == 'discrete'):
for i in range(len(NN)):
xNNifeature = x[NN[i]][feature]
xinstfeature = x[inst][feature]
absvalue = abs(xinstfeature - xNNifeature)/mmdiff
if(isnan(xinstfeature) or isnan(xNNifeature)): continue
if(y[inst] == y[NN[i]]): # HIT
count_hit += 1
                if(xinstfeature != xNNifeature):
if(ftype == 'continuous'):
diff_hit -= absvalue
else: # discrete
diff_hit -= 1
else: # MISS
count_miss += 1
                if(xinstfeature != xNNifeature):
if(ftype == 'continuous'):
diff_miss += absvalue
else: # discrete
diff_miss += 1
hit_proportion = count_hit/float(len(NN))
miss_proportion = count_miss/float(len(NN))
diff = diff_hit * miss_proportion + diff_miss * hit_proportion
#--------------------------------------------------------------------------
else: # CONTINUOUS endpoint
same_class_bound = var['phenSD']
for i in range(len(NN)):
xNNifeature = x[NN[i]][feature]
xinstfeature = x[inst][feature]
absvalue = abs(xinstfeature - xNNifeature)/mmdiff
if(isnan(xinstfeature) or isnan(xNNifeature)): continue
if(abs(y[inst] - y[NN[i]]) < same_class_bound): # HIT
count_hit += 1
                if(xinstfeature != xNNifeature):
if(ftype == 'continuous'):
diff_hit -= absvalue
else: # discrete
diff_hit -= 1
else: # MISS
count_miss += 1
                if(xinstfeature != xNNifeature):
if(ftype == 'continuous'):
diff_miss += absvalue
else: # discrete
diff_miss += 1
hit_proportion = count_hit/float(len(NN))
miss_proportion = count_miss/float(len(NN))
diff = diff_hit * miss_proportion + diff_miss * hit_proportion
return diff
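###############################################################################
# Scoring sketch (standard Relief-style weighting, matching the code above):
# for each feature f and target instance inst with neighbor set NN,
#
#   Scores[f] += diff_hit * (count_miss / len(NN)) + diff_miss * (count_hit / len(NN))
#
# where diff_hit accumulates (negative) feature differences over same-class
# neighbors and diff_miss over other-class neighbors; continuous features
# contribute |x_inst - x_nn| / mmdiff, discrete features contribute 0/1.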
<|end_of_text|>cimport cython
from asim.support cimport mathwrapper as math
@cython.final
cdef class Location(object):
cdef public double x, y, z
cpdef double distance_to(self, Location other) nogil except -1.
@cython.locals(ret = Location)
cpdef Location __copy__(self)
@cython.final
cdef class Nuclide(object):
cdef public double dose_mju, dose_mjue, dose_a, dose_b, dose_e, dose_cb, half_life, lambda_, depo_vg
cdef class SourceModel(object):
cpdef Nuclide[:] inventory(self)
cpdef double[:] release_rate(self, int time)
cpdef Location location(self)
cdef class MeteoModel(object):
cpdef double mixing_layer_height_at(self, Location loc, int time) except -1.
cpdef double wind_speed_at(self, Location loc, int time) except -1.
cpdef double wind_direction_at(self, Location loc, int time) except 7.
cpdef double dispersion_xy(self, Location loc, int time, double total_distance) except -1.
cpdef double dispersion_z(self, Location loc, int time, double total_distance) except -1.
cdef class DispersionModel(object):
cpdef int propagate(self, MeteoModel meteo_model, SourceModel source_model) except -1
cpdef double[:] concentration_at(self, Location loc)
cpdef double[:] dose_at(self, Location loc)
cpdef double[:] deposition_at(self, Location loc)
<|end_of_text|># Copyright 2016 Quora, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cython
cpdef object get_original_fn(object fn)
@cython.locals(_full_name_=str)
cpdef str get_full_name(object src)
@cython.locals(result=str, first=bint)
cpdef str get_function_call_str(fn, tuple args, dict kwargs)
@cython.locals(result=str, first=bint)
cpdef str get_function_call_repr(fn, tuple args, dict kwargs)
cpdef object getargspec(object func)
cpdef bint is_cython_or_generator(object fn) except -1
@cython.locals(name=str)
cpdef bint is_cython_function(object fn) except -1
cpdef bint is_cython_class(object cls) except -1
cpdef bint is_classmethod(object fn) except -1
cpdef wraps(object wrapped, object assigned=?, object updated=?)
<|end_of_text|>from libcpp cimport bool
cdef extern from "../src/DMP.h" namespace "DMP":
double computeAlphaX(double goal_x, double goal_t, double start_t) except +
cdef extern from "../src/DMP.h" namespace "DMP":
void initializeRBF(double* widths, double* centers, int num_widths,
int num_centers, const double goal_t, const double start_t,
double overlap, double alpha) except +
cdef extern from "../src/DMP.h" namespace "DMP":
void LearnfromDemo(double* T, int num_T, double* Y, int num_steps,
int num_task_dims, double* weights, int num_weights_per_dim,
int num_weight_dims, double* widths, int num_widths,
double* centers, int num_centers, double regularization_coefficient,
double alpha_y, double beta_y, double alpha_x,
bool allow_final_velocity) except +
cdef extern from "../src/DMP.h" namespace "DMP":
void LearnfromDemoQuaternion(double* T, int num_T, double* Y, int num_steps,
int num_task_dims, double* weights, int num_weights_per_dim,
int num_weight_dims, double* widths, int num_widths,
double* centers, int num_centers, double regularization_coefficient,
double alpha_y, double beta_y, double alpha_x,
bool allow_final_velocity) except +
cdef extern from "../src/DMP.h" namespace "DMP":
void dmpPropagate(double last_t, double t, double* last_y, int num_last_y, double* last_yd,
int num_last_yd, double* last_ydd, int num_last_ydd, double* y, int num_y,
double* yd, int num_yd, double* ydd, int num_ydd, double* goal_y, int num_goal_y,
double* goal_yd, int num_goal_yd, double* goal_ydd, int num_goal_ydd, double* start_y,
int num_start_y, double* start_yd, int num_start_yd, double* start_ydd, int num_start_ydd,
double goal_t, double start_t, double* weights, int num_weights_per_dim, int num_weight_dims,
double* widths, int num_widths, double* centers, int num_centers,
double alpha_y, double beta_y, double alpha_z, double integration_dt) except +
cdef extern from "../src/DMP.h" namespace "DMP":
void dmpPropagateQuaternion(double last_t, double t, double* last_y, int num_last_y, double* last_yd,
int num_last_yd, double* last_ydd, int num_last_ydd, double* y, int num_y,
double* yd, int num_yd, double* ydd, int num_ydd, double* goal_y, int num_goal_y,
double* goal_yd, int num_goal_yd, double* goal_ydd, int num_goal_ydd, double* start_y,
int num_start_y, double* start_yd, int num_start_yd, double* start_ydd, int num_start_ydd,
double goal_t, double start_t, double* weights, int num_weights_per_dim, int num_weight_dims,
double* widths, int num_widths, double* centers, int num_centers,
double alpha_y, double beta_y, double alpha_z, double integration_dt) except +
cdef extern from "../src/DMP.h" namespace "DMP":
void compute_gradient(double* _in, int num_in_steps, int num_in_dims,
double* out, int num_out_steps, int num_out_dims,
double* time, int num_time,
bool allow_final_velocity) except +
cdef extern from "../src/DMP.h" namespace "DMP":
void compute_quaternion_gradient(double* _in, int num_in_steps, int num_in_dims,
double* out, int num_out_steps, int num_out_dims,
double* time, int num_time,
bool allow_final_velocity) except +
<|end_of_text|>import json
from cythonic.plugins import queries
import numpy as np
from libc.math cimport fmax as cmax
from cythonic.plugins.functions import score
cdef class sim_recorder(Sim):
def __init__(self,queen_args, domain_args, sim_args, **kwargs):
""" Need to store the dicts for inserting into database """
self.queen_dict = queen_args
self.sim_dict = sim_args
self.domain_dict = domain_args
        self.ant_dict = queen_args['ant_dict']
if 'initializer' in kwargs:
self.initiator = kwargs['initializer']
else:
self.initiator = 'NULL'
if 'comment' in kwargs:
self.comment = kwargs['comment']
else:
self.comment = 'NULL'
super(sim_recorder,self).__init__(queen_args = queen_args, domain_args = domain_args, **sim_args)
def init_connection(self,str path, str dbname):
self.db = db_controller(path,dbname)
def setup_sim(self, dict deposit_dict, dict gauss_dict, unsigned int upload_interval):
" prepare the simulation "
self.update_interval = upload_interval
self.set_depositing(self.ant_dict['deposit_fun'], deposit_dict )
self.set_gaussian(**gauss_dict)
self.id = self.db.new_sim_id()
self.push_settings(deposit_dict, gauss_dict)
self.flush_resultset() # initialize the insert state query
def push_settings(self, dict deposit_dict, dict gauss_dict):
""" INSERT the simulation settings into the database prior to experiment """
# manipulation, convert nested dict to string for database
cdef dict sim_dict = self.sim_dict.copy()
sim_dict['deploy_timing_args'] = json.dumps(self.sim_dict['deploy_timing_args']).replace("'","''")
queries_list =(
queries.insert_ant(self.id, **self.ant_dict),
queries.insert_queen(self.id,**self.queen_dict),
queries.insert_domain(self.id,**self.domain_dict),
queries.insert_gauss(self.id,**gauss_dict),
queries.insert_deposit(self.id,**deposit_dict),
queries.insert_sens(self.id,**self.ant_dict['sens_dict']),
queries.sim_settings(self.id,**sim_dict), # use the local manipulated copy of sim_dict
)
for qry in queries_list:
self.db.execute(qry)
cdef void flush_resultset(self,):
" prepare for a new batch of settings "
# self.pending_qry = queries.insert_stepupdates()
self.qry_args = []
self.pending_qry = "INSERT INTO STEP (SIM_ID, STEP_NR, ANT_ID, X, Y, THETA, Q) VALUES (?,?,?,?,?,?,?)"
cdef void extract_antstate(self, unsigned int step):
" make a copy of the vital ant state elements and store "
cdef unsigned int i
for i in range(self.queen.count_active):
" push the state to a list "
if self.queen.state_list[i].pos.x >= 0:
#do not push empty state
self.qry_args.append((self.id, step, self.queen.state_list[i].id, self.queen.state_list[i].pos.x,
self.queen.state_list[i].pos.y, self.queen.state_list[i].theta,
self.queen.drop_quantity[i]))
cdef void record_step(self, unsigned int stepnr):
" record the state at each step "
self.extract_antstate(stepnr) #append query with current ant_state
        if stepnr > 0 and (stepnr == self.steps or stepnr % self.update_interval == 0) and len(self.qry_args) > 0:
# check if results must be pushed to the database
self.db.executemany(self.pending_qry, self.qry_args)
self.flush_resultset() # start with a fresh query string
cdef dict run_sim(self, bint record, str initiator ):
" run the simulation "
cdef unsigned int i #stepcounter
cdef unsigned int action_counter = 0 #count all ant actions
cdef double max_pheromone_level = 0 # keep track of the absolute max
cdef double start_entropy = self.domain.entropy()
cdef double end_entropy = 0 #placeholder for resulting entropy
cdef unsigned int stepupdates = 0 #counter for storing the result
        cdef unsigned int[:] k_vec = np.arange(0, self.steps+1, <unsigned int>int(np.ceil(self.steps/100)), dtype=np.uint32)
k_vec[len(k_vec)-1] = self.steps #make sure the last performance sample is taken at the last step
cdef list entropy_vec = [], nestcount_vec = []
if record:
self.db.execute(f"UPDATE sim SET RECORDING = 'TRUE' WHERE ID = {self.id}")
self.db.execute(f"UPDATE sim SET initializer = '{initiator}' WHERE ID = {self.id}")
self.db.execute(queries.update_sim(self.id, status = 'STARTED'))
# === loop ===
for i in range(self.steps+1):
self.sim_step() # do the stepping
if record:
# do the recording
self.record_step(i)
action_counter +=self.queen.count_active
if i == k_vec[stepupdates]:
# this iteration the result vector is appended
entropy_vec.append(round(self.domain.entropy(),3))
nestcount_vec.append(self.nestcount)
stepupdates+=1 # increase the vector index
max_pheromone_level = cmax(max_pheromone_level,self.domain.Map.max())
# === end loop ===
# store results:
end_entropy = self.domain.entropy()
dx = self.domain.food_location.x - self.domain.nest_location.x
dy = self.domain.food_location.y - self.domain.nest_location.y
final_score = score(dx = dx, dy = dy, steps = action_counter, dt=self.dt, speed = self.queen.default_speed, nestcount = self.nestcount)
result = {'sim_id': self.id,'foodcount': self.foodcount, 'nestcount': self.nestcount,
'entropy_vec': entropy_vec,'start_entropy': round(start_entropy,3), 'end_entropy': round(end_entropy,3),
'scorecard':nestcount_vec,'step_vec':np.asarray(k_vec).tolist(),'score':final_score, 'pheromone_max': round(max_pheromone_level,1)}
self.db.execute(queries.insert_results(**result))
self.db.execute(queries.update_sim(self.id, status = 'FINISHED', initializer = self.initiator, comment = self.comment, steps = action_counter))
return result
<|end_of_text|># -*- coding: utf-8 -*-
from libcpp cimport bool
cimport numpy as np
np.import_array()
import time
from bitbots.robot.pypose cimport PyPose as Pose
from bitbots.robot.kinematics cimport Robot
from bitbots.kick.functionWrapper cimport SingleArgFunctionWrapper, DoubleArgFunctionWrapper, ValidateFunctionWrapper
from bitbots.kick.bezier cimport Bezier
cdef inline float degrees(float radians):
return radians * 180 / 3.1415926535897932384626433832795028841971693
cdef inline float radians(float degrees):
return degrees / 180 * 3.1415926535897932384626433832795028841971693
cdef class Phase(object):
cdef float delta
cdef float epsilon
cdef Robot robot
cdef DoubleArgFunctionWrapper distance
cdef SingleArgFunctionWrapper cog
cdef SingleArgFunctionWrapper cog_distance
cdef ValidateFunctionWrapper validate
cdef int max_iter
cdef list iter
cdef np.ndarray direction
cdef Bezier bezier
cdef bool no_cog
cdef np.ndarray get_direction(self)
cdef Bezier get_bezier(self)
cdef list get_iter(self)
cdef Pose calc_angles(self, Pose ipc_pose,float t)
cdef np.ndarray step(self, np.ndarray angles, np.ndarray dest, float error)
cdef np.ndarray normalize_vector(np.ndarray correction, float error, float weight=?)
cdef np.ndarray pose_to_array(Pose ipc_pose)
cdef Pose array_to_pose(np.ndarray array)
cdef class Phaseholer(object):
cdef int num, idx
cdef Phase phase
cdef Phaseholer next
cdef Phase get(self, int idx)
cdef append(self, Phase phase)
cdef remove_first(self)
cdef int get_num(self)
<|end_of_text|>'''
texture_from_pixmap
'''
from libc.stdint cimport intptr_t, uintptr_t
from libc.stdio cimport printf, fprintf, stderr
from libc.string cimport strlen
from libc.stdlib cimport malloc, free
DEF EGL_TRUE = 1
DEF EGL_FALSE = 0
DEF EGL_NONE = 0x3038
DEF EGL_NATIVE_PIXMAP_KHR = 0x30B0
DEF EGL_NO_CONTEXT = 0
DEF EGL_IMAGE_PRESERVED_KHR = 0x30D2
DEF EGL_NO_IMAGE_KHR = 0x0
cdef extern from "X11/Xlib.h":
ctypedef struct XErrorEvent:
Display *display
XID resourceid
unsigned long serial
unsigned char error_code
unsigned char request_code
unsigned char minor_code
cdef void XFree(void *data) nogil
ctypedef int (*XErrorHandler)(Display *d, XErrorEvent *e)
cdef XErrorHandler XSetErrorHandler(XErrorHandler)
cdef void XGetErrorText(Display *, unsigned char, char *, int)
cdef extern from "EGL/egl.h":
ctypedef intptr_t EGLAttrib
EGLDisplay eglGetCurrentDisplay() nogil
EGLint eglGetError() nogil
cdef extern from "GL/gl.h":
ctypedef unsigned int GLenum
GLenum glGetError() nogil
cdef EGLImageKHR bindTexImage(Pixmap pixmap) nogil:
cdef EGLDisplay egl_display = eglGetCurrentDisplay()
cdef EGLint *attribs = [
EGL_IMAGE_PRESERVED_KHR, EGL_TRUE,
EGL_NONE
]
cdef EGLImageKHR image = egl.eglCreateImageKHR(
egl_display,
<EGLContext>EGL_NO_CONTEXT,
EGL_NATIVE_PIXMAP_KHR,
<EGLClientBuffer>pixmap,
attribs,
)
egl.glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, <GLeglImageOES>image)
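    # Once the image contents are bound into the GL texture, the EGLImage
    # handle itself is no longer needed; per EGL_KHR_image_base the texture
    # keeps its data after the image is destroyed below.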
    if image != <EGLImageKHR>EGL_NO_IMAGE_KHR:
egl.eglDestroyImageKHR(egl_display, image)
<|end_of_text|>cimport cython
cdef class B
cdef class A(object):
cdef list dealloc1
cdef class Y(X): pass
cdef class X(C): pass
cdef class C: pass
cdef class B(A):
cdef list dealloc2
cdef class Z(A): pass
def test():
"""
>>> test()
A
B
C
X
Y
Z
"""
A(), B(), C(), X(), Y(), Z()
import sys
py_version = sys.version_info[:2]
if py_version >= (3, 7): # built-in dict is insertion-ordered
global_values = list(globals().values())
else:
global_values = [A, B, C, X, Y, Z]
for value in global_values:
if isinstance(value, type):
print(value.__name__)
<|end_of_text|>
cimport cython
dict_size = 4
d = dict(zip(range(10,dict_size+10), range(dict_size)))
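# Each test below pairs a doctest with a compile-tree assertion: the
# test_assert_path_exists decorators verify that Cython lowers the dict
# iteration into a plain C while loop instead of a generic Python iterator.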
@cython.test_fail_if_path_exists(
"//WhileStatNode")
def items(dict d):
"""
>>> items(d)
[(10, 0), (11, 1), (12, 2), (13, 3)]
"""
l = []
for k,v in d.items():
l.append((k,v))
l.sort()
return l
@cython.test_assert_path_exists(
"//WhileStatNode",
"//WhileStatNode/SimpleCallNode",
"//WhileStatNode/SimpleCallNode/NameNode")
def iteritems(dict d):
"""
>>> iteritems(d)
[(10, 0), (11, 1), (12, 2), (13, 3)]
"""
l = []
for k,v in d.iteritems():
l.append((k,v))
l.sort()
return l
@cython.test_assert_path_exists(
"//WhileStatNode",
"//WhileStatNode/SimpleCallNode",
"//WhileStatNode/SimpleCallNode/NameNode")
def iteritems_int(dict d):
"""
>>> iteritems_int(d)
[(10, 0), (11, 1), (12, 2), (13, 3)]
"""
cdef int k,v
l = []
for k,v in d.iteritems():
l.append((k,v))
l.sort()
return l
@cython.test_assert_path_exists(
"//WhileStatNode",
"//WhileStatNode/SimpleCallNode",
"//WhileStatNode/SimpleCallNode/NameNode")
def iteritems_tuple(dict d):
"""
>>> iteritems_tuple(d)
[(10, 0), (11, 1), (12, 2), (13, 3)]
"""
l = []
for t in d.iteritems():
l.append(t)
l.sort()
return l
@cython.test_assert_path_exists(
"//WhileStatNode",
"//WhileStatNode/SimpleCallNode",
"//WhileStatNode/SimpleCallNode/NameNode")
def iteritems_listcomp(dict d):
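    """
    >>> iteritems_listcomp(d)
    [(10, 0), (11, 1), (12, 2), (13, 3)]
    """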
cdef list l = [(k,v) for k,v in d.iteritems()]
l.sort()
return l
@cython.test_assert_path_exists(
"//WhileStatNode",
"//WhileStatNode/SimpleCallNode",
"//WhileStatNode/SimpleCallNode/NameNode")
def iterkeys(dict d):
"""
>>> iterkeys(d)
[10, 11, 12, 13]
"""
l = []
for k in d.iterkeys():
l.append(k)
l.sort()
return l
@cython.test_assert_path_exists(
"//WhileStatNode",
"//WhileStatNode/SimpleCallNode",
"//WhileStatNode/SimpleCallNode/NameNode")
def iterkeys_int(dict d):
"""
>>> iterkeys_int(d)
[10, 11, 12, 13]
"""
cdef int k
l = []
for k in d.iterkeys():
l.append(k)
l.sort()
return l
@cython.test_assert_path_exists(
"//WhileStatNode",
"//WhileStatNode/SimpleCallNode",
"//WhileStatNode/SimpleCallNode/NameNode")
def iterdict(dict d):
"""
>>> iterdict(d)
[10, 11, 12, 13]
"""
l = []
for k in d:
l.append(k)
l.sort()
return l
@cython.test_assert_path_exists(
"//WhileStatNode",
"//WhileStatNode/SimpleCallNode",
"//WhileStatNode/SimpleCallNode/NameNode")
def iterdict_int(dict d):
"""
>>> iterdict_int(d)
[10, 11, 12, 13]
"""
cdef int k
l = []
for k in d:
l.append(k)
l.sort()
return l
@cython.test_assert_path_exists(
"//WhileStatNode",
"//WhileStatNode/SimpleCallNode",
"//WhileStatNode/SimpleCallNode/NameNode")
def iterdict_reassign(dict d):
"""
>>> iterdict_reassign(d)
[10, 11, 12, 13]
"""
cdef dict d_new = {}
l = []
for k in d:
d = d_new
l.append(k)
l.sort()
return l
@cython.test_assert_path_exists(
"//WhileStatNode",
"//WhileStatNode/SimpleCallNode",
"//WhileStatNode/SimpleCallNode/NameNode")
def iterdict_listcomp(dict d):
"""
>>> iterdict_listcomp(d)
[10, 11, 12, 13]
"""
cdef list l = [k for k in d]
l.sort()
return l
@cython.test_assert_path_exists(
"//WhileStatNode",
"//WhileStatNode/SimpleCallNode",
"//WhileStatNode/SimpleCallNode/NameNode")
def itervalues(dict d):
"""
>>> itervalues(d)
[0, 1, 2, 3]
"""
l = []
for v in d.itervalues():
l.append(v)
l.sort()
return l
@cython.test_assert_path_exists(
"//WhileStatNode",
"//WhileStatNode/SimpleCallNode",
"//WhileStatNode/SimpleCallNode/NameNode")
def itervalues_int(dict d):
"""
>>> itervalues_int(d)
[0, 1, 2, 3]
"""
cdef int v
l = []
for v in d.itervalues():
l.append(v)
l.sort()
return l
@cython.test_assert_path_exists(
"//WhileStatNode",
"//WhileStatNode/SimpleCallNode",
"//WhileStatNode/SimpleCallNode/NameNode")
def itervalues_listcomp(dict d):
"""
>>> itervalues_listcomp(d)
[0, 1, 2, 3]
"""
cdef list l = [v for v in d.itervalues()]
l.sort()
return l
@cython.test_assert_path_exists(
"//WhileStatNode",
"//WhileStatNode/SimpleCallNode",
"//WhileStatNode/SimpleCallNode/NameNode")
def itervalues_kwargs(**d):
"""
>>> itervalues_kwargs(a=1, b=2, c=3, d=4)
[1, 2, 3, 4]
"""
cdef list l = [v for v in d.itervalues()]
l.sort()
return l
<|end_of_text|># -*- coding: utf-8 -*-
from bnz.defs cimport *
from bnz.coordinates.coord cimport GridCoord
from bnz.coordinates.grid cimport GridData, BnzGrid
IF MHDPIC: from bnz.particles.particle cimport BnzParticles
from bnz.integration.integrator cimport BnzIntegr
# primitive or conservative variables in grid output
cdef enum VarType:
VAR_PRIM
VAR_CONS
VAR_PIC
# Function pointer to calculate user history variables.
ctypedef real(*HstFunc)(BnzGrid,BnzIntegr)
IF MHDPIC:
# Pointer to user-defined particle selection function.
ctypedef int(*PrtSelFunc)(ParticleData*,long)
cdef class BnzIO:
cdef:
str usr_dir
real hst_dt # write history every hst_dt
real grid_dt # the grid every grid_dt
        real slc_dt # a slice every slc_dt
real prt_dt # particles every prt_dt
real rst_dt # restart files every rst_dt
        int use_npy # output .npy arrays instead of HDF5 (only without MPI)
# history
list hst_funcs_u # user history variable functions
list hst_names_u # names of user history variables
int nhst # total number (user+default) of active history variables
# grids
VarType var_type # primitive/conservative variables in grid output
# grid slice
int slc_axis # axis perpendicular to the slice
int slc_loc # cell index of the slice along the slc_axis
int write_ghost # 1 to write ghost cells
int restart # 1 to restart simulation
# particles
PrtSelFunc prt_sel_func # particle selection function
int prt_stride # write every prt_stride particle
cdef void write_output(self, BnzGrid,BnzIntegr)
cdef void write_restart(self, BnzGrid,BnzIntegr)
cdef void set_restart(self, BnzGrid,BnzIntegr)
<|end_of_text|># -*- coding: utf-8 -*-
#cython: boundscheck=False
#cython: wraparound=False
import numpy as np
cimport numpy as np
#CAN STILL BE OPTIMIZED IF NECESSARY
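# The triple loop below appears to discretize a mode-coupling-style memory
# integral (reading the coefficients directly off the code):
#
#   M(q,t) = rho*S(q)/(8*pi**2*q**2) * Int k**2 dk Int_{-1}^{1} ds
#            F(k,t) * F(u,t) * ( k*s*c(k) + (q - k*s)*c(u) )**2
#
# with s = cos(theta) and u = |q - k| = sqrt(q**2 + k**2 - 2*q*k*s).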
def M_calc(int kmax, int tmax, double ds, double dk, double dt, double rho,
np.ndarray[np.double_t, ndim=1] c,
np.ndarray[np.double_t, ndim=1] S,
np.ndarray[np.double_t, ndim=2] F):
outM = open('OutM','w',1)
cdef np.ndarray[np.double_t, ndim=2] Memory = np.zeros( [kmax, tmax] )#, dtype=np.double_t )
cdef int q, t, k, s, u, smax = int(1./ds)
cdef double sumk, sums, carnot, cos
cdef double coeff = rho*(dk**3)*ds/( 8*(np.pi)**2 )
# loop over the M(q,t) arguments
for t in range(tmax):
for q in range(1,kmax):
# integral over dummy variable k
sumk = 0.0
for k in range(kmax):
sums = 0.0
for s in range(-smax,smax+1):
# carnot = |q-k| from cosine theorem
cos = s*ds
carnot = q**2+k**2-2.*k*q*cos
if carnot<.25:
u = 0
else:
u = int(np.sqrt(carnot)+0.5)
if u >= kmax:
u = kmax-1
# remember to better construct the weights!
if abs(s)==smax:
sums +=.5*F[u,t]*(k*cos*c[k]+(q-k*cos)*c[u])**2
else:
sums += F[u,t]*(k*cos*c[k]+(q-k*cos)*c[u])**2
sumk += sums*F[k,t]*(k**2)
#integral done; scale for factors
Memory[q,t] = sumk*coeff*S[q]/(q**2)
outM.write( ('{0:.6e}\t{1:.6e}\t{2:.10e}\n').format(
t*dt,q*dk,Memory[q,t] ))
# job done
return Memory
<|end_of_text|># ticket: t307
# mode: error
nonexisting(3, with_kw_arg=4)
_ERRORS = u"""
4:0: undeclared name not builtin: nonexisting
"""
<|end_of_text|>"""
CVM-S4 Velocity Model
CVM-SCEC version 4 (CVM-S4), also known as SCEC | Cython |
CVM-4, is a 3D seismic velocity model.
The current version is CVM-S4 was released in 2006 and was originally posted for download from the
SCEC Data Center SCEC 3D Velocity Models Site.
This code is the Cython interface to the legacy CVM-S4 Fortran code. It returns equivalent material
properties to UCVM.
Copyright 2017 Southern California Earthquake Center
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python Imports
import os
from typing import List
# Cython Imports
cimport cython
from libc.stdlib cimport malloc, free
# UCVM Imports
from ucvm.src.model.velocity import VelocityModel
from ucvm.src.shared import VelocityProperties
from ucvm.src.shared.properties import SeismicData
# Cython defs
cdef extern from "src/cvms.h":
void cvms_init_(char *, int *)
void cvms_version_(char *, int *)
void cvms_query_(int *, float *, float *, float *, float *, float *, float *, int *)
class CVMS4VelocityModel(VelocityModel):
"""
Defines the CVM-S4 interface to UCVM. This class queries the legacy Fortran code to retrieve
the material properties and records the data to the new UCVM data structures.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
cdef char *model_path
cdef int errcode
py_model_path = os.path.join(self.model_location, "data").encode("ASCII")
model_path = py_model_path
errcode = 0
cvms_init_(model_path, &errcode)
if errcode!= 0:
raise RuntimeError("CVM-S4 not initialized properly.")
def _query(self, points: List[SeismicData], **kwargs) -> bool:
"""
This is the method that all models override. It handles querying the velocity model
and filling in the SeismicData structures.
Args:
points (:obj:`list` of :obj:`SeismicData`): List of SeismicData objects containing the
points in depth. These are to be populated with :obj:`VelocityProperties`:
Returns:
True on success, false if there is an error.
"""
cdef int nn
cdef float *lon
cdef float *lat
cdef float *dep
cdef float *vp
cdef float *vs
cdef float *density
cdef int retcode
nn = len(points)
retcode = 0
lon = <float *> malloc(len(points)*cython.sizeof(float))
for i in range(0, len(points)):
lon[i] = points[i].converted_point.x_value
lat = <float *> malloc(len(points)*cython.sizeof(float))
for i in range(0, len(points)):
lat[i] = points[i].converted_point.y_value
dep = <float *> malloc(len(points)*cython.sizeof(float))
for i in range(0, len(points)):
dep[i] = points[i].converted_point.z_value
vs = <float *> malloc(len(points)*cython.sizeof(float))
for i in range(0, len(points)):
vs[i] = 0.0
vp = <float *> malloc(len(points)*cython.sizeof(float))
for i in range(0, len(points)):
vp[i] = 0.0
density = <float *> malloc(len(points)*cython.sizeof(float))
for i in range(0, len(points)):
density[i] = 0.0
cvms_query_(&nn, lon, lat, dep, vp, vs, density, &retcode)
# Now we need to go through the material properties and add them to the SeismicData objects.
id_s4 = self._public_metadata["id"]
for i in range(0, len(points)):
if points[i].converted_point.z_value >= 0:
points[i].set_velocity_data(
VelocityProperties(
vp[i], vs[i], density[i], # From CVM-S4
None, None, # No Qp or Qs defined
id_s4, id_s4, id_s4, # All data comes direct from CVM-S4
None, None # No Qp or Qs defined
)
)
else:
# CVM-S4 has no information on negative depth points.
self._set_velocity_properties_none(points[i])
free(lon)
free(lat)
free(dep)
free(vs)
free(vp)
free(density)
return True
<|end_of_text|># distutils: language = c++
# cython: language_level=2
cdef extern from "atanherf.h" namespace "AtanhErf" nogil:
double atanherf_largex (const double & x);
double atanherf_interp (const double & x);
double evalpoly (const double & x);
double atanherf (const double & x);
<|end_of_text|>"""
Squish Python bindings
======================
Squish is a c++ library for compressing and uncompressing image using S3TC
algorithm.
Check website at http://code.google.com/p/libsquish/
"""
cdef extern from "stdlib.h":
ctypedef unsigned long size_t
void *malloc(size_t size)
void free(void *ptr)
cdef extern from "Python.h":
object PyBytes_FromStringAndSize(char *s, Py_ssize_t len)
cdef extern from "squish.h" namespace "squish":
void CompressImage(unsigned char* rgba, int width, int height,
void* blocks, int flags, float *metric)
void CompressMasked(unsigned char* rgba, int mask,
void* block, int flags)
void CompressImage(unsigned char* rgba, int width, int height,
void* blocks, int flags)
void DecompressImage(unsigned char *rgba, int width, int height,
void *blocks, int flags)
int GetStorageRequirements(int width, int height, int flags)
__version__ = '1.0'
#: Use DXT1 compression.
DXT1 = ( 1 << 0 )
#: Use DXT3 compression.
DXT3 = ( 1 << 1 )
#: Use DXT5 compression.
DXT5 = ( 1 << 2 )
#: Use a very slow but very high quality color compressor.
COLOR_ITERATIVE_CLUSTER_FIT = ( 1 << 8 )
#: Use a slow but high quality color compressor (the default).
COLOR_CLUSTER_FIT = ( 1 << 3 )
#: Use a fast but low quality color compressor.
COLOR_RANGE_FIT = ( 1 << 4 )
#: Weight the color by alpha during cluster fit (disabled by default).
WEIGHT_COLOR_BY_ALPHA = ( 1 << 7 )
class SquishException(Exception):
"""Used when squish fail in one function"""
pass
def compressImage(bytes rgba, int width, int height, int flags):
"""
Compresses an image in memory.
:param rgba: The pixels of the source.
:param width: The width of the source image.
:param height: The height of the source image.
:param flags: Compression flags.
:return: A string containing the compressed image
The source pixels should be presented as a contiguous array of width*height
rgba values, with each component as 1 byte each. In memory this should be::
( r1, g1, b1, a1,...., rn, gn, bn, an ) for n = width*height
The flags parameter should specify either DXT1, DXT3 or DXT5 compression,
however, DXT1 will be used by default if none is specified. When using DXT1
compression, 8 bytes of storage are required for each compressed DXT block.
DXT3 and DXT5 compression require 16 bytes of storage per block.
The flags parameter can also specify a preferred color compressor to use
when fitting the RGB components of the data. Possible color compressors
are: COLOR_CLUSTER_FIT (the default), COLOR_RANGE_FIT (very fast, low
quality) or COLOR_ITERATIVE_CLUSTER_FIT (slowest, best quality).
When using COLOR_CLUSTER_FIT or COLOR_ITERATIVE_CLUSTER_FIT, an additional
flag can be specified to weight the importance of each pixel by its alpha
value. For images that are rendered using alpha blending, this can
significantly increase the perceived quality.
    The underlying C++ API also takes a metric parameter that weights the
    relative importance of each color channel; NULL selects the default
    uniform weight ( 1.0f, 1.0f, 1.0f ). This replaced the older flag-based
    choice between uniform and "perceptual" weights, the latter fixed at
    ( 0.2126f, 0.7152f, 0.0722f ). This binding calls CompressImage without
    a metric, i.e. with the uniform default.
"""
cdef int datasize
cdef void *data
cdef unsigned char* buf_rgba
cdef object result = None
buf_rgba = <unsigned char*><char*>rgba
if buf_rgba == NULL:
raise SquishException("Invalid rgba parameter")
if width <= 0 or height <= 0:
raise SquishException("Invalid width/height value")
datasize = GetStorageRequirements(width, height, flags)
if datasize == 0:
raise SquishException("Unable to calculate size for storage")
data = malloc(datasize)
if data == NULL:
raise SquishException("Unable to allocate storage")
CompressImage(buf_rgba, width, height, data, flags)
try:
result = PyBytes_FromStringAndSize(<char *>data, datasize)
finally:
free(data)
return result
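# Illustrative round trip (hypothetical 2x2 all-red image, for orientation
# only):
#
#   rgba = b'\xff\x00\x00\xff' * 4
#   blocks = compressImage(rgba, 2, 2, DXT1) # one 8-byte DXT1 block
#   restored = decompressImage(blocks, 2, 2, DXT1)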
def compressMasked(bytes rgba, int mask, int flags):
    """
    Compresses a single 4x4 block of pixels, using the bits of ``mask`` to
    select which of the 16 rgba values are valid.
    """
    cdef int datasize
    cdef void *data
    cdef unsigned char* buf_rgba
    cdef object result = None
    buf_rgba = <unsigned char*><char*>rgba
    if buf_rgba == NULL:
        raise SquishException("Invalid rgba parameter")
    # Block size follows libsquish: 8 bytes for DXT1, 16 for DXT3/DXT5
    # (a fixed 8-byte buffer would overflow for DXT3/DXT5).
    datasize = 8 if (flags & DXT1) else 16
    data = malloc(datasize)
    if data == NULL:
        raise SquishException("Unable to allocate storage")
    CompressMasked(buf_rgba, mask, data, flags)
    try:
        result = PyBytes_FromStringAndSize(<char *>data, datasize)
    finally:
        free(data)
    return result
def decompressImage(bytes blocks, int width, int height, int flags):
"""
Decompresses an image in memory.
:param blocks: The compressed DXT blocks.
:param width: The width of the source image.
:param height: The height of the source image.
:param flags: Compression flags.
:return: A string containing the decompressed image
    The decompressed pixels will be written as a contiguous array of
    width*height rgba values, with each component as 1 byte. In memory this is::
( r1, g1, b1, a1,...., rn, gn, bn, an ) for n = width*height
The flags parameter should specify either DXT1, DXT3 or DXT5 compression,
however, DXT1 will be used by default if none is specified. All other flags
are ignored.
"""
cdef int datasize = width * height * 4
cdef void *data
cdef unsigned char *buf_blocks
cdef object result = None
buf_blocks = <unsigned char *>blocks
if buf_blocks == NULL:
raise SquishException("Invalid blocks parameter")
if width <= 0 or height <= 0:
raise SquishException("Invalid width/height value")
data = malloc(datasize)
if data == NULL:
raise SquishException("Unable to allocate storage")
DecompressImage(<unsigned char *>data, width, height, buf_blocks, flags)
try:
result = PyBytes_FromStringAndSize(<char *>data, datasize)
finally:
free(data)
return result<|end_of_text|>from libcpp.string cimport string as libcpp_string
from libcpp.vector cimport vector as libcpp_vector
from libcpp.pair cimport pair as libcpp_pair
from libcpp cimport bool
from libc.string cimport const_char
import numpy as np
cimport numpy as np
cdef extern from "libhmsbeagle/beagle.h":
const_char* beagleGetVersion()
const_char* beagleGetCitation()
BeagleResourceList* beagleGetResourceList()
int beagleSetTipStates(int instance, int tip_index, const int* in_states)
int beagleSetTipPartials(int instance, int tip_index, const double* in_states)
int beagleSetPartials(int instance, int buffer_index, const double* in_partials)
int beagleGetPartials(int instance, int buffer_index, int state_index, double* out_partials)
int beagleSetEigenDecomposition(int instance, int eigenIndex, const double* inEigenVectors, const double* inInverseEigenVectors, const double* inEigenValues)
int beagleSetStateFrequencies(int instance, int stateFrequenciesIndex, const double* inStateFrequencies)
int beagleSetCategoryWeights(int instance, int categoryWeightsIndex, const double* inCategoryWeights)
int beagleSetCategoryRates(int instance, const double* inCategoryRates)
int beagleSetPatternWeights(int instance, const double* inPatternWeights)
int beagleConvolveTransitionMatrices(int instance, const int* firstIndices, const int* secondIndices, const int* resultIndices, int matrixCount)
int beagleUpdateTransitionMatrices(int instance, int eigenIndex, const int* probabilityIndices, const int* firstDerivativeIndices, const int* secondDerivativeIndices, const double* edgeLengths, int count)
int beagleSetTransitionMatrix(int instance, int matrixIndex, const double* inMatrix, double paddedValue)
int beagleGetTransitionMatrix(int instance, int matrixIndex, double* outMatrix)
int beagleSetTransitionMatrices(int instance, const int* matrixIndices, const double* inMatrices, const double* paddedValues, int count)
int beagleWaitForPartials(const int instance, const int* destinationPartials, int destinationPartialsCount)
int beagleAccumulateScaleFactors(int instance, const int* scaleIndices, int count, int cumulativeScaleIndex)
int beagleRemoveScaleFactors(int instance, const int* scaleIndices, int count, int cumulativeScaleIndex)
int beagleResetScaleFactors(int instance, int cumulativeScaleIndex)
int beagleCopyScaleFactors(int instance, int destScalingIndex, int srcScalingIndex)
int beagleGetScaleFactors(int instance, int srcScalingIndex, double* outScaleFactors)
int beagleCalculateRootLogLikelihoods(int instance, const int* bufferIndices, const int* categoryWeightsIndices, const int* stateFrequenciesIndices, const int* cumulativeScaleIndices, int count, double* outSumLogLikelihood)
int beagleCalculateEdgeLogLikelihoods(int instance, const int* parentBufferIndices, const int* childBufferIndices, const int* probabilityIndices, const int* firstDerivativeIndices, const int* secondDerivativeIndices, const int* categoryWeightsIndices, const int* stateFrequenciesIndices, const int* cumulativeScaleIndices, int count, double* outSumLogLikelihood, double* outSumFirstDerivative, double* outSumSecondDerivative)
int beagleGetSiteLogLikelihoods(int instance, double* outLogLikelihoods)
int beagleGetSiteDerivatives(int instance, double* outFirstDerivatives, double* outSecondDerivatives)
ctypedef enum BeagleFlags_wrap "BeagleFlags":
_PRECISION_SINGLE "BEAGLE_FLAG_PRECISION_SINGLE"
_PRECISION_DOUBLE "BEAGLE_FLAG_PRECISION_DOUBLE"
_COMPUTATION_SYNCH "BEAGLE_FLAG_COMPUTATION_SYNCH"
_COMPUTATION_ASYNCH "BEAGLE_FLAG_COMPUTATION_ASYNCH"
_EIGEN_REAL "BEAGLE_FLAG_EIGEN_REAL"
_EIGEN_COMPLEX "BEAGLE_FLAG_EIGEN_COMPLEX"
_SCALING_MANUAL "BEAGLE_FLAG_SCALING_MANUAL"
_SCALING_AUTO "BEAGLE_FLAG_SCALING_AUTO"
_SCALING_ALWAYS "BEAGLE_FLAG_SCALING_ALWAYS"
_SCALING_DYNAMIC "BEAGLE_FLAG_SCALING_DYNAMIC"
_SCALERS_RAW "BEAGLE_FLAG_SCALERS_RAW"
_SCALERS_LOG "BEAGLE_FLAG_SCALERS_LOG"
_INVEVEC_STANDARD "BEAGLE_FLAG_INVEVEC_STANDARD"
_INVEVEC_TRANSPOSED "BEAGLE_FLAG_INVEVEC_TRANSPOSED"
_VECTOR_SSE "BEAGLE_FLAG_VECTOR_SSE"
_VECTOR_AVX "BEAGLE_FLAG_VECTOR_AVX"
_VECTOR_NONE "BEAGLE_FLAG_VECTOR_NONE"
_THREADING_OPENMP "BEAGLE_FLAG_THREADING_OPENMP"
_THREADING_NONE "BEAGLE_FLAG_THREADING_NONE"
_PROCESSOR_CPU "BEAGLE_FLAG_PROCESSOR_CPU"
_PROCESSOR_GPU "BEAGLE_FLAG_PROCESSOR_GPU"
_PROCESSOR_FPGA "BEAGLE_FLAG_PROCESSOR_FPGA"
_PROCESSOR_CELL "BEAGLE_FLAG_PROCESSOR_CELL"
_PROCESSOR_PHI "BEAGLE_FLAG_PROCESSOR_PHI"
_PROCESSOR_OTHER "BEAGLE_FLAG_PROCESSOR_OTHER"
_FRAMEWORK_CUDA "BEAGLE_FLAG_FRAMEWORK_CUDA"
_FRAMEWORK_OPENCL "BEAGLE_FLAG_FRAMEWORK_OPENCL"
_FRAMEWORK_CPU "BEAGLE_FLAG_FRAMEWORK_CPU"
ctypedef enum BeagleReturnCodes_wrap "BeagleReturnCodes":
_SUCCESS "BEAGLE_SUCCESS" #= 0, /**< Success */
_GENERAL "BEAGLE_ERROR_GENERAL" #= -1, /**< Unspecified error */
_OUT_OF_MEMORY "BEAG | Cython |
LE_ERROR_OUT_OF_MEMORY" #= -2, /**< Not enough memory could be allocated */
_UNIDENTIFIED_EXCEPTION "BEAGLE_ERROR_UNIDENTIFIED_EXCEPTION" #= -3, /**< Unspecified exception */
_UNINITIALIZED_INSTANCE "BEAGLE_ERROR_UNINITIALIZED_INSTANCE" #= -4, /**< The instance index is out of range, or the instance has not been created */
_OUT_OF_RANGE "BEAGLE_ERROR_OUT_OF_RANGE" #= -5, /**< One of the indices specified exceeded the range of the" array" */
_NO_RESOURCE "BEAGLE_ERROR_NO_RESOURCE" #= -6, /**< No resource matches requirements */
_NO_IMPLEMENTATION "BEAGLE_ERROR_NO_IMPLEMENTATION" #= -7, /**< No implementation matches requirements */
_FLOATING_POINT "BEAGLE_ERROR_FLOATING_POINT" #= -8
ctypedef enum BeagleOpCodes "BeagleOpCodes":
_OP_COUNT "BEAGLE_OP_COUNT"
_OP_NONE "BEAGLE_OP_NONE"
ctypedef struct BeagleResource:
char* name
char* description
long supportFlags
long requiredFlags
ctypedef struct BeagleResourceList:
BeagleResource* list
int length
cdef extern from "src/beagle_wrapper.h":
cdef cppclass beagle_instance:
beagle_instance(int tipCount, int partialsBufferCount, int compactBufferCount, int stateCount, int patternCount, int eigenBufferCount, int matrixBufferCount, int categoryCount, int scaleBufferCount, int resourceCount, long preferenceFlags, long requirementFlags)
int instance
int beagle_update_partials(const int instance, const int* operations, int operationCount, int cumulativeScaleIndex)
<|end_of_text|>import numpy as np
cimport numpy as np
ctypedef np.float64_t DOUBLE
cdef extern:
void c_product_in_fortran(double *a, double *b, double *aprodb)
void c_plus_in_fortran(double *a, double *b, double *aplusb)
def plus_in_python(double a, double b):
cdef double aplusb
c_plus_in_fortran(&a,&b,&aplusb)
return aplusb
def product_in_python(double a, double b):
cdef double aprodb
c_product_in_fortran(&a,&b,&aprodb)
return aprodb
def plus_and_product(double a, double b, double c):
cdef double temp, result
c_product_in_fortran(&b,&c,&temp)
c_plus_in_fortran(&a,&temp,&result)
return result
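# Worked example (assuming the Fortran routines implement plain + and *):
#   plus_and_product(1.0, 2.0, 3.0) computes temp = 2.0*3.0 = 6.0 and then
#   result = 1.0 + 6.0 = 7.0, i.e. a + b*c.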
<|end_of_text|>import numpy as np
cimport numpy as np
from scipy import stats
cdef class cy_GaussianMixture() :
cdef int k, retries, max_iters
cdef bint kmeans_init, multivariate
cdef univariate_model, multivariate_model
def __cinit__(self, int n_clusters = 5, int retries = 3, int max_iters = 200, bint kmeans_init = True):
self.k = n_clusters
self.retries = retries
self.max_iters = max_iters
self.kmeans_init = kmeans_init
self.univariate_model = UnivariateGaussianMixture(n_clusters=n_clusters, retries=retries,max_iters=max_iters)
self.multivariate_model = MultivariateGaussianMixture(n_clusters=n_clusters, retries=retries,max_iters=max_iters, kmeans_init=kmeans_init)
if self.k == 1 :
print("It is not recommended to use a gaussian mixture model with data you expect to just have one cluster (one gaussian.)".upper())
cpdef fit(self, X):
X = np.array(X)
cdef int dimensions
_, dimensions = X.shape
if dimensions == 1:
self.multivariate = False
elif dimensions > 1 :
self.multivariate = True
if self.multivariate : self.multivariate_model.fit(X)
else :
if self.kmeans_init : print("Gaussian Mixtures with data of 1 dimension do not support KMeans initialization. Such initialization "
"is turned off.".upper())
self.univariate_model.fit(X)
cpdef np.ndarray predict(self, X):
if self.multivariate :
return self.multivariate_model.predict(X)
else :
return self.univariate_model.predict(X)
cpdef np.ndarray soft_predict(self, X):
if self.multivariate :
return self.multivariate_model.soft_predict(X)
else :
return self.univariate_model.soft_predict(X)
cpdef np.ndarray confidence_samples(self, X):
if self.multivariate :
return self.multivariate_model.confidence_samples(X)
else :
return self.univariate_model.confidence_samples(X)
cpdef double aic(self):
if self.multivariate :
return self.multivariate_model.aic()
else :
return self.univariate_model.aic()
cpdef double bic(self):
if self.multivariate :
return self.multivariate_model.bic()
else :
return self.univariate_model.bic()
    cpdef np.ndarray anomaly_detect(self, X, threshold): # FIXME: fix this method
if self.multivariate :
return self.multivariate_model.anomaly_detect(X, threshold)
else :
return self.univariate_model.anomaly_detect(X, threshold)
cpdef bint _is_multivariate(self) :
return self.multivariate
cdef float multivariate_Gaussian(np.ndarray x, np.ndarray mu, np.ndarray cov_matrix) :
# my slow implementation of a multivariate Gaussian function
cdef int D = len(x)
return 1/np.sqrt(np.power(2 * np.pi, D) * np.linalg.det(cov_matrix)) * np.exp(-0.5 * (x - mu).T.dot(np.linalg.inv(cov_matrix)).dot(x-mu))
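# Editor's note on the expression above: it evaluates the multivariate normal
# density
#     N(x | mu, Sigma) = (2*pi)^(-D/2) * |Sigma|^(-1/2)
#                        * exp(-0.5 * (x - mu)^T Sigma^(-1) (x - mu))
# with np.linalg.det for |Sigma| and np.linalg.inv for Sigma^(-1); a
# Cholesky-based evaluation would be faster and numerically safer for large D.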
cdef class MultivariateGaussianMixture :
cdef int k, retries, max_iters
cdef bint kmeans_init, instant_retry
cdef kmeans
cdef np.ndarray mixture_weights
cdef np.ndarray mu, X
cdef np.ndarray responsibilties
cdef np.ndarray covariance_matrices
cdef float log_likelihood, max_log_likelihood
cdef int N, new_ndims
def __cinit__(self, int n_clusters = 5, int retries = 3, int max_iters = 200, bint kmeans_init = True):
self.k = n_clusters
self.retries = retries
self.max_iters = max_iters
self.kmeans_init = kmeans_init
from sealion.unsupervised_clustering import KMeans
self.kmeans = KMeans
self.instant_retry = False
cpdef void _expectation(self, X):
"""here we do the expectation step where we recalculate all responsibilties"""
cdef np.ndarray new_responsibilties = np.zeros((len(X), self.k))
cdef int k_index
cdef double pi_k
cdef np.ndarray mu_k
        cdef np.ndarray sigma_k
try :
for k_index, pi_k, mu_k, sigma_k in zip(range(self.k), self.mixture_weights, self.mu, self.covariance_matrices) :
new_responsibilties[:, k_index][:] = pi_k * stats.multivariate_normal.pdf(X, mean = mu_k, cov = sigma_k)
except Exception :
self.instant_retry = True
# before normalization, find the log likelihood
self.log_likelihood = np.sum(np.log(np.sum(new_responsibilties, axis = 1)))
cdef np.ndarray normalization_factor = np.expand_dims(np.sum(new_responsibilties, axis = 1), 1)
new_responsibilties /= normalization_factor
self.responsibilties = new_responsibilties
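    # E-step recap (editor's comment): each entry of the matrix computed above is
    #     r_nk = pi_k * N(x_n | mu_k, Sigma_k) / sum_j pi_j * N(x_n | mu_j, Sigma_j),
    # the posterior probability that point n came from component k; the row sums
    # used for normalization are the per-point likelihoods whose logs feed
    # self.log_likelihood.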
cpdef void _maximization(self, X):
"""here we update the means, covariance matrices, and mixture weights to increase log-likelihood"""
cdef int k_index
cdef np.ndarray responsibilties_at_k, updated_mu_k, data_point
cdef np.ndarray updated_sigma_k
cdef float r_nk
for k_index in range(self.k) :
responsibilties_at_k = self.responsibilties[:, k_index] # vector of N responsibilities for a given cluster
            N_k = np.sum(responsibilties_at_k) # sum of the dataset's responsibilities to a given cluster
# get updated mu_k
updated_mu_k = np.sum(np.expand_dims(responsibilties_at_k, 1) * X, axis = 0) / N_k
# get updated covariance matrix for k cluster
updated_sigma_k = np.zeros((self.new_ndims, self.new_ndims))
for data_point, r_nk in zip(X, responsibilties_at_k) :
data_point = (np.array(data_point) - np.array(self.mu[k_index])).reshape(-1, 1)
updated_sigma_k += r_nk * np.dot(data_point, data_point.T)
updated_sigma_k /= N_k
# get the updated mixture_weight
updated_pi_k = N_k / len(X)
# update all
self.mixture_weights[k_index] = updated_pi_k
self.covariance_matrices[k_index] = updated_sigma_k
self.mu[k_index] = updated_mu_k
cpdef np.ndarray predict(self, X):
X = np.array(X)
MultivariateGaussianMixture._expectation(self, X)
return np.array(np.apply_along_axis(np.argmax, 1, self.responsibilties))
cpdef np.ndarray soft_predict(self, X):
X = np.array(X)
MultivariateGaussianMixture._expectation(self, X)
return self.responsibilties
cpdef np.ndarray confidence_samples(self, X):
cdef np.ndarray prediction_indices = MultivariateGaussianMixture.predict(self, X)
return np.array([responsibility[chosen_index] for responsibility, chosen_index in zip(self.responsibilties, prediction_indices)])
cpdef void fit(self, X):
X = np.array(X)
self.X = X
self.N, self.new_ndims = X.shape
# now we have to init mu, the covariance matrix, and mixture weight for each cluster
self.mixture_weights = (np.ones(self.k) * 1/self.k)
# kmeans initialization!
cdef kmeans
if self.kmeans_init :
kmeans = self.kmeans(k = self.k)
_ = kmeans.fit_predict(X)
self.mu = kmeans._get_centroids()
else :
self.mu = np.random.randn(self.k, self.new_ndims) # for each k cluster, give the number of dims
self.covariance_matrices = np.array([np.identity(self.new_ndims) for k_index in range(self.k)]) # create an D * D matrix for each k cluster
# don't init responsibilities, calculate them in the first expectation step
cdef dict tries_dict = {} # likelihood : [weights, means, sigmas]
cdef bint converged = False
cdef int trial, num_converge, iteration
cdef old_likelihood
for trial in range(self.retries) :
# time to start doing the iterations of expectation-maximization!
old_likelihood = None # this is the old likelihood
num_converge = 0
for iteration in range(self.max_iters) :
# expectation step, evaluate all responsibilties
MultivariateGaussianMixture._expectation(self, X)
if self.instant_retry :
self.instant_retry = False
break
# now we update (maximization)
MultivariateGaussianMixture._maximization(self, X)
                if old_likelihood is None :
old_likelihood = self.log_likelihood
else :
if (self.log_likelihood - old_likelihood) < 0.001 :
num_converge += 1
if num_converge == 3 :
break # model has converged
# otherwise, keep going
old_likelihood = self.log_likelihood
if num_converge == 3 :
converged = True # it actually converged here, not just went through the maximum amount of iterations
tries_dict[self.log_likelihood] = [self.mixture_weights, self.mu, self.covariance_matrices]
# finally choose the one that did best
self.mixture_weights, self.mu, self.covariance_matrices = tries_dict[max(tries_dict)]
self.max_log_likelihood = max(tries_dict)
if not converged :
# just went through the loops, never actually converged
print("GAUSSIAN MIXTURE MODEL FAILED CONVERGENCE. PLEASE RETRY WITH MORE RETRIES IN THE INIT "
"AND MAKE SURE YOU ARE USING KMEANS_INIT (DEFAULT PARAMETER.)")
cpdef float bic(self):
        return np.log(self.N) * 3 * self.k - 2 * self.max_log_likelihood # max_log_likelihood is already a log-likelihood, so no extra log is needed
cpdef float aic(self):
return 2 * 3 * self.k - 2 * self.max_log_likelihood
cpdef np.ndarray anomaly_detect(self, X, threshold):
# huge thanks to handsonml v2 for showing me how to do this
cdef np.ndarray probabilities = MultivariateGaussianMixture.confidence_samples(self, X)
cdef float prob_threshold = np.percentile(probabilities, threshold)
cdef np.ndarray anomalies = np.ones(len(X), bool)
anomalies[np.where(probabilities < prob_threshold)] = True # the indices of all of the outliers
anomalies[np.where(probabilities >= prob_threshold)] = False
return anomalies
cdef class UnivariateGaussianMixture :
cdef int k, retries, max_iters, N
cdef float log_likelihood, max_log_likelihood
cdef np.ndarray mixture_weights, sigmas, mu, X
cdef np.ndarray responsibilties
cdef bint instant_retry
def __init__(self, n_clusters = 5, retries = 3, max_iters = 200):
self.k = n_clusters
self.retries = retries
self.max_iters = max_iters
self.instant_retry = False
cpdef void _expectation(self, X):
"""here we do the expectation step where we recalculate all responsibilties"""
cdef np.ndarray new_responsibilties = np.zeros((len(X), self.k))
cdef int k_index
cdef float mu_k, sigma_k
try :
for k_index, pi_k, mu_k, sigma_k in zip(range(self.k), self.mixture_weights, self.mu, self.sigmas) :
new_responsibilties[:, k_index] = pi_k * stats.multivariate_normal.pdf(X, mean = mu_k, cov = sigma_k)
except Exception :
self.instant_retry = True
# before normalization, find the log likelihood
self.log_likelihood = np.sum(np.log(np.sum(new_responsibilties, axis = 1)))
cdef np.ndarray normalization_factor = np.expand_dims(np.sum(new_responsibilties, axis = 1), 1)
new_responsibilties /= normalization_factor
self.responsibilties = new_responsibilties
cpdef void _maximization(self, X):
"""here we update the means, covariance matrices, and mixture weights to increase log-likelihood"""
cdef int k_index
cdef np.ndarray responsibilties_at_k
cdef float updated_mu_k, updated_pi_k, updated_sigma_k
for k_index in range(self.k) :
responsibilties_at_k = self.responsibilties[:, k_index] # vector of N responsibilities for a given cluster
            N_k = np.sum(responsibilties_at_k) # sum of the dataset's responsibilities to a given cluster
# get updated mu_k
updated_mu_k = np.sum(responsibilties_at_k * X) / N_k # remember X is 1D
# get the updated sigma over here
X_de_mean = X - self.mu[k_index]
updated_sigma_k = np.sum(responsibilties_at_k * np.power(X_de_mean, 2)) /N_k
# get the updated mixture_weight
updated_pi_k = N_k / len(X)
# update all
self.mixture_weights[k_index] = updated_pi_k
self.sigmas[k_index] = updated_sigma_k
self.mu[k_index] = updated_mu_k
cpdef np.ndarray predict(self, X):
X = np.array(X)
UnivariateGaussianMixture._expectation(self, X)
return np.array(np.apply_along_axis(np.argmax, 1, self.responsibilties))
cpdef np.ndarray soft_predict(self, X):
X = np.array(X)
UnivariateGaussianMixture._expectation(self, X)
return self.responsibilties
cpdef np.ndarray confidence_samples(self, X):
prediction_indices = UnivariateGaussianMixture.predict(self, X)
return np.array([responsibility[chosen_index] for responsibility, chosen_index in zip(self.responsibilties, prediction_indices)])
cpdef void fit(self, X):
X = np.array(X)
self.X = X.flatten()
self.N = len(X)
# now we have to init mu, the covariance matrix, and mixture weight for each cluster
self.mixture_weights = (np.ones(self.k) * 1/self.k)
# kmeans initialization!
self.mu = np.random.randn(self.k) # for each k cluster, give the number of dims
self.sigmas = np.abs(np.random.randn(self.k)) # give the sigma here for that too
# don't init responsibilities, calculate them in the first expectation step
cdef dict tries_dict = {} # likelihood : [weights, means, sigmas]
cdef bint converged = False
cdef old_likelihood
cdef int trial, num_converge, iteration
for trial in range(self.retries) :
# time to start doing the iterations of expectation-maximization!
old_likelihood = None # this is the old likelihood
num_converge = 0
for iteration in range(self.max_iters) :
# expectation step, evaluate all responsibilties
UnivariateGaussianMixture._expectation(self, X)
if self.instant_retry :
self.instant_retry = False
break
# now we update (maximization)
UnivariateGaussianMixture._maximization(self, X)
                if old_likelihood is None :
old_likelihood = self.log_likelihood
else :
if (self.log_likelihood - old_likelihood) < 0.001 :
num_converge += 1
if num_converge == 3 :
break # model has converged
# otherwise, keep going
old_likelihood = self.log_likelihood
if num_converge == 3 :
converged = True # it actually converged here, not just went through the maximum amount of iterations
tries_dict[self.log_likelihood] = [self.mixture_weights, self.mu, self.sigmas]
# finally choose the one that did best
self.mixture_weights, self.mu, self.sigmas = tries_dict[max(tries_dict)]
self.max_log_likelihood = max(tries_dict)
if not converged :
# just went through the loops, never actually converged
print("GAUSSIAN MIXTURE MODEL FAILED CONVERGENCE. PLEASE RETRY WITH MORE RETRIES IN THE INIT "
"AND MAKE SURE YOU ARE USING KMEANS_INIT (DEFAULT PARAMETER.)")
cpdef float bic(self):
        return np.log(self.N) * 3 - 2 * self.max_log_likelihood # max_log_likelihood is already a log-likelihood, so no extra log is needed
cpdef float aic(self):
return 2 * 3 - 2 * self.max_log_likelihood
cpdef np.ndarray anomaly_detect(self, X, threshold):
# huge thanks to handsonml v2 for showing me how to do this
cdef np.ndarray probabilities = UnivariateGaussianMixture.confidence_samples(self, X)
cdef float prob_threshold = np.percentile(probabilities, threshold)
cdef np.ndarray anomalies = np.ones(len(X), bool)
anomalies[np.where(probabilities < prob_threshold)] = True
anomalies[np.where(probabilities >= prob_threshold)] = False
return anomalies
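# Editor's usage sketch (not part of the original source; assumes the module is
# compiled and that sealion's KMeans is importable for the multivariate path):
#
#     >>> import numpy as np
#     >>> X = np.vstack([np.random.randn(100, 2), np.random.randn(100, 2) + 6])
#     >>> gmm = cy_GaussianMixture(n_clusters=2)
#     >>> gmm.fit(X)                   # 2-D data -> multivariate model
#     >>> labels = gmm.predict(X)      # hard cluster assignments
#     >>> probs = gmm.soft_predict(X)  # N x k responsibility matrix
#     >>> gmm.bic(), gmm.aic()         # model-selection scores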
<|end_of_text|>from distutils import cmd, core, version
def import1():
"""
>>> import1() == (cmd, core, version)
True
"""
from distutils import (
cmd,
core, version)
return cmd, core, version
def import2():
"""
>>> import2() == (cmd, core, version)
True
"""
from distutils import (cmd,
core,
version
)
return cmd, core, version
def import3():
"""
>>> import3() == (cmd, core, version)
True
"""
from distutils import (cmd, core,version)
return cmd, core, version
def import4():
"""
>>> import4() == (cmd, core, version)
True
"""
from distutils import cmd, core, version
return cmd, core, version
def typed_imports():
"""
>>> typed_imports()
True
True
an integer is required
Expected type, got int
"""
import sys
import types
cdef long maxunicode
cdef type t
from sys import maxunicode
print(maxunicode == sys.maxunicode)
from types import ModuleType as t
print(t is types.ModuleType)
try:
from sys import version_info as maxunicode
    except TypeError as e:
if getattr(sys, "pypy_version_info", None):
# translate message
if e.args[0].startswith("int() argument must be"):
e = "an integer is required"
print(e)
try:
from sys import maxunicode as t
    except TypeError as e:
print(e)
<|end_of_text|># cython: nonecheck=False
# cython: boundscheck=False
# cython: wraparound=False
# cython: cdivision=True
# cython: language_level=3
# cython: infer_types=False
# cython: embedsignature=True
from ..miscs.dtypes cimport DT_D, DT_UL, DT_ULL
cdef DT_D get_mean_prt(
const DT_D[::1] in_arr,
const DT_UL[::1] bool_arr,
const DT_UL off_idx,
) nogil
cdef DT_D get_ln_mean_prt(
const DT_D[::1] in_arr,
const DT_UL[::1] bool_arr,
const DT_UL off_idx,
) nogil
cdef DT_D get_demr_prt(
const DT_D[::1] x_arr,
const DT_UL[::1] bool_arr,
const DT_D mean_ref,
const DT_UL off_idx,
) nogil
cdef DT_D get_ln_demr_prt(
const DT_D[::1] x_arr,
const DT_UL[::1] bool_arr,
const DT_D ln_mean_ref,
const DT_UL off_idx,
) nogil
cdef DT_D get_ns_prt(
const DT_D[::1] x_arr,
const DT_D[::1] y_arr,
const DT_UL[::1] bool_arr,
const DT_D demr,
const DT_UL off_idx,
) nogil
cdef DT_D get_ln_ns_prt(
const DT_D[::1] x_arr,
const DT_D[::1] y_arr,
const DT_UL[::1] bool_arr,
const DT_D demr,
const DT_UL off_idx,
) nogil
cdef DT_D get_variance_prt(
const DT_D in_mean,
const DT_D[::1] in_arr,
const DT_UL[::1] bool_arr,
const DT_UL off_idx,
) nogil
cdef DT_D get_covariance_prt(
const DT_D in_mean_1,
const DT_D in_mean_2,
const DT_D[::1] in_arr_1,
const DT_D[::1] in_arr_2,
const DT_UL[::1] bool_arr,
const DT_UL off_idx,
) nogil
cdef DT_D _get_pcorr_prt(
const DT_D in_arr_1_std_dev,
const DT_D in_arr_2_std_dev,
const DT_D arrs_covar
) nogil
cdef DT_D get_kge_prt(
const DT_D[::1] act_arr,
const DT_D[::1] sim_arr,
const DT_UL[::1] bool_arr,
const DT_D act_mean,
const DT_D act_std_dev,
const DT_UL off_idx,
) nogil
cdef DT_D get_pcorr_coeff_prt(
const DT_D[::1] x_arr,
const DT_D[::1] y_arr,
const DT_UL[::1] bool_arr,
const DT_UL off_idx,
) nogil
cdef DT_D get_sum_sq_diff_prt(
const DT_D[::1] x_arr,
const DT_D[::1] y_arr,
const DT_UL[::1] bool_arr,
const DT_UL off_idx,
) nogil
cdef DT_D get_ln_sum_sq_diff_prt(
const DT_D[::1] x_arr,
const DT_D[::1] y_arr,
const DT_UL[::1] bool_arr,
const DT_UL off_idx,
) nogil
cdef void cmpt_resampled_arr_prt(
const DT_D[::1] ref_arr,
DT_D[::1] resamp_arr,
const DT_ULL[::1] tags_arr,
const DT_UL[::1] bools_arr,
) nogil
<|end_of_text|># cython: cdivision=True
import cython
import numpy as np
cimport numpy as np
@cython.boundscheck(False)
@cython.wraparound(False)
def affine_map(unsigned int geomdim, np.ndarray[np.float64_t, ndim=2] box not None, np.ndarray[np.float64_t] x not None, np.ndarray[np.float64_t] y not None):
"""Map from [0,1]^d to box."""
# cdef np.ndarray[double] y = np.zeros_like(x)
    cdef double w, p
    cdef Py_ssize_t d, i, N
    N = x.shape[0] // geomdim
for d in range(geomdim):
w = box[d,1] - box[d,0]
p = box[d,0]
for i in range(N):
            y[i*geomdim+d] = p + x[i*geomdim+d] * w
@cython.boundscheck(False)
@cython.wraparound(False)
def affine_map_inverse(unsigned int geomdim, np.ndarray[np.float64_t, ndim=2] box not None, np.ndarray[np.float64_t] y not None, np.ndarray[np.float64_t] x not None):
"""Map from box to [0,1]^d."""
# cdef np.ndarray[double] x = np.zeros_like(y)
    cdef double w, p
    cdef Py_ssize_t d, i, N
    N = x.shape[0] // geomdim
for d in range(geomdim):
w = box[d,1] - box[d,0]
p = box[d,0]
for i in range(N):
            x[i*geomdim+d] = (y[i*geomdim+d] - p) / w
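# Editor's usage sketch (assumption, not from the original source): map three
# interleaved 2-D points from the unit square into the box [1,3] x [-1,1].
#
#     >>> import numpy as np
#     >>> box = np.array([[1.0, 3.0], [-1.0, 1.0]])
#     >>> x = np.array([0.0, 0.0, 0.5, 0.5, 1.0, 1.0])
#     >>> y = np.zeros_like(x)
#     >>> affine_map(2, box, x, y)
#     >>> y
#     array([ 1., -1.,  2.,  0.,  3.,  1.])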
<|end_of_text|>from libc.stdlib cimport malloc, free
from libc.string cimport memcpy, memmove
cdef class TCyBuffer(object):
    def __cinit__(self, buf_size):
self.buf = <char*>malloc(buf_size)
self.buf_size = buf_size
self.cur = 0
self.data_size = 0
def __dealloc__(self):
        if self.buf != NULL:
free(self.buf)
self.buf = NULL
cdef void move_to_start(self):
memmove(self.buf, self.buf + self.cur, self.data_size)
self.cur = 0
cdef void clean(self):
self.cur = 0
self.data_size = 0
cdef int write(self, int sz, const char *value):
cdef:
int cap = self.buf_size - self.data_size
int remain = cap - self.cur
if sz <= 0:
return 0
if remain < sz:
self.move_to_start()
# recompute remain spaces
remain = cap - self.cur
if remain < sz:
                if self.grow(sz - remain + self.buf_size) != 0:
return -1
memcpy(self.buf + self.cur + self.data_size, value, sz)
self.data_size += sz
return sz
cdef read_trans(self, trans, int sz, char *out):
cdef int cap, new_data_len
if sz <= 0:
return 0
if self.data_size < sz:
if self.buf_size < sz:
                if self.grow(sz) != 0:
return -2 # grow buffer error
cap = self.buf_size - self.data_size
new_data = trans.read(cap)
new_data_len = len(new_data)
while new_data_len + self.data_size < sz:
more = trans.read(cap - new_data_len)
more_len = len(more)
if more_len <= 0:
return -1 # end of file error
new_data += more
new_data_len += more_len
if cap - self.cur < new_data_len:
self.move_to_start()
memcpy(self.buf + self.cur + self.data_size, <char*>new_data,
new_data_len)
self.data_size += new_data_len
memcpy(out, self.buf + self.cur, sz)
self.cur += sz
self.data_size -= sz
return sz
cdef int grow(self, int min_size):
if min_size <= self.buf_size:
return 0
cdef int multiples = min_size / self.buf_size
        if min_size % self.buf_size != 0:
multiples += 1
cdef int new_size = self.buf_size * multiples
cdef char *new_buf = <char*>malloc(new_size)
if new_buf == NULL:
return -1
memcpy(new_buf + self.cur, self.buf + self.cur, self.data_size)
free(self.buf)
self.buf_size = new_size
self.buf = new_buf
return 0
cdef class CyTransportBase(object):
cdef c_read(self, int sz, char* out):
pass
cdef c_write(self, char* data, int sz):
pass
cdef c_flush(self):
pass
def clean(self):
pass
@property
def sock(self):
if not self.trans:
return
        return getattr(self.trans, 'sock', None)
cdef get_string(self, int sz):
cdef:
char out[STACK_STRING_LEN]
char *dy_out
if sz > STACK_STRING_LEN:
dy_out = <char*>malloc(sz)
try:
size = self.c_read(sz, dy_out)
return dy_out[:size]
finally:
free(dy_out)
else:
size = self.c_read(sz, out)
return out[:size]
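# Design note (editor's addition): TCyBuffer amortizes transport I/O --
# `write` compacts buffered bytes with memmove before growing, `grow` always
# enlarges to a whole multiple of the original buffer size, and `read_trans`
# serves reads from the buffer first, pulling from the wrapped transport only
# when fewer than `sz` bytes are buffered. For example, a TCyBuffer(4096)
# asked to grow to hold 10000 bytes allocates 3 * 4096 = 12288 bytes at once.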
<|end_of_text|>"""Cython header for C/C++ standard library functionailty."""
cdef extern from 'stdlib.h':
double atof(char*)
cdef extern from 'string.h':
ctypedef char const_char "const char"
char* strtok(char*, char*)
char* strcpy(char*, char*)
void* memcpy(void*, void*, size_t)
cdef extern from "<string>" namespace "std":
cdef cppclass string:
string()
string(char *)
char * c_str()
<|end_of_text|>from libcpp cimport bool
from libcpp.string cimport string
from cython.operator cimport dereference as d
cimport pypopt.ipopt as ip
cdef class Journalist:
cdef ip.SmartPtr[ip.Journalist] c_journalist
def printf(self, level, category, fmt, *args, **kwargs):
py_bytes = fmt.format(*args, **kwargs).encode('utf-8')
cdef const char *c_str = py_bytes
cdef ip.EJournalCategory c_category = category
cdef ip.EJournalLevel c_level = level
d(self.c_journalist).Printf(c_level, c_category, c_str)
def print_string_over_lines(self, level, category, ip.Index indent_level,
ip.Index max_length, str line):
cdef string c_line = line.encode('utf-8')
cdef ip.EJournalCategory c_category = category
cdef ip.EJournalLevel c_level = level
d(self.c_journalist).PrintStringOverLines(
c_level, c_category, indent_level, max_length, c_line,
)
def printf_indented(self, level, category, ip.Index indent_level, fmt, *args, **kwargs):
py_bytes = fmt.format(*args, **kwargs).encode('utf-8')
cdef const char *c_str = py_bytes
cdef ip.EJournalCategory c_category = category
cdef ip.EJournalLevel c_level = level
d(self.c_journalist).PrintfIndented(c_level, c_category, indent_level, c_str)
def can_produce_output(self, level, category):
cdef ip.EJournalCategory c_category = category
cdef ip.EJournalLevel c_level = level
return d(self.c_journalist).ProduceOutput(c_level, c_category)
def flush_buffer(self):
d(self.c_journalist).FlushBuffer()
def add_journal(self, Journal journal):
cdef ip.SmartPtr[ip.Journal] c_journal = journal.c_journal
return d(self.c_journalist).AddJournal(c_journal)
def add_file_journal(self, str location_name, str fname, default_level=None):
cdef ip.EJournalLevel c_level = ip.EJournalLevel.J_WARNING
if default_level is not None:
c_level = default_level
cdef string c_location_name = location_name.encode('utf-8')
cdef string c_fname = fname.encode('utf-8')
c_journal = d(self.c_journalist).AddFileJournal(c_location_name, c_fname, c_level)
if ip.IsNull(c_journal):
return None
journal = Journal()
journal.c_journal = c_journal
return journal
def get_journal(self, str name):
cdef string c_name = name.encode('utf-8')
c_journal = d(self.c_journalist).GetJournal(c_name)
if ip.IsNull(c_journal):
return None
journal = Journal()
journal.c_journal = c_journal
return journal
def delete_all_journals(self):
d(self.c_journalist).DeleteAllJournals()
cpdef enum EJournalLevel:
J_INSUPPRESSIBLE=-1,
J_NONE=0,
J_ERROR,
J_STRONGWARNING,
J_SUMMARY,
J_WARNING,
J_ITERSUMMARY,
J_DETAILED,
J_MOREDETAILED,
J_VECTOR,
J_MOREVECTOR,
J_MATRIX,
J_MOREMATRIX,
J_ALL,
J_LAST_LEVEL
cpdef enum EJournalCategory:
J_DBG=0,
J_STATISTICS,
J_MAIN,
J_INITIALIZATION,
J_BARRIER_UPDATE,
J_SOLVE_PD_SYSTEM,
J_FRAC_TO_BOUND,
J_LINEAR_ALGEBRA,
J_LINE_SEARCH,
J_HESSIAN_APPROXIMATION,
J_SOLUTION,
J_DOCUMENTATION,
J_NLP,
J_TIMING_STATISTICS,
J_USER_APPLICATION ,
J_USER1 ,
J_USER2 ,
J_USER3 ,
J_USER4 ,
J_USER5 ,
J_USER6 ,
J_USER7 ,
J_USER8 ,
J_USER9 ,
J_USER10 ,
J_USER11 ,
J_USER12 ,
J_USER13 ,
J_USER14 ,
J_USER15 ,
J_USER16 ,
J_USER17 ,
J_LAST_CATEGORY
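# Editor's usage sketch (an assumption -- how a live Journalist is constructed
# depends on how pypopt allocates c_journalist): route Ipopt output at
# ITERSUMMARY level and below to a log file.
#
#     >>> j = ...  # a Journalist whose c_journalist has been set by pypopt
#     >>> j.add_file_journal("console", "ipopt.log",
#     ...                    default_level=EJournalLevel.J_ITERSUMMARY)
#     >>> j.printf(EJournalLevel.J_WARNING, EJournalCategory.J_MAIN,
#     ...          "solved in {} iterations", 42)
#     >>> j.flush_buffer()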
<|end_of_text|># mode: error
DEF t = (1,2,3)
DEF t_const = (1,t,2)
DEF t_non_const = (1,[1,2,3],3,t[4])
x = t_non_const
_ERRORS = u"""
5:32: Error in compile-time expression: IndexError: tuple index out of range
7:4: Invalid type for compile-time constant: [1, 2, 3] (type list)
"""
<|end_of_text|>from libc.stdio cimport printf
import random
import numpy as np
cdef list create_random_matrix(int size):
cdef list m = []
cdef int i
cdef int n
for i in range(size):
m.append([])
for _ in range(size):
            n = random.randint(1, 9)
m[i].append(n)
return m
cdef int print_matrix(list m):
cdef list row
cdef int n
for row in m:
printf("|")
for n in row:
printf(" %u ", n)
printf("|\n")
cdef list transf_signs(list l, int row):
cdef int i
for i in range(len(l)):
        if (i + row) % 2 != 0:
l[i] *= -1
return l
cdef list reduce_matrix(list m, int best_row, int col_ignore):
cdef reduced_matrix = []
cdef int i
cdef int j
for i in range(len(m[0])):
if i == best_row:
continue
else:
reduced_matrix.append([])
for j in range(len(m[i])):
                if j != col_ignore:
reduced_matrix[i - 1].append(m[i][j])
return reduced_matrix
cdef int determinat(list m):
if len(m[0]) == 2:
return (m[0][0] * m[1][1]) - (m[1][0] * m[0][1])
cdef int best_row = 0
# list with the signs transform
cdef list l_trans = transf_signs(m[best_row], best_row)
cdef list l_reduced_m = []
cdef int i
for i in range(len(l_trans)):
l_reduced_m.append(reduce_matrix(m, best_row, i))
cdef int result = 0
for i in range(len(l_trans)):
result += l_trans[i] * determinat(l_reduced_m[i])
return result
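# Worked example of the cofactor (Laplace) expansion above (editor's comment):
# for [[1, 2], [3, 4]] the 2x2 base case returns 1*4 - 3*2 = -2. For a 3x3
# matrix, transf_signs turns the first row into (+a00, -a01, +a02), each entry
# multiplies the determinant of its 2x2 minor built by reduce_matrix, and the
# three products are summed -- recursing until the 2x2 base case is hit.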
cpdef int main():
    cdef int size = int(input("Size of the matrix: "))
cdef list m = create_random_matrix(size)
# m2 = [[2,2], [2,2]]
print_matrix(m)
m_np = np.array(m)
cdef int n = determinat(m)
printf("|a| = %u ", n)
r_np = np.linalg.det(m_np)
print(r_np)
return 0<|end_of_text|># this script is to test a simple brute force minimization of the free energy with respect to lattice bond lengths
# haven't changed anything from test_semiclassical_ssh.py yet
import networkx as nx
import numpy as np
cimport numpy as np
import sys
from scipy.sparse.linalg import eigs
import scipy.linalg as scilin
import numpy.linalg as lin
import matplotlib.pyplot as plt
import argparse
from latticeutil import create_inhomo_lattice
cimport cython
DTYPE = np.complex128
ctypedef np.complex128_t DTYPE_t
# evaluate action
@cython.boundscheck(False) # turn off bounds-checking for entire function
@cython.wraparound(False) # Deactivate negative indexing.
cdef complex objective_eval(float beta, complex[:] myphiqs, complex[:] lat_disp, complex[:] elec_disp, complex[:,:,:] omega_1s, complex[:,:,:] omega_2s, int num, long[:, :] I, np.ndarray[DTYPE_t, ndim=2] xi):
# one doesn't have to explicitly construct the operator G, just the operator xi, since we've summed over Matsubara frequencies
# compute Tr ln (G^{-1}_{el-ph})
cdef complex elphterm = 0
cdef complex j = 1j
for eta in range(num):
for etap in range(num):
xi[eta, etap] = 0
if (eta == etap):
xi[eta, etap] = xi[eta, etap] + elec_disp[eta]
elphterm = 0
for gamma in range(num): ### TODO check if 1j multiplication belongs here - first indication is yes, it does
elphterm = elphterm + j*((myphiqs[gamma].conjugate())*omega_1s[eta, etap, gamma] - myphiqs[gamma]*omega_2s[eta, etap, gamma])
xi[eta, etap] = xi[eta, etap] - elphterm
cdef complex trlnGinv = np.trace(scilin.logm(np.add(I, scilin.expm(-beta*xi)))) ### TODO - if no electrons, trlnGinv should reduce to ln(I) = 0, not ln(2I)
cdef complex ph_term = 0
for q in range(num):
ph_term += (myphiqs[q].conjugate())*(lat_disp[q])*myphiqs[q]
return (ph_term + trlnGinv) ### originally had minus, but maybe Matsubara sum changes it
@cython.boundscheck(False) # turn off bounds-checking for entire function
def start(args):
dims = args.dims
cdef int dimi = int(dims[0])
cdef int dimj = int(dims[1])
cdef int dimk = int(dims[2])
disorder_type = args.disorder_type[0] # default disorder if I think it'll help convergence
cdef float disorder_strength = float(args.disorder_strength[0])
outname = args.outfile[0]
cdef float alpha = float(args.alpha[0])
cdef float t = float(args.t[0])
cdef float K = float(args.K[0])
cdef float C = float(args.C[0])
cdef float Temp = float(args.Tem[0])
choice = int(args.which[0])
cdef float m = 1
per = True
    if (int(args.periodic[0]) != 1):
per = False
# create graph
    inits, G = create_inhomo_lattice(dimi, dimj, dimk, None, 1, periodic=per)
    inits_w, G_w = create_inhomo_lattice(dimi, dimj, dimk, "Alternating", disorder_strength, periodic=per)
#####
# create graph Laplacian for lattice and find its eigenvectors and eigenvalues
L_at_py = (K/2)*(nx.laplacian_matrix(G, weight='weight').real).toarray()
cdef np.ndarray L_at = np.array(L_at_py, dtype=DTYPE)
lat_eigvalspy, lat_eigvecspy = lin.eig(L_at)
idxs = lat_eigvalspy.argsort()[::-1]
lat_eigvalspy = np.array(lat_eigvalspy, dtype=DTYPE)
cdef np.ndarray[DTYPE_t, ndim=1] lat_eigvals = lat_eigvalspy[idxs]
cdef np.ndarray lat_eigvecs = (lat_eigvecspy[:, idxs])
zero_check_idxs = np.abs(lat_eigvals) < 1e-14
lat_eigvals[zero_check_idxs] = 0
# create graph Laplacian for electrons; will need to add chemical potential as necessary, make other modifications
H_TBpy = t*(np.real(nx.laplacian_matrix(G, weight=None))).toarray()
cdef np.ndarray[DTYPE_t, ndim=2] H_TB = np.array(H_TBpy, dtype=DTYPE)
el_energiespy, el_eigvecs = lin.eig(H_TB)
idxs = el_energiespy.argsort()[::1]
el_energiespy = np.array(el_energiespy, dtype=DTYPE)
cdef np.ndarray[DTYPE_t, ndim=1] el_eigvals = el_energiespy[idxs]
zero_check_idxs = np.abs(el_eigvals) < 1e-13
el_eigvals[zero_check_idxs] = 0
#####
#####
# initialize phiqs to be zero for now; may do otherwise when exploring metatsbale states
#import numpy.random as rand
cdef np.ndarray[DTYPE_t, ndim=1] phiqs = np.zeros((G.number_of_nodes()), dtype=DTYPE) ##
cdef complex[:] phiqs_view = phiqs
#phiqs = rand.randn(G.number_of_nodes(), 1)/50
#phiqs = np.array(phiqs, dtype=complex)
#####
# initialize temperature and whatnot
cdef float kb = 1
cdef Py_ssize_t N = sum([1 for e in el_eigvals]) # one inefficient way of counting number of eigenvectors
#####
cdef Py_ssize_t M = (lat_eigvecs[:, 0]).size
cdef complex[:] el_eigvals_view = el_eigvals
cdef complex[:] lat_eigvals_view = lat_eigvals
# compute omega function elements
cdef np.ndarray[DTYPE_t, ndim=3] omega_1 = np.zeros((N, N, N), dtype=DTYPE)
cdef np.ndarray[DTYPE_t, ndim=3] omega_2 = np.zeros((N, N, N), dtype=DTYPE)
cdef complex[:,:,:] omega_1_view = omega_1
cdef complex[:,:,:] omega_2_view = omega_2
    cdef complex[:,:] el_eigvecs_view = el_eigvecs
cdef complex[:,:] lat_eigvecs_view = lat_eigvecs
import time
    startt = time.perf_counter()  # time.clock() was removed in Python 3.8
cdef complex eps_ijetaetap = 0
# TODO - it appears these omegas are not generated correctly
cdef Py_ssize_t eta, etap, i, j, gamma
print("Beginning omega function generation")
for eta in range(N):
for etap in range(N):
for i in range(M):
for j in range(M):
eps_ijetaetap = ((el_eigvecs_view[i, eta]).conjugate())*el_eigvecs_view[j, etap]
for gamma in range(N):
omega_1_view[eta, etap, gamma] = omega_1_view[eta, etap, gamma] + alpha*((lat_eigvecs_view[i, gamma] - lat_eigvecs_view[j, gamma]).conjugate())*eps_ijetaetap
omega_2_view[eta, etap, gamma] = omega_2_view[eta, etap, gamma] + alpha*(lat_eigvecs_view[i, gamma] - lat_eigvecs_view[j, gamma])*eps_ijetaetap
print("Time for omega function generation, ", time.clock() - startt)
#####
# Begin Loop
cdef np.ndarray[DTYPE_t, ndim=2] xi = np.zeros((N,N), dtype=DTYPE)
    cdef np.ndarray I = np.eye(N, dtype=np.int_)  # np.int was removed from NumPy; np.int_ matches the long[:,:] view below
cdef long[:,:] I_view = I
cdef complex curr_action = objective_eval(1/(kb*Temp), phiqs_view, lat_eigvals_view, el_eigvals_view, omega_1_view, omega_2_view, N, I_view, xi)
cdef complex min_action = curr_action
print("Starting Action:", curr_action)
cdef np.ndarray min_phiqs = np.empty_like(phiqs, dtype=DTYPE)
min_phiqs[:] = phiqs
cdef complex[:] min_phiqs_view = min_phiqs
print("Starting Phiqs:", phiqs)
np.set_printoptions(linewidth=135)
cdef double gam = 1
cdef complex jay = 1j
mingam = 1e-1
    for i in range(4): # 4 passes of this hackjob of a line search
print("Pass: ", i)
for idx1 in range(N):
gam = 0.2
flag1 = False
while (gam > mingam):
if (flag1):
gam = gam/4
####### we have a search direction; lat_eigvecs[:, idx1]. We would like to minimize our free energy along that direction before continuing in the next direction.
phiqs_view[idx1] = min_phiqs_view[idx1]
phiqs_view[idx1] = phiqs_view[idx1] - gam*(jay)**(2*i+1) # we let it minimize across both real and imaginary parts of phi
curr_action = objective_eval(1/(kb*Temp), phiqs_view, lat_eigvals_view, el_eigvals_view, omega_1_view, omega_2_view, N, I_view, xi)
if ((curr_action.real) - (min_action.real) < -1e-8):
print("Action", curr_action)
print("Accepted: ", idx1, " With Stepsize:", gam*(jay)**(2*i+1))
min_action = curr_action
min_phiqs_view[idx1] = phiqs_view[idx1]
flag1 = False
else:
flag1 = True
##### End Loop
min_phiqs[np.abs(min_phiqs) < 1e-14] = 0
##### Print Stuff
print("Minimum action", min_action)
print("Printing and saving Minimum phiqs setup")
print(min_phiqs)
name=("K%f-t%f-alpha%f-dims-%d-%d-%d-periodic-%d-Temp-%f" % (K, t, alpha, dimi, dimj, dimk, per, Temp))
np.save(name, min_phiqs)
#####
##### Compute average lattice displacements X_i
# We can now compute the average lattice displacements from the above and compute the effective noninteracting electron DoS using a modified Hamiltonian
##### inverse graph Fourier transform to get real-space components
phirs = np.zeros((N,1), dtype=complex)
for q, phi_q in enumerate(min_phiqs):
np.add(phirs[:], np.reshape(min_phiqs[q]*lat_eigvecs[:, q], np.shape(phirs)), out=phirs[:])
#####
# Perhaps now that I have phi(l, tau=0) I must convert this to average lattice displacements. Maybe <x_i> = <phi_i e^{-phi_i}>?
disps = np.zeros((N, 1), dtype=complex)
for r, phi_r in enumerate(phirs):
#disps[r] = phirs[r] * np.exp(np.conj(phirs[r])*phirs[r]) # weight
### TODO - not sure how to compute it, but need an imaginary unit??? supposed to be plus below, not minus
disps[r] = 1j*(phirs[r] - (phirs[r]).conjugate()) # lattice displacement is sum of phi and phibar
print("Real-space values of phi_l, disps")
print(phirs)
print(disps)
##### Modify TB Hamiltonian with lattice changes X_i - X_j
H_TB = np.array(H_TB, dtype=complex)
for i, rowi in enumerate(H_TB):
for j, colj in enumerate(H_TB):
        if (i != j):
            if (H_TB[i, j] != 0):
if (i > j):
H_TB[i, j] = H_TB[i, j] + alpha*(disps[i] - disps[j])
H_TB[j, i]= H_TB[i, j]
#v = v[:, idx]
### testing purposes, do weighted laplacian
w, v = lin.eig(H_TB)
from quickdos import delta, dos
oms = w
#oms = np.sqrt(np.abs(oms))
Es = np.linspace(min(oms) - 0.5, max(oms) + 0.5, 400)
Es2 = np.linspace(min(w) - 0.5, max(w) + 0.5, 400)
DOS = [dos(oms, E) for E in Es]
DOS2 = [dos(w, E) for E in Es2]
plt.figure(1)
plt.plot(Es, DOS, label="Density of States of TB Ham")
plt.title("Optimized SSH DOS", fontsize=24)
plt.xlabel("Energy", fontsize=22)
plt.ylim(bottom=0)
plt.xticks(fontsize=18)
plt.legend(fontsize=20)
fign = name + ".png"
plt.savefig(fign)
#plt.figure(2)
#plt.plot(Es2, DOS2, label="Density of States of Graph Laplacian")
#plt.xlabel("Energy", fontsize=22)
#plt.ylim(bottom=0)
#plt.xticks(fontsize=18)
#plt.legend(fontsize=20)
plt.show()
# want to get the eigenvectors of this graph and observe the (1) most common states (2) states with highest eigenvalue?
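# Editor's driver sketch (an assumption -- the real argparse parser lives in
# the calling script and is not shown here): `start` expects a namespace whose
# fields are 1-element sequences, except `dims` which holds three values.
#
#     >>> import argparse
#     >>> args = argparse.Namespace(
#     ...     dims=(4, 4, 1), disorder_type=["Alternating"],
#     ...     disorder_strength=[0.1], outfile=["out"], alpha=[1.0], t=[1.0],
#     ...     K=[1.0], C=[0.0], Tem=[0.05], which=[0], periodic=[1])
#     >>> start(args)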
<|end_of_text|># cython: cdivision = True
# cython: wraparound = False
# cython: boundscheck = False
import numpy as np
from libc.math cimport exp, log
cpdef double _sgd_update(X, int[:] y, double w0, double[:] w, double[:, :] v,
int n_factors, double learning_rate, double reg_w, double reg_v):
cdef:
int i, index, feature, factor
double loss = 0.0, pred, loss_gradient, v_gradient, term
int n_samples = X.shape[0], n_features = X.shape[1]
double[:] data = X.data
int[:] indptr = X.indptr, indices = X.indices
double[:] summed = np.zeros(n_factors) # memset
for i in range(n_samples):
# n_iter += 1
# learning_rate /= (1 + learning_rate * lambda_t * n_iter) # move to the back
pred = _predict_instance(data, indptr, indices, w0, w, v, n_factors, i)
# calculate loss and its gradient
loss += _log_loss(pred, y[i])
loss_gradient = -y[i] / (exp(y[i] * pred) + 1.0)
# other people have computed the gradient in a different formula, haven't
# compared which one is faster or more numerically stable
# https://github.com/mathewlee11/lmfm/blob/master/lmfm/sgd_fast.pyx#L130
# loss_gradient = y[i] * ((1.0 / (1.0 + exp(-y[i] * pred))) - 1.0)
# update bias/intercept term
| Cython |
w0 -= learning_rate * loss_gradient
# update weight
for index in range(indptr[i], indptr[i + 1]):
feature = indices[index]
w[feature] -= learning_rate * (loss_gradient * data[index] + 2 * reg_w * w[feature])
# update feature factors
# needs re-factoring, as the summed part is a duplicated computation
for factor in range(n_factors):
for index in range(indptr[i], indptr[i + 1]):
feature = indices[index]
term = v[factor, feature] * data[index]
summed[factor] += term
for factor in range(n_factors):
for index in range(indptr[i], indptr[i + 1]):
feature = indices[index]
term = summed[factor] - v[factor, feature] * data[index]
v_gradient = loss_gradient * data[index] * term
v[factor, feature] -= learning_rate * (v_gradient + 2 * reg_v * v[factor, feature])
return loss
cpdef double _predict_instance(double[:] data, int[:] indptr, int[:] indices,
double w0, double[:] w, double[:, :] v, int n_factors, int i):
"""predicting a single instance"""
cdef:
int index, feature, factor
double pred = w0, term = 0.0
double[:] summed = np.zeros(n_factors) # memset
double[:] summed_squared = np.zeros(n_factors)
# linear output w * x
for index in range(indptr[i], indptr[i + 1]):
feature = indices[index]
pred += w[feature] * data[index]
# factor output
for factor in range(n_factors):
for index in range(indptr[i], indptr[i + 1]):
feature = indices[index]
term = v[factor, feature] * data[index]
summed[factor] += term
summed_squared[factor] += term * term
pred += 0.5 * (summed[factor] * summed[factor] - summed_squared[factor])
return pred
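# Editor's note: the factor loop above uses the O(k * nnz) identity from
# Rendle's factorization machines paper,
#     sum_{i<j} <v_i, v_j> x_i x_j
#         = 0.5 * sum_f [ (sum_i v_{i,f} x_i)^2 - sum_i (v_{i,f} x_i)^2 ],
# which is why only `summed` and `summed_squared` are accumulated per factor
# instead of looping over all feature pairs.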
cdef double _log_loss(double pred, double y):
"""
negative log likelihood of the
current prediction and label, y.
"""
# potential ways of speeding this part
# https://github.com/coreylynch/pyFM/blob/master/pyfm_fast.pyx#L439
return log(exp(-pred * y) + 1.0)
<|end_of_text|>STUFF = 'hi'
import numpy as np
cdef extern from "sofa.h":
int iauGd2gc ( int n, double elong, double phi, double height,
double xyz[3])
def py_iauGd2gc(int n, double elong, double phi, double height, return_status=False):
"""Wrapper for iauGd2gc module (`SOFA Documentation <http://www.iausofa.org/current_C.html>`_.). Transform geodetic coordinates to geocentric using the specified reference ellipsoid.
Args:
n (int): Ellipsoid identifier.
elong (float): Longitude with East positive (radians).
phi (float): Geodetic latitude (radians).
height (float): Geodetic height above ellipsoid (m).
return_status (bool, optional): Choose whether or not to return the SOFA validation status.
Returns:
(Numpy Array or tuple):
* If return_status = False --> xyz -- (1x3) geocentric vector (m)
* If return_status = True --> The tuple (xyz, status).
.. note::
The values of status are:
* 0 = OK
* -1 = illegal identifier
* -2 = illegal case
"""
#Initialise output matrix
cdef double xyz[3]
#Do the C routine, returning the status
status = iauGd2gc(n, elong, phi, height, xyz)
#return tuple of xyz array and the status
xyz = np.array(xyz)
if return_status:
return xyz, status
else:
return xyz<|end_of_text|># """
# Search images for tagged objects via a local Tagbox instance.
# For more details about this platform, please refer to the documentation at
# https://home-assistant.io/components/image_processing.tagbox
# This file is stolen from @robmarkcole's repo
# """
# import base64
# import requests
# import logging
# import voluptuous as vol
# from homeassistant.core import (
# callback, split_entity_id)
# import homeassistant.helpers.config_validation as cv
# from homeassistant.components.image_processing import (
# PLATFORM_SCHEMA, ImageProcessingEntity, ATTR_CONFIDENCE, CONF_CONFIDENCE,
# CONF_SOURCE, CONF_ENTITY_ID, CONF_NAME)
# from homeassistant.const import (
# ATTR_ENTITY_ID, ATTR_NAME, CONF_IP_ADDRESS, CONF_PORT)
# from homeassistant.util.async_ import run_callback_threadsafe
# _LOGGER = logging.getLogger(__name__)
# CLASSIFIER = 'tagbox'
# EVENT_DETECT_TAG = 'image_processing.detect_tag'
# TIMEOUT = 9
# PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
# vol.Required(CONF_IP_ADDRESS): cv.string,
# vol.Required(CONF_PORT): cv.port,
# })
# def encode_image(image):
# """base64 encode an image stream."""
# base64_img = base64.b64encode(image).decode('ascii')
# return base64_img
# def get_matched_tags(tags, confidence):
# """Return the name and rounded confidence of matched tags."""
# return {tag['name']: tag['confidence']
# for tag in tags if tag['confidence'] > confidence}
# def parse_tags(api_tags):
# """Parse the API tag data into the format required."""
# parsed_tags = []
# for entry in api_tags:
# tag = {}
# tag[ATTR_NAME] = entry['tag']
# tag[ATTR_CONFIDENCE] = round(100.0*entry['confidence'], 2)
# parsed_tags.append(tag)
# return parsed_tags
# def post_image(url, image):
# """Post an image to the classifier."""
# try:
# response = requests.post(
# url,
# json={"base64": encode_image(image)},
# timeout=TIMEOUT
# )
# return response
# except requests.exceptions.ConnectionError:
# _LOGGER.error("ConnectionError: Is %s running?", CLASSIFIER)
# def setup_platform(hass, config, add_devices, discovery_info=None):
# """Set up the classifier."""
# entities = []
# for camera in config[CONF_SOURCE]:
# entities.append(ImageProcessingTagEntity(
# config[CONF_IP_ADDRESS],
# config[CONF_PORT],
# camera[CONF_ENTITY_ID],
# camera.get(CONF_NAME),
# config[CONF_CONFIDENCE],
# ))
# add_devices(entities)
# class ImageProcessingTagEntity(ImageProcessingEntity):
# """Perform a tag search via a Tagbox."""
# def __init__(self, ip, port, camera_entity, name, confidence):
# """Init with the IP and PORT"""
# super().__init__()
# self._url_check = "http://{}:{}/{}/check".format(ip, port, CLASSIFIER)
# self._camera = camera_entity
# if name:
# self._name = name
# else:
# camera_name = split_entity_id(camera_entity)[1]
# self._name = "{} {}".format(
# CLASSIFIER, camera_name)
# self._confidence = confidence
# self.tags = []
# self._matched = {}
# def process_image(self, image):
# """Process an image."""
# response = post_image(self._url_check, image)
# if response is not None:
# response_json = response.json()
# if response_json['success']:
# api_tags = response_json['tags'] + response_json['custom_tags']
# tags = parse_tags(api_tags)
# self.process_tags(tags)
# self._matched = get_matched_tags(tags, self.confidence)
# else:
# self.tags = []
# self._matched = {}
# @property
# def confidence(self):
# """Return minimum confidence for send events."""
# return self._confidence
# @property
# def state(self):
# """Return the state of the entity."""
# state = None
# if len(self._matched) > 0:
# return self.tags[0][ATTR_NAME]
# return state
# def process_tags(self, tags):
# """Send event with detected tags and store data."""
# run_callback_threadsafe(
# self.hass.loop, self.async_process_tags, tags).result()
# @callback
# def async_process_tags(self, tags):
# """Send event with detected tags and store data.
# Tags are a dict in follow format:
# [
# {
# ATTR_CONFIDENCE: 80,
# ATTR_NAME: 'people',
# },
# ]
# This method must be run in the event loop.
# """
# # Send events
# for tag in tags:
# tag.update({ATTR_ENTITY_ID: self.entity_id})
# if tag[ATTR_CONFIDENCE] > self.confidence:
# self.hass.async_add_job(
# self.hass.bus.async_fire, EVENT_DETECT_TAG, tag
# )
# # Update entity store
# self.tags = tags
# @property
# def camera_entity(self):
# """Return camera entity id from process pictures."""
# return self._camera
# @property
# def name(self):
# """Return the name of the sensor."""
# return self._name
# @property
# def device_state_attributes(self):
# """Return other details about the sensor state."""
# return {
# 'tags': self.tags,
# 'total_tags': len(self.tags),
# 'matched_tags': self._matched,
# 'total_matched_tags': len(self._matched),
# }<|end_of_text|>"""
Copyright (C) 2013, Enthought Inc
Copyright (C) 2013, Patrick Henaff
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the license for more details.
"""
include '../types.pxi'
from libcpp.vector cimport vector
from libcpp cimport bool
from quantlib.handle cimport shared_ptr, Handle
from .._instrument cimport Instrument
from quantlib.time._calendar cimport BusinessDayConvention, Calendar
from quantlib.time._date cimport Date
from quantlib.time._daycounter cimport DayCounter
from quantlib.time._schedule cimport Schedule
from quantlib._cashflow cimport Leg
cdef extern from 'ql/instruments/swap.hpp' namespace 'QuantLib' nogil:
cdef cppclass Swap(Instrument):
enum Type:
Receiver
Payer
## Swap(Leg& firstLeg,
## Leg& secondLeg)
## Swap(vector[Leg]& legs,
## vector[bool]& payer)
bool isExpired()
Size numberOfLegs()
Date startDate()
Date maturityDate()
Real legBPS(Size j) except +
Real legNPV(Size j) except +
DiscountFactor startDiscounts(Size j) except +
DiscountFactor endDiscounts(Size j) except +
DiscountFactor npvDateDiscount() except +
Leg& leg(Size j) except +
<|end_of_text|>from libcpp.vector cimport vector
ctypedef unsigned char uint8
#TODO: make the path relative to what the client has
cdef extern from "/usr/local/cuda-10.2/include/vector_types.h" :
struct float2:
float x
float y
struct double2:
double x
double y
cdef extern from "../../../compiled/cuda/structures/include/data_structures.cuh" namespace "fdsp":
cdef cppclass GPUArray[T]:
GPUArray(const T* ptr, const vector[size_t]& dimSizes)
GPUArray(const vector[size_t]& dimSizes)
GPUArray(const GPUArray[T]& array)
void Get(T *h_ptr)const
T GetElement(size_t index)const
T* GetPointerToArray()
const T* GetPointerToArrayConst()const
<|end_of_text|>#imports
# cython: profile=True
#cimport cython
cimport dilap.core.vector as dpv
import dilap.core.vector as dpv
#import dp_vector as dpv
from libc.math cimport sqrt
from libc.math cimport cos
from libc.math cimport sin
from libc.math cimport tan
from libc.math cimport hypot
import numpy as np
import matplotlib.pyplot as plt
stuff = 'hi'
# 3d classes/functions
cdef class bbox:
def _center(self):
mx = (self.x.x+self.x.y)/2.0
my = (self.y.x+self.y.y)/2.0
mz = (self.z.x+self.z.y)/2.0
return dpv.vector(mx,my,mz)
def __str__(self):
xstr = (self.x.x,self.x.y)
ystr = (self.y.x,self.y.y)
zstr = (self.z.x,self.z.y)
strr = 'bbox:' + str((xstr,ystr,zstr))
return strr
def __cinit__(self,dpv.vector2d x,dpv.vector2d y,dpv.vector2d z):
self.x = x
self.y = y
self.z = z
#self._edge_data()
# establishes data about the xy projection of this bbox
def _edge_data(self):
cs = [dpv.vector(self.x.x,self.y.x,0),dpv.vector(self.x.y,self.y.x,0),
dpv.vector(self.x.y,self.y.y,0),dpv.vector(self.x.x,self.y.y,0)]
self.corners = cs
self.edgenorms = dpv.edge_normals_xy(self.corners)
self.edgecount = len(self.edgenorms)
self.center = dpv.com(self.corners)
cvs = [dpv.v1_v2_c(self.center,v) for v in self.corners]
cdists = [c.magnitude() for c in cvs]
self.radius = max(cdists)
# modify self.x to encompass proj
cpdef bbox _consume_x(self,dpv.vector2d proj):
if self.x.x > proj.x:
self.x.x = proj.x
if self.x.y < proj.y:
self.x.y = proj.y
return self
# modify self.y to encompass proj
cpdef bbox _consume_y(self,dpv.vector2d proj):
if self.y.x > proj.x:
self.y.x = proj.x
if self.y.y < proj.y:
self.y.y = proj.y
return self
# modify self.z to encompass proj
cpdef bbox _consume_z(self,dpv.vector2d proj):
if self.z.x > proj.x:
self.z.x = proj.x
if self.z.y < proj.y:
self.z.y = proj.y
return self
# modify self to encompass other
cpdef bbox _consume(self,bbox other):
self._consume_x(other.x)
self._consume_y(other.y)
self._consume_z(other.z)
return self
cpdef bint point_inside(self,dpv.vector point):
if not p_in_rng_c(point.x,self.x.x,self.x.y):return 0
if not p_in_rng_c(point.y,self.y.x,self.y.y):return 0
if not p_in_rng_c(point.z,self.z.x,self.z.y):return 0
return 1
cpdef bint intersect_tri(self,list tri):
for p in tri:
if self.point_inside(p):
return 1
return 0
cpdef bint separating_axis(self,bbox other):
cdef bint xlap = overlap_c(self.x,other.x)
cdef bint ylap = overlap_c(self.y,other.y)
cdef bint zlap = overlap_c(self.z,other.z)
return 0 if xlap+ylap+zlap > 0 else 1
cpdef list intersect_tri_filter(bbox bb,list tris,list tpts):
cdef list isected = []
cdef int tcnt = len(tris)
cdef int tdx
for tdx in range(tcnt):
tri = [tpts[tris[tdx][x]] for x in range(3)]
if bb.intersect_tri(tri):
isected.append(tdx)
return isected
cdef bint p_in_rng_c(float p,float x,float y):
if p < x:return 0
if p > y:return 0
return 1
cpdef bint p_in_rng(float p,float x,float y):
return p_in_rng_c(p,x,y)
cdef bbox zero_c():
cdef dpv.vector2d z = dpv.zero2d()
cdef bbox new = bbox(z.copy(),z.copy(),z.copy())
return new
cpdef bbox zero():
return zero_c()
cpdef bbox bb_from_ps(list ps):
xproj = dpv.project_coords(ps,dpv.xhat)
yproj = dpv.project_coords(ps,dpv.yhat)
zproj = dpv.project_coords(ps,dpv.zhat)
bb = bbox(xproj,yproj,zproj)
return bb
cdef bint overlap_c(dpv.vector2d rng1,dpv.vector2d rng2):
if rng1.y < rng2.x:return 0
elif rng2.y < rng1.x:return 0
else:return 1
cpdef bint overlap(dpv.vector2d rng1,dpv.vector2d rng2):
return overlap_c(rng1,rng2)
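# Editor's usage sketch (assumption, not from the original source): build two
# axis-aligned boxes from point sets and test them for separation.
#
#     >>> b1 = bb_from_ps([dpv.vector(0, 0, 0), dpv.vector(2, 1, 1)])
#     >>> b2 = bb_from_ps([dpv.vector(3, 3, 3), dpv.vector(4, 4, 4)])
#     >>> b1.separating_axis(b2)  # 1 here: projections disjoint on every axis
#     1
#     >>> b1._consume(b2)         # grow b1 in place so it also encloses b2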
<|end_of_text|>cimport cython
cimport numpy as np
@cython.boundscheck(False)
@cython.wraparound(False)
cdef (int) calcular_altura_sitio(np.ndarray[int, ndim=1] altura_sitios, int sitio, int L):
cdef int e = 0, d = L - 1, altura = altura_sitios[sitio] + 1
    # set the left and right neighbor indices
if sitio > 0:
e = sitio - 1
if sitio < L - 1:
d = sitio + 1
if altura_sitios[e] > altura:
altura = altura_sitios[e]
    if altura_sitios[d] > altura:
altura = altura_sitios[d]
return altura
@cython.boundscheck(False)
@cython.wraparound(False)
cdef _executar_deposicao_balistica(np.ndarray[int, ndim=1] altura_sitios, np.ndarray[int, ndim=1] sequencia_sitios, int L):
cdef int i, sitio
cdef np.ndarray[int, ndim = 1] resultante = altura_sitios
for i in range(L):
sitio = sequencia_sitios[i]
resultante[sitio] = calcular_altura_sitio(resultante, sitio, L)
return resultante
@cython.boundscheck(False)
@cython.wraparound(False)
def executar_deposicao_balistica(np.ndarray[int, ndim=1] altura_sitios, np.ndarray[int, ndim=1] sequencia_sitios, int L):
cdef np.ndarray[int, ndim = 1] resultante = _executar_deposicao_balistica(altura_sitios, sequencia_sitios, L)
return resultante
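# Editor's usage sketch (assumption, not part of the original source): deposit
# one monolayer's worth of particles on an initially flat substrate.
#
#     >>> import numpy as np
#     >>> L = 8
#     >>> alturas = np.zeros(L, dtype=np.intc)                # flat surface
#     >>> ordem = np.random.randint(0, L, L).astype(np.intc)  # deposition sites
#     >>> executar_deposicao_balistica(alturas, ordem, L)     # updates in place, returns heights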
<|end_of_text|># -*- coding: utf8
'''Probability based on pre-computed values'''
cimport base
import numpy as np
cimport numpy as np
cdef class PrecomputedEstimator(base.ProbabilityEstimator):
cdef list users_fpaths
cdef dict user_to_piu
cdef dict user_to_pitu
cdef dict user_to_tags
cdef dict user_to_gamma
<|end_of_text|># -*- coding: utf-8 -*-
# distutils: language = c++
# cython: embedsignature = True
from cython.operator cimport dereference as deref
cdef class StkGhosting:
"""stk::mesh::Ghosting """
def __cinit__(self):
self.ghosting = NULL
@staticmethod
cdef wrap_instance(Ghosting* ghosting):
cdef StkGhosting sghost = StkGhosting.__new__(StkGhosting)
sghost.ghosting = ghosting
return sghost
@staticmethod
cdef wrap_reference(Ghosting& ghosting):
cdef StkGhosting sghost = StkGhosting.__new__(StkGhosting)
sghost.ghosting = &ghosting
return sghost
@property
def name(self):
"""Name of the ghosting instance"""
        assert (self.ghosting != NULL)
return deref(self.ghosting).name().decode('UTF-8')
@property
def ordinal(self):
"""Unique ordinal to identify ghosting subset"""
return deref(self.ghosting).ordinal()
<|end_of_text|>import numpy as np
import sys
__feature = 'HI'
cdef extern from "c/simplify.h":
cdef tuple simplify_mesh_c(positions, face, features, unsigned int num_nodes, double threshold, double max_err)
def simplify_mesh(positions, face, num_nodes, features=None, threshold=0., max_err=np.Infinity):
r"""simplify a mesh by contracting edges using the algortihm from `"Surface Simplification Using Quadric Error Metrics"
<http://mgarland.org/files/papers/quadrics.pdf>`_.
Args:
positions (:class:`ndarray`): array of shape num_nodes x 3 containing the x, y, z position for each node
face (:class:`ndarray`): array of shape num_faces x 3 containing the indices for each triangular face
num_nodes (number): number of nodes that the final mesh will have
threshold (number, optional): threshold of vertices distance to be a valid pair
features (:class:`ndarray`): features for all nodes [num_nodes x feature_length]
threshold (number): if the distance between two vertices is below this threshold, they are considered as valid pairs that can be merged.
max_err (float): no vertices are merged that have an error higher than this number
:rtype: (:class:`ndarray`, :class:`ndarray`, :class:`ndarray`)
"""
# check types
if not type(positions) == np.ndarray:
raise Exception('positions has to be an ndarray.')
if not positions.shape[1] == 3:
raise Exception('positions has to be of shape N x 3.')
if not positions.dtype == np.double:
raise Exception('positions has to be of type double')
if not type(face) == np.ndarray:
raise Exception('face has to be an ndarray.')
if not face.shape[1] == 3:
raise Exception('face has to be of shape N x 3.')
if not face.dtype == np.uint32:
raise Exception('face has to be of type unsigned int (np.uint32)')
if features is None:
features = np.zeros((positions.shape[0], 0), np.double)
if not type(features) == np.ndarray:
raise Exception('features has to be an ndarray.')
if not features.shape[0] == positions.shape[0]:
raise Exception('first dimensions of features has to match first shape of positions.')
if not features.dtype == np.double:
raise Exception('features has to be of type double')
if (positions.shape[0] ** 2 + positions.shape[0]) / 2 > sys.maxsize * 2:
raise Exception('too many vertices. cannot build edge matrix.')
new_pos = None
new_face = None
new_features = None
return simplify_mesh_c(positions, face, features, num_nodes, threshold, max_err)<|end_of_text|># -*- coding: utf-8 -*-
# distutils: language = c++
# cython: embedsignature = True
from libcpp.string cimport string
from libcpp.memory cimport unique_ptr
from ..utils.iostream cimport ostream
from ..cpp cimport amrex as crx
from ..cpp_core.amr_mesh cimport AmrCore
cdef extern from "AMReX_AmrLevel.H" namespace "amrex::AmrLevel" nogil:
cpdef enum TimeLevel:
AmrOldTime, AmrHalfTime, AmrNewTime, Amr1QtrTime, Amr3QtrTime, AmrOtherTime
cdef extern from "AMReX_AmrLevel.H" namespace "amrex" nogil:
cdef cppclass AmrLevel
cdef extern from "AMReX_Amr.H" namespace "amrex" nogil:
cdef cppclass Amr(AmrCore):
Amr()
Amr(const crx.RealBox*, int, const crx.Vector[int]&, int)
void InitAmr()
void init(crx.Real, crx.Real)
void InitializeInit(crx.Real, crx.Real, const crx.BoxArray*,
const crx.Vector[int]*)
void FinalizeInit(crx.Real, crx.Real)
void setDtLevel(const crx.Vector[crx.Real]&)
void setDtMin(const crx.Vector[crx.Real]&)
void setNCycle(const crx.Vector[int]&)
int subCycle()
const string& subcyclingMode()
int level_being_advanced()
crx.Real cumTime()
void setCumTime(crx.Real)
crx.Real startTime()
void setStartTime(crx.Real)
crx.Real dtLevel(int)
const crx.Vector[crx.Real]& dtLevel()
crx.Real dtMin(int)
int nCycle(int)
int levelSteps(int)
void setLevelSteps(int, int)
int levelCount(int)
void setLevelCount(int, int)
bint RegridOnRestart()
int regridInt(int)
int checkInt()
crx.Real checkPer()
int plotInt()
crx.Real plotPer()
@staticmethod
void Initialize()
@staticmethod
void Finalize()
AmrLevel& getLevel(int)
crx.Vector[unique_ptr[AmrLevel]]& getAmrLevels()
crx.Long cellCount()
crx.Long cellCount(int)
int numGrids()
int numGrids(int)
int okToContinue()
void RegridOnly(crx.Real, bint)
bint okToRegrid(int)
void coarseTimeStep(crx.Real)
crx.Real coarseTimeStepDt(crx.Real)
int stepOfLastPlotFile()
int stepOfLastSmallPlotFile()
int stepOfLastCheckPoint()
void writePlotFile()
void checkPoint()
cdef extern from "AMReX_AmrLevel.H" namespace "amrex" nogil:
cdef cppclass AmrLevel:
void LevelDirectoryNames(const string&, string&, string&)
void CreateLevelDirectory(const string&)
void SetLevelDirectoryCreated(bint)
# TODO: VisMF
void writePlotFile(const string&, ostream&)
void writePlotFilePre(const string&, ostream&)
void writePlotFilePost(const string&, ostream&)
void checkPoint(const string&, ostream&)
void checkPointPre(const string&, ostream&)
void checkPointPost(const string&, ostream&)
<|end_of_text|>"""
Template for each `dtype` helper function for take
WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in
"""
# ----------------------------------------------------------------------
# take_1d, take_2d
# ----------------------------------------------------------------------
@cython.wraparound(False)
@cython.boundscheck(False)
| Cython |
def take_1d_bool_bool(const uint8_t[:] values,
const intp_t[:] indexer,
uint8_t[:] out,
fill_value=np.nan):
cdef:
Py_ssize_t i, n, idx
uint8_t fv
n = indexer.shape[0]
fv = fill_value
with nogil:
for i in range(n):
idx = indexer[i]
if idx == -1:
out[i] = fv
else:
out[i] = values[idx]
@cython.wraparound(False)
@cython.boundscheck(False)
def take_2d_axis0_bool_bool(const uint8_t[:, :] values,
ndarray[intp_t, ndim=1] indexer,
uint8_t[:, :] out,
fill_value=np.nan):
cdef:
Py_ssize_t i, j, k, n, idx
uint8_t fv
const uint8_t *v
uint8_t *o
n = len(indexer)
k = values.shape[1]
fv = fill_value
# GH#3130
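    # Editor's comment: this fast path applies when both buffers are contiguous
    # along axis 1 with itemsize strides, so each selected row can be copied
    # wholesale with memmove instead of the element-by-element inner loop; the
    # size check keeps the memmove setup cost from dominating tiny takes.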
if (values.strides[1] == out.strides[1] and
values.strides[1] == sizeof(uint8_t) and
sizeof(uint8_t) * n >= 256):
for i in range(n):
idx = indexer[i]
if idx == -1:
for j in range(k):
out[i, j] = fv
else:
v = &values[idx, 0]
o = &out[i, 0]
memmove(o, v, <size_t>(sizeof(uint8_t) * k))
return
for i in range(n):
idx = indexer[i]
if idx == -1:
for j in range(k):
out[i, j] = fv
else:
for j in range(k):
out[i, j] = values[idx, j]
@cython.wraparound(False)
@cython.boundscheck(False)
def take_2d_axis1_bool_bool(const uint8_t[:, :] values,
ndarray[intp_t, ndim=1] indexer,
uint8_t[:, :] out,
fill_value=np.nan):
cdef:
Py_ssize_t i, j, k, n, idx
uint8_t fv
n = len(values)
k = len(indexer)
if n == 0 or k == 0:
return
fv = fill_value
for i in range(n):
for j in range(k):
idx = indexer[j]
if idx == -1:
out[i, j] = fv
else:
out[i, j] = values[i, idx]
@cython.wraparound(False)
@cython.boundscheck(False)
def take_2d_multi_bool_bool(ndarray[uint8_t, ndim=2] values,
indexer,
ndarray[uint8_t, ndim=2] out,
fill_value=np.nan):
cdef:
Py_ssize_t i, j, k, n, idx
ndarray[intp_t, ndim=1] idx0 = indexer[0]
ndarray[intp_t, ndim=1] idx1 = indexer[1]
uint8_t fv
n = len(idx0)
k = len(idx1)
fv = fill_value
for i in range(n):
idx = idx0[i]
if idx == -1:
for j in range(k):
out[i, j] = fv
else:
for j in range(k):
if idx1[j] == -1:
out[i, j] = fv
else:
out[i, j] = values[idx, idx1[j]]
@cython.wraparound(False)
@cython.boundscheck(False)
def take_1d_bool_object(const uint8_t[:] values,
const intp_t[:] indexer,
object[:] out,
fill_value=np.nan):
cdef:
Py_ssize_t i, n, idx
object fv
n = indexer.shape[0]
fv = fill_value
if True:
for i in range(n):
idx = indexer[i]
if idx == -1:
out[i] = fv
else:
out[i] = True if values[idx] > 0 else False
@cython.wraparound(False)
@cython.boundscheck(False)
def take_2d_axis0_bool_object(const uint8_t[:, :] values,
ndarray[intp_t, ndim=1] indexer,
object[:, :] out,
fill_value=np.nan):
cdef:
Py_ssize_t i, j, k, n, idx
object fv
n = len(indexer)
k = values.shape[1]
fv = fill_value
for i in range(n):
idx = indexer[i]
if idx == -1:
for j in range(k):
out[i, j] = fv
else:
for j in range(k):
out[i, j] = True if values[idx, j] > 0 else False
@cython.wraparound(False)
@cython.boundscheck(False)
def take_2d_axis1_bool_object(const uint8_t[:, :] values,
ndarray[intp_t, ndim=1] indexer,
object[:, :] out,
fill_value=np.nan):
cdef:
Py_ssize_t i, j, k, n, idx
object fv
n = len(values)
k = len(indexer)
if n == 0 or k == 0:
return
fv = fill_value
for i in range(n):
for j in range(k):
idx = indexer[j]
if idx == -1:
out[i, j] = fv
else:
out[i, j] = True if values[i, idx] > 0 else False
@cython.wraparound(False)
@cython.boundscheck(False)
def take_2d_multi_bool_object(ndarray[uint8_t, ndim=2] values,
indexer,
ndarray[object, ndim=2] out,
fill_value=np.nan):
cdef:
Py_ssize_t i, j, k, n, idx
ndarray[intp_t, ndim=1] idx0 = indexer[0]
ndarray[intp_t, ndim=1] idx1 = indexer[1]
object fv
n = len(idx0)
k = len(idx1)
fv = fill_value
for i in range(n):
idx = idx0[i]
if idx == -1:
for j in range(k):
out[i, j] = fv
else:
for j in range(k):
if idx1[j] == -1:
out[i, j] = fv
else:
out[i, j] = True if values[idx, idx1[j]] > 0 else False
@cython.wraparound(False)
@cython.boundscheck(False)
def take_1d_int8_int8(const int8_t[:] values,
const intp_t[:] indexer,
int8_t[:] out,
fill_value=np.nan):
cdef:
Py_ssize_t i, n, idx
int8_t fv
n = indexer.shape[0]
fv = fill_value
with nogil:
for i in range(n):
idx = indexer[i]
if idx == -1:
out[i] = fv
else:
out[i] = values[idx]
@cython.wraparound(False)
@cython.boundscheck(False)
def take_2d_axis0_int8_int8(const int8_t[:, :] values,
ndarray[intp_t, ndim=1] indexer,
int8_t[:, :] out,
fill_value=np.nan):
cdef:
Py_ssize_t i, j, k, n, idx
int8_t fv
const int8_t *v
int8_t *o
n = len(indexer)
k = values.shape[1]
fv = fill_value
# GH#3130
if (values.strides[1] == out.strides[1] and
values.strides[1] == sizeof(int8_t) and
sizeof(int8_t) * n >= 256):
for i in range(n):
idx = indexer[i]
if idx == -1:
for j in range(k):
out[i, j] = fv
else:
v = &values[idx, 0]
o = &out[i, 0]
memmove(o, v, <size_t>(sizeof(int8_t) * k))
return
for i in range(n):
idx = indexer[i]
if idx == -1:
for j in range(k):
out[i, j] = fv
else:
for j in range(k):
out[i, j] = values[idx, j]
@cython.wraparound(False)
@cython.boundscheck(False)
def take_2d_axis1_int8_int8(const int8_t[:, :] values,
ndarray[intp_t, ndim=1] indexer,
int8_t[:, :] out,
fill_value=np.nan):
cdef:
Py_ssize_t i, j, k, n, idx
int8_t fv
n = len(values)
k = len(indexer | Cython |