# cython: language_level=3
# Copyright (c) 2014-2023, Dr Alex Meakins, Raysect Project
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Raysect Project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from raysect.optical.colour import d65_white
from raysect.optical cimport World, Primitive, Ray, Spectrum, SpectralFunction, Point3D, AffineMatrix3D, Normal3D, Intersection
# NOTE: import path for the NullVolume base class used below is assumed
from raysect.optical.material.material cimport NullVolume
from libc.math cimport round, fabs
cimport cython
cdef class Checkerboard(NullVolume):
"""
Isotropic checkerboard surface emitter
Defines a plane of alternating squares of emission forming a checkerboard
pattern. Useful in debugging and as a light source in test scenes.
:param float width: The width of the squares in metres.
:param SpectralFunction emission_spectrum1: Emission spectrum for square one.
:param SpectralFunction emission_spectrum2: Emission spectrum for square two.
:param float scale1: Intensity of square one emission.
:param float scale2: Intensity of square two emission.
.. code-block:: pycon
>>> from raysect.primitive import Box
>>> from raysect.optical import World, rotate, Point3D, d65_white
>>> from raysect.optical.material import Checkerboard
>>>
>>> world = World()
>>>
>>> # checker board wall that acts as emitter
>>> emitter = Box(lower=Point3D(-10, -10, 10), upper=Point3D(10, 10, 10.1), parent=world,
...               transform=rotate(45, 0, 0))
>>> emitter.material = Checkerboard(4, d65_white, d65_white, 0.1, 2.0)
"""
def __init__(self, double width=1.0, SpectralFunction emission_spectrum1=d65_white,
SpectralFunction emission_spectrum2=d65_white, double scale1=0.25, double scale2=0.5):
super().__init__()
self._width = width
self._rwidth = 1.0 / width
self.emission_spectrum1 = emission_spectrum1
self.emission_spectrum2 = emission_spectrum2
self.scale1 = scale1
self.scale2 = scale2
self.importance = 1.0
@property
def width(self):
"""
The width of the squares in metres.
:rtype: float
"""
return self._width
@width.setter
@cython.cdivision(True)
def width(self, double v):
self._width = v
self._rwidth = 1.0 / v
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.initializedcheck(False)
cpdef Spectrum evaluate_surface(self, World world, Ray ray, Primitive primitive, Point3D hit_point,
bint exiting, Point3D inside_point, Point3D outside_point,
Normal3D normal, AffineMatrix3D world_to_primitive, AffineMatrix3D primitive_to_world,
Intersection intersection):
cdef:
Spectrum spectrum
double[::1] emission
bint v
int index
double scale
v = False
# generate check pattern
v = self._flip(v, hit_point.x)
v = self._flip(v, hit_point.y)
v = self._flip(v, hit_point.z)
# select emission
spectrum = ray.new_spectrum()
if v:
emission = self.emission_spectrum1.sample_mv(spectrum.min_wavelength, spectrum.max_wavelength, spectrum.bins)
scale = self.scale1
else:
emission = self.emission_spectrum2.sample_mv(spectrum.min_wavelength, spectrum.max_wavelength, spectrum.bins)
scale = self.scale2
for index in range(spectrum.bins):
spectrum.samples_mv[index] = emission[index] * scale
return spectrum
@cython.cdivision(True)
cdef bint _flip(self, bint v, double p) nogil:
# round to avoid numerical precision issues (rounds to nearest nanometer)
p = round(p * 1e9) / 1e9
# generates check pattern from [0, inf]
if fabs(self._rwidth * p) % 2 >= 1.0:
v = not v
# invert pattern for negative
if p < 0:
v = not v
return v
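# Worked example (illustrative, not from the original source): with width=1
# (rwidth=1), a point at p=1.5 gives fabs(1.0 * 1.5) % 2 = 1.5 >= 1.0, so the
# flag flips; p=0.5 gives 0.5 < 1.0, so it does not. Applying _flip along x,
# y and z and inverting for negative coordinates yields the 3D checker parity.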
# cython: c_string_type=unicode, c_string_encoding=ascii
# -------------------------------------------------------------------------------------------------
# Imports
# -------------------------------------------------------------------------------------------------
from functools import wraps, partial
import logging
import traceback
import sys
cimport numpy as np
import numpy as np
from cpython.ref cimport Py_INCREF
from libc.string cimport memcpy
from libc.stdio cimport printf
cimport datoviz.cydatoviz as cv
logger = logging.getLogger('datoviz')
# -------------------------------------------------------------------------------------------------
# Types
# -------------------------------------------------------------------------------------------------
ctypedef np.float32_t FLOAT
ctypedef np.double_t DOUBLE
ctypedef np.uint8_t CHAR
ctypedef np.uint8_t[4] CVEC4
ctypedef np.int16_t SHORT
ctypedef np.uint16_t USHORT
ctypedef np.int32_t INT
ctypedef np.uint32_t UINT
ctypedef np.uint32_t[3] TEX_SHAPE
# -------------------------------------------------------------------------------------------------
# Constants
# -------------------------------------------------------------------------------------------------
# region # folding in VSCode
DEFAULT_WIDTH = 800
DEFAULT_HEIGHT = 600
cdef TEX_SHAPE DVZ_ZERO_OFFSET = (0, 0, 0)
# TODO: add more keys
_KEYS = {
cv.DVZ_KEY_LEFT: 'left',
cv.DVZ_KEY_RIGHT: 'right',
cv.DVZ_KEY_UP: 'up',
cv.DVZ_KEY_DOWN: 'down',
cv.DVZ_KEY_HOME: 'home',
cv.DVZ_KEY_END: 'end',
cv.DVZ_KEY_KP_ADD: '+',
cv.DVZ_KEY_KP_SUBTRACT: '-',
cv.DVZ_KEY_F: 'f',
cv.DVZ_KEY_R: 'r',
cv.DVZ_KEY_G: 'g',
}
# HACK: these keys do not raise a Python key event
_EXCLUDED_KEYS = (
cv.DVZ_KEY_NONE,
cv.DVZ_KEY_LEFT_SHIFT,
cv.DVZ_KEY_LEFT_CONTROL,
cv.DVZ_KEY_LEFT_ALT,
cv.DVZ_KEY_LEFT_SUPER,
cv.DVZ_KEY_RIGHT_SHIFT,
cv.DVZ_KEY_RIGHT_CONTROL,
cv.DVZ_KEY_RIGHT_ALT,
cv.DVZ_KEY_RIGHT_SUPER,
)
_BUTTONS = {
cv.DVZ_MOUSE_BUTTON_LEFT: 'left',
cv.DVZ_MOUSE_BUTTON_MIDDLE: 'middle',
cv.DVZ_MOUSE_BUTTON_RIGHT: 'right',
}
_MODIFIERS = {
cv.DVZ_KEY_MODIFIER_SHIFT: 'shift',
cv.DVZ_KEY_MODIFIER_CONTROL: 'control',
cv.DVZ_KEY_MODIFIER_ALT: 'alt',
cv.DVZ_KEY_MODIFIER_SUPER: 'super',
}
_BUTTONS_INV = {v: k for k, v in _BUTTONS.items()}
_EVENTS = {
'mouse_press': cv.DVZ_EVENT_MOUSE_PRESS,
'mouse_release': cv.DVZ_EVENT_MOUSE_RELEASE,
'mouse_move': cv.DVZ_EVENT_MOUSE_MOVE,
'mouse_wheel': cv.DVZ_EVENT_MOUSE_WHEEL,
'mouse_drag_begin': cv.DVZ_EVENT_MOUSE_DRAG_BEGIN,
'mouse_drag_end': cv.DVZ_EVENT_MOUSE_DRAG_END,
'mouse_click': cv.DVZ_EVENT_MOUSE_CLICK,
'mouse_double_click': cv.DVZ_EVENT_MOUSE_DOUBLE_CLICK,
'key_press': cv.DVZ_EVENT_KEY_PRESS,
'key_release': cv.DVZ_EVENT_KEY_RELEASE,
'frame': cv.DVZ_EVENT_FRAME,
'timer': cv.DVZ_EVENT_TIMER,
'gui': cv.DVZ_EVENT_GUI,
}
_VISUALS = {
'point': cv.DVZ_VISUAL_POINT,
'marker': cv.DVZ_VISUAL_MARKER,
'mesh': cv.DVZ_VISUAL_MESH,
'path': cv.DVZ_VISUAL_PATH,
'text': cv.DVZ_VISUAL_TEXT,
'polygon': cv.DVZ_VISUAL_POLYGON,
'image': cv.DVZ_VISUAL_IMAGE,
'image_cmap': cv.DVZ_VISUAL_IMAGE_CMAP,
'volume': cv.DVZ_VISUAL_VOLUME,
'volume_slice': cv.DVZ_VISUAL_VOLUME_SLICE,
'line_strip': cv.DVZ_VISUAL_LINE_STRIP,
'rectangle': cv.DVZ_VISUAL_RECTANGLE,
}
_CONTROLLERS = {
'panzoom': cv.DVZ_CONTROLLER_PANZOOM,
'axes': cv.DVZ_CONTROLLER_AXES_2D,
'arcball': cv.DVZ_CONTROLLER_ARCBALL,
'camera': cv.DVZ_CONTROLLER_CAMERA,
}
_TRANSPOSES = {
None: cv.DVZ_CDS_TRANSPOSE_NONE,
'xfyrzu': cv.DVZ_CDS_TRANSPOSE_XFYRZU,
'xbydzl': cv.DVZ_CDS_TRANSPOSE_XBYDZL,
'xlybzd': cv.DVZ_CDS_TRANSPOSE_XLYBZD,
}
_COORDINATE_SYSTEMS = {
'data': cv.DVZ_CDS_DATA,
'scene': cv.DVZ_CDS_SCENE,
'vulkan': cv.DVZ_CDS_VULKAN,
'framebuffer': cv.DVZ_CDS_FRAMEBUFFER,
'window': cv.DVZ_CDS_WINDOW,
}
_PROPS = {
'pos': cv.DVZ_PROP_POS,
'color': cv.DVZ_PROP_COLOR,
'alpha': cv.DVZ_PROP_ALPHA,
'ms': cv.DVZ_PROP_MARKER_SIZE,
'marker': cv.DVZ_PROP_MARKER_TYPE,
'normal': cv.DVZ_PROP_NORMAL,
'texcoords': cv.DVZ_PROP_TEXCOORDS,
'index': cv.DVZ_PROP_INDEX,
'range': cv.DVZ_PROP_RANGE,
'length': cv.DVZ_PROP_LENGTH,
'text': cv.DVZ_PROP_TEXT,
'glyph': cv.DVZ_PROP_GLYPH,
'text_size': cv.DVZ_PROP_TEXT_SIZE,
'scale': cv.DVZ_PROP_SCALE,
'cap_type': cv.DVZ_PROP_CAP_TYPE,
'light_params': cv.DVZ_PROP_LIGHT_PARAMS,
'light_pos': cv.DVZ_PROP_LIGHT_POS,
'texcoefs': cv.DVZ_PROP_TEXCOEFS,
'linewidth': cv.DVZ_PROP_LINE_WIDTH,
'colormap': cv.DVZ_PROP_COLORMAP,
'transferx': cv.DVZ_PROP_TRANSFER_X,
'transfery': cv.DVZ_PROP_TRANSFER_Y,
'clip': cv.DVZ_PROP_CLIP,
}
_DTYPES = {
cv.DVZ_DTYPE_CHAR: (np.uint8, 1),
cv.DVZ_DTYPE_CVEC2: (np.uint8, 2),
cv.DVZ_DTYPE_CVEC3: (np.uint8, 3),
cv.DVZ_DTYPE_CVEC4: (np.uint8, 4),
cv.DVZ_DTYPE_USHORT: (np.uint16, 1),
cv.DVZ_DTYPE_USVEC2: (np.uint16, 2),
cv.DVZ_DTYPE_USVEC3: (np.uint16, 3),
cv.DVZ_DTYPE_USVEC4: (np.uint16, 4),
cv.DVZ_DTYPE_SHORT: (np.int16, 1),
cv.DVZ_DTYPE_SVEC2: (np.int16, 2),
cv.DVZ_DTYPE_SVEC3: (np.int16, 3),
cv.DVZ_DTYPE_SVEC4: (np.int16, 4),
cv.DVZ_DTYPE_UINT: (np.uint32, 1),
cv.DVZ_DTYPE_UVEC2: (np.uint32, 2),
cv.DVZ_DTYPE_UVEC3: (np.uint32, 3),
cv.DVZ_DTYPE_UVEC4: (np.uint32, 4),
cv.DVZ_DTYPE_INT: (np.int32, 1),
cv.DVZ_DTYPE_IVEC2: (np.int32, 2),
cv.DVZ_DTYPE_IVEC3: (np.int32, 3),
cv.DVZ_DTYPE_IVEC4: (np.int32, 4),
cv.DVZ_DTYPE_FLOAT: (np.float32, 1),
cv.DVZ_DTYPE_VEC2: (np.float32, 2),
cv.DVZ_DTYPE_VEC3: (np.float32, 3),
cv.DVZ_DTYPE_VEC4: (np.float32, 4),
cv.DVZ_DTYPE_DOUBLE: (np.double, 1),
cv.DVZ_DTYPE_DVEC2: (np.double, 2),
cv.DVZ_DTYPE_DVEC3: (np.double, 3),
cv.DVZ_DTYPE_DVEC4: (np.double, 4),
cv.DVZ_DTYPE_MAT2: (np.float32, (2, 2)),
cv.DVZ_DTYPE_MAT3: (np.float32, (3, 3)),
cv.DVZ_DTYPE_MAT4: (np.float32, (4, 4)),
cv.DVZ_DTYPE_STR: (np.dtype('S1'), 1),
}
_TRANSFORMS = {
'earth': cv.DVZ_TRANSFORM_EARTH_MERCATOR_WEB,
}
_CONTROLS = {
'slider_float': cv.DVZ_GUI_CONTROL_SLIDER_FLOAT,
'slider_float2': cv.DVZ_GUI_CONTROL_SLIDER_FLOAT2,
'slider_int': cv.DVZ_GUI_CONTROL_SLIDER_INT,
'input_float': cv.DVZ_GUI_CONTROL_INPUT_FLOAT,
'checkbox': cv.DVZ_GUI_CONTROL_CHECKBOX,
'button': cv.DVZ_GUI_CONTROL_BUTTON,
'label': cv.DVZ_GUI_CONTROL_LABEL,
}
_COLORMAPS = {
'binary': cv.DVZ_CMAP_BINARY,
'hsv': cv.DVZ_CMAP_HSV,
'cividis': cv.DVZ_CMAP_CIVIDIS,
'inferno': cv.DVZ_CMAP_INFERNO,
'magma': cv.DVZ_CMAP_MAGMA,
'plasma': cv.DVZ_CMAP_PLASMA,
'viridis': cv.DVZ_CMAP_VIRIDIS,
'blues': cv.DVZ_CMAP_BLUES,
'bugn': cv.DVZ_CMAP_BUGN,
'bupu': cv.DVZ_CMAP_BUPU,
'gnbu': cv.DVZ_CMAP_GNBU,
'greens': cv.DVZ_CMAP_GREENS,
'greys': cv.DVZ_CMAP_GREYS,
'oranges': cv.DVZ_CMAP_ORANGES,
'orrd': cv.DVZ_CMAP_ORRD,
'pubu': cv.DVZ_CMAP_PUBU,
'pubugn': cv.DVZ_CMAP_PUBUGN,
'purples': cv.DVZ_CMAP_PURPLES,
'rdpu': cv.DVZ_CMAP_RDPU,
'reds': cv.DVZ_CMAP_REDS,
'ylgn': cv.DVZ_CMAP_YLGN,
'ylgnbu': cv.DVZ_CMAP_YLGNBU,
'ylorbr': cv.DVZ_CMAP_YLORBR,
'ylorrd': cv.DVZ_CMAP_YLORRD,
'afmhot': cv.DVZ_CMAP_AFMHOT,
'autumn': cv.DVZ_CMAP_AUTUMN,
'bone': cv.DVZ_CMAP_BONE,
'cool': cv.DVZ_CMAP_COOL,
'copper': cv.DVZ_CMAP_COPPER,
'gist_heat': cv.DVZ_CMAP_GIST_HEAT,
'gray': cv.DVZ_CMAP_GRAY,
'hot': cv.DVZ_CMAP_HOT,
'pink': cv.DVZ_CMAP_PINK,
'spring': cv.DVZ_CMAP_SPRING,
'summer': cv.DVZ_CMAP_SUMMER,
'winter': cv.DVZ_CMAP_WINTER,
'wistia': cv.DVZ_CMAP_WISTIA,
'brbg': cv.DVZ_CMAP_BRBG,
'bwr': cv.DVZ_CMAP_BWR,
'coolwarm': cv.DVZ_CMAP_COOLWARM,
'piyg': cv.DVZ_CMAP_PIYG,
'prgn': cv.DVZ_CMAP_PRGN,
'puor': cv.DVZ_CMAP_PUOR,
'rdbu': cv.DVZ_CMAP_RDBU,
'rdgy': cv.DVZ_CMAP_RDGY,
'rdylbu': cv.DVZ_CMAP_RDYLBU,
'rdylgn': cv.DVZ_CMAP_RDYLGN,
'seismic': cv.DVZ_CMAP_SEISMIC,
'spectral': cv.DVZ_CMAP_SPECTRAL,
'twilight_shifted': cv.DVZ_CMAP_TWILIGHT_SHIFTED,
'twilight': cv.DVZ_CMAP_TWILIGHT,
'brg': cv.DVZ_CMAP_BRG,
'cmrmap': cv.DVZ_CMAP_CMRMAP,
'cubehelix': cv.DVZ_CMAP_CUBEHELIX,
'flag': cv.DVZ_CMAP_FLAG,
'gist_earth': cv.DVZ_CMAP_GIST_EARTH,
'gist_ncar': cv.DVZ_CMAP_GIST_NCAR,
'gist_rainbow': cv.DVZ_CMAP_GIST_RAINBOW,
'gist_stern': cv.DVZ_CMAP_GIST_STERN,
'gnuplot2': cv.DVZ_CMAP_GNUPLOT2,
'gnuplot': cv.DVZ_CMAP_GNUPLOT,
'jet': cv.DVZ_CMAP_JET,
'nipy_spectral': cv.DVZ_CMAP_NIPY_SPECTRAL,
'ocean': cv.DVZ_CMAP_OCEAN,
'prism': cv.DVZ_CMAP_PRISM,
'rainbow': cv.DVZ_CMAP_RAINBOW,
'terrain': cv.DVZ_CMAP_TERRAIN,
'bkr': cv.DVZ_CMAP_BKR,
'bky': cv.DVZ_CMAP_BKY,
'cet_d10': cv.DVZ_CMAP_CET_D10,
'cet_d11': cv.DVZ_CMAP_CET_D11,
'cet_d8': cv.DVZ_CMAP_CET_D8,
'cet_d13': cv.DVZ_CMAP_CET_D13,
'cet_d3': cv.DVZ_CMAP_CET_D3,
'cet_d1a': cv.DVZ_CMAP_CET_D1A,
'bjy': cv.DVZ_CMAP_BJY,
'gwv': cv.DVZ_CMAP_GWV,
'bwy': cv.DVZ_CMAP_BWY,
'cet_d12': cv.DVZ_CMAP_CET_D12,
'cet_r3': cv.DVZ_CMAP_CET_R3,
'cet_d9': cv.DVZ_CMAP_CET_D9,
'cwr': cv.DVZ_CMAP_CWR,
'cet_cbc1': cv.DVZ_CMAP_CET_CBC1,
'cet_cbc2': cv.DVZ_CMAP_CET_CBC2,
'cet_cbl1': cv.DVZ_CMAP_CET_CBL1,
'cet_cbl2': cv.DVZ_CMAP_CET_CBL2,
'cet_cbtc1': cv.DVZ_CMAP_CET_CBTC1,
'cet_cbtc2': cv.DVZ_CMAP_CET_CBTC2,
'cet_cbtl1': cv.DVZ_CMAP_CET_CBTL1,
'bgy': cv.DVZ_CMAP_BGY,
'bgyw': cv.DVZ_CMAP_BGYW,
'bmw': cv.DVZ_CMAP_BMW,
'cet_c1': cv.DVZ_CMAP_CET_C1,
'cet_c1s': cv.DVZ_CMAP_CET_C1S,
'cet_c2': cv.DVZ_CMAP_CET_C2,
'cet_c4': cv.DVZ_CMAP_CET_C4,
'cet_c4s': cv.DVZ_CMAP_CET_C4S,
'cet_c5': cv.DVZ_CMAP_CET_C5,
'cet_i1': cv.DVZ_CMAP_CET_I1,
'cet_i3': cv.DVZ_CMAP_CET_I3,
'cet_l10': cv.DVZ_CMAP_CET_L10,
'cet_l11': cv.DVZ_CMAP_CET_L11,
'cet_l12': cv.DVZ_CMAP_CET_L12,
'cet_l16': cv.DVZ_CMAP_CET_L16,
'cet_l17': cv.DVZ_CMAP_CET_L17,
'cet_l18': cv.DVZ_CMAP_CET_L18,
'cet_l19': cv.DVZ_CMAP_CET_L19,
'cet_l4': cv.DVZ_CMAP_CET_L4,
'cet_l7': cv.DVZ_CMAP_CET_L7,
'cet_l8': cv.DVZ_CMAP_CET_L8,
'cet_l9': cv.DVZ_CMAP_CET_L9,
'cet_r1': cv.DVZ_CMAP_CET_R1,
'cet_r2': cv.DVZ_CMAP_CET_R2,
'colorwheel': cv.DVZ_CMAP_COLORWHEEL,
'fire': cv.DVZ_CMAP_FIRE,
'isolum': cv.DVZ_CMAP_ISOLUM,
'kb': cv.DVZ_CMAP_KB,
'kbc': cv.DVZ_CMAP_KBC,
'kg': cv.DVZ_CMAP_KG,
'kgy': cv.DVZ_CMAP_KGY,
'kr': cv.DVZ_CMAP_KR,
'black_body': cv.DVZ_CMAP_BLACK_BODY,
'kindlmann': cv.DVZ_CMAP_KINDLMANN,
'extended_kindlmann': cv.DVZ_CMAP_EXTENDED_KINDLMANN,
'glasbey': cv.DVZ_CPAL256_GLASBEY,
'glasbey_cool': cv.DVZ_CPAL256_GLASBEY_COOL,
'glasbey_dark': cv.DVZ_CPAL256_GLASBEY_DARK,
'glasbey_hv': cv.DVZ_CPAL256_GLASBEY_HV,
'glasbey_light': cv.DVZ_CPAL256_GLASBEY_LIGHT,
'glasbey_warm': cv.DVZ_CPAL256_GLASBEY_WARM,
'accent': cv.DVZ_CPAL032_ACCENT,
'dark2': cv.DVZ_CPAL032_DARK2,
'paired': cv.DVZ_CPAL032_PAIRED,
'pastel1': cv.DVZ_CPAL032_PASTEL1,
'pastel2': cv.DVZ_CPAL032_PASTEL2,
'set1': cv.DVZ_CPAL032_SET1,
'set2': cv.DVZ_CPAL032_SET2,
'set3': cv.DVZ_CPAL032_SET3,
'tab10': cv.DVZ_CPAL032_TAB10,
'tab20': cv.DVZ_CPAL032_TAB20,
'tab20b': cv.DVZ_CPAL032_TAB20B,
'tab20c': cv.DVZ_CPAL032_TAB20C,
'category10_10': cv.DVZ_CPAL032_CATEGORY10_10,
'category20_20': cv.DVZ_CPAL032_CATEGORY20_20,
'category20b_20': cv.DVZ_CPAL032_CATEGORY20B_20,
'category20c_20': cv.DVZ_CPAL032_CATEGORY20C_20,
'colorblind8': cv.DVZ_CPAL032_COLORBLIND8,
}
_TEXTURE_FILTERS = {
None: cv.VK_FILTER_NEAREST,
'nearest': cv.VK_FILTER_NEAREST,
'linear': cv.VK_FILTER_LINEAR,
# 'cubic': cv.VK_FILTER_CUBIC_EXT, # requires extension VK_EXT_filter_cubic
}
_SOURCE_TYPES = {
1: cv.DVZ_SOURCE_TYPE_TRANSFER,
2: cv.DVZ_SOURCE_TYPE_IMAGE,
3: cv.DVZ_SOURCE_TYPE_VOLUME,
}
_FORMATS = {
(np.dtype(np.uint8), 1): cv.VK_FORMAT_R8_UNORM,
(np.dtype(np.uint8), 3): cv.VK_FORMAT_R8G8B8_UNORM,
(np.dtype(np.uint8), 4): cv.VK_FORMAT_R8G8B8A8_UNORM,
(np.dtype(np.uint16), 1): cv.VK_FORMAT_R16_UNORM,
(np.dtype(np.int16), 1): cv.VK_FORMAT_R16_SNORM,
(np.dtype(np.uint32), 1): cv.VK_FORMAT_R32_UINT,
(np.dtype(np.int32), 1): cv.VK_FORMAT_R32_SINT,
(np.dtype(np.float32), 1): cv.VK_FORMAT_R32_SFLOAT,
}
_MARKER_TYPES = {
'disc': cv.DVZ_MARKER_DISC,
'vbar': cv.DVZ_MARKER_VBAR,
'cross': cv.DVZ_MARKER_CROSS,
}
_CUSTOM_COLORMAPS = {}
#endregion
# -------------------------------------------------------------------------------------------------
# Constant utils
# -------------------------------------------------------------------------------------------------
def _key_name(key):
"""From key code used by Datoviz to key name."""
return _KEYS.get(key, key)
def _button_name(button):
"""From button code used by Datoviz to button name."""
return _BUTTONS.get(button, None)
def _get_modifiers(mod):
"""From modifier flag to a tuple of strings."""
mods_py = []
for c_enum, name in _MODIFIERS.items():
if mod & c_enum:
mods_py.append(name)
return tuple(mods_py)
def _c_modifiers(*mods):
cdef int mod, c_enum
mod = 0
for c_enum, name in _MODIFIERS.items():
if name in mods:
mod |= c_enum
return mod
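# Example (illustrative): _c_modifiers('shift', 'alt') ORs the
# DVZ_KEY_MODIFIER_SHIFT and DVZ_KEY_MODIFIER_ALT bits together, and
# _get_modifiers() applied to that flag value returns ('shift', 'alt') again.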
def _get_prop(name):
"""From prop name to prop enum for Datoviz."""
return _PROPS[name]
# -------------------------------------------------------------------------------------------------
# Python event callbacks
# -------------------------------------------------------------------------------------------------
cdef _get_event_args(cv.DvzEvent c_ev):
"""Prepare the arguments to the Python event callbacks from the Datoviz DvzEvent struct."""
cdef float* fvalue
cdef int* ivalue
cdef bint* bvalue
dt = c_ev.type
# GUI events.
if dt == cv.DVZ_EVENT_GUI:
if c_ev.u.g.control.type == cv.DVZ_GUI_CONTROL_SLIDER_FLOAT:
fvalue = <float*>c_ev.u.g.control.value
return (fvalue[0],), {}
elif c_ev.u.g.control.type == cv.DVZ_GUI_CONTROL_SLIDER_FLOAT2:
fvalue = <float*>c_ev.u.g.control.value
return (fvalue[0], fvalue[1]), {}
elif c_ev.u.g.control.type == cv.DVZ_GUI_CONTROL_SLIDER_INT:
ivalue = <int*>c_ev.u.g.control.value
return (ivalue[0],), {}
elif c_ev.u.g.control.type == cv.DVZ_GUI_CONTROL_INPUT_FLOAT:
fvalue = <float*>c_ev.u.g.control.value
return (fvalue[0],), {}
elif c_ev.u.g.control.type == cv.DVZ_GUI_CONTROL_CHECKBOX:
bvalue = <bint*>c_ev.u.g.control.value
return (bvalue[0],), {}
elif c_ev.u.g.control.type == cv.DVZ_GUI_CONTROL_BUTTON:
bvalue = <bint*>c_ev.u.g.control.value
return (bvalue[0],), {}
# Key events.
elif dt == cv.DVZ_EVENT_KEY_PRESS or dt == cv.DVZ_EVENT_KEY_RELEASE:
key = _key_name(c_ev.u.k.key_code)
modifiers = _get_modifiers(c_ev.u.k.modifiers)
return (key, modifiers), {}
# Mouse button events.
elif dt == cv.DVZ_EVENT_MOUSE_PRESS or dt == cv.DVZ_EVENT_MOUSE_RELEASE:
button = _button_name(c_ev.u.b.button)
modifiers = _get_modifiers(c_ev.u.b.modifiers)
return (button, modifiers), {}
# Mouse button events.
elif dt == cv.DVZ_EVENT_MOUSE_CLICK or dt == cv.DVZ_EVENT_MOUSE_DOUBLE_CLICK:
x = c_ev.u.c.pos[0]
y = c_ev.u.c.pos[1]
button = _button_name(c_ev.u.c.button)
modifiers = _get_modifiers(c_ev.u.c.modifiers)
dbl = c_ev.u.c.double_click
return (x, y), dict(button=button, modifiers=modifiers) #, double_click=dbl)
# Mouse move event.
elif dt == cv.DVZ_EVENT_MOUSE_MOVE:
x = c_ev.u.m.pos[0]
y = c_ev.u.m.pos[1]
modifiers = _get_modifiers(c_ev.u.m.modifiers)
return (x, y), dict(modifiers=modifiers)
# Mouse wheel event.
elif dt == cv.DVZ_EVENT_MOUSE_WHEEL:
x = c_ev.u.w.pos[0]
y = c_ev.u.w.pos[1]
dx = c_ev.u.w.dir[0]
dy = c_ev.u.w.dir[1]
modifiers = _get_modifiers(c_ev.u.w.modifiers)
return (x, y, dx, dy), dict(modifiers=modifiers)
# Frame event.
elif dt == cv.DVZ_EVENT_FRAME:
idx = c_ev.u.f.idx
return (idx,), {}
return (), {}
cdef _wrapped_callback(cv.DvzCanvas* c_canvas, cv.DvzEvent c_ev):
"""C callback function that wraps a Python callback function."""
# NOTE: this function may run in a background thread if using async callbacks
# It should not acquire the GIL.
cdef object tup
if c_ev.user_data != NULL:
# The Python function and its arguments are wrapped in this Python object.
tup = <object>c_ev.user_data
# For each type of event, get the arguments to the function
ev_args, ev_kwargs = _get_event_args(c_ev)
# Recover the Python function and arguments.
f, args = tup
# This is the control type the callback was registered for.
name = args[0] if args else None
# NOTE: we only call the callback if the raised GUI event is for that control.
dt = c_ev.type
if dt == cv.DVZ_EVENT_GUI:
if c_ev.u.g.control.name != name:
return
# We run the registered Python function on the event arguments.
try:
f(*ev_args, **ev_kwargs)
except Exception as e:
print(traceback.format_exc())
cdef _add_event_callback(
cv.DvzCanvas* c_canvas, cv.DvzEventType evtype, double param, f, args,
cv.DvzEventMode mode=cv.DVZ_EVENT_MODE_SYNC):
"""Register a Python callback function using the Datoviz C API."""
# Create a tuple with the Python function, and the arguments.
cdef void* ptr_to_obj
tup = (f, args)
# IMPORTANT: need to either keep a reference of this tuple object somewhere in the class,
# or increase the ref, otherwise this tuple will be deleted by the time we call it in the
# C callback function.
Py_INCREF(tup)
# Call the Datoviz C API to register the C-wrapped callback function.
ptr_to_obj = <void*>tup
cv.dvz_event_callback(
c_canvas, evtype, param, mode,
<cv.DvzEventCallback>_wrapped_callback, ptr_to_obj)
# -------------------------------------------------------------------------------------------------
# Public functions
# -------------------------------------------------------------------------------------------------
def colormap(np.ndarray[DOUBLE, ndim=1] values, vmin=None, vmax=None, cmap=None, alpha=None):
"""Apply a colormap to a 1D array of values."""
N = values.size
if cmap in _COLORMAPS:
cmap_ = _COLORMAPS[cmap]
elif cmap in _CUSTOM_COLORMAPS:
cmap_ = _CUSTOM_COLORMAPS[cmap]
else:
cmap_ = cv.DVZ_CMAP_VIRIDIS
# TODO: ndarrays
cdef np.ndarray out = np.zeros((N, 4), dtype=np.uint8)
if vmin is None:
vmin = values.min()
if vmax is None:
vmax = values.max()
if vmin >= vmax:
logger.warning("colormap vmin is larger than or equal to vmax")
vmax = vmin + 1
cv.dvz_colormap_array(cmap_, N, <double*>&values.data[0], vmin, vmax, <cv.cvec4*>&out.data[0])
if alpha is not None:
if not isinstance(alpha, np.ndarray):
alpha = np.array(alpha)
alpha = (alpha * 255).astype(np.uint8)
out[:, 3] = alpha
return out
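# Usage sketch (illustrative values, not from the original source): map 100
# doubles to RGBA with half transparency; the result is a (100, 4) uint8 array.
# values = np.random.uniform(size=100)
# rgba = colormap(values, vmin=0.0, vmax=1.0, cmap='viridis', alpha=0.5)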
def colorpal(np.ndarray[INT, ndim=1] values, cpal=None, alpha=None):
"""Apply a colormap to a 1D array of values."""
N = values.size
if cpal in _COLORMAPS:
cpal_ = _COLORMAPS[cpal]
elif cpal in _CUSTOM_COLORMAPS:
cpal_ = _CUSTOM_COLORMAPS[cpal]
else:
cpal_ = cv.DVZ_CPAL256_GLASBEY
# TODO: ndarrays
cdef np.ndarray out = np.zeros((N, 4), dtype=np.uint8)
cv.dvz_colorpal_array(cpal_, N, <cv.int32_t*>&values.data[0], <cv.cvec4*>&out.data[0])
if alpha is not None:
if not isinstance(alpha, np.ndarray):
alpha = np.array(alpha)
alpha = (alpha * 255).astype(np.uint8)
out[:, 3] = alpha
return out
def demo():
cv.dvz_demo_standalone()
# -------------------------------------------------------------------------------------------------
# Util functions
# -------------------------------------------------------------------------------------------------
def _validate_data(dt, nc, data):
"""Ensure a NumPy array has a given dtype and shape and is contiguous."""
data = data.astype(dt)
if not data.flags['C_CONTIGUOUS']:
data = np.ascontiguousarray(data)
if not hasattr(nc, '__len__'):
nc = (nc,)
nd = len(nc) # expected dimension of the data - 1
if nc[0] == 1 and data.ndim == 1:
data = data.reshape((-1, 1))
if data.ndim < nd + 1:
data = data[np.newaxis, :]
assert data.ndim == nd + 1, f"Incorrect array dimension {data.shape}, nc={nc}"
assert data.shape[1:] == nc, f"Incorrect array shape {data.shape} instead of {nc}"
assert data.dtype == dt, f"Array dtype is {data.dtype} instead of {dt}"
return data
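# Example (illustrative): for a prop expecting 3 components (nc=3),
# _validate_data(np.float32, 3, np.zeros((10, 3))) passes through unchanged,
# while a single item of shape (3,) is promoted to shape (1, 3) by the
# data[np.newaxis, :] branch above.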
cdef _canvas_flags(show_fps=None, pick=None, high_dpi=None, offscreen=None):
"""Make the canvas flags from the Python keyword arguments to the canvas creation function."""
cdef int flags = 0
flags |= cv.DVZ_CANVAS_FLAGS_IMGUI
if show_fps:
flags |= cv.DVZ_CANVAS_FLAGS_FPS
if pick:
flags |= cv.DVZ_CANVAS_FLAGS_PICK
if high_dpi:
flags |= cv.DVZ_CANVAS_FLAGS_DPI_SCALE_200
if offscreen:
flags |= cv.DVZ_CANVAS_FLAGS_OFFSCREEN
return flags
# -------------------------------------------------------------------------------------------------
# App
# -------------------------------------------------------------------------------------------------
cdef class App:
"""Singleton object that gives access to the GPUs."""
cdef cv.DvzApp* _c_app
_gpus = {}
def __cinit__(self):
"""Create a Datoviz app."""
# TODO: selection of the backend
self._c_app = cv.dvz_app(cv.DVZ_BACKEND_GLFW)
if self._c_app is NULL:
raise MemoryError()
def __dealloc__(self):
self.destroy()
def destroy(self):
"""Destroy the app."""
if self._c_app is not NULL:
# # Destroy all GPUs.
# for gpu in self._gpus.values():
# gpu.destroy()
# self._gpus.clear()
cv.dvz_app_destroy(self._c_app)
self._c_app = NULL
def gpu(self, idx=None):
"""Get a GPU, identified by its index, or the "best" one by default."""
if idx in self._gpus:
return self._gpus[idx]
g = GPU()
if idx is None:
g.create_best(self._c_app)
else:
assert idx >= 0
g.create(self._c_app, idx)
self._gpus[idx] = g
return g
def run(
self, int n_frames=0, unicode screenshot=None, unicode video=None,
bint offscreen=False):
"""Start the rendering loop."""
# Autorun struct.
cdef cv.DvzAutorun autorun = [0, 0]
if screenshot or video:
logger.debug("Enabling autorun")
autorun.enable = True
autorun.n_frames = n_frames
autorun.offscreen = offscreen
if screenshot:
ss = screenshot.encode('UTF-8')
autorun.screenshot[:len(ss) + 1] = ss
logger.debug(f"Autorun screenshot: {ss}")
autorun.video[0] = 0
if video:
sv = video.encode('UTF-8')
autorun.video[:len(sv) + 1] = sv
autorun.screenshot[0] = 0
logger.debug(f"Autorun video: {sv}")
cv.dvz_autorun_setup(self._c_app, autorun)
cv.dvz_app_run(self._c_app, n_frames)
def next_frame(self):
"""Run a single frame for all canvases."""
return cv.dvz_app_run(self._c_app, 1)
def _set_running(self, bint running):
"""Manually set whether the app is running or not."""
self._c_app.is_running = running
def __repr__(self):
return "<Datoviz App>"
# -------------------------------------------------------------------------------------------------
# GPU
# -------------------------------------------------------------------------------------------------
cdef class GPU:
"""The GPU object allows to create GPU objects and canvases."""
cdef cv.DvzApp* _c_app
cdef cv.DvzGpu* _c_gpu
cdef object _context
# _canvases = []
cdef create(self, cv.DvzApp* c_app, int idx):
"""Create a GPU."""
assert c_app is not NULL
self._c_app = c_app
self._c_gpu = cv.dvz_gpu(self._c_app, idx);
if self._c_gpu is NULL:
raise MemoryError()
cdef create_best(self, cv.DvzApp* c_app):
"""Create the best GPU found."""
assert c_app is not NULL
self._c_app = c_app
self._c_gpu = cv.dvz_gpu_best(self._c_app);
if self._c_gpu is NULL:
raise MemoryError()
@property
def name(self):
return self._c_gpu.name
def canvas(
self,
int width=DEFAULT_WIDTH,
int height=DEFAULT_HEIGHT,
bint show_fps=False,
bint pick=False,
bint high_dpi=False,
bint offscreen=False,
clear_color=None,
):
"""Create a new canvas."""
# Canvas flags.
cdef int flags = 0
flags = _canvas_flags(show_fps=show_fps, pick=pick, high_dpi=high_dpi, offscreen=offscreen)
# Create the canvas using the Datoviz C API.
c_canvas = cv.dvz_canvas(self._c_gpu, width, height, flags)
# Canvas clear color.
if clear_color == 'white':
cv.dvz_canvas_clear_color(c_canvas, 1, 1, 1)
if c_canvas is NULL:
raise MemoryError()
# Create and return the Canvas Cython wrapper.
c = Canvas()
c.create(self, c_canvas)
# self._canvases.append(c)
return c
def context(self):
"""Return the GPU context object, used to create GPU buffers and textures."""
if self._context is not None:
return self._context
c = Context()
assert self._c_gpu is not NULL
# If the context has not been created, it means we must create the GPU in offscreen mode.
if self._c_gpu.context is NULL:
logger.debug("Automatically creating a GPU context with no surface (offscreen only)")
# Create the GPU without a surface.
cv.dvz_gpu_default(self._c_gpu, NULL)
# Create the context.
cv.dvz_context(self._c_gpu)
c.create(self._c_app, self._c_gpu, self._c_gpu.context)
self._context = c
return c
def __repr__(self):
return f"<GPU \"{self.name}\">"
# -------------------------------------------------------------------------------------------------
# Context
# -------------------------------------------------------------------------------------------------
cdef class Context:
"""A Context is attached to a GPU and allows to create GPU buffers and textures."""
cdef cv.DvzApp* _c_app
cdef cv.DvzGpu* _c_gpu
cdef cv.DvzContext* _c_context
cdef create(self, cv.DvzApp* c_app, cv.DvzGpu* c_gpu, cv.DvzContext* c_context):
assert c_app is not NULL
assert c_gpu is not NULL
assert c_context is not NULL
self._c_app = c_app
self._c_gpu = c_gpu
self._c_context = c_context
def texture(
self, int height, int width=1, int depth=1,
int ncomp=4, np.dtype dtype=None, int ndim=2):
"""Create a 1D, 2D, or 3D texture."""
dtype = np.dtype(dtype or np.uint8)
tex = Texture()
# Texture shape.
assert width > 0
assert height > 0
assert depth > 0
if depth > 1:
ndim = 3
cdef TEX_SHAPE shape
# NOTE: shape is in Vulkan convention.
shape[0] = width
shape[1] = height
shape[2] = depth
# Create the texture.
tex.create(self._c_context, ndim, ncomp, shape, dtype)
logger.debug(f"Create a {str(tex)}")
return tex
def colormap(self, unicode name, np.ndarray[CHAR, ndim=2] colors):
"""Create a custom colormap"""
assert colors.shape[1] == 4
color_count = colors.shape[0]
assert color_count > 0
assert color_count <= 256
colors = colors.astype(np.uint8)
if not colors.flags['C_CONTIGUOUS']:
colors = np.ascontiguousarray(colors)
# TODO: use constant CMAP_CUSTOM instead of hard-coded value
cmap = 160 + len(_CUSTOM_COLORMAPS)
_CUSTOM_COLORMAPS[name] = cmap
cv.dvz_colormap_custom(cmap, color_count, <cv.cvec4*>&colors.data[0])
cv.dvz_context_colormap(self._c_context)
def __repr__(self):
return f"<Context for GPU \"{self._c_gpu.name}\">"
# -------------------------------------------------------------------------------------------------
# Texture
# -------------------------------------------------------------------------------------------------
cdef class Texture:
"""A 1D, 2D, or 3D GPU texture."""
cdef cv.DvzContext* _c_context
cdef cv.DvzTexture* _c_texture
cdef TEX_SHAPE _c_shape # always 3 values: width, height, depth (WARNING, reversed in NumPy)
cdef np.dtype dtype
cdef int ndim # 1D, 2D, or 3D texture
cdef int ncomp # 1-4 (eg 3 for RGB, 4 for RGBA)
cdef cv.DvzSourceType _c_source_type
cdef create(self, cv.DvzContext* c_context, int ndim, int ncomp, TEX_SHAPE shape, np.dtype dtype):
"""Create a texture."""
assert c_context is not NULL
self._c_context = c_context
assert 1 <= ndim <= 3
assert 1 <= ncomp <= 4
self.ndim = ndim
self.ncomp = ncomp
self._update_shape(shape)
# Find the source type.
assert ndim in _SOURCE_TYPES
self._c_source_type = _SOURCE_TYPES[ndim]
# Find the Vulkan format.
cdef cv.VkFormat c_format
self.dtype = dtype
assert (dtype, ncomp) in _FORMATS
c_format = _FORMATS[dtype, ncomp]
# Create the Datoviz texture.
self._c_texture = cv.dvz_ctx_texture(self._c_context, ndim, &shape[0], c_format)
cdef _update_shape(self, TEX_SHAPE shape):
# Store the shape.
for i in range(self.ndim):
self._c_shape[i] = shape[i]
for i in range(self.ndim, 3):
self._c_shape[i] = 1
@property
def item_size(self):
"""Size, in bytes, of every value."""
return np.dtype(self.dtype).itemsize
@property
def size(self):
"""Total number of values in the texture (including the number of color components)."""
return np.prod(self.shape)
@property
def shape(self):
"""Shape (NumPy convention: height, width, depth).
Also, the last dimension is the number of color components."""
shape = [1, 1, 1]
for i in range(3):
shape[i] = self._c_shape[i]
if self.ndim > 1:
# NOTE: Vulkan considers textures as (width, height, depth) whereas NumPy
# considers them as (height, width, depth), hence the need to transpose here.
shape[0], shape[1] = shape[1], shape[0]
return tuple(shape[:self.ndim]) + (self.ncomp,)
def set_filter(self, name):
"""Change the filtering of the texture."""
cv.dvz_texture_filter(self._c_texture, cv.DVZ_FILTER_MIN, _TEXTURE_FILTERS[name])
cv.dvz_texture_filter(self._c_texture, cv.DVZ_FILTER_MAG, _TEXTURE_FILTERS[name])
def resize(self, w=1, h=1, d=1):
cdef TEX_SHAPE shape
shape[0] = h
shape[1] = w
shape[2] = d
cv.dvz_texture_resize(self._c_texture, &shape[0])
self._update_shape(shape)
def upload(self, np.ndarray arr):
"""Set the texture data from a NumPy array."""
assert arr.dtype == self.dtype
for i in range(self.ndim):
assert arr.shape[i] == self.shape[i]
logger.debug(f"Upload NumPy array to {self}.")
cv.dvz_upload_texture(
self._c_context, self._c_texture, &DVZ_ZERO_OFFSET[0], &DVZ_ZERO_OFFSET[0],
self.size * self.item_size, &arr.data[0])
def download(self):
"""Download the texture data to a NumPy array."""
cdef np.ndarray arr
arr = np.empty(self.shape, dtype=self.dtype)
logger.debug(f"Download {self}.")
cv.dvz_download_texture(
self._c_context, self._c_texture, &DVZ_ZERO_OFFSET[0], &DVZ_ZERO_OFFSET[0],
self.size * self.item_size, &arr.data[0])
cv.dvz_process_transfers(self._c_context)
return arr
def __repr__(self):
"""The shape axes are in the following order: height, width, depth, ncomp."""
return f"<Texture {self.ndim}D {'x'.join(map(str, self.shape))} ({self.dtype})>"
# -------------------------------------------------------------------------------------------------
# Canvas
# -------------------------------------------------------------------------------------------------
cdef class Canvas:
"""A canvas."""
cdef cv.DvzCanvas* _c_canvas
cdef object _gpu
cdef bint _video_recording
cdef object _scene
cdef create(self, gpu, cv.DvzCanvas* c_canvas):
"""Create a canvas."""
self._c_canvas = c_canvas
self._gpu = gpu
self._scene = None
def gpu(self):
return self._gpu
def scene(self, rows=1, cols=1):
"""Create a scene, which allows to use subplots, controllers, visuals, and so on."""
if self._scene is not None:
logger.debug("reusing existing Scene object, discarding rows and cols")
return self._scene
else:
logger.debug("creating new scene")
s = Scene()
s.create(self, self._c_canvas, rows, cols)
self._scene = s
return s
def screenshot(self, unicode path):
"""Make a screenshot and save it to a PNG file."""
cdef char* _c_path = path
cv.dvz_screenshot_file(self._c_canvas, _c_path);
def video(self, unicode path, int fps=30, int bitrate=10000000):
"""Start a high-quality video recording."""
cdef char* _c_path = path
cv.dvz_canvas_video(self._c_canvas, fps, bitrate, _c_path, False)
self._video_recording = False
def record(self):
"""Start or restart the video recording."""
self._video_recording = True
cv.dvz_canvas_pause(self._c_canvas, self._video_recording)
def pause(self):
"""Pause the video recording."""
self._video_recording = not self._video_recording
cv.dvz_canvas_pause(self._c_canvas, self._video_recording)
def stop(self):
"""Stop the video recording and save the video to disk."""
cv.dvz_canvas_stop(self._c_canvas)
def pick(self, cv.uint32_t x, cv.uint32_t y):
"""If the canvas was created with picking support, get the color value at a given pixel."""
cdef cv.uvec2 xy
cdef cv.ivec4 rgba
xy[0] = x
xy[1] = y
cv.dvz_canvas_pick(self._c_canvas, xy, rgba)
cdef cv.int32_t r, g, b, a
r = rgba[0]
g = rgba[1]
b = rgba[2]
a = rgba[3]
return (r, g, b, a)
def gui(self, unicode title):
"""Create a new GUI."""
c_gui = cv.dvz_gui(self._c_canvas, title, 0)
gui = Gui()
gui.create(self._c_canvas, c_gui)
return gui
def gui_demo(self):
"""Show the Dear ImGui demo."""
cv.dvz_imgui_demo(self._c_canvas)
def close(self):
if self._c_canvas is not NULL:
logger.debug("Closing canvas")
cv.dvz_canvas_destroy(self._c_canvas)
# cv.dvz_canvas_to_close(self._c_canvas)
# cv.dvz_app_run(self._c_canvas.app, 1)
self._c_canvas = NULL
def _connect(self, evtype_py, f, param=0, cv.DvzEventMode mode=cv.DVZ_EVENT_MODE_SYNC):
# NOTE: only SYNC callbacks for now.
cdef cv.DvzEventType evtype
evtype = _EVENTS.get(evtype_py, 0)
_add_event_callback(self._c_canvas, evtype, param, f, (), mode=mode)
def connect(self, f):
"""Add an event callback function."""
assert f.__name__.startswith('on_')
ev_name = f.__name__[3:]
self._connect(ev_name, f)
return f
def click(self, float x, float y, button='left', modifiers=()):
"""Simulate a mouse click at a given position."""
cdef cv.vec2 pos
cdef int mod
cdef cv.DvzMouseButton c_button
pos[0] = x
pos[1] = y
c_button = _BUTTONS_INV.get(button, 0)
mod = _c_modifiers(*modifiers)
cv.dvz_event_mouse_click(self._c_canvas, pos, c_button, mod)
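# Usage sketch (illustrative): connect() derives the event name from the
# function name, which must start with 'on_'; for a click the callback
# receives positional (x, y) plus button/modifiers keywords, matching the
# tuples built in _get_event_args() above.
# @canvas.connect
# def on_mouse_click(x, y, button=None, modifiers=()):
#     print(x, y, button, modifiers)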
# -------------------------------------------------------------------------------------------------
# Scene
# -------------------------------------------------------------------------------------------------
cdef class Scene:
"""The Scene is attached to a canvas, and provides high-level scientific
plotting facilities."""
cdef cv.DvzCanvas* _c_canvas
cdef cv.DvzScene* _c_scene
cdef cv.DvzGrid* _c_grid
cdef object _canvas
_panels = []
cdef create(self, canvas, cv.DvzCanvas* c_canvas, int rows, int cols):
"""Create the scene."""
self._canvas = canvas
self._c_canvas = c_canvas
self._c_scene = cv.dvz_scene(c_canvas, rows, cols)
self._c_grid = &self._c_scene.grid
def destroy(self):
"""Destroy the scene."""
if self._c_scene is not NULL:
cv.dvz_scene_destroy(self._c_scene)
self._c_scene = NULL
def panel(self, int row=0, int col=0, controller='axes', transform=None, transpose=None, **kwargs):
"""Add a new panel with a controller."""
cdef int flags
flags = 0
if controller == 'axes':
if kwargs.pop('hide_minor_ticks', False):
flags |= cv.DVZ_AXES_FLAGS_HIDE_MINOR
if kwargs.pop('hide_grid', False):
flags |= cv.DVZ_AXES_FLAGS_HIDE_GRID
ctl = _CONTROLLERS.get(controller, cv.DVZ_CONTROLLER_NONE)
trans = _TRANSPOSES.get(transpose, cv.DVZ_CDS_TRANSPOSE_NONE)
transf = _TRANSFORMS.get(transform, cv.DVZ_TRANSFORM_CARTESIAN)
c_panel = cv.dvz_scene_panel(self._c_scene, row, col, ctl, flags)
if c_panel is NULL:
raise MemoryError()
c_panel.data_coords.transform = transf
cv.dvz_panel_transpose(c_panel, trans)
p = Panel()
p.create(self._c_scene, c_panel)
self._panels.append(p)
return p
def panel_at(self, x, y):
"""Find the panel at a given pixel position."""
cdef cv.vec2 pos
pos[0] = x
pos[1] = y
c_panel = cv.dvz_panel_at(self._c_grid, pos)
cdef Panel panel
for p in self._panels:
panel = p
if panel._c_panel == c_panel:
return panel
# -------------------------------------------------------------------------------------------------
# Panel
# -------------------------------------------------------------------------------------------------
cdef class Panel:
"""The Panel is a subplot in the Scene."""
cdef cv.DvzScene* _c_scene
cdef cv.DvzPanel* _c_panel
_visuals = []
cdef create(self, cv.DvzScene* c_scene, cv.DvzPanel* c_panel):
"""Create the panel."""
self._c_panel = c_panel
self._c_scene = c_scene
@property
def row(self):
"""Get the panel's row index."""
return self._c_panel.row
@property
def col(self):
"""Get the panel's column index."""
return self._c_panel.col
def visual(self, vtype, depth_test=None, transform='auto'):
"""Add a visual to the panel."""
visual_type = _VISUALS.get(vtype, 0)
if not visual_type:
raise ValueError("unknown visual type")
flags = 0
if depth_test:
flags |= cv.DVZ_GRAPHICS_FLAGS_DEPTH_TEST
if transform is None:
flags |= cv.DVZ_VISUAL_FLAGS_TRANSFORM_NONE
# This keyword means that the panel box will NOT be recomputed every time the POS prop
# changes
elif transform == 'init':
flags |= cv.DVZ_VISUAL_FLAGS_TRANSFORM_BOX_INIT
c_visual = cv.dvz_scene_visual(self._c_panel, visual_type, flags)
if c_visual is NULL:
raise MemoryError()
v = Visual()
v.create(self._c_panel, c_visual, vtype)
self._visuals.append(v)
return v
def size(self, axis, float value):
cdef cv.DvzGridAxis c_axis
if axis == 'x':
c_axis = cv.DVZ_GRID_HORIZONTAL
else:
c_axis = cv.DVZ_GRID_VERTICAL
cv.dvz_panel_size(self._c_panel, c_axis, value)
def span(self, axis, int n):
cdef cv.DvzGridAxis c_axis
if axis == 'x':
c_axis = cv.DVZ_GRID_HORIZONTAL
else:
c_axis = cv.DVZ_GRID_VERTICAL
cv.dvz_panel_span(self._c_panel, c_axis, n)
def pick(self, x, y, target_cds='data'):
"""Convert a position in pixels to the data coordinate system, or another
coordinate system."""
cdef cv.dvec3 pos_in
cdef cv.dvec3 pos_out
pos_in[0] = x
pos_in[1] = y
pos_in[2] = 0
source = cv.DVZ_CDS_WINDOW
target = _COORDINATE_SYSTEMS[target_cds]
cv.dvz_transform(self._c_panel, source, pos_in, target, pos_out)
return pos_out[0], pos_out[1]
def get_lim(self):
cdef cv.vec4 out
cv.dvz_panel_lim_get(self._c_panel, out);
return (out[0], out[1], out[2], out[3])
def set_lim(self, lim):
cdef cv.vec4 clim
clim[0] = lim[0]
clim[1] = lim[1]
clim[2] = lim[2]
clim[3] = lim[3]
cv.dvz_panel_lim_set(self._c_panel, clim);
def link_to(self, Panel panel):
cv.dvz_panel_link(&self._c_scene.grid, self._c_panel, panel._c_panel)
def camera_pos(self, float x, float y, float z):
cdef cv.vec3 pos
pos[0] = x
pos[1] = y
pos[2] = z
cv.dvz_camera_pos(self._c_panel, pos)
def arcball_rotate(self, float u, float v, float w, float a):
cdef cv.vec3 axis
axis[0] = u
axis[1] = v
axis[2] = w
cdef float angle
angle = a
cv.dvz_arcball_rotate(self._c_panel, angle, axis)
# -------------------------------------------------------------------------------------------------
# Visual
# -------------------------------------------------------------------------------------------------
cdef class Visual:
"""A visual is added to a given panel."""
cdef cv.DvzPanel* _c_panel
cdef cv.DvzVisual* _c_visual
cdef cv.DvzContext* _c_context
cdef unicode vtype
_textures = {}
cdef create(self, cv.DvzPanel* c_panel, cv.DvzVisual* c_visual, unicode vtype):
"""Create a visual."""
self._c_panel = c_panel
self._c_visual = c_visual
self._c_context = c_visual.canvas.gpu.context
self.vtype = vtype
def data(self, name, np.ndarray value, idx=0, mode=None, drange=None):
"""Set the data of the visual associated to a given property."""
prop_type = _get_prop(name)
c_prop = cv.dvz_prop_get(self._c_visual, prop_type, idx)
dtype, nc = _DTYPES[c_prop.dtype]
value = _validate_data(dtype, nc, value)
N = value.shape[0]
if mode == 'append':
cv.dvz_visual_data_append(self._c_visual, prop_type, idx, N, &value.data[0])
elif mode == 'partial' and drange is not None:
first_item, n_items = drange
assert first_item >= 0, "first item should be positive"
assert n_items > 0, "n_items should be strictly positive"
cv.dvz_visual_data_partial(
self._c_visual, prop_type, idx, first_item, n_items, N, &value.data[0])
else:
cv.dvz_visual_data(self._c_visual, prop_type, idx, N, &value.data[0])
def append(self, *args, **kwargs):
"""Add some data to a visual prop's data."""
return self.data(*args, **kwargs, mode='append')
def partial(self, *args, **kwargs):
"""Make a partial data update."""
return self.data(*args, **kwargs, mode='partial')
def texture(self, Texture tex, idx=0):
"""Attach a texture to a visual."""
# Bind the texture with the visual for the specified source.
cv.dvz_visual_texture(
self._c_visual, tex._c_source_type, idx, tex._c_texture)
def load_obj(self, unicode path, compute_normals=False):
"""Load a mesh from an OBJ file."""
# TODO: move to subclass Mesh?
cdef cv.DvzMesh mesh = cv.dvz_mesh_obj(path);
if compute_normals:
print("computing normals")
cv.dvz_mesh_normals(&mesh)
nv = mesh.vertices.item_count;
ni = mesh.indices.item_count;
cv.dvz_visual_data_source(self._c_visual, cv.DVZ_SOURCE_TYPE_VERTEX, 0, 0, nv, nv, mesh.vertices.data);
cv.dvz_visual_data_source(self._c_visual, cv.DVZ_SOURCE_TYPE_INDEX, 0, 0, ni, ni, mesh.indices.data);
# -------------------------------------------------------------------------------------------------
# GUI
# -------------------------------------------------------------------------------------------------
cdef class GuiControl:
"""A GUI control."""
cdef cv.DvzGui* _c_gui
cdef cv.DvzCanvas* _c_canvas
cdef cv.DvzGuiControl* _c_control
cdef unicode name
cdef unicode ctype
cdef bytes str_ascii
cdef object _callback
cdef create(self, cv.DvzGui* c_gui, cv.DvzGuiControl* c_control, unicode name, unicode ctype):
"""Create a GUI control."""
self._c_gui = c_gui
self._c_canvas = c_gui.canvas
assert self._c_canvas is not NULL
self._c_control = c_control
self.ctype = ctype
self.name = name
def get(self):
"""Get the current value."""
cdef void* ptr
ptr = cv.dvz_gui_value(self._c_control)
if self.ctype == 'input_float' or self.ctype == 'slider_float':
return (<float*>ptr)[0]
def set(self, obj):
"""Set the control's value."""
cdef void* ptr
cdef char* c_str
ptr = cv.dvz_gui_value(self._c_control)
if self.ctype == 'input_float' or self.ctype == 'slider_float':
(<float*>ptr)[0] = <float>float(obj)
elif self.ctype == 'slider_float2':
(<float*>ptr)[0] = <float>float(obj[0])
(<float*>ptr)[1] = <float>float(obj[1])
elif self.ctype == 'label':
self.str_ascii = obj.encode('ascii')
if len(self.str_ascii) >= 1024:
self.str_ascii = self.str_ascii[:1024]
c_str = self.str_ascii
# HACK: +1 for string null termination
memcpy(ptr, c_str, len(self.str_ascii) + 1)
else:
raise NotImplementedError(
f"Setting the value for a GUI control `{self.ctype}` is not implemented yet.")
def connect(self, f):
"""Bind a callback function to the control."""
self._callback = f
_add_event_callback(self._c_canvas, cv.DVZ_EVENT_GUI, 0, f, (self.name,))
return f
def press(self):
"""For buttons only: simulate a press."""
if self.ctype == 'button' and self._callback:
self._callback(False)
@property
def pos(self):
"""The x, y coordinates of the widget, in screen coordinates."""
cdef float x, y
x = self._c_control.pos[0]
y = self._c_control.pos[1]
return (x, y)
@property
def size(self):
"""The width and height of the widget, in screen coordinates."""
cdef float w, h
w = self._c_control.size[0]
h = self._c_control.size[1]
return (w, h)
cdef class Gui:
"""A GUI dialog."""
cdef cv.DvzCanvas* _c_canvas
cdef cv.DvzGui* _c_gui
cdef create(self, cv.DvzCanvas* c_canvas, cv.DvzGui* c_gui):
"""Create a GUI."""
self._c_canvas = c_canvas
self._c_gui = c_gui
def control(self, unicode ctype, unicode name, **kwargs):
"""Add a GUI control."""
ctrl = _CONTROLS.get(ctype, 0)
cdef char* c_name = name
cdef cv.DvzGuiControl* c
cdef cv.vec2 vec2_value
if (ctype == 'slider_float'):
c_vmin = kwargs.get('vmin', 0)
c_vmax = kwargs.get('vmax', 1)
c_value = kwargs.get('value', (c_vmin + c_vmax) / 2.0)
c = cv.dvz_gui_slider_float(self._c_gui, c_name, c_vmin, c_vmax, c_value)
elif (ctype == 'slider_float2'):
c_vmin = kwargs.get('vmin', 0)
c_vmax = kwargs.get('vmax', 1)
c_value = kwargs.get('value', (c_vmin, c_vmax))
c_force = kwargs.get('force_increasing', False)
vec2_value[0] = c_value[0]
vec2_value[1] = c_value[1]
c = cv.dvz_gui_slider_float2(self._c_gui, c_name, c_vmin, c_vmax, vec2_value, c_force)
elif (ctype == 'slider_int'):
c_vmin = kwargs.get('vmin', 0)
c_vmax = kwargs.get('vmax', 1)
c_value = kwargs.get('value', c_vmin)
c = cv.dvz_gui_slider_int(self._c_gui, c_name, c_vmin, c_vmax, c_value)
elif (ctype == 'input_float'):
c_step = kwargs.get('step', .1)
c_step_fast = kwargs.get('step_fast', 1)
c_value = kwargs.get('value', 0)
c = cv.dvz_gui_input_float(self._c_gui, c_name, c_step, c_step_fast, c_value)
elif (ctype == 'checkbox'):
c_value = kwargs.get('value', 0)
c = cv.dvz_gui_checkbox(self._c_gui, c_name, c_value)
elif (ctype == 'button'):
c = cv.dvz_gui_button(self._c_gui, c_name, 0)
elif (ctype == 'label'):
c_value = kwargs.get('value', "")
c = cv.dvz_gui_label(self._c_gui, c_name, c_value)
# Gui control object
w = GuiControl()
w.create(self._c_gui, c, name, ctype)
return w
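# Usage sketch (illustrative names/values, not from the original source):
# add a float slider to a GUI and bind a callback via GuiControl.connect().
# gui = canvas.gui('My dialog')
# slider = gui.control('slider_float', 'param', vmin=0.0, vmax=10.0, value=5.0)
# @slider.connect
# def on_change(value):
#     print('slider value:', value)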
# cython: profile=False
cimport cython
from libc.stdint cimport uint32_t
cdef uint32_t *CRC_TABLE = [0x00000000L, 0xf26b8303L, 0xe13b70f7L, 0x1350f3f4L, 0xc79a971fL,
0x35f1141cL, 0x26a1e7e8L, 0xd4ca64ebL, 0x8ad958cfL, 0x78b2dbccL,
0x6be22838L, 0x9989ab3bL, 0x4d43cfd0L, 0xbf284cd3L, 0xac78bf27L,
0x5e133c24L, 0x105ec76fL, 0xe235446cL, 0xf165b798L, 0x030e349bL,
0xd7c45070L, 0x25afd373L, 0x36ff2087L, 0xc494a384L, 0x9a879fa0L,
0x68ec1ca3L, 0x7bbcef57L, 0x89d76c54L, 0x5d1d08bfL, 0xaf768bbcL,
0xbc267848L, 0x4e4dfb4bL, 0x20bd8edeL, 0xd2d60dddL, 0xc186fe29L,
0x33ed7d2aL, 0xe72719c1L, 0x154c9ac2L, 0x061c6936L, 0xf477ea35L,
0xaa64d611L, 0x580f5512L, 0x4b5fa6e6L, 0xb93425e5L, 0x6dfe410eL,
0x9f95c20dL, 0x8cc531f9L, 0x7eaeb2faL, 0x30e349b1L, 0xc288cab2L,
0xd1d83946L, 0x23b3ba45L, 0xf779deaeL, 0x05125dadL, 0x1642ae59L,
0xe4292d5aL, 0xba3a117eL, 0x4851927dL, 0x5b016189L, 0xa96ae28aL,
0x7da08661L, 0x8fcb0562L, 0x9c9bf696L, 0x6ef07595L, 0x417b1dbcL,
0xb3109ebfL, 0xa0406d4bL, 0x522bee48L, 0x86e18aa3L, 0x748a09a0L,
0x67dafa54L, 0x95b17957L, 0xcba24573L, 0x39c9c670L, 0x2a993584L,
0xd8f2b687L, 0x0c38d26cL, 0xfe53516fL, 0xed03a29bL, 0x1f682198L,
0x5125dad3L, 0xa34e59d0L, 0xb01eaa24L, 0x42752927L, 0x96bf4dccL,
0x64d4cecfL, 0x77843d3bL, 0x85efbe38L, 0xdbfc821cL, 0x2997011fL,
0x3ac7f2ebL, 0xc8ac71e8L, 0x1c661503L, 0xee0d9600L, 0xfd5d65f4L,
0x0f36e6f7L, 0x61c69362L, 0x93ad1061L, 0x80fde395L, 0x72966096L,
0xa65c047dL, 0x5437877eL, 0x4767748aL, 0xb50cf789L, 0xeb1fcbadL,
0x197448aeL, 0x0a24bb5aL, 0xf84f3859L, 0x2c855cb2L, 0xdeeedfb1L,
0xcdbe2c45L, 0x3fd5af46L, 0x7198540dL, 0x83f3d70eL, 0x90a324faL,
0x62c8a7f9L, 0xb602c312L, 0x44694011L, 0x5739b3e5L, 0xa55230e6L,
0xfb410cc2L, 0x092a8fc1L, 0x1a7a7c35L, 0xe811ff36L, 0x3cdb9bddL,
0xceb018deL, 0xdde0eb2aL, 0x2f8b6829L, 0x82f63b78L, 0x709db87bL,
0x63cd4b8fL, 0x91a6c88cL, 0x456cac67L, 0xb7072f64L, 0xa457dc90L,
0x563c5f93L, 0x082f63b7L, 0xfa44e0b4L, 0xe9141340L, 0x1b7f9043L,
0xcfb5f4a8L, 0x3dde77abL, 0x2e8e845fL, 0xdce5075cL, 0x92a8fc17L,
0x60c37f14L, 0x73938ce0L, 0x81f80fe3L, 0x55326b08L, 0xa759e80bL,
0xb4091bffL, 0x466298fcL, 0x1871a4d8L, 0xea1a27dbL, 0xf94ad42fL,
0x0b21572cL, 0xdfeb33c7L, 0x2d80b0c4L, 0x3ed04330L, 0xccbbc033L,
0xa24bb5a6L, 0x502036a5L, 0x4370c551L, 0xb11b4652L, 0x65d122b9L,
0x97baa1baL, 0x84ea524eL, 0x7681d14dL, 0x2892ed69L, 0xdaf96e6aL,
0xc9a99d9eL, 0x3bc21e9dL, 0xef087a76L, 0x1d63f975L, 0x0e330a81L,
0xfc588982L, 0xb21572c9L, 0x407ef1caL, 0x532e023eL, 0xa145813dL,
0x758fe5d6L, 0x87e466d5L, 0x94b49521L, 0x66df1622L, 0x38cc2a06L,
0xcaa7a905L, 0xd9f75af1L, 0x2b9cd9f2L, 0xff56bd19L, 0x0d3d3e1aL,
0x1e6dcdeeL, 0xec064eedL, 0xc38d26c4L, 0x31e6a5c7L, 0x22b65633L,
0xd0ddd530L, 0x0417b1dbL, 0xf67c32d8L, 0xe52cc12cL, 0x1747422fL,
0x49547e0bL, 0xbb3ffd08L, 0xa86f0efcL, 0x5a048dffL, 0x8ecee914L,
0x7ca56a17L, 0x6ff599e3L, 0x9d9e1ae0L, 0xd3d3e1abL, 0x21b862a8L,
0x32e8915cL, 0xc083125fL, 0x144976b4L, 0xe622f5b7L, 0xf5720643L,
0x07198540L, 0x590ab964L, 0xab613a67L, 0xb831c993L, 0x4a5a4a90L,
0x9e902e7bL, 0x6cfbad78L, 0x7fab5e8cL, 0x8dc0dd8fL, 0xe330a81aL,
0x115b2b19L, 0x020bd8edL, 0xf0605beeL, 0x24aa3f05L, 0xd6c1bc06L,
0xc5914ff2L, 0x37faccf1L, 0x69e9f0d5L, 0x9b8273d6L, 0x88d28022L,
0x7ab90321L, 0xae7367caL, 0x5c18e4c9L, 0x4f48173dL, 0xbd23943eL,
0xf36e6f75L, 0x0105ec76L, 0x12551f82L, 0xe03e9c81L, 0x34f4f86aL,
0xc69f7b69L, 0xd5cf889dL, 0x27a40b9eL, 0x79b737baL, 0x8bdcb4b9L,
0x988c474dL, 0x6ae7c44eL, 0xbe2da0a5L, 0x4c4623a6L, 0x5f16d052L,
0xad7d5351L]
# initial CRC value
cdef uint32_t CRC_INIT = 0
cdef uint32_t _MASK = 0xFFFFFFFFL
#cdef uint32_t crc_update(uint32_t crc, bytes data):
# cdef char b
# cdef int table_index
#
# crc = crc ^ _MASK
# for b in data:
# table_index = (crc ^ b) & 0xff
# crc = (CRC_TABLE[table_index] ^ (crc >> 8)) & _MASK
# return crc ^ _MASK
@cython.wraparound(False)
@cython.boundscheck(False)
cdef inline uint32_t crc_update(uint32_t crc, char *data, size_t n) nogil:
cdef char b
cdef int table_index
cdef size_t i
crc = crc ^ _MASK
for i in range(n):
b = data[i]
table_index = (crc ^ b) & 0xff
crc = (CRC_TABLE[table_index] ^ (crc >> 8)) & _MASK
return crc ^ _MASK
@cython.wraparound(False)
@cython.boundscheck(False)
cdef inline uint32_t crc_finalize(uint32_t crc) nogil:
return crc & _MASK
@cython.wraparound(False)
@cython.boundscheck(False)
cdef inline uint32_t crc32c(char *data, size_t n) nogil:
return crc_finalize(crc_update(CRC_INIT, data, n))
@cython.wraparound(False)
@cython.boundscheck(False)
cpdef uint32_t masked_crc32c(bytes data):
cdef uint32_t crc = crc32c(data, len(data))
return (((crc >> 15) | (crc << 17)) + 0xa282ead8) & 0xffffffff
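# Note: this is the "masked" CRC-32C scheme used by record formats such as
# TFRecord (and LevelDB's log format): the raw CRC is rotated right by 15
# bits and offset by the constant 0xa282ead8, which makes it safe to store
# the CRC alongside data that may itself contain embedded CRCs.
# Usage sketch (illustrative): masked_crc32c(b'hello world') returns a
# deterministic 32-bit unsigned value.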
<|end_of_text|>cdef extern from "zoltan_types.h":
# basic type used by all of Zoltan
ctypedef unsigned int ZOLTAN_ID_TYPE
# MPI data type
cdef unsigned int ZOLTAN_ID_MPI_TYPE
# pointer to the basic type
ctypedef ZOLTAN_ID_TYPE* ZOLTAN_ID_PTR
# /*****************************************************************************/
# /*
# * Error codes for Zoltan library
# * ZOLTAN_OK - no errors
# * ZOLTAN_WARN - some warning occurred in Zoltan library;
# * application should be able to continue running
# * ZOLTAN_FATAL - a fatal error occurred
# * ZOLTAN_MEMERR - memory allocation failed; with this error, it could be
# * possible to try a different, more memory-friendly,
# * algorithm.
# */
# /*****************************************************************************/
cdef int ZOLTAN_OK
cdef int ZOLTAN_WARN
cdef int ZOLTAN_FATAL
cdef int ZOLTAN_MEMERR
# /*****************************************************************************/
# /* Hypergraph query function types
# */
# /*****************************************************************************/
cdef int _ZOLTAN_COMPRESSED_EDGE
cdef int _ZOLTAN_COMPRESSED_VERTEX
cdef extern from 'ql/math/interpolations/backwardflatinterpolation.hpp' namespace 'QuantLib' nogil:
cdef cppclass BackwardFlat:
pass
cdef extern from 'ql/math/interpolations/loginterpolation.hpp' namespace 'QuantLib' nogil:
cdef cppclass LogLinear:
pass
cdef extern from 'ql/math/interpolations/linearinterpolation.hpp' namespace 'QuantLib' nogil:
cdef cppclass Linear:
pass
cdef extern from 'ql/math/interpolations/bilinearinterpolation.hpp' namespace 'QuantLib' nogil:
cdef cppclass Bilinear:
pass
cdef extern from 'ql/math/interpolations/bicubicsplineinterpolation.hpp' namespace 'QuantLib' nogil:
cdef cppclass Bicubic:
pass
cdef extern from 'ql/math/interpolations/sabrinterpolation.hpp' namespace 'QuantLib' nogil:
cdef cppclass SABRInterpolation:
pass
cdef extern from 'ql/math/interpolations/cubicinterpolation.hpp' namespace 'QuantLib' nogil:
cdef cppclass Cubic:
pass
<|end_of_text|>print("hello extension")
<|end_of_text|>cimport cython as ct
from cython.parallel cimport prange
cimport numpy as np
import numpy as np
from cython cimport parallel
from libc cimport math as cmath
import multiprocessing as mp
cdef int nproc = mp.cpu_count()
@ct.boundscheck(False)
@ct.wraparound(False)
def projectCluster( clusters, id, output, axis ):
assert( len(clusters.shape) == 3 )
assert( len(output.shape) == 2)
assert( clusters.shape[0] == clusters.shape[1] )
assert( clusters.shape[0] == clusters.shape[2] )
assert( clusters.shape[0] == output.shape[0] )
assert( clusters.shape[0] == output.shape[1] )
cdef np.ndarray[np.uint8_t, ndim=3] clustersC = clusters
cdef int idC = id
cdef np.ndarray[np.uint8_t, ndim=2] outputC = output
cdef int N = clusters.shape[0]
cdef int ix, iy, iz
for ix in range(N):
for iy in range(N):
for iz in range(N):
if ( clustersC[ix,iy,iz] == idC and axis==0 ):
outputC[iy,iz] = outputC[iy,iz]+1
elif ( clustersC[ix,iy,iz] == idC and axis==1 ):
outputC[ix,iz] = outputC[ix,iz]+1
elif ( clustersC[ix,iy,iz] == idC and axis==2 ):
outputC[ix,iy] = outputC[ix,iy]+1
return outputC
@ct.boundscheck(False)
@ct.wraparound(False)
def averageAzimuthalClusterWeightsAroundX( clusters, clusterID, output, mask ):
assert( len(output.shape) == 2 )
assert( len(clusters.shape) == 3 )
assert( clusters.shape[0] == clusters.shape[1] )
assert( clusters.shape[0] == clusters.shape[2] )
assert( clusters.shape[0] == output.shape[0] )
assert( clusters.shape[0] == output.shape[1] )
cdef np.ndarray[np.float64_t, ndim=2] outputR = output
cdef np.ndarray[np.uint8_t,ndim=3] clustersR = clusters
cdef np.ndarray[np.uint8_t,ndim=3] maskC = mask
cdef int N = clusters.shape[0]
cdef int cID = clusterID
cdef float qy, qz, qperp, weight
cdef int qperpInt
cdef int center = N/2
for ix in range(N):
for iy in range(N):
for iz in range(N):
if ( maskC[ix,iy,iz] == 0 ):
continue
if ( clustersR[ix,iy,iz] == cID ):
qy = iy-N/2
qz = iz-N/2
qperp = cmath.sqrt(qy*qy+qz*qz)
qperpInt = <int>qperp # Rounds to integer below
if ( qperpInt < (N-1)/2 ):
weight = qperp-qperpInt
outputR[ix,center+qperpInt] = outputR[ix,center+qperpInt]+1.0-weight
outputR[ix,center+qperpInt+1] = outputR[ix,center+qperpInt+1] + weight
elif ( qperpInt < N/2 ):
outputR[ix,center+qperpInt] = outputR[ix,center+qperpInt] + 1.0
return output
@ct.boundscheck(False)
@ct.wraparound(False)
def azimuthalAverageX( data3D, output, mask ):
assert( len(data3D.shape) == 3 )
assert( len(output.shape) == 2 )
assert( data3D.shape[0] == data3D.shape[1] )
assert( data3D.shape[0] == data3D.shape[2] )
assert( data3D.shape[0] == output.shape[0] )
assert( data3D.shape[0] == output.shape[1] )
cdef np.ndarray[np.float64_t,ndim=3] data3DC = data3D
cdef np.ndarray[np.float64_t,ndim=2] outputC = output
cdef np.ndarray[np.uint8_t,ndim=3] maskC = mask
cdef int N = data3D.shape[0]
cdef float qy, qz, qperp, weight
cdef int qperpInt
    cdef int ix, iy, iz
    cdef int center = N // 2
for ix in range(N):
for iy in range(N):
for iz in range(N):
                if ( maskC[ix,iy,iz] == 0 ):
continue
qy = iy-N/2
qz = iz-N/2
qperp = cmath.sqrt( qy*qy + qz*qz )
qperpInt = <int>qperp # Rounds to integer below
if ( qperpInt < N-1 ):
                    weight = qperp-qperpInt
                    outputC[ix,center+qperpInt] = outputC[ix,center+qperpInt] + data3DC[ix,iy,iz]*(1.0-weight)
outputC[ix,center+qperpInt+1] = outputC[ix,center+qperpInt+1] + data3DC[ix,iy,iz]*weight
elif ( qperpInt < N ):
outputC[ix,center+qperpInt] = outputC[ix,center+qperpInt] + data3DC[ix,iy,iz]
return output
@ct.boundscheck(False)
@ct.wraparound(False)
def maskedSumOfSquares( reference, data, mask ):
cdef np.ndarray[np.float64_t] ref = reference.ravel()
cdef np.ndarray[np.float64_t] dataR = data.ravel()
cdef np.ndarray[np.uint8_t] maskR = mask.ravel()
cdef int N = data.size
cdef int i
cdef np.ndarray[np.float64_t] sumsq = np.zeros(nproc)
for i in prange(N, nogil=True, num_threads=nproc):
if ( maskR[i] == 1 ):
sumsq[parallel.threadid()] = sumsq[parallel.threadid()] + cmath.pow( ref[i]-dataR[i], 2 )
return np.sum(sumsq)
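# Usage sketch (illustrative, not part of the original module): the helpers
# above expect C-contiguous arrays whose dtypes match the buffer signatures
# (uint8 clusters/masks, float64 data/outputs) and a cubic N*N*N grid with a
# matching N*N output.
def demoProjectCluster(int N=8):
    clusters = np.zeros((N, N, N), dtype=np.uint8)
    clusters[0, 1, 2] = 3
    output = np.zeros((N, N), dtype=np.uint8)
    # Project cluster id 3 along the x axis (axis=0)
    return projectCluster(clusters, 3, output, 0)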
# distutils: language = c++
#
# Copyright 2015-2020 CNRS-UM LIRMM, CNRS-AIST JRL
#
cimport mc_control.c_mc_control as c_mc_control
cimport eigen.eigen as eigen
cimport sva.c_sva as c_sva
cimport sva.sva as sva
cimport mc_observers.c_mc_observers as c_mc_observers
cimport mc_observers.mc_observers as mc_observers
cimport mc_rbdyn.c_mc_rbdyn as c_mc_rbdyn
cimport mc_rbdyn.mc_rbdyn as mc_rbdyn
cimport mc_solver.mc_solver as mc_solver
cimport mc_tasks.mc_tasks as mc_tasks
cimport mc_rtc.mc_rtc as mc_rtc
cimport mc_rtc.gui.gui as mc_rtc_gui
from cython.operator cimport preincrement as preinc
from cython.operator cimport dereference as deref
from libcpp.map cimport map as cppmap
from libcpp.string cimport string
from libcpp.vector cimport vector
from libcpp cimport bool as cppbool
import warnings
def deprecated():
warnings.simplefilter('always', category=DeprecationWarning)
warnings.warn("This call is deprecated", DeprecationWarning)
warnings.simplefilter('ignore', category=DeprecationWarning)
cdef class ControllerResetData(object):
def __cinit__(self):
pass
property q:
def __get__(self):
return self.impl.q
cdef ControllerResetData ControllerResetDataFromPtr(c_mc_control.ControllerResetData * p):
cdef ControllerResetData ret = ControllerResetData()
ret.impl = p
return ret
cdef class Contact(object):
def __ctor__(self, r1, r2, r1Surface, r2Surface, friction = mc_rbdyn.Contact.defaultFriction, eigen.Vector6d dof = None):
if isinstance(r1, unicode):
r1 = r1.encode(u'ascii')
if isinstance(r1Surface, unicode):
r1Surface = r1Surface.encode(u'ascii')
if isinstance(r2, unicode):
r2 = r2.encode(u'ascii')
if isinstance(r2Surface, unicode):
r2Surface = r2Surface.encode(u'ascii')
if dof is None:
self.impl = c_mc_control.Contact(r1, r2, r1Surface, r2Surface, friction)
else:
self.impl = c_mc_control.Contact(r1, r2, r1Surface, r2Surface, friction, dof.impl)
def __cinit__(self, *args):
if len(args) > 0:
self.__ctor__(*args)
property r1:
def __get__(self):
if self.impl.r1.has_value():
return self.impl.r1.value()
else:
return None
def __set__(self, r1):
if isinstance(r1, unicode):
r1 = r1.encode(u'ascii')
self.impl.r1 = <string>(r1)
property r1Surface:
def __get__(self):
return self.impl.r1Surface
def __set__(self, r1Surface):
if isinstance(r1Surface, unicode):
r1Surface = r1Surface.encode(u'ascii')
self.impl.r1Surface = r1Surface
property r2:
def __get__(self):
if self.impl.r2.has_value():
return self.impl.r2.value()
else:
return None
def __set__(self, r2):
if isinstance(r2, unicode):
r2 = r2.encode(u'ascii')
self.impl.r2 = <string>(r2)
property r2Surface:
def __get__(self):
return self.impl.r2Surface
def __set__(self, r2Surface):
if isinstance(r2Surface, unicode):
r2Surface = r2Surface.encode(u'ascii')
self.impl.r2Surface = r2Surface
property friction:
def __get__(self):
return self.impl.friction
def __set__(self, friction):
self.impl.friction = friction
property dof:
def __get__(self):
return eigen.Vector6dFromC(self.impl.dof)
def __set__(self, dof):
if isinstance(dof, eigen.Vector6d):
self.impl.dof = (<eigen.Vector6d>dof).impl
else:
self.dof = eigen.Vector6d(dof)
cdef Contact ContactFromC(const c_mc_control.Contact & c):
cdef Contact ret = Contact()
ret.impl = c
return ret
cdef class MCController(object):
def __cinit__(self):
pass
def run(self):
return self.base.run()
def reset(self, ControllerResetData data):
self.base.reset(deref(data.impl))
def env(self):
return mc_rbdyn.RobotFromC(self.base.env())
def robots(self):
return mc_rbdyn.RobotsFromRef(self.base.robots())
def supported_robots(self):
supported = []
self.base.supported_robots(supported)
return supported
def config(self):
return mc_rtc.ConfigurationFromRef(self.base.config())
def logger(self):
return mc_rtc.LoggerFromRef(self.base.logger())
def gui(self):
return mc_rtc_gui.StateBuilderFromShPtr(self.base.gui())
property timeStep:
def __get__(self):
return self.base.timeStep
property contactConstraint:
def __get__(self):
return mc_solver.ContactConstraintFromPtr(self.base.contactConstraint.get())
property dynamicsConstraint:
def __get__(self):
return mc_solver.DynamicsConstraintFromPtr(self.base.dynamicsConstraint.get())
property kinematicsConstraint:
def __get__(self):
return mc_solver.KinematicsConstraintFromPtr(self.base.kinematicsConstraint.get())
property selfCollisionConstraint:
def __get__(self):
return mc_solver.CollisionsConstraintFromPtr(self.base.selfCollisionConstraint.get())
property postureTask:
def __get__(self):
return mc_tasks.PostureTaskFromPtr(self.base.postureTask.get())
property qpsolver:
def __get__(self):
return mc_solver.QPSolverFromRef(self.base.solver())
def hasObserverPipeline(self, name):
if isinstance(name, unicode):
name = name.encode(u'ascii')
return self.base.hasObserverPipeline(name)
def observerPipeline(self, name = None):
if isinstance(name, unicode):
name = name.encode(u'ascii')
if name is None:
return mc_observers.ObserverPipelineFromRef(self.base.observerPipeline())
else:
return mc_observers.ObserverPipelineFromRef(self.base.observerPipeline(name))
def observerPipelines(self):
it = self.base.observerPipelines().begin()
end = self.base.observerPipelines().end()
ret = []
        while it != end:
ret.append(mc_observers.ObserverPipelineFromRef(deref(it)))
preinc(it)
return ret
def addCollisions(self, r1, r2, collisions):
assert(all([isinstance(col, mc_rbdyn.Collision) for col in collisions]))
cdef vector[c_mc_rbdyn.Collision] cols
if isinstance(r1, unicode):
r1 = r1.encode(u'ascii')
if isinstance(r2, unicode):
r2 = r2.encode(u'ascii')
for col in collisions:
cols.push_back((<mc_rbdyn.Collision>col).impl)
self.base.addCollisions(r1, r2, cols)
def removeCollisions(self, r1, r2, collisions = None):
cdef vector[c_mc_rbdyn.Collision] cols
if isinstance(r1, unicode):
r1 = r1.encode(u'ascii')
if isinstance(r2, unicode):
r2 = r2.encode(u'ascii')
if collisions is None:
self.base.removeCollisions(r1, r2)
else:
            for col in collisions:
                cols.push_back((<mc_rbdyn.Collision>col).impl)
self.base.removeCollisions(r1, r2, cols)
def hasRobot(self, name):
if isinstance(name, unicode):
name = name.encode(u'ascii')
return self.base.hasRobot(name)
def robot(self, name = None):
if isinstance(name, unicode):
name = name.encode(u'ascii')
if name is None:
return mc_rbdyn.RobotFromC(self.base.robot())
else:
return mc_rbdyn.RobotFromC(self.base.robot(name))
def addContact(self, c, *args):
if isinstance(c, Contact):
assert len(args) == 0, "addContact takes either an mc_control.Contact object or arguments for its construction"
self.base.addContact((<Contact>c).impl)
else:
self.addContact(Contact(c, *args))
def removeContact(self, c, *args):
if isinstance(c, Contact):
assert len(args) == 0, "removeContact takes either an mc_control.Contact object or arguments for its construction"
self.base.removeContact((<Contact>c).impl)
else:
self.removeContact(Contact(c, *args))
def contacts(self):
cdef c_mc_control.ContactSet cs = self.base.contacts()
return [ContactFromC(c) for c in cs]
    def hasContact(self, Contact c):
        return self.base.hasContact(c.impl)
cdef MCController MCControllerFromPtr(c_mc_control.MCController * p):
cdef MCController ret = MCController()
ret.base = p
return ret
cdef class PythonRWCallback(object):
def __cinit__(self, succ, out):
self.impl.success = succ
self.impl.out = out
property success:
def __get__(self):
return self.impl.success
def __set__(self, value):
self.impl.success = value
property out:
def __get__(self):
return self.impl.out
def __set__(self, value):
self.impl.out = value
cdef cppbool python_to_run_callback(void * f) except+ with gil:
return (<object>f).run_callback()
cdef void python_to_reset_callback(const c_mc_control.ControllerResetData & crd, void * f) except+ with gil:
(<object>f).reset_callback(ControllerResetDataFromPtr(&(c_mc_control.const_cast_crd(crd))))
cdef c_sva.PTransformd python_af_callback(callback, const c_mc_rbdyn.Robot & robot) except+ with gil:
cdef mc_rbdyn.Robot r = mc_rbdyn.RobotFromC(robot)
cdef sva.PTransformd af = callback(r)
return deref(af.impl)
cdef class MCPythonController(MCController):
AF_CALLBACKS = []
def __dealloc__(self):
del self.impl
self.impl = self.base = NULL
def __cinit__(self, robot_modules, double dt):
cdef mc_rbdyn.RobotModuleVector rmv = mc_rbdyn.RobotModuleVector(robot_modules)
self.impl = self.base = new c_mc_control.MCPythonController(rmv.v, dt)
try:
self.run_callback
c_mc_control.set_run_callback(deref(self.impl), &python_to_run_callback, <void*>(self))
except AttributeError:
raise TypeError("You need to implement a run_callback method in your object")
try:
self.reset_callback
c_mc_control.set_reset_callback(deref(self.impl), &python_to_reset_callback, <void*>(self))
except AttributeError:
pass
def addAnchorFrameCallback(self, name, callback):
if isinstance(name, unicode):
name = name.encode(u'ascii')
MCPythonController.AF_CALLBACKS.append(callback)
c_mc_control.add_anchor_frame_callback(deref(self.impl), <string>(name), &python_af_callback, callback)
def removeAnchorFrameCallback(self, name):
if isinstance(name, unicode):
name = name.encode(u'ascii')
c_mc_control.remove_anchor_frame_callback(deref(self.impl), name)
cdef class MCGlobalController(object):
def __dealloc__(self):
del self.impl
self.impl = NULL
def __cinit_simple__(self):
self.impl = new c_mc_control.MCGlobalController()
def __cinit_conf__(self, conf):
if isinstance(conf, unicode):
conf = conf.encode(u'ascii')
self.impl = new c_mc_control.MCGlobalController(conf)
def __cinit_full__(self, conf, mc_rbdyn.RobotModule rm):
if isinstance(conf, unicode):
conf = conf.encode(u'ascii')
self.impl = new c_mc_control.MCGlobalController(conf, rm.impl)
def __cinit__(self, *args):
if len(args) == 0:
self.__cinit_simple__()
elif len(args) == 1:
self.__cinit_conf__(args[0])
elif len(args) == 2:
self.__cinit_full__(args[0], args[1])
else:
raise TypeError("Wrong arguments passed to MCGlobalController ctor")
def init(self, q, pos = None):
cdef c_mc_control.array7d p = c_mc_control.array7d()
if pos is None:
self.impl.init(q)
else:
assert(len(pos) == 7)
            for i, pi in enumerate(pos):
                p[i] = pi
self.impl.init(q, p)
def setSensorPosition(self, eigen.Vector3d p):
self.impl.setSensorPosition(p.impl)
def setSensorOrientation(self, eigen.Quaterniond q):
self.impl.setSensorOrientation(q.impl)
def setSensorLinearVelocity(self, eigen.Vector3d lv):
self.impl.setSensorLinearVelocity(lv.impl)
def setSensorAngularVelocity(self, eigen.Vector3d av):
self.impl.setSensorAngularVelocity(av.impl)
def setSensorAcceleration(self, eigen.Vector3d a):
deprecated()
self.impl.setSensorLinearAcceleration(a.impl)
def setSensorLinearAcceleration(self, eigen.Vector3d a):
self.impl.setSensorLinearAcceleration(a.impl)
def setEncoderValues(self, q):
self.impl.setEncoderValues(q)
def setEncoderVelocities(self, alpha):
self.impl.setEncoderVelocities(alpha)
def setJointTorques(self, tau):
self.impl.setJointTorques(tau)
def setWrenches(self, wrenchesIn):
cdef cppmap[string, c_sva.ForceVecd] wrenches = cppmap[string, c_sva.ForceVecd]()
        for sensor, w in wrenchesIn.items():
if not isinstance(w, sva.ForceVecd):
w = sva.ForceVecd(w)
wrenches[sensor] = deref((<sva.ForceVecd>(w)).impl)
self.impl.setWrenches(wrenches)
def run(self):
return self.impl.run()
def timestep(self):
return self.impl.timestep()
def controller(self):
return MCControllerFromPtr(&(self.impl.controller()))
def ref_joint_order(self):
return self.impl.ref_joint_order()
def robot(self):
return mc_rbdyn.RobotFromC(self.impl.robot())
property running:
def __get__(self):
return self.impl.running
def __set__(self, b):
self.impl.running = b
cdef class ElementId(object):
def __cinit__(self, category, name):
if isinstance(name, unicode):
name = name.encode(u'ascii')
self.impl = c_mc_control.ElementId([s.encode(u'ascii') if isinstance(s, unicode) else s for s in category], name)
property category:
def __get__(self):
return self.impl.category
property name:
def __get__(self):
return self.impl.name
cdef class ControllerClient(object):
def __cinit__(self, sub_conn_uri, push_conn_uri, timeout = 0.0):
if isinstance(sub_conn_uri, unicode):
sub_conn_uri = sub_conn_uri.encode(u'ascii')
if isinstance(push_conn_uri, unicode):
push_conn_uri = push_conn_uri.encode(u'ascii')
self.impl = new c_mc_control.ControllerClient(sub_conn_uri, push_conn_uri, timeout)
def send_request(self, element_id, data = None):
if data is None:
deref(self.impl).send_request((<ElementId>element_id).impl)
else:
deref(self.impl).send_request((<ElementId>element_id).impl, deref((<mc_rtc.Configuration>data).impl))
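# Usage sketch (assumption: a valid mc_rtc configuration and robot module are
# installed, and initial_q matches the robot's reference joint order):
def demo_controller_step(initial_q):
    gc = MCGlobalController()
    gc.init(initial_q)
    gc.running = True
    return gc.run(), gc.timestep()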
cimport pyftw
from pyftw cimport ftw, nftw, stat, FTW, dev_t, ino_t, mode_t, nlink_t, uid_t, gid_t, off_t, blksize_t, blkcnt_t, time_t
__doc__ = "Primitive ftw.h wrapper"
# Flags
cpdef enum:
FTW_F = 0
FTW_D = 1
FTW_DNR = 2
FTW_NS = 3
FTW_SL = 4
FTW_DP = 5
FTW_SLN = 6
cpdef enum:
FTW_PHYS = 1
FTW_MOUNT = 2
FTW_CHDIR = 4
    FTW_DEPTH = 8
FTW_ACTIONRETVAL = 16
# Service wrappers
cdef class Stat:
cdef public dev_t st_dev
cdef public ino_t st_ino
cdef public mode_t st_mode
cdef public nlink_t st_nlink
cdef public uid_t st_uid
cdef public gid_t st_gid
cdef public dev_t st_rdev
cdef public off_t st_size
cdef public blksize_t st_blksize
cdef public blkcnt_t st_blocks
cdef public time_t st_atim
cdef public time_t st_mtim
cdef public time_t st_ctim
cdef fill(self, const stat *sb):
self.st_dev = sb.st_dev
self.st_ino = sb.st_ino
self.st_mode = sb.st_mode
self.st_nlink = sb.st_nlink
self.st_uid = sb.st_uid
self.st_gid = sb.st_gid
self.st_rdev = sb.st_rdev
self.st_size = sb.st_size
self.st_blksize = sb.st_blksize
self.st_blocks = sb.st_blocks
self.st_atim = sb.st_atime
self.st_mtim = sb.st_mtime
        self.st_ctim = sb.st_ctime
        return self
cdef class Nftw:
cdef public int base
cdef public int level
cdef fill(self, FTW *ftwbuf):
self.base = ftwbuf.base
        self.level = ftwbuf.level
        return self
# Globals for python callbacks
cdef ftw_fn
cdef nftw_fn
# C callbacks
cdef int ftw_callback(const char *fpath, const stat *sb, int typeflag):
return ftw_fn(fpath, Stat().fill(sb), typeflag)
cdef int nftw_callback(const char *fpath, const stat *sb, int typeflag, FTW *ftwbuf):
return nftw_fn(fpath, Stat().fill(sb), typeflag, Nftw().fill(ftwbuf))
# Wrappers
cpdef int py_ftw(const char *dirpath, fn, int nopenfd):
'''py_ftw(dirpath, fn, nopenfd)\n\nPerform file tree walk (ftw wrapper)'''
global ftw_fn
ftw_fn = fn
return ftw(dirpath, ftw_callback, nopenfd)
cpdef int py_nftw(const char *dirpath, fn, int nopenfd, int flags):
'''py_nftw(dirpath, fn, nopenfd, flags)\n\nPerform file tree walk (nftw wrapper)'''
global nftw_fn
nftw_fn = fn
return nftw(dirpath, nftw_callback, nopenfd, flags)
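# Usage sketch (illustrative): walk a tree with py_nftw and collect entries.
# The callback must return 0 to continue the walk; dirpath is a C string, so
# bytes must be passed.
def demo_walk(bytes dirpath):
    visited = []
    def on_entry(fpath, st, typeflag, info):
        visited.append((fpath, st.st_size, info.level))
        return 0
    py_nftw(dirpath, on_entry, 16, FTW_PHYS)
    return visited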
import numpy as _N
from cython.parallel import prange, parallel
from libc.math cimport sin, fabs
def calc_using_prange(int rep, int TR, int N):
cdef int tr, i, r
cdef double tot
outs = _N.empty(rep*TR)
cdef double[::1] v_outs = outs
cdef double *p_outs = &v_outs[0]
with nogil:
for r from 0 <= r < rep:
for tr in prange(TR):
p_outs[r*TR+tr] = FFdv_new(N, tr, r)
return outs
def calc_using_range(int rep, int TR, int N):
cdef int tr, i, r
cdef double tot
outs = _N.empty(rep*TR)
cdef double[::1] v_outs = outs
cdef double *p_outs = &v_outs[0]
with nogil:
for r from 0 <= r < rep:
for tr in range(TR):
p_outs[r*TR+tr] = FFdv_new(N, tr, r)
return outs
cdef double FFdv_new(int _Np1, int r, int tr) nogil:
cdef double tot = 1
    cdef int n1, n2
for n1 from 1 <= n1 < _Np1:
for n2 from 1 <= n2 < _Np1:
            tot += fabs(sin((n1 + n2) / 1000.0) + tr + r) * 0.001
return tot
"""
def doit(int rep, int TR, int N):
cdef int tr, i, r
with nogil:
for r from 0 <= r < rep:
for tr in prange(TR):
FFdv_new(N)
cdef void FFdv_new(int _Np1) nogil:
cdef double tot = 1
cdef int n1, n2, n3
for n1 from 1 <= n1 < _Np1:
for n2 from 1 <= n2 < _Np1:
for n3 from 1 <= n3 < _Np1:
tot += (n1 + n2) / 100 + 200 + n3
"""
#cython: language_level=3
import os
from pysam import AlignmentFile
__all__ = ['Bri', 'bench']
"""
Expose bare minimum from jts/bri headers
"""
cdef extern from "bri_index.h":
struct bam_read_idx_record:
size_t file_offset
struct bam_read_idx:
bam_read_idx_record* records
bam_read_idx*bam_read_idx_load(const char* input_bam)
void bam_read_idx_build(const char* input_bam)
void bam_read_idx_destroy(bam_read_idx* bri)
cdef extern from "bri_get.h":
void bam_read_idx_get_range(const bam_read_idx* bri,
const char* readname,
bam_read_idx_record** start,
bam_read_idx_record** end)
cdef extern from "bri_benchmark.h":
int bam_read_idx_benchmark_main(int argc, char** argv)
def bench(input_bam):
    # a char** argument cannot be built from a Python list directly; keep the
    # byte strings alive for the duration of the C call
    cdef char* argv[2]
    prog = b'benchmark'
    bam = input_bam.encode('utf-8')
    argv[0] = prog
    argv[1] = bam
    bam_read_idx_benchmark_main(2, argv)
cdef class Bri:
""" Wrapper class for Jared's bri, supports creating and reading.bri index and extracting reads using pysam
Attributes:
index (bam_read_idx*): Bri index instance
input_bam_path (bytes): Path to bam file
input_bri_path (bytes): Path to bri file
hts (pysam.AlignmentFile): Bam file instance
"""
cdef:
bam_read_idx*index
object hts
bytes input_bam_path
bytes input_bri_path
def __init__(self, input_bam):
"""
Args:
            input_bam (str): Path to .bam file
"""
self.input_bam_path = input_bam.encode('utf-8')
self.input_bri_path = (input_bam + '.bri').encode('utf-8')
if not os.path.exists(self.input_bam_path):
raise IOError("Bam file does not exist")
def create(self):
""" Create bri index for bam file by calling bam_read_idx_build and bam_read_idx_save. Index is immediately
destroyed, call load() after this to recycle this object.
"""
bam_read_idx_build(self.input_bam_path)
# noinspection PyAttributeOutsideInit
def load(self):
""" Load the index from.bri file """
if not os.path.exists(self.input_bri_path):
# Avoid exit() calls in bri_index.c
raise IOError("Bri file does not exist")
        self.index = bam_read_idx_load(self.input_bam_path)  # load .bri index
        self.hts = AlignmentFile(self.input_bam_path, 'rb')  # open .bam file
def get(self, read_name):
""" Get reads for read_name from the bam file using.bri index
Args:
read_name (str): Reads to search for
Yields:
pysam.AlignedSegment: found reads
"""
if not self.index:
raise ValueError('Bri index is not loaded, call load() function first')
cdef:
bam_read_idx_record*start
bam_read_idx_record*end
bam_read_idx_get_range(self.index, read_name.encode('utf-8'), &start, &end)
hts_iter = iter(self.hts)
        while start != end:
self.hts.seek(start.file_offset)
read = next(hts_iter)
yield read
start += 1
def __dealloc__(self):
""" Proper cleanup """
if self.index:
bam_read_idx_destroy(self.index)
if self.hts:
self.hts.close()
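# Usage sketch (assumption: input_bam names an existing bam file): build the
# .bri index once, then load it and fetch all alignments for one read name.
def demo_bri(input_bam, read_name):
    bri = Bri(input_bam)
    bri.create()  # writes input_bam + '.bri'
    bri.load()    # loads the index and opens the bam
    return list(bri.get(read_name))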
# This is an example of a pure Python function
def getForce():
# Write your code here, then delete the "pass" statement below
    pass
# cython: profile=True
#cimport cython
#cimport dilap.core.tools as dpr
cimport dilap.geometry.tools as gtl
from dilap.geometry.vec3 cimport vec3
from libc.math cimport sqrt
from libc.math cimport cos
from libc.math cimport sin
from libc.math cimport tan
from libc.math cimport acos
from libc.math cimport asin
from libc.math cimport hypot
cimport numpy
import numpy
stuff = 'hi'
__doc__ = '''dilapidator\'s implementation of a quaternion in R3'''
# dilapidators implementation of a quaternion in R3
cdef class quat:
###########################################################################
### python object overrides ###############################################
###########################################################################
def __str__(self):return 'quat:'+str(tuple(self))
def __repr__(self):return 'quat:'+str(tuple(self))
def __iter__(self):yield self.w;yield self.x;yield self.y;yield self.z
def __mul__(self,o):return self.mul(o)
def __add__(self,o):return self.add(o)
def __sub__(self,o):return self.sub(o)
def __is_equal(self,o):return self.isnear(o)
def __richcmp__(x,y,op):
if op == 2:return x.__is_equal(y)
else:assert False
###########################################################################
###########################################################################
### c methods #############################################################
###########################################################################
def __cinit__(self,float w,float x,float y,float z):
self.w = w
self.x = x
self.y = y
self.z = z
# given an angle a and axis v, modify to represent
# a rotation by a around v and return self
# NOTE: negative a still maps to positive self.w
cdef quat av_c(self,float a,vec3 v):
cdef float a2 = a/2.0
cdef float sa = sin(a2)
cdef float vm = v.mag_c()
self.w = cos(a2)
self.x = v.x*sa/vm
self.y = v.y*sa/vm
self.z = v.z*sa/vm
return self
# modify to represent a rotation
# between two vectors and return self
cdef quat uu_c(self,vec3 x,vec3 y):
cdef float a = x.ang_c(y)
cdef vec3 v
if a == 0.0:self.w = 1;self.x = 0;self.y = 0;self.z = 0
else:
v = x.crs_c(y)
return self.av_c(a,v)
# set self to the quaternion that rotates v to (0,0,1) wrt the xy plane
cdef quat toxy_c(self,vec3 v):
if v.isnear(vec3(0,0,-1)):
self.w = 0;self.x = 1;self.y = 0;self.z = 0
elif not v.isnear(vec3(0,0,1)):self.uu_c(v,vec3(0,0,1))
else:self.av_c(0,vec3(0,0,1))
return self
# return an independent copy of this quaternion
cdef quat cp_c(self):
cdef quat n = quat(self.w,self.x,self.y,self.z)
return n
# return an independent flipped copy of this quaternion
cdef quat cpf_c(self):
cdef quat n = quat(-self.w,self.x,self.y,self.z)
return n
# is quat o within a very small neighborhood of self
cdef bint isnear_c(self,quat o):
cdef float dw = (self.w-o.w)
if dw*dw > gtl.epsilonsq_c:return 0
cdef float dx = (self.x-o.x)
if dx*dx > gtl.epsilonsq_c:return 0
cdef float dy = (self.y-o.y)
if dy*dy > gtl.epsilonsq_c:return 0
cdef float dz = (self.z-o.z)
if dz*dz > gtl.epsilonsq_c:return 0
return 1
# return the squared magintude of self
cdef float mag2_c(self):
cdef float w2 = self.w*self.w
cdef float x2 = self.x*self.x
cdef float y2 = self.y*self.y
cdef float z2 = self.z*self.z
cdef float m2 = w2 + x2 + y2 + z2
return m2
# return the magintude of self
cdef float mag_c(self):
return sqrt(self.mag2_c())
# normalize and return self
cdef quat nrm_c(self):
cdef float m = self.mag_c()
if m == 0.0:return self
else:return self.uscl_c(1.0/m)
# flip the direction of and return self
cdef quat flp_c(self):
self.w *= -1.0
return self
# multiply each component by a scalar of and return self
cdef quat uscl_c(self,float s):
self.w *= s
self.x *= s
self.y *= s
self.z *= s
return self
# conjugate and return self
cdef quat cnj_c(self):
self.x *= -1.0
self.y *= -1.0
self.z *= -1.0
return self
# compute the inverse of self and return
cdef quat inv_c(self):
        cdef float m = self.mag2_c()
cdef quat n = self.cp_c().cnj_c().uscl_c(1/m)
return n
# given quat o, return self + o
cdef quat add_c(self,quat o):
cdef quat n = quat(self.w+o.w,self.x+o.x,self.y+o.y,self.z+o.z)
return n
# given quat o, return self - o
cdef quat sub_c(self,quat o):
cdef quat n = quat(self.w-o.w,self.x-o.x,self.y-o.y,self.z-o.z)
return n
# given quat o, rotate self so that self represents
# a rotation by self and then q (q * self)
cdef quat mul_c(self,quat o):
cdef float nw,nx,ny,nz
if gtl.isnear_c(self.w,0):nw,nx,ny,nz = o.__iter__()
elif gtl.isnear_c(o.w,0):nw,nx,ny,nz = self.__iter__()
else:
nw = o.w*self.w - o.x*self.x - o.y*self.y - o.z*self.z
nx = o.w*self.x + o.x*self.w + o.y*self.z - o.z*self.y
ny = o.w*self.y - o.x*self.z + o.y*self.w + o.z*self.x
nz = o.w*self.z + o.x*self.y - o.y*self.x + o.z*self.w
cdef quat n = quat(nw,nx,ny,nz)
return n
# given quat o, rotate self so that self represents
# a rotation by self and then q (q * self)
cdef quat rot_c(self,quat o):
cdef quat qres = self.mul_c(o)
self.w,self.x,self.y,self.z = qres.__iter__()
return self
# rotate a set of vec3 points by self
cdef quat rotps_c(self,ps):
cdef vec3 p
cdef int px
cdef int pcnt = len(ps)
for px in range(pcnt):
p = ps[px]
p.rot_c(self)
return self
# return the dot product of self and quat o
cdef float dot_c(self,quat o):
return self.w*o.w + self.x*o.x + self.y*o.y + self.z*o.z
# spherically linearly interpolate between
# self and quat o proportionally to ds
cdef quat slerp_c(self,quat o,float ds):
cdef float hcosth = self.dot_c(o)
# will need to flip result direction if hcosth < 0????
if gtl.isnear_c(abs(hcosth),1.0):return self.cp_c()
cdef float hth = acos(hcosth)
cdef float hsinth = sqrt(1.0 - hcosth*hcosth)
cdef float nw,nx,ny,nz,a,b
if gtl.isnear_c(hsinth,0):
nw = (self.w*0.5 + o.w*0.5)
nx = (self.x*0.5 + o.x*0.5)
ny = (self.y*0.5 + o.y*0.5)
nz = (self.z*0.5 + o.z*0.5)
else:
a = sin((1-ds)*hth)/hsinth
b = sin(( ds)*hth)/hsinth
nw = (self.w*a + o.w*b)
nx = (self.x*a + o.x*b)
ny = (self.y*a + o.y*b)
            nz = (self.z*a + o.z*b)
        cdef quat n = quat(nw,nx,ny,nz)
return n
###########################################################################
###########################################################################
### python wrappers for c methods #########################################
###########################################################################
# given an angle a and axis v, modify to represent
# a rotation by a around v and return self
cpdef quat av(self,float a,vec3 v):
'''modify to represent a rotation about a vector by an angle'''
return self.av_c(a,v)
# modify to represent a rotation
# between two vectors and return self
cpdef quat uu(self,vec3 x,vec3 y):
'''modify to represent a rotation from one vector to another'''
return self.uu_c(x,y)
# set self to the quaternion that rotates v to (0,0,1) wrt the xy plane
cpdef quat toxy(self,vec3 v):
'''set self to the quaternion that rotates v to (0,0,1) wrt the xy plane'''
return self.toxy_c(v)
# return an independent copy of this quaternion
cpdef quat cp(self):
'''create an independent copy of this quaternion'''
return self.cp_c()
# return an independent flipped copy of this quaternion
cpdef quat cpf(self):
'''create an independent flipped copy of this quaternion'''
return self.cpf_c()
# is quat o within a very small neighborhood of self
cpdef bint isnear(self,quat o):
'''determine if a point is numerically close to another'''
return self.isnear_c(o)
# return the squared magintude of self
cpdef float mag2(self):
'''compute the squared magnitude of this quaternion'''
return self.mag2_c()
# return the magintude of self
cpdef float mag(self):
'''compute the magnitude of this quaternion'''
return self.mag_c()
# normalize and return self
cpdef quat nrm(self):
'''normalize this quaternion'''
return self.nrm_c()
# flip the direction of and return self
cpdef quat flp(self):
'''flip the direction of rotation represented by this quaternion'''
return self.flp_c()
# multiply each component by a scalar of and return self
cpdef quat uscl(self,float s):
'''multiply components of this point by a scalar'''
return self.uscl_c(s)
# conjugate and return self
cpdef quat cnj(self):
'''conjugate this quaternion'''
return self.cnj_c()
# compute the inverse of self and return
cpdef quat inv(self):
'''compute the inverse of this quaternion'''
return self.inv_c()
# given quat o, return self + o
cpdef quat add(self,quat o):
'''compute the addition of this quaternion and another'''
return self.add_c(o)
# given quat o, return self - o
cpdef quat sub(self,quat o):
'''compute the subtraction of this quaternion and another'''
return self.sub_c(o)
# given quat o, rotate self so that self represents
# a rotation by self and then q (q * self)
cpdef quat mul(self,quat o):
'''rotate this quaternion by another quaternion'''
return self.mul_c(o)
# given quat o, rotate self so that self represents
# a rotation by self and then q (q * self)
cpdef quat rot(self,quat o):
'''rotate this quaternion by another quaternion'''
return self.rot_c(o)
# rotate a set of vec3 points by self
cpdef quat rotps(self,ps):
'''rotate a set of vec3 points this quaternion'''
return self.rotps_c(ps)
# return the dot product of self and quat o
cpdef float dot(self,quat o):
'''compute the dot product of this quaternion and another'''
return self.dot_c(o)
# spherically linearly interpolate between
# self and quat o proportionally to ds
cpdef quat slerp(self,quat o,float ds):
'''create a new quat interpolated between this quat and another'''
return self.slerp_c(o,ds)
###########################################################################
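# Usage sketch (assumption: vec3(x, y, z) constructor as cimported above):
def demo_quat():
    q = quat(1, 0, 0, 0).av(1.5707963, vec3(0, 0, 1))  # ~90 degrees about z
    h = q.slerp(quat(1, 0, 0, 0), 0.5)                 # halfway back to identity
    return q.mag(), h.mag()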
import numpy as np
cimport numpy as np
assert sizeof(float) == sizeof(np.float32_t)
cdef extern from "kernel/manager.hh":
cdef cppclass C_distMatrix "distMatrix":
C_distMatrix(np.float32_t*, np.float32_t*, np.float32_t*, int, int, int)
void compute()
void compute_sharedMem()
cdef class distMatrix:
cdef C_distMatrix* g
def __cinit__(self, \
np.ndarray[ndim=1, dtype=np.float32_t] A, \
np.ndarray[ndim=1, dtype=np.float32_t] B, \
np.ndarray[ndim=1, dtype=np.float32_t] C, \
int x1, int x2, int dim):
self.g = new C_distMatrix(&A[0], &B[0], &C[0], x1, x2, dim)
def compute(self):
self.g.compute()
def compute_sharedMem(self):
self.g.compute_sharedMem()<|end_of_text|>from CBARX cimport *
from DataContainer cimport *
cdef class CBARX2(CBARX):
cpdef BuildElementMatrix(self,DataContainer dc)
    cpdef BuildInternalForceVector(self, DataContainer dc)
cdef extern from "stdlib.h":
cpdef long random() nogil
cpdef void srandom(unsigned int) nogil
cpdef const long RAND_MAX
cdef double randdbl() nogil:
cdef double r
r = random()
r = r/RAND_MAX
return r
cpdef double calcpi(const int samples):
"""serially calculate Pi using Cython library functions"""
cdef int inside, i
cdef double x, y
inside = 0
srandom(0)
for i in range(samples):
x = randdbl()
y = randdbl()
if (x*x)+(y*y) < 1:
inside += 1
return (4.0 * inside)/samples
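# Usage sketch: srandom(0) inside calcpi makes the estimate deterministic for
# a given sample count.
def demo_pi(int samples=1000000):
    return calcpi(samples)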
from mpfmc.core.audio.sdl2 cimport *
from mpfmc.core.audio.gstreamer cimport *
from mpfmc.core.audio.sound_file cimport *
from mpfmc.core.audio.track cimport *
from mpfmc.core.audio.notification_message cimport *
# ---------------------------------------------------------------------------
# Sound Loop Track types
# ---------------------------------------------------------------------------
cdef enum:
do_not_stop_loop = 0xFFFFFFFF
cdef enum LayerStatus:
layer_stopped = 0
layer_queued = 1
layer_playing = 2
layer_fading_in = 3
layer_fading_out = 4
ctypedef struct SoundLoopLayerSettings:
LayerStatus status
SoundSample *sound
Uint8 volume
long sound_loop_set_id
Uint64 sound_id
Uint32 fade_in_steps
Uint32 fade_out_steps
Uint32 fade_steps_remaining
bint looping
Uint8 marker_count
GArray *markers
cdef enum SoundLoopSetPlayerStatus:
# Enumeration of the possible sound loop set player status values.
player_idle = 0
player_delayed = 1
player_fading_in = 2
player_fading_out = 3
player_playing = 4
ctypedef struct SoundLoopSetPlayer:
SoundLoopSetPlayerStatus status
Uint32 length
SoundLoopLayerSettings master_sound_layer
GSList *layers # An array of SoundLoopLayerSettings objects
Uint32 sample_pos
Uint32 stop_loop_samples_remaining
Uint32 start_delay_samples_remaining
float tempo
ctypedef struct TrackSoundLoopState:
# State variables for TrackSoundLoop tracks
GSList *players
SoundLoopSetPlayer *current
# ---------------------------------------------------------------------------
# TrackSoundLoop class
# ---------------------------------------------------------------------------
cdef class TrackSoundLoop(Track):
# Track state needs to be stored in a C struct in order for them to be accessible in
# the SDL callback functions without the GIL (for performance reasons).
# The TrackSoundLoopState struct is allocated during construction and freed during
# destruction.
cdef TrackSoundLoopState *type_state
cdef long _sound_loop_set_counter
cdef dict _active_sound_loop_sets
cdef process_notification_message(self, NotificationMessageContainer *notification_message)
cdef _apply_layer_settings(self, SoundLoopLayerSettings *layer, dict layer_settings)
cdef _initialize_player(self, SoundLoopSetPlayer *player)
cdef _delete_player(self, SoundLoopSetPlayer *player)
cdef _delete_player_layers(self, SoundLoopSetPlayer *player)
cdef _cancel_all_delayed_players(self)
cdef _fade_out_all_players(self, Uint32 fade_steps)
cdef inline Uint32 _fix_sample_frame_pos(self, Uint32 sample_pos, Uint8 bytes_per_sample, int channels)
cdef inline Uint32 _round_sample_pos_up_to_interval(self, Uint32 sample_pos, Uint32 interval, int bytes_per_sample_frame)
@staticmethod
cdef void mix_playing_sounds(TrackState *track, Uint32 buffer_length, AudioCallbackData *callback_data) nogil
cdef SoundLoopLayerSettings *_create_sound_loop_layer_settings() nogil
from cython.operator cimport dereference as deref
from libc.stdlib cimport free
from libcpp cimport bool
from hltypes cimport Array, String
cimport XAL
import os
cdef char* XAL_AS_ANDROID = "Android"
cdef char* XAL_AS_DIRECTSOUND = "DirectSound"
cdef char* XAL_AS_OPENAL = "OpenAL"
cdef char* XAL_AS_SDL = "SDL"
cdef char* XAL_AS_AVFOUNDATION = "AVFoundation"
cdef char* XAL_AS_COREAUDIO = "CoreAudio"
cdef char* XAL_AS_DISABLED = "Disabled"
cdef char* XAL_AS_DEFAULT = ""
cdef XAL.BufferMode FULL = XAL.FULL
cdef XAL.BufferMode LAZY = XAL.LAZY
cdef XAL.BufferMode MANAGED = XAL.MANAGED
cdef XAL.BufferMode ON_DEMAND = XAL.ON_DEMAND
cdef XAL.BufferMode STREAMED = XAL.STREAMED
cdef XAL.SourceMode DISK = XAL.DISK
cdef XAL.SourceMode RAM = XAL.RAM
cdef XAL.Format FLAC = XAL.FLAC
cdef XAL.Format M4A = XAL.M4A
cdef XAL.Format OGG = XAL.OGG
cdef XAL.Format SPX = XAL.SPX
cdef XAL.Format WAV = XAL.WAV
cdef XAL.Format UNKNOWN = XAL.UNKNOWN
cdef extern from *:
ctypedef char* const_char_ptr "const char*"
ctypedef unsigned char* const_unsigned_char_ptr "const unsigned char*"
ctypedef String& chstr "chstr"
ctypedef String hstr "hstr"
ctypedef Array harray "harray"
# cdef str LOG_PATH = ""
# cdef bool LOG_ENABLED = False
# cpdef SetLogPath(str path):
# '''
# Sets the path where XAL should create a log file.
# the path should not include the file
# PyXAL will try to create a folder at the path if the path doesn't exist and will save it's log in that folder as a file named XAL.log
# @param path: string path to the folder where the log should be made
# @return: returns True or False if the path was set
# '''
# global LOG_PATH
# cdef str blank = ""
# cdef bint result = False
# if not LOG_PATH == blank and not os.path.exists(path) or not os.path.isdir(path):
# try:
# os.makedirs(path)
# except StandardError:
# return result
# LOG_PATH = path
# result = True
# return result
# cpdef EnableLogging(bool state = True, str path = ""):
# '''
# sets the logging state of PyXAL by default it is off
# @param state: bool True or False if XAL should be logging data default is True so calling
# PyXAL.EnableLogging will turn logging on (by default PyXAL does not log)
# @param path: string path to the folder where PyXAL should create the log
# it is an empty string by default so that should mean the log will be made in the
# current working directory. calling PyXAL.EnableLogging will set the path to an empty string if the paramater is not included
# @return: returns True or False if the path was set
# '''
# global LOG_ENABLED
# LOG_ENABLED = state
# cdef bint result = False
# result = SetLogPath(path)
# return result
# cdef void Log(chstr logMessage):
# global LOG_PATH
# global LOG_ENABLED
# if not LOG_ENABLED:
# return
# cdef const_char_ptr message
# cdef const_char_ptr line_end = "\n"
# message = logMessage.c_str()
# pymessage = message + line_end
# if os.path.exists(LOG_PATH):
# try:
# path = os.path.join(LOG_PATH, "XAL.log")
# file = open(path, "ab")
# file.write(pymessage)
# file.close()
# except StandardError:
# pass
# XAL.setLogFunction(Log)
Mgr = None
cdef hstr Py_to_Hstr (string):
py_byte_string = string.encode('UTF-8')
cdef char* c_str = py_byte_string
cdef hstr hstring = hstr(c_str)
return hstring
cdef Hstr_to_Py (hstr string):
cdef const_char_ptr c_str = string.c_str()
py_byte_string = c_str
pystring = py_byte_string.decode('UTF-8')
return pystring
cdef class PyAudioManager:
'''
    A wrapper for the C++ xal::AudioManager class. It is currently not used.
'''
cdef XAL.AudioManager *_pointer
cdef bool destroyed
def __init__(self):
'''
        This is a wrapper class for a C++ class. It should not be initialized outside of the PyXAL module, as proper setup would be impossible.
        As such, calling the __init__ method will raise a RuntimeError.
'''
raise RuntimeError("PyAudioManager Can not be initialized from python")
cdef class SoundWrapper:
'''
    A wrapper class for the C++ xal::Sound class. It is returned by the XALManager.createSound and PyPlayer.getSound methods
'''
cdef XAL.Sound *_pointer
cdef bool destroyed
def __init__(self):
'''
        This is a wrapper class for a C++ class. It should not be initialized outside of the PyXAL module, as proper setup would be impossible.
        As such, calling the __init__ method will raise a RuntimeError.
'''
raise RuntimeError("PySound Can not be initialized from python")
def _destroy(self):
if self.isXALInitialized() and not self.destroyed :
XAL.mgr.destroySound(self._pointer)
self.destroyed = True
def __dealloc__(self):
        if (XAL.mgr != NULL) and (not self.destroyed):
XAL.mgr.destroySound(self._pointer)
self.destroyed = True
def isXALInitialized(self):
'''
returns true if the C++ side of the interface to XAL exists
'''
        if XAL.mgr != NULL:
return True
else:
return False
def getName(self):
'''
        @return: returns the string name of the sound. It is normally the full path of the sound file without the file extension
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
cdef hstr hl_name = self._pointer.getName()
name = Hstr_to_Py(hl_name)
return name
def getFilename(self):
'''
@return: returns a string containing the file name the sound was loaded from
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
cdef hstr hl_name = self._pointer.getFilename()
name = Hstr_to_Py(hl_name)
return name
def getRealFilename(self):
'''
@return: returns a string with the full path to the file the string was loaded from
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
cdef hstr hl_name = self._pointer.getRealFilename()
name = Hstr_to_Py(hl_name)
return name
def getSize(self):
'''
@return: int the size of the sound data in bits not bytes
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
cdef int size = self._pointer.getSize()
return size
def getChannels(self):
'''
@return: int number of channels the sound has. 1 for mono or 2 for stereo
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
cdef int channels = self._pointer.getChannels()
return channels
def getSamplingRate(self):
'''
        @return: int the sampling rate for the sound in samples per second
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
cdef int rate = self._pointer.getSamplingRate()
return rate
def getBitsPerSample(self):
'''
        @return: int the bits per sample of data in the sound, usually 8, 16, or 24, possibly 32
'''
if not self.isXALInitialized():
| Cython |
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
cdef int rate = self._pointer.getBitsPerSample()
return rate
def getDuration(self):
'''
        @return: float duration of the sound in seconds. It is a floating point number to account for fractions of a second
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
cdef float duration = self._pointer.getDuration()
return duration
def getFormat(self):
'''
        @return: int the internal designation of the sound format. It corresponds to a file type, but as of now there is no way to tell for certain which is which,
        as the numbers will change depending on what formats are currently supported by XAL
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
cdef int format = <int>self._pointer.getFormat()
return format
def isStreamed(self):
'''
        @return: bool is the sound being streamed from its file to the player, or is it completely loaded into memory?
        Should always return False in PyXAL, as PyXAL uses full decoding mode
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
cdef bint streamed = self._pointer.isStreamed()
return streamed
def readPcmData(self):
'''
        read the pcm data of the sound and return it. The format of said data can be determined from the size, channels, bits per sample and sampling rate of the sound
@return: a 2 tuple of (number of bits read, string of bytes read)
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
cdef unsigned char* pcm_data
cdef int pcm_size
cdef char* c_data
data = ""
try:
pcm_size = self._pointer.readPcmData(&pcm_data)
if pcm_size > 0:
c_data = <char*>pcm_data
data = c_data[:pcm_size]
finally:
free(pcm_data)
pcm_data = NULL
return (pcm_size, data)
cdef class PlayerWrapper:
'''
    a wrapper for the C++ class xal::Player. It is returned by the XALManager.createPlayer method
'''
cdef XAL.Player *_pointer
cdef bool destroyed
def __init__(self):
'''
        This is a wrapper class for a C++ class. It should not be initialized outside of the PyXAL module, as proper setup would be impossible.
        As such, calling the __init__ method will raise a RuntimeError.
'''
raise RuntimeError("PyPlayer Can not be initialized from python")
def _destroy(self):
if self.isXALInitialized() and not self.destroyed:
XAL.mgr.destroyPlayer(self._pointer)
self.destroyed = True
def __dealloc__(self):
        if (XAL.mgr != NULL) and (not self.destroyed):
XAL.mgr.destroyPlayer(self._pointer)
self.destroyed = True
def isXALInitialized(self):
'''
returns true if the C++ side of the interface to XAL exists
'''
        if XAL.mgr != NULL:
return True
else:
return False
def getGain(self):
'''
        @return: float the current gain of the player (also known as volume)
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
cdef float gain = self._pointer.getGain()
return gain
def setGain(self, float value):
'''
        set the gain of the player (also known as volume)
        @param value: float the volume to set; 1.0 is normal, 2.0 is twice as loud, 0.5 is half volume, etc.
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
self._pointer.setGain(value)
def getPitch(self):
'''
@return: float the current pitch of the player
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
cdef float offset = self._pointer.getPitch()
return offset
def setPitch(self, float value):
'''
set the current pitch of the player
        @param value: float the pitch to set; 1.0 is normal, 2.0 is a 200% shift, 0.5 is a 50% shift
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
self._pointer.setPitch(value)
def getName(self):
'''
        @return: returns the string name of the sound. It is normally the full path of the sound file without the file extension
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
cdef hstr hl_name = self._pointer.getName()
name = Hstr_to_Py(hl_name)
return name
def getFilename(self):
'''
@return: returns a string containing the file name the sound was loaded from
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
cdef hstr hl_name = self._pointer.getFilename()
name = Hstr_to_Py(hl_name)
return name
def getRealFilename(self):
'''
@return: returns a string with the full path to the file the string was loaded from
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
cdef hstr hl_name = self._pointer.getRealFilename()
name = Hstr_to_Py(hl_name)
return name
def getDuration(self):
'''
        @return: float duration of the sound in seconds. It is a floating point number to account for fractions of a second
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
cdef float duration = self._pointer.getDuration()
return duration
def getSize(self):
'''
@return: int the size of the sound data in bits not bytes
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
cdef int size = self._pointer.getSize()
return size
def getTimePosition(self):
'''
@return: float the time position in seconds
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
cdef float size = self._pointer.getTimePosition()
return size
def getSamplePosition(self):
'''
@return: unsigned int the position in the buffer
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
cdef unsigned int size = self._pointer.getSamplePosition()
return size
def isPlaying(self):
'''
        @return: bool True if the sound is playing
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._pointer.isPlaying()
def isPaused(self):
'''
@return: bool True if the sound is paused
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._pointer.isPaused()
def isFading(self):
'''
@return: bool True if the sound is fading in or out
'''
if not self.isXALInitialized():
            raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._pointer.isFading()
def isFadingIn(self):
'''
@return: bool True if the sound is fading in
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._pointer.isFadingIn()
def isFadingOut(self):
'''
        @return: bool True if the sound is fading out
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._pointer.isFadingOut()
def isLooping(self):
'''
        @return: bool True if the sound is looping
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._pointer.isLooping()
def play(self, float fadeTime = 0.0, bool looping = False):
'''
        start the sound playing at its current offset; the offset starts at 0.0 when the sound is first loaded
@param fadeTime: float the time in seconds for the sound to fade in (0.0 by default)
@param looping: bool should the sound loop (False by default)
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
self._pointer.play(fadeTime, looping)
def stop(self, float fadeTime = 0.0):
'''
        stop the sound playing and reset its offset to 0.0
@param fadeTime: float the time in seconds for the sound to fade out (0.0 by default)
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
self._pointer.stop(fadeTime)
def pause(self, float fadeTime = 0.0):
'''
        pause the sound, keeping the current offset of the sound
@param fadeTime: float the time in seconds for the sound to fade out (0.0 by default)
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
self._pointer.pause(fadeTime)
class PySound(object):
'''
    an interface for the wrapper of the xal::Sound class
'''
CATEGORY_STR = "default"
_wrapper = None
destroyed = False
def __init__(self, filename):
'''
this creates a sound object from a file name
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
path = os.path.split(filename)[0]
cdef hstr file_str = Py_to_Hstr(filename)
cdef hstr path_str = Py_to_Hstr(path)
cdef hstr category = Py_to_Hstr(self.CATEGORY_STR)
cdef XAL.Sound* sound
sound = XAL.mgr.createSound(file_str, category, path_str)
if sound == NULL:
raise RuntimeError("XAL Failed to load file %s" % filename)
cdef SoundWrapper wrapper = SoundWrapper.__new__(SoundWrapper)
wrapper._pointer = sound
wrapper.destroyed = False
self._wrapper = wrapper
def _destroy(self):
if self.isXALInitialized() and not self.destroyed:
self._wrapper._destroy()
self.destroyed = True
def __del__(self):
if self.isXALInitialized():
self._destroy()
del self._wrapper
def isXALInitialized(self):
'''
returns true if the C++ side of the interface to XAL exists
'''
        if XAL.mgr != NULL:
return True
else:
return False
def getName(self):
'''
        @return: returns the string name of the sound. It is normally the full path of the sound file without the file extension
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.getName()
def getFilename(self):
'''
@return: returns a string containing the file name the sound was loaded from
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.getFilename()
def getRealFilename(self):
'''
@return: returns a string with the full path to the file the string was loaded from
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.getRealFilename()
def getSize(self):
'''
@return: int the size of the sound data in bits not bytes
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.getSize()
def getChannels(self):
'''
@return: int number of channels the sound has. 1 for mono or 2 for stereo
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.getChannels()
def getSamplingRate(self):
'''
        @return: int the sampling rate for the sound in samples per second
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.getSamplingRate()
def getBitsPerSample(self):
'''
        @return: int the bits per sample of data in the sound, usually 8, 16, or 24, possibly 32
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.getBitsPerSample()
def getDuration(self):
'''
        @return: float duration of the sound in seconds. It is a floating point number to account for fractions of a second
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.getDuration()
def getFormat(self):
'''
        @return: int the internal designation of the sound format. It corresponds to a file type, but as of now there is no way to tell for certain which is which,
        as the numbers will change depending on what formats are currently supported by XAL
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.getFormat()
def isStreamed(self):
'''
        @return: bool is the sound being streamed from its file to the player, or is it completely loaded into memory?
        Should always return False in PyXAL, as PyXAL uses full decoding mode
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.isStreamed()
def readPcmData(self):
'''
        read the raw data of the sound and return it. The format of said data can be determined from the size, channels, bits per sample and sampling rate of the sound
@return: a 2 tuple of (number of bits read, string of bytes read)
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.readPcmData()
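# Usage sketch (assumption: XAL has been initialized elsewhere and filename is
# a decodable sound file): load a sound and play it through a PyPlayer, which
# is defined below; the name resolves when demo_play is called.
def demo_play(filename):
    sound = PySound(filename)
    player = PyPlayer(sound)
    player.play(0.5, False)  # half-second fade in, no looping
    return player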
class PyPlayer(object):
'''
    an interface for the C++ player wrapper
'''
_wrapper = None
_sound = None
destroyed = False
def __init__(self, sound):
'''
        a PyPlayer object is created by passing a PySound to the __init__ method
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
        if not isinstance(sound, PySound):
raise TypeError("Expected argument 1 to be of type PySound got %s" % type(sound))
sound_name = sound.getName()
cdef hstr hl_name = Py_to_Hstr(sound_name)
cdef XAL.Player* player
player = XAL.mgr.createPlayer(hl_name)
if player == NULL:
raise RuntimeError("XAL Failed to create a player for %s" % sound_name)
cdef PlayerWrapper wrapper = PlayerWrapper.__new__(PlayerWrapper)
wrapper._pointer = player
wrapper.destroyed = False
self._wrapper = wrapper
self._sound = sound
def _destroy(self):
if self.isXALInitialized() and not self.destroyed:
self._wrapper._destroy()
self.destroyed = True
def __del__(self):
global Mgr
if not self.destroyed:
if Mgr is not None:
if hasattr(Mgr, "_players"):
                    if Mgr._players.has_key(self.getName()):
                        if self in Mgr._players[self.getName()]:
                            Mgr._players[self.getName()].remove(self)
if self.isXALInitialized():
self._destroy()
del self._wrapper
del self._sound
def isXALInitialized(self):
'''
returns true if the C++ side of the interface to XAL exists
'''
        if XAL.mgr != NULL:
return True
else:
return False
def getGain(self):
'''
        @return: float the current gain of the player (also known as volume)
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.getGain()
def setGain(self, float value):
'''
        set the gain of the player (also known as volume)
        @param value: float the volume to set; 1.0 is normal, 2.0 is twice as loud, 0.5 is half volume, etc.
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
self._wrapper.setGain(value)
def getPitch(self):
'''
@return: float the current pitch of the player
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.getPitch()
def setPitch(self, float value):
'''
set the current pitch of the player
        @param value: float the pitch to set; 1.0 is normal, 2.0 is a 200% shift, 0.5 is a 50% shift
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
self._wrapper.setPitch(value)
def getSound(self):
'''
return a PySound class wrapper for the sound object of the player
'''
return self._sound
def getName(self):
'''
        @return: returns the string name of the sound. it is normally the full path of the sound file without the file extension
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.getName()
def getFilename(self):
'''
@return: returns a string containing the file name the sound was loaded from
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.getFilename()
def getRealFilename(self):
'''
        @return: returns a string with the full path to the file the sound was loaded from
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.getRealFilename()
def getDuration(self):
'''
        @return: float duration of the sound in seconds. it is a floating point number to account for fractions of a second
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.getDuration()
def getSize(self):
'''
        @return: int the size of the sound data in bits, not bytes
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.getSize()
def getTimePosition(self):
'''
@return: float the time position in seconds
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.getTimePosition()
def getSamplePosition(self):
'''
@return: unsigned int the position in the buffer
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.getSamplePosition()
def isPlaying(self):
'''
        @return: bool True if the sound is playing
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.isPlaying()
def isPaused(self):
'''
@return: bool True if the sound is paused
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.isPaused()
def isFading(self):
'''
@return: bool True if the sound is fading in or out
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.isFading()
def isFadingIn(self):
'''
@return: bool True if the sound is fading in
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.isFadingIn()
def isFadingOut(self):
'''
        @return: bool True if the sound is fading out
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.isFadingOut()
def isLooping(self):
'''
        @return: bool True if the sound is looping
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
return self._wrapper.isLooping()
def play(self, float fadeTime = 0.0, bool looping = False):
'''
        start the sound playing at its current offset; the offset starts at 0.0 when the sound is first loaded
@param fadeTime: float the time in seconds for the sound to fade in (0.0 by default)
@param looping: bool should the sound loop (False by default)
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
self._wrapper.play(fadeTime, looping)
def stop(self, float fadeTime = 0.0):
'''
        stop the sound playing and reset its offset to 0.0
@param fadeTime: float the time in seconds for the sound to fade out (0.0 by default)
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
self._wrapper.stop(fadeTime)
def pause(self, float fadeTime = 0.0):
'''
        stop the sound playing, keeping the current offset of the sound
@param fadeTime: float the time in seconds for the sound to fade out (0.0 by default)
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self.destroyed:
raise RuntimeError("the C++ interface for this object has been destroyed")
self._wrapper.pause(fadeTime)
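# Hedged usage sketch (not part of the original module), assuming XAL has
# already been initialized via PyXAL.Init(hwnd) with a valid window handle
# and that the file path below exists:
#
#   sound = Mgr.createSound("C:/sounds/ding.ogg")   # hypothetical path
#   player = Mgr.createPlayer(sound)
#   player.play(fadeTime=0.5)        # fade in over half a second
#   player.setGain(0.8)              # slightly quieter than normal
#   player.stop(fadeTime=0.25)       # fade out and reset the offset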
cdef class XALManagerWrapper(object):
'''
    a wrapper for the xal::mgr object, which is a xal::AudioManager. in other words this is the main interface to XAL. you SHOULD NOT create an instance of this class yourself;
    call PyXAL.Init to set up XAL. an instance of this class will be made available at PyXAL.Mgr
'''
cdef bint destroyed, inited
cdef XAL.Category *_category
cdef char* CATEGORY_STR
def __init__(self, char* systemname, int backendId, bint threaded = False, float updateTime = 0.01, char* deviceName = ""):
'''
        sets up the interface and initializes XAL. you SHOULD NOT BE CREATING THIS CLASS YOURSELF; call PyXAL.Init and use the object created at PyXAL.Mgr.
        if PyXAL.Mgr is None call PyXAL.Destroy and then PyXAL.Init to set up the interface again
        @param systemname: string name of the back end system to use
        @param backendId: int window handle of the calling application
        @param threaded: bool should the system use a threaded interface? (False by default)
        @param updateTime: float how often should XAL update (0.01 by default)
        @param deviceName: string arbitrary device name ("" by default)
'''
global Mgr
if Mgr is not None:
raise RuntimeError("Only one XALManager interface allowed at a time, use the one at PyXAL.Mgr")
self.CATEGORY_STR = "default"
cdef hstr name = hstr(systemname)
cdef hstr dname = hstr(deviceName)
self._destroyXAL()
XAL.init(name, <void*>backendId, threaded, updateTime, dname)
self.inited = True
self.destroyed = False
self.SetupXAL()
def __dealloc__(self):
        if XAL.mgr != NULL:
fade = 0.0
XAL.mgr.stopAll(fade)
XAL.destroy()
def isXALInitialized(self):
'''
returns true if the C++ side of the interface to XAL exists
'''
        if XAL.mgr != NULL:
return True
else:
return False
def SetupXAL(self):
'''
        set up XAL and create the default sound category
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
cdef hstr category = hstr(self.CATEGORY_STR)
self._category = XAL.mgr.createCategory(category, FULL, DISK)
def _destroyXAL(self):
        if XAL.mgr != NULL:
fade = 0.0
XAL.mgr.stopAll(fade)
XAL.destroy()
class XALManager(object):
'''
    a wrapper for the xal::mgr object, which is a xal::AudioManager. in other words this is the main interface to XAL. you SHOULD NOT create an instance of this class yourself;
    call PyXAL.Init to set up XAL. an instance of this class will be made available at PyXAL.Mgr
'''
destroyed = False
inited = False
CATEGORY_STR = "default"
_players = {}
_wrapper = None
def __init__(self, int backendId, bint threaded = False):
'''
        sets up the interface and initializes XAL. you SHOULD NOT BE CREATING THIS CLASS YOURSELF; call PyXAL.Init and use the object created at PyXAL.Mgr.
        if PyXAL.Mgr is None call PyXAL.Destroy and then PyXAL.Init to set up the interface again
        @param backendId: int window handle of the calling application
        @param threaded: bool should the system use a threaded interface? (False by default)
'''
global Mgr
if Mgr is not None:
raise RuntimeError("Only one XALManager interface allowed at a time, use the one at PyXAL.Mgr")
cdef XALManagerWrapper wrapper = XALManagerWrapper(XAL_AS_DEFAULT, backendId, threaded)
self._wrapper = wrapper
self._players = {}
def isXALInitialized(self):
'''
returns true if the C++ side of the interface to XAL exists
'''
        if XAL.mgr != NULL:
return True
else:
return False
def __del__(self):
'''
make sure XAL is destroyed if the interface is destroyed
'''
del self._players
del self._wrapper
def clear(self):
'''
        clear the XAL interface and reset it to be like it was freshly initialized; all current sounds and players become invalid
'''
self._players = {}
if self.isXALInitialized():
fade = 0.0
XAL.mgr.stopAll(fade)
XAL.mgr.clear()
def createSound(self, filename):
'''
create a sound object
        raises a RuntimeError if the sound fails to load, so be sure to put this call in a try/except block
@param filename: string full path to a sound file to load
        @return: a PySound wrapper to the sound object
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
pysound = PySound(filename)
return pysound
def createPlayer(self, sound):
'''
create a player from a sound object
        raises a RuntimeError if XAL fails to create a player, so be sure to put this call in a try/except block
@param sound: a PySound wrapper to a sound object
        @return: a PyPlayer wrapper to the player object
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if not isinstance(sound, PySound):
raise TypeError("Expected argument 1 to be of type PySound got %s" % type(sound))
sound_name = sound.getName()
if not self._players.has_key(sound_name):
self._players[sound_name] = []
pyplayer = PyPlayer(sound)
self._players[sound_name].append(pyplayer)
return pyplayer
def destroyPlayer(self, player):
'''
destroy a player object
        destroys the C++ interface. the object is unusable after this
        @param player: the PyPlayer wrapper for the player to destroy
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if not isinstance(player, PyPlayer):
raise TypeError("Expected argument 1 to be of type PyPlayer got %s" % type(player))
name = player.getName()
if self._players.has_key(name):
if player in self._players[name]:
self._players[name].remove(player)
player._destroy()
def destroySound(self, sound):
'''
destroy a sound object
        destroys the C++ interface. the object is unusable after this, and so is any player that uses the sound
        @param sound: the PySound wrapper for the sound to destroy
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if not isinstance(sound, PySound):
raise TypeError("Expected argument 1 to be of type PySound got %s" % type(sound))
sound._destroy()
def findPlayer(self, str name):
'''
        tries to find a player for the sound whose name is passed. it finds the player using the internally kept list of wrapped player instances and returns the first player in the list
        @param name: string the name of the sound to find a player for
        @return: a PyPlayer wrapper to the player object, or None if no player is found
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
if self._players.has_key(name):
if len(self._players[name]) > 0:
return self._players[name][0]
return None
def play(self, name, float fadeTime = 0.0, bool looping = False, float gain = 1.0):
'''
        play the sound identified by the name passed (it must already have been created)
        @param name: string the name of the sound to play. it must already have been created
        @param fadeTime: float time in seconds for the sound to fade in (0.0 by default)
        @param looping: bool should the sound loop? (False by default)
        @param gain: float the volume to play the sound at. 1.0 is normal, 0.5 is half, 2.0 is twice the volume, etc. (1.0 by default)
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
cdef hstr hl_name = Py_to_Hstr(name)
XAL.mgr.play(hl_name, fadeTime, looping, gain)
def stop(self, name, float fadeTime = 0.0):
'''
        stop playing the sound identified by the name passed
        @param name: string the name of the sound to stop
        @param fadeTime: float the time in seconds for the sound to fade out (0.0 by default)
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
cdef hstr hl_name = Py_to_Hstr(name)
XAL.mgr.stop(hl_name, fadeTime)
def stopFirst(self, name, float fadeTime = 0.0):
'''
        stop playing the first player of the sound identified by the name passed
        @param name: string the name of the sound to stop
        @param fadeTime: float the time in seconds for the sound to fade out (0.0 by default)
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
cdef hstr hl_name = Py_to_Hstr(name)
XAL.mgr.stopFirst(hl_name, fadeTime)
def stopAll(self, float fadeTime = 0.0):
'''
        stop all players of all sounds
        @param fadeTime: float the time in seconds for the sounds to fade out (0.0 by default)
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
XAL.mgr.stopAll(fadeTime)
def isAnyPlaying(self, name):
'''
        @param name: string name of sound to check
@return: bool True if there is a sound by this name playing
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
cdef hstr hl_name = Py_to_Hstr(name)
cdef bint result = XAL.mgr.isAnyPlaying(hl_name)
return result
def isAnyFading(self, name):
'''
        @param name: string name of sound to check
@return: bool True if there is a sound by this name fading in or out
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
cdef hstr hl_name = Py_to_Hstr(name)
cdef bint result = XAL.mgr.isAnyFading(hl_name)
return result
def isAnyFadingIn(self, name):
'''
        @param name: string name of sound to check
@return: bool True if there is a sound by this name fading in
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
cdef hstr hl_name = Py_to_Hstr(name)
cdef bint result = XAL.mgr.isAnyFadingIn(hl_name)
return result
def isAnyFadingOut(self, name):
'''
        @param name: string name of sound to check
@return: bool True if there is a sound by this name fading out
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
cdef hstr hl_name = Py_to_Hstr(name)
cdef bint result = XAL.mgr.isAnyFadingOut(hl_name)
return result
def suspendAudio(self):
'''
        pause all sounds and players
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
XAL.mgr.suspendAudio()
def resumeAudio(self):
'''
        resume all sounds and players
'''
if not self.isXALInitialized():
raise RuntimeError("XAL is not Initialized")
XAL.mgr.resumeAudio()
def Init(int backendId, bint threaded = True):
'''
Setup XAL and create an XALManager interface at PyXAL.Mgr
    @param backendId: int window handle of the calling application
@param threaded: bool should XAL use a threaded interface? (True by default)
'''
global Mgr
if Mgr is None:
        if XAL.mgr != NULL:
fade = 0.0
XAL.mgr.stopAll(fade)
XAL.destroy()
Mgr = XALManager(backendId, threaded)
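# Hedged sketch (not part of the original module) of the full lifecycle;
# `hwnd` stands in for a real window handle from the host application:
#
#   import PyXAL
#   PyXAL.Init(hwnd)                  # creates the manager at PyXAL.Mgr
#   snd = PyXAL.Mgr.createSound("menu_click.ogg")   # hypothetical file
#   PyXAL.Mgr.play(snd.getName())
#   PyXAL.Destroy()                   # defined below; PyXAL.Mgr becomes None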
def Destroy():
'''
    Destroy XAL and remove the interface at PyXAL.Mgr, setting it to None
'''
global Mgr
    if XAL.mgr != NULL:
fade = 0.0
XAL.mgr.stopAll(fade)
XAL.destroy()
    Mgr = None
<|end_of_text|>from collections import OrderedDict as odict
from bot_belief_1 cimport Bot as BotBelief1
from bot_belief_4 cimport Bot as BotBelief4
from bot_belief_f cimport Bot as BotBeliefF
from bot_diffs_1 cimport Bot as BotDiffs1
from bot_diffs_1_m cimport Bot as BotDiffs1M
from bot_diffs_2 cimport Bot as BotDiffs2
from bot_diffs_3 cimport Bot as BotDiffs3
from bot_diffs_4 cimport Bot as BotDiffs4
from bot_diffs_4_m cimport Bot as BotDiffs4M
from bot_linear cimport Bot as BotLinear
from bot_linear_m cimport Bot as BotLinearM
from bot_phases_1 cimport Bot as BotPhases1
from bot_quadratic cimport Bot as BotQuadratic
from bot_random cimport Bot as BotRandom
from bot_random_1 cimport Bot as BotRandom1
from bot_random_5 cimport Bot as BotRandom5
from bot_random_n cimport Bot as BotRandomN
from bot_simi cimport Bot as BotSimi
from bot_states_1 cimport Bot as BotStates1
available_bots = odict((
('belief_1', BotBelief1),
('belief_4', BotBelief4),
('belief_f', BotBeliefF),
('diffs_1', BotDiffs1),
('diffs_1_m', BotDiffs1M),
('diffs_2', BotDiffs2),
('diffs_3', BotDiffs3),
('diffs_4', BotDiffs4),
('diffs_4_m', BotDiffs4M),
('linear', BotLinear),
('linear_m', BotLinearM),
('phases_1', BotPhases1),
('quadratic', BotQuadratic),
('random', BotRandom),
('random_1', BotRandom1),
('random_5', BotRandom5),
('random_n', BotRandomN),
('simi', BotSimi),
('states_1', BotStates1)
))
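# Hedged usage sketch (not part of the original module): the registry maps
# short names to the cimported bot extension types, preserving insertion
# order, so callers can look bots up by name:
#
#   bot_cls = available_bots['linear']    # -> BotLinear
#   names = list(available_bots)          # names in registration order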
<|end_of_text|>from .object cimport PyObject
cdef extern from "Python.h":
############################################################################
# 6.3 Sequence Protocol
############################################################################
bint PySequence_Check(object o)
# Return 1 if the object provides sequence protocol, and 0
# otherwise. This function always succeeds.
Py_ssize_t PySequence_Size(object o) except -1
# Returns the number of objects in sequence o on success, and -1
# on failure. For objects that do not provide sequence protocol,
# this is equivalent to the Python expression "len(o)".
Py_ssize_t PySequence_Length(object o) except -1
# Alternate name for PySequence_Size().
object PySequence_Concat(object o1, object o2)
# Return value: New reference.
# Return the concatenation of o1 and o2 on success, and NULL on
# failure. This is the equivalent of the Python expression "o1 +
# o2".
object PySequence_Repeat(object o, Py_ssize_t count)
# Return value: New reference.
# Return the result of repeating sequence object o count times, or
# NULL on failure. This is the equivalent of the Python expression
# "o * count".
object PySequence_InPlaceConcat(object o1, object o2)
# Return value: New reference.
# Return the concatenation of o1 and o2 on success, and NULL on
# failure. The operation is done in-place when o1 supports
# it. This is the equivalent of the Python expression "o1 += o2".
object PySequence_InPlaceRepeat(object o, Py_ssize_t count)
# Return value: New reference.
# Return the result of repeating sequence object o count times, or
# NULL on failure. The operation is done in-place when o supports
# it. This is the equivalent of the Python expression "o *=
# count".
object PySequence_GetItem(object o, Py_ssize_t i)
# Return value: New reference.
# Return the ith element of o, or NULL on failure. This is the
# equivalent of the Python expression "o[i]".
object PySequence_GetSlice(object o, Py_ssize_t i1, Py_ssize_t i2)
# Return value: New reference.
# Return the slice of sequence object o between i1 and i2, or NULL
# on failure. This is the equivalent of the Python expression
    # "o[i1:i2]".
int PySequence_SetItem(object o, Py_ssize_t i, object v) except -1
# Assign object v to the ith element of o. Returns -1 on
# failure. This is the equivalent of the Python statement "o[i] =
# v". This function does not steal a reference to v.
int PySequence_DelItem(object o, Py_ssize_t i) except -1
# Delete the ith element of object o. Returns -1 on failure. This
# is the equivalent of the Python statement "del o[i]".
int PySequence_SetSlice(object o, Py_ssize_t i1, Py_ssize_t i2, object v) except -1
# Assign the sequence object v to the slice in sequence object o
# from i1 to i2. This is the equivalent of the Python statement
# "o[i1:i2] = v".
int PySequence_DelSlice(object o, Py_ssize_t i1, Py_ssize_t i2) except -1
# Delete the slice in sequence object o from i1 to i2. Returns -1
# on failure. This is the equivalent of the Python statement "del
# o[i1:i2]".
int PySequence_Count(object o, object value) except -1
# Return the number of occurrences of value in o, that is, return
# the number of keys for which o[key] == value. On failure, return
# -1. This is equivalent to the Python expression
# "o.count(value)".
int PySequence_Contains(object o, object value) except -1
# Determine if o contains value. If an item in o is equal to
# value, return 1, otherwise return 0. On error, return -1. This
# is equivalent to the Python expression "value in o".
Py_ssize_t PySequence_Index(object o, object value) except -1
# Return the first index i for which o[i] == value. On error,
# return -1. This is equivalent to the Python expression
# "o.index(value)".
object PySequence_List(object o)
# Return value: New reference.
# Return a list object with the same contents as the arbitrary
# sequence o. The returned list is guaranteed to be new.
object PySequence_Tuple(object o)
# Return value: New reference.
# Return a tuple object with the same contents as the arbitrary
# sequence o or NULL on failure. If o is a tuple, a new reference
# will be returned, otherwise a tuple will be constructed with the
# appropriate contents. This is equivalent to the Python
# expression "tuple(o)".
object PySequence_Fast(object o, char *m)
# Return value: New reference.
# Returns the sequence o as a tuple, unless it is already a tuple
# or list, in which case o is returned. Use
# PySequence_Fast_GET_ITEM() to access the members of the
# result. Returns NULL on failure. If the object is not a
# sequence, raises TypeError with m as the message text.
PyObject* PySequence_Fast_GET_ITEM(object o, Py_ssize_t i)
# Return value: Borrowed reference.
# Return the ith element of o, assuming that o was returned by
# PySequence_Fast(), o is not NULL, and that i is within bounds.
PyObject** PySequence_Fast_ITEMS(object o)
# Return the underlying array of PyObject pointers. Assumes that o
# was returned by PySequence_Fast() and o is not NULL.
object PySequence_ITEM(object o, Py_ssize_t i)
# Return value: New reference.
# Return the ith element of o or NULL on failure. Macro form of
# PySequence_GetItem() but without checking that
# PySequence_Check(o) is true and without adjustment for negative
# indices.
Py_ssize_t PySequence_Fast_GET_SIZE(object o)
# Returns the length of o, assuming that o was returned by
# PySequence_Fast() and that o is not NULL. The size can also be
# gotten by calling PySequence_Size() on o, but
# PySequence_Fast_GET_SIZE() is faster because it can assume o is
# a list or tuple.
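# Hedged usage sketch (not part of the original declarations): the classic
# PySequence_Fast iteration pattern from Cython. The message argument is a
# char*, so a bytes literal is used here.
#
#   cdef double total_of(object seq):
#       fast = PySequence_Fast(seq, b"expected a sequence")
#       cdef Py_ssize_t i, n = PySequence_Fast_GET_SIZE(fast)
#       cdef double total = 0.0
#       for i in range(n):
#           total += <object>PySequence_Fast_GET_ITEM(fast, i)
#       return total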
<|end_of_text|>import sys
cdef str container_format_postfix = 'le' if sys.byteorder == 'little' else 'be'
cdef object _cinit_bypass_sentinel
cdef AudioFormat get_audio_format(lib.AVSampleFormat c_format):
"""Get an AudioFormat without going through a string."""
cdef AudioFormat format = AudioFormat.__new__(AudioFormat, _cinit_bypass_sentinel)
format._init(c_format)
return format
cdef class AudioFormat(object):
"""Descriptor of audio formats."""
def __cinit__(self, name):
if name is _cinit_bypass_sentinel:
return
cdef lib.AVSampleFormat sample_fmt
if isinstance(name, AudioFormat):
sample_fmt = (<AudioFormat>name).sample_fmt
else:
sample_fmt = lib.av_get_sample_fmt(name)
if sample_fmt < 0:
raise ValueError('Not a sample format: %r' % name)
self._init(sample_fmt)
cdef _init(self, lib.AVSampleFormat sample_fmt):
self.sample_fmt = sample_fmt
def __repr__(self):
return '<av.AudioFormat %s>' % (self.name)
property name:
"""Canonical name of the sample format.
>>> SampleFormat('s16p').name
's16p'
"""
def __get__(self):
return <str>lib.av_get_sample_fmt_name(self.sample_fmt)
property bytes:
"""Number of bytes per sample.
>>> SampleFormat('s16p').bytes
2
"""
def __get__(self):
return lib.av_get_bytes_per_sample(self.sample_fmt)
property bits:
"""Number of bits per sample.
>>> SampleFormat('s16p').bits
16
"""
def __get__(self):
return lib.av_get_bytes_per_sample(self.sample_fmt) << 3
property is_planar:
"""Is this a planar format?
Strictly opposite of :attr:`is_packed`.
"""
def __get__(self):
return bool(lib.av_sample_fmt_is_planar(self.sample_fmt))
property is_packed:
"""Is this a planar format?
Strictly opposite of :attr:`is_planar`.
"""
def __get__(self):
return not lib.av_sample_fmt_is_planar(self.sample_fmt)
property planar:
"""The planar variant of this format.
Is itself when planar:
>>> fmt = Format('s16p')
>>> fmt.planar is fmt
True
"""
def __get__(self):
if self.is_planar:
return self
return get_audio_format(lib.av_get_planar_sample_fmt(self.sample_fmt))
property packed:
"""The packed variant of this format.
Is itself when packed:
>>> fmt = Format('s16')
>>> fmt.packed is fmt
True
"""
def __get__(self):
if self.is_packed:
return self
return get_audio_format(lib.av_get_packed_sample_fmt(self.sample_fmt))
property container_name:
"""The name of a :class:`ContainerFormat` which directly accepts this data.
:raises ValueError: when planar, since there are no such containers.
"""
def __get__(self):
if self.is_planar:
raise ValueError('no planar container formats')
if self.sample_fmt == lib.AV_SAMPLE_FMT_U8:
return 'u8'
elif self.sample_fmt == lib.AV_SAMPLE_FMT_S16:
                return 's16' + container_format_postfix
elif self.sample_fmt == lib.AV_SAMPLE_FMT_S32:
                return 's32' + container_format_postfix
elif self.sample_fmt == lib.AV_SAMPLE_FMT_FLT:
return 'f32' + container_format_postfix
elif self.sample_fmt == lib.AV_SAMPLE_FMT_DBL:
return 'f64' + container_format_postfix
raise ValueError('unknown layout')
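# Hedged usage sketch (not part of the original module), mirroring the
# doctests above:
#
#   fmt = AudioFormat('s16')
#   fmt.bits            # 16
#   fmt.planar.name     # 's16p'
#   fmt.container_name  # 's16le' on a little-endian host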
<|end_of_text|># Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libc cimport stdint
from libcpp cimport bool as cbool
from libcpp.string cimport string
INT32_MIN = stdint.INT32_MIN
INT32_MAX = stdint.INT32_MAX
UINT32_MAX = stdint.UINT32_MAX
INT64_MIN = stdint.INT64_MIN
INT64_MAX = stdint.INT64_MAX
UINT64_MAX = stdint.UINT64_MAX
cdef extern from "thrift/lib/python/capi/test/marshal_fixture.h" namespace "apache::thrift::python":
cdef object __make_unicode(object)
cdef object __roundtrip_pyobject[T](object)
cdef object __roundtrip_unicode(object)
cdef object __roundtrip_bytes(object)
def roundtrip_int32(object x):
return __roundtrip_pyobject[stdint.int32_t](x)
def roundtrip_int64(object x):
return __roundtrip_pyobject[stdint.int64_t](x)
def roundtrip_uint32(object x):
return __roundtrip_pyobject[stdint.uint32_t](x)
def roundtrip_uint64(object x):
return __roundtrip_pyobject[stdint.uint64_t](x)
def roundtrip_float(object x):
return __roundtrip_pyobject[float](x)
def roundtrip_double(object x):
return __roundtrip_pyobject[double](x)
def roundtrip_bool(object x):
return __roundtrip_pyobject[cbool](x)
def roundtrip_bytes(object x):
return __roundtrip_bytes(x)
def roundtrip_unicode(object x):
return __roundtrip_unicode(x)
def make_unicode(object x):
return __make_unicode(x)
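# Hedged usage sketch (not part of the original module): each roundtrip_*
# helper marshals a Python value into the named C++ type and back, so
# in-range values should come back unchanged:
#
#   assert roundtrip_int32(-5) == -5
#   assert roundtrip_uint64(UINT64_MAX) == UINT64_MAX
#   assert roundtrip_bool(True) is True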
<|end_of_text|># cython: language_level=3
# cython: profile=True
# Time-stamp: <2019-10-30 17:27:50 taoliu>
"""Module Description: Statistics function wrappers.
NOTE: This file is no longer used in any other MACS2 codes.
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License (see the file LICENSE included with
the distribution).
"""
from libc.math cimport log10, log
from MACS2.Prob import poisson_cdf
cdef class P_Score_Upper_Tail:
"""Unit to calculate -log10 poisson_CDF of upper tail and cache
results in a hashtable.
"""
cdef:
dict pscore_dict
def __init__ ( self ):
self.pscore_dict = dict()
cpdef float get_pscore ( self, int x, float l ):
cdef:
float val
if ( x, l ) in self.pscore_dict:
return self.pscore_dict [ (x, l ) ]
else:
# calculate and cache
val = -1 * poisson_cdf ( x, l, False, True )
self.pscore_dict[ ( x, l ) ] = val
return val
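# Hedged usage sketch (not part of the original module): repeated calls with
# the same (x, lambda) pair hit the dict cache instead of re-evaluating the
# Poisson CDF.
#
#   scorer = P_Score_Upper_Tail()
#   s1 = scorer.get_pscore(50, 10.0)   # computed via poisson_cdf, then cached
#   s2 = scorer.get_pscore(50, 10.0)   # served from self.pscore_dict
#   assert s1 == s2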
cdef class LogLR_Asym:
"""Unit to calculate asymmetric log likelihood, and cache
results in a hashtable.
"""
cdef:
dict logLR_dict
def __init__ ( self ):
self.logLR_dict = dict()
cpdef float get_logLR_asym ( self, float x, float y ):
cdef:
float val
if ( x, y ) in self.logLR_dict:
return self.logLR_dict[ ( x, y ) ]
else:
# calculate and cache
if x > y:
val = (x*(log10(x)-log10(y))+y-x)
elif x < y:
val = (x*(-log10(x)+log10(y))-y+x)
else:
val = 0
self.logLR_dict[ ( x, y ) ] = val
return val
cdef class LogLR_Sym:
"""Unit to calculate symmetric log likelihood, and cache
results in a hashtable.
"symmetric" means f(x,y) = -f(y,x)
"""
cdef:
dict logLR_dict
def __init__ ( self ):
self.logLR_dict = dict()
cpdef float get_logLR_sym ( self, float x, float y ):
cdef:
float val
if ( x, y ) in self.logLR_dict:
return self.logLR_dict[ ( x, y ) ]
else:
# calculate and cache
if x > y:
val = (x*(log10(x)-log10(y))+y-x)
elif y > x:
val = (y*(log10(x)-log10(y))+y-x)
else:
val = 0
self.logLR_dict[ ( x, y ) ] = val
return val
cdef class LogLR_Diff:
"""Unit to calculate log likelihood for differential calling, and cache
results in a hashtable.
here f(x,y) = f(y,x) and f() is always positive.
"""
cdef:
dict logLR_dict
def __init__ ( self ):
self.logLR_dict = dict()
cpdef float get_logLR_diff ( self, float x, float y ):
cdef:
float val
if ( x, y ) in self.logLR_dict:
return self.logLR_dict[ ( x, y ) ]
else:
# calculate and cache
if y > x: y, x = x, y
if x == y:
val = 0
else:
val = (x*(log10(x)-log10(y))+y-x)
self.logLR_dict[ ( x, y ) ] = val
return val
<|end_of_text|># cython: cdivision=True
cpdef bytearray decrypt(unsigned char *data, int data_len, unsigned char *key, int key_len, unsigned char *scramble, int scramble_len, unsigned char counter, int counter_step=1):
cdef unsigned int output_idx = 0
cdef unsigned int idx = 0
cdef unsigned int even_bit_shift = 0
cdef unsigned int odd_bit_shift = 0
cdef unsigned int is_even_bit_set = 0
cdef unsigned int is_odd_bit_set = 0
cdef unsigned int is_key_bit_set = 0
cdef unsigned int is_scramble_bit_set = 0
cdef unsigned int is_counter_bit_set = 0
cdef unsigned int is_counter_bit_inv_set = 0
cdef unsigned int cur_bit = 0
cdef unsigned int output_word = 0
output_data = bytearray(data_len * 2)
while idx < data_len:
output_word = 0
cur_data = (data[(idx * 2) + 1] << 8) | data[(idx * 2)]
cur_bit = 0
while cur_bit < 8:
even_bit_shift = (cur_bit * 2) & 0xff
odd_bit_shift = (cur_bit * 2 + 1) & 0xff
            is_even_bit_set = int((cur_data & (1 << even_bit_shift)) != 0)
            is_odd_bit_set = int((cur_data & (1 << odd_bit_shift)) != 0)
            is_key_bit_set = int((key[idx % key_len] & (1 << cur_bit)) != 0)
            is_scramble_bit_set = int((scramble[idx % scramble_len] & (1 << cur_bit)) != 0)
            is_counter_bit_set = int((counter & (1 << cur_bit)) != 0)
            is_counter_bit_inv_set = int((counter & (1 << ((7 - cur_bit) & 0xff))) != 0)
if is_scramble_bit_set == 1:
is_even_bit_set, is_odd_bit_set = is_odd_bit_set, is_even_bit_set
if ((is_even_bit_set ^ is_counter_bit_inv_set ^ is_key_bit_set)) == 1:
output_word |= 1 << even_bit_shift
if (is_odd_bit_set ^ is_counter_bit_set) == 1:
output_word |= 1 << odd_bit_shift
cur_bit += 1
output_data[output_idx] = (output_word >> 8) & 0xff
output_data[output_idx+1] = output_word & 0xff
output_idx += 2
counter += counter_step
idx += 1
return output_data
cpdef bytearray encrypt(unsigned char *data, int data_len, unsigned char *key, int key_len, unsigned char *scramble, int scramble_len, unsigned char counter, int counter_step=1):
cdef unsigned int output_idx = 0
cdef unsigned int idx = 0
cdef unsigned int even_bit_shift = 0
cdef unsigned int odd_bit_shift = 0
cdef unsigned int is_even_bit_set = 0
cdef unsigned int is_odd_bit_set = 0
cdef unsigned int is_key_bit_set = 0
cdef unsigned int is_scramble_bit_set = 0
cdef unsigned int is_counter_bit_set = 0
cdef unsigned int is_counter_bit_inv_set = 0
cdef unsigned int cur_bit = 0
cdef unsigned int output_word = 0
cdef unsigned int set_even_bit = 0
cdef unsigned int set_odd_bit = 0
output_data = bytearray(data_len * 2)
while idx < data_len:
output_word = 0
cur_data = (data[(idx * 2)] << 8) | data[(idx * 2) + 1]
cur_bit = 0
while cur_bit < 8:
even_bit_shift = (cur_bit * 2) & 0xff
            odd_bit_shift = (cur_bit * 2 + 1) & 0xff
            is_even_bit_set = int((cur_data & (1 << even_bit_shift)) != 0)
            is_odd_bit_set = int((cur_data & (1 << odd_bit_shift)) != 0)
            is_key_bit_set = int((key[idx % key_len] & (1 << cur_bit)) != 0)
            is_scramble_bit_set = int((scramble[idx % scramble_len] & (1 << cur_bit)) != 0)
            is_counter_bit_set = int((counter & (1 << cur_bit)) != 0)
            is_counter_bit_inv_set = int((counter & (1 << ((7 - cur_bit) & 0xff))) != 0)
set_even_bit = is_even_bit_set ^ is_counter_bit_inv_set ^ is_key_bit_set
set_odd_bit = is_odd_bit_set ^ is_counter_bit_set
if is_scramble_bit_set == 1:
set_even_bit, set_odd_bit = set_odd_bit, set_even_bit
if set_even_bit == 1:
output_word |= 1 << even_bit_shift
if set_odd_bit == 1:
output_word |= 1 << odd_bit_shift
cur_bit += 1
output_data[output_idx] = output_word & 0xff
output_data[output_idx+1] = (output_word >> 8) & 0xff
output_idx += 2
counter += counter_step
idx += 1
return output_data
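# Hedged round-trip sketch (not part of the original module): encrypt and
# decrypt mirror each other's bit shuffling, so the same key/scramble/counter
# should recover the payload. All byte strings below are made-up test vectors,
# and data_len counts 16-bit words, not bytes.
#
#   key = b'\x01\x02\x03\x04'
#   scramble = b'\xaa\x55'
#   plain = b'\x12\x34\x56\x78'                      # two 16-bit words
#   enc = encrypt(plain, 2, key, 4, scramble, 2, 0)
#   dec = decrypt(bytes(enc), 2, key, 4, scramble, 2, 0)
#   assert bytes(dec) == plain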
<|end_of_text|># cython: language_level=3
cdef class MultiCast:
cdef bytes _group
cdef bytes _port
cdef bytes _iface
    cdef bytes _ip
<|end_of_text|># mode: run
# tag: closures
# preparse: id
# preparse: def_to_cdef
#
# closure_tests_2.pyx
#
# Battery of tests for closures in Cython. Based on the collection of
# compiler tests from P423/B629 at Indiana University, Spring 1999 and
# Fall 2000. Special thanks to R. Kent Dybvig, Dan Friedman, Kevin
# Millikin, and everyone else who helped to generate the original
# tests. Converted into a collection of Python/Cython tests by Craig
# Citro.
#
# Note: This set of tests is split (somewhat randomly) into several
# files, simply because putting all the tests in a single file causes
# gcc and g++ to buckle under the load.
#
def g1526():
"""
>>> g1526()
2
"""
x_1134 = 0
def g1525():
x_1136 = 1
z_1135 = x_1134
def g1524(y_1137):
return (x_1136)+((z_1135)+(y_1137))
return g1524
f_1138 = g1525()
return f_1138(f_1138(x_1134))
def g1535():
"""
>>> g1535()
3050
"""
def g1534():
def g1533():
def g1531(t_1141):
def g1532(f_1142):
return t_1141(f_1142(1000))
return g1532
return g1531
def g1530():
def g1529(x_1140):
return (x_1140)+(50)
return g1529
return g1533()(g1530())
def g1528():
def g1527(y_1139):
return (y_1139)+(2000)
return g1527
return g1534()(g1528())
def g1540():
"""
>>> g1540()
2050
"""
def g1539():
t_1143 = 50
def g1538(f_1144):
return (t_1143)+(f_1144())
return g1538
def g1537():
def g1536():
return 2000
return g1536
return g1539()(g1537())
def g1547():
"""
>>> g1547()
2050
"""
def g1546():
def g1545():
def g1543(t_1145):
def g1544(f_1146):
return (t_1145)+(f_1146())
return g1544
return g1543
return g1545()(50)
def g1542():
def g1541():
return 2000
return g1541
return g1546()(g1542())
def g1550():
"""
>>> g1550()
700
"""
def g1549():
x_1147 = 300
def g1548(y_1148):
return (x_1147)+(y_1148)
return g1548
return g1549()(400)
def g1553():
"""
>>> g1553()
0
"""
x_1152 = 3
def g1552():
def g1551(x_1150, y_1149):
return x_1150
return g1551
f_1151 = g1552()
if (f_1151(0, 0)):
return f_1151(f_1151(0, 0), x_1152)
else:
return 0
def g1562():
"""
>>> g1562()
False
"""
def g1561():
def g1556(x_1153):
def g1560():
def g1559():
return isinstance(x_1153, list)
if (g1559()):
def g1558():
def g1557():
return (x_1153[0])
return (g1557() == 0)
return (not g1558())
else:
return False
if (g1560()):
return x_1153
else:
return False
return g1556
f_1154 = g1561()
def g1555():
def g1554():
return [0,[]]
return [0,g1554()]
return f_1154(g1555())
def g1570():
"""
>>> g1570()
False
"""
def g1569():
def g1563(x_1155):
def g1568():
if (x_1155):
def g1567():
def g1566():
return isinstance(x_1155, list)
if (g1566()):
def g1565():
def g1564():
return (x_1155[0])
return (g1564() == 0)
return (not g1565())
else:
return False
return (not g1567())
else:
return False
if (g1568()):
return x_1155
else:
return False
return g1563
f_1156 = g1569()
return f_1156(0)
def g1575():
"""
>>> g1575()
[]
"""
def g1574():
def g1571(x_1157):
def g1573():
def g1572():
return isinstance(x_1157, list)
if (g1572()):
return True
else:
return (x_1157 == [])
if (g1573()):
return x_1157
else:
return []
return g1571
f_1158 = g1574()
return f_1158(0)
def g1578():
"""
>>> g1578()
4
"""
y_1159 = 4
def g1577():
def g1576(y_1160):
return y_1160
return g1576
f_1161 = g1577()
return f_1161(f_1161(y_1159))
def g1581():
"""
>>> g1581()
0
"""
y_1162 = 4
def g1580():
def g1579(x_1164, y_1163):
return 0
return g1579
f_1165 = g1580()
return f_1165(f_1165(y_1162, y_1162), f_1165(y_1162, y_1162))
def g1584():
"""
>>> g1584()
0
"""
y_1166 = 4
def g1583():
def g1582(x_1168, y_1167):
return 0
return g1582
f_1169 = g1583()
return f_1169(f_1169(y_1166, y_1166), f_1169(y_1166, f_1169(y_1166, y_1166)))
def g1587():
"""
>>> g1587()
0
"""
    y_1170 = 4
def g1586():
def g1585(x_1172, y_1171):
return 0
return g1585
f_1173 = g1586()
return f_1173(f_1173(y_1170, f_1173(y_1170, y_1170)), f_1173(y_1170, f_1173(y_1170, y_1170)))
def g1594():
"""
>>> g1594()
4
"""
def g1593():
def g1588(y_1174):
def g1592():
def g1591(f_1176):
return f_1176(f_1176(y_1174))
return g1591
def g1590():
def g1589(y_1175):
return y_1175
return g1589
return g1592()(g1590())
return g1588
return g1593()(4)
def g1598():
"""
>>> g1598()
23
"""
def g1597():
def g1596(x_1177):
return x_1177
return g1596
f_1178 = g1597()
def g1595():
if (False):
return 1
else:
return f_1178(22)
return (g1595()+1)
def g1603():
"""
>>> g1603()
22
"""
def g1602():
def g1601(x_1179):
return x_1179
return g1601
f_1180 = g1602()
def g1600():
def g1599():
return 23 == 0
return f_1180(g1599())
if (g1600()):
return 1
else:
return 22
def g1611():
"""
>>> g1611()
5061
"""
def g1610():
def g1609(x_1182):
if (x_1182):
return (not x_1182)
else:
return x_1182
return g1609
f_1185 = g1610()
def g1608():
def g1607(x_1181):
return (10)*(x_1181)
return g1607
f2_1184 = g1608()
x_1183 = 23
def g1606():
def g1605():
def g1604():
return x_1183 == 0
return f_1185(g1604())
if (g1605()):
return 1
else:
return (x_1183)*(f2_1184((x_1183-1)))
return (g1606()+1)
def g1614():
"""
>>> g1614()
1
"""
def g1613():
def g1612():
return 0
return g1612
f_1186 = g1613()
x_1187 = f_1186()
return 1
def g1617():
"""
>>> g1617()
1
"""
def g1616():
def g1615():
return 0
return g1615
f_1188 = g1616()
f_1188()
return 1
def g1620():
"""
>>> g1620()
4
"""
def g1619():
def g1618(x_1189):
return x_1189
return g1618
f_1190 = g1619()
if (True):
f_1190(3)
return 4
else:
return 5
def g1623():
"""
>>> g1623()
6
"""
def g1622():
def g1621(x_1191):
return x_1191
return g1621
f_1192 = g1622()
(f_1192(4)) if (True) else (5)
return 6
def g1627():
"""
>>> g1627()
120
"""
def g1626():
def g1624(fact_1195, n_1194, acc_1193):
def g1625():
return n_1194 == 0
if (g1625()):
return acc_1193
else:
return fact_1195(fact_1195, (n_1194-1), (n_1194)*(acc_1193))
return g1624
fact_1196 = g1626()
return fact_1196(fact_1196, 5, 1)
def g1632():
"""
>>> g1632()
144
"""
def g1631():
def g1628(b_1199, c_1198, a_1197):
b_1203 = (b_1199)+(a_1197)
def g1630():
def g1629():
a_1201 = (b_1199)+(b_1199)
c_1200 = (c_1198)+(c_1198)
return (a_1201)+(a_1201)
return (a_1197)+(g1629())
a_1202 = g1630()
return (a_1202)*(a_1202)
return g1628
return g1631()(2, 3, 4)
def g1639():
"""
>>> g1639()
3
"""
def g1638():
def g1636(x_1204):
def g1637():
return x_1204()
return g1637
return g1636
f_1205 = g1638()
def g1635():
def g1634():
def g1633():
return 3
return g1633
return f_1205(g1634())
return g1635()()
def g1646():
"""
>>> g1646()
3628800
"""
def g1645():
def g1643(x_1207):
def g1644():
return x_1207 == 0
if (g1644()):
return 1
else:
return (x_1207)*(f_1206((x_1207)-(1)))
return g1643
f_1206 = g1645()
q_1208 = 17
def g1642():
def g1640(a_1209):
q_1208 = 10
def g1641():
return a_1209(q_1208)
return g1641
return g1640
g_1210 = g1642()
return g_1210(f_1206)()
<|end_of_text|># cython: language_level = 3, boundscheck = False
from __future__ import absolute_import
from ._noncustomizable cimport noncustomizable
cdef class saved_yields(noncustomizable):
pass
<|end_of_text|>"""
Cython implementation for the 1D shock code
"""
from __future__ import division
from scipy.special import erf, erfinv
from numpy cimport ndarray, dtype
import numpy as np; cimport numpy as np
cimport cython; from cpython cimport bool
#
# Types
#
DTYPE = np.float64
ctypedef np.float64_t DTYPE_t
#
# Constants here and externs from math
#
cdef extern from "math.h":
double sqrt(double m)
double pow(double m, double n)
double exp(double a)
double fabs(double a)
double M_PI
bint isnan(double x)
double log10(double x)
double fmin(double, double)
cdef double kk = 1.3806488e-16
cdef double ss = 5.670367e-5
#
# Functions HERE
#
"""
Decipher the input parameters from the gas and dust parameters
"""
cdef decipherW(ndarray[DTYPE_t, ndim=1] w, int nspecs, int ndust):
"""
Variables
"""
cdef int ig, id
cdef ndarray[DTYPE_t, ndim=1] w1 = np.zeros(w.shape[0], dtype=DTYPE)
#
# First copy
#
for ig in range(w.shape[0]):
w1[ig] = w[ig]
#
# modify gas
#
for ig in range(nspecs):
w1[2+ig] = w[2+ig]
#
# modify dust: limit size
#
for id in range(ndust):
if (w1[<unsigned int> (2+nspecs+4*id+3)] < 0.0):
w1[2+nspecs+4*id+3] = 1e-30
return w1
""""""
cdef double calcQdust(double Tg, double vdg, double Td, double mass, | Cython |
double gamma):
"""
Calculate the dust heating rate
qd = rhogas CHd ( Trec - Td) |vg - vd|
"""
#
# Variables
#
cdef double s, qd, fact, Trec, CHd
#
#
#
s = fabs(vdg)/sqrt(2.*kk*Tg/mass)
#
# Calculate the heating rate
#
qd = 0.0
if fabs(vdg) <= 1e0:
qd += (gamma+1.)/(gamma-1.) * (Tg - Td) * (sqrt(
(pow(kk,3.)*Tg)/(8.*M_PI*pow(mass,3.))))
else:
"""
#
# Calculate the recovery temperature
#
"""
fact = ((2.*gamma)/(gamma-1.) + 2.*s*s - ( 1./(
0.5 + s*s + (s/sqrt(M_PI)) * exp(-s*s) * (1./erf(s)))) )
Trec = Tg*(gamma-1.)/(gamma+1.) * fact
if s == 0.0:
Trec = Tg
#
# Calculate the heat transfer coefficient
#
CHd = (gamma+1.)/(gamma-1.) * (kk/(8.0*mass*s*s)) * (
(s/sqrt(M_PI))*exp(-s*s) + ((0.5 + s*s) *
erf(s)) )
""""""
#
# Calculate the heating rate
#
qd += CHd*(Trec - Td)*fabs(vdg)
return qd
""""""
cdef double getCpd(double Td):
"""
Calculate and return the heat capacity
"""
#
# Calculate the heat capacity
#
cdef double Cpd = 0.0
if (Td > 1400.0) and (Td < 1820.0):
Cpd += 1e7 + 5e9/(1820.-1400.0)
else:
Cpd += 1e7
return Cpd
""""""
cdef double getfEvap(double Td):
"""
Return the evaporation fraction
"""
cdef double dT = 1e2
if Td < 1950.0:
return 0.0
elif Td > 2050.0:
return 1.0
else:
return ( 3./(dT*dT) * (Td - 2e3 + dT/2.)**(2.) -
2./(dT*dT*dT) * (Td - 2e3 + dT/2.)**(3.))
""""""
cdef double getEPS(double ad):
return 0.8*fmin(1.0, ad/(2.0e-4))
""""""
cdef double calcFDrag(double mbar, double nd, double vd, double td, double ad, double vg, double Tg, double Rhogas):
"""
Calculate the drag forces
This depends on
vdust, vgas, Tgas, Tdust, gas density
"""
#
# Variables
#
cdef double vdg, Fdrag, Cd, s
#
# veldif
#
vdg = (vd - vg)
#
# The s variable
#
Fdrag = 0.0
s = fabs(vdg)/sqrt(2.* kk * Tg/ mbar)
#
# Calculate the drag coefficients
#
if s == 0.0:
return 0.0
else:
Cd = ( (2./(3.*s)) * sqrt(M_PI*td/Tg) + (
(2.0*s*s+1.)/(sqrt(M_PI)* s * s * s) * np.exp(-s*s)) + (
(4.0* pow(s,4.) + 4.*s*s - 1.)/(2.* pow(s,4.)) * erf(s)) )
#
# Calculate the drag
#
Fdrag1 = (-M_PI*ad*ad*Rhogas * Cd/2. * (fabs(vdg)*vdg) )
if isnan(Cd) or isnan(Fdrag1):
Fdrag += 0.0
else:
Fdrag += Fdrag1
""""""
return Fdrag
""""""
cdef double calcDXa(double nd, double vd, double td, double ad, double vg, double gam, double Tg, double Jrad, double rhod, double Rhogas, double Ngas):
"""
Calculate the change of the dust size
dx ad = - f/(rho * H * vd) * (qd + radiation)
This also depends on the recovery temperature
and the heat transfer coefficient
-> needs the differential velocities
"""
#
# Variables
#
cdef double vdg, qd, mbar, fevap, netheat, dxa
cdef double Hevap = 1.1e11
#
# veldif
#
vdg = (vd - vg)
#
# The s variable
#
qd = 0.0
mbar = Rhogas/Ngas
#
# Calculate the heating rate
# The function returns the middle part of the equation
# qd = rhogas * qd * |vg - vd|
#
tempqd = calcQdust(Tg, vdg, td, mbar, gam)
if isnan(tempqd):
qd += 0.0
else:
qd += tempqd*Rhogas
""""""
#
# Calculate the evaporation fraction
#
fevap = getfEvap(td)
#
# Calculate the dxa
#
netheat = 0.0
if Jrad == 0.0:
netheat += qd
else:
netheat = (qd + getEPS(ad)*( M_PI*Jrad - ss*pow(td, 4.)) )
dxa = -fevap/(rhod*Hevap*vd) * netheat
if netheat < 0.0:
dxa = 0.0
return dxa
""""""
cdef double calcDxTd(double nd, double vd, double td, double ad, double vg, double Tg, double Rhogas, double Ngas, double gam, double Jrad, double rhod):
"""
Calculate the rate of change of the dust temperature
"""
#
# variables
#
cdef double vdg, qd, mbar, s, tempqd, Cpd, fevap, netheat, dxtd
#
# veldif
#
vdg = (vd - vg)
#
# The s variable
# This has to be done per gas species
#
qd = 0.0
mbar = Rhogas/ Ngas
s = fabs(vdg)/sqrt(2.* kk * Tg / mbar)
#
# Calculate the heating rate
# The function returns the middle part of the equation
# qd = rhogas * qd * |vg - vd|
#
tempqd = calcQdust(Tg, vdg, td, mbar, gam)
if isnan(tempqd):
qd += 0.0
else:
qd += tempqd*Rhogas
""""""
#
# Calculate the heat capacity
#
Cpd = getCpd(td)
#
# Calculate the evaporation fraction
#
fevap = getfEvap(td)
#
# Calculate the rate of change
#
netheat = 0.0
if Jrad == 0.0:
netheat += qd
else:
netheat += qd + getEPS(ad)*(M_PI*Jrad - ss*pow(td, 4.))
""""""
dxtd = 0.0
dxtd += ( (3. * (1.0 - fevap))/( vd * Cpd * rhod * ad) * netheat)
return dxtd
""""""
#
# Chemistry of gas using rates
#
@cython.boundscheck(False)
cdef calculateR(double Tg, int nspecs, ndarray[DTYPE_t, ndim=1] nden,
ndarray[DTYPE_t, ndim=2] rate ):
"""
Calculate the reactions and update the values
This rate is the creation of gas species i
in terms of number of reactions per unit time
per unit volume
"""
#
# Variables
#
cdef int nrate, irate
cdef int ip1, ip2, ip3, ip4, ip5, ip6, ip7
cdef double k, zeta, avmag, albedo, rad
cdef double d1, d2, d3, d4, d5,d6, d7
    cdef ndarray[DTYPE_t, ndim=1] temprates = np.zeros(
(nspecs), dtype=DTYPE)
zeta = 0.0
avmag = 1e30
albedo = 0.0
rad = 1e9
#
# Loop through the rates to get formation and destruction rates
#
for irate in range(rate.shape[0]):
"""
Options for rate reactions
"""
k = 0.0
if <unsigned int> rate[irate,0] == 1:
k += zeta*rate[irate, 8] # alpha
elif <unsigned int> rate[irate,0] == 2:
k += (zeta*rate[irate, 8]*pow( Tg / 300.0,
rate[irate,9]) * rate[irate,10]/ (1.0-albedo))
elif <unsigned int> rate[irate,0] == 3:
if (-rate[irate,9]*avmag > -30.0):
k += (rad * rate[irate,8] * exp(-rate[irate, 10] *
avmag) )
else: k += 0.0
elif <unsigned int> rate[irate,0] == 4:
print 'NOT AVAILABLE!'
#k += iceform(Temp)
raise SystemExit
else:
if (-rate[irate,10]/Tg > -60.0):
k += (rate[irate, 8]*pow(Tg / 300.0, rate[irate, 9])*
exp(-rate[irate, 10] / Tg) )
else: k += 0.0
""""""
if k > 1e10:
print irate, rate[irate,0], rate[irate, 8:]
print rate[irate,8] * pow(Tg/300.0, rate[irate,9])
print exp(-rate[irate,10]/Tg)
raise SystemExit
#
#
#
if (k > 1e-90) and (~np.isinf(k)):
"""
#
# Get the seven indices of products and reactants
# include this rate
#
"""
d1 = 0.0
d2 = 0.0
d3 = d4 = d5 = d6 = d7 = 0.0
ip1 = <unsigned int> rate[irate, 1]
ip2 = <unsigned int> rate[irate, 2]
ip3 = <unsigned int> rate[irate, 3]
ip4 = <unsigned int> rate[irate, 4]
ip5 = <unsigned int> rate[irate, 5]
ip6 = <unsigned int> rate[irate, 6]
ip7 = <unsigned int> rate[irate, 7]
#
# destruction
#
            if ip2 != -99:
d1 += (-k*nden[ip2] if ip3 == -99 else
-k*nden[ip2]*nden[ip3])
temprates[ip1] += d1*nden[ip1]
d2 += (-k*nden[ip1] if ip3 == -99 else
-k*nden[ip1]*nden[ip3])
temprates[ip2] += d2*nden[ip2]
                if ip3 != -99:
d3 += (-k*nden[ip1]*nden[ip2])
temprates[ip3] += d3*nden[ip3]
else:
d1 += -k
temprates[ip1] += d1*nden[ip1]
#
# formation
#
            if ip2 != -99:
d4 += (k*nden[ip1]*nden[ip2] if ip3 == -99
else k*nden[ip1]*nden[ip2]*nden[ip3])
temprates[ip4] += d4
else:
d4 += k*nden[ip1]
temprates[ip4] += d4
            if ip5 != -99:
                if ip2 != -99:
d5 += (k*nden[ip1]*nden[ip2] if ip3 == -99
else k*nden[ip1]*nden[ip2]*nden[ip3])
temprates[ip5] += d5
else:
d5 += k*nden[ip1]
temprates[ip5] += d5
            if ip6 != -99:
                if ip2 != -99:
d6 += (k*nden[ip1]*nden[ip2] if ip3 == -99
else k*nden[ip1]*nden[ip2]*nden[ip3])
temprates[ip6] += d6
else:
d6 += k*nden[ip1]
temprates[ip6] += d6
            if ip7 != -99:
d7 += (k*nden[ip1]*nden[ip2] if ip3 == -99
else k*nden[ip1]*nden[ip2]*nden[ip3])
temprates[ip7] += d7
""""""
""""""
return temprates
""""""
cdef double calculateFreeEnergy(double totrate):
"""
Calculate the net energy
H + H + energy -> H2
H2 + energy -> H + H
"""
#
# Variables
#
cdef double onev = 1.6021772e-12
return <double> (totrate*4.48*onev)
""""""
cdef double gasKap(double Tg, int destroyed, ndarray[DTYPE_t, ndim=1] Tkap,
ndarray[DTYPE_t, ndim=1] Kaps):
"""
Averaged gas opacities
"""
cdef double kap
kap = <double> pow(10.0, np.interp(log10(Tg), Tkap, Kaps))
if (destroyed == 1 and (Tg < 1.4e3)):
kap = 0.5
return kap
""""""
"""
The vector field of the matrix to solve
"""
@cython.boundscheck(False)
@cython.cdivision(True)
cdef vectorfield(double x, np.ndarray[DTYPE_t, ndim=1] w, np.ndarray[DTYPE_t, ndim=2] Jrad, int haveJ, int nspecs, int ndust, ndarray[DTYPE_t, ndim=1] gmasses, ndarray[DTYPE_t, ndim=1] gammas, double rhod, int destroyed, ndarray[DTYPE_t, ndim=1] Tkap, ndarray[DTYPE_t, ndim=1] Kaps, ndarray[DTYPE_t, ndim=1] gdustFrac, np.ndarray[DTYPE_t, ndim=2] rate):
"""
w : the variables to solve -> x, y
x : position with respect to the shock
p : parameters
"""
#
# Variables here
#
cdef int ii, idust, ig, dumid
cdef double curJ
cdef double Rhogas, Ngas, vgas, Tgas, gam, gam1
cdef double dustLoss, nd, vd, td, ad
cdef double varA, varB, varC, varD, varE, varF
cdef double dumGas, dumDust, radiation
cdef double f1, f2
dumid = nspecs+2
cdef ndarray[DTYPE_t, ndim=1] YDOT = np.zeros(
<unsigned int> (2 + nspecs+ 4*ndust), dtype=DTYPE)
#
# Rest of functions
#
curJ = 0.0
if haveJ == 1:
curJ += np.interp(x, Jrad[0,:], Jrad[1,:])
""""""
#
# Calculate the gas constants first
#
vgas = w[0]
Tgas = w[1]
Rhogas = 0.0
Ngas = 0.0
for ii in range(nspecs):
Rhogas += w[<unsigned int> (2+ii)] * gmasses[<unsigned int> ii]
Ngas += w[<unsigned int> (2+ii)]
#
# Gamma
#
gam1 = 0.0
gam = 0.
for ii in xrange(nspecs):
gam1 += w[<unsigned int> (2+ii)] * (gammas[ii] *2. - 2.)
gam += (gam1/Ngas + 2.)/(gam1/Ngas)
#
# Dust changes
#
    cdef np.ndarray[DTYPE_t, ndim=1] mdust = np.zeros(ndust, dtype=DTYPE)
cdef np.ndarray[DTYPE_t, ndim=1] fdrag = np.zeros(ndust, dtype=DTYPE)
cdef np.ndarray[DTYPE_t, ndim=1] dxa = np.zeros(ndust, dtype=DTYPE)
cdef np.ndarray[DTYPE_t, ndim=1] dxtd = np.zeros(ndust, dtype=DTYPE)
cdef np.ndarray[DTYPE_t, ndim=1] Cpd = np.zeros(ndust, dtype=DTYPE)
dustLoss = 0.0
#
# Dust position is 2 + gas species + dusttype*4
# since each dust takes 4 positions
#
for idust in range(ndust):
nd = w[<unsigned int> (2+nspecs + idust*4 + 0)]
vd = w[<unsigned int> (2+nspecs + idust*4 + 1)]
td = w[<unsigned int> (2+nspecs + idust*4 + 2)]
ad = w[<unsigned int> (2+nspecs + idust*4 + 3)]
mdust[idust] = (4.0/3.0)*M_PI*rhod*pow(ad, 3.)
fdrag[idust] = calcFDrag(Rhogas/Ngas, nd, vd, td, ad, vgas, Tgas,
Rhogas)
dxa[idust] = calcDXa(nd, vd, td, ad, vgas, gam, Tgas, curJ, rhod,
Rhogas, Ngas)
dxtd[idust] = calcDxTd(nd, vd, td, ad, vgas, Tgas, Rhogas,
Ngas, gam, curJ, rhod)
Cpd[idust] = getCpd(td)
if dxa[idust] > 0.0:
dustLoss += 0.0
else:
dustLoss += -4.0 * M_PI * (nd * vd * rhod * ad*ad * dxa[idust])
""""""
""""""
#
# Calculate the variable here
#
varA = Rhogas*w[0] - (kk*w[1]/w[0])*Ngas
varB = Ngas*kk
varC = 0.0 # reset variable C
#
# Variable C now has dust properties
# Calculate the gas part
#
cdef ndarray[DTYPE_t, ndim=1] RatesMat = (calculateR(Tgas, nspecs,
w[2:2+nspecs], rate))
dumGas = 0.0 # Reset gas component
for ii in range(nspecs):
dumGas += RatesMat[ii]*(w[0]*gmasses[ii] + (kk*w[1]/w[0]))
#
# Add the dust evaporation mass
#
dumGas += dustLoss/(gmasses[3])*(w[0]*gmasses[3] + (kk*w[1]/w[0]))
#
# Get the dust part
#
dumDust = 0. # reset the dust part
for idust in range(ndust):
dumDust += (w[<unsigned int> (2+nspecs+idust*4 +0)] *
(fdrag[idust] + 4.0*M_PI *
pow(w[<unsigned int> (2+nspecs+idust*4 + 3)],2) * rhod *
pow(w[<unsigned int> (2+nspecs+idust*4+1)], 2.) * dxa[idust]) )
varC += -(dumGas + dumDust)
#
# Calculate the variables for energy equation
#
varD = w[0]*w[0]*Rhogas
varE = 0.0
for ii in range(nspecs):
varE += gammas[ii] * w[2+ii]
varE *= kk*w[0]
varF = 0.0 # reset variable F
#
# Calculate the variable F with the dust
#
dumGas = 0.0 # reset gas
for ii in range(nspecs):
dumGas += (RatesMat[ii]*(gammas[ii]*kk*w[1] +
0.5*w[0]*w[0]*gmasses[ii]))
dumGas += dustLoss/(gmasses[3]) * (gammas[3]*kk*w[1] +
0.5*w[0]*w[0]*gmasses[3])
#
# Dust part for variable F
#
dumDust = 0.0 # reset dust
for idust in range(ndust):
dumDust += ( w[<unsigned int> (dumid + idust*4+1)] *
w[<unsigned int> (dumid+idust*4+0)] *
(fdrag[idust] + 2.*M_PI*
pow(w[<unsigned int> (dumid+idust*4+3)], 2.) * rhod *
pow(w[<unsigned int> (dumid+idust*4+1)], 2.) *dxa[idust]) +
w[<unsigned int> (dumid+idust*4+0)] * (
mdust[idust] * w[<unsigned int> (dumid+idust*4+1)] * Cpd[idust] *
dxtd[idust]) + (4.0 * M_PI * (
pow(w[<unsigned int> (dumid+idust*4+3)],2.) *
w[<unsigned int> (dumid+idust*4+0)] * Cpd[idust] *
w[<unsigned int> (dumid+idust*4+1)] *
w[<unsigned int> (dumid+idust*4+2)] * rhod *dxa[idust])) )
varF += - (dumGas + dumDust) + calculateFreeEnergy(RatesMat[1])
#
# Radiation part
#
radiation = 0.0
if haveJ == 1:
radiation += (4.0 * M_PI * Rhogas *
gasKap(w[1], destroyed, Tkap, Kaps) *
(curJ - (ss*pow(w[1], 4.)/M_PI)))
for idust in range(ndust):
radiation += (4.0* M_PI * M_PI *
w[<unsigned int> (dumid+4*idust+0)] *
pow(w[<unsigned int> (dumid+4*idust+3)], 2.) *
getEPS(w[<unsigned int> (dumid+4*idust+3)]) *
(curJ- (ss * pow(w[<unsigned int> (dumid+4*idust+2)], 4.)/
M_PI)))
varF += radiation # add the radiation
#if curJ > 0.0:
# print x, varA, varB, varC, varD, varE, varF
# print Jrad[0,:5], Jrad[1,:5]
# print curJ
# raise SystemExit
#
# The RHS matrix
# Check few numbers to make sure these are not too large
# or too small
#
YDOT[0] = (varC*varE - varB*varF)/(varA*varE - varB*varD)
YDOT[1] = (varA*varF - varD*varC)/(varA*varE - varB*varD)
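    # Added note (not in the original): the two lines above apply Cramer's
    # rule to the 2x2 linear system
    #     varA*dv + varB*dT = varC
    #     varD*dv + varE*dT = varF
    # giving dv = YDOT[0] (gas velocity gradient) and dT = YDOT[1] (gas
    # temperature gradient); the shared denominator is the determinant.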
#
# Integration is over space dx
# The change of n * v
# changes in chemical species
# Add the dust evaporation
#
for ii in range(nspecs):
YDOT[<unsigned int> (2+ii)] += RatesMat[ii]
YDOT[<unsigned int> (2+ii)] += (gdustFrac[ii]*dustLoss/gmasses[ii])
YDOT[<unsigned int> (2+ii)] -= (w[2+ii] * YDOT[0])
YDOT[<unsigned int> (2+ii)] /= w[0]
#
#
# DUST from here on
#
if ndust > 0:
for idust in xrange(ndust):
nd = w[<unsigned int> (2+nspecs + idust*4 + 0)]
vd = w[<unsigned int> (2+nspecs + idust*4 + 1)]
td = w[<unsigned int> (2+nspecs + idust*4 + 2)]
ad = w[<unsigned int> (2+nspecs + idust*4 + 3)]
YDOT[<unsigned int> (dumid + idust*4 + 1)] += (
    fdrag[idust]/(mdust[idust]*vd))
YDOT[<unsigned int> (dumid + idust*4 + 0)] += (
- (nd/vd) * YDOT[<unsigned int> (dumid + idust*4 + 1)])
YDOT[<unsigned int> (dumid + idust*4 + 2)] += dxtd[idust]
YDOT[<unsigned int> (dumid + idust*4 + 3)] += dxa[idust]
#
# Crash and BURN handles
#
for icomp in range(4):
    if isnan(YDOT[<unsigned int> (dumid + idust*4 + icomp)]):
        print 'NAN is found!'
        print 'Tgas and vgas: %e, %e'%(w[1], w[0]*1e-5)
        print 'ndust, vdust and Tdust: ', w[2+nspecs:]
        print
        print fdrag, mdust
        print w
        raise SystemExit
""""""
"""
Limit the changes with respect to values to 1e-15
f1 -> vgas
f2 -> tgas
f3 -- f6 -> number densities
rest -> dust
"""
return YDOT
""""""
#
# Function to be called from outside
#
cpdef solve(double x, np.ndarray[DTYPE_t, ndim=1] w, list p):
"""
Here is the function to pass to the cython function
"""
#
# Variables
#
cdef int nspecs, ndust, haveJ, destroyed
cdef ndarray[DTYPE_t, ndim=1] w0 = np.zeros(w.shape[0], dtype=DTYPE)
#
# Rescale the input
#
nspecs = <unsigned int> (p[0].nspecs)
ndust = <unsigned int> (p[1].nspecs)
w0 = decipherW(w, nspecs, ndust)
#
# return YDOT
#
haveJ = 0
if p[3]:
haveJ = 1
destroyed = 0
if p[1].destroyed:
destroyed = 1
return (vectorfield(x, w0, p[2], haveJ, nspecs, ndust,
np.array(p[0].mass), np.array(p[0].gamma),
p[1].mdust, destroyed, p[0].logT, p[0].logK,
np.array(p[0].dustfrac), p[0].rates))
""""""
cimport numpy as np
import numpy as np
cdef void solve(Py_ssize_t n, double[:] lower, double[:] diag, double[:] upper,
                double[:] rhs, double[:] x):
    cdef:
        double m
        Py_ssize_t i, im1, nm1 = n - 1
    # Thomas algorithm, forward elimination: start at i = 1 so the i - 1
    # references stay in bounds (starting at 0 would wrap around).
    for i in range(1, n):
        im1 = i - 1
        m = lower[i] / diag[im1]
        diag[i] -= m * upper[im1]
        rhs[i] -= m * rhs[im1]
    # Back substitution.
    x[nm1] = rhs[nm1] / diag[nm1]
    for i in range(n - 2, -1, -1):
        x[i] = (rhs[i] - upper[i] * x[i + 1]) / diag[i]
cpdef double[:] tdma(double[:] a, double[:] b, double[:] c,
double[:] d):
cdef:
Py_ssize_t n = b.shape[0]
double[:] x = np.zeros(n, dtype=np.float64)
solve(n, a, b, c, d, x)
return x
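# Hypothetical usage sketch for tdma (array values assumed, not from the
# original source). Note that solve() overwrites its diag and rhs arguments
# in place, so pass copies if the inputs must be preserved.
#
#   import numpy as np
#   a = np.array([0., 1., 1.])   # sub-diagonal, a[0] unused
#   b = np.array([4., 4., 4.])   # main diagonal
#   c = np.array([1., 1., 0.])   # super-diagonal, c[-1] unused
#   d = np.array([5., 6., 5.])   # right-hand side
#   x = np.asarray(tdma(a, b, c, d))  # solves the tridiagonal system A x = d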
# mode: run
# tag: cpp, cpp11, pthread
# cython: experimental_cpp_class_def=True, language_level=2
from libcpp.vector cimport vector
cdef cypclass Value:
__dealloc__(self) with gil:
print("Value destroyed")
int foo(self):
return 0
def test_cpp_index_refcounting():
"""
>>> test_cpp_index_refcounting()
Value destroyed
0
"""
cdef vector[Value] vec
vec.push_back(Value())
a = vec[0]
if Cy_GETREF(a) != 3:
return -1
return 0
cdef cypclass Vector[T]:
vector[T] vec
T __getitem__(self, int index):
return vec[index]
def test_cyp_template_index_refcounting():
"""
>>> test_cyp_template_index_refcounting()
Value destroyed
0
"""
v = Vector[Value]()
v.vec.push_back(Value())
a = v[0]
if Cy_GETREF(a) != 3:
return -1
return 0
cdef cypclass ValueVector:
vector[Value] vec
Value __getitem__(self, int index):
return vec[index]
def test_cyp_index_refcounting():
"""
>>> test_cyp_index_refcounting()
Value destroyed
0
"""
v = ValueVector()
v.vec.push_back(Value())
a = v[0]
if Cy_GETREF(a) != 3:
return -1
return 0
def test_call_on_cpp_index_refcounting():
"""
>>> test_call_on_cpp_index_refcounting()
Value destroyed
0
"""
cdef vector[Value] vec
val = Value()
vec.push_back(val)
vec[0].foo()
if Cy_GETREF(val) != 3:
return -1
return 0
def test_call_on_cyp_template_index_refcounting():
"""
>>> test_call_on_cyp_template_index_refcounting()
Value destroyed
0
"""
v = Vector[Value]()
val = Value()
v.vec.push_back(val)
v[0].foo()
if Cy_GETREF(val) != 3:
return -1
return 0
def test_call_on_cyp_index_refcounting():
"""
>>> test_call_on_cyp_index_refcounting()
Value destroyed
0
"""
v = ValueVector()
val = Value()
v.vec.push_back(val)
v[0].foo()
if Cy_GETREF(val) != 3:
return -1
return 0
include '../../../types.pxi'
from cython.operator cimport dereference as deref
from libcpp cimport bool
from libcpp.vector cimport vector
from quantlib.handle cimport shared_ptr
from quantlib.time.date cimport Date, date_from_qldate
from quantlib.time._date cimport Date as _Date
from quantlib.time.daycounter cimport DayCounter
from quantlib.time._daycounter cimport DayCounter as QlDayCounter
from .black_vol_term_structure cimport BlackVarianceTermStructure
from . cimport _black_vol_term_structure as _bvts
from . cimport _black_variance_curve as _bvc  # assumed module exposing the wrapped BlackVarianceCurve
from ..._vol_term_structure cimport VolatilityTermStructure
cdef class BlackVarianceCurve(BlackVarianceTermStructure):
""" Black volatility curve modelled as variance curve
This class calculates time-dependent Black volatilities using
as input a vector of (ATM) Black volatilities observed in the
market.
The calculation is performed interpolating on the variance curve.
Linear interpolation is used as default; this can be changed
by the set_interpolation() method.
For strike dependence, see BlackVarianceSurface.
Parameters
----------
reference_date : Date
dates : list of Date
black_vols : list of Volatility
day_counter: DayCounter
force_monotone_variance: bool
"""
cdef inline _bvc.BlackVarianceCurve* get_bvc(self):
""" Utility function to extract a properly casted BlackVarianceCurve out
of the internal _thisptr attribute of the BlackVolTermStructure base class.
"""
return <_bvc.BlackVarianceCurve*>self.as_ptr()
def __init__(self,
Date reference_date,
list dates,
vector[Volatility] black_vols,
DayCounter day_counter,
bool force_monotone_variance = True,
):
cdef vector[_Date] _dates
for d in dates:
_dates.push_back(deref((<Date?>d)._thisptr))
self._thisptr.reset(
new _bvc.BlackVarianceCurve(
deref(reference_date._thisptr),
_dates,
black_vols,
deref(day_counter._thisptr),
force_monotone_variance,
)
)
@property
def min_strike(self):
return self.get_bvc().minStrike()
@property
def max_strike(self):
return self.get_bvc().maxStrike()
from libcpp.memory cimport static_pointer_cast
from .kaacore.hashing cimport c_calculate_hash
from .kaacore.resources cimport CResourceReference
from .kaacore.textures cimport CTexture, CImageTexture
ctypedef CTexture* CTexture_ptr
cdef class Texture:
cdef CResourceReference[CTexture] c_texture
@staticmethod
cdef Texture create(const CResourceReference[CTexture]& texture):
cdef Texture instance = Texture.__new__(Texture)
instance.c_texture = texture
return instance
def __init__(self, str path not None):
self.c_texture = CResourceReference[CTexture](
static_pointer_cast[CTexture, CImageTexture](
CImageTexture.load(path.encode()).res_ptr
)
)
def __eq__(self, Texture other):
if other is None:
return False
return self.c_texture == other.c_texture
def __hash__(self):
return c_calculate_hash[CTexture_ptr](self.c_texture.get())
@property
def dimensions(self):
return Vector.from_c_vector(self.c_texture.get().get_dimensions())
from _c_sc cimport *
cdef extern from "sc_mpi.h" nogil:
enum: SC_MPI_H
enum: MPI_SUCCESS
enum: MPI_COMM_NULL
enum: MPI_COMM_WORLD
enum: MPI_COMM_SELF
enum: MPI_ANY_SOURCE
enum: MPI_ANY_TAG
enum: MPI_STATUS_IGNORE
enum: MPI_STATUSES_IGNORE
enum: MPI_REQUEST_NULL
enum: MPI_CHAR
enum: MPI_SIGNED_CHAR
enum: MPI_UNSIGNED_CHAR
enum: MPI_BYTE
enum: MPI_SHORT
enum: MPI_UNSIGNED_SHORT
enum: MPI_INT
enum: MPI_UNSIGNED
enum: MPI_LONG
enum: MPI_UNSIGNED_LONG
enum: MPI_LONG_LONG_INT
enum: MPI_FLOAT
enum: MPI_DOUBLE
enum: MPI_LONG_DOUBLE
enum: MPI_MAX
enum: MPI_MIN
enum: MPI_SUM
enum: MPI_PROD
enum: MPI_LAND
enum: MPI_BAND
enum: MPI_LOR
enum: MPI_BOR
enum: MPI_LXOR
enum: MPI_BXOR
enum: MPI_MINLOC
enum: MPI_MAXLOC
enum: MPI_REPLACE
enum: MPI_UNDEFINED
SC_EXTERN_C_BEGIN
enum sc_tag_t:
SC_TAG_AG_ALLTOALL
SC_TAG_AG_RECURSIVE_A
SC_TAG_AG_RECURSIVE_B
SC_TAG_AG_RECURSIVE_C
SC_TAG_NOTIFY_RECURSIVE
SC_TAG_REDUCE
SC_TAG_PSORT_LO
SC_TAG_PSORT_HI
ctypedef int MPI_Comm
ctypedef int MPI_Datatype
ctypedef int MPI_Op
ctypedef int MPI_Request
cdef struct MPI_Status:
int count
int cancelled
int MPI_SOURCE
int MPI_TAG
int MPI_ERROR
ctypedef MPI_Status MPI_Status
int MPI_Init (int *, char ***)
int MPI_Finalize ()
int MPI_Abort (MPI_Comm, int) __attribute__ ((noreturn))
int MPI_Comm_dup (MPI_Comm, MPI_Comm *)
int MPI_Comm_free (MPI_Comm *)
int MPI_Comm_size (MPI_Comm, int *)
int MPI_Comm_rank (MPI_Comm, int *)
int MPI_Barrier (MPI_Comm)
int MPI_Bcast (void *, int, MPI_Datatype, int, MPI_Comm)
int MPI_Gather (void *, int, MPI_Datatype, void *, int, MPI_Datatype, int, MPI_Comm)
int MPI_Gatherv (void *, int, MPI_Datatype, void *, int *, int *, MPI_Datatype, int, MPI_Comm)
int MPI_Allgather (void *, int, MPI_Datatype, void *, int, MPI_Datatype, MPI_Comm)
int MPI_Allgatherv (void *, int, MPI_Datatype, void *, int *, int *, MPI_Datatype, MPI_Comm)
int MPI_Reduce (void *, void *, int, MPI_Datatype, MPI_Op, int, MPI_Comm)
int MPI_Allreduce (void *, void *, int, MPI_Datatype, MPI_Op, MPI_Comm)
double MPI_Wtime ()
int MPI_Recv (void *, int, MPI_Datatype, int, int, MPI_Comm, MPI_Status *)
int MPI_Irecv (void *, int, MPI_Datatype, int, int, MPI_Comm, MPI_Request *)
int MPI_Send (void *, int, MPI_Datatype, int, int, MPI_Comm)
int MPI_Isend (void *, int, MPI_Datatype, int, int, MPI_Comm, MPI_Request *)
int MPI_Probe (int, int, MPI_Comm, MPI_Status *)
int MPI_Iprobe (int, int, MPI_Comm, int *, MPI_Status *)
int MPI_Get_count (MPI_Status *, MPI_Datatype, int *)
int MPI_Wait (MPI_Request *, MPI_Status *)
int MPI_Waitsome (int, MPI_Request *, int *, int *, MPI_Status *)
int MPI_Waitall (int, MPI_Request *, MPI_Status *)
size_t sc_mpi_sizeof (MPI_Datatype t)
SC_EXTERN_C_END
import math
import numpy as np
from libc.stdlib cimport malloc, free
### Kolmogorov complexity ###
#% a la Lempel and Ziv (IEEE trans inf theory it-22, 75 (1976),
#% h(n)=c(n)/b(n) where c(n) is the kolmogorov complexity
#% and h(n) is a normalised measure of complexity. This function returns h(n)
def normedKolmogorovComplexity(s):
n = len(s)
c = 1
l = 1
b = n / math.log(n, 2)
i = 0
k = 1
k_max = 1
stop = 0
while stop==0:
if s[i+k-1] != s[l+k-1]:
if k>k_max:
k_max=k
i=i+1
if i==l:
c=c+1
l=l+k_max
if l+1>n:
stop=1
else:
i=0
k=1
k_max=1
else:
k=1
else:
k=k+1
if l+k>n:
c=c+1
stop=1
return(c/b)
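# Rough sanity sketch: a periodic string compresses well while a random one
# does not, so (values are illustrative, not exact)
#   normedKolmogorovComplexity('01' * 500)                      # small h(n)
#   normedKolmogorovComplexity(np.random.randint(0, 2, 1000))   # close to 1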
def CnormedKolmogorovComplexity(s):
cdef int n = len(s)
cdef int p = 10
cdef int c = 1
cdef int l = 1
cdef int i = 0
cdef int k = 1
cdef int k_max = 1
cdef int stop = 0
cdef int *cs = <int *>malloc(n * sizeof(int))
try:
for j in range(n): cs[j] = s[j]
while stop==0:
if cs[i+k-1] != cs[l+k-1]:
if k>k_max:
k_max=k
i=i+1
if i==l:
c=c+1
l=l+k_max
if l+1>n:
stop=1
else:
i=0
k=1
k_max=1
else:
k=1
else:
k=k+1
if l+k>n:
c=c+1
stop=1
finally:
free(cs)
b = n / math.log(n, 2)
return(c/b)
def MovingKC(data, mean, window):
n = len(data)
bindata = binaryTimeseries(data,mean)
kc = np.array([normedKolmogorovComplexity(bindata[i-window:i]) for i in range(2*window, n)])
na = np.zeros(2*window)
na.fill(float('NaN'))
kc = np.concatenate([na,kc])
return(kc)
def CMovingKC(data, mean, window):
n = len(data)
bindata = binaryTimeseries(data,mean)
kc = np.array([CnormedKolmogorovComplexity(bindata[i-window:i]) for i in range(2*window, n)])
na = np.zeros(2*window)
na.fill(float('NaN'))
kc = np.concatenate([na,kc])
return(kc)
#for detrending:
from scipy import signal
def CMovingKC_detrend(data, window):
n = len(data)
def KCbinaryTimeseriesDetrend(data,window):
zeros = np.zeros(window)
detrend = signal.detrend(data, type='linear')
binary = binaryTimeseries(detrend,zeros)
return(CnormedKolmogorovComplexity(binary))
kc = np.array( [KCbinaryTimeseriesDetrend(data[i-window:i],window) for i in range(2*window, n)])
na = np.zeros(2*window)
na.fill(float('NaN'))
kc = np.concatenate([na,kc])
return(kc)
### ###
#% Convert a timeseries, a, to a binary timeseries, c. If a[i] >= b[i] then c[i] = 1, otherwise c[i] = 0. Taken from Rohani and Miramontes, 1995. Takes as an argument two numpy arrays
def binaryTimeseries(a,b):
def switch(x):
if(x>=0):
return(1)
else:
return(0)
return([switch(x) for x in a-b])
<|end_of_text|>include "packet.pxd"
include "flow.pxd"
cdef class PacketFlow:
cdef flow_struct flow
cpdef long get_flow_start_time(self)
cpdef int get_packet_count(self)
cdef bint check_flow_direction(self, char* packet_src_addr)
cdef void first_packet(self, packet_struct packet)
cpdef void add_packet(self, packet_struct packet)
cdef void update_flow_bulk(self, packet_struct packet)
cdef void update_forward_bulk(self, packet_struct packet, long timestamp_last_bulk)
cdef void update_backward_bulk(self, packet_struct packet, long timestamp_last_bulk)
cdef void detect_and_update_sublfows(self, packet_struct packet)
cpdef void update_active_and_idle_time(self, long current_time, int threshold)
cdef void check_flags(self, packet_struct packet)
cdef float get_sub_flow_forward_bytes(self)
cdef float get_sub_flow_forward_packets(self)
cdef float get_sub_flow_backward_bytes(self)
cdef float get_sub_flow_backward_packets(self)
cdef float get_forward_packets_per_second(self)
cdef float get_backward_packets_per_second(self)
cdef float get_down_up_ratio(self)
cdef float get_avg_package_size(self)
cdef float forward_avg_segment_size(self)
cdef float backward_avg_segment_size(self)
cdef float forward_bulk_duration_in_second(self)
cdef float forward_avg_bytes_per_bulk(self)
cdef float forward_avg_packets_per_bulk(self)
cdef float forward_avg_bulk_rate(self)
cdef float backward_bulk_duration_in_second(self)
cdef float backward_avg_bytes_per_bulk(self)
cdef float backward_avg_packets_per_bulk(self)
cdef float backward_avg_bulk_rate(self)
cdef float get_flow_duration(self)
cdef float flow_bytes_per_second(self)
cdef float flow_packets_per_second(self)
cdef int packet_count(self)
cpdef get_features(self)
import numpy as np
cimport numpy as np
length = 300
cdef np.ndarray target
target = np.zeros(length)
target[np.random.uniform(0,1,length)>0.6]=1
print("target")
print(target)
ctypedef np.int_t DTYPE_t
cdef np.ndarray[DTYPE_t] cross(np.ndarray[DTYPE_t] ch1,np.ndarray[DTYPE_t] ch2):
cdef int index
index = int(np.random.uniform(0,length))
return np.concatenate([ch1[:index],ch2[index:]])
cdef np.ndarray[DTYPE_t] mutate(np.ndarray[DTYPE_t] ch):
cdef int index
index = int(np.random.uniform(0,length))
if ch[index]>0:
ch[index]=0
else:
ch[index]=1
return ch
cdef init_chromosome(int length):
cdef np.ndarray chromosome
chromosome = np.zeros(length)
chromosome[np.random.uniform(0,1,length)>0.6]=1
return chromosome
cdef DTYPE_t metric(np.ndarray[DTYPE_t] a):
return -np.sum((a-target)**2)
cdef evaluate(np.ndarray[DTYPE_t,ndim=2] population, DTYPE_t[:] result):
#cdef int r=len(population)
#cdef DTYPE_t[:] result = np.empty(r,dtype=np.DTYPE_t)
for i in range(len(result)):
#result[i] = metric(population[i])
result[i] = metric(population[i])
#return result
cdef select_mutate(np.ndarray[DTYPE_t,ndim=2] population, float rate, np.ndarray[DTYPE_t,ndim=2] p1):
#cdef int r1=int(len(population)*rate)
#cdef int r2=length
#cdef int index
#cdef np.ndarray[:] p1 = np.empty(r1, dtype=np.ndarray)
for i in range(len(p1)):
index = int(np.random.uniform(0,len(population),1))
p1[i] = population[index]
#return p1
cdef select_cross(np.ndarray[DTYPE_t,ndim=2] population, float rate, np.ndarray[DTYPE_t,ndim=2] p1, np.ndarray[DTYPE_t,ndim=2] p2):
#cdef int r1=int(len(population)*rate)
#cdef int r2=length
#cdef np.ndarray[:] p1 = np.empty(r1, dtype=np.ndarray)
#cdef np.ndarray[:] p2 = np.empty(r1, dtype=np.ndarray)
for i in range(len(p1)):
pair = [int(j) for j in np.random.uniform(0,len(population),2)]
p1[i] = population[pair[0]]
p2[i] = population[pair[1]]
#return p1,p2
cdef gp(int population_num, int loop_num, float cross_rate, float mutate_rate):
cdef int population_num1 = population_num
cdef int population_num2 = int(population_num*cross_rate)
cdef int population_num3 = int(population_num*mutate_rate)
cdef int population_num4 = population_num1+population_num2+population_num3
#cdef np.ndarray[:] population=np.empty(population_num1,dtype=np.ndarray)
#cdef np.ndarray[:] all_population=np.empty(population_num4,dtype=np.ndarray)
cdef np.ndarray[DTYPE_t, ndim=2] population = np.zeros([population_num,length],dtype=int)
cdef np.ndarray[DTYPE_t, ndim=2] all_population = np.zeros([population_num4,length],dtype=int)
#cdef np.ndarray[:] cross_population1=np.empty(population_num2,dtype=np.ndarray)
cdef np.ndarray[DTYPE_t,ndim=2] cross_population1=np.zeros([population_num2,length],dtype=int)
#cdef np.ndarray[:] cross_population2=np.empty(population_num2,dtype=np.ndarray)
cdef np.ndarray[DTYPE_t,ndim=2] cross_population2=np.zeros([population_num2,length],dtype=int)
#cdef np.ndarray[:] cross_children=np.empty(population_num2,dtype=np.ndarray)
cdef np.ndarray[DTYPE_t,ndim=2] cross_children=np.zeros([population_num2,length],dtype=int)
#cdef np.ndarray[:] mutate_population=np.empty(population_num3,dtype=np.ndarray)
cdef np.ndarray[DTYPE_t,ndim=2] mutate_population=np.zeros([population_num3,length],dtype=int)
#cdef np.ndarray[:] mutate_children=np.empty(population_num3,dtype=np.ndarray)
cdef np.ndarray[DTYPE_t, ndim=2] mutate_children=np.zeros([population_num3,length],dtype=int)
cdef DTYPE_t[:] result=np.empty(population_num4,dtype=int)
cdef DTYPE_t[:] result2=np.empty(population_num,dtype=int)
#cdef np.ndarray all_population[population_num+int(population_num*cross_rate)+int(population_num*mutate_rate)]
#cdef np.ndarray cross_population1[int(population_num*cross_rate)]
#cdef np.ndarray cross_population2[int(population_num*cross_rate)]
#cdef np.ndarray cross_children[int(population_num*cross_rate)]
#cdef np.ndarray mutate_population[int(population_num*mutate_rate)]
#cdef np.ndarray mutate_children[int(population_num*mutate_rate)]
#cdef np.ndarray result[population_num]
for i in range(population_num):
population[i] = init_chromosome(length)
for i in range(loop_num):
select_cross(population, cross_rate, cross_population1, cross_population2)
select_mutate(population, mutate_rate,mutate_population)
#
for j in range(len(cross_population1)):
cross_children[j] = cross(cross_population1[j],cross_population2[j])
#
# for j in range(len(mutate_population)):
# mutate_children[j] = mutate(mutate_population[j])
#
# all_population = np.concatenate([population,cross_children,mutate_children])
# #all_population = np.concatenate([population,cross_children])
# #all_population = np.concatenate([population])
#
# #score = evaluate(np.asarray(all_population))
# evaluate(all_population,result)
# indexs = np.asarray(result).argsort()[-population_num:]
# population = np.asarray(all_population)[indexs]
# #for i in range(len(population)):
# # result[i] = metric(population[i])
# evaluate(population,result2)
# best_index = np.asarray(result2).argsort()[-1]
# best = np.asarray(population)[np.asarray(result2).argsort()[-1]]
# best_score = result[np.asarray(result2).argsort()[-1]]
#
# print("best")
# print(best)
# print(best_score)
cpdef gpp(int population_num, int loop_num, float cross_rate, float mutate_rate):
gp(population_num, loop_num, cross_rate, mutate_rate)
# distutils: language = c++
# distutils: sources = src/bogus.cpp
# cython: c_string_type=str, c_string_encoding=ascii
cdef extern from 'bogus.hpp':
cdef cppclass bogus:
bogus() except +
int get_double(int value)
cdef class Bogus:
cdef bogus b
def get_double(self, int value):
return self.b.get_double(value)
import math
import numpy
import re
cimport numpy
cimport cython
cdef extern from "math.h":
double sqrt(double i)
cdef class Box(object):
"""
A unlabeled bounding box not bound to a frame.
"""
@cython.profile(False)
def __init__(self, int xtl, int ytl, int xbr, int ybr,
int frame = 0, int lost = 0, int occluded = 0,
image = None, label = None,
int generated = 0, double score = 0.0, attributes = None):
"""
Initializes the bounding box.
"""
if xbr <= xtl:
raise TypeError("xbr ({0}) must be > xtl ({1})".format(xbr, xtl))
elif ybr <= ytl:
raise TypeError("ybr ({0}) must be > ytl ({1})".format(ybr, ytl))
elif xtl < 0:
raise TypeError("xtl must be nonnegative")
elif ytl < 0:
raise TypeError("ytl must be nonnegative")
self.xtl = xtl
self.ytl = ytl
self.xbr = xbr
self.ybr = ybr
self.frame = frame
self.lost = lost
self.image = image
self.label = label
self.occluded = occluded
self.generated = generated
self.score = score
if attributes is None:
attributes = []
self.attributes = attributes
@property
def width(self):
return self.xbr - self.xtl
@property
def height(self):
return self.ybr - self.ytl
@property
def size(self):
return self.width, self.height
@property
def center(self):
"""
Calculates the center of the bounding box.
"""
return self.xtl+self.width/2, self.ytl+self.height/2
@property
def area(self):
return (self.xbr - self.xtl) * (self.ybr - self.ytl)
def distance(self, oth):
"""
Calculate the Euclidean distance between boxes.
"""
scenter = abs(self.xbr + self.xtl) / 2, \
abs(self.ybr + self.ytl) / 2
ocenter = abs(oth.xbr + oth.xtl) / 2, \
abs(oth.ybr + oth.ytl) / 2
diff = scenter[0] - ocenter[0], \
scenter[1] - ocenter[1]
sum = diff[0]**2 + diff[1]**2
return math.sqrt(sum)
def intersects(self, oth):
"""
Determines if there is any overlap between two boxes.
"""
xlap = max(self.xtl, oth.xtl) <= min(self.xbr, oth.xbr)
ylap = max(self.ytl, oth.ytl) <= min(self.ybr, oth.ybr)
return xlap and ylap
def percentoverlap(self, oth):
"""
Calculates the percent of boxes that overlap.
"""
xdiff = <float>(min(self.xbr, oth.xbr) - max(self.xtl, oth.xtl))
ydiff = <float>(min(self.ybr, oth.ybr) - max(self.ytl, oth.ytl))
if xdiff <= 0 or ydiff <= 0:
return 0
uni = self.area + oth.area - xdiff * ydiff
return float(xdiff * ydiff) / float(uni)
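# percentoverlap is intersection-over-union (IoU): with intersection
# I = xdiff * ydiff and union U = area1 + area2 - I it returns I / U,
# so identical boxes give 1.0 and disjoint boxes give 0.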
def contains(self, point):
    # True when the point lies between the top-left and bottom-right corners.
    return (self.xtl <= point[0] <= self.xbr and
            self.ytl <= point[1] <= self.ybr)
def resize(self, xratio, yratio = None):
"""
Resizes the box by the xratio and yratio. If no yratio is specified,
defaults to the xratio.
"""
if yratio is None:
yratio = xratio
return Box(self.xtl, self.ytl,
self.xtl + <int> (self.width * xratio),
self.ytl + <int> (self.height * yratio),
self.frame, self.lost, self.occluded,
self.image, self.label, self.generated,
self.score, list(self.attributes))
def transform(self, xratio, yratio = None):
"""
Transforms the space that the box exists in by an x and y ratio. If
the y ratio is not specified, defaults to the xratio.
"""
if yratio is None:
yratio = xratio
cdef int xtl = <int>(self.xtl * xratio)
cdef int ytl = <int>(self.ytl * yratio)
cdef int xbr = <int>(self.xbr * xratio)
cdef int ybr = <int>(self.ybr * yratio)
if xbr <= xtl:
xbr += 1
if ybr <= ytl:
ybr += 1
return Box(xtl, ytl, xbr, ybr,
self.frame, self.lost, self.occluded,
self.image, self.label,
self.generated, self.score,
list(self.attributes))
def average(self, other):
return Box((self.xtl + other.xtl) / 2,
(self.ytl + other.ytl) / 2,
(self.xbr + other.xbr) / 2,
(self.ybr + other.ybr) / 2,
(self.frame + other.frame) / 2,
self.lost or other.lost,
self.occluded or other.occluded,
self.image, self.label,
self.generated,
self.score,
list(self.attributes))
def __str__(self):
"""
Returns a string representation.
"""
return "Box({0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}, {10}, {11})".format(
self.xtl, self.ytl, self.xbr, self.ybr,
self.frame, self.lost, self.occluded,
self.image, self.label,
self.generated, self.score, repr(self.attributes))
def __repr__(self):
"""
Returns a string representation.
"""
return str(self)
def __richcmp__(self, other, int t):
"""
A comparator to see if boxes are equal or not.
"""
if not isinstance(other, Box):
return False
# Compare by value; `is` on boxed C ints only works inside the small-int cache.
equality = self.xtl == other.xtl and \
           self.ytl == other.ytl and \
           self.xbr == other.xbr and \
           self.ybr == other.ybr and \
           self.frame == other.frame and \
           self.lost == other.lost
if t == 0: return self.frame < other.frame
elif t == 1: return self.frame <= other.frame
elif t == 2: return equality
elif t == 3: return not equality
elif t == 4: return self.frame > other.frame
elif t == 5: return self.frame >= other.frame
else: return False
def __reduce__(self):
"""
Provides support to serialize the box.
"""
return (Box, (self.xtl, self.ytl, self.xbr, self.ybr,
self.frame, self.lost, self.occluded,
self.image, self.label,
self.generated, self.score, list(self.attributes)))
def __getitem__(self, a):
"""
Allows accessing the bounding box as if it's a tuple
"""
values = (self.xtl, self.ytl, self.xbr, self.ybr,
          self.frame, self.lost, self.occluded,
          self.image, self.label,
          self.generated, self.score,
          list(self.attributes))
return values[a]
def readpaths(pointer):
"""
Reads a path file, typically coming from vatic.
"""
paths = []
lastid = None
currentpath = []
currentlabel = None
for line in pointer:
line = re.match("(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) "
"(\d+) \"(.+)\"( \".+\")*?", line)
id, xtl, ytl, xbr, ybr, frame, lost, occ, gen, label, attributes = line.groups()
box = Box(int(xtl), int(ytl), int(xbr), int(ybr),
int(frame), int(lost), int(occ), int(gen))
if lastid != id:
    if lastid is not None:
        paths.append((currentlabel, currentpath))
    lastid = id
    currentpath = []
    currentlabel = None
currentpath.append(box)
currentlabel = label
paths.append((currentlabel, currentpath))
return paths
cdef int i, j, k
i = 17; j = 42; k = i * j
if j > k: i = 88
else: i = 99; j = k
def result():
"""
>>> result() == (99, 17*42, 17*42)
True
"""
return (i,j,k)
# coding=utf-8
cimport numpy as np
cimport cython
@cython.boundscheck(False)
@cython.wraparound(False)
cdef np.ndarray[np.float32_t, ndim=2] _bbox_insect(
np.ndarray[np.float32_t, ndim=1] box,
np.ndarray[np.float32_t, ndim=2] boxes):
cdef int N = boxes.shape[0]
cdef np.ndarray[np.float32_t, ndim=1] t_box
for i in range(N):
t_box = boxes[i]
from PyGasMix.Gas cimport Gas
cdef void Gas_h2o(Gas* object)
# distutils: language = c++
from pytraj.ArrayIterator cimport *
cdef extern from "ComplexArray.h":
cdef cppclass _ComplexArray "ComplexArray":
_ComplexArray()
_ComplexArray(int)
_ComplexArray(const _ComplexArray&)
#_ComplexArray& operator =(const _ComplexArray&)
#~_ComplexArray()
void Allocate(int)
void Assign(const _ComplexArray&)
void PadWithZero(int)
void Normalize(double)
void SquareModulus()
void ComplexConjTimes(const _ComplexArray&)
double * CAptr()
int size() const
double& operator[](int idx)
const double& operator[](int idx) const
#const iterator begin() const
#const iterator end() const
cdef class ComplexArray:
cdef _ComplexArray* thisptr
cimport cython
cdef list pe3(long long n=*)
import numpy as np
cimport numpy as np
cimport cython
ctypedef np.float_t DTYPE_t
cdef class IntegratorsClass:
cdef:
readonly int N, M, kI, kE, nClass
readonly double beta, gE, gA, gIa, gIs, gIh, gIc, fsa, fh, ep, gI
readonly double tS, tE, tA, tIa, tIs, gIsp, gIcp, gIhp, ars, kapE
readonly np.ndarray rp0, Ni, dxdt, CM, FM, TR, sa, iaa, hh, cc, mm, alpha
cpdef set_contactMatrix(self, double t, contactMatrix)
@cython.wraparound(False)
@cython.boundscheck(False)
@cython.cdivision(True)
@cython.nonecheck(False)
cdef class SIR(IntegratorsClass):
"""
Susceptible, Infected, Removed (SIR)
Ia: asymptomatic
Is: symptomatic
"""
cpdef rhs(self, rp, tt)
@cython.wraparound(False)
@cython.boundscheck(False)
@cython.cdivision(True)
@cython.nonecheck(False)
cdef class SIRS(IntegratorsClass):
"""
Susceptible, Infected, Removed, Susceptible (SIRS)
Ia: asymptomatic
Is: symptomatic
"""
cpdef rhs(self, rp, tt)
@cython.wraparound(False)
@cython.boundscheck(False)
@cython.cdivision(True)
@cython.nonecheck(False)
cdef class SEIR(IntegratorsClass):
"""
Susceptible, Exposed, Infected, Removed (SEIR)
Ia: asymptomatic
Is: symptomatic
"""
cpdef rhs(self, rp, tt)
@cython.wraparound(False)
@cython.boundscheck(False)
@cython.cdivision(True)
@cython.nonecheck(False)
cdef class SEI5R(IntegratorsClass):
"""
Susceptible, Exposed, Infected, Removed (SEIR)
The infected class has 5 groups:
* Ia: asymptomatic
* Is: symptomatic
* Ih: hospitalized
* Ic: ICU
* Im: Mortality
S ---> E
E ---> Ia, Is
Ia ---> R
Is ---> Ih, R
Ih ---> Ic, R
Ic ---> Im, R
"""
cpdef rhs(self, rp, tt)
@cython.wraparound(False)
@cython.boundscheck(False)
@cython.cdivision(True)
@cython.nonecheck(False)
cdef class SEI8R(IntegratorsClass):
"""
Susceptible, Exposed, Infected, Removed (SEIR)
The infected class has 5 groups:
* Ia: asymptomatic
* Is: symptomatic
* Ih: hospitalized
* Ic: ICU
* Im: Mortality
S ---> E
E ---> Ia, Is
Ia ---> R
Is ---> Is',Ih, R
Ih ---> Ih',Ic, R
Ic ---> Ic',Im, R
"""
cpdef rhs(self, rp, tt)
@cython.wraparound(False)
@cython.boundscheck(False)
@cython.cdivision(True)
@cython.nonecheck(False)
cdef class SIkR(IntegratorsClass):
"""
Susceptible, Infected, Removed (SIkR)
method of k-stages of I
"""
cpdef rhs(self, rp, tt)
@cython.wraparound(False)
@cython.boundscheck(False)
@cython.cdivision(True)
@cython.nonecheck(False)
cdef class SEkIkR(IntegratorsClass):
"""
Susceptible, Infected, Removed (SIkR)
method of k-stages of I
See: Lloyd, Theoretical Population Biology 60, 59-71 (2001), doi:10.1006/tpbi.2001.1525.
"""
cpdef rhs(self, rp, tt)
@cython.wraparound(False)
@cython.boundscheck(False)
@cython.cdivision(True)
@cython.nonecheck(False)
cdef class SEAIR(IntegratorsClass):
"""
Susceptible, Exposed, Asymptomatic and infected, Infected, Removed (SEAIR)
Ia: asymptomatic
Is: symptomatic
A : Asymptomatic and infectious
"""
cpdef rhs(self, rp, tt)
@cython.wraparound(False)
@cython.boundscheck(False)
@cython.cdivision(True)
@cython.nonecheck(False)
cdef class SEAI5R(IntegratorsClass):
"""
Susceptible, Exposed, Activated, Infected, Removed (SEAIR)
The infected class has 5 groups:
* Ia: asymptomatic
* Is: symptomatic
* Ih: hospitalized
* Ic: ICU
* Im: Mortality
S ---> E
E ---> Ia, Is
Ia ---> R
Is ---> Ih, R
Ih ---> Ic, R
Ic ---> Im, R
"""
cpdef rhs(self, rp, tt)
@cython.wraparound(False)
@cython.boundscheck(False)
@cython.cdivision(True)
@cython.nonecheck(False)
cdef class SEAI8R(IntegratorsClass):
"""
Susceptible, Exposed, Activated, Infected, Removed (SEAIR)
The infected class has 5 groups:
* Ia: asymptomatic
* Is: symptomatic
* Ih: hospitalized
* Ic: ICU
* Im: Mortality
S ---> E
E ---> A
A ---> Ia, Is
Ia ---> R
Is ---> Ih, Is', R
Ih ---> Ic, Ih', R
Ic ---> Im, Ic', R
"""
cpdef rhs(self, rp, tt)
@cython.wraparound(False)
@cython.boundscheck(False)
@cython.cdivision(True)
@cython.nonecheck(False)
cdef class SEAIRQ(IntegratorsClass):
"""
Susceptible, Exposed, Asymptomatic and infected, Infected, Removed, Quarantined (SEAIRQ)
Ia: asymptomatic
Is: symptomatic
A : Asymptomatic and infectious
"""
cpdef rhs(self, rp, tt)
@cython.wraparound(False)
@cython.boundscheck(False)
@cython.cdivision(True)
@cython.nonecheck(False)
cdef class SEAIRQ_testing(IntegratorsClass):
"""
Susceptible, Exposed, Asymptomatic and infected, Infected, Removed, Quarantined (SEAIRQ)
Ia: asymptomatic
Is: symptomatic
A : Asymptomatic and infectious
"""
cpdef rhs(self, rp, tt)
@cython.wraparound(False)
@cython.boundscheck(True)
@cython.cdivision(False)
@cython.nonecheck(True)
cdef class Spp(IntegratorsClass):
cdef:
readonly np.ndarray linear_terms, infection_terms
readonly np.ndarray parameters
readonly list param_keys
readonly dict class_index_dict
readonly np.ndarray _lambdas
"""
Susceptible, Exposed, Asymptomatic and infected, Infected, Removed, Quarantined (SEAIRQ)
Ia: asymptomatic
Is: symptomatic
A : Asymptomatic and infectious
"""
cpdef rhs(self, rp, tt)
from typing import List
from libcpp.vector cimport vector
from constants import SEARCH_LIST
cdef extern from "src/binarysearch.hpp":
int binsearch(vector[int] l, int value, int low, int high)
cdef int cython_binary_search(l: List[int], value: int, low: int = 0, high: int = -1):
if high >= low:
mid = (high + low) // 2
if l[mid] == value:
return mid
elif l[mid] > value:
return cython_binary_search(l, value, low, mid - 1)
else:
return cython_binary_search(l, value, mid + 1, high)
else:
return -1
cpdef void benchmark_native_binary_search(loops=10):
cdef int len_search_list = len(SEARCH_LIST)
for i in range(0, loops):
binsearch(SEARCH_LIST, 66666, 0, len_search_list)
cpdef void benchmark_cython_binary_search(loops: int = 10):
cdef int len_search_list = len(SEARCH_LIST)
for i in range(0, loops):
# high is an inclusive index, so pass len - 1 to stay in bounds
cython_binary_search(SEARCH_LIST, 66666, 0, len_search_list - 1)
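# Quick sanity sketch (cdef, so callable only from within this module);
# low/high are inclusive bounds into the sorted list:
#   cython_binary_search([1, 3, 5, 7], 5, 0, 3)   # -> 2
#   cython_binary_search([1, 3, 5, 7], 4, 0, 3)   # -> -1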
cdef bint variants_discovery(bytes chrid, list batchfiles, dict popgroup, float min_af,
cvg_file_handle, vcf_file_handle, batch_count)
# cython: language_level = 3, boundscheck = False
cdef extern from "../../src/objects.h":
ctypedef struct FROMFILE:
char *name
char **labels
unsigned long n_rows
unsigned long n_cols
double **data
cdef extern from "../../src/dataframe/fromfile.h":
FROMFILE *fromfile_initialize()
void fromfile_free(FROMFILE *ff)
<|end_of_text|>"""
cparticle.pyx
"""
cdef class Particle:
"""Simple Particle type."""
cdef readonly double mass
cdef public double position, velocity
def __init__(self, m, p, v):
self.mass = m
self.position = p
self.velocity = v
cpdef double get_momentum(self):
"""Return the particle's momentum."""
return self.mass * self.velocity
def add_momentums(particles):
"""Return the sum of particles' momentums."""
total_mom = 0.
for particle in particles:
total_mom += particle.get_momentum()
return total_mom
def add_momentums_typed(list particles):
"""Typed momentum summer."""
cdef:
double total_mom = 0.0
Particle particle
for particle in particles:
total_mom += particle.get_momentum()
return total_mom
# tag: cpp,cpp11
# mode: compile
# tag: no-cpp-locals
# TODO cpp_locals works fine with the standard library that comes with gcc11
# but not with gcc8. Therefore disable the test for now
cdef extern from *:
"""
class NoAssignIterator {
public:
explicit NoAssignIterator(int pos) : pos_(pos) {}
NoAssignIterator(NoAssignIterator&) = delete;
NoAssignIterator(NoAssignIterator&&) {}
NoAssignIterator& operator=(NoAssignIterator&) = delete;
NoAssignIterator& operator=(NoAssignIterator&&) { return *this; }
// Default constructor of temp variable is needed by Cython
// as of 3.0a6.
NoAssignIterator() : pos_(0) {}
int operator*() {
return pos_;
}
NoAssignIterator operator++() {
return NoAssignIterator(pos_ + 1);
}
int operator!=(NoAssignIterator other) {
return pos_ != other.pos_;
}
int pos_;
};
class NoAssign {
public:
NoAssign() {}
NoAssign(NoAssign&) = delete;
NoAssign(NoAssign&&) {}
NoAssign& operator=(NoAssign&) = delete;
NoAssign& operator=(NoAssign&&) { return *this; }
void func() {}
NoAssignIterator begin() {
return NoAssignIterator(0);
}
NoAssignIterator end() {
return NoAssignIterator(2);
}
};
NoAssign get_NoAssign_Py() {
return NoAssign();
}
NoAssign get_NoAssign_Cpp() {
return NoAssign();
}
"""
cdef cppclass NoAssignIterator:
int operator*()
NoAssignIterator operator++()
int operator!=(NoAssignIterator)
cdef cppclass NoAssign:
void func()
NoAssignIterator begin()
NoAssignIterator end()
# might raise Python exception (thus needs a temp)
NoAssign get_NoAssign_Py() except *
# might raise C++ exception (thus needs a temp)
NoAssign get_NoAssign_Cpp() except +
cdef internal_cpp_func(NoAssign arg):
pass
def test_call_to_function():
# will fail to compile if move constructors aren't used
internal_cpp_func(get_NoAssign_Py())
internal_cpp_func(get_NoAssign_Cpp())
def test_assignment_to_name():
# will fail if move constructors aren't used
cdef NoAssign value
value = get_NoAssign_Py()
value = get_NoAssign_Cpp()
def test_assignment_to_scope():
cdef NoAssign value
value = get_NoAssign_Py()
value = get_NoAssign_Cpp()
def inner():
value.func()
cdef class AssignToClassAttr:
cdef NoAssign attr
def __init__(self):
self.attr = get_NoAssign_Py()
self.attr = get_NoAssign_Cpp()
def test_generator_cpp_iterator_as_temp():
for i in get_NoAssign_Py():
yield i
from libc.stdlib cimport malloc
from cython.operator import dereference
cdef extern from "bstree.h":
ctypedef struct data:
int indx
char *cdata
float fdata
cdef struct tree_node:
data *dat
void *left # to link to next struct
void *right
ctypedef tree_node tn
void error()
void insert(tn ** root, int indx, char * name, float ratio)
void inorder(tn * root)
int count(tn * root)
data * search(tn * root, int key)
tn * delete(tn * root, int key)
cdef class BSTree:
cdef tn *root
def __cinit__(self):
self.root = NULL
cpdef void insert(self, int indx, char * name, float ratio):
insert(&self.root, indx, name, ratio)
cpdef void inorder(self):
inorder(self.root)
cpdef void remove(self, int node):
self.root = delete(self.root, node)
def search(self, int node):
cdef data *temp_tree = search(self.root, node)
if not temp_tree:
return "node doesn't exist"
else:
temp = dereference(temp_tree)
return (temp.indx, temp.cdata, round(temp.fdata, 3))
cpdef int node_count(self):
return count(self.root)
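# Hypothetical usage sketch (assumes the extension is built against the
# bstree.h implementation; char* arguments must be byte strings):
#   t = BSTree()
#   t.insert(1, b"one", 0.25)
#   t.insert(2, b"two", 0.5)
#   t.node_count()   # -> 2
#   t.search(1)      # -> (1, b'one', 0.25)
#   t.remove(1)
#   t.search(1)      # -> "node doesn't exist"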
'''Original code from https://github.com/benoitc/http-parser
2011 (c) Benoît Chesneau <[email protected]>
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
Modified and adapted for pulsar.
'''
import os
import sys
from http cimport *
ispy3k = sys.version_info[0] == 3
if ispy3k:
from urllib.parse import urlsplit
else:
from urlparse import urlsplit
class _ParserData:
def __init__(self, decompress=False):
self.url = ""
self.body = []
self.headers = {}
self.environ = {}
self.decompress = decompress
self.decompressobj = None
self.chunked = False
self.headers_complete = False
self.partial_body = False
self.message_begin = False
self.message_complete = False
self._last_field = ""
self._last_was_value = False
cdef class HttpParser:
"""Cython HTTP parser wrapping http_parser."""
cdef http_parser _parser
cdef http_parser_settings _settings
cdef object _data
cdef str _path
cdef str _query_string
cdef str _fragment
cdef object _parsed_url
def __init__(self, kind=2, decompress=False):
""" constructor of HttpParser object.
:attr kind: Int, could be 0 to parseonly requests,
1 to parse only responses or 2 if we want to let
the parser detect the type.
"""
# set parser type
if kind == 2:
parser_type = HTTP_BOTH
elif kind == 1:
parser_type = HTTP_RESPONSE
elif kind == 0:
parser_type = HTTP_REQUEST
# initialize parser
http_parser_init(&self._parser, parser_type)
self._data = _ParserData(decompress=decompress)
self._parser.data = <void *>self._data
self._parsed_url = None
self._path = ""
self._query_string = ""
self._fragment = ""
# set callback
self._settings.on_url = <http_data_cb>on_url_cb
self._settings.on_body = <http_data_cb>on_body_cb
self._settings.on_header_field = <http_data_cb>on_header_field_cb
self._settings.on_header_value = <http_data_cb>on_header_value_cb
self._settings.on_headers_complete = <http_cb>on_headers_complete_cb
self._settings.on_message_begin = <http_cb>on_message_begin_cb
self._settings.on_message_complete = <http_cb>on_message_complete_cb
def execute(self, char *data, size_t length):
""" Execute the parser with the last chunk. We pass the length
to let the parser know when EOF has been received. In this case
length == 0.
:return recved: Int, received length of the data parsed. If
recved != length you should return an error.
"""
return http_parser_execute(&self._parser, &self._settings,
data, length)
def get_version(self):
""" get HTTP version """
return (self._parser.http_major, self._parser.http_minor)
def get_method(self):
""" get HTTP method as string"""
return http_method_str(<http_method>self._parser.method)
def get_status_code(self):
""" get status code of a response as integer """
return self._parser.status_code
def get_url(self):
""" get full url of the request """
return self._data.url
def maybe_parse_url(self):
raw_url = self.get_url()
if not self._parsed_url and raw_url:
parts = urlsplit(raw_url)
self._parsed_url = parts
self._path = parts.path or ""
self._query_string = parts.query or ""
self._fragment = parts.fragment or ""
def get_path(self):
""" get path of the request (url without query string and
fragment """
self.maybe_parse_url()
return self._path
def get_query_string(self):
""" get query string of the url """
self.maybe_parse_url()
return self._query_string
def get_fragment(self):
""" get fragment of the url """
self.maybe_parse_url()
return self._fragment
def get_headers(self):
"""get request/response headers dictionary."""
return self._data.headers
def get_protocol(self):
return None
def get_body(self):
return self._data.body
def recv_body(self):
""" return last chunk of the parsed body"""
body = b''.join(self._data.body)
self._data.body = []
self._data.partial_body = False
return body
def recv_body_into(self, barray):
""" Receive the last chunk of the parsed body and store the data
in a buffer rather than creating a new string. """
l = len(barray)
body = b''.join(self._data.body)
m = min(len(body), l)
data, rest = body[:m], body[m:]
barray[0:m] = data
if not rest:
self._data.body = []
self._data.partial_body = False
else:
self._data.body = [rest]
return m
def is_headers_complete(self):
""" return True if all headers have been parsed. """
return self._data.headers_complete
def is_partial_body(self):
""" return True if a chunk of body have been parsed """
return self._data.partial_body
def is_message_begin(self):
""" return True if the parsing start """
return self._data.message_begin
def is_message_complete(self):
""" return True if the parsing is done (we get EOF) """
return self._data.message_complete
def is_chunked(self):
""" return True if Transfer-Encoding header value is chunked"""
te = self._data.headers.get('transfer-encoding', '').lower()
return te == 'chunked'
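# Minimal request-parsing sketch (illustrative only; whether the getters
# return bytes or str depends on the build's c_string directives):
#   p = HttpParser(kind=0)                # kind=0: parse requests only
#   data = b"GET /index?x=1 HTTP/1.1\r\nHost: example.com\r\n\r\n"
#   assert p.execute(data, len(data)) == len(data)
#   p.get_path()             # '/index'
#   p.get_query_string()     # 'x=1'
#   p.is_headers_complete()  # True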
def fib(int n):
cdef long fib1 = 1
cdef long fib2 = 1
cdef long current_sum = 0
cdef int i = 0
while i < n - 2:
current_sum = fib1 + fib2
fib1 = fib2
fib2 = current_sum
i += 1
return fib2
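# fib(n) returns the n-th Fibonacci number of the sequence 1, 1, 2, 3, ...,
# e.g. fib(10) == 55; the typed C longs keep the loop free of Python objects.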
# cython: profile=False
'''
Created on Dec 31, 2018
@author: cclausen
'''
# arbitrary performance scale
# Base cython version: 1.27
# randargmax : 1.57
# cdef TreeNode 1 : 1.69
# by hand pickMove : 2.46
# q value init : 4.37 <- different (better?!) search behavior, but the same as in cMctsTree, this was an oversight that made this one behave different (slower & worse)
# dconst cache : 4.47
import numpy as np
cimport numpy as np
DTYPE = np.float32
ctypedef np.float32_t DTYPE_t
from libc.stdlib cimport rand, RAND_MAX
import time
# TODO this should be part of the config....
cdef float DRAW_VALUE = 0.1 # 1 means draws are considered just as good as wins, 0 means draws are considered as bad as losses
cdef float DESPERATION_FACTOR = 0.1
# negative values should be not possible for moves in general?!
cdef float illegalMoveValue = -1
cdef int hasAncestor(TreeNode child, TreeNode ancestor):
if child == ancestor:
return 1
for pNode, _ in child.parentNodes:
if hasAncestor(pNode, ancestor):
return 1
return 0
cdef int bestLegalValue(float [:] ar):
cdef int n = ar.shape[0]
if n == 0:
return -1
cdef int startIdx = int((rand()/(<float>RAND_MAX)) * n)
cdef int idx, bestIdx, i
cdef float bestValue
bestIdx = -1
bestValue = -1
for i in range(n):
idx = (i + startIdx) % n
if ar[idx] > bestValue and ar[idx] != illegalMoveValue:
bestValue = ar[idx]
bestIdx = idx
return bestIdx
cdef object dconsts = {}
cdef object getDconst(int n):
global dconsts
if not n in dconsts:
dconsts[n] = np.asarray([10.0 / n] * n, dtype=np.float32)
return dconsts[n]
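# Note: a concentration of 10/n scales the Dirichlet exploration noise
# inversely with the number of legal moves, in the spirit of AlphaZero's
# root-noise scheme.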
cdef class TreeNode():
cdef readonly object state
cdef float noiseMix
cdef int isExpanded
cdef object parentNodes
cdef object children
cdef int useNodeRepository
cdef object nodeRepository
cdef unsigned short [:] compressedMoveToMove
cdef int numMoves
cdef float [:] edgePriors
cdef float [:] edgeVisits
cdef float [:] edgeTotalValues
cdef float[:] noiseCache
cdef int winningMove
cdef object terminalResult
cdef float stateValue
cdef int allVisits
cdef object netValueEvaluation
def __init__(self, state, parentNodes = [], noiseMix = 0.25, nodeRepository = None, useNodeRepository = True):
self.state = state
self.useNodeRepository = useNodeRepository
if self.useNodeRepository:
if nodeRepository is None:
self.nodeRepository = {}
else:
self.nodeRepository = nodeRepository
self.noiseMix = noiseMix
self.isExpanded = 0
self.parentNodes = parentNodes
self.children = {}
self.winningMove = -1
self.terminalResult = None
self.noiseCache = None
self.stateValue = 0.5 #overwritten before first use
self.allVisits = 0
self.numMoves = -1
self.compressedMoveToMove = None
self.edgeVisits = None
self.edgeTotalValues = None
self.edgePriors = None
cdef void lazyInitEdgeData(self):
if self.numMoves == -1:
legalMoves = self.state.getLegalMoves()
self.numMoves = len(legalMoves)
self.compressedMoveToMove = np.array(legalMoves, dtype=np.uint16)
self.edgeVisits = np.zeros(self.numMoves, dtype=np.float32)
self.edgeTotalValues = np.zeros(self.numMoves, dtype=np.float32)
cdef void backupWinningMove(self, int move):
if self.winningMove!= -1:
return
self.winningMove = move
cdef int pMove
cdef TreeNode pNode
for pNode, pMove in self.parentNodes:
if pNode.state.getPlayerOnTurnIndex() == self.state.getPlayerOnTurnIndex():
pNode.backupWinningMove(pNode.compressedMoveToMove[pMove])
else:
break
cdef TreeNode executeMove(self, int move):
cdef object newState = self.state.clone()
newState.simulate(move)
if newState.isTerminal() and newState.getWinner() == self.state.getPlayerOnTurnIndex():
self.backupWinningMove(move)
cdef TreeNode knownNode
cdef int ix
cdef int compressedNodeIdx = -1
for ix in range(self.numMoves):
if self.compressedMoveToMove[ix] == move:
compressedNodeIdx = ix
break
if self.useNodeRepository and newState in self.nodeRepository:
knownNode = self.nodeRepository[newState]
knownNode.parentNodes.append((self, compressedNodeIdx))
return knownNode
cdef TreeNode newNode = TreeNode(newState, [(self, compressedNodeIdx)], noiseMix = self.noiseMix, nodeRepository = self.nodeRepository, useNodeRepository = self.useNodeRepository)
if self.useNodeRepository:
self.nodeRepository[newState] = newNode
return newNode
def exportTree(self):
"""
create an independent data structure that describes the entire tree
that starts at this node, meant for storage and later analysis
"""
me = {}
me["state"] = self.state.packageForDebug()
me["expanded"] = self.isExpanded
me["winner"] = self.state.getWinner()
if self.isExpanded:
me["priors"] = np.asarray(self.edgePriors).tolist()
me["netValue"] = np.asarray(self.netValueEvaluation).tolist()
edges = {}
cdef int move
for move in self.children:
child = self.children[move]
e = {}
e["move"] = self.state.getHumanMoveDescription(move)
e["tree"] = child.exportTree()
e["visits"] = self.edgeVisits[move]
e["totalValue"] = self.edgeTotalValues[move]
e["meanValue"] = self.edgeTotalValues[move] / self.edgeVisits[move]
edges[move] = e
me["edges"] = edges
return me
def getBestValue(self):
"""
returns a single float that is meant to tell what the best
possible expected outcome is by choosing the best possible actions
"""
if self.winningMove != -1:
return 1
if self.numMoves == -1:
return 0
cdef float bestValue = 0
cdef int i
for i in range(self.numMoves):
if self.edgeVisits[i] > 0 and (self.edgeTotalValues[i] / self.edgeVisits[i]) > bestValue:
bestValue = (self.edgeTotalValues[i] / self.edgeVisits[i])
return bestValue
def cutTree(self):
"""
deletes all children, reducing the tree to the root
resets all counters
meant to be used when different solvers are used in an alternating fashion on the same tree.
maybe instead a completely different tree should be used for each solver. But meh.
Training does reuse the trees, test play doesn't. Better than nothing...
TODO Why even have a function like this, instead of just grabbing the game state and creating a new root node around it?!
"""
self.children = {}
self.isExpanded = 0
self.parentNodes = []
self.terminalResult = None
self.nodeRepository = {}
cdef int i
if self.edgePriors is not None:
for i in range(self.state.getMoveCount()):
self.edgePriors[i] = 0
if self.numMoves != -1:
for i in range(self.numMoves):
self.edgeVisits[i] = 0
self.edgeTotalValues[i] = 0
self.stateValue = 0.5
self.allVisits = 0
cdef float getVisitsFactor(self):
#.0001 means that in the case of a new node with zero visits it will chose whatever has the best P
# instead of just the move with index 0
# but there is little effect in other cases
return self.allVisits ** 0.5 + 0.0001
cdef TreeNode selectDown(self, float cpuct):
cdef TreeNode node = self
while node.isExpanded and not node.state.isTerminal():
node = node.selectMove(cpuct)
return node
def getTreeDepth(self):
if len(self.children) == 0:
return 1
return max([self.children[ckey].getTreeDepth() for ckey in self.children]) + 1
def getChildForMove(self, int move):
self.lazyInitEdgeData()
cdef TreeNode child = None
if move not in self.children:
child = self.executeMove(move)
self.children[move] = child
else:
child = self.children[move]
child.parentNodes = []
cdef TreeNode cached
if self.useNodeRepository:
cKeys = list(self.nodeRepository.keys())
for childKey in cKeys:
cached = self.nodeRepository[childKey]
if not hasAncestor(cached, child):
del self.nodeRepository[childKey]
cached.parentNodes = []
cached.children = {}
return child
def getEdgePriors(self):
if self.edgePriors is None:
return np.zeros(self.state.getMoveCount(), dtype=np.float32)
else:
return np.copy(np.asarray(self.edgePriors, dtype=np.float32))
def getMoveDistribution(self):
result = np.zeros(self.state.getMoveCount(), dtype=np.float32)
if self.winningMove != -1:
result[self.winningMove] = 1
else:
result[self.compressedMoveToMove] = self.edgeVisits
result /= float(self.allVisits)
return result
cdef int pickMove(self, float cpuct):
if self.winningMove != -1:
return self.winningMove
cdef int useNoise = len(self.parentNodes) == 0
cdef int i
cdef float nodeQ, nodeU
if useNoise and self.noiseCache is None:
self.noiseCache = np.random.dirichlet(getDconst(self.numMoves)).astype(np.float32)
cdef float vFactor = self.getVisitsFactor()
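# The loop below computes a PUCT-style score for every legal move:
#   score(a) = Q(a) + cpuct * P(a) * sqrt(N) / (1 + n(a))
# where N ~ allVisits (vFactor = sqrt(N) + eps) and n(a) is the edge visit
# count; Q(a) falls back to a prior-weighted estimate when unvisited.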
cdef float [:] valueTmp = np.zeros(self.numMoves, dtype=np.float32)
cdef int decompressedMove
for i in range(self.numMoves):
decompressedMove = self.compressedMoveToMove[i]
if useNoise:
valueTmp[i] = (1 - self.noiseMix) * self.edgePriors[decompressedMove] + self.noiseMix * self.noiseCache[i]
else:
valueTmp[i] = self.edgePriors[decompressedMove]
# not using an initialization of zero is a pretty good idea.
# not only for search quality (to be proven) but also for search speed by like 50%
# zero may be bad, stateValue is far worse! That means that especially for very clear cut
# situations the tree search will start to extensively explore bad plays to the point of diminishing the winning play probability quite considerably.
if self.edgeVisits[i] == 0:
# idea: if the current position is expected to be really good: Follow the network
nodeQ = self.stateValue * self.edgePriors[decompressedMove] + (1 - self.stateValue) * DESPERATION_FACTOR
else:
nodeQ = self.edgeTotalValues[i] / self.edgeVisits[i]
nodeU = valueTmp[i] * (vFactor / (1.0 + self.edgeVisits[i]))
valueTmp[i] = nodeQ + cpuct * nodeU
cdef int result = bestLegalValue(valueTmp)
return self.compressedMoveToMove[result]
cdef TreeNode selectMove(self, float cpuct):
self.lazyInitEdgeData()
move = self.pickMove(cpuct)
if move not in self.children:
self.children[move] = self.executeMove(move)
return self.children[move]
cdef void backup(self, object vs):
cdef int pMove
cdef TreeNode pNode
for pNode, pMove in self.parentNodes:
pNode.edgeVisits[pMove] += 1
pNode.allVisits += 1
pNode.edgeTotalValues[pMove] += vs[pNode.state.getPlayerOnTurnIndex()]
if pNode.state.hasDraws():
pNode.edgeTotalValues[pMove] += vs[self.state.getPlayerCount()] * DRAW_VALUE
pNode.backup(vs)
def getTerminalResult(self):
if self.terminalResult is None:
numOutputs = self.state.getPlayerCount()
if self.state.hasDraws():
numOutputs += 1
r = [0] * numOutputs
winner = self.state.getWinner()
if winner != -1:
r[winner] = 1
else:
if self.state.hasDraws():
r[numOutputs-1] = 1
else:
r = [1.0 / self.state.getPlayerCount()] * self.state.getPlayerCount()
self.terminalResult = np.array(r, dtype=np.float32)
return self.terminalResult
cdef void expand(self, object movePMap, object vs):
self.edgePriors = np.zeros(self.state.getMoveCount(), dtype=np.float32)
np.copyto(np.asarray(self.edgePriors), movePMap, casting="no")
self.isExpanded = 1
self.netValueEvaluation = vs
self.stateValue = vs[self.state.getPlayerOnTurnIndex()]
if (self.state.hasDraws()):
self.stateValue += vs[self.state.getPlayerCount()] * DRAW_VALUE
def getNetValueEvaluation(self):
return self.netValueEvaluation
# init
# prepareA
# evaluate A
# prepare B
# GPU - CPU
# do:
# evaluate B - backup A, prepare A
# evaluate A - backup B, prepare B
# repeat
# complete
# backupA
def backupWork(backupSet, evalout):
cdef TreeNode node
for idx, ev in enumerate(evalout):
node = backupSet[idx]
w = ev[1]
if node.state.isTerminal():
w = node.getTerminalResult()
else:
node.expand(ev[0], ev[1])
node.backup(w)
def cpuWork(prepareSet, backupSet, evalout, cpuct):
prepareResult = []
cdef TreeNode tnode
if backupSet is not None:
backupWork(backupSet, evalout)
for i in range(len(prepareSet)):
tnode = prepareSet[i]
prepareResult.append(tnode.selectDown(cpuct))
return prepareResult
def batchedMcts(object states, int expansions, evaluator, float cpuct):
workspace = states
halfw = len(workspace) // 2
workspaceA = workspace[:halfw]
workspaceB = workspace[halfw:]
asyncA = True
preparedDataA = cpuWork(workspaceA, None, None, cpuct)
evaloutA = evaluator(preparedDataA)
preparedDataB = cpuWork(workspaceB, None, None, cpuct)
evaloutB = None
def asyncWork():
nonlocal preparedDataA
nonlocal preparedDataB
nonlocal asyncA
nonlocal cpuct
nonlocal evaloutA
nonlocal evaloutB
nonlocal workspaceA
nonlocal workspaceB
if asyncA:
preparedDataA = cpuWork(workspaceA, preparedDataA, evaloutA, cpuct)
else:
preparedDataB = cpuWork(workspaceB, preparedDataB, evaloutB, cpuct)
for _ in range(expansions):
for _ in range(2):
if asyncA:
evaloutB = evaluator(preparedDataB, asyncWork)
else:
evaloutA = evaluator(preparedDataA, asyncWork)
asyncA = not asyncA
backupWork(preparedDataA, evaloutA)
# cdef TreeNode tmp, node
#
# for _ in range(expansions):
# tlst = workspace
# workspace = []
# for i in range(len(tlst)):
# tmp = tlst[i]
# workspace.append(tmp.selectDown(cpuct))
#
# evalout = evaluator(workspace)
#
# for idx, ev in enumerate(evalout):
# node = workspace[idx]
#
# w = ev[1]
# if node.state.isTerminal():
# w = node.getTerminalResult()
# else:
# node.expand(ev[0], ev[1])
# node.backup(w)
# workspace[idx] = states[idx]
import numpy as np
# cimport numpy as np
from scipy.misc import derivative
from scipy.optimize import root_scalar
import cython
def get_retarded_time(double x,
double z,
q_x_func,
q_z_func,
double t,
double C):
def func(t_r):
return np.sqrt((x - q_x_func(t_r)) ** 2 + (z - q_z_func(t_r)) ** 2) - C * (t - t_r)
result = root_scalar(func, x0=0, x1=1)
return result.root
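# t_r is the root of the light-cone condition
#   sqrt((x - q_x(t_r))**2 + (z - q_z(t_r))**2) = C * (t - t_r)
# and root_scalar, given x0 and x1 but no bracket or derivative, solves it
# with the secant method.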
# pos_x from other script
# pos_z from other script
def E_at_point(double x,
double z,
double t,
q_x_func=None,
q_z_func=None,
double C=0,
double EPS_0=0,
double Q=0):
"""
Computes the X and Z components of the field of a moving charge at the point [x, z]
Parameters
----------
x : float
    X coordinate of the evaluation point
z : float
    Z coordinate of the evaluation point
t : float
    Time
q_x_func : callable
    Function giving the X coordinate of the point charge over time
q_z_func : callable
    Function giving the Z coordinate of the point charge over time
C : float
    Speed of light
EPS_0 : float
    Vacuum permittivity
Q : float
    Magnitude of the charge
Returns
-------
E_x : float
    X component of E
E_z : float
    Z component of E
"""
if not q_x_func:
q_x_func = lambda t: 0
if not q_z_func:
q_z_func = lambda t: 0
if not C:
C = 2
if not EPS_0:
EPS_0 = 1
if not Q:
Q = 1
t_ret = get_retarded_time(x, z, q_x_func, q_z_func, t, C)
cdef double nice_r_x = x - q_x_func(t_ret)
cdef double nice_r_z = z - q_z_func(t_ret)
cdef double nice_r_norm = np.sqrt(nice_r_x ** 2 + nice_r_z ** 2)
cdef double v_x_ret = derivative(q_x_func, t_ret, dx=1e-6)
cdef double v_z_ret = derivative(q_z_func, t_ret, dx=1e-6)
cdef double a_x_ret = derivative(q_x_func, t_ret, dx=1e-6, n=2)
cdef double a_z_ret = derivative(q_z_func, t_ret, dx=1e-6, n=2)
cdef double u_x = C * nice_r_x / nice_r_norm - v_x_ret
cdef double u_z = C * nice_r_z / nice_r_norm - v_z_ret
cdef double nice_r_dot_u = nice_r_x * u_x + nice_r_z * u_z
cdef double const = Q / (4 * np.pi * EPS_0)
cdef double front = nice_r_norm / (nice_r_dot_u ** 3)
cdef double radiation_term_x = -nice_r_z * (u_z * a_x_ret - u_x * a_z_ret)
cdef double radiation_term_z = nice_r_x * (u_z * a_x_ret - u_x * a_z_ret)
E_x = const * front * radiation_term_x
E_z = const * front * radiation_term_z
return E_x, E_z
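# Note: only the radiation (acceleration) part of the Liénard-Wiechert field is
# evaluated above, so a charge at rest yields a zero field. A quick sanity
# check with the built-in defaults (unit charge resting at the origin):
# >>> E_at_point(1.0, 0.0, t=5.0)
# (-0.0, 0.0)   (zero, as expected for zero acceleration)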
# @cython.embedsignature(True)
# does not work as it should
def E_components_on_grid(xs, zs, t,
q_x_func=None, q_z_func=None,
C=0, EPS_0=0, Q=0,
x_origin=0, z_origin=0, t_origin=0,
mask_func=None):
"""
    Computes the X and Z field components of a moving charge on a grid of points
    Parameters
    ----------
    xs : sorted iterable of floats
        Grid values along the X axis
    zs : sorted iterable of floats
        Grid values along the Z axis
    t : float
        Time
    q_x_func : callable
        X coordinate of the point charge as a function of time
    q_z_func : callable
        Z coordinate of the point charge as a function of time
    C : float
        Speed of light
    EPS_0 : float
        Vacuum permittivity
    Q : float
        Charge magnitude
    x_origin : float
        Offset of the X coordinate origin (so that the same charge-position
        function of time can be reused for charges at different locations)
    z_origin : float
        Offset of the Z coordinate origin
    t_origin : float
        Offset of the time origin
    mask_func : callable(x, z)
        Mask function. It is called for every grid point; where it returns
        True the field is not computed and the returned arrays contain NaN
        at those coordinates
    Returns
    -------
    E_x : 2D numpy.ndarray
        X component of E
    E_z : 2D numpy.ndarray
        Z component of E
"""
t_shifted = t - t_origin
nrows = len(zs)
ncols = len(xs)
field_x = np.ndarray(shape=(nrows, ncols), dtype=np.float64)
field_z = np.ndarray(shape=(nrows, ncols), dtype=np.float64)
for row, z_orig in enumerate(zs):
for col, x_orig in enumerate(xs):
x = x_orig - x_origin
z = z_orig - z_origin
if mask_func:
if mask_func(x, z):
field_x[row, col] = np.nan
field_z[row, col] = np.nan
continue
e_x, e_z = E_at_point(x, z, t_shifted, q_x_func, q_z_func, C, EPS_0, Q)
field_x[row, col] = e_x
field_z[row, col] = e_z
return field_x, field_z
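# A minimal usage sketch (values are illustrative assumptions): snapshot of the
# field of a charge oscillating along X, with a small disc around the origin
# masked out to avoid the singularity:
# >>> import numpy as np
# >>> xs = np.linspace(-5, 5, 100)
# >>> zs = np.linspace(-5, 5, 100)
# >>> Ex, Ez = E_components_on_grid(xs, zs, t=3.0,
# ...                               q_x_func=lambda t: 0.1 * np.sin(4 * t),
# ...                               mask_func=lambda x, z: x * x + z * z < 0.25)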
@cython.embedsignature(True)
def E_theta_on_grid(xs, zs, double t,
q_x_func=None, q_z_func=None,
double C=0,
double EPS_0=0,
double Q=0,
double x_origin=0,
double z_origin=0,
double t_origin=0,
mask_func=None):
"""
    Computes the theta component of the field of a moving charge on a grid of points
    Parameters
    ----------
    xs : sorted iterable of floats
        Grid values along the X axis
    zs : sorted iterable of floats
        Grid values along the Z axis
    t : float
        Time
    q_x_func : callable
        X coordinate of the point charge as a function of time
    q_z_func : callable
        Z coordinate of the point charge as a function of time
    C : float
        Speed of light
    EPS_0 : float
        Vacuum permittivity
    Q : float
        Charge magnitude
    x_origin : float
        Offset of the X coordinate origin (so that the same charge-position
        function of time can be reused for charges at different locations)
    z_origin : float
        Offset of the Z coordinate origin
    t_origin : float
        Offset of the time origin
    mask_func : callable(x, z)
        Mask function. It is called for every grid point; where it returns
        True the field is not computed and the returned array contains NaN
        at those coordinates
    Returns
    -------
    E_theta : 2D numpy.ndarray
        Theta component of E
"""
cdef double t_shifted = t - t_origin
nrows = len(zs)
ncols = len(xs)
field_theta = np.ndarray(shape=(nrows, ncols), dtype=np.float64)
for row in range(nrows):
for col in range(ncols):
x = xs[col] - x_origin
z = zs[row] - z_origin
if mask_func:
if mask_func(x, z):
field_theta[row, col] = np.nan
continue
E_x, E_z = E_at_point(x, z, t_shifted, q_x_func, q_z_func, C, EPS_0, Q)
theta = np.arctan2(abs(x), z)
e_theta_x = np.cos(theta)
e_theta_z = -np.sin(theta)
field_theta[row, col] = E_x * e_theta_x * np.sign(x) + E_z * e_theta_z
return field_theta
# distutils: language=c++
# cython: cdivision=True
# cython: boundscheck=False
# cython: nonecheck=False
# cython: wraparound=False
import numpy as np
# cython imports
from calibration.types cimport *
cimport cython
cimport numpy as np
np.import_array()
cdef class DArrayList(object):
def __cinit__(self, int initial_size):
self.data = np.zeros((initial_size,))
self.capacity = initial_size
self.size = 0
cpdef update(self, NPDOUBLE_t[:] row):
for r in row:
self.add(r)
cpdef add(self, NPDOUBLE_t x):
cdef np.ndarray newdata
if self.size == self.capacity:
self.capacity *= 4
newdata = np.zeros((self.capacity,))
newdata[:self.size] = self.data
self.data = newdata
self.data[self.size] = x
self.size += 1
cpdef np.ndarray[NPDOUBLE_t, mode='c'] finalize(self):
cdef np.ndarray data = self.data[:self.size]
return np.ascontiguousarray(data)
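# A minimal usage sketch: append scalars one by one, then obtain a trimmed,
# C-contiguous array holding exactly the values written so far:
# >>> lst = DArrayList(4)
# >>> for v in [1.0, 2.0, 3.0, 4.0, 5.0]:  # the fifth add triggers capacity *= 4
# ...     lst.add(v)
# >>> lst.finalize()
# array([1., 2., 3., 4., 5.])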
# distutils: language = c++
# distutils: sources = gmm.cpp
#
# Author: David Zurow, adapted from G. Bartsch
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
#
import cython
from libcpp.string cimport string
from libcpp.vector cimport vector
import numpy as np
cimport numpy as cnp
import struct
import wave
import os
import re
from pathlib import Path
from tempfile import NamedTemporaryFile
import subprocess
from cpython.version cimport PY_MAJOR_VERSION
cdef unicode _text(s):
if type(s) is unicode:
# Fast path for most common case(s).
return <unicode>s
elif PY_MAJOR_VERSION < 3 and isinstance(s, bytes):
# Only accept byte strings as text input in Python 2.x, not in Py3.
return (<bytes>s).decode('utf8')
elif isinstance(s, unicode):
# We know from the fast path above that's' can only be a subtype here.
# An evil cast to <unicode> might still work in some(!) cases,
# depending on what the further processing does. To be safe,
# we can always create a copy instead.
return unicode(s)
else:
raise TypeError("Could not convert to unicode.")
cdef extern from "gmm_wrappers.h" namespace "kaldi":
cdef cppclass GmmOnlineModelWrapper:
GmmOnlineModelWrapper() except +
GmmOnlineModelWrapper(float, int, int, float, float, int, string, string, string, string, string) except +
cdef cppclass GmmOnlineDecoderWrapper:
GmmOnlineDecoderWrapper() except +
GmmOnlineDecoderWrapper(GmmOnlineModelWrapper *) except +
        bint decode(float, int, float *, bint) except +
void get_decoded_string(string &, float &) except +
bint get_word_alignment(vector[string] &, vector[int] &, vector[int] &) except +
cdef class KaldiGmmOnlineModel:
cdef GmmOnlineModelWrapper* model_wrapper
cdef unicode modeldir, graph
cdef object od_conf_f
def __cinit__(self, object modeldir,
object graph,
float beam = 7.0, # nnet3: 15.0
int max_active = 7000,
int min_active = 200,
float lattice_beam = 8.0,
float acoustic_scale = 1.0, # nnet3: 0.1
                  int frame_subsampling_factor = 3, # nnet3: 1
int num_gselect = 5,
float min_post = 0.025,
float posterior_scale = 0.1,
int max_count = 0,
int online_ivector_period = 10):
self.modeldir = _text(modeldir)
self.graph = _text(graph)
cdef unicode config = u'%s/conf/online_decoding.conf' % self.modeldir
cdef unicode word_symbol_table = u'%s/graph/words.txt' % self.graph
cdef unicode model_in_filename = u'%s/final.mdl' % self.modeldir
cdef unicode fst_in_str = u'%s/graph/HCLG.fst' % self.graph
cdef unicode align_lex_filename = u'%s/graph/phones/align_lexicon.int' % self.graph
#
# make sure all model files required exist
#
for conff in [config, word_symbol_table, model_in_filename, fst_in_str, align_lex_filename]:
if not os.path.isfile(conff.encode('utf8')):
raise Exception ('%s not found.' % conff)
if not os.access(conff.encode('utf8'), os.R_OK):
raise Exception ('%s is not readable' % conff)
#
# generate ivector_extractor.conf
#
self.od_conf_f = NamedTemporaryFile(prefix=u'py_online_decoding_', suffix=u'.conf', delete=True)
# print(self.od_conf_f.name)
with open(config) as file:
for line in file:
if re.search(r'=.*/.*', line):
# FIXME: uses python 3 f-strings
# self.od_conf_f.write(re.sub(r'=(.*)', lambda m: f"={self.modeldir}/{Path(*Path(m[1]).parts[2:])}", line).encode('utf8'))
                    self.od_conf_f.write(re.sub(r'=(.*)', lambda m: "=%s/%s" % (self.modeldir, m.group(1)), line).encode('utf8'))
else:
self.od_conf_f.write(line.encode('utf8'))
self.od_conf_f.flush()
# subprocess.run(f"cat {self.od_conf_f.name}", shell=True)
#
# instantiate our C++ wrapper class
#
self.model_wrapper = new GmmOnlineModelWrapper(beam,
max_active,
min_active,
lattice_beam,
acoustic_scale,
frame_subsampling_factor,
word_symbol_table.encode('utf8'),
model_in_filename.encode('utf8'),
fst_in_str.encode('utf8'),
self.od_conf_f.name.encode('utf8'),
align_lex_filename.encode('utf8'))
def __dealloc__(self):
if self.od_conf_f:
self.od_conf_f.close()
if self.model_wrapper:
del self.model_wrapper
cdef class KaldiGmmOnlineDecoder:
cdef GmmOnlineDecoderWrapper* decoder_wrapper
cdef object ie_conf_f
def __cinit__(self, KaldiGmmOnlineModel model):
#
# instantiate our C++ wrapper class
#
self.decoder_wrapper = new GmmOnlineDecoderWrapper(model.model_wrapper)
def __dealloc__(self):
del self.decoder_wrapper
def decode(self, samp_freq, cnp.ndarray[float, ndim=1, mode="c"] samples not None, finalize):
return self.decoder_wrapper.decode(samp_freq, samples.shape[0], <float *> samples.data, finalize)
def get_decoded_string(self):
cdef string decoded_string
cdef double likelihood=0.0
self.decoder_wrapper.get_decoded_string(decoded_string, likelihood)
return decoded_string.decode('utf8'), likelihood
def get_word_alignment(self):
cdef vector[string] words
cdef vector[int] times
cdef vector[int] lengths
if not self.decoder_wrapper.get_word_alignment(words, times, lengths):
return None
return words, times, lengths
#
# various convenience functions below
#
def decode_wav_file(self, object wavfile):
wavf = wave.open(wavfile, 'rb')
# check format
assert wavf.getnchannels()==1
assert wavf.getsampwidth()==2
assert wavf.getnframes()>0
# read the whole file into memory, for now
num_frames = wavf.getnframes()
frames = wavf.readframes(num_frames)
samples = struct.unpack_from('<%dh' % num_frames, frames)
wavf.close()
return self.decode(wavf.getframerate(), np.array(samples, dtype=np.float32), True)
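# A minimal usage sketch (the model/graph paths are assumptions, not shipped
# with this module):
# >>> model = KaldiGmmOnlineModel('/opt/kaldi/model', '/opt/kaldi/model')
# >>> decoder = KaldiGmmOnlineDecoder(model)
# >>> if decoder.decode_wav_file('utterance.wav'):
# ...     text, likelihood = decoder.get_decoded_string()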
#
# Copyright (c) 2012-2021 Snowflake Computing Inc. All rights reserved.
#
# distutils: language = c++
# cython: language_level=3
from cpython.ref cimport PyObject
from libc.stdint cimport *
from libcpp cimport bool as c_bool
from libcpp.memory cimport shared_ptr
from libcpp.string cimport string as c_string
from libcpp.vector cimport vector
from snowflake.connector.snow_logging import getSnowLogger
from .errorcode import (
ER_FAILED_TO_CONVERT_ROW_TO_PYTHON_TYPE,
ER_FAILED_TO_READ_ARROW_STREAM,
)
from .errors import Error, InterfaceError, OperationalError
snow_logger = getSnowLogger(__name__)
'''
the unit in this iterator
EMPTY_UNIT: default
ROW_UNIT: fetch row by row when the user calls `fetchone()`
TABLE_UNIT: fetch one arrow table when the user calls `fetch_pandas()`
'''
ROW_UNIT, TABLE_UNIT, EMPTY_UNIT = 'row', 'table', ''
cdef extern from "cpp/ArrowIterator/CArrowIterator.hpp" namespace "sf":
cdef cppclass ReturnVal:
PyObject * successObj;
PyObject * exception;
cdef cppclass CArrowIterator:
shared_ptr[ReturnVal] next();
cdef extern from "cpp/ArrowIterator/CArrowChunkIterator.hpp" namespace "sf":
cdef cppclass CArrowChunkIterator(CArrowIterator):
CArrowChunkIterator(
PyObject* context,
vector[shared_ptr[CRecordBatch]]* batches,
PyObject* use_numpy,
) except +
cdef cppclass DictCArrowChunkIterator(CArrowChunkIterator):
DictCArrowChunkIterator(
PyObject* context,
vector[shared_ptr[CRecordBatch]]* batches,
PyObject* use_numpy
) except +
cdef extern from "cpp/ArrowIterator/CArrowTableIterator.hpp" namespace "sf":
cdef cppclass CArrowTableIterator(CArrowIterator):
CArrowTableIterator(
PyObject* context,
vector[shared_ptr[CRecordBatch]]* batches,
bint number_to_decimal,
) except +
cdef extern from "arrow/api.h" namespace "arrow" nogil:
cdef cppclass CStatus "arrow::Status":
CStatus()
c_string ToString()
c_string message()
c_bool ok()
c_bool IsIOError()
c_bool IsOutOfMemory()
c_bool IsInvalid()
c_bool IsKeyError()
c_bool IsNotImplemented()
c_bool IsTypeError()
c_bool IsCapacityError()
c_bool IsIndexError()
c_bool IsSerializationError()
cdef cppclass CResult "arrow::Result"[T]:
CResult()
CResult(CStatus status)
CResult(T)
c_string ToString()
c_string message()
c_bool ok()
const CStatus& status()
T& ValueOrDie()
T operator*()
cdef cppclass CBuffer" arrow::Buffer":
CBuffer(const uint8_t* data, int64_t size)
cdef cppclass CRecordBatch" arrow::RecordBatch"
cdef cppclass CRecordBatchReader" arrow::RecordBatchReader":
CStatus ReadNext(shared_ptr[CRecordBatch]* batch)
cdef extern from "arrow/ipc/api.h" namespace "arrow::ipc" nogil:
cdef cppclass CRecordBatchStreamReader \
" arrow::ipc::RecordBatchStreamReader"(CRecordBatchReader):
@staticmethod
CResult[shared_ptr[CRecordBatchReader]] Open(const InputStream* stream)
cdef extern from "arrow/io/api.h" namespace "arrow::io" nogil:
enum FileMode" arrow::io::FileMode::type":
FileMode_READ" arrow::io::FileMode::READ"
FileMode_WRITE" arrow::io::FileMode::WRITE"
FileMode_READWRITE" arrow::io | Cython |
::FileMode::READWRITE"
cdef cppclass FileInterface:
CStatus Close()
CStatus Tell(int64_t* position)
FileMode mode()
c_bool closed()
cdef cppclass Readable:
# put overload under a different name to avoid cython bug with multiple
# layers of inheritance
CStatus ReadBuffer" Read"(int64_t nbytes, shared_ptr[CBuffer]* out)
CStatus Read(int64_t nbytes, int64_t* bytes_read, uint8_t* out)
cdef cppclass InputStream(FileInterface, Readable):
pass
cdef cppclass Seekable:
CStatus Seek(int64_t position)
cdef cppclass RandomAccessFile(InputStream, Seekable):
CStatus GetSize(int64_t* size)
CStatus ReadAt(int64_t position, int64_t nbytes,
int64_t* bytes_read, uint8_t* buffer)
CStatus ReadAt(int64_t position, int64_t nbytes,
shared_ptr[CBuffer]* out)
c_bool supports_zero_copy()
cdef extern from "arrow/python/api.h" namespace "arrow::py" nogil:
cdef cppclass PyReadableFile(RandomAccessFile):
PyReadableFile(object fo)
T GetResultValue[T](CResult[T]) except *
cdef class EmptyPyArrowIterator:
def __next__(self):
raise StopIteration
def init(self, str iter_unit, bint number_to_decimal):
pass
cdef class PyArrowIterator(EmptyPyArrowIterator):
cdef object context
cdef CArrowIterator* cIterator
cdef str unit
cdef shared_ptr[ReturnVal] cret
cdef vector[shared_ptr[CRecordBatch]] batches
cdef object use_dict_result
cdef object cursor
# this is the flag indicating whether fetch data as numpy datatypes or not. The flag
# is passed from the constructor of SnowflakeConnection class. Note, only FIXED, REAL
# and TIMESTAMP_NTZ will be converted into numpy data types, all other sql types will
# still be converted into native python types.
# https://docs.snowflake.com/en/user-guide/sqlalchemy.html#numpy-data-type-support
cdef object use_numpy
def __cinit__(
self,
object cursor,
object py_inputstream,
object arrow_context,
object use_dict_result,
object numpy,
):
cdef shared_ptr[InputStream] input_stream
cdef shared_ptr[CRecordBatch] record_batch
cdef CStatus ret
input_stream.reset(new PyReadableFile(py_inputstream))
cdef CResult[shared_ptr[CRecordBatchReader]] readerRet = CRecordBatchStreamReader.Open(input_stream.get())
if not readerRet.ok():
Error.errorhandler_wrapper(
cursor.connection,
cursor,
OperationalError,
{
'msg': 'Failed to open arrow stream:'+ str(readerRet.status().message()),
'errno': ER_FAILED_TO_READ_ARROW_STREAM
})
cdef shared_ptr[CRecordBatchReader] reader = readerRet.ValueOrDie()
while True:
ret = reader.get().ReadNext(&record_batch)
if not ret.ok():
Error.errorhandler_wrapper(
cursor.connection,
cursor,
OperationalError,
{
'msg': 'Failed to read next arrow batch:'+ str(ret.message()),
'errno': ER_FAILED_TO_READ_ARROW_STREAM
})
if record_batch.get() is NULL:
break
self.batches.push_back(record_batch)
snow_logger.debug(msg="Batches read: {}".format(self.batches.size()), path_name=__file__, func_name="__cinit__")
self.context = arrow_context
self.cIterator = NULL
self.unit = ''
self.use_dict_result = use_dict_result
self.cursor = cursor
self.use_numpy = numpy
def __dealloc__(self):
del self.cIterator
def __next__(self):
self.cret = self.cIterator.next()
if not self.cret.get().successObj:
msg = 'Failed to convert current row, cause:'+ str(<object>self.cret.get().exception)
Error.errorhandler_wrapper(self.cursor.connection, self.cursor, InterfaceError,
{
'msg': msg,
'errno': ER_FAILED_TO_CONVERT_ROW_TO_PYTHON_TYPE
})
            # this line would, in principle, let us drop back into Python and
            # detect the global variable immediately; however, the log does not
            # show up, for reasons that are unclear
ret = <object>self.cret.get().successObj
if ret is None:
raise StopIteration
else:
return ret
def init(self, str iter_unit, bint number_to_decimal):
# init chunk (row) iterator or table iterator
        if iter_unit != ROW_UNIT and iter_unit != TABLE_UNIT:
raise NotImplementedError
elif iter_unit == ROW_UNIT:
self.cIterator = new CArrowChunkIterator(
<PyObject*>self.context,
&self.batches,
<PyObject *>self.use_numpy,
) if not self.use_dict_result else new DictCArrowChunkIterator(
<PyObject*>self.context,
&self.batches,
<PyObject *>self.use_numpy
)
elif iter_unit == TABLE_UNIT:
self.cIterator = new CArrowTableIterator(
<PyObject*>self.context,
&self.batches,
number_to_decimal,
)
self.unit = iter_unit
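# A minimal usage sketch (all arguments come from snowflake-connector
# internals; the names below are assumptions for illustration):
# >>> it = PyArrowIterator(cursor, raw_arrow_stream, arrow_context,
# ...                      use_dict_result=False, numpy=False)
# >>> it.init(ROW_UNIT, number_to_decimal=False)  # or TABLE_UNIT for fetch_pandas()
# >>> row = next(it)  # raises StopIteration once the C++ iterator is drained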
cimport ndvector
from libc.stdlib cimport malloc, free
from libc.math cimport exp
ctypedef double dtype
ctypedef ndvector.ndvector_dtype ndvector_dtype
# supporting functions ---------------------------------------------------------------------------------------
cdef inline ndvector_dtype ndvector_memoryview_construct(dtype* vector_ptr, int* shape_ptr, int ndim):
""" constructs ndvector_dtype from dtype memoryview
"""
cdef ndvector_dtype ndvector_construct
ndvector_construct.vector_ptr = vector_ptr
ndvector_construct.shape_ptr = shape_ptr
ndvector_construct.dweight_ptr = <long*>malloc(sizeof(long) * ndim)
ndvector_construct.ndim = ndim
ndvector.ndvector_init_size(&ndvector_construct)
ndvector.ndvector_init_dweight(&ndvector_construct)
return ndvector_construct
cdef inline void update_k_wrapper(ndvector_dtype* ndvector_ptr, dtype* vector_ptr, long k_sequence_index,
long k_sequence_size):
""" updates the vector, shape pointer to accomodate the Kth observation sequence.
"""
ndvector_ptr.vector_ptr = &vector_ptr[ndvector_ptr.dweight_ptr[0] * k_sequence_index]
ndvector_ptr.shape_ptr[0] = k_sequence_size
ndvector.ndvector_init_size(ndvector_ptr)
cdef inline void remove_k_wrapper(ndvector_dtype* ndvector_ptr, dtype* vector_ptr, int t_observations):
ndvector_ptr.vector_ptr = vector_ptr
ndvector_ptr.shape_ptr[0] = t_observations
ndvector.ndvector_init_size(ndvector_ptr)
cdef inline void ndvector_logprob_mreducecast(ndvector_dtype* ndvector_outptr, ndvector_dtype*
ndvector_xptr, int order):
""" reduces log-probability distributions to [@param order].
"""
ndvector.ndvector_dtype_mreducecast(&ndvector.vreduce_logsumexp, ndvector_outptr, ndvector_xptr,
order, 0)
cdef long reduced_size = ndvector_outptr.size // ndvector_outptr.shape_ptr[ndvector_outptr.ndim - 1]
cdef dtype* ndvector_priorptr = <dtype*>malloc(sizeof(dtype) * reduced_size)
ndvector.ndvector_reducecast_vptr(&ndvector.vreduce_logsumexp, ndvector_priorptr, ndvector_outptr
.vector_ptr, ndvector_outptr.shape_ptr, ndvector_outptr.dweight_ptr, ndvector_outptr.size,
ndvector_outptr.ndim, -1)
ndvector.ndvector_broadcast_vptr(&ndvector.logdivexp, ndvector_outptr.vector_ptr, ndvector_priorptr,
ndvector_outptr.size, reduced_size, -1)
free(ndvector_priorptr)
cdef inline void forward(ndvector_dtype* init_logprobdist, ndvector_dtype* transition_logprobdist,
ndvector_dtype* emission_logprobgraph, ndvector_dtype* forward_logprobgraph,
ndvector_dtype* logweightsum_tempbuffer, int max_order, int t_observations,
int n_states):
""" parameters
init_logprobdist: shape[N]
the log-probability distribution for initializing in state N.
transition_logprobdist: shape[N ^(max_order + 1)]
the transition log-probability distribution encoding N states and order M,
where the index [0, 1... q] corresponds to the sequence <0 1... q>
and P(Qt = q|Q(t-m):Q(t-1) = 0...).
emission_logprobgraph: shape[T x N]
the emission log-probabilities of the observation sequence, P(Ot|Qt).
forward_logprob_graph: shape[T x N ^max_order]
return-pointer encoding the forward log-probability α where
P(OT|λ) = Σ EXP(forward_logprob_graph[T])
"""
cdef Py_ssize_t timestep
cdef int ndvector_order, mdvector_order
cdef long ndvector_size, mdvector_size
    cdef long fgraph_tdweight = forward_logprobgraph.dweight_ptr[0]
cdef long egraph_tdweight = emission_logprobgraph.dweight_ptr[0]
cdef dtype* forward_logprob_tempbuffer = <dtype*>malloc(sizeof(dtype) * n_states ** max_order)
ndvector.vector_init_copy(forward_logprob_tempbuffer, init_logprobdist.vector_ptr, n_states)
ndvector.vector_broadcast(&ndvector.vsum_dtype, forward_logprob_tempbuffer,
emission_logprobgraph.vector_ptr, n_states)
ndvector.ndvector_logdtype_identitycast_vptr(forward_logprobgraph.vector_ptr,
forward_logprob_tempbuffer, n_states, 1, n_states, max_order)
ndvector_size = n_states
ndvector_order = 1
for timestep in range(1, t_observations):
mdvector_order = min(timestep + 1, max_order)
mdvector_size = n_states ** mdvector_order
ndvector_logprob_mreducecast(logweightsum_tempbuffer, transition_logprobdist,
ndvector_order + 1)
ndvector.ndvector_broadcast_vptr(&ndvector.logprodexp, logweightsum_tempbuffer.vector_ptr,
forward_logprob_tempbuffer, logweightsum_tempbuffer.size, ndvector_size, -1)
if ndvector_order == max_order:
ndvector.ndvector_reducecast_vptr(&ndvector.vreduce_logsumexp, forward_logprob_tempbuffer,
logweightsum_tempbuffer.vector_ptr, logweightsum_tempbuffer.shape_ptr,
logweightsum_tempbuffer.dweight_ptr, logweightsum_tempbuffer.size,
logweightsum_tempbuffer.ndim, 0)
else:
ndvector.vector_init_copy(forward_logprob_tempbuffer, logweightsum_tempbuffer
.vector_ptr, mdvector_size)
ndvector.ndvector_broadcast_vptr(&ndvector.logprodexp, forward_logprob_tempbuffer,
&emission_logprobgraph.vector_ptr[egraph_tdweight * timestep], mdvector_size,
n_states, 0)
ndvector.ndvector_logdtype_identitycast_vptr(&forward_logprobgraph.vector_ptr[fgraph_tdweight
* timestep], forward_logprob_tempbuffer, mdvector_size, mdvector_order, n_states,
max_order)
ndvector_size = mdvector_size
ndvector_order = mdvector_order
free(forward_logprob_tempbuffer)
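# Reading aid for forward(): in the log domain the loop above realises the
# higher-order forward recursion
#   alpha_1(q)           = pi(q) + b_1(q)
#   alpha_t(q[t-m+1..t]) = logsumexp over q[t-m] of
#                          alpha_(t-1)(q[t-m..t-1]) + a(q_t | q[t-m..t-1]) + b_t(q_t)
# where the additions are logprodexp broadcasts and the logsumexp reduction
# over the oldest state is only applied once ndvector_order reaches max_order.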
cdef inline void backward(ndvector_dtype* transition_logprobdist, ndvector_dtype* emission_logprobgraph,
ndvector_dtype* backward_logprobgraph, ndvector_dtype* logweightsum_tempbuffer,
int max_order, int t_observations, int n_states):
""" parameters
transition_logprobdist: shape[N ^(max_order + 1)]
the transition log-probability distribution encoding N states and order M,
where the index [0, 1... q] corresponds to the sequence <0 1... q>
and P(Qt = q|Q(t-m):Q(t-1) = 0...).
emission_logprobgraph: shape[T x N]
the emission log-probabilities of the observation sequence, P(Ot|Qt).
backward_logprob_graph: shape[T x N ^max_order]
return-pointer encoding the backward log-probability β where
P(O|λ) = Σ[P(O1|S) P(Q1=S) EXP(backward_logprobgraph[1]])
"""
cdef Py_ssize_t timestep
cdef int ndvector_order, mdvector_order
cdef long ndvector_size, mdvector_size
cdef long bgraph_tdweight = backward_logprobgraph.dweight_ptr[0]
cdef long egraph_tdweight = emission_logprobgraph.dweight_ptr[0]
cdef dtype* backward_logprob_tempbuffer = <dtype*>malloc(sizeof(dtype) * n_states ** max_order)
mdvector_size = n_states ** max_order
mdvector_order = max_order
ndvector.vector_init_zeros(backward_logprob_tempbuffer, mdvector_size)
ndvector.vector_init_zeros(&backward_logprobgraph.vector_ptr[bgraph_tdweight * (t_observations - 1)],
mdvector_size)
for timestep in range(t_observations - 2, -1, -1):
ndvector_order = min(timestep + 1, max_order)
ndvector_size = n_states ** ndvector_order
ndvector_logprob_mreducecast(logweightsum_tempbuffer, transition_logprobdist,
ndvector_order + 1)
ndvector.ndvector_broadcast_vptr(&ndvector.logprodexp, logweightsum_tempbuffer.vector_ptr,
backward_logprob_tempbuffer, logweightsum_tempbuffer.size, mdvector_size, 0)
ndvector.ndvector_broadcast_vptr(&ndvector.logprodexp, logweightsum_tempbuffer.vector_ptr,
&emission_logprobgraph.vector_ptr[egraph_tdweight * (timestep + 1)],
logweightsum_tempbuffer.size, n_states, 0)
ndvector.ndvector_reducecast_vptr(&ndvector.vreduce_logsumexp, backward_logprob_tempbuffer,
logweightsum_tempbuffer.vector_ptr, logweightsum_tempbuffer.shape_ptr,
logweightsum_tempbuffer.dweight_ptr, logweightsum_tempbuffer.size,
logweightsum_tempbuffer.ndim, -1)
ndvector.ndvector_logdtype_identitycast_vptr(&backward_logprobgraph.vector_ptr[bgraph_tdweight
* timestep], backward_logprob_tempbuffer, ndvector_size, ndvector_order, n_states,
max_order)
mdvector_size = ndvector_size
mdvector_order = ndvector_order
free(backward_logprob_tempbuffer)
cdef inline dtype expectation_logprob(ndvector_dtype* transition_logprobdist, ndvector_dtype*
emission_logprobgraph, ndvector_dtype* transition_logprobgraph, ndvector_dtype* state_logprobgraph,
ndvector_dtype* forward_logprobgraph, ndvector_dtype* backward_logprobgraph, ndvector_dtype*
logweightsum_tempbuffer, int max_order, int t_observations, int n_states):
""" computes the transition sequence expected probability ξ = P(Q(t-m+1):Qt,Q(T+1)|O,λ),
state sequence expected probability γ = P(Q(t-m+1):Qt|O,λ), and the observation
sequence probability P(O|λ), where
P(O|λ) = Σ[αT•βT]
ξt = [αt•A(t:t+1)•B(t+1)•β(t+1)]/P(O|λ)
γt = [αt•βt]/P(O|λ)
parameters
transition_logprobdist: shape[N ^(max_order + 1)]
the transition log-probability distribution encoding N states and order M,
where the index [0, 1... q] corresponds to the sequence <0 1... q>
and P(Qt = q|Q(t-m):Q(t-1) = 0...).
emission_logprobgraph: shape[T x N]
the emission log-probabilities of the observation sequence, P(Ot|Qt).
forward_logprobgraph: shape[T x N ^max_order]
the forward log-probabilities α computed in the forward algorithm.
backward_logprob_graph: shape[T x N ^max_order]
the backward log-probabilities β computed in the backward algorithm.
transition_logprobgraph: shape[(T - max_order) x N^(max_order + 1)]
return-pointer for log-probability ξ.
state_logprobgraph: [T x N ^max_order]
return-pointer for log-probability γ.
returns
observation_logprob: dtype
P(O|λ)
"""
cdef Py_ssize_t timestep
cdef dtype observation_logprob = ndvector.vreduce_logsumexp(&forward_logprobgraph.vector_ptr
[forward_logprobgraph.dweight_ptr[0] * (t_observations - 1)],
n_states ** max_order)
ndvector.ndvector_init_copy(state_logprobgraph, forward_logprobgraph)
ndvector.vector_broadcast(&ndvector.logprodexp, state_logprobgraph.vector_ptr,
backward_logprobgraph.vector_ptr, state_logprobgraph.size)
for timestep in range(state_logprobgraph.size):
state_logprobgraph.vector_ptr[timestep] = ndvector.logdivexp(state_logprobgraph.vector_ptr
[timestep], observation_logprob)
ndvector_logprob_mreducecast(logweightsum_tempbuffer, transition_logprobdist, max_order + 1)
ndvector.ndvector_broadcast_vptr(&ndvector.vconstruct_dtype, transition_logprobgraph.vector_ptr,
logweightsum_tempbuffer.vector_ptr, transition_logprobgraph.size,
logweightsum_tempbuffer.size, 0)
cdef long transition_tstepsize = transition_logprobgraph.size // transition_logprobgraph.shape_ptr[0]
for timestep in range(max_order, t_observations):
ndvector.ndvector_broadcast_vptr(&ndvector.logprodexp, &transition_logprobgraph.vector_ptr
[transition_logprobgraph.dweight_ptr[0] * (timestep - max_order)],
&forward_logprobgraph.vector_ptr[forward_logprobgraph.dweight_ptr[0] * (timestep - 1)],
transition_tstepsize, forward_logprobgraph.size // forward_logprobgraph.shape_ptr[0], -1)
ndvector.ndvector_broadcast_vptr(&ndvector.logprodexp, &transition_logprobgraph.vector_ptr
            [transition_logprobgraph.dweight_ptr[0] * (timestep - max_order)],
&emission_logprobgraph.vector_ptr[emission_logprobgraph.dweight_ptr[0] * timestep],
transition_tstepsize, emission_logprobgraph.size // emission_logprobgraph.shape_ptr[0], 0)
ndvector.ndvector_broadcast_vptr(&ndvector.logprodexp, &transition_logprobgraph.vector_ptr
[transition_logprobgraph.dweight_ptr[0] * (timestep - max_order)],
&backward_logprobgraph.vector_ptr[backward_logprobgraph.dweight_ptr[0] * timestep],
transition_tstepsize, backward_logprobgraph.size // backward_logprobgraph.shape_ptr[0], 0)
for timestep in range(transition_logprobgraph.size):
transition_logprobgraph.vector_ptr[timestep] = ndvector.logdivexp(transition_logprobgraph
.vector_ptr[timestep], observation_logprob)
return observation_logprob
cdef inline void maximization_transition_logprobdist(ndvector_dtype* transition_logprobdist,
ndvector_dtype* k_transition_logprobgraph, ndvector_dtype* k_state_logprobgraph,
dtype* transition_logprobgraph, dtype* state_logprobgraph, ndvector_dtype* k_transition_tempbuffer,
ndvector_dtype* k_state_tempbuffer, dtype* observation_logprobgraph, int* k_sequence_indexes,
int* k_sequence_sizes, int k_sequences, int max_order, int n_states):
""" updates transition log-probability distribution A = P(Qt|Q(t-m):Q(t-1))
for K observation sequences where
A = Σk[Σ(ξk)/P(Ok|λ)]/Σk[Σ(γk)/P(Ok|λ)]
parameters:
transition_logprobgraph: shape[K x (Tk - max_order) x N^(max_order + 1)]
the transition sequence expected log-probability LN(ξ) where
ξ = P(Q(t-m+1):Qt,Q(T+1)|O,λ) for K observation sequences and the
shape K parameter padded to |O|.
state_logprobgraph: shape[K x Tk x N ^max_order] = [O x N ^max_order]
the state sequence expected probability LN(γ) where γ = P(Q(t-m+1):Qt|O,λ)
for K observation sequences.
observation_logprobgraph: shape[K]
the observation sequence log-probability LN(P(Ok|λ)) for K observation
sequences.
transition_logprobdist: shape[N ^(max_order + 1)]
return-pointer for the transition log-probability distribution A, encoding
N states and order M, where the index [0, 1... q] corresponds to the
sequence <0 1... q> and P(Qt = q|Q(t-m):Q(t-1) = 0...).
"""
cdef Py_ssize_t k
cdef dtype* state_tempbuffer = <dtype*>malloc(sizeof(dtype) * n_states ** max_order)
for k in range(k_sequences):
update_k_wrapper(k_transition_logprobgraph, transition_logprobgraph, k_sequence_indexes[k],
k_sequence_sizes[k] - max_order)
update_k_wrapper(k_state_logprobgraph, state_logprobgraph, k_sequence_indexes[k],
k_sequence_sizes[k] - max_order)
ndvector.ndvector_reducecast_vptr(&ndvector.vreduce_logsumexp,
&k_transition_tempbuffer.vector_ptr[k * k_transition_tempbuffer.dweight_ptr[0]],
k_transition_logprobgraph.vector_ptr, k_transition_logprobgraph.shape_ptr,
k_transition_logprobgraph.dweight_ptr, k_transition_logprobgraph.size,
k_state_logprobgraph.ndim, 0)
ndvector.ndvector_reducecast_vptr(&ndvector.vreduce_logsumexp,
&k_state_tempbuffer.vector_ptr[k * k_state_tempbuffer.dweight_ptr[0]],
&k_state_logprobgraph.vector_ptr[(max_order - 1) * k_state_logprobgraph.dweight_ptr[0]],
k_state_logprobgraph.shape_ptr, k_state_logprobgraph.dweight_ptr, k_state_logprobgraph.size,
k_state_logprobgraph.ndim, 0)
ndvector.ndvector_broadcast_vptr(&ndvector.logdivexp, k_transition_tempbuffer.vector_ptr,
observation_logprobgraph, k_sequences * k_transition_tempbuffer.dweight_ptr[0],
k_sequences, -1)
ndvector.ndvector_broadcast_vptr(&ndvector.logdivexp, k_state_tempbuffer.vector_ptr,
observation_logprobgraph, k_sequences * k_state_tempbuffer.dweight_ptr[0],
k_sequences, -1)
ndvector.ndvector_dtype_mreducecast(&ndvector.vreduce_logsumexp, transition_logprobdist,
k_transition_tempbuffer, max_order + 1, 0)
ndvector.ndvector_reducecast_vptr(&ndvector.vreduce_logsumexp, state_tempbuffer, k_state_tempbuffer
.vector_ptr, k_state_tempbuffer.shape_ptr, k_state_tempbuffer.dweight_ptr,
k_state_tempbuffer.size, k_state_tempbuffer.ndim, 0)
ndvector.ndvector_broadcast_vptr(&ndvector.logdivexp, transition_logprobdist.vector_ptr,
state_tempbuffer, transition_logprobdist.size, n_states ** max_order, -1)
free(state_tempbuffer)
cdef inline void forward_backward_empass(ndvector_dtype* init_logprobdist, ndvector_dtype* transition_logprobdist,
ndvector_dtype* k_emission_logprobgraph, ndvector_dtype* k_forward_logprobgraph,
ndvector_dtype* k_backward_logprobgraph, ndvector_dtype* k_transition_logprobgraph,
ndvector_dtype* k_state_logprobgraph, ndvector_dtype* logweightsum_tempbuffer,
ndvector_dtype* k_transition_tempbuffer, ndvector_dtype* k_state_tempbuffer,
dtype* emission_logprobgraph, dtype* forward_logprobgraph, dtype* backward_logprobgraph,
dtype* transition_logprobgraph, dtype* state_logprobgraph, dtype* observation_logprobgraph,
int* k_sequence_indexes, int* k_sequence_sizes, int k_sequences, int max_order, int t_observations,
int n_states):
cdef Py_ssize_t k
cdef int kindex, ksize
for k in range(k_sequences):
kindex = k_sequence_indexes[k]
ksize = k_sequence_sizes[k]
update_k_wrapper(k_emission_logprobgraph, emission_logprobgraph, kindex, ksize)
update_k_wrapper(k_forward_logprobgraph, forward_logprobgraph, kindex, ksize)
update_k_wrapper(k_backward_logprobgraph, backward_logprobgraph, kindex, ksize)
update_k_wrapper(k_transition_logprobgraph, transition_logprobgraph, kindex, ksize - max_order)
update_k_wrapper(k_state_logprobgraph, state_logprobgraph, kindex, ksize)
forward(init_logprobdist, transition_logprobdist, k_emission_logprobgraph,
k_forward_logprobgraph, logweightsum_tempbuffer, max_order,
t_observations, n_states)
backward(transition_logprobdist, k_emission_logprobgraph, k_backward_logprobgraph,
logweightsum_tempbuffer, max_order, t_observations, n_states)
observation_logprobgraph[k] = expectation_logprob(transition_logprobdist,
k_emission_logprobgraph, k_transition_logprobgraph, k_state_logprobgraph,
k_forward_logprobgraph, k_backward_logprobgraph, logweightsum_tempbuffer,
max_order, t_observations, n_states)
maximization_transition_logprobdist(transition_logprobdist, k_transition_logprobgraph,
k_state_logprobgraph, transition_logprobgraph, state_logprobgraph, k_transition_tempbuffer,
k_state_tempbuffer, observation_logprobgraph, k_sequence_indexes, k_sequence_sizes,
k_sequences, max_order, n_states)
cdef inline dtype viterbi(ndvector_dtype* init_logprobdist, ndvector_dtype* transition_logprobdist,
ndvector_dtype* emission_logprobgraph, ndvector_dtype* logweightsum_tempbuffer,
int* maxprob_pathgraph, int max_order, int t_observations, int n_states):
cdef ndvector_dtype viterbi_logprobgraph
viterbi_logprobgraph.size = t_observations * n_states ** max_order
viterbi_logprobgraph.ndim = max_order + 1
viterbi_logprobgraph.vector_ptr = <dtype*>malloc(sizeof(dtype) * viterbi_logprobgraph.size)
    viterbi_logprobgraph.shape_ptr = <int*>malloc(sizeof(int) * viterbi_logprobgraph.ndim)
viterbi_logprobgraph.dweight_ptr = <long*>malloc(sizeof(long) * viterbi_logprobgraph.ndim)
viterbi_logprobgraph.shape_ptr[0] = t_observations
ndvector.vector_init_repeat(&viterbi_logprobgraph.shape_ptr[1], n_states, max_order)
ndvector.ndvector_init_dweight(&viterbi_logprobgraph)
cdef int* backpointer_pathgraph_vptr = <int*>malloc(sizeof(int) * viterbi_logprobgraph.size)
cdef Py_ssize_t timestep
cdef int ndvector_order, mdvector_order
    cdef long ndvector_size, mdvector_size
cdef long vgraph_tdweight = viterbi_logprobgraph.dweight_ptr[0]
ndvector.vector_init_copy(viterbi_logprobgraph.vector_ptr, init_logprobdist.vector_ptr,
n_states)
ndvector.vector_broadcast(&ndvector.vsum_dtype, viterbi_logprobgraph.vector_ptr,
emission_logprobgraph.vector_ptr, n_states)
ndvector_size = n_states
ndvector_order = 1
for timestep in range(1, t_observations):
mdvector_order = min(timestep + 1, max_order)
mdvector_size = n_states ** mdvector_order
ndvector_logprob_mreducecast(logweightsum_tempbuffer, transition_logprobdist,
ndvector_order + 1)
ndvector.ndvector_broadcast_vptr(&ndvector.logprodexp, logweightsum_tempbuffer.vector_ptr,
&viterbi_logprobgraph.vector_ptr[(timestep - 1) * vgraph_tdweight],
logweightsum_tempbuffer.size, ndvector_size, -1)
ndvector.ndvector_broadcast_vptr(&ndvector.logprodexp, logweightsum_tempbuffer.vector_ptr,
&emission_logprobgraph.vector_ptr[timestep * emission_logprobgraph.dweight_ptr[0]],
logweightsum_tempbuffer.size, n_states, 0)
if ndvector_order == max_order:
ndvector.ndvector_reducecast_vptr(&ndvector.vreduce_dtypemax,
&viterbi_logprobgraph.vector_ptr[timestep * vgraph_tdweight],
logweightsum_tempbuffer.vector_ptr, logweightsum_tempbuffer.shape_ptr,
logweightsum_tempbuffer.dweight_ptr, logweightsum_tempbuffer.size,
logweightsum_tempbuffer.ndim, 0)
ndvector.ndvector_reducecast_vptr(&ndvector.vreduce_dtype_argmax,
&backpointer_pathgraph_vptr[timestep * vgraph_tdweight],
logweightsum_tempbuffer.vector_ptr, logweightsum_tempbuffer.shape_ptr,
logweightsum_tempbuffer.dweight_ptr, logweightsum_tempbuffer.size,
logweightsum_tempbuffer.ndim, 0)
else:
ndvector.vector_init_copy(&viterbi_logprobgraph.vector_ptr[timestep * vgraph_tdweight],
logweightsum_tempbuffer.vector_ptr, mdvector_size)
ndvector_size = mdvector_size
ndvector_order = mdvector_order
cdef dtype max_logprob = ndvector.vector_reduce(&ndvector.vmax_dtype, &viterbi_logprobgraph
.vector_ptr[(t_observations - 1) * vgraph_tdweight], ndvector_size)
ndvector.ndvector_mdargmax(
&maxprob_pathgraph[t_observations - max_order],
&viterbi_logprobgraph.vector_ptr[(t_observations - 1) * vgraph_tdweight],
&viterbi_logprobgraph.shape_ptr[1],
&viterbi_logprobgraph.dweight_ptr[1],
viterbi_logprobgraph.size // viterbi_logprobgraph.shape_ptr[0],
max_order
)
for timestep in range(t_observations - 1, max_order - 1, -1):
reversed_timestep = timestep - max_order
maxprob_pathgraph[reversed_timestep] = ndvector.ndvector_indexing_vptr(
&backpointer_pathgraph_vptr[timestep * vgraph_tdweight],
&maxprob_pathgraph[reversed_timestep + 1],
&viterbi_logprobgraph.dweight_ptr[1], max_order
)
ndvector.ndvector_deconstruct(&viterbi_logprobgraph)
free(backpointer_pathgraph_vptr)
return max_logprob
#cython: boundscheck=False
#cython: wraparound=False
#cython: nonecheck=False
#cython: language_level=3
from libc.math cimport lgamma
from scipy.optimize.cython_optimize cimport ridder
import numpy as np # For internal testing of the cython documentation
cimport numpy as np # "cimport" is used to import special compile-time stuff
DTYPE_d = np.float64
ctypedef np.float64_t DTYPE_d_t
cdef expect_inv_beta_p_delta(np.ndarray[DTYPE_d_t, ndim=1] bs_po,
np.ndarray[DTYPE_d_t, ndim=1] br_po,
np.ndarray[DTYPE_d_t, ndim=2] delta):
"""Compute the expectation of 1/(beta + delta)"""
b_mean = br_po / (bs_po - 1)
return 1 / (b_mean + delta)
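# Note: this is a plug-in approximation: the posterior mean
# b_mean = br / (bs - 1) (an inverse-gamma-style mean) is substituted for beta
# rather than integrating 1/(beta + delta) over the full posterior;
# expect_log_beta_p_delta below uses the same shortcut.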
def _update_alpha(np.ndarray[DTYPE_d_t, ndim=2] as_pr,
np.ndarray[DTYPE_d_t, ndim=2] ar_pr,
list zp_po,
np.ndarray[DTYPE_d_t, ndim=2] bs_po,
np.ndarray[DTYPE_d_t, ndim=2] br_po,
list delta_ikj,
list dt_ik,
np.ndarray[DTYPE_d_t, ndim=1] last_t):
cdef:
        int dim = as_pr.shape[1]
np.ndarray[DTYPE_d_t, ndim=2] as_po = np.zeros((dim+1, dim)) # Alpha posterior shape, to return
np.ndarray[DTYPE_d_t, ndim=2] ar_po = np.zeros((dim+1, dim)) # Alpha posterior rate, to return
np.ndarray[DTYPE_d_t, ndim=2] zp_po_i
for i in range(dim):
# update shape
as_po[:, i] = as_pr[:, i] + zp_po[i].sum(axis=0)
# update rate
ar_po[0, i] = ar_pr[0, i] + last_t[i]
D_i_kj = (np.expand_dims(dt_ik[i], 1) *
expect_inv_beta_p_delta(bs_po[:, i], br_po[:, i],
delta_ikj[i][:, 1:] + 1e-20))
ar_po[1:, i] = ar_pr[1:, i] + D_i_kj.sum(axis=0)
return as_po, ar_po
cdef digamma(np.ndarray arr):
    """Digamma function, approximated by a forward difference of lgamma
    (arr is assumed to be 1 or 2 dimensional)"""
    cdef:
        int dim0 = arr.shape[0]
        int dim1
        double eps = 1e-8
    lgamma_prime = np.zeros_like(arr)
    # Note: the original buffer declaration ndim=1 would have rejected the
    # documented 2-D inputs, so the untyped ndarray signature is used here.
    if arr.ndim == 1:
        for i in range(dim0):
            lgamma_prime[i] = (lgamma(arr[i] + eps) - lgamma(arr[i])) / eps
    elif arr.ndim == 2:
        dim1 = arr.shape[1]
        for j in range(dim0):
            for i in range(dim1):
                lgamma_prime[j, i] = (lgamma(arr[j, i] + eps) - lgamma(arr[j, i])) / eps
    return lgamma_prime
cdef expect_log_alpha(np.ndarray[DTYPE_d_t, ndim=1] as_po, np.ndarray[DTYPE_d_t, ndim=1] ar_po):
"""Compute the expectation of log(alpha)"""
return digamma(as_po) - np.log(ar_po)
cdef expect_log_beta_p_delta(np.ndarray[DTYPE_d_t, ndim=1] bs_po,
np.ndarray[DTYPE_d_t, ndim=1] br_po,
np.ndarray[DTYPE_d_t, ndim=2] delta):
"""Compute the expectation of log(beta + delta)"""
b_mean = br_po / (bs_po - 1)
return np.log(b_mean + delta)
cdef _compute_epi(int i,
np.ndarray[DTYPE_d_t, ndim=2] as_po,
np.ndarray[DTYPE_d_t, ndim=2] ar_po,
np.ndarray[DTYPE_d_t, ndim=2] bs_po,
np.ndarray[DTYPE_d_t, ndim=2] br_po,
list dt_ik,
list delta_ikj):
# log(inter-arrival time), only for valid events
epi = np.zeros_like(delta_ikj[i])
epi += np.log(np.expand_dims(dt_ik[i], 1) + 1e-20)
# Expected value log(alpha)
epi += np.expand_dims(expect_log_alpha(as_po[:, i], ar_po[:, i]), 0)
# Expected value log(beta + delta), only for j>=1, i.e. ignore baseline
epi[:, 1:] -= expect_log_beta_p_delta(bs_po[:, i], br_po[:, i],
delta_ikj[i][:, 1:] + 1e-20)
return epi
def _update_z(np.ndarray[DTYPE_d_t, ndim=2] as_po,
np.ndarray[DTYPE_d_t, ndim=2] ar_po,
np.ndarray[DTYPE_d_t, ndim=2] bs_po,
np.ndarray[DTYPE_d_t, ndim=2] br_po,
list dt_ik,
list delta_ikj):
dim = as_po.shape[1]
zs = list()
for i in range(dim):
epi = _compute_epi(i, as_po, ar_po, bs_po, br_po, dt_ik, delta_ikj)
# Softmax
epi -= epi.max()
        epi = np.exp(epi)
epi /= np.expand_dims(epi.sum(axis=1), 1)
zs.append(epi)
return zs
from __future__ import print_function
from cpython cimport bool
from collections import defaultdict
from itertools import chain
import io
def filter_word_stream(self, sentence):
cdef list filtered
cdef set stop_list
stop_list = self.config.get('stop_list', set())
filtered = []
for w in sentence:
self.vocabulary[w] += 1
if self.vocabulary[w] > self.config.get('lower_threshold', 0) and w not in stop_list:
filtered.append(w)
return filtered
def _read_documents(self, corpus):
"""
If text file, treats each line as a sentence.
If list of list, treats each list as sentence of words
"""
if isinstance(corpus, str):
corpus = open(corpus)
for sentence in corpus:
if isinstance(sentence, list):
pass
elif isinstance(sentence, str):
sentence = _tokenize(sentence)
else:
raise TypeError("Corpus format not supported")
yield filter_word_stream(self, sentence)
if isinstance(corpus, io.TextIOBase):
corpus.close()
def _tokenize(s):
"""
    Tokenizes a sentence by splitting on whitespace. (URL replacement and
    A-Ö 0-9 filtering are not implemented in this version.)
"""
return s.split()
cdef tuple _build_contexts(self, focus, list sentence, int i):
cdef bool ordered, directed, is_ngrams
cdef int left, right
cdef tuple window_size
cdef list context
cdef int j
cdef str add_word
ordered = self.config.get('ordered', False)
directed = self.config.get('directed', False)
window_size = self.config['window_size']
left = i - window_size[0] if i - window_size[0] > 0 else 0
right = i + window_size[1] + 1 if i + window_size[1] + 1 <= len(sentence) else len(sentence)
context = []
for j in range(right - left):
if left + j == i: # skip focus word
continue
add_word = sentence[left+j]
if directed:
if left + j < i:
add_word += '_left'
elif left + j > i:
add_word += '_right'
if ordered:
if left + j < i:
add_word += '_' + str(j + 1)
elif left + j > i:
add_word += '_' + str(left + j - i)
context.append(add_word)
return focus, context
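# Example sketch: with window_size=(2, 2) and both `directed` and `ordered`
# enabled, the sentence ['a', 'b', 'c', 'd', 'e'] with focus 'c' (i=2) yields
# the context labels ['a_left_1', 'b_left_2', 'd_right_1', 'e_right_2']
# (the ordered suffix is the position within the window, as built above).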
def _vocabularize(self, corpus):
"""
Wraps the corpus object creating a generator that counts the vocabulary,
and yields the focus word along with left and right context.
Lists as replacements of words are treated as one unit and iterated through (good for ngrams).
"""
cdef int n, i
cdef list sentence
cdef str focus
is_ngrams = self.config.get('is_ngrams', False)
for n, sentence in enumerate(_read_documents(self, corpus)):
if n % 1000 == 0:
print(".", end=" ", flush=True)
for i, focus in enumerate(sentence):
contexts = _build_contexts(self, focus, sentence, i)
yield contexts
def build(focus_words, context_words):
"""
Builds a dict of dict of collocation frequencies. This is to be cythonized.
"""
# Collect word collocation frequencies in dict of dict
colfreqs = defaultdict(lambda: defaultdict(int))
for focus, contexts in zip(focus_words, context_words):
for context in contexts:
colfreqs[focus][context] += 1
return colfreqs
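# Example sketch:
# >>> build(['dog', 'dog'], [['the', 'barks'], ['the']])
# defaultdict(..., {'dog': defaultdict(..., {'the': 2, 'barks': 1})})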
<|end_of_text|>"""Module for parsing and manipulating data from ENDF evaluations. Currently, it
only can read several MTs from File 1, but with time it will be expanded to
include the entire ENDF format.
All the classes and functions in this module are based on document
ENDF-102 titled "Data Formats and Procedures for the Evaluated Nuclear
Data File ENDF-6". The latest version from June 2009 can be found at
http://www-nds.iaea.org/ndspub/documents/endf/endf102/endf102.pdf
For more information on the Evaluation class, contact Paul Romano
<[email protected]>. For more information on the Library class, contact
John Xia <[email protected]>.
"""
from __future__ import print_function, division, unicode_literals
import re
import os
try:
from collections.abc import Iterable
from collections import OrderedDict
except ImportError:
from collections import OrderedDict, Iterable
from warnings import warn
from pyne.utils import QA_warn
cimport numpy as np
import numpy as np
from numpy.polynomial.polynomial import Polynomial
from numpy.polynomial.legendre import Legendre
from scipy.interpolate import interp1d
cimport cython
from pyne cimport cpp_nucname
from pyne import nucname
from pyne import rxdata
from pyne.rxname import label
from pyne.utils import fromendf_tok, endftod
np.import_array()
QA_warn(__name__)
libraries = {0: 'ENDF/B', 1: 'ENDF/A', 2: 'JEFF', 3: 'EFF',
4: 'ENDF/B High Energy', 5: 'CENDL', 6: 'JENDL',
31: 'INDL/V', 32: 'INDL/A', 33: 'FENDL', 34: 'IRDF',
35: 'BROND', 36: 'INGDB-90', 37: 'FENDL/A', 41: 'BROND'}
FILE1END = r'([1-9]\d{3}| [1-9]\d{2}| [1-9]\d| [1-9]) 1451(?= *[1-9]\d*$)[ \d]{5}$'
FILE1_R = re.compile(r'^.{66}'+FILE1END)
SPACEINT11_R = re.compile(r'^((?= *-?[1-9]\d*$)[ \d-]{11}| {10}0)$') # I11 (FORTRAN77)
def _radiation_type(value):
p = {0: 'gamma', 1: 'beta-', 2: 'ec/beta+', 3: 'IT',
         4: 'alpha', 5: 'neutron', 6: 'sf', 7: 'proton',
8: 'e-', 9: 'xray', 10: 'unknown'}
if value % 1.0 == 0:
return p[int(value)]
else:
return (p[int(value)], p[int(10*value % 10)])
class Library(rxdata.RxLib):
"""A class for a file which contains multiple ENDF evaluations."""
def __init__(self, fh):
self.mts = {}
self.structure = {}
self.mat_dict = {}
self.more_files = True
self.intdict = {1: self._histogram, 2: self._linlin, 3: self._linlog, 4:
self._loglin, 5: self._loglog, 6:
self._chargedparticles, 11: self._histogram,
12: self._linlin, 13: self._linlog, 14: self._loglin,
15: self._loglog, 21: self._histogram, 22: self._linlin,
23: self._linlog, 24: self._loglin, 25: self._loglog}
self.chars_til_now = 0 # offset (byte) from the top of the file for seek()ing
self.fh = fh
self._set_line_length()
# read first line (Tape ID)
self._read_tpid()
# read headers for all materials
while self.more_files:
self._read_headers()
def _set_line_length(self):
opened_here = False
if isinstance(self.fh, str):
fh = open(self.fh, 'r')
opened_here = True
else:
fh = self.fh
# Make sure the newlines attribute is set: read a couple of lines
# and rewind to the beginning.
# Yes, we need to read two lines to make sure that fh.newlines gets
# set. One line seems to be enough for *nix-style line terminators, but
# two seem to be necessary for Windows-style terminators.
fh.seek(0)
fh.readline()
line = fh.readline()
self.line_length = len(line) # actual chars/line read
# self.offset now stores the diff. between the length of a line read
# and the length of a line in the ENDF-6 formatted file.
self.offset = len(fh.newlines) - (self.line_length - 80)
fh.seek(0)
if opened_here:
fh.close()
def load(self):
"""load()
Read the ENDF file into a NumPy array.
Returns
-------
        data : np.array, 1d, float64
Returns a 1d float64 NumPy array.
"""
opened_here = False
        if isinstance(self.fh, str):
fh = open(self.fh, 'r')
opened_here = True
else:
fh = self.fh
fh.readline()
data = fromendf_tok(fh.read())
fh.seek(0)
if opened_here:
fh.close()
return data
def _read_tpid(self):
if self.chars_til_now == 0:
opened_here = False
            if isinstance(self.fh, str):
fh = open(self.fh, 'r')
opened_here = True
else:
fh = self.fh
line = fh.readline()
self.chars_til_now = len(line) + self.offset
else:
warn('TPID is the first line, has been read already', UserWarning)
def _isContentLine(self,parts):
"""Check whether a line is consisted of 22*spaces and 4*(I11)s (FORTRAN77).
Parameters
-----------
parts: list
made by dividing 1-66 chars of an input line into 6 parts of equal length
"""
return parts[0]+parts[1]==' '*22 and \
SPACEINT11_R.match(parts[2]) and SPACEINT11_R.match(parts[3]) and \
SPACEINT11_R.match(parts[4]) and SPACEINT11_R.match(parts[5])
def _read_headers(self):
cdef int nuc
cdef int mat_id
cdef double nucd
opened_here = False
        if isinstance(self.fh, str):
fh = open(self.fh, 'r')
opened_here = True
else:
fh = self.fh
# Go to current file position
fh.seek(self.chars_til_now)
# get mat_id
line = fh.readline()
mat_id = int(line[66:70].strip() or -1)
# check for isomer (LIS0/LISO entry)
matflagstring = line + fh.read(3*self.line_length)
flagkeys = ['ZA', 'AWR', 'LRP', 'LFI', 'NLIB', 'NMOD', 'ELIS',
'STA', 'LIS', 'LIS0', 0, 'NFOR', 'AWI', 'EMAX',
'LREL', 0, 'NSUB', 'NVER', 'TEMP', 0, 'LDRV',
0, 'NWD', 'NXC']
flags = dict(zip(flagkeys, fromendf_tok(matflagstring)))
nuc = cpp_nucname.id(<int> (<int> flags['ZA']*10000 + flags['LIS0']))
# Make a new dict in self.structure to contain the material data.
if nuc not in self.structure:
self.structure.update(
{nuc: {'styles': '', 'docs': [], 'particles': [], 'data': {},
'matflags': {}}})
self.mat_dict.update({nuc: {'end_line': [],
'mfs': {}}})
# Parse header (all lines with 1451)
mf = 1
start = self.chars_til_now//(self.line_length+self.offset) # present (the first) line number
stop = start # if no 451 can be found
line = fh.readline() # get the next line; start parsing from the 6th line
while FILE1_R.search(line):
# divide 1-66 chars of the line into six 11-char parts
lineparts = [line[i:i+11] for i in range(0, 66, 11)]
# parse contents section
if self._isContentLine(lineparts):
# When MF and MT change, add offset due to SEND/FEND records.
old_mf = mf
mf, mt = int(lineparts[2]), int(lineparts[3])
                if old_mf != mf:
start += 1
mt_length = int(lineparts[4])
stop = start + mt_length
# The first number in the tuple is the offset in the file to seek(),
# whereas the second stands for the number of characters to be read().
self.mat_dict[nuc]['mfs'][mf, mt] = ((self.line_length+self.offset)*start,
self.line_length*stop)
start = stop + 1
line = fh.readline()
# parse comment
else:
self.structure[nuc]['docs'].append(line[0:66])
line = fh.readline()
# Find where the end of the material is and then jump to it.
# The end is 3 lines after the last mf,mt
# combination (SEND, FEND, MEND)
self.chars_til_now = (stop + 3)*(self.line_length+self.offset) # at the end of a MAT
fh.seek(self.chars_til_now)
nextline = fh.readline()
        self.more_files = (nextline != '' and nextline[68:70] != '-1')
# Update materials dict
        if mat_id != -1:
self.mat_dict[nuc]['end_line'] = \
(self.chars_til_now+self.offset)//self.line_length
setattr(self,'mat{0}'.format(nuc), self.structure[nuc])
self._read_mat_flags(nuc)
fh.seek(0)
if opened_here:
fh.close()
def _read_mat_flags(self, nuc):
"""Reads the global flags for a certain material.
Parameters
-----------
nuc: int
ZZAAAM of material.
"""
mf1 = self.get_rx(nuc, 1, 451, lines=4)
flagkeys = ['ZA', 'AWR', 'LRP', 'LFI', 'NLIB', 'NMOD', 'ELIS',
'STA', 'LIS', 'LIS0', 0, 'NFOR', 'AWI', 'EMAX',
'LREL', 0, 'NSUB', 'NVER', 'TEMP', 0, 'LDRV',
0, 'NWD', 'NXC']
flags = dict(zip(flagkeys, mf1[:12]))
del flags[0]
self.structure[nuc]['matflags'] = flags
def _get_cont(self, keys, line):
"""Read one line of the array, treating it as a CONT record.
Parameters
-----------
keys: iterable
An iterable containing the labels for each field in the CONT record.
For empty/unassigned fields, use 0.
line: array-like
The line to be read.
Returns
--------
cont : dict
Contains labels and values mapped to each other.
"""
cont = dict(zip(keys, line.flat[:6]))
if 0 in cont:
del cont[0]
return cont
def _get_head(self, keys, line):
"""Read one line of the array, treating it as a HEAD record.
Parameters
-----------
keys: iterable
An iterable containing the labels for each field in the HEAD record.
For empty/unassigned fields, use 0.
line: array-like
The line to be read.
Returns
--------
cont : dict
Contains labels and values mapped to each other.
"""
# Just calls self._get_cont because HEAD is just a special case of CONT
if (keys[0] == 'ZA' and keys[1] == 'AWR'):
return self._get_cont(keys, line)
else:
raise ValueError('This is not a HEAD record: {}'.format(
dict(zip(keys, line))))
def _get_list(self, headkeys, itemkeys, lines):
"""Read some lines of the array, treating it as a LIST record.
Parameters
-----------
headkeys: iterable
An iterable containing the labels for each field in the first
record. For empty/unassigned fields, use 0.
itemkeys: iterable
An iterable containing the labels for each field in the next
records. For empty/unassigned fields, use 0. If itemkeys has length
1, the array is flattened and assigned to that key.
lines: two-dimensional array-like
The lines to be read. Each line should have 6 elements. The first
line should be the first line of the LIST record; since we don't
know the length of the LIST record, the last line should be the last
line it is plausible for the LIST record to end.
Returns
--------
head: dict
Contains elements of the first line paired with their labels.
items: dict
Contains columns of the LIST array paired with their labels, unless
itemkeys has length 1, in which case items contains the flattened
LIST array paired with its label.
total_lines: int
The number of lines the LIST record takes up.
"""
head = dict(zip(headkeys, lines[0:].flat[:len(headkeys)]))
if 0 in head:
del head[0]
npl = int(lines[0][4])
headlines = (len(headkeys)-1)//6 + 1
arraylines = (npl-1)//6 + 1
        if len(itemkeys) == 1:
array_len = npl - (headlines-1) * 6
items = {itemkeys[0]: lines[headlines:].flat[:array_len]}
else:
array_width = ((len(itemkeys)-1)//6 + 1)*6
items_transposed = np.transpose(
lines[headlines:headlines+arraylines].reshape(-1,
array_width))
items = dict(zip(itemkeys, items_transposed))
if 0 in items:
del items[0]
total_lines = 1+arraylines
return head, items, total_lines
def _get_tab1(self, headkeys, xykeys, lines):
"""Read some lines of the array, treating it as a TAB1 record.
Parameters
-----------
headkeys: iterable, length 6
An iterable containing the labels for each field in the first
line. For empty/unassigned fields, use 0.
xykeys: iterable, length 2
An iterable containing the labels for the interpolation data. The
first key should be xint, the second should be y(x).
lines: two-dimensional array-like
The lines to be read. Each line should have 6 elements. The first
line should be the first line of the TAB1 record; since we don't
know the length of the TAB1 record, the last line should be the last
line it is plausible for the TAB1 record to end.
Returns
--------
head: dict
Contains elements of the first card paired with their labels.
intdata: dict
Contains the interpolation data.
total_lines: int
The number of lines the TAB1 record takes up.
"""
head = dict(zip(headkeys, lines[0]))
if 0 in head:
del head[0]
nr, np_ = int(lines[0][4]), int(lines[0][5])
meta_len = (nr*2-1)//6 + 1
data_len = (np_*2-1)//6 + 1
intmeta = dict(zip(('intpoints', 'intschemes'),
(np.asarray(lines[1:1+meta_len].flat[:nr*2:2], dtype=int),
lines[1:1+meta_len].flat[1:nr*2:2])))
intdata = dict(zip(xykeys,
(lines[1+meta_len:1+meta_len+data_len].flat[:np_*2:2],
lines[1+meta_len:1+meta_len+data_len].flat[1:np_*2:2])))
intdata.update(intmeta)
total_lines = 1 + meta_len + data_len
return head, intdata, total_lines
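    # TAB1 layout recap (ENDF-102), mirroring meta_len/data_len above: the head
    # line carries C1 C2 L1 L2 NR NP; the next ceil(2*NR/6) lines hold the NR
    # (NBT, INT) interpolation-range pairs; the following ceil(2*NP/6) lines
    # hold the NP (x, y) points.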
def _histogram(self, e_int, xs, low, high):
if low in e_int:
# truncate at lower bound
xs = xs[e_int >= low]
e_int = e_int[e_int >= low]
elif low is not None and low > e_int[0]:
# truncate at lower bound and prepend interpolated endpoint
low_xs = xs[e_int < low][-1]
xs = np.insert(xs[e_int > low], 0, low_xs)
e_int = np.insert(e_int[e_int > low], 0, low)
if high in e_int:
# truncate at higher bound
xs = xs[e_int <= high]
e_int = e_int[e_int <= high]
elif high is not None:
            # truncate at higher bound and append interpolated endpoint
high_xs = xs[e_int < high][-1]
xs = np.append(xs[e_int < high], high_xs)
e_int = np.append(e_int[e_int < high], high)
de_int = float(e_int[-1]-e_int[0])
return np.nansum((e_int[1:]-e_int[:-1]) * xs[:-1]/de_int)
def _linlin(self, e_int, xs, low, high):
if low is not None or high is not None:
interp = interp1d(e_int, xs)
if low in e_int:
xs = xs[e_int >= low]
e_int = e_int[e_int >= low]
elif low is not None and low > e_int[0]:
low_xs = interp(low)
xs = np.insert(xs[e_int > low], 0, low_xs)
e_int = np.insert(e_int[e_int > low], 0, low)
if high in e_int:
xs = xs[e_int <= high]
e_int = e_int[e_int <= high]
elif high is not None:
high_xs = interp(high)
xs = np.append(xs[e_int < high], high_xs)
e_int = np.append(e_int[e_int < high], high)
de_int = float(e_int[-1]-e_int[0])
return np.nansum((e_int[1:]-e_int[:-1]) * (xs[1:]+xs[:-1])/2./de_int)
def _linlog(self, e_int, xs, low, high):
if low is not None or high is not None:
interp = interp1d(np.log(e_int), xs)
if low in e_int:
xs = xs[e_int >= low]
e_int = e_int[e_int >= low]
elif low is not None and low > e_int[0]:
low_xs = interp(np.log(low))
xs = np.insert(xs[e_int > low], 0, low_xs)
e_int = np.insert(e_int[e_int > low], 0, low)
if high in e_int:
xs = xs[e_int <= high]
e_int = e_int[e_int <= high]
elif high is not None:
high_xs = interp(np.log(high))
xs = np.append(xs[e_int < high], high_xs)
e_int = np.append(e_int[e_int < high], high)
de_int = float(e_int[-1]-e_int[0])
x1 = e_int[:-1]
x2 = e_int[1:]
y1 = xs[:-1]
y2 = xs[1:]
A = (y1-y2)/(np.log(x1/x2))
B = y1-A*np.log(x1)
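        # Here y is linear in ln(x): y(x) = A*ln(x) + B, so each panel
        # integrates exactly to A*(x*ln(x) - x) + B*x evaluated between x1
        # and x2; the sum is then normalized by the full range width de_int.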
return np.nansum(A*(x2*np.log(x2) -
x1*np.log(x1)-x2+x1) + B*(x2-x1))/de_int
def _loglin(self, e_int, xs, low, high):
if low is not None or high is not None:
interp = interp1d(e_int, np.log(xs))
if low in e_int:
xs = xs[e_int >= low]
e_int = e_int[e_int >= low]
elif low is not None and low > e_int[0]:
low_xs = np.e ** interp(low)
xs = np.insert(xs[e_int > low], 0, low_xs)
e_int = np.insert(e_int[e_int > low], 0, low)
if high in e_int:
xs = xs[e_int <= high]
e_int = e_int[e_int <= high]
elif high is not None:
high_xs = np.e ** interp(high)
xs = np.append(xs[e_int < high], high_xs)
e_int = np.append(e_int[e_int < high], high)
de_int = float(e_int[-1]-e_int[0])
x1 = e_int[:-1]
x2 = e_int[1:]
y1 = xs[:-1]
y2 = xs[1:]
A = (np.log(y1)-np.log(y2))/(x1-x2)
B = np.log(y1) - A*x1
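        # ln(y) is linear in x: y(x) = exp(A*x + B), whose exact integral
        # over [x1, x2] is (y2 - y1)/A, so B drops out of the result.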
return np.nansum((y2-y1)/A)/de_int
def _loglog(self, e_int, xs, low, high):
if low is not None or high is not None:
interp = interp1d(np.log(e_int), np.log(xs))
if low in e_int:
xs = xs[e_int >= low]
e_int = e_int[e_int >= low]
elif low is not None and low > e_int[0]:
low_xs = np.e ** interp(np.log(low))
xs = np.insert(xs[e_int > low], 0, low_xs)
e_int = np.insert(e_int[e_int > low], 0, low)
if high in e_int:
xs = xs[e_int <= high]
e_int = e_int[e_int <= high]
elif high is not None:
high_xs = np.e ** interp(np.log(high))
xs = np.append(xs[e_int < high], high_xs)
e_int = np.append(e_int[e_int < high], high)
de_int = float(e_int[-1]-e_int[0])
x1 = e_int[:-1]
x2 = e_int[1:]
y1 = xs[:-1]
y2 = xs[1:]
A = - np.log(y2/y1)/np.log(x1/x2)
B = - (np.log(y1)*np.log(x2) - np.log(y2)*np.log(x1))/np.log(x1/x2)
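        # Each panel is a power law y(x) = e**B * x**A, so the exact integral
        # over [x1, x2] is e**B/(A+1) * (x2**(A+1) - x1**(A+1)).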
        return np.nansum(np.e**B / (A+1) *
                         (x2**(A+1) - x1**(A+1))/de_int)
def _chargedparticles(self, e_int, xs, flags=None):
q = flags['Q']
if q > 0:
T = 0
else:
T = q
de_int = float(e_int[-1]-e_int[0])
x1 = e_int[:-1]
x2 = e_int[1:]
y1 = xs[:-1]
y2 = xs[1:]
B = np.log(y2*x2/(x1*y1)) / (1/(x1-T)**0.5 - 1/(x2-T)**0.5)
A = np.e**(B/(x1-T)**0.5)*y1*x1
# FIXME
raise NotImplementedError('see docs for more details.')
def integrate_tab_range(self, intscheme, e_int, xs, low=None, high=None):
"""integrate_tab_range(intscheme, e_int, xs, low=None, high=None)
Integrates across one tabulation range.
Parameters
----------
intscheme : int or float
The interpolation scheme used in this range.
e_int : array
The energies at which we have xs data.
xs : array
The xs data corresponding to e_int.
low, high : float
Lower and upper bounds within the tabulation range to start/stop at.
Returns
-------
sigma_g : float
The group xs.
"""
with np.errstate(divide='ignore', invalid='ignore'):
# each of these functions returns a normalized integration
# over the range
return self.intdict[intscheme](e_int, xs, low, high)
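    # Note: self.intdict is defined at construction time (outside this
    # excerpt); it is assumed to follow the ENDF INT convention when mapping
    # scheme numbers to the handlers above, i.e. roughly
    #   {1: self._histogram, 2: self._linlin, 3: self._linlog,
    #    4: self._loglin, 5: self._loglog, 6: self._chargedparticles}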
def _cont_and_update(self, flags, keys, data, total_lines):
flags.update(self._get_cont(keys, data[total_lines]))
return flags, total_lines+1
def _nls_njs_loop(self, L_keys, j_keys, itemkeys, data, total_lines,
range_flags, subsection_dict):
nls = int(range_flags['NLS'])
for nls_iter in range(nls):
if j_keys is None:
L_flags, items, lines = self._get_list(
L_keys, itemkeys, data[total_lines:])
total_lines += lines
spi, L = range_flags['SPI'], L_flags['L']
subsection_dict[spi, L] = items
else:
L_flags = self._get_cont(L_keys, data[total_lines])
total_lines += 1
njs = int(L_flags['NJS'])
for njs_iter in range(njs):
j_flags, items, lines = self._get_list(
j_keys, itemkeys, data[total_lines:])
total_lines += lines
items.update(j_flags)
spi, L, aj = range_flags['SPI'], L_flags['L'], j_flags['AJ']
subsection_dict[(spi, L, aj)] = items
return total_lines
def _read_res(self, mat_id):
"""_read_res(mat_id)
Read the resonance data from one material in the library and updates
self.structure.
Parameters
-----------
mat_id: int
Material id.
"""
lrp = self.structure[mat_id]['matflags']['LRP']
if (lrp == -1 or mat_id in (-1, 0)):
# If the LRP flag for the material is -1,
# there's no resonance data.
# Also if the mat id is invalid.
#
            # However, other methods expect _read_res to set
            # structure[nuc]['data'], so fill it with a single
# entry for mat_id and empty values:
self.structure[mat_id]['data'].update(
{mat_id: {'resolved': [],
'unresolved': [],
'datadocs': [],
'xs': {},
'output': {'channel1': [],
'channel2': []},
'isotope_flags': {}}})
else:
# Load the resonance data.
mf2 = self.get_rx(mat_id, 2, 151).reshape(-1, 6)
self.structure[mat_id]['matflags'].update(
self._get_head(['ZA', 'AWR', 0, 0, 'NIS', 0], mf2[0]))
total_lines = 1
for isotope_num in range(
int(self.structure[mat_id]['matflags']['NIS'])):
total_lines += self._read_nis(mf2[total_lines:], lrp, mat_id)
for isotope in self.structure[mat_id]['data'].values():
isotope['resolved'].sort()
isotope['unresolved'].sort()
def _read_nis(self, isotope_data, lrp, mat_id):
"""_read_nis(isotope_data, lrp, mat_id)
Read resonance data for a specific isotope.
Parameters
-----------
isotope_data: 2D array
The section of the resonance data to read. The isotope starts at the
top of this.
lrp: int
A flag denoting the type of data in the isotope. Exact meaning of
this flag can be found in ENDF Manual pp.50-51.
mat_id: int
Material ZZAAAM.
Returns
--------
total_lines: int
The number of lines the isotope takes up.
"""
isotope_flags = self._get_cont(['ZAI', 'ABN', 0, 'LFW', 'NER', 0],
isotope_data[0])
# according to endf manual, there is no specification
# for metastable states in ZAI
        # if we have a LIS0 != 0 we add the state to all isotopes
if self.structure[mat_id]['matflags']['LIS0'] == 0:
nuc_i = nucname.id(int(isotope_flags['ZAI']*10))
else:
nuc_i = nucname.id(int(isotope_flags['ZAI']*10 +
self.structure[mat_id]['matflags']['LIS0']))
self.structure[mat_id]['data'].update(
{nuc_i: {'resolved': [],
'unresolved': [],
'datadocs': [],
'xs': {},
'output': {'channel1': [],
'channel2': []},
'isotope_flags': isotope_flags}})
total_lines = 1
for er in range(int(isotope_flags['NER'])):
total_lines += self._read_subsection(isotope_data[total_lines:],
isotope_flags,
mat_id,
nuc_i)
return total_lines
def _read_subsection(self, subsection, isotope_flags, mat_id, nuc_i):
"""Read resonance data for a specific energy range subsection.
Parameters
-----------
subsection: 2D array
The section of the resonance data to read. The energy range
subsection starts at the top of this.
range_flags: dict
Dictionary of flags inherited from the range.
isotope_flags: dict
Dictionary of flags inherited from the isotope.
mat_id: int
Material ZZAAAM.
nuc_i: int
Isotope ZZAAAM.
Returns
--------
total_lines: int
The number of lines the energy range subsection takes up.
"""
range_flags = self._get_cont(('EL', 'EH', 'LRU', 'LRF', 'NRO', 'NAPS'),
subsection[0])
total_lines = 1
lru = int(round(range_flags['LRU']))
lru_list = [self._read_ap_only, self._read_resolved,
self._read_unresolved]
total_lines += lru_list[lru](subsection[1:],
range_flags,
isotope_flags,
mat_id,
nuc_i)
return total_lines
def _read_resolved(self, subsection, range_flags, isotope_flags, mat_id,
nuc_i):
"""Read the subsection for a resolved energy range.
Parameters
-----------
subsection: 2D array
The section of the resonance data to read. The energy range
subsection starts at the top of this.
range_flags: dict
Dictionary of flags inherited from the range.
isotope_flags: dict
Dictionary of flags inherited from the isotope.
mat_id: int
ZZAAAM of the material.
nuc_i: int
ZZAAAM of the isotope.
Returns
--------
total_lines: int
The number of lines taken up by the subsection.
"""
def read_kbks(nch, subsection, aj_data, total_lines):
for ch in range(nch):
lbk = int(subsection[total_lines][4])
lbk_list_keys = {2: ('R0', 'R1', 'R2', 'S0', 'S1', 0),
3: ('R0', 'SO', 'GA', 0, 0, 0)}
aj_data['ch{}'.format(ch)] = {'LBK': lbk}
ch_data = aj_data['ch{}'.format(ch)]
if lbk == 0:
total_lines += 2
elif lbk == 1:
total_lines += 2
                    rbr, rbr_size = self._get_tab1(
(0, 0, 0, 0, 'NR', 'NP'), ('e_int', 'RBR'),
subsection[total_lines:])[1:3]
total_lines += rbr_size
ch_data['RBR'] = rbr
rbi, rbi_size = self._get_tab1(
(0, 0, 0, 0, 'NR', 'NP'), ('e_int', 'RBI'),
(subsection[total_lines:]))[1:3]
total_lines += rbi_size
ch_data['RBI'] = rbi
else:
ch_data, total_lines = self._cont_and_update(
ch_data, ('ED', 'EU', 0, 0, 'LBK', 0), subsection,
total_lines)
ch_data, total_lines = self._cont_and_update(
ch_data, lbk_list_keys[lbk], subsection,
total_lines)
return total_lines
def read_kpss(nch, subsection, aj_data, total_lines):
for ch in range(nch):
ch_data = aj_data['ch{}'.format(ch)]
lps = subsection[total_lines][4]
ch_data['LPS'] = lps
total_lines += 2
if lps == 1:
psr, psr_size = self._get_tab1(
(0, 0, 0, 0, 'NR', 'NP'), ('e_int', 'PSR'),
subsection[total_lines:])[1:3]
total_lines += psr_size
ch_data['PSR'] = psr
psi, psi_size = self._get_tab1(
(0, 0, 0, 0, 'NR', 'NP'), ('e_int', 'PSI'),
(subsection[total_lines:]))[1:3]
total_lines += psi_size
ch_data['PSI'] = psi
return total_lines
lrf = int(range_flags['LRF'])
subsection_dict = rxdata.DoubleSpinDict({})
headers = [None,
('SPI', 'AP', 0, 0, 'NLS', 0),
('SPI', 'AP', 0, 0, 'NLS', 0),
('SPI', 'AP', 'LAD', 0, 'NLS', 'NLSC'),
('SPI', 'AP', 0, 0, 'NLS', 0),
None,
None,
(0, 0, 'IFG', 'KRM', 'NJS', 'KRL')]
if range_flags['NRO'] > 0:
intdata, total_lines = self._get_tab1((0, 0, 0, 0, 'NR', 'NP'),
('E', 'AP'),
subsection)[1:3]
subsection_dict['int'] = intdata
else:
total_lines = 0
range_flags, total_lines = self._cont_and_update(
range_flags, headers[lrf], subsection, total_lines)
lrf_L_keys = [None,
('AWRI', 'QX', 'L', 'LRX', '6*NRS', 'NRS'),
('AWRI', 'QX', 'L', 'LRX', '6*NRS', 'NRS'),
('AWRI', 'APL', 'L', 0, '6*NRS', 'NRS'),
(0, 0, 'L', 0, 'NJS', 0)]
lrf_J_keys = [None, None, None, None, ('AJ', 0, 0, 0, '12*NLJ', 'NLJ')]
lrf_itemkeys = [None,
('ER', 'AJ', 'GT', 'GN', 'GG', 'GF'),
('ER', 'AJ', 'GT', 'GN', 'GG', 'GF'),
('ER', 'AJ', 'GN', 'GG', 'GFA', 'GFB'),
('DET', 'DWT', 'GRT', 'GIT', 'DEF', 'DWF',
'GRF', 'GIF', 'DEC', 'DWC', 'GRC', 'GIC')]
if lrf == 4:
# Adler-Adler
bg_flags, bg, bg_size = self._get_list(
('AWRI', 0, 'LI', 0, '6*NX', 'NX'),
('A1', 'A2', 'A3', 'A4', 'B1', 'B2'),
subsection[total_lines:])
total_lines += bg_size
subsection_dict['bg'] = bg
if lrf < 5:
total_lines = self._nls_njs_loop(lrf_L_keys[lrf],
lrf_J_keys[lrf],
lrf_itemkeys[lrf],
subsection,
total_lines,
range_flags,
subsection_dict)
if lrf == 7:
# R-Matrix Limited Format (ENDF Manual pp. 62-67)
# Particle pair descriptions for the whole range
particle_pair_data, pp_size = self._get_list(
(0, 0, 'NPP', 0, '12*NPP', '2*NPP'),
('MA', 'MB', 'ZA', 'ZB', 'IA', 'IB',
'Q', 'PNT', 'SHF', 'MT', 'PA', 'PB'),
subsection[total_lines:])[1:3]
total_lines += pp_size
range_flags.update(particle_pair_data)
for aj_section in range(int(range_flags['NJS'])):
# Read first LIST record, with channel descriptions
aj_flags, ch_items, ch_size = self._get_list(
('AJ', 'PJ', 'KBK', 'KPS', '6*NCH', 'NCH'),
('IPP', 'L', 'SCH', 'BND', 'APE', 'APT'),
subsection[total_lines:])
total_lines += ch_size
# Second LIST record, with resonance energies and widths.
er_flags, er_data, er_size = self._get_list(
(0, 0, 0, 'NRS', '6*NX', 'NX'), ('ER',),
subsection[total_lines:])
total_lines += er_size
nch = int(aj_flags['NCH'])
er_array_width = (nch//6+1)*6
er_data = er_data['ER'].reshape(-1, er_array_width).transpose()
aj_data = {'ER': er_data[0],
'GAM': er_data[1:1+nch].transpose()}
aj_data.update(ch_items)
aj = aj_flags['AJ']
# Additional records
if aj_flags['KBK'] > 0:
lbk_list_keys = ((), (), # ('ED','EU',0,0,'LBK',0),
('R0', 'R1', 'R2', 'S0', 'S1', 0),
('R0', 'SO', 'GA', 0, 0, 0))
total_lines = read_kbks(nch, subsection,
aj_data, total_lines)
if aj_flags['KPS'] > 0:
total_lines = read_kpss(nch, subsection,
aj_data, total_lines)
subsection_dict[aj] = aj_data
el, eh = range_flags['EL'], range_flags['EH']
subsection_data = (el, eh, subsection_dict, range_flags)
isotope_dict = self.structure[mat_id]['data'][nuc_i]
isotope_dict['resolved'].append(subsection_data)
return total_lines
def _read_unresolved(self, subsection, range_flags, isotope_flags, mat_id,
nuc_i):
"""Read unresolved resonances of an energy subsection.
Parameters
-----------
subsection: array
Contains data for energy subsection.
range_flags: dict
Contains metadata flags for energy range.
isotope_flags: dict
            Contains flags for the isotope.
mat_id: int
Material ZZAAAM.
nuc_i: int
Isotope ZZAAAM.
Returns
--------
total_lines: int
"""
head_cont = ('SPI', 'AP', 'LSSF', 0, 'NLS', 0)
has_head_cont = {(0, 1): True, (1, 1): False,
(0, 2): True, (1, 2): True}
L_keys = {(0, 1): ('AWRI', 0, 'L', 0, '6*NJS', 'NJS'),
(1, 1): ('AWRI', 0, 'L', 0, 'NJS', 0),
(0, 2): ('AWRI', 0, 'L', 0, 'NJS', 0),
(1, 2): ('AWRI', 0, 'L', 0, 'NJS', 0)}
j_keys = {(0, 1): None,
(1, 1): (0, 0, 'L', 'MUF', 'NE+6', 0,
                           'D', 'AJ', 'AMUN', 'GN0', 'GG', 0),
(0, 2): ('AJ', 0, 'INT', 0, '6*NE+6', 'NE',
0, 0, 'AMUX', 'AMUN', 'AMUG', 'AMUF'),
(1, 2): ('AJ', 0, 'INT', 0, '6*NE+6', 'NE',
0, 0, 'AMUX', 'AMUN', 'AMUG', 'AMUF')}
itemkeys = {(0, 1): ('D', 'AJ', 'AMUN', 'GN0', 'GG', 0),
(1, 1): ('GF', ),
(0, 2): ('ES', 'D', 'GX', 'GN0', 'GG', 'GF'),
(1, 2): ('ES', 'D', 'GX', 'GN0', 'GG', 'GF')}
lfw, lrf = int(isotope_flags['LFW']), int(range_flags['LRF'])
subsection_dict = rxdata.DoubleSpinDict({})
if range_flags['NRO'] > 0:
tabhead, intdata, total_lines = self._get_tab1((0, 0, 0,
0, 'NR', 'NP'),
('E', 'AP'),
subsection)
subsection_dict['int'] = intdata
else:
total_lines = 0
if has_head_cont[(lfw, lrf)]:
range_flags, total_lines = self._cont_and_update(
range_flags, head_cont, subsection, total_lines)
if (lfw, lrf) == (1, 1):
# Case B in ENDF manual p.70
head_flags, es_array, lines = self._get_list(
('SPI', 'AP', 'LSSF', 0, 'NE', 'NLS'),
('ES', ),
subsection[total_lines:])
subsection_dict['ES'] = es_array['ES']
total_lines += lines
range_flags.update(head_flags)
total_lines = self._nls_njs_loop(L_keys[(lfw, lrf)],
j_keys[(lfw, lrf)],
itemkeys[(lfw, lrf)],
subsection,
total_lines,
range_flags,
subsection_dict)
el, eh = range_flags['EL'], range_flags['EH']
subsection_data = (el, eh, subsection_dict, range_flags)
isotope_dict = self.structure[mat_id]['data'][nuc_i]
isotope_dict['unresolved'].append(subsection_data)
return total_lines
def _read_ap_only(self, subsection, range_flags, isotope_flags, mat_id,
nuc_i):
'Read in scattering radius when it is the only resonance data given.'
subsection_dict = {}
if range_flags['NRO'] > 0:
tabhead, intdata, total_lines = self._get_tab1((0, 0, 0, 0,
'NR', 'NP'),
('E', 'AP'),
subsection)
subsection_dict['int'] = intdata
else:
total_lines = 0
range_flags, total_lines = self._cont_and_update(
range_flags, ('SPI', 'AP', 0, 0, 'NLS', 0), subsection, total_lines)
return total_lines
def _read_xs(self, nuc, mt, nuc_i=None):
"""Read in cross-section data. Read resonances with Library._read_res
first.
Parameters
-----------
nuc: int
id of material.
mt: int
Reaction number to find cross-section data of.
nuc_i: int
            Isotope to find; if None, defaults to nuc.
"""
nuc = nucname.id(nuc)
if nuc_i is None:
nuc_i = nuc
if 600 > mt > 500:
xsdata = self.get_rx(nuc, 23, mt).reshape(-1, 6)
else:
xsdata = self.get_rx(nuc, 3, mt).reshape(-1, 6)
total_lines = 0
head_flags = self._get_head(('ZA', 'AWR', 0, 0, 0, 0),
xsdata[total_lines])
total_lines += 1
int_flags, int_data, int_size = self._get_tab1(
('QM', 'QI', 0, 'LM', 'NR', 'NP'),
('e_int', 'xs'),
xsdata[total_lines:])
int_flags.update(head_flags)
isotope_dict = self.structure[nuc]['data'][nuc_i]
isotope_dict['xs'].update({mt: (int_data, int_flags)})
total_lines += int_size
def get_xs(self, nuc, mt, nuc_i=None):
"""get_xs(nuc, mt, nuc_i=None)
Grab cross-section data.
Parameters
----------
nuc: int
id of nuclide to read.
mt: int
ENDF reaction number to read.
nuc_i: int
id of isotope to read. Defaults to nuc.
Returns
-------
tuple
Returns a tuple with xs data in tuple[0] and flags in tuple[1].
"""
nuc = nucname.id(nuc)
if not nuc_i:
nuc_i = nuc
else:
nuc_i = nucname.id(nuc_i)
if (nuc not in self.structure) or (not self.structure[nuc]['data']):
self._read_res(nuc)
if nuc_i not in self.structure[nuc]['data'] or \
mt not in self.structure[nuc]['data'][nuc_i]['xs']:
self._read_xs(nuc, mt, nuc_i)
return self.structure[nuc]['data'][nuc_i]['xs'][mt]
def get_rx(self, nuc, mf, mt, lines=0):
"""get_rx(nuc, mf, mt, lines=0)
Grab the data from one reaction type.
Parameters
----------
nuc : int
id form of material to read from.
mf : int
ENDF file number (MF).
mt : int
ENDF reaction number (MT).
lines : int
Number of lines to read from this reaction, starting from the top.
Default value is 0, which reads in the entire reaction.
Returns
-------
data : NumPy array
Contains the reaction data in an Nx6 array.
"""
nuc = nucname.id(nuc)
if nuc in self.structure:
return self._read_nucmfmt(nuc, mf, mt, lines)
else:
raise ValueError('Material {} does not exist.'.format(nuc))
def _read_nucmfmt(self, nuc, mf, mt, lines):
"""Load in the data from one reaction into self.structure.
Parameters
----------
nuc : int
id of nuclide.
mf : int
ENDF file number (MF).
mt : int
ENDF reaction number (MT).
Returns
-------
array, 1d, float64
1d, float64 NumPy array containing the reaction data.
"""
opened_here = False
if isinstance(self.fh, basestring):
fh = open(self.fh, 'r')
opened_here = True
else:
fh = self.fh
try:
start, stop = self.mat_dict[nuc]['mfs'][mf, mt]
except KeyError as e:
msg = 'MT {1} not found in File {0}.'.format(mf, mt)
e.args = (msg,)
raise e
fh.readline()
fh.seek(start)
if lines == 0:
s = fh.read(stop-start)
else:
s = fh.read(lines*self.line_length)
        if opened_here:
            fh.close()
return fromendf_tok(s)
def at_end_of_tape(f):
"""Indicate whether file is positioned at the end of an ENDF tape.
Parameters
----------
f : file_like
File to check
Returns
-------
bool
Whether the file is at the end of the ENDF tape
"""
position = f.tell()
line = f.readline()
    if line == '' or line[66:70] == '  -1':
return True
else:
f.seek(position)
return False
def seek_material_end(f):
"""Position the file at the end of the ENDF material (MAT) currently being read.
Parameters
----------
f : file_like
File to position
"""
while True:
line = f.readline()
        if line[66:70] == '   0':
break
def seek_file_end(f):
"""Position the file at the end of the ENDF file (MF) currently being read.
Parameters
----------
f : file_like
File to position
"""
while True:
line = f.readline()
        if line[70:72] == ' 0':
break
def seek_section_end(f):
"""Position the file at the end of the ENDF section (MT) currently being read.
    Parameters
----------
f : file_like
File to position
"""
while True:
line = f.readline()
        if line[72:75] == '  0':
break
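# A minimal scanning sketch built from the helpers above (assumes
# 'tape.endf' names an ENDF-6 tape; the loop body is a placeholder):
#
#   with open('tape.endf') as tape:
#       tape.readline()                 # skip the tape identification (TPID)
#       while not at_end_of_tape(tape):
#           # ... parse or index the next material here ...
#           seek_material_end(tape)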
class Evaluation(object):
"""ENDF material evaluation with multiple files/sections
The Evaluation class provides a means to parse data from an ENDF-6 format
file and access it as stored internal Python objects. A summary of the
parsing capabilities is as follows:
    == === =============================================== ========
    MF MT  Description                                     Complete
    == === =============================================== ========
    1  451 Descriptive data and directory                  Yes
    1  452 Number of neutrons per fission                  Yes
    1  455 Delayed neutron data                            Yes
    1  456 Number of prompt neutrons per fission           Yes
    1  458 Components of fission energy release            Yes
    1  460 Delayed photon data                             Yes
    2  151 Resonance parameters                            Yes
    3  -   Reaction cross sections                         Yes
    4  -   Angular distributions                           Yes
    5  -   Energy distributions                            Yes
    6  -   Product energy-angle distributions              Yes
    7  2   Thermal elastic scattering                      Yes
    7  4   Thermal inelastic scattering                    Yes
    8  454 Independent fission yields                      Yes
    8  457 Radioactive decay data                          Yes
    8  459 Cumulative fission yields                       Yes
    8  -   Radioactive nuclide production                  Yes
    9  -   Multiplicities of radioactive products          Yes
    10 -   Production cross sections for radionuclides     Yes
    12 -   Photon production yield data                    Yes
    13 -   Photon production cross sections                Yes
    14 -   Photon angular distributions                    Yes
    15 -   Continuous photon energy spectra                Yes
    23 -   Photon and electron cross sections              Yes
    26 -   Secondary distributions for electro-atomic data Yes
    27 -   Atomic form factors                             Yes
    28 533 Atomic relaxation data                          Yes
    30 1   Directory and correspondence table              No
    30 2   Covariance matrix                               No
    30 -   Sensitivities                                   No
    31 -   Covariances of fission                          No
    32 -   Covariances of resonance parameters             No
    33 -   Covariances of neutron cross sections           No
    34 -   Covariances for angular distributions           No
    35 -   Covariances for energy distributions            No
    40 -   Covariances for radionuclide production         No
    == === =============================================== ========
Attributes
----------
atomic_relaxation : dict
Dictionary containing atomic relaxation data from MF=28, MT=533. If the
evaluation is not an atomic relaxation sublibrary, the dictionary is
empty.
decay : dict
Dictionary containing decay data from MF=8. If the evaluation is not
from a decay sublibrary, the dictionary is empty.
fission : dict
        Dictionary containing fission-related data, such as neutrons released
from fission (MF=1, MT=452,455,456), components of energy release (MF=1,
MT=458), delayed photons from fission (MF=1, MT=460), and
cumulative/independent fission yields (MF=8, MT=454,459).
info : dict
        Miscellaneous information about the evaluation.
target : dict
Information about the target material, such as its mass, isomeric state,
whether it's stable, and whether it's fissionable.
projectile : dict
Information about the projectile such as its mass.
reaction_list : list of 4-tuples
List of sections in the evaluation. The entries of the tuples are the
file (MF), section (MT), number of records (NC), and modification
indicator (MOD).
reactions : collections.OrderedDict
Dictionary whose keys are MT numbers and values are Reaction instances.
resonances : dict
Resolved resonance data from MF=2, MT=151.
thermal_elastic : dict
Coherent and/or incoherent thermal elastic data from MF=7, MT=2.
thermal_inelastic : dict
Incoherent thermal inelastic data from MF=7, MT=4.
"""
def __init__(self, filename_or_handle, verbose=True):
        if hasattr(filename_or_handle, 'read'):
self._fh = filename_or_handle
else:
self._fh = open(filename_or_handle, 'r')
self._verbose = verbose
self._veryverbose = False
# Create public attributes
self.atomic_relaxation = {}
self.decay = {}
self.fission = {'nu': {}, 'energy_release': {}, 'delayed_photon': {},
'yield_independent': {}, 'yield_cumulative': {}}
self.info = {}
self.target = {}
self.projectile = {}
self.reaction_list = []
self.reactions = OrderedDict()
self.resonances = {}
self.thermal_elastic = {}
self.thermal_inelastic = {}
# Determine MAT number for this evaluation
MF = 0
while MF == 0:
position = self._fh.tell()
line = self._fh.readline()
MF = int(line[70:72])
self.material = int(line[66:70])
# Save starting position for this evaluation
self._fh.seek(position)
# First we need to read MT=1, MT=451 which has a description of the ENDF
# file and a list of what data exists in the file
self._read_header()
# Save starting position
self._start_position = self._fh.tell()
def read(self, reactions=None, skip_mf=[], skip_mt=[]):
"""Reads reactions from the ENDF file of the Evaluation object. If no
arguments are provided, this method will read all the reactions in the
file. A single reaction can be read if provided.
Parameters
----------
reactions : tuple or list of tuple, optional
A single reaction in the following format: (MF, MT)
skip_mf : list of int, optional
Files (MF) which should not be read
skip_mt : list of int, optional
Reactions (MT) which should not be read
"""
# Make sure file is positioned correctly
self._fh.seek(self._start_position)
if isinstance(reactions, tuple):
reactions = [reactions]
while True:
# Find next section
while True:
position = self._fh.tell()
line = self._fh.readline()
MAT = int(line[66:70])
MF = int(line[70:72])
MT = int(line[72:75])
if MT > 0 or MAT == 0:
self._fh.seek(position)
break
# If end of material reached, exit loop
if MAT == 0:
break
# If there are files/reactions requested to be skipped, check them
if MF in skip_mf:
seek_file_end(self._fh)
continue
if MT in skip_mt:
seek_section_end(self._fh)
continue
# If reading is restricted to certain reactions, check here
if reactions and (MF, MT) not in reactions:
seek_section_end(self._fh)
continue
# File 1 data
if MF == 1:
if MT == 452:
# Number of total neutrons per fission
self._read_total_nu()
elif MT == 455:
# Number of delayed neutrons per fission
self._read_delayed_nu()
elif MT == 456:
# Number of prompt neutrons per fission
self._read_prompt_nu()
elif MT == 458:
# Components of energy release due to fission
self._read_fission_energy()
elif MT == 460:
self._read_delayed_photon()
elif MF == 2:
# Resonance parameters
if MT == 151:
self._read_resonances()
else:
seek_section_end(self._fh)
elif MF == 3:
# Reaction cross sections
self._read_reaction_xs(MT)
elif MF == 4:
# Angular distributions
self._read_angular_distribution(MT)
elif MF == 5:
# Energy distributions
self._read_energy_distribution(MT)
elif MF == 6:
# Product energy-angle distributions
self._read_product_energy_angle(MT)
elif MF == 7:
# Thermal scattering data
if MT == 2:
self._read_thermal_elastic()
if MT == 4:
self._read_thermal_inelastic()
elif MF == 8:
# decay and fission yield data
if MT == 454:
self._read_independent_yield()
elif MT == 459:
self._read_cumulative_yield()
elif MT == 457:
self._read_decay()
else:
                    self._read
import numpy as np
cimport numpy as np
cimport cython
DIST_DTYPE = np.float32
PATH_DTYPE = np.int_  # np.int was removed from NumPy; np.int_ matches np.int_t
ctypedef np.float32_t DIST_DTYPE_t
ctypedef np.int_t PATH_DTYPE_t
@cython.boundscheck(False) # turn off bounds-checking for entire function
@cython.wraparound(False) # turn off negative index wrapping for entire function
def dynamic_time_warping_c(np.ndarray[DIST_DTYPE_t, ndim=2] precomputed_distances):
assert precomputed_distances.dtype == DIST_DTYPE
cdef int n = precomputed_distances.shape[0]
cdef int m = precomputed_distances.shape[1]
    assert n > 1 and m > 1
cdef np.ndarray[DIST_DTYPE_t, ndim=2] cost = np.full([n + 1, m + 1], np.inf, dtype=DIST_DTYPE)
cost[0, 0] = 0
cdef DIST_DTYPE_t dist
cdef int i, j
for i in range(1, n + 1):
for j in range(1, m + 1):
dist = precomputed_distances[i-1, j-1]
cost[i, j] = dist + min(cost[i-1, j],
cost[i, j-1],
cost[i-1, j-1])
cdef np.ndarray[PATH_DTYPE_t, ndim=2] path = compute_path_c(cost)
return cost[1:, 1:], path
@cython.boundscheck(False) # turn off bounds-checking for entire function
@cython.wraparound(False) # turn off negative index wrapping for entire function
def compute_path_c(np.ndarray[DIST_DTYPE_t, ndim=2] cost):
cdef int i = cost.shape[0]
cdef int j = cost.shape[1]
cdef np.ndarray[PATH_DTYPE_t, ndim=2] p = np.zeros([i * j, 2], dtype=PATH_DTYPE)
cdef int p_idx = p.shape[0] - 1
i -= 2
j -= 2
p[p_idx, 0] = i
p[p_idx, 1] = j
p_idx -= 1
cdef int k
while i > 0 or j > 0:
k = np.argmin((cost[i, j], cost[i, j + 1], cost[i + 1, j]))
if k == 0:
i -= 1
j -= 1
elif k == 1:
i -= 1
else:
j -= 1
p[p_idx, 0] = i
p[p_idx, 1] = j
p_idx -= 1
return p[(p_idx + 1):]
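# Usage sketch (assumes this module has been compiled, e.g. with cythonize,
# and imported as `dtw`; the sequences and the metric are illustrative):
#
#   import numpy as np
#   import dtw
#
#   a = np.array([0.0, 1.0, 2.0], dtype=np.float32)
#   b = np.array([0.0, 2.0], dtype=np.float32)
#   dists = np.abs(a[:, None] - b[None, :])       # pairwise |a_i - b_j|
#   cost, path = dtw.dynamic_time_warping_c(dists)
#   print(cost[-1, -1])                           # total alignment cost
#   print(path)                                   # warping path, (i, j) pairs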
import scipy as sp
import random
import pylab
class GeneticAlgorithm(object):
"""
A genetic algorithm is a model of biological evolution. It
maintains a population of chromosomes. Each chromosome is
represented as a list of 0's and 1's. A fitness function must be
defined to score each chromosome. Initially, a random population
is created. Then a series of generations are executed. Each
generation, parents are selected from the population based on
their fitness. More highly fit chromosomes are more likely to be
selected to create children. With some probability crossover will
be done to model sexual reproduction. With some very small
probability mutations will occur. A generation is complete once
all of the original parents have been replaced by children. This
    process continues until the maximum generation is reached or the
    isDone method returns True.
"""
def __init__(self, length, popSize, verbose=False):
self.verbose = verbose # Set to True to see more info displayed
self.length = length # Length of the chromosome
self.popSize = popSize # Size of the population
self.bestEver = None # Best member ever in this evolution
self.bestEverScore = 0 # Fitness of best member ever
self.population = None # Population is a list of chromosomes
self.scores = None # Fitnesses of all members of population
self.totalFitness = None # Total fitness in entire population
self.generation = 0 # Current generation of evolution
self.maxGen = 100 # Maximum generation
self.pCrossover = None # Probability of crossover
self.pMutation = None # Probability of mutation (per bit)
self.bestList = [] # Best fitness per generation
self.avgList = [] # Avg fitness per generation
print("Executing genetic algorithm")
print("Chromosome length:", self.length)
print("Population size:", self.popSize)
def initializePopulation(self):
"""
Initialize each chromosome in the population with a random
series of 1's and 0's.
Returns: None
Result: Initializes self.population
"""
raise NotImplementedError("TODO")
def evaluatePopulation(self):
"""
Computes the fitness of every chromosome in population. Saves the
fitness values to the list self.scores. Checks whether the
best fitness in the current population is better than
self.bestEverScore. If so, prints a message that a new best
was found and its score, updates this variable and saves the
chromosome to self.bestEver. Computes the total fitness of
the population and saves it in self.totalFitness. Appends the
current bestEverScore to the self.bestList, and the current
average score of the population to the self.avgList.
Returns: None
"""
raise NotImplementedError("TODO")
def selection(self):
"""
Each chromosome's chance of being selected for reproduction is
based on its fitness. The higher the fitness the more likely
it will be selected. Uses the roulette wheel strategy on
self.scores.
Returns: A COPY of the selected chromosome.
"""
raise NotImplementedError("TODO")
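    # One possible roulette-wheel sketch (an illustration, not the assigned
    # solution): spin a uniform value over the total fitness and walk the
    # population until the running sum passes it, e.g.
    #
    #   spin = random.uniform(0, self.totalFitness)
    #   running = 0
    #   for i in range(self.popSize):
    #       running += self.scores[i]
    #       if running >= spin:
    #           return self.population[i][:]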
def crossover(self, parent1, parent2):
"""
With probability self.pCrossover, recombine the genetic
material of the given parents at a random location between
1 and the length-1 of the chromosomes. If no crossover is
performed, then return the original parents.
Returns: Two children
"""
raise NotImplementedError("TODO")
def mutation(self, chromosome):
"""
With probability self.pMutation tested at each position in the
chromosome, flip value.
Returns: None
"""
raise NotImplementedError("TODO")
def oneGeneration(self):
"""
Execute one generation of the evolution. Each generation,
repeatedly select two parents, call crossover to generate two
children. Call mutate on each child. Finally add both
children to the new population. Continue until the new
population is full. Replaces self.pop with a new population.
Returns: None
"""
raise NotImplementedError("TODO")
def evolve(self, maxGen, pCrossover=0.7, pMutation=0.001):
"""
Run a series of generations until a maximum generation is
reached or self.isDone() returns True.
Returns the best chromosome ever found over the course of
the evolution, which is stored in self.bestEver.
"""
raise NotImplementedError("TODO")
def plotStats(self, title=""):
"""
Plots a summary of the GA's progress over the generations.
Adds the given title to the plot.
"""
gens = range(self.generation+1)
pylab.plot(gens, self.bestList, label="Best")
pylab.plot(gens, self.avgList, label="Average")
pylab.legend(loc="upper left")
pylab.xlabel("Generations")
pylab.ylabel("Fitness")
        if len(title) != 0:
pylab.title(title)
pylab.show()
def fitness(self, chromosome):
"""
The fitness function will change for each problem. Therefore
it is not defined here. To use this class to solve a
particular problem, inherit from this class and define this
method.
"""
# Override this
pass
def isDone(self):
"""
The stopping critera will change for each problem. Therefore
it is not defined here. To use this class to solve a
particular problem, inherit from this class and define this
method.
"""
# Override this
pass
cdef extern from "entropy.c":
int read_all_from(char* address, int limit)
cpdef py_read_all_from(py_address, limit):
py_byte_string = py_address.encode('UTF-8')
cdef char* c_string = py_byte_string
return read_all_from(c_string, limit)
cdef extern from "entropy.c":
int increment(int a)
cpdef py_increment(a):
return increment(a)
cdef extern from "entropy.c":
int calculation(
char* data_file_name,
char* results_file_name,
int compartment_size,
int subcompartment_num,
int result_placement
)
cpdef py_calculation(
data_file_name,
results_file_name,
compartment_size,
subcompartment_num,
result_placement
):
data_file_name_byte_string = data_file_name.encode('UTF-8')
cdef char* data_file_name_c_string = data_file_name_byte_string
results_file_name_byte_string = results_file_name.encode('UTF-8')
cdef char* results_file_name_c_string = results_file_name_byte_string
return calculation(
data_file_name_c_string,
results_file_name_c_string,
compartment_size,
subcompartment_num,
result_placement
)
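# Hypothetical usage once this module is compiled (the module name, file
# names and parameter values below are placeholders, not part of entropy.c):
#
#   from entropy_wrapper import py_calculation
#   status = py_calculation('data.txt', 'results.txt', 64, 4, 0)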
"""
from libc.stdio cimport FILE
from cpython cimport array
cdef extern from "entropy.c":
int following_compartments(
FILE* output,
double* compartments,
int compartment_size,
int* compartments_y,
double* compartment_y_address,
int subcompartment_num,
double* latest_data_address,
double* min_y_address,
double* max_y_address,
int* min_y_index_address,
int* max_y_index_address,
int* data_size_address,
int* input_size_address,
double* result_address
)
cpdef py_following_compartments(
output,
compartments,
compartment_size,
compartments_y,
compartment_y_address,
subcompartment_num,
latest_data_address,
min_y_address,
max_y_address,
min_y_index_address,
max_y_index_address,
data_size_address,
input_size_address,
result_address
):
return following_compartments(
output,
        compartments,
compartment_size,
compartments_y,
compartment_y_address,
subcompartment_num,
latest_data_address,
min_y_address,
max_y_address,
min_y_index_address,
max_y_index_address,
data_size_address,
input_size_address,
result_address
)
"""
from ConvexHull2D cimport ConvexHull2D as _ConvexHull2D
from DPosition cimport DPosition2 as _DPosition2
from DBoundingBox cimport DBoundingBox2 as _DBoundingBox2
def enclosesXY(self, float x, float y):
"""
Parameters:
x (float)
y (float)
Returns:
int
"""
cdef _DPosition2 pos
pos[0] = x
pos[1] = y
return self.inst.get().encloses(pos)
def getHullPointsNPY(self):
"""
Returns:
result (np.ndarray[np.float32_t, ndim=2])
"""
cdef libcpp_vector[_DPosition2] points = self.inst.get().getHullPoints()
cdef np.ndarray[np.float32_t, ndim=2] result
    cdef int n = points.size()
result = np.zeros( [n,2], dtype=np.float32)
cdef libcpp_vector[_DPosition2].iterator it = points.begin()
cdef int i = 0
    while it != points.end():
result[i,0] = deref(it)[0]
result[i,1] = deref(it)[1]
inc(it)
i += 1
return result
def setHullPointsNPY(self, np.ndarray[np.float32_t, ndim=2] points):
"""
Parameters:
points (np.ndarray[np.float32_t, ndim=2])
"""
cdef _ConvexHull2D * hull = self.inst.get()
cdef int N = points.shape[0]
cdef int i
cdef libcpp_vector[_DPosition2] vec
cdef _DPosition2 p
for i in range(N):
p[0] = points[i,0]
p[1] = points[i,1]
vec.push_back(p)
self.inst.get().setHullPoints(vec)
def getBoundingBox2D(self):
"""
Returns:
((double,double),(double,double))
"""
cdef _DBoundingBox2 box = self.inst.get().getBoundingBox()
cdef _DPosition2 minp = box.minPosition()
cdef _DPosition2 maxp = box.maxPosition()
return (minp[0], minp[1]), (maxp[0], maxp[1])
def addPointXY(self, x, y):
"""
Parameters:
x (double)
y (double)
"""
cdef _DPosition2 p
p[0] = x
p[1] = y
self.inst.get().addPoint(p)
def addPointsNPY(self, np.ndarray[np.float32_t, ndim=2] points):
"""
Parameters:
points (np.ndarray[np.float32_t, ndim=2])
"""
cdef _ConvexHull2D * hull = self.inst.get()
cdef int N = points.shape[0]
cdef int i
cdef libcpp_vector[_DPosition2] vec
cdef _DPosition2 p
for i in range(N):
p[0] = points[i,0]
p[1] = points[i,1]
vec.push_back(p)
self.inst.get().addPoints(vec)
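# Usage sketch, assuming these addon methods are merged into the generated
# ConvexHull2D wrapper class (e.g. by autowrap):
#
#   import numpy as np
#   hull = ConvexHull2D()
#   pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]], dtype=np.float32)
#   hull.setHullPointsNPY(pts)
#   (xmin, ymin), (xmax, ymax) = hull.getBoundingBox2D()
#   inside = hull.enclosesXY(0.25, 0.25)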
# -*- coding: utf-8 -*-
# distutils: language=c
# cython: initializedcheck=False
# cython: nonecheck=False
# cython: overflowcheck=False
# cython: boundscheck=False
from myawesomelib.types cimport data_t, ufunc
cimport numpy as cnp
cdef apply_ufunc(cnp.ndarray x, ufunc f, bint inplace=?)
cdef data_t apply_ufunc0(data_t x, ufunc f) nogil
cdef data_t[:] apply_ufunc1(data_t[:] x, ufunc f) nogil
cdef data_t[:, :] apply_ufunc2(data_t[:, :] x, ufunc f) nogil
cdef data_t[:, :, :] apply_ufunc3(data_t[:, :, :] x, ufunc f) nogil
cdef data_t[:, :, :, :] apply_ufunc4(data_t[:, :, :, :] x, ufunc f) nogil
cdef data_t[:, :, :, :, :] apply_ufunc5(data_t[:, :, :, :, :] x, ufunc f) nogil
cdef data_t[:, :, :, :, :, :] apply_ufunc6(data_t[:, :, :, :, :, :] x, ufunc f) nogil
cdef data_t[:, :, :, :, :, :, :] apply_ufunc7(data_t[:, :, :, :, :, :, :] x, ufunc f) nogil
# Present for backwards compatibility
from cpython.dict cimport *
# distutils: language = c++
"""
Provides Cython header for "Solitaire.h".
"""
from libcpp.string cimport string
__author__ = 'Tiziano Bettio'
__license__ = 'MIT'
__version__ = '0.0.10'
__copyright__ = """Copyright (c) 2020 Tiziano Bettio
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
cdef extern from "Move.cpp":
pass
cdef extern from "Move.h":
cdef struct Move:
pass
cdef extern from "Card.cpp":
pass
cdef extern from "Card.h":
pass
cdef extern from "HashMap.h":
pass
cdef extern from "Pile.cpp":
pass
cdef extern from "Pile.h":
pass
cdef extern from "Random.cpp":
pass
cdef extern from "Random.h":
pass
cdef extern from "Solitaire.cpp":
pass
cdef extern from "Solitaire.h":
cdef enum SolveResult "SolveResult":
CouldNotComplete = -2,
SolvedMayNotBeMinimal = -1,
Impossible = 0,
SolvedMinimal = 1
cdef cppclass Solitaire:
void Initialize()
int Shuffle1(int dealNumber)
void Shuffle2(int dealNumber)
void ResetGame()
void ResetGame(int drawCount)
SolveResult SolveMinimalMultithreaded(int numThreads, int maxClosedCount) nogil
SolveResult SolveMinimal(int maxClosedCount) nogil
SolveResult SolveFast(int maxClosedCount, int twoShift, int threeShift) nogil
int MovesMadeCount()
int MovesMadeNormalizedCount()
int FoundationCount()
int DrawCount()
void SetDrawCount(int drawCount)
string GetMoveInfo(Move move)
bint LoadSolitaire(const string& cardSet)
string GetSolitaire()
        bint LoadPysol(const string& cardSet)
string GetPysol()
string GameDiagram()
string GameDiagramPysol()
string MovesMade()
Move operator[](int index)
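# A wrapping sketch for a companion .pyx (hypothetical names; assumes
# Solitaire is default-constructible so it can live inside a cdef class):
#
#   cdef class PySolitaire:
#       cdef Solitaire inst
#       def __cinit__(self, int deal):
#           self.inst.Initialize()
#           self.inst.Shuffle2(deal)
#           self.inst.ResetGame()
#       def solve_minimal(self, int max_closed):
#           cdef SolveResult r
#           with nogil:
#               r = self.inst.SolveMinimal(max_closed)
#           return r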
<|end_of_text|>"""The cmSystemTools Cython declaration."""
cdef extern from "cmSystemTools.h" namespace "cmSystemTools":
void FindExecutableDirectory(char * argv0)
"""
Implements ObjectID base class and global object registry.
It used to be that we could store the HDF5 identifier in an ObjectID
and simply close it when the object was deallocated. However, since
HDF5 1.8.5 they have started recycling object identifiers, which
breaks this system.
We now use a global registry of object identifiers. This is implemented
via a dictionary which maps an integer representation of the identifier
to a weak reference of an ObjectID. There is only one ObjectID instance
in the universe for each integer identifier. When the HDF5 reference
count for an identifier reaches zero, HDF5 closes the object and reclaims
the identifier. When this occurs, the identifier and weak reference must
be deleted from the registry. If an ObjectID is deallocated, it is deleted
from the registry and the HDF5 reference count is decreased; HDF5 then closes
and reclaims the identifier for future use.
All interactions with the registry must be synchronized for thread safety.
You must acquire "registry.lock" before interacting with the registry. The
registry is not internally synchronized, in the interest of performance: we
don't want the same thread attempting to acquire the lock multiple times
during a single operation, if we can avoid it.
All ObjectIDs and subclasses thereof should be opened with the "open"
classmethod factory function, such that an existing ObjectID instance can
be returned from the registry when appropriate.
"""
from defs cimport *
from weakref import KeyedRef, ref
## {{{ http://code.activestate.com/recipes/577336/ (r3)
from cpython cimport pythread
from cpython.exc cimport PyErr_NoMemory
cdef class FastRLock:
"""Fast, re-entrant locking.
Under uncongested conditions, the lock is never acquired but only
counted. Only when a second thread comes in and notices that the
lock is needed, it acquires the lock and notifies the first thread
to release it when it's done. This is all made possible by the
wonderful GIL.
"""
cdef pythread.PyThread_type_lock _real_lock
cdef long _owner # ID of thread owning the lock
cdef int _count # re-entry count
cdef int _pending_requests # number of pending requests for real lock
cdef bint _is_locked # whether the real lock is acquired
def __cinit__(self):
self._owner = -1
self._count = 0
self._is_locked = False
self._pending_requests = 0
self._real_lock = pythread.PyThread_allocate_lock()
if self._real_lock is NULL:
PyErr_NoMemory()
def __dealloc__(self):
if self._real_lock is not NULL:
pythread.PyThread_free_lock(self._real_lock)
self._real_lock = NULL
def acquire(self, bint blocking=True):
return lock_lock(self, pythread.PyThread_get_thread_ident(), blocking)
def release(self):
        if self._owner != pythread.PyThread_get_thread_ident():
raise RuntimeError("cannot release un-acquired lock")
unlock_lock(self)
# compatibility with threading.RLock
def __enter__(self):
# self.acquire()
return lock_lock(self, pythread.PyThread_get_thread_ident(), True)
def __exit__(self, t, v, tb):
# self.release()
        if self._owner != pythread.PyThread_get_thread_ident():
raise RuntimeError("cannot release un-acquired lock")
unlock_lock(self)
def _is_owned(self):
return self._owner == pythread.PyThread_get_thread_ident()
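# Usage sketch: FastRLock mirrors threading.RLock, so the common pattern is
# simply
#
#   lock = FastRLock()
#   with lock:
#       with lock:      # re-entrant: the same thread may nest acquisitions
#           ...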
cdef inline bint lock_lock(FastRLock lock, long current_thread, bint blocking) nogil:
# Note that this function *must* hold the GIL when being called.
# We just use 'nogil' in the signature to make sure that no Python
# code execution slips in that might free the GIL
if lock._count:
# locked! - by myself?
if current_thread == lock._owner:
lock._count += 1
return 1
elif not lock._pending_requests:
# not locked, not requested - go!
lock._owner = current_thread
lock._count = 1
return 1
# need to get the real lock
return _acquire_lock(
lock, current_thread,
pythread.WAIT_LOCK if blocking else pythread.NOWAIT_LOCK)
cdef bint _acquire_lock(FastRLock lock, long current_thread, int wait) nogil:
# Note that this function *must* hold the GIL when being called.
# We just use 'nogil' in the signature to make sure that no Python
# code execution slips in that might free the GIL
if not lock._is_locked and not lock._pending_requests:
# someone owns it but didn't acquire the real lock - do that
# now and tell the owner to release it when done. Note that we
# do not release the GIL here as we must absolutely be the one
# who acquires the lock now.
if not pythread.PyThread_acquire_lock(lock._real_lock, wait):
return 0
#assert not lock._is_locked
lock._is_locked = True
lock._pending_requests += 1
with nogil:
# wait for the lock owning thread to release it
locked = pythread.PyThread_acquire_lock(lock._real_lock, wait)
lock._pending_requests -= 1
#assert not lock._is_locked
#assert lock._count == 0
if not locked:
return 0
lock._is_locked = True
lock._owner = current_thread
lock._count = 1
return 1
cdef inline void unlock_lock(FastRLock lock) nogil:
# Note that this function *must* hold the GIL when being called.
# We just use 'nogil' in the signature to make sure that no Python
# code execution slips in that might free the GIL
#assert lock._owner == pythread.PyThread_get_thread_ident()
#assert lock._count > 0
lock._count -= 1
if lock._count == 0:
lock._owner = -1
if lock._is_locked:
pythread.PyThread_release_lock(lock._real_lock)
lock._is_locked = False
## end of http://code.activestate.com/recipes/577336/ }}}
cdef class _Registry:
cdef object _data
cdef readonly FastRLock lock
def __cinit__(self):
self._data = {}
self.lock = FastRLock()
__hash__ = None # Avoid Py3 warning
def cleanup(self):
"Manage invalid identifiers"
deadlist = []
for key in self._data:
val = self._data[key]
val = val()
if val is None:
deadlist.append(key)
continue
if not val.valid:
deadlist.append(key)
for key in deadlist:
del self._data[key]
def __getitem__(self, key):
o = self._data[key]()
if o is None:
# This would occur if we had open objects and closed their
# file, causing the objects identifiers to be reclaimed.
# Now we clean up the registry when we close a file (or any
# other identifier, for that matter), so in practice this
# condition never obtains.
del self._data[key]
# We need to raise a KeyError:
o = self._data[key]()
return o
def __setitem__(self, key, val):
# this method should only be called by ObjectID.open
self._data[key] = ref(val)
def __delitem__(self, key):
# we need to synchronize removal of the id from the
# registry with decreasing the HDF5 reference count:
        self._data.pop(key, None)
if H5Iget_type(key) >= 0: # if not, object was explicitly closed
H5Idec_ref(key)
registry = _Registry()
cdef class ObjectID:
"""
Represents an HDF5 identifier.
"""
property fileno:
def __get__(self):
cdef H5G_stat_t stat
H5Gget_objinfo(self.id, '.', 0, &stat)
return (stat.fileno[0], stat.fileno[1])
property valid:
def __get__(self):
if not self.id:
return False
res = H5Iget_type(self.id) > 0
if not res:
self.id = 0
return res
def __cinit__(self, id):
self.id = id
self.locked = 0
with registry.lock:
registry[id] = self
def __dealloc__(self):
if not self.locked:
try:
with registry.lock:
del registry[self.id]
except AttributeError:
# library being torn down, registry is None
pass
def __nonzero__(self):
return self.valid
def __copy__(self):
cdef ObjectID cpy
cpy = type(self)(self.id)
return cpy
def __richcmp__(self, object other, int how):
""" Default comparison mechanism for HDF5 objects (equal/not-equal)
Default equality testing:
1. Objects which are not both ObjectIDs are unequal
2. Objects with the same HDF5 ID number are always equal
3. Objects which hash the same are equal
"""
cdef bint equal = 0
        if how != 2 and how != 3:
return NotImplemented
if isinstance(other, ObjectID):
if self.id == other.id:
equal = 1
else:
try:
equal = hash(self) == hash(other)
except TypeError:
pass
if how == 2:
return equal
return not equal
def __hash__(self):
""" Default hashing mechanism for HDF5 objects
Default hashing strategy:
1. Try to hash based on the object's fileno and objno records
2. If (1) succeeds, cache the resulting value
3. If (1) fails, raise TypeError
"""
cdef H5G_stat_t stat
if self._hash is None:
try:
H5Gget_objinfo(self.id, '.', 0, &stat)
self._hash = hash((stat.fileno[0], stat.fileno[1], stat.objno[0], stat.objno[1]))
except Exception:
raise TypeError("Objects of class %s cannot be hashed" % self.__class__.__name__)
return self._hash
@classmethod
def open(cls, id):
""" Return a representation of an HDF5 identifier """
with registry.lock:
try:
res = registry[id]
except KeyError:
res = cls(id)
return res
cdef hid_t pdefault(ObjectID pid):
if pid is None:
return <hid_t>H5P_DEFAULT
return pid.id
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from libcpp cimport bool
from pylibraft.common.handle cimport *
cdef extern from "cugraph/legacy/graph.hpp" namespace "cugraph::legacy":
ctypedef enum PropType:
PROP_UNDEF "cugraph::legacy::PROP_UNDEF"
PROP_FALSE "cugraph::legacy::PROP_FALSE"
PROP_TRUE "cugraph::legacy::PROP_TRUE"
ctypedef enum DegreeDirection:
DIRECTION_IN_PLUS_OUT "cugraph::legacy::DegreeDirection::IN_PLUS_OUT"
DIRECTION_IN "cugraph::legacy::DegreeDirection::IN"
DIRECTION_OUT "cugraph::legacy::DegreeDirection::OUT"
struct GraphProperties:
bool directed
bool weighted
bool multigraph
bool bipartite
bool tree
PropType has_negative_edges
cdef cppclass GraphViewBase[VT,ET,WT]:
WT *edge_data
handle_t *handle;
GraphProperties prop
VT number_of_vertices
ET number_of_edges
VT* local_vertices
ET* local_edges
VT* local_offsets
void set_handle(handle_t*)
void set_local_data(VT* local_vertices_, ET* local_edges_, VT* local_offsets_)
void get_vertex_identifiers(VT *) const
GraphViewBase(WT*,VT,ET)
cdef cppclass GraphCOOView[VT,ET,WT](GraphViewBase[VT,ET,WT]):
VT *src_indices
VT *dst_indices
void degree(ET *,DegreeDirection) const
GraphCOOView()
GraphCOOView(const VT *, const ET *, const WT *, size_t, size_t)
cdef cppclass GraphCompressedSparseBaseView[VT,ET,WT](GraphViewBase[VT,ET,WT]):
ET *offsets
VT *indices
void get_source_indices(VT *) const
void degree(ET *,DegreeDirection) const
GraphCompressedSparseBaseView(const VT *, const ET *, const WT *, size_t, size_t)
cdef cppclass GraphCSRView[VT,ET,WT](GraphCompressedSparseBaseView[VT,ET,WT]):
GraphCSRView()
GraphCSRView(const VT *, const ET *, const WT *, size_t, size_t)
cdef cppclass GraphCSCView[VT,ET,WT](GraphCompressedSparseBaseView[VT,ET,WT]):
GraphCSCView()
GraphCSCView(const VT *, const ET *, const WT *, size_t, size_t)
from PDSim.flow import flow_models
cimport PDSim.flow.flow_models as flow_models
from PDSim.flow.flow_models import FlowFunction
from PDSim.flow.flow_models cimport FlowFunction
from PDSim.flow.flow import FlowPath
from PDSim.flow.flow cimport FlowPath
from PDSim.scroll import scroll_geo
from PDSim.scroll.common_scroll_geo import geoVals
from PDSim.scroll.common_scroll_geo cimport geoVals
cdef class _Scroll(object):
cdef public geoVals geo
cdef public double theta
cdef public double HTC
cpdef dict __cdict__(self)
cpdef double SA_S(self, FlowPath FP)
cpdef double Discharge(self,FlowPath FP)
cpdef double Inlet_sa(self, FlowPath FP)
cpdef double radial_leakage_area(self, double, long, long)
cpdef tuple radial_leakage_angles(self, double theta, long key1Index, long key2Index)
cpdef double RadialLeakage(self, FlowPath FP, double t = *)
cpdef double FlankLeakage(self, FlowPath FP, int Ncv_check = *)
from quantlib.time._calendar cimport Calendar
cdef extern from 'ql/time/calendars/japan.hpp' namespace 'QuantLib':
cdef cppclass Japan(Calendar):
Japan()
# Copyright (c) 2013, Robert Escriva
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of this project nor the names of its contributors may
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import collections
from cpython cimport bool
cdef extern from "stdlib.h":
void* malloc(size_t sz)
void free(void* ptr)
cdef extern from "stdint.h":
ctypedef unsigned int uint32_t
ctypedef long unsigned int uint64_t
cdef extern from "ygor/data.h":
cdef enum ygor_units:
YGOR_UNIT_S = 1
YGOR_UNIT_MS = 2
YGOR_UNIT_US = 3
YGOR_UNIT_BYTES = 9
YGOR_UNIT_KBYTES = 10
YGOR_UNIT_MBYTES = 11
YGOR_UNIT_GBYTES = 12
        YGOR_UNIT_MONOTONIC = 254
YGOR_UNIT_UNIT = 255
cdef enum ygor_precision:
YGOR_PRECISE_INTEGER = 1
YGOR_HALF_PRECISION = 2
YGOR_SINGLE_PRECISION = 3
YGOR_DOUBLE_PRECISION = 4
cdef struct ygor_series:
const char* name
ygor_units indep_units
ygor_precision indep_precision
ygor_units dep_units
ygor_precision dep_precision
cdef union ygor_data_value:
uint64_t precise
double approximate
cdef struct ygor_data_point:
const ygor_series* series
ygor_data_value indep
ygor_data_value dep
cdef struct ygor_data_logger
ygor_data_logger* ygor_data_logger_create(const char* output,
const ygor_series** series,
size_t series_sz)
int ygor_data_logger_flush_and_destroy(ygor_data_logger* ydl)
int ygor_data_logger_record(ygor_data_logger* ydl, ygor_data_point* ydp)
cdef extern from "ygor-internal.h":
int ygor_is_precise(ygor_precision p)
Series = collections.namedtuple('Series', ('name', 'indep_units', 'indep_precision', 'dep_units', 'dep_precision'))
units_conversion = {'s': YGOR_UNIT_S,
'ms': YGOR_UNIT_MS,
'us': YGOR_UNIT_US,
'B': YGOR_UNIT_BYTES,
'KB': YGOR_UNIT_KBYTES,
'MB': YGOR_UNIT_MBYTES,
'GB': YGOR_UNIT_GBYTES,
None: YGOR_UNIT_UNIT}
precision_conversion = {'precise': YGOR_PRECISE_INTEGER,
'half': YGOR_HALF_PRECISION,
'single': YGOR_SINGLE_PRECISION,
'double': YGOR_DOUBLE_PRECISION}
cdef class __series:
cdef bytes name
cdef ygor_series series
cdef class DataLogger:
cdef ygor_data_logger* dl
cdef dict series
cdef dict series_idxs
cdef const ygor_series** ys;
cdef size_t ys_sz
def __cinit__(self, str output, series):
self.dl = NULL
self.series = {}
self.series_idxs = {}
self.ys = <const ygor_series**>malloc(sizeof(const ygor_series*) * len(series))
self.ys_sz = len(series)
for idx, ser in enumerate(series):
if ser.indep_units not in units_conversion.keys():
raise ValueError("invalid independent units")
if ser.indep_precision not in ('half','single', 'double', 'precise'):
raise ValueError("invalid independent precision")
if ser.dep_units not in units_conversion.keys():
raise ValueError("invalid dependent units")
if ser.dep_precision not in ('half','single', 'double', 'precise'):
raise ValueError("invalid dependent precision")
if ser.name in self.series:
raise KeyError("series defined twice")
s = __series()
s.name = ser.name.encode('utf8')
s.series.name = s.name
s.series.indep_units = units_conversion[ser.indep_units]
s.series.indep_precision = precision_conversion[ser.indep_precision]
s.series.dep_units = units_conversion[ser.dep_units]
s.series.dep_precision = precision_conversion[ser.dep_precision]
self.series[s.name] = s
self.series_idxs[s.name] = idx
self.ys[idx] = &s.series
out = output.encode('utf8')
self.dl = ygor_data_logger_create(out, self.ys, self.ys_sz)
if not self.dl:
raise RuntimeError("error creating data logger")
def __dealloc__(self):
if self.dl:
self.flush_and_destroy()
if self.ys:
free(self.ys)
def flush_and_destroy(self):
assert(self.dl)
ygor_data_logger_flush_and_destroy(self.dl)
self.dl = NULL
def record(self, str _series, indep, dep):
cdef bytes series = _series.encode('utf8')
assert series in self.series
idx = self.series_idxs[series]
cdef ygor_data_point ydp
ydp.series = self.ys[idx]
if ygor_is_precise(ydp.series.indep_precision):
ydp.indep.precise = indep
else:
ydp.indep.approximate = indep
if ygor_is_precise(ydp.series.dep_precision):
ydp.dep.precise = dep
else:
ydp.dep.approximate = dep
if ygor_data_logger_record(self.dl, &ydp) < 0:
raise RuntimeError("could not log data record")
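# Example usage (a minimal sketch; the output path and series definition are
# illustrative, not part of the module):
#
#   s = Series(name='latency', indep_units='s', indep_precision='precise',
#              dep_units='ms', dep_precision='double')
#   dl = DataLogger('ygor.out', [s])
#   dl.record('latency', 1, 0.25)
#   dl.flush_and_destroy()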
<|end_of_text|># File: slp.pyx
# Abstract: Pyrex wrapper for OpenSLP
# Requires: OpenSLP installation, Pyrex
#
# Author(s): Ganesan Rajagopal <[email protected]>
#
# Copyright (C) 2003 The OpenSLP Project
#
# This program is released under the same license as the OpenSLP project (BSD
# style without the advertising clause). Alternatively, you can use it under
# the Python License.
cdef extern from "slp.h":
ctypedef enum SLPBoolean:
SLP_FALSE
SLP_TRUE
ctypedef enum SLPError:
SLP_LAST_CALL = 1
SLP_OK = 0
SLP_LANGUAGE_NOT_SUPPORTED = -1
SLP_PARSE_ERROR = -2
SLP_INVALID_REGISTRATION = -3
SLP_SCOPE_NOT_SUPPORTED = -4
SLP_AUTHENTICATION_ABSENT = -6
SLP_AUTHENTICATION_FAILED = -7
SLP_INVALID_UPDATE = -13
SLP_REFRESH_REJECTED = -15
SLP_NOT_IMPLEMENTED = -17
SLP_BUFFER_OVERFLOW = -18
SLP_NETWORK_TIMED_OUT = -19
SLP_NETWORK_INIT_FAILED = -20
SLP_MEMORY_ALLOC_FAILED = -21
SLP_PARAMETER_BAD = -22
SLP_NETWORK_ERROR = -23
SLP_INTERNAL_SYSTEM_ERROR = -24
SLP_HANDLE_IN_USE = -25
SLP_TYPE_ERROR = -26
cdef struct srvurl:
char *s_pcSrvType
char *s_pcHost
int s_iPort
char *s_pcNetFamily
char *s_pcSrvPart
ctypedef srvurl SLPSrvURL
ctypedef void *SLPHandle
ctypedef void SLPRegReport(SLPHandle hSLP, SLPError errCode,
void *pvCookie)
ctypedef SLPBoolean SLPSrvTypeCallback(
SLPHandle hSLP, char *pcSrvTypes, SLPError errCode, void *pvCookie)
ctypedef SLPBoolean SLPSrvURLCallback(
SLPHandle hSLP, char *pcSrvURL, unsigned short sLifetime,
SLPError errCode, void *pvCookie)
ctypedef SLPBoolean SLPAttrCallback(
SLPHandle hSLP, char *pcAttrList, SLPError errCode, void *pvCookie)
SLPError SLPOpen(char *lang, SLPBoolean isasync, SLPHandle *phslp)
void SLPClose(SLPHandle hSLP)
SLPError SLPAssociateIFList(SLPHandle hSLP, char* McastIFList)
SLPError SLPAssociateIP(SLPHandle hSLP, char* unicast_ip)
SLPError SLPReg(SLPHandle hSLP, char *pcSrvURL, unsigned short usLifetime,
char *pcSrvType, char *pcAttrs, SLPBoolean fresh,
SLPRegReport callback, void *pvCookie)
SLPError SLPDereg(SLPHandle hSLP, char *pcSrvURL, SLPRegReport callback,
void *pvCookie)
SLPError SLPDelAttrs(SLPHandle hSLP, char *pcSrvURL, char *pcAttrs,
SLPRegReport callback, void *pvCookie)
SLPError SLPFindSrvTypes(SLPHandle hslp, char *namingauthority,
char *scopelist, SLPSrvTypeCallback callback,
void* cookie)
SLPError SLPFindSrvs(SLPHandle hSLP, char *pcServiceType,
char *pcScopeList, char *pcSearchFilter,
SLPSrvURLCallback callback, void *pvCookie)
SLPError SLPFindAttrs(SLPHandle hSLP, char *pcURLOrServiceType,
char *pcScopeList, char *pcAttrIds,
SLPAttrCallback callback, void *pvCookie)
unsigned short SLPGetRefreshInterval()
SLPError SLPFindScopes(SLPHandle hSLP, char **ppcScopeList)
SLPError SLPParseSrvURL(char *pcSrvURL, SLPSrvURL** ppSrvURL)
SLPError SLPEscape(char *pcInbuf, char **ppcOutBuf, SLPBoolean isTag)
SLPError SLPUnescape(char* pcInbuf, char **ppcOutBuf, SLPBoolean isTag)
void SLPFree(void *pvMem)
char *SLPGetProperty(char* pcName)
    void SLPSetProperty(char *pcName, char *pcValue)
SLPError SLPParseAttrs(char *pcAttrList, char *pcAttrId,
char **ppcAttrVal)
# C callbacks get a python tuple as the cookie parameter. The first element
# of the tuple is a python function and the second element is a cookie to
# be passed to it (as the last parameter).
# Callback for register(), deregister() and delattrs() methods
cdef void errcb(SLPHandle hslp, SLPError errcode, void *cookie):
callback, realcookie = <object>cookie
callback(errcode, realcookie)
# Callback for findsrvtypes() and findattrs() methods
cdef SLPBoolean strcb(SLPHandle slph, char *string, SLPError errcode,
void *cookie):
    cdef SLPBoolean ret
    callback, realcookie = <object>cookie
    pystring = None
    if string != NULL:  # string may be NULL when errcode != SLP_OK
        pystring = string
    ret = callback(pystring, errcode, realcookie)
return ret
# Callback for findsrvs() method
cdef SLPBoolean srvcb(SLPHandle hslp, char *srvurl, unsigned short lifetime,
SLPError errcode, void *cookie):
cdef SLPBoolean ret
callback, realcookie = <object>cookie
pysrvurl = None
    if srvurl != NULL:  # srvurl may be NULL when errcode != SLP_OK
pysrvurl = srvurl
ret = callback(pysrvurl, lifetime, errcode, realcookie)
return ret
# Wrapper for OpenSLP APIs
cdef class SLPConn:
cdef SLPHandle slph
cdef object cookie
# defaultcookie will be passed as the cookie parameter to callback
# functions if no explicit parameter is specified in the call
    def __init__(self, char *lang, int is_async, object defaultcookie = None):
        self.cookie = defaultcookie
        if is_async:
            f = SLP_TRUE
        else:
            f = SLP_FALSE
        err = SLPOpen(lang, f, &self.slph)
        if err != SLP_OK:
            raise EnvironmentError(err, "")
    def __dealloc__(self):
        if self.slph != NULL:
            SLPClose(self.slph)
# Close the SLP Handle
def close(self):
SLPClose(self.slph)
self.slph = NULL
def associate_if_list(self, char *mcast_if_list):
return SLPAssociateIFList(self.slph, mcast_if_list)
def associate_ip(self, char *unicast_ip):
return SLPAssociateIP(self.slph, unicast_ip)
# register an SLP service
def register(self, char *srvurl, unsigned lifetime, char *attrs,
object callback, object cookie = None):
pycb = (callback, cookie or self.cookie)
return SLPReg(self.slph, srvurl, <unsigned short>lifetime, "",
attrs, SLP_TRUE, errcb, <void *>pycb)
# deregister an SLP service
def deregister(self, char *srvurl, object callback,
object cookie = None):
pycb = (callback, cookie or self.cookie)
return SLPDereg(self.slph, srvurl, errcb, <void *>pycb)
# delete attributes from a SLP service URL
def delattrs(self, char *srvurl, char *attrs, object callback,
object cookie = None):
pycb = (callback, cookie or self.cookie)
return SLPDelAttrs(self.slph, srvurl, attrs, errcb, <void *>pycb)
# find SLP service types
def findsrvtypes(self, char *na, char *scopelist, object callback,
object cookie = None):
pycb = (callback, cookie or self.cookie)
return SLPFindSrvTypes(self.slph, na, scopelist, strcb, <void *>pycb)
# find SLP services matching the service type and searchfilter
def findsrvs(self, char *srvtype, char *scopelist, char *searchfilter,
object callback, object cookie = None):
pycb = (callback, cookie or self.cookie)
return SLPFindSrvs(self.slph, srvtype, scopelist, searchfilter,
srvcb, <void *>pycb)
# find attributes for the given SLP service URL
def findattrs(self, char *url, char *scopelist, char *attrids,
object callback, object cookie = None):
pycb = (callback, cookie or self.cookie)
return SLPFindAttrs(self.slph, url, scopelist, attrids, strcb,
<void *>pycb)
# find supported SLP scopes
def findscopes(self):
cdef char *scopes
err = SLPFindScopes(self.slph, &scopes)
if err!= SLP_OK:
raise EnvironmentError(err, "")
return scopes
def getrefreshinterval():
"""get refresh interval"""
return SLPGetRefreshInterval()
def parsesrvurl(char *srvurl):
"""Parse given service URL"""
cdef SLPSrvURL *pSrvURL
err = SLPParseSrvURL(srvurl, &pSrvURL)
if err!= SLP_OK:
raise EnvironmentError(err, "")
parsedurl = (pSrvURL.s_pcSrvType, pSrvURL.s_pcHost, pSrvURL.s_iPort,
pSrvURL.s_pcNetFamily, pSrvURL.s_pcSrvPart)
SLPFree(pSrvURL)
return parsedurl
def escape(char *s, int isTag):
"""Escape given string"""
cdef char *outs
if isTag:
f = SLP_TRUE
else:
f = SLP_FALSE
err = SLPEscape(s, &outs, f)
if err!= SLP_OK:
raise EnvironmentError(err, "")
ret = outs
SLPFree(outs)
return ret
def unescape(char *s, int isTag):
"""Unescape given string"""
cdef char *outs
if isTag:
f = SLP_TRUE
else:
f = SLP_FALSE
err = SLPUnescape(s, &outs, f)
if err!= SLP_OK:
raise EnvironmentError(err, "")
ret = outs
SLPFree(outs)
return ret
def getprop(char *name):
"""Get a SLP property"""
return SLPGetProperty(name)
def setprop(char *name, char *value):
"""Set an SLP property"""
SLPSetProperty(name, value)
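# Example usage (a minimal sketch; the service type, scope list and language
# tag are placeholders, and a reachable SLP agent is assumed):
#
#   def on_srvurl(url, lifetime, errcode, cookie):
#       if url is not None:
#           print(url, lifetime)
#       return 1    # nonzero (SLP_TRUE) asks for further results
#
#   conn = SLPConn("en", 0)
#   conn.findsrvs("service:printer", "", "", on_srvurl)
#   conn.close()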
# Local Variables:
# mode: python
# End:
<|end_of_text|># -*- coding: utf-8 -*-
"""Serial METIS with 64bit integer
"""
include "impl_metis.pxi"
<|end_of_text|>
#cython: embedsignature=True, cdivision=True, boundscheck=False, wraparound=False, initializedcheck=False
__all__ = ['c_maxvol', 'c_rect_maxvol']
import numpy as np
cimport numpy as cnp
from libc.stdlib cimport malloc, realloc, free
from cython.parallel import prange
from scipy.linalg.cython_blas cimport (
strsm, dtrsm, ctrsm, ztrsm,
scopy, ccopy, dcopy, zcopy,
sgemv, dgemv, cgemv, zgemv,
sger, dger, cgerc, cgeru, zgerc, zgeru,
isamax, idamax
)
from scipy.linalg.cython_lapack cimport (
sgetrf, dgetrf, cgetrf, zgetrf
)
cdef extern from "complex.h" nogil:
double cabs(double complex)
float cabsf(float complex)
cdef extern from "math.h" nogil:
double fabs(double)
float fabsf(float)
def c_rect_maxvol(A, tol=1., maxK=None, min_add_K=None, minK=None,
start_maxvol_iters=10, identity_submatrix=True, top_k_index=-1):
"""
Cython implementation of rectangular 2-volume maximization.
For information see `rect_maxvol` function.
"""
cdef int N, r, id_sub
cdef cnp.ndarray lu, coef, basis
    if type(A) != np.ndarray:
        raise TypeError("argument must be of numpy.ndarray type")
    if len(A.shape) != 2:
        raise ValueError("argument must have 2 dimensions")
N, r = A.shape
if N <= r:
return np.arange(N, dtype=np.int32), np.eye(N, dtype=A.dtype)
lu = np.copy(A, order='F')
if maxK is None or maxK > N:
maxK = N
if maxK < r:
maxK = r
if minK is None or minK < r:
minK = r
if minK > N:
minK = N
if min_add_K is not None:
minK = max(minK, r+min_add_K)
if minK > maxK:
        minK = maxK
if identity_submatrix:
id_sub = 1
else:
id_sub = 0
    if A.dtype == np.dtype(np.float32):
        return srect_maxvol(N, r, <float *>lu.data, tol, minK, maxK,
                            start_maxvol_iters, id_sub, top_k_index)
    elif A.dtype == np.dtype(np.float64):
        return drect_maxvol(N, r, <double *>lu.data, tol, minK, maxK,
                            start_maxvol_iters, id_sub, top_k_index)
    elif A.dtype == np.dtype(np.complex64):
        return crect_maxvol(N, r, <float complex *>lu.data, tol, minK,
                            maxK, start_maxvol_iters, id_sub, top_k_index)
    elif A.dtype == np.dtype(np.complex128):
        return zrect_maxvol(N, r, <double complex *>lu.data, tol, minK,
                            maxK, start_maxvol_iters, id_sub, top_k_index)
    else:
        raise TypeError("argument must be of float or complex type")
def c_maxvol(A, tol=1.05, max_iters=100, top_k_index=-1):
"""
Cython implementation of 1-volume maximization.
For information see `maxvol` function.
"""
cdef int N, r
cdef cnp.ndarray lu, coef, basis
    if type(A) != np.ndarray:
        raise TypeError("argument must be of numpy.ndarray type")
    if len(A.shape) != 2:
        raise ValueError("argument must have 2 dimensions")
N, r = A.shape
if N <= r:
return np.arange(N, dtype=np.int32), np.eye(N, dtype=A.dtype)
if tol < 1:
tol = 1.0
lu = np.copy(A, order='F')
coef = np.copy(lu, order='F')
basis = np.ndarray(r, dtype=np.int32)
    if A.dtype == np.dtype(np.float32):
        smaxvol(N, r, <float *>lu.data, <float *>coef.data,
                <int *>basis.data, tol, max_iters, top_k_index)
    elif A.dtype == np.dtype(np.float64):
        dmaxvol(N, r, <double *>lu.data, <double *>coef.data,
                <int *>basis.data, tol, max_iters, top_k_index)
    elif A.dtype == np.dtype(np.complex64):
        cmaxvol(N, r, <float complex *>lu.data, <float complex *>coef.data,
                <int *>basis.data, tol, max_iters, top_k_index)
    elif A.dtype == np.dtype(np.complex128):
        zmaxvol(N, r, <double complex *>lu.data, <double complex *>coef.data,
                <int *>basis.data, tol, max_iters, top_k_index)
    else:
        raise TypeError("must be of float or complex type")
return basis, coef
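# Example usage (a minimal sketch):
#
#   import numpy as np
#   A = np.random.rand(1000, 10)
#   piv, C = c_maxvol(A, tol=1.05)
#   # A[piv] is a dominant 10 x 10 submatrix and A is approximated by C.dot(A[piv])
#   piv, C = c_rect_maxvol(A, tol=1.10)
#   # piv now selects at least 10 rows of A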
cdef object srect_maxvol(int N, int R, float *lu, float tol, int minK,
int maxK, int start_maxvol_iters, int identity_submatrix,
int top_k_index):
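    # Greedy rectangular maxvol: start from a square maxvol basis, then keep
    # appending the row with the largest residual squared norm (tracked in L)
    # via rank-1 updates until the norms fall below tol**2 or maxK rows are
    # selected.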
cdef char cN = 'N'
cdef int i, j, i_one = 1, K, size = N*R
cdef float d_one = 1.0, d_zero = 0.0, l
cdef float tol2 = tol*tol, tmp, tmp2
cdef int *basis = <int *> malloc(N * sizeof(int))
cdef float *chosen = <float *> malloc(N * sizeof(float))
cdef int [:]basis_buf
cdef int coef_realloc_step = R, coef_columns = R+coef_realloc_step
cdef float *coef = <float *> malloc(N * coef_columns * sizeof(float))
cdef float *tmp_pointer
cdef float *L = <float *> malloc(N * sizeof(float))
cdef float *V = <float *> malloc(N * sizeof(float))
cdef float *tmp_row = <float *> malloc(N * sizeof(float))
cdef float [:,:] coef_buf
if top_k_index == -1 or top_k_index > N:
top_k_index = N
if top_k_index < R:
top_k_index = R
scopy(&size, lu, &i_one, coef, &i_one)
tmp = 1.05 # tolerance for square maxvol
smaxvol(N, R, lu, coef, basis, tmp, start_maxvol_iters, top_k_index)
# compute square length for each vector
for j in prange(top_k_index, schedule="static", nogil=True):
L[j] = 0.0
V[j] = 0.0
chosen[j] = 1.0
for i in range(R):
tmp_pointer = coef+i*N
for j in prange(top_k_index, schedule="static", nogil=True):
tmp = fabsf(tmp_pointer[j])
L[j] += tmp*tmp
for i in prange(R, schedule="static", nogil=True):
L[basis[i]] = 0.0
chosen[basis[i]] = 0.0
i = isamax(&top_k_index, L, &i_one)-1
K = R
while K < minK or (L[i] > tol2 and K < maxK):
basis[K] = i
chosen[i] = 0.0
#scopy(&K, coef+i, &N, tmp_row, &i_one)
tmp_pointer = coef+i
for j in prange(K, schedule="static", nogil=True):
tmp_row[j] = tmp_pointer[j*N]
sgemv(&cN, &N, &K, &d_one, coef, &N, tmp_row, &i_one, &d_zero, V,
&i_one)
l = (-d_one)/(1+V[i])
sger(&N, &K, &l, V, &i_one, tmp_row, &i_one, coef, &N)
tmp = -l
if coef_columns <= K:
coef_columns += coef_realloc_step
coef = <float *> realloc(coef, N * coef_columns * sizeof(float))
tmp_pointer = coef+K*N
for j in prange(N, schedule="static", nogil=True):
tmp_pointer[j] = tmp*V[j]
for j in prange(top_k_index, schedule="static", nogil=True):
tmp2 = fabsf(V[j])
L[j] -= tmp2*tmp2*tmp
L[j] *= chosen[j]
i = isamax(&top_k_index, L, &i_one)-1
K += 1
    free(L)
    free(V)
    free(tmp_row)
    free(chosen)
C = np.ndarray((N, K), order='F', dtype=np.float32)
coef_buf = C
for i in prange(K, schedule="static", nogil=True):
for j in range(N):
coef_buf[j, i] = coef[i*N+j]
free(coef)
if identity_submatrix == 1:
for i in prange(K, schedule="static", nogil=True):
tmp_pointer = &coef_buf[0, 0]+basis[i]
for j in range(K):
tmp_pointer[j*N] = 0.0
tmp_pointer[i*N] = 1.0
I = np.ndarray(K, dtype=np.int32)
basis_buf = I
for i in prange(K, schedule="static", nogil=True):
basis_buf[i] = basis[i]
free(basis)
return I, C
cdef object smaxvol(int N, int R, float *lu, float *coef, int *basis,
float tol, int max_iters, int top_k_index):
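    # Classic square maxvol sweep: LU-pivot to pick an initial basis, then
    # repeatedly swap a basis row with the row holding the largest |coef|
    # entry via a rank-1 (ger) update until every entry is <= tol.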
cdef int *ipiv = <int *> malloc(R * sizeof(int))
cdef int *interchange = <int *> malloc(N * sizeof(int))
cdef float *tmp_row = <float *> malloc(R*sizeof(float))
cdef float *tmp_column = <float *> malloc(N*sizeof(float))
cdef int info = 0, size = N * R, i, j, tmp_int, i_one = 1, iters = 0
cdef int k_row, k_col
cdef char cR = 'R', cN = 'N', cU = 'U', cL = 'L'
cdef float d_one = 1, alpha, max_value
cdef float abs_max, tmp
if (ipiv == NULL or interchange == NULL or tmp_row == NULL or
tmp_column == NULL):
raise MemoryError("malloc failed to allocate temporary buffers")
if top_k_index == -1 or top_k_index > N:
top_k_index = N
if top_k_index < R:
top_k_index = R
sgetrf(&top_k_index, &R, lu, &N, ipiv, &info)
if info < 0:
raise ValueError("Internal maxvol_fullrank error, {} argument of"
" sgetrf_ had illegal value".format(info))
if info > 0:
        raise ValueError("Input matrix must not be singular")
for i in prange(N, schedule="static", nogil=True):
interchange[i] = i
for i in prange(R, schedule="static", nogil=True):
j = ipiv[i]-1
if j!= i:
tmp_int = interchange[i]
interchange[i] = interchange[j]
interchange[j] = tmp_int
free(ipiv)
for i in prange(R, schedule="static", nogil=True):
basis[i] = interchange[i]
free(interchange)
strsm(&cR, &cU, &cN, &cN, &N, &R, &d_one, lu, &N, coef, &N)
strsm(&cR, &cL, &cN, &cU, &N, &R, &d_one, lu, &N, coef, &N)
while iters < max_iters:
abs_max = -1
for k_row in range(top_k_index):
for k_col in range(R):
tmp = fabsf(coef[k_row+k_col*N])
if tmp > abs_max:
abs_max = tmp
j = k_row
i = k_col
max_value = coef[j+i*N]
if abs_max > tol:
scopy(&R, coef+j, &N, tmp_row, &i_one)
tmp_row[i] -= d_one
scopy(&N, coef+i*N, &i_one, tmp_column, &i_one)
basis[i] = j
alpha = (-d_one)/max_value
sger(&N, &R, &alpha, tmp_column, &i_one, tmp_row, &i_one,
coef, &N)
iters += i_one
else:
break
free(tmp_row)
free(tmp_column)
return
cdef object drect_maxvol(int N, int R, double *lu, double tol, int minK,
int maxK, int start_maxvol_iters, int identity_submatrix,
int top_k_index):
cdef char cN = 'N'
cdef int i, j, i_one = 1, K, size = N*R
cdef double d_one = 1.0, d_zero = 0.0, l
cdef double tol2 = tol*tol, tmp, tmp2
cdef int *basis = <int *> malloc(N * sizeof(int))
cdef double *chosen = <double *> malloc(N * sizeof(double))
cdef int [:]basis_buf
cdef int coef_realloc_step = R, coef_columns = R+coef_realloc_step
cdef double *coef = <double *> malloc(N * coef_columns * sizeof(double))
cdef double *tmp_pointer
cdef double *L = <double *> malloc(N * sizeof(double))
cdef double *V = <double *> malloc(N * sizeof(double))
cdef double *tmp_row = <double *> malloc(N * sizeof(double))
cdef double [:,:] coef_buf
if top_k_index == -1 or top_k_index > N:
top_k_index = N
if top_k_index < R:
top_k_index = R
dcopy(&size, lu, &i_one, coef, &i_one)
tmp = 1.05 # tolerance for square maxvol
dmaxvol(N, R, lu, coef, basis, tmp, start_maxvol_iters, top_k_index)
# compute square length for each vector
for j in prange(top_k_index, schedule="static", nogil=True):
L[j] = 0.0
V[j] = 0.0
chosen[j] = 1.0
for i in range(R):
tmp_pointer = coef+i*N
for j in prange(top_k_index, schedule="static", nogil=True):
tmp = fabs(tmp_pointer[j])
L[j] += tmp*tmp
for i in prange(R, schedule="static", nogil=True):
L[basis[i]] = 0.0
chosen[basis[i]] = 0.0
i = idamax(&top_k_index, L, &i_one)-1
K = R
while K < minK or (L[i] > tol2 and K < maxK):
basis[K] = i
chosen[i] = 0.0
#dcopy(&K, coef+i, &N, tmp_row, &i_one)
tmp_pointer = coef+i
for j in prange(K, schedule="static", nogil=True):
tmp_row[j] = tmp_pointer[j*N]
dgemv(&cN, &N, &K, &d_one, coef, &N, tmp_row, &i_one, &d_zero, V,
&i_one)
l = (-d_one)/(1+V[i])
dger(&N, &K, &l, V, &i_one, tmp_row, &i_one, coef, &N)
tmp = -l
if coef_columns <= K:
coef_columns += coef_realloc_step
coef = <double *> realloc(coef, N * coef_columns * sizeof(double))
tmp_pointer = coef+K*N
for j in prange(N, schedule="static", nogil=True):
tmp_pointer[j] = tmp*V[j]
for j in prange(top_k_index, schedule="static", nogil=True):
tmp2 = fabs(V[j])
L[j] -= tmp2*tmp2*tmp
L[j] *= chosen[j]
i = idamax(&top_k_index, L, &i_one)-1
K += 1
    free(L)
    free(V)
    free(tmp_row)
    free(chosen)
C = np.ndarray((N, K), order='F', dtype=np.float64)
coef_buf = C
for i in prange(K, schedule="static", nogil=True):
for j in range(N):
coef_buf[j, i] = coef[i*N+j]
free(coef)
if identity_submatrix == 1:
for i in prange(K, schedule="static", nogil=True):
tmp_pointer = &coef_buf[0, 0]+basis[i]
for j in range(K):
tmp_pointer[j*N] = 0.0
tmp_pointer[i*N] = 1.0
I = np.ndarray(K, dtype=np.int32)
basis_buf = I
for i in prange(K, schedule="static", nogil=True):
basis_buf[i] = basis[i]
free(basis)
return I, C
cdef object dmaxvol(int N, int R, double *lu, double *coef, int *basis,
double tol, int max_iters, int top_k_index):
cdef int *ipiv = <int *> malloc(R * sizeof(int))
cdef int *interchange = <int *> malloc(N * sizeof(int))
cdef double *tmp_row = <double *> malloc(R*sizeof(double))
cdef double *tmp_column = <double *> malloc(N*sizeof(double))
cdef int info = 0, size = N * R, i, j, tmp_int, i_one = 1, iters = 0
cdef int k_row, k_col
cdef char cR = 'R', cN = 'N', cU = 'U', cL = 'L'
cdef double d_one = 1, alpha, max_value
cdef double abs_max, tmp
if (ipiv == NULL or interchange == NULL or tmp_row == NULL or
tmp_column == NULL):
raise MemoryError("malloc failed to allocate temporary buffers")
if top_k_index == -1 or top_k_index > N:
top_k_index = N
if top_k_index < R:
top_k_index = R
dgetrf(&top_k_index, &R, lu, &N, ipiv, &info)
if info < 0:
raise ValueError("Internal maxvol_fullrank error, {} argument of"
" dgetrf_ had illegal value".format(info))
if info > 0:
raise ValueError("Input matrix must not be singular")
for i in prange(N, schedule="static", nogil=True):
interchange[i] = i
for i in prange(R, schedule="static", nogil=True):
j = ipiv[i]-1
if j!= i:
tmp_int = interchange[i]
interchange[i] = interchange[j]
interchange[j] = tmp_int
free(ipiv)
for i in prange(R, schedule="static", nogil=True):
basis[i] = interchange[i]
free(interchange)
dtrsm(&cR, &cU, &cN, &cN, &N, &R, &d_one, lu, &N, coef, &N)
dtrsm(&cR, &cL, &cN, &cU, &N, &R, &d_one, lu, &N, coef, &N)
while iters < max_iters:
abs_max = -1
for k_row in range(top_k_index):
for k_col in range(R):
tmp = fabs(coef[k_row+k_col*N])
                if tmp > abs_max:
abs_max = tmp
j = k_row
i = k_col
max_value = coef[j+i*N]
if abs_max > tol:
dcopy(&R, coef+j, &N, tmp_row, &i_one)
tmp_row[i] -= d_one
dcopy(&N, coef+i*N, &i_one, tmp_column, &i_one)
basis[i] = j
alpha = (-d_one)/max_value
dger(&N, &R, &alpha, tmp_column, &i_one, tmp_row, &i_one,
coef, &N)
iters += i_one
else:
break
free(tmp_row)
free(tmp_column)
return
cdef object crect_maxvol(int N, int R, float complex *lu, float tol, int minK,
int maxK, int start_maxvol_iters, int identity_submatrix,
int top_k_index):
cdef char cN = 'N'
cdef int i, j, i_one = 1, K, size = N*R
cdef float complex d_one = 1.0, d_zero = 0.0, l
cdef float tol2 = tol*tol, tmp, tmp2
cdef int *basis = <int *> malloc(N * sizeof(int))
cdef float *chosen = <float *> malloc(N * sizeof(float))
cdef int [:]basis_buf
cdef int coef_realloc_step = R, coef_columns = R+coef_realloc_step
cdef float complex *coef = <float complex *> malloc(N * coef_columns * sizeof(float complex))
cdef float complex *tmp_pointer
cdef float *L = <float *> malloc(N * sizeof(float))
cdef float complex *V = <float complex *> malloc(N * sizeof(float complex))
cdef float complex *tmp_row = <float complex *> malloc(N * sizeof(float complex))
cdef float complex [:,:] coef_buf
if top_k_index == -1 or top_k_index > N:
top_k_index = N
if top_k_index < R:
top_k_index = R
ccopy(&size, lu, &i_one, coef, &i_one)
tmp = 1.05 # tolerance for square maxvol
cmaxvol(N, R, lu, coef, basis, tmp, start_maxvol_iters, top_k_index)
# compute square length for each vector
for j in prange(top_k_index, schedule="static", nogil=True):
L[j] = 0.0
V[j] = 0.0
chosen[j] = 1.0
for i in range(R):
tmp_pointer = coef+i*N
for j in prange(top_k_index, schedule="static", nogil=True):
tmp = cabsf(tmp_pointer[j])
L[j] += tmp*tmp
for i in prange(R, schedule="static", nogil=True):
L[basis[i]] = 0.0
chosen[basis[i]] = 0.0
i = isamax(&top_k_index, L, &i_one)-1
K = R
while K < minK or (L[i] > tol2 and K < maxK):
basis[K] = i
chosen[i] = 0.0
#ccopy(&K, coef+i, &N, tmp_row, &i_one)
tmp_pointer = coef+i
for j in prange(K, schedule="static", nogil=True):
tmp_row[j] = tmp_pointer[j*N].conjugate()
cgemv(&cN, &N, &K, &d_one, coef, &N, tmp_row, &i_one, &d_zero, V,
&i_one)
l = (-d_one)/(1+V[i])
cgerc(&N, &K, &l, V, &i_one, tmp_row, &i_one, coef, &N)
tmp = -l.real
if coef_columns <= K:
coef_columns += coef_realloc_step
coef = <float complex *> realloc(coef, N * coef_columns * sizeof(float complex))
tmp_pointer = coef+K*N
for j in prange(N, schedule="static", nogil=True):
tmp_pointer[j] = tmp*V[j]
for j in prange(top_k_index, schedule="static", nogil=True):
tmp2 = cabsf(V[j])
L[j] -= tmp2*tmp2*tmp
L[j] *= chosen[j]
i = isamax(&top_k_index, L, &i_one)-1
K += 1
    free(L)
    free(V)
    free(tmp_row)
    free(chosen)
C = np.ndarray((N, K), order='F', dtype=np.complex64)
coef_buf = C
for i in prange(K, schedule="static", nogil=True):
for j in range(N):
coef_buf[j, i] = coef[i*N+j]
free(coef)
if identity_submatrix == 1:
for i in prange(K, schedule="static", nogil=True):
tmp_pointer = &coef_buf[0, 0]+basis[i]
for j in range(K):
tmp_pointer[j*N] = 0.0
tmp_pointer[i*N] = 1.0
I = np.ndarray(K, dtype=np.int32)
basis_buf = I
for i in prange(K, schedule="static", nogil=True):
basis_buf[i] = basis[i]
free(basis)
return I, C
cdef object cmaxvol(int N, int R, float complex *lu, float complex *coef, int *basis,
float tol, int max_iters, int top_k_index):
cdef int *ipiv = <int *> malloc(R * sizeof(int))
cdef int *interchange = <int *> malloc(N * sizeof(int))
cdef float complex *tmp_row = <float complex *> malloc(R*sizeof(float complex))
cdef float complex *tmp_column = <float complex *> malloc(N*sizeof(float complex))
cdef int info = 0, size = N * R, i, j, tmp_int, i_one = 1, iters = 0
cdef int k_row, k_col
cdef char cR = 'R', cN = 'N', cU = 'U', cL = 'L'
cdef float complex d_one = 1, alpha, max_value
cdef float abs_max, tmp
if (ipiv == NULL or interchange == NULL or tmp_row == NULL or
tmp_column == NULL):
raise MemoryError("malloc failed to allocate temporary buffers")
if top_k_index == -1 or top_k_index > N:
top_k_index = N
if top_k_index < R:
top_k_index = R
cgetrf(&top_k_index, &R, lu, &N, ipiv, &info)
if info < 0:
raise ValueError("Internal maxvol_fullrank error, {} argument of"
" cgetrf_ had illegal value".format(info))
if info > 0:
raise ValueError("Input matrix must not be singular")
for i in prange(N, schedule="static", nogil=True):
interchange[i] = i
for i in prange(R, schedule="static", nogil=True):
j = ipiv[i]-1
if j!= i:
tmp_int = interchange[i]
interchange[i] = interchange[j]
interchange[j] = tmp_int
free(ipiv)
for i in prange(R, schedule="static", nogil=True):
basis[i] = interchange[i]
free(interchange)
ctrsm(&cR, &cU, &cN, &cN, &N, &R, &d_one, lu, &N, coef, &N)
ctrsm(&cR, &cL, &cN, &cU, &N, &R, &d_one, lu, &N, coef, &N)
while iters < max_iters:
abs_max = -1
for k_row in range(top_k_index):
for k_col in range(R):
tmp = cabsf(coef[k_row+k_col*N])
if tmp > abs_max:
abs_max = tmp
j = k_row
i = k_col
max_value = coef[j+i*N]
if abs_max > tol:
ccopy(&R, coef+j, &N, tmp_row, &i_one)
tmp_row[i] -= d_one
ccopy(&N, coef+i*N, &i_one, tmp_column, &i_one)
basis[i] = j
alpha = (-d_one)/max_value
cgeru(&N, &R, &alpha, tmp_column, &i_one, tmp_row, &i_one,
coef, &N)
iters += i_one
else:
break
free(tmp_row)
free(tmp_column)
return
cdef object zrect_maxvol(int N, int R, double complex *lu, double tol, int minK,
int maxK, int start_maxvol_iters, int identity_submatrix,
int top_k_index):
    cdef char cN = 'N'
cdef int i, j, i_one = 1, K, size = N*R
cdef double complex d_one = 1.0, d_zero = 0.0, l
cdef double tol2 = tol*tol, tmp, tmp2
cdef int *basis = <int *> malloc(N * sizeof(int))
cdef double *chosen = <double *> malloc(N * sizeof(double))
cdef int [:]basis_buf
cdef int coef_realloc_step = R, coef_columns = R+coef_realloc_step
cdef double complex *coef = <double complex *> malloc(N * coef_columns * sizeof(double complex))
cdef double complex *tmp_pointer
cdef double *L = <double *> malloc(N * sizeof(double))
cdef double complex *V = <double complex *> malloc(N * sizeof(double complex))
cdef double complex *tmp_row = <double complex *> malloc(N * sizeof(double complex))
cdef double complex [:,:] coef_buf
if top_k_index == -1 or top_k_index > N:
top_k_index = N
if top_k_index < R:
top_k_index = R
zcopy(&size, lu, &i_one, coef, &i_one)
tmp = 1.05 # tolerance for square maxvol
zmaxvol(N, R, lu, coef, basis, tmp, start_maxvol_iters, top_k_index)
# compute square length for each vector
for j in prange(top_k_index, schedule="static", nogil=True):
L[j] = 0.0
V[j] = 0.0
chosen[j] = 1.0
for i in range(R):
tmp_pointer = coef+i*N
for j in prange(top_k_index, schedule="static", nogil=True):
tmp = cabs(tmp_pointer[j])
L[j] += tmp*tmp
for i in prange(R, schedule="static", nogil=True):
L[basis[i]] = 0.0
chosen[basis[i]] = 0.0
i = idamax(&top_k_index, L, &i_one)-1
K = R
while K < minK or (L[i] > tol2 and K < maxK):
basis[K] = i
chosen[i] = 0.0
#zcopy(&K, coef+i, &N, tmp_row, &i_one)
tmp_pointer = coef+i
for j in prange(K, schedule="static", nogil=True):
tmp_row[j] = tmp_pointer[j*N].conjugate()
zgemv(&cN, &N, &K, &d_one, coef, &N, tmp_row, &i_one, &d_zero, V,
&i_one)
l = (-d_one)/(1+V[i])
zgerc(&N, &K, &l, V, &i_one, tmp_row, &i_one, coef, &N)
tmp = -l.real
if coef_columns <= K:
coef_columns += coef_realloc_step
coef = <double complex *> realloc(coef, N * coef_columns * sizeof(double complex))
tmp_pointer = coef+K*N
for j in prange(N, schedule="static", nogil=True):
tmp_pointer[j] = tmp*V[j]
for j in prange(top_k_index, schedule="static", nogil=True):
tmp2 = cabs(V[j])
L[j] -= tmp2*tmp2*tmp
L[j] *= chosen[j]
i = idamax(&top_k_index, L, &i_one)-1
K += 1
    free(L)
    free(V)
    free(tmp_row)
    free(chosen)
C = np.ndarray((N, K), order='F', dtype=np.complex128)
coef_buf = C
for i in prange(K, schedule="static", nogil=True):
for j in range(N):
coef_buf[j, i] = coef[i*N+j]
free(coef)
if identity_submatrix == 1:
for i in prange(K, schedule="static", nogil=True):
tmp_pointer = &coef_buf[0, 0]+basis[i]
for j in range(K):
tmp_pointer[j*N] = 0.0
tmp_pointer[i*N] = 1.0
I = np.ndarray(K, dtype=np.int32)
basis_buf = I
for i in prange(K, schedule="static", nogil=True):
basis_buf[i] = basis[i]
free(basis)
return I, C
cdef object zmaxvol(int N, int R, double complex *lu, double complex *coef, int *basis,
double tol, int max_iters, int top_k_index):
cdef int *ipiv = <int *> malloc(R * sizeof(int))
cdef int *interchange = <int *> malloc(N * sizeof(int))
cdef double complex *tmp_row = <double complex *> malloc(R*sizeof(double complex))
cdef double complex *tmp_column = <double complex *> malloc(N*sizeof(double complex))
cdef int info = 0, size = N * R, i, j, tmp_int, i_one = 1, iters = 0
cdef int k_row, k_col
cdef char cR = 'R', cN = 'N', cU = 'U', cL = 'L'
cdef double complex d_one = 1, alpha, max_value
cdef double abs_max, tmp
if (ipiv == NULL or interchange == NULL or tmp_row == NULL or
tmp_column == NULL):
raise MemoryError("malloc failed to allocate temporary buffers")
if top_k_index == -1 or top_k_index > N:
top_k_index = N
if top_k_index < R:
top_k_index = R
zgetrf(&top_k_index, &R, lu, &N, ipiv, &info)
if info < 0:
raise ValueError("Internal maxvol_fullrank error, {} argument of"
" zgetrf_ had illegal value".format(info))
if info > 0:
raise ValueError("Input matrix must not be singular")
for i in prange(N, schedule="static", nogil=True):
interchange[i] = i
for i in prange(R, schedule="static", nogil=True):
j = ipiv[i]-1
if j!= i:
tmp_int = interchange[i]
interchange[i] = interchange[j]
interchange[j] = tmp_int
free(ipiv)
for i in prange(R, schedule="static", nogil=True):
basis[i] = interchange[i]
free(interchange)
ztrsm(&cR, &cU, &cN, &cN, &N, &R, &d_one, lu, &N, coef, &N)
ztrsm(&cR, &cL, &cN, &cU, &N, &R, &d_one, lu, &N, coef, &N)
while iters < max_iters:
abs_max = -1
for k_row in range(top_k_index):
for k_col in range(R):
tmp = cabs(coef[k_row+k_col*N])
if tmp > abs_max:
abs_max = tmp
j = k_row
i = k_col
max_value = coef[j+i*N]
if abs_max > tol:
zcopy(&R, coef+j, &N, tmp_row, &i_one)
tmp_row[i] -= d_one
zcopy(&N, coef+i*N, &i_one, tmp_column, &i_one)
basis[i] = j
alpha = (-d_one)/max_value
zgeru(&N, &R, &alpha, tmp_column, &i_one, tmp_row, &i_one,
coef, &N)
iters += i_one
else:
break
free(tmp_row)
free(tmp_column)
return
<|end_of_text|>import numpy as np
cimport numpy as np
cimport allelefreq as af
cdef class AdmixProp:
"""
Admixture proportions for all samples and their relevant methods
fall under this class. The prior over admixture proportions is set
to be a symmetric Dirichlet distribution with parameter 1/K.
Arguments
N : int
number of samples
K : int
number of populations
"""
cdef long N,K
cdef np.ndarray alpha, var, xi
cdef list oldvar
cdef copy(self)
cdef require(self)
cdef update(self, np.ndarray[np.uint8_t, ndim=2] G, af.AlleleFreq pi)
cdef square_update(self, np.ndarray[np.uint8_t, ndim=2] G, af.AlleleFreq pi)
<|end_of_text|>cdef extern from * nogil:
ctypedef char* SlepcBVType "const char*"
SlepcBVType BVMAT
SlepcBVType BVSVEC
SlepcBVType BVVECS
SlepcBVType BVCONTIGUOUS
    ctypedef enum SlepcBVOrthogType "BVOrthogType":
BV_ORTHOG_CGS
BV_ORTHOG_MGS
ctypedef enum SlepcBVOrthogRefineType "BVOrthogRefineType":
BV_ORTHOG_REFINE_IFNEEDED
BV_ORTHOG_REFINE_NEVER
BV_ORTHOG_REFINE_ALWAYS
ctypedef enum SlepcBVOrthogBlockType "BVOrthogBlockType":
BV_ORTHOG_BLOCK_GS
BV_ORTHOG_BLOCK_CHOL
int BVCreate(MPI_Comm,SlepcBV*)
int BVDuplicate(SlepcBV,SlepcBV*)
int BVCopy(SlepcBV,SlepcBV)
int BVView(SlepcBV,PetscViewer)
int BVDestroy(SlepcBV*)
int BVSetType(SlepcBV,SlepcBVType)
int BVGetType(SlepcBV,SlepcBVType*)
int BVSetSizes(SlepcBV,PetscInt,PetscInt,PetscInt)
int BVSetSizesFromVec(SlepcBV,PetscVec,PetscInt)
int BVGetSizes(SlepcBV,PetscInt*,PetscInt*,PetscInt*)
int BVSetOptionsPrefix(SlepcBV,char[])
int BVGetOptionsPrefix(SlepcBV,char*[])
int BVAppendOptionsPrefix(SlepcBV,char[])
int BVSetFromOptions(SlepcBV)
int BVSetOrthogonalization(SlepcBV,SlepcBVOrthogType,SlepcBVOrthogRefineType,PetscReal,SlepcBVOrthogBlockType)
int BVGetOrthogonalization(SlepcBV,SlepcBVOrthogType*,SlepcBVOrthogRefineType*,PetscReal*,SlepcBVOrthogBlockType*)
int BVSetRandom(SlepcBV)
int BVSetMatrix(SlepcBV,PetscMat,PetscBool)
int BVGetMatrix(SlepcBV,PetscMat*,PetscBool*)
int BVApplyMatrix(SlepcBV,PetscVec,PetscVec)
int BVSetActiveColumns(SlepcBV,PetscInt,PetscInt)
int BVGetActiveColumns(SlepcBV,PetscInt*,PetscInt*)
int BVInsertVec(SlepcBV,PetscInt,PetscVec)
int BVInsertVecs(SlepcBV,PetscInt,PetscInt*,PetscVec*,PetscBool)
int BVGetColumn(SlepcBV,PetscInt,PetscVec*)
int BVRestoreColumn(SlepcBV,PetscInt,PetscVec*)
int BVDot(SlepcBV,SlepcBV,PetscMat)
int BVDotVec(SlepcBV,PetscVec,PetscScalar*)
int BVMatProject(SlepcBV,PetscMat,SlepcBV,PetscMat)
int BVMatMult(SlepcBV,PetscMat,SlepcBV)
int BVMatMultHermitianTranspose(SlepcBV,PetscMat,SlepcBV)
int BVMultVec(SlepcBV,PetscScalar,PetscScalar,PetscVec,PetscScalar*)
int BVScaleColumn(SlepcBV,PetscInt,PetscScalar)
int BVScale(SlepcBV,PetscScalar)
int BVNormColumn(SlepcBV,PetscInt,PetscNormType,PetscReal*)
int BVNorm(SlepcBV,PetscNormType,PetscReal*)
int BVOrthogonalizeVec(SlepcBV,PetscVec,PetscScalar*,PetscReal*,PetscBool*)
int BVOrthogonalize(SlepcBV,PetscMat)
cdef inline int BV_Sizes(
object size,
PetscInt *_n,
PetscInt *_N,
) except -1:
# unpack and get local and global sizes
cdef PetscInt n=PETSC_DECIDE, N=PETSC_DECIDE
cdef object on, oN
try:
on, oN = size
except (TypeError, ValueError):
on = None; oN = size
if on is not None: n = asInt(on)
if oN is not None: N = asInt(oN)
if n==PETSC_DECIDE and N==PETSC_DECIDE: raise ValueError(
"local and global sizes cannot be both 'DECIDE'")
# return result to the caller
if _n!= NULL: _n[0] = n
if _N!= NULL: _N[0] = N
return 0
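# Example (a minimal sketch): BV_Sizes(100, &n, &N) leaves n as PETSC_DECIDE
# and sets N to 100, while BV_Sizes((10, 100), &n, &N) sets n = 10 and N = 100.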
<|end_of_text|>from time import time
cdef float start_time
cdef int minscore, maxscore, i1, i2, i3, i4, i5, i6, i7, i8, i9, score, counter
cdef list mincircle, maxcircle
def read_distinct_primes(path):
primes = []
with open(path, 'r') as f:
for l in f.readlines():
distinct = True
for i in range(10):
if l.count(str(i)) > 1:
distinct = False
break
if distinct:
primes.append(l)
return primes
cdef calculate_score(circle, primes):
    cdef dict sc
    cdef tuple move
cdef int counter, score, temps, ii, i, ij, j
cdef str p
sc = {(i,j): min(abs(ij-ii), 9-ij+ii, 9-ii+ij) for ii,i in enumerate(circle) for ij,j in enumerate(circle)}
counter = 0
score = 0
for p in primes:
temps = 0
for i in range(6):
move = (int(p[i]), int(p[i+1]))
if move in sc:
temps += sc[move]
else:
temps = -1
break
if temps > 0:
score += temps
counter += 1
return counter, score
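# Worked example (a minimal sketch): with circle = [1,2,3,4,5,6,7,8,9], the
# move (1, 5) goes from position 0 to position 4 and costs
# min(|4-0|, 9-4+0, 9-0+4) = 4; each 7-digit prime with distinct digits adds
# the cost of its six consecutive-digit moves, or is skipped if any of its
# digits is missing from the circle.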
primes = read_distinct_primes('primes7.txt')
start_time = time()
minscore = 10000000
mincircle = []
maxscore = 0
maxcircle = []
for i1 in [0,1]:
qq = {i for i in range(i1)}
for i2 in {0,1,2,3,4,5,6,7,8,9}-{i1}-qq:
for i3 in {0,1,2,3,4,5,6,7,8,9}-{i1,i2}-qq:
for i4 in {0,1,2,3,4,5,6,7,8,9}-{i1,i2,i3}-qq:
for i5 in {0,1,2,3,4,5,6,7,8,9}-{i1,i2,i3,i4}-qq:
for i6 in {0,1,2,3,4,5,6,7,8,9}-{i1,i2,i3,i4,i5}-qq:
for i7 in {0,1,2,3,4,5,6,7,8,9}-{i1,i2,i3,i4,i5,i6}-qq:
for i8 in {0,1,2,3,4,5,6,7,8,9}-{i1,i2,i3,i4,i5,i6,i7}-qq:
for i9 in {0,1,2,3,4,5,6,7,8,9}-{i1,i2,i3,i4,i5,i6,i7,i8}-qq:
circle = [i1,i2,i3,i4,i5,i6,i7,i8,i9]
counter, score = calculate_score(circle, primes)
if score < minscore:
minscore = score
mincircle = circle
print('New min circle: '+str(circle)+' ; score: '+str(score) +' ('+str(counter)+')')
if score > maxscore:
maxscore = score
maxcircle = circle
print('New max circle: '+str(circle)+' ; score: '+str(score) +' ('+str(counter)+')')
print()
print('Min circle:')
print(mincircle)
print('Max circle:')
print(maxcircle)
print()
print('Time used (seconds): '+str(time()-start_time))<|end_of_text|># Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cython wrapper for ``surface_intersection.f90``."""
from bezier._status cimport Status
cdef extern from "bezier/surface_intersection.h":
cpdef enum SurfaceContained:
NEITHER = 0
FIRST = 1
SECOND = 2
ctypedef struct CurvedPolygonSegment:
double start
        double end
int edge_index
void newton_refine_surface(int* num_nodes, double* nodes, int* degree,
double* x_val, double* y_val, double* s, double* t, double* updated_s,
double* updated_t)
void locate_point_surface(int* num_nodes, double* nodes, int* degree,
double* x_val, double* y_val, double* s_val, double* t_val)
void surface_intersections(int* num_nodes1, double* nodes1, int* degree1,
int* num_nodes2, double* nodes2, int* degree2, int* segment_ends_size,
int* segment_ends, int* segments_size, CurvedPolygonSegment* segments,
int* num_intersected, SurfaceContained* contained, Status* status)
void free_surface_intersections_workspace()
<|end_of_text|>'''
Tree procedures and stuff
'''
import sys
from cnode import Node
import Extras
#This takes in the newick and the
#seq data then puts them in a data
#structure that can be preorder or
#postorder traversed pretty easily
def build(instr):
#print "Entered build"
root = None
name_array =[]
index = 0
nextchar = instr[index]
begining = "Yep"
keepgoing = True
current_node = None
#keeps going until the value becomes false
while keepgoing == True:
#This situation will only happen at the very beginning but
#when it hits this it will create a root and change begining
#to no
if nextchar == "(" and begining == "Yep":
root = Node()
current_node = root
begining = "No"
        #This happens any time there is an open bracket that's not the
#beginning
elif nextchar == "(" and begining == "No":
newnode = Node()
current_node.add_child(newnode)
current_node = newnode
#This indicates that you are in a clade and tells the
#program to move back one to grab the sister to the clade
elif nextchar == ',':
current_node = current_node.parent
#This says you are closing a clade and therefore it moves
#back to where the parent node is which allows the name
#to be added to the parent node
elif nextchar == ")":
#print "Closing Clade"
current_node = current_node.parent
index += 1
nextchar = instr[index]
while True:
if nextchar == ',' or nextchar == ')' or nextchar == ':' \
or nextchar == ';' or nextchar == '[':
break
name += nextchar
index += 1
nextchar = instr[index]
current_node.label = name
index -= 1
#This indicates everything is done so keepgoing becomes false
elif nextchar == ';':
keepgoing = False
break
#This indicates you have branch lengths so it grabs the branch
#lengths turns them into floats and puts them in the current node
elif nextchar == ":":
index += 1
nextchar = instr[index]
while True:
if nextchar == ',' or nextchar == ')' or nextchar == ':' \
or nextchar == ';' or nextchar == '[':
break
branch += nextchar
index += 1
nextchar = instr[index]
current_node.length = float(branch)
index -= 1
        #This is for if any whitespace exists
        elif nextchar == ' ':
index += 1
nextchar = instr[index]
#This is for when any taxa name is hit, it will concatenate
#the taxa names together and add the name
else: # this is an external named node
newnode = Node()
current_node.add_child(newnode)
current_node = newnode
current_node.istip = True
while True:
if nextchar == ',' or nextchar == ')' or nextchar == ':' \
or nextchar == ';' or nextchar == '[':
break
name += nextchar
index += 1
nextchar = instr[index]
current_node.label = name
name_array.append(name)
index -= 1
if index < len(instr) - 1:
index += 1
nextchar = instr[index]
name = ""
branch = ""
return root
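#Example (a minimal sketch):
#   root = build("((A:1.0,B:2.0)90:0.5,C:1.5);")
#   #the inner clade carries label "90" and length 0.5; the tips keep their
#   #labels A, B, C and branch lengths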
#get segments of a bipart
def clade_post_order(clade,clade_names):
for x in clade.children:
if x.istip:
clade_names.append(x.label)
clade_post_order(x,clade_names)
return clade_names
#Post order traverse the whole tree
def post_order(tree,support,all_names,t_to_clade):
for x in tree.children:
#account for trees that don't have support
if x.children and x.label == "":
#print "Clade does not have support value"
clade_names = []
clade = []
clade_names = clade_post_order(x,clade_names)
clade = get_right(clade_names, all_names)
t_to_clade.append(clade)
elif x.children and support <= int(x.label):
#print "Clade has support value: " + x.label
clade_names = []
clade = []
clade_names = clade_post_order(x,clade_names)
clade = get_right(clade_names, all_names)
t_to_clade.append(clade)
post_order(x,support,all_names,t_to_clade)
return t_to_clade
#get the other side of the bipartition
def get_right(clade_names, all_names):
mis1 = list(set(all_names.split(",")) - set(clade_names))
clade_names.append("|")
return ",".join(clade_names + mis1)
def comp_biparts(tree_bipart,all_biparts):
bi = tree_bipart.split("|")
part1 = bi[0][:-1].split(",")
part2 = bi[1][1:].split(",")
for x in all_biparts:
comp_bi = x.split("|")
comp_part1 = comp_bi[0][:-1].split(",")
if len(part1) == len(comp_part1):
dif = list(set(part1) - set(comp_part1))
if len(dif) == 0:
return True
if len(part2) == len(comp_part1):
dif = list(set(part2) - set(comp_part1))
if len(dif) == 0:
return True
return False
#compare if any incoming biparts are new
def get_biparts(trees_clades, all_biparts):
new_biparts = []
count = 0
for x in trees_clades:
bin = comp_biparts(x,all_biparts)
if bin == False:
new_biparts.append(x)
return new_biparts
#ugh...this is ugly
def dissect_trees(tr,all_names,support):
all_biparts = []
count = 0
for x in tr:
sys.stderr.write("tree " + str(count) + "\r")
t_to_clade = []
new_biparts = []
x = x.rstrip("\r\n")
tree = build(x)
trees_clades = post_order(tree,support,all_names,t_to_clade)
if len(all_biparts) == 0:
all_biparts = trees_clades
else:
new_biparts = get_biparts(trees_clades, all_biparts)
all_biparts += new_biparts
count += 1
return all_biparts
def make_constraints(biparts, out_folder, outf):
outb = open(out_folder + "/bipartitions.txt", "w")
count = 0
constraint_list = []
for x in biparts:
constraint_name = "constraint_" + str(count) + ".tre"
c = x.split("|")
out = open(out_folder + "/Constraints/" + constraint_name, "w")
#checks for 2 to account for the split leading to a blank one
if len(c[0].split(",")) == 2 or len(c[1].split(",")) == 2:
message = "can't make constraint for \"" + x + "\" it may be rooted"
            Extras.get_time(message, outf)
            continue
constraint = "((" + c[0][:-1] + ")" + c[1] + ");"
outb.write(constraint_name + ": " + c[0][:-1] + "|" + c[1][1:] + "\n")
out.write(constraint)
constraint_list.append(constraint_name)
count += 1
return constraint_list
<|end_of_text|># cython: profile=True, binding=True, infer_types=True
from cpython.object cimport PyObject
from libc.stdint cimport int64_t
from typing import Optional
cdef extern from "polyleven.c":
int64_t polyleven(PyObject *o1, PyObject *o2, int64_t k)
cpdef int64_t levenshtein(a: str, b: str, k: Optional[int] = None):
if k is None:
k = -1
return polyleven(<PyObject*>a, <PyObject*>b, k)
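# Example (a minimal sketch):
#
#   levenshtein("kitten", "sitting")        # 3
#   levenshtein("kitten", "sitting", k=1)   # bounded search; polyleven returns
#                                           # k + 1 once the distance exceeds k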
<|end_of_text|>from wavefunc_gpu cimport ShWavefuncGPU, ShWavefuncArrayGPU
from field cimport Field
from atom cimport ShAtomCache
cdef class WfArrayGPUWorkspace:
def __cinit__(self, ShAtomCache atom_cache, ShGrid grid, UabsCache uabs_cache, int N):
self.cdata = new WfArrayGpu(atom_cache.cdata, grid.data, uabs_cache.cdata, N)
self.atom_cache = atom_cache
self.grid = grid
self.uabs_cache = uabs_cache
def __init__(self, ShAtomCache atom_cache, ShGrid grid, UabsCache uabs_cache, int N):
pass
def __dealloc__(self):
del self.cdata
def prop(self, ShWavefuncArrayGPU wf_array, double[:] E, double dt):
self.cdata.prop(wf_array.cdata, &E[0], dt)
def prop_abs(self, ShWavefuncArrayGPU wf_array, double dt):
self.cdata.prop_abs(wf_array.cdata, dt)
def prop_at(self, ShWavefuncArrayGPU wf_array, double dt):
self.cdata.prop_at(wf_array.cdata, dt)
cdef class WfGPUWorkspace:
def __cinit__(self, ShAtomCache atom_cache, ShGrid grid, UabsCache uabs_cache, int gpuGridNl = 1024, int threadsPerBlock = 32):
self.cdata = new WfGpu(atom_cache.cdata[0], grid.data[0], uabs_cache.cdata[0], gpuGridNl, threadsPerBlock)
self.atom_cache = atom_cache
self.grid = grid
self.uabs_cache = uabs_cache
def __init__(self, ShAtomCache atom_cache, ShGrid grid, UabsCache uabs_cache, int gpuGridNl = 1024, int threadsPerBlock = 32):
pass
def __dealloc__(self):
del self.cdata
def prop(self, ShWavefuncGPU wf, Field field, double t, double dt):
self.cdata.prop(wf.cdata[0], field.cdata[0], t, dt)
def prop_abs(self, ShWavefuncGPU wf, double dt):
self.cdata.prop_abs(wf.cdata[0], dt)
def prop_at(self, ShWavefuncGPU wf, double dt):
self.cdata.prop_at(wf.cdata[0], dt)
<|end_of_text|># cython: embedsignature=True
# cython: binding=True
"""
# use these for profiling (note linetracing is broken on PyPy)
# cython: linetrace=True
# cython: profile=True
# distutils: define_macros=[CYTHON_TRACE=1, CYTHON_TRACE_NOGIL=1]
"""
import numpy as np
import numbers
import os
import warnings
from libc.stdio cimport printf, FILE, SEEK_SET, SEEK_CUR, SEEK_END
from libc.stdlib cimport malloc, free, realloc
from libc.stdint cimport int64_t, uint64_t, int32_t, uint32_t
cimport cython
cimport numpy as np
np.import_array()
# Marks operation success
ctypedef enum tng_function_status: TNG_SUCCESS, TNG_FAILURE, TNG_CRITICAL
# Marks use of hash checking in file read
ctypedef enum tng_hash_mode: TNG_SKIP_HASH, TNG_USE_HASH
# Datatypes that can be read off disk
ctypedef enum tng_datatypes: TNG_CHAR_DATA, TNG_INT_DATA, TNG_FLOAT_DATA, \
TNG_DOUBLE_DATA
# Marks particle or non-particle data
ctypedef enum tng_particle_dependency: TNG_NON_PARTICLE_BLOCK_DATA, \
TNG_PARTICLE_BLOCK_DATA
# Indicates compression type
ctypedef enum tng_compression: TNG_UNCOMPRESSED, TNG_XTC_COMPRESSION, \
TNG_TNG_COMPRESSION, TNG_GZIP_COMPRESSION
# TNG alias for T/F
ctypedef enum tng_bool: TNG_FALSE, TNG_TRUE
# Indicates variable number of atoms
ctypedef enum tng_variable_n_atoms_flag: TNG_CONSTANT_N_ATOMS, \
TNG_VARIABLE_N_ATOMS
# Flag to indicate frame dependent data.
DEF TNG_FRAME_DEPENDENT = 1
# Flag to indicate particle dependent data.
DEF TNG_PARTICLE_DEPENDENT = 2
# GROUP 1 Standard non-trajectory blocks
# Block IDs of standard non-trajectory blocks.
DEF TNG_GENERAL_INFO = 0x0000000000000000LL
DEF TNG_MOLECULES = 0x0000000000000001LL
DEF TNG_TRAJECTORY_FRAME_SET = 0x0000000000000002LL
DEF TNG_PARTICLE_MAPPING = 0x0000000000000003LL
# GROUP 2 Standard trajectory blocks
# Block IDs of standard trajectory blocks. Box shape and partial charges can
# be either trajectory blocks or non-trajectory blocks
DEF TNG_TRAJ_BOX_SHAPE = 0x0000000010000000LL
DEF TNG_TRAJ_POSITIONS = 0x0000000010000001LL
DEF TNG_TRAJ_VELOCITIES = 0x0000000010000002LL
DEF TNG_TRAJ_FORCES = 0x0000000010000003LL
DEF TNG_TRAJ_PARTIAL_CHARGES = 0x0000000010000004LL
DEF TNG_TRAJ_FORMAL_CHARGES = 0x0000000010000005LL
DEF TNG_TRAJ_B_FACTORS = 0x0000000010000006LL
DEF TNG_TRAJ_ANISOTROPIC_B_FACTORS = 0x0000000010000007LL
DEF TNG_TRAJ_OCCUPANCY = 0x0000000010000008LL
DEF TNG_TRAJ_GENERAL_COMMENTS = 0x0000000010000009LL
DEF TNG_TRAJ_MASSES = 0x0000000010000010LL
# GROUP 3 GROMACS data block IDs
# Block IDs of data blocks specific to GROMACS.
DEF TNG_GMX_LAMBDA = 0x1000000010000000LL
DEF TNG_GMX_ENERGY_ANGLE = 0x1000000010000001LL
DEF TNG_GMX_ENERGY_RYCKAERT_BELL = 0x1000000010000002LL
DEF TNG_GMX_ENERGY_LJ_14 = 0x1000000010000003LL
DEF TNG_GMX_ENERGY_COULOMB_14 = 0x1000000010000004LL
# NOTE changed from TNG_GMX_ENERGY_LJ_(SR)
DEF TNG_GMX_ENERGY_LJ_SR = 0x1000000010000005LL
# NOTE changed from TNG_GMX_ENERGY_COULOMB_(SR)
DEF TNG_GMX_ENERGY_COULOMB_SR = 0x1000000010000006LL
DEF TNG_GMX_ENERGY_COUL_RECIP = 0x1000000010000007LL
DEF TNG_GMX_ENERGY_POTENTIAL = 0x1000000010000008LL
DEF TNG_GMX_ENERGY_KINETIC_EN = 0x1000000010000009LL
DEF TNG_GMX_ENERGY_TOTAL_ENERGY = 0x1000000010000010LL
DEF TNG_GMX_ENERGY_TEMPERATURE = 0x1000000010000011LL
DEF TNG_GMX_ENERGY_PRESSURE = 0x1000000010000012LL
DEF TNG_GMX_ENERGY_CONSTR_RMSD = 0x1000000010000013LL
DEF TNG_GMX_ENERGY_CONSTR2_RMSD = 0x1000000010000014LL
DEF TNG_GMX_ENERGY_BOX_X = 0x1000000010000015LL
DEF TNG_GMX_ENERGY_BOX_Y = 0x1000000010000016LL
DEF TNG_GMX_ENERGY_BOX_Z = 0x1000000010000017LL
DEF TNG_GMX_ENERGY_BOXXX = 0x1000000010000018LL
DEF TNG_GMX_ENERGY_BOXYY = 0x1000000010000019LL
DEF TNG_GMX_ENERGY_BOXZZ = 0x1000000010000020LL
DEF TNG_GMX_ENERGY_BOXYX = 0x1000000010000021LL
DEF TNG_GMX_ENERGY_BOXZX = 0x1000000010000022LL
DEF TNG_GMX_ENERGY_BOXZY = 0x1000000010000023LL
DEF TNG_GMX_ENERGY_BOXVELXX = 0x1000000010000024LL
DEF TNG_GMX_ENERGY_BOXVELYY = 0x1000000010000025LL
DEF TNG_GMX_ENERGY_BOXVELZZ = 0x1000000010000026LL
DEF TNG_GMX_ENERGY_BOXVELYX = 0x1000000010000027LL
DEF TNG_GMX_ENERGY_BOXVELZX = 0x1000000010000028LL
DEF TNG_GMX_ENERGY_BOXVELZY = 0x1000000010000029LL
DEF TNG_GMX_ENERGY_VOLUME = 0x1000000010000030LL
DEF TNG_GMX_ENERGY_DENSITY = 0x1000000010000031LL
DEF TNG_GMX_ENERGY_PV = 0x1000000010000032LL
DEF TNG_GMX_ENERGY_ENTHALPY = 0x1000000010000033LL
DEF TNG_GMX_ENERGY_VIR_XX = 0x1000000010000034LL
DEF TNG_GMX_ENERGY_VIR_XY = 0x1000000010000035LL
DEF TNG_GMX_ENERGY_VIR_XZ = 0x1000000010000036LL
DEF TNG_GMX_ENERGY_VIR_YX = 0x1000000010000037LL
DEF TNG_GMX_ENERGY_VIR_YY = 0x1000000010000038LL
DEF TNG_GMX_ENERGY_VIR_YZ = 0x1000000010000039LL
DEF TNG_GMX_ENERGY_VIR_ZX = 0x1000000010000040LL
DEF TNG_GMX_ENERGY_VIR_ZY = 0x1000000010000041LL
DEF TNG_GMX_ENERGY_VIR_ZZ = 0x1000000010000042LL
DEF TNG_GMX_ENERGY_SHAKEVIR_XX = 0x1000000010000043LL
DEF TNG_GMX_ENERGY_SHAKEVIR_XY = 0x1000000010000044LL
DEF TNG_GMX_ENERGY_SHAKEVIR_XZ = 0x1000000010000045LL
DEF TNG_GMX_ENERGY_SHAKEVIR_YX = 0x1000000010000046LL
DEF TNG_GMX_ENERGY_SHAKEVIR_YY = 0x1000000010000047LL
DEF TNG_GMX_ENERGY_SHAKEVIR_YZ = 0x1000000010000048LL
DEF TNG_GMX_ENERGY_SHAKEVIR_ZX = 0x1000000010000049LL
DEF TNG_GMX_ENERGY_SHAKEVIR_ZY = 0x1000000010000050LL
DEF TNG_GMX_ENERGY_SHAKEVIR_ZZ = 0x1000000010000051LL
DEF TNG_GMX_ENERGY_FORCEVIR_XX = 0x1000000010000052LL
DEF TNG_GMX_ENERGY_FORCEVIR_XY = 0x1000000010000053LL
DEF TNG_GMX_ENERGY_FORCEVIR_XZ = 0x1000000010000054LL
DEF TNG_GMX_ENERGY_FORCEVIR_YX = 0x1000000010000055LL
DEF TNG_GMX_ENERGY_FORCEVIR_YY = 0x1000000010000056LL
DEF TNG_GMX_ENERGY_FORCEVIR_YZ = 0x1000000010000057LL
DEF TNG_GMX_ENERGY_FORCEVIR_ZX = 0x1000000010000058LL
DEF TNG_GMX_ENERGY_FORCEVIR_ZY = 0x1000000010000059LL
DEF TNG_GMX_ENERGY_FORCEVIR_ZZ = 0x1000000010000060LL
DEF TNG_GMX_ENERGY_PRES_XX = 0x1000000010000061LL
DEF TNG_GMX_ENERGY_PRES_XY = 0x1000000010000062LL
DEF TNG_GMX_ENERGY_PRES_XZ = 0x1000000010000063LL
DEF TNG_GMX_ENERGY_PRES_YX = 0x1000000010000064LL
DEF TNG_GMX_ENERGY_PRES_YY = 0x1000000010000065LL
DEF TNG_GMX_ENERGY_PRES_YZ = 0x1000000010000066LL
DEF TNG_GMX_ENERGY_PRES_ZX = 0x1000000010000067LL
DEF TNG_GMX_ENERGY_PRES_ZY = 0x1000000010000068LL
DEF TNG_GMX_ENERGY_PRES_ZZ = 0x1000000010000069LL
DEF TNG_GMX_ENERGY_SURFXSURFTEN = 0x1000000010000070LL
DEF TNG_GMX_ENERGY_MUX = 0x1000000010000071LL
DEF TNG_GMX_ENERGY_MUY = 0x1000000010000072LL
DEF TNG_GMX_ENERGY_MUZ = 0x1000000010000073LL
DEF TNG_GMX_ENERGY_VCOS = 0x1000000010000074LL
DEF TNG_GMX_ENERGY_VISC = 0x1000000010000075LL
DEF TNG_GMX_ENERGY_BAROSTAT = 0x1000000010000076LL
DEF TNG_GMX_ENERGY_T_SYSTEM = 0x1000000010000077LL
DEF TNG_GMX_ENERGY_LAMB_SYSTEM = 0x1000000010000078LL
DEF TNG_GMX_SELECTION_GROUP_NAMES = 0x1000000010000079LL
DEF TNG_GMX_ATOM_SELECTION_GROUP = 0x1000000010000080LL
cdef extern from "tng/tng_io.h":
cdef enum:
TNG_MAX_STR_LEN
TNG_MD5_HASH_LEN
# note that the _t suffix is a typedef mangle for a pointer to the struct
ctypedef struct tng_molecule_t:
pass
struct tng_particle_mapping:
# The index number of the first particle in this mapping block
int64_t num_first_particle
# The number of particles list in this mapping block
int64_t n_particles
# the mapping of index numbers to the real particle numbers in the
# trajectory. real_particle_numbers[0] is the real particle number
# (as it is numbered in the molecular system) of the first particle
# in the data blocks covered by this particle mapping block
int64_t * real_particle_numbers
struct tng_trajectory_frame_set:
# The number of different particle mapping blocks present.
int64_t n_mapping_blocks
# The atom mappings of this frame set
tng_particle_mapping * mappings
# The first frame of this frame set
int64_t first_frame
# The number of frames in this frame set
int64_t n_frames
# The number of written frames in this frame set (used when writing
        # one frame at a time).
int64_t n_written_frames
# The number of frames not yet written to file in this frame set
# (used from the utility functions to finish the writing properly.
int64_t n_unwritten_frames
# A list of the number of each molecule type - only used when using
# variable number of atoms
int64_t * molecule_cnt_list
# The number of particles/atoms - only used when using variable
# number of atoms
int64_t n_particles
# The file position of the next frame set
int64_t next_frame_set_file_pos
# The file position of the previous frame set
int64_t prev_frame_set_file_pos
# The file position of the frame set one long stride step ahead
int64_t medium_stride_next_frame_set_file_pos
# The file position of the frame set one long stride step behind
int64_t medium_stride_prev_frame_set_file_pos
# The file position of the frame set one long stride step ahead
int64_t long_stride_next_frame_set_file_pos
# The file position of the frame set one long stride step behind
int64_t long_stride_prev_frame_set_file_pos
# Time stamp (in seconds) of first frame in frame set
double first_frame_time
# The data blocks in a frame set are trajectory data blocks
# The number of trajectory data blocks of particle dependent data
int n_particle_data_blocks
# A list of data blocks containing particle dependent data
tng_data * tr_particle_data
# The number of trajectory data blocks independent of particles
int n_data_blocks
        # A list of data blocks containing particle independent data
tng_data * tr_data
struct tng_data:
# The block ID of the data block containing this particle data.
# This is used to determine the kind of data that is stored
int64_t block_id
# The name of the data block. This is used to determine the kind of
# data that is stored
char * block_name
# The type of data stored.
char datatype
# A flag to indicate if this data block contains frame
# and/or particle dependent data
char dependency
# The frame number of the first data value
int64_t first_frame_with_data
# The number of frames in this frame set
int64_t n_frames
# The number of values stored per frame
int64_t n_values_per_frame
        # The number of frames between each data point - e.g. when
# storing sparse data.
int64_t stride_length
# ID of the CODEC used for compression 0 == no compression.
int64_t codec_id
# If reading one frame at a time this is the last read frame
int64_t last_retrieved_frame
# The multiplier used for getting integer values for compression
double compression_multiplier
# A 1-dimensional array of values of length
# [sizeof (datatype)] * n_frames * n_particles*n_values_per_frame
void * values
# If storing character data store it in a 3-dimensional array
char**** strings
struct tng_trajectory:
# The path of the input trajectory file
char * input_file_path
# A handle to the input file
FILE * input_file
# The length of the input file
int64_t input_file_len
# The path of the output trajectory file
char * output_file_path
# A handle to the output file
FILE * output_file
# Function to swap 32 bit values to and from the endianness of the
        # input file
tng_function_status(*input_endianness_swap_func_32)(const tng_trajectory*, uint32_t*)
# Function to swap 64 bit values to and from the endianness of the
# input file
tng_function_status(*input_endianness_swap_func_64)(const tng_trajectory*, uint64_t*)
        # Function to swap 32 bit values to and from the endianness of the
        # output file
        tng_function_status(*output_endianness_swap_func_32)(const tng_trajectory*, uint32_t*)
        # Function to swap 64 bit values to and from the endianness of the
        # output file
tng_function_status(*output_endianness_swap_func_64)(const tng_trajectory*, uint64_t*)
# The endianness of 32 bit values of the current computer
char endianness_32
# The endianness of 64 bit values of the current computer
char endianness_64
# The name of the program producing this trajectory
char * first_program_name
# The forcefield used in the simulations
char * forcefield_name
# The name of the user running the simulations
char * first_user_name
# The name of the computer on which the simulations were performed
char * first_computer_name
# The PGP signature of the user creating the file.
char * first_pgp_signature
# The name of the program used when making last modifications
# to the file
char * last_program_name
# The name of the user making the last modifications to the file
char * last_user_name
# The name of the computer on which the last modifications were made
char * last_computer_name
# The PGP signature of the user making the last modifications to the
# file.
char * last_pgp_signature
        # The time (in seconds since 1970) when the file was created
int64_t time
# The exponential of the value of the distance unit used. The default
# distance unit is nm (1e-9), i.e. distance_unit_exponential = -9. If
# the measurements are in Å the distance_unit_exponential = -10.
int64_t distance_unit_exponential
# A flag indicating if the number of atoms can vary throughout the
# simulation, e.g. using a grand canonical ensemble.
char var_num_atoms_flag
# The number of frames in a frame set. It is allowed to have frame sets
# with fewer frames, but this will help searching for specific frames
int64_t frame_set_n_frames
# The number of frame sets in a medium stride step
int64_t medium_stride_length
# The number of frame sets in a long stride step
int64_t long_stride_length
# The current (can change from one frame set to another) time length
# (in seconds) of one frame.
double time_per_frame
# The number of different kinds of molecules in the trajectory
int64_t n_molecules
# A list of molecules in the trajectory
tng_molecule_t molecules
# A list of the count of each molecule - if using variable number of
# particles this will be specified in each frame set
int64_t * molecule_cnt_list
# The total number of particles/atoms. If using variable number of
# particles this will be specified in each frame set
int64_t n_particles
# The pos in the src file of the first frame set
int64_t first_trajectory_frame_set_input_file_pos
# The pos in the dest file of the first frame set
int64_t first_trajectory_frame_set_output_file_pos
# The pos in the src file of the last frame set
int64_t last_trajectory_frame_set_input_file_pos
# The pos in the dest file of the last frame set
int64_t last_trajectory_frame_set_output_file_pos
# The currently active frame set
tng_trajectory_frame_set current_trajectory_frame_set
# The pos in the src file of the current frame set
int64_t current_trajectory_frame_set_input_file_pos
# The pos in the dest file of the current frame set
int64_t current_trajectory_frame_set_output_file_pos
# The number of frame sets in the trajectory N.B. Not saved in file and
# cannot be trusted to be up-to-date
int64_t n_trajectory_frame_sets
# These data blocks are non-trajectory data blocks
# The number of non-frame dependent particle dependent data blocks
int n_particle_data_blocks
# A list of data blocks containing particle dependent data
tng_data * non_tr_particle_data
# The number of frame and particle independent data blocks
int n_data_blocks
        # A list of frame and particle independent data blocks
tng_data * non_tr_data
# TNG compression algorithm for compressing positions
int * compress_algo_pos
# TNG compression algorithm for compressing velocities
int * compress_algo_vel
# The precision used for lossy compression
double compression_precision
struct tng_gen_block:
# The size of the block header in bytes
int64_t header_contents_size
# The size of the block contents in bytes
int64_t block_contents_size
# The ID of the block to determine its type
int64_t id
# The MD5 hash of the block to verify integrity
char md5_hash[16] # TNG_MD5_HASH_LEN == 16
# The name of the block
char * name
# The library version used to write the block
int64_t block_version
int64_t alt_hash_type
int64_t alt_hash_len
char * alt_hash
int64_t signature_type
int64_t signature_len
char * signature
# The full block header contents
char * header_contents
# The full block contents
char * block_contents
tng_function_status tng_util_trajectory_open(
const char * filename,
const char mode,
        tng_trajectory ** tng_data_p) nogil
tng_function_status tng_util_trajectory_close(
        tng_trajectory ** tng_data_p) nogil
tng_function_status tng_num_frames_get(
const tng_trajectory * tng_data,
int64_t * n) nogil
tng_function_status tng_num_particles_get(
const tng_trajectory * tng_data,
int64_t * n) nogil
tng_function_status tng_distance_unit_exponential_get(
const tng_trajectory * tng_data,
int64_t * exp) nogil
tng_function_status tng_util_time_of_frame_get(
const tng_trajectory * tng_data,
const int64_t frame_nr,
double * time) nogil
tng_function_status tng_block_read_next(
tng_trajectory * tng_data,
tng_gen_block * block_data,
char hash_mode) nogil
tng_function_status tng_block_init(
tng_gen_block ** block_p) nogil
tng_function_status tng_block_header_read(
tng_trajectory * tng_data,
tng_gen_block * block) nogil
tng_function_status tng_num_frame_sets_get(
tng_trajectory * tng_data,
int64_t * n) nogil
tng_function_status tng_block_destroy(
tng_gen_block ** block_p) nogil
tng_function_status tng_data_get_stride_length(
tng_trajectory * tng_data,
int64_t block_id,
int64_t frame,
int64_t * stride_length) nogil
tng_function_status tng_util_trajectory_next_frame_present_data_blocks_find(
tng_trajectory * tng_data,
int64_t current_frame,
int64_t n_requested_data_block_ids,
int64_t * requested_data_block_ids,
int64_t * next_frame,
int64_t * n_data_blocks_in_next_frame,
        int64_t ** data_block_ids_in_next_frame
) nogil
tng_function_status tng_data_block_name_get(
tng_trajectory * tng_data,
const int64_t block_id,
char * name,
const int max_len) nogil
tng_function_status tng_data_block_dependency_get(
tng_trajectory * tng_data,
const int64_t block_id,
int * block_dependency) nogil
tng_function_status tng_util_particle_data_next_frame_read(
tng_trajectory * tng_data,
const int64_t block_id,
void ** values,
char * data_type,
int64_t * retrieved_frame_number,
double * retrieved_time) nogil
tng_function_status tng_util_non_particle_data_next_frame_read(
tng_trajectory * tng_data,
const int64_t block_id,
void ** values,
char * data_type,
int64_t * retrieved_frame_number,
double * retrieved_time) nogil
tng_function_status tng_data_block_num_values_per_frame_get(
tng_trajectory * tng_data,
int64_t block_id,
int64_t * n_values_per_frame) nogil
tng_function_status tng_util_frame_current_compression_get(
tng_trajectory * tng_data,
int64_t block_id,
int64_t * codec_id,
double * factor) nogil
tng_function_status tng_util_num_frames_with_data_of_block_id_get(
tng_trajectory * tng_data,
int64_t block_id,
int64_t * n_frames) nogil
tng_function_status tng_gen_data_vector_interval_get(
tng_trajectory * tng_data,
const int64_t block_id,
const tng_bool is_particle_data,
const int64_t start_frame_nr,
const int64_t end_frame_nr,
const char hash_mode,
void ** values,
int64_t * n_particles,
int64_t * stride_length,
int64_t * n_values_per_frame,
char * type) nogil
tng_function_status tng_num_particles_variable_get(
tng_trajectory * tng_data, char * variable) nogil
cdef int64_t gcd(int64_t a, int64_t b):
cdef int64_t temp
    while b > 0:
temp = b
b = a % b
a = temp
return a
cdef int64_t gcd_list(list a):
cdef int size = len(a)
cdef int i
cdef int64_t result = a[0]
for i in range(1, size):
result = gcd(result, a[i])
return result
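# Worked example of the reduction above (illustrative only):
#   gcd_list([10, 50, 100]) folds left to right:
#   gcd(10, 50) -> 10, then gcd(10, 100) -> 10
# so a file with those block strides can be stepped every 10 frames.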
cdef class TrajectoryWrapper:
"""A wrapper class for a tng_trajectory"""
cdef tng_trajectory * _ptr
cdef bint ptr_owner
def __cinit__(self):
self.ptr_owner = False
def __dealloc__(self):
# De-allocate if not null and flag is set
if self._ptr is not NULL and self.ptr_owner is True:
free(self._ptr)
self._ptr = NULL
@staticmethod
cdef TrajectoryWrapper from_ptr(tng_trajectory * _ptr, bint owner=False):
"""Factory function to create WrapperClass objects from
given tng_trajectory pointer.
Setting ``owner`` flag to ``True`` causes
the extension type to ``free`` the structure pointed to by ``_ptr``
when the wrapper object is deallocated."""
# Call to __new__ bypasses __init__ constructor
cdef TrajectoryWrapper wrapper = \
TrajectoryWrapper.__new__(TrajectoryWrapper)
wrapper._ptr = _ptr
wrapper.ptr_owner = owner
return wrapper
@staticmethod
cdef TrajectoryWrapper new_struct():
"""Factory function to create WrapperClass objects with
newly allocated tng_trajectory"""
cdef tng_trajectory * _ptr = \
            <tng_trajectory *> malloc(sizeof(tng_trajectory))
if _ptr is NULL:
raise MemoryError
return TrajectoryWrapper.from_ptr(_ptr, owner=True)
cdef class TNGFileIterator:
"""File handle object for TNG files
Supports use as a context manager ("with" blocks).
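
    A minimal usage sketch; the import name ``pytng`` and the file name
    ``traj.tng`` are illustrative assumptions:

    .. code-block:: pycon

        >>> import pytng
        >>> with pytng.TNGFileIterator("traj.tng", "r") as tng:
        ...     positions = tng.make_ndarray_for_block_from_name(
        ...         "TNG_TRAJ_POSITIONS")
        ...     tng.read_step(0).get_positions(positions)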
"""
# tng_trajectory pointer
cdef tng_trajectory * _traj_p
# trajectory wrapper
cdef TrajectoryWrapper _traj
# filename
cdef readonly fname
# mode (r,w,a)
cdef str mode
# enable/disable debug output
cdef bint debug
# mark trajectory to be closed/open
cdef int is_open
# have we reached the end of the file
cdef int reached_eof
# integrator timestep
cdef int64_t step
# number of integrator timesteps
cdef int64_t _n_steps
# number of particles
cdef int64_t _n_particles
# distance unit
cdef float _distance_scale
# stride at which each block is written
cdef dict _frame_strides
# same but indexed by block id
cdef dict _frame_strides_blockid
# number of actual frames with data for each block
cdef dict _n_data_frames
# the number of values per frame for each data block
cdef dict _values_per_frame
# particle dependencies for each data block
cdef dict _particle_dependencies
# greatest common divisor of data strides
cdef int64_t _gcd
# holds data at the current trajectory timestep
cdef TNGCurrentIntegratorStep current_step
def __cinit__(self, fname, mode='r', debug=False):
self._traj = TrajectoryWrapper.from_ptr(self._traj_p, owner=True)
self.fname = fname
self.debug = debug
self.step = 0
self._n_steps = -1
self._n_particles = -1
self._distance_scale = 0.0
self._frame_strides = {}
self._n_data_frames = {}
self._values_per_frame = {}
self._particle_dependencies = {}
self._gcd = -1
self._open(self.fname, mode)
def __dealloc__(self):
self._close()
def _open(self, fname, mode):
"""Open a file handle
Parameters
----------
fname : str
path to the file
mode : str
mode to open the file in, 'r' for read, 'w' for write
"""
self.mode = mode
cdef char _mode
cdef char var_natoms_flag
if self.mode == 'r':
_mode = 'r'
elif self.mode == 'w':
_mode = 'w'
raise NotImplementedError('Writing is not implemented yet.')
elif self.mode == 'a':
_mode = 'a'
raise NotImplementedError('Appending is not implemented yet')
else:
raise ValueError('mode must be one of "r", "w", or "a" you '
'supplied {}'.format(mode))
# handle file not existing at python level,
if self.mode == 'r' and not os.path.isfile(fname):
raise IOError("File '{}' does not exist".format(fname))
cdef tng_function_status stat
fname_bytes = fname.encode('UTF-8')
# open the trajectory
stat = tng_util_trajectory_open(fname_bytes, _mode, & self._traj._ptr)
        if stat != TNG_SUCCESS:
            raise IOError("File '{}' cannot be opened".format(fname))
        # check if the number of particles can vary
        stat = tng_num_particles_variable_get(self._traj._ptr, & var_natoms_flag)
        if stat != TNG_SUCCESS:
            raise IOError("Particle variability cannot be read")
        if var_natoms_flag != TNG_CONSTANT_N_ATOMS:
            raise IOError("Variable numbers of particles not supported")
        # get the number of integrator timesteps
        stat = tng_num_frames_get(self._traj._ptr, & self._n_steps)
        if stat != TNG_SUCCESS:
            raise IOError("Number of frames cannot be read")
        # get the number of particles
        stat = tng_num_particles_get(self._traj._ptr, & self._n_particles)
        if stat != TNG_SUCCESS:
            raise IOError("Number of particles cannot be read")
        # get the unit scale
        cdef int64_t exponent
        stat = tng_distance_unit_exponential_get(self._traj._ptr, & exponent)
        if stat != TNG_SUCCESS:
            raise IOError("Distance exponent cannot be read")
        # fill out dictionaries of block metadata
        stat = self._get_block_metadata()
        if stat != TNG_SUCCESS:
            raise IOError("Strides for each data block cannot be read")
self._sanitise_block_metadata()
self._distance_scale = 10.0**(exponent+9)
self.is_open = True
self.reached_eof = False
# close the file
def _close(self):
"""Make sure the file handle is closed"""
if self.is_open:
tng_util_trajectory_close(& self._traj._ptr)
self.is_open = False
self.reached_eof = True
self._n_steps = -1
    @property
def n_steps(self):
"""The number of integrator steps in the TNG file
Returns
-------
n_steps : int
number of integrator steps
"""
if not self.is_open:
raise IOError("File is not yet open")
return self._n_steps
@property
def n_atoms(self):
"""The number of atoms in the TNG file
Returns
-------
n_atoms : int
number of atoms
"""
if not self.is_open:
raise IOError("File is not yet open")
return self._n_particles
@property
def block_strides(self):
"""Dictionary of block names and the strides (in integrator steps)
at which they are written in the TNG file
Returns
-------
block_strides : dict
dictionary of block names (keys) and strides of each block (values)
"""
if not self.is_open:
raise IOError("File is not yet open")
return self._frame_strides
@property
def block_ids(self):
"""Dictionary of block names and block ids (long longs) in the
TNG file
Returns
-------
block_ids : dict
dictionary of block names (keys) and block ids (values)
"""
if not self.is_open:
raise IOError("File is not yet open")
block_id_dict = {}
for k in self._frame_strides.keys():
block_id_dict[k] = block_id_dictionary[k]
return block_id_dict
@property
def n_data_frames(self):
"""Dictionary of block names and the number of actual steps with data
for that block in the TNG file
Returns
-------
n_data_frames : dict
dictionary of block names (keys) and number of steps with data
(values)
"""
if not self.is_open:
raise IOError("File is not yet open")
return self._n_data_frames
@property
def values_per_frame(self):
"""Dictionary of block names and the number of values per frame for the
block
Returns
-------
values_per_frame : dict
dictionary of block names (keys) and number of values per frame
(values)
"""
if not self.is_open:
raise IOError("File is not yet open")
return self._values_per_frame
@property
def particle_dependencies(self):
"""Dictionary of block names and whether the block is particle
dependent
Returns
-------
particle_dependencies : dict
dictionary of block names (keys) and particle dependencies (values)
"""
if not self.is_open:
raise IOError("File is not yet open")
return self._particle_dependencies
cpdef np.ndarray make_ndarray_for_block_from_name(self, str block_name):
"""Make a NumPy array that can hold a specified block from the block
name
Parameters
----------
block_name : str
a block name
Returns
-------
target : :class:`np.ndarray`
A NumPy array that can hold the data values for a specified block
See Also
--------
block_ids : dict
dictionary of block names (keys) and block ids (values) available
in this TNG file
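
        A hedged example; availability of the positions block depends on
        the file contents:

        .. code-block:: pycon

            >>> positions = tng.make_ndarray_for_block_from_name(
            ...     "TNG_TRAJ_POSITIONS")
            >>> positions.shape  # (n_atoms, 3) for a positions block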
"""
if block_name not in block_dictionary.values():
raise ValueError("Block name {} not recognised".format(block_name))
if self._particle_dependencies[block_name]:
ax0 = self._n_particles
else:
ax0 = 1
ax1 = self._values_per_frame[block_name]
target = np.ndarray(shape=(ax0, ax1), dtype=np.float32, order='C')
return target
cpdef np.ndarray make_ndarray_for_block_from_id(self, int64_t block_id):
"""Make a NumPy array that can hold a specified block from the block id
Parameters
----------
block_id : int64_t
a block id
Returns
-------
target : :class:`np.ndarray`
A NumPy array that can hold the data values for a specified block
See Also
--------
block_ids : dict
dictionary of block names (keys) and block ids (values) available
in this TNG file
"""
        return self.make_ndarray_for_block_from_name(block_dictionary[block_id])
@property
def step(self):
"""The current integrator step being read
Returns
-------
step : int
the current step in the TNG file
"""
if not self.is_open:
raise IOError("File is not yet open")
return self.step
@property
def current_integrator_step(self):
"""Class that retrieves data from the file at the current integrator
step
Returns
-------
current_integrator_step : :class:`TNGCurrentIntegratorStep`
The data accessor at the current integrator step
"""
if not self.is_open:
raise IOError("File is not yet open")
return self.current_step
cpdef TNGCurrentIntegratorStep read_step(self, step):
"""Read a step (integrator step) from the file
Parameters
----------
step : int
step to read from the file
Returns
-------
current_integrator_step : :class:`TNGCurrentIntegratorStep`
The data accessor at the current integrator step
Raises
------
ValueError
attempt to read a negative step or step number greater than that in
the input file
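
        A short usage sketch; assumes the file contains a positions block
        and at least 101 steps:

        .. code-block:: pycon

            >>> data = tng.make_ndarray_for_block_from_name(
            ...     "TNG_TRAJ_POSITIONS")
            >>> step = tng.read_step(100)
            >>> step.get_positions(data)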
"""
if not self.is_open:
raise IOError('File is not yet open')
if step >= self._n_steps:
raise ValueError("""frame specified is greater than number of steps
in input file {}""".format(self._n_steps))
if step < 0:
step = self._n_steps - np.abs(step)
self.step = step
self.current_step = TNGCurrentIntegratorStep(
self._traj, step, self._frame_strides_blockid, debug=self.debug)
return self.current_step
def _sanitise_block_metadata(self):
"""
        Check that the strides and number of frames make sense
Raises
------
ValueError
Any of the blocks contain 0 data frames
"""
for block, stride in self._frame_strides.items():
if stride > self._n_steps:
self._frame_strides[block] = 1
                self._n_data_frames[block] = self._n_steps - 1
warnings.warn(f"Stride of block {block} is larger than the"
" number of steps in the TNG file. This can"
" sometimes occur for the trajectories produced"
" with `gmx trjconv`. Setting"
" stride for block to one.")
for block, nframes in self._n_data_frames.items():
if nframes == 0:
raise ValueError(f"Block {block} has no frames contaning data")
self._frame_strides_blockid = {block_id_dictionary[k]:v for k, v in self._frame_strides.items()}
# NOTE here we assume that the first frame has all the blocks
# that are present in the whole traj
cdef tng_function_status _get_block_metadata(self):
"""Gets the ids, strides and number of frames with
actual data from the trajectory"""
cdef int64_t step, n_blocks
cdef int64_t nframes, stride_length, n_values_per_frame
cdef int64_t block_counter = 0
cdef int64_t * block_ids = NULL
cdef int block_dependency
cdef bint particle_dependent
cdef tng_function_status read_stat = \
tng_util_trajectory_next_frame_present_data_blocks_find(
self._traj._ptr, -1, 0, NULL, & step, & n_blocks, & block_ids)
for i in range(n_blocks):
read_stat = tng_data_get_stride_length(
self._traj._ptr, block_ids[i], -1, & stride_length)
            if read_stat != TNG_SUCCESS:
                return TNG_CRITICAL
            read_stat = tng_util_num_frames_with_data_of_block_id_get(
                self._traj._ptr, block_ids[i], & nframes)
            if read_stat != TNG_SUCCESS:
                return TNG_CRITICAL
            read_stat = tng_data_block_num_values_per_frame_get(
                self._traj._ptr, block_ids[i], & n_values_per_frame)
            read_stat = tng_data_block_dependency_get(self._traj._ptr,
                                                      block_ids[i],
                                                      & block_dependency)
            if read_stat != TNG_SUCCESS:
                return TNG_CRITICAL
if block_dependency & TNG_PARTICLE_DEPENDENT:
particle_dependent = True
else:
particle_dependent = False
# stride length for the block
self._frame_strides[block_dictionary[block_ids[i]]] = stride_length
# number of actual data frames for the block
self._n_data_frames[block_dictionary[block_ids[i]]] = nframes
# number of values per frame
self._values_per_frame[block_dictionary[block_ids[i]]
] = n_values_per_frame
self._particle_dependencies[block_dictionary[block_ids[i]]
] = particle_dependent
# TODO we will use this if we want to instead iterate
        # over the greatest common divisor of the data strides
self._gcd = gcd_list(list(self._frame_strides.values()))
if self.debug:
printf("PYTNG INFO: gcd of strides %ld \n", self._gcd)
return TNG_SUCCESS
def __enter__(self):
# Support context manager
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._close()
# always propagate exceptions forward
return False
def __len__(self):
return self._n_steps
def __iter__(self):
self._close()
self._open(self.fname, self.mode)
self.read_step(self.step)
return self
def __next__(self):
if self.step == self._n_steps - 1:
raise StopIteration
self.read_step(self.step)
self.step += 1
return self.current_integrator_step
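    # Indexing sketch (``tng`` is a hypothetical open TNGFileIterator):
    #   tng[5]           -> data accessor for integrator step 5
    #   tng[10:100:5]    -> generator over steps 10, 15, ..., 95
    #   tng[[0, 3, 7]]   -> generator over the listed steps
    #   tng[mask]        -> boolean mask over all steps; True steps yielded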
def __getitem__(self, frame):
cdef int64_t start, stop, step, i
if isinstance(frame, numbers.Integral):
if self.debug:
print("slice is a number")
self.read_step(frame)
return self.current_integrator_step
elif isinstance(frame, (list, np.ndarray)):
if self.debug:
print("slice is a list or array")
if isinstance(frame[0], (bool, np.bool_)):
if not (len(frame) == len(self)):
raise TypeError(
"Boolean index must match length of trajectory")
# Avoid having list of bools
frame = np.asarray(frame, dtype=np.bool_)
# Convert bool array to int array
frame = np.arange(len(self))[frame]
def listiter(frames):
for f in frames:
if not isinstance(f, numbers.Integral):
raise TypeError("Frames indices must be integers")
self.read_step(f)
yield self.current_integrator_step
return listiter(frame)
elif isinstance(frame, slice):
start = frame.start if frame.start is not None else 0
stop = frame.stop if frame.stop is not None else self._n_steps
step = frame.step if frame.step is not None else 1
def sliceiter(start, stop, step):
for i in range(start, stop, step):
self.read_step(i)
yield self.current_integrator_step
return sliceiter(start, stop, step)
else:
raise TypeError("Trajectories must be an indexed using an integer,"
" slice or list of indices")
cdef class TNGCurrentIntegratorStep:
"""Retrieves data at the curent trajectory step"""
cdef bint debug
cdef public dict _frame_strides_blockid
cdef int64_t _n_blocks
cdef tng_trajectory * _traj
cdef int64_t step
cdef bint read_success
def __cinit__(self, TrajectoryWrapper traj, int64_t step,
dict frame_strides, bint debug=False):
self.debug = debug
self._frame_strides_blockid = frame_strides
self._traj = traj._ptr
self.step = step
self.read_success = False
def __dealloc__(self):
pass
@property
def step(self):
"""The current integrator step being read
Returns
-------
step : int
the current step in the TNG file
"""
return self.step
@property
def read_success(self):
"""Indicates whether the last attempt to read data was successful
Returns
-------
read_success : bool
Whether the last attempt to read data was successful
"""
return self.read_success
@property
def frame_strides_blockid(self):
"""Dictionary of blockid:frame_stride
Returns
-------
frame_strides_blockid : dict
Dictionary of frame strides
"""
return self._frame_strides_blockid
cpdef get_time(self):
"""Get the time of the current integrator step being read from the file
Returns
-------
        time : float or None
            the time in seconds of the current step, or None if it cannot
            be read
"""
cdef tng_function_status read_stat
cdef double _step_time
read_stat = self._get_step_time(& _step_time)
        if read_stat != TNG_SUCCESS:
return None
else:
return _step_time
cpdef np.ndarray get_positions(self, np.ndarray data):
"""Get the positions present at the current step and read them into a
NumPy array
Parameters
----------
data : np.ndarray
NumPy array to read the data into. As this is a particle dependent
block, the shape should be (n_atoms, n_values_per_frame)
ie (n_atoms, 3).
"""
self.get_blockid(TNG_TRAJ_POSITIONS, data)
return data
cpdef np.ndarray get_box(self, np.ndarray data):
"""Get the box vectors present at the current step and read them into a
NumPy array. The box vectors are a (3,3) matrix comprised of 3
three-dimensional basis vectors for the coordinate system of the
simulation. The vectors can be accessed in their proper shape by
reshaping the resulting (1,9) matrix to be (3,3) with
ndarray.reshape(3,3).
Parameters
----------
data : np.ndarray
NumPy array to read the data into. As this is NOT a particle
dependent block, the shape should be
(1, n_values_per_frame) ie (1,9)
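
        A sketch of the reshape described above (``step`` is a hypothetical
        :class:`TNGCurrentIntegratorStep`):

        .. code-block:: pycon

            >>> import numpy as np
            >>> box = np.empty((1, 9), dtype=np.float32)
            >>> step.get_box(box)
            >>> vectors = box.reshape(3, 3)  # rows are the basis vectors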
"""
self.get_blockid(TNG_TRAJ_BOX_SHAPE, data)
return data
cpdef np.ndarray get_velocities(self, np.ndarray data):
"""Get the velocities present at the current step and read them into a
NumPy array
Parameters
----------
data : np.ndarray
NumPy array to read the data into. As this is a particle dependent
block, the shape should be (n_atoms, n_values_per_frame)
ie (n_atoms, 3).
"""
self.get_blockid(TNG_TRAJ_VELOCITIES, data)
return data
cpdef np.ndarray get_forces(self, np.ndarray data):
"""Get the forces present at the current step and read them into a
NumPy array
Parameters
----------
data : np.ndarray
NumPy array to read the data into. As this is a particle dependent
block, the shape should be (n_atoms, n_values_per_frame)
ie (n_atoms, 3).
"""
self.get_blockid(TNG_TRAJ_FORCES, data)
return data
cpdef get_blockid(self, int64_t block_id, np.ndarray data):
"""Get a block ID present at the current step and read it into a
NumPy array
Parameters
----------
block_id : int64_t
TNG block id to read from the current step
data : np.ndarray
NumPy array to read the data into, the required shape is determined
by the block dependency and the number of values per frame.
Raises
------
TypeError
The dtype of the numpy array provided is not supported by TNG
datatypes or does not match the underlying datatype.
IOError
The block data type cannot be understood.
IndexError
The shape of the numpy array provided does not match the shape of
the data to be read from disk.
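
        A minimal sketch (``tng`` and ``step`` are a hypothetical open file
        and its current step accessor):

        .. code-block:: pycon

            >>> pos_id = tng.block_ids["TNG_TRAJ_POSITIONS"]
            >>> data = tng.make_ndarray_for_block_from_id(pos_id)
            >>> step.get_blockid(pos_id, data)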
"""
shape = data.shape
dtype = data.dtype
if dtype not in [np.int64, np.float32, np.float64]:
raise TypeError(
"PYTNG ERROR: datatype of numpy array not supported\n")
cdef void * values = NULL
cdef int64_t n_values_per_frame = -1
cdef int64_t n_atoms = -1
cdef double precision = -1
cdef char datatype = -1
cdef tng_function_status read_stat = TNG_CRITICAL
cdef np.float32_t[::1] _float_view
cdef int64_t[::1] _int64_t_view
cdef double[::1] _double_view
cdef int i, j
with nogil:
read_stat = self._get_data_current_step(block_id,
self.step,
& values,
& n_values_per_frame,
& n_atoms,
& precision,
& datatype,
self.debug)
if read_stat == TNG_CRITICAL:
self.read_success = False
warnings.warn(f"Failed read for block "
f"{block_dictionary[block_id]}")
# NOTE nan fill on blank read
data[:, :] = np.nan
return data
elif read_stat == TNG_FAILURE:
# possibly off stride, perhaps the stride in file is > nstep and
# we sorted it out in _sanitise_block_metadata?
if (self.step % self._frame_strides_blockid[block_id]):
self.read_success = False
warnings.warn(f"Off stride read for block "
f"{block_dictionary[block_id]}")
# NOTE nan fill on blank read
data[:, :] = np.nan
return data
else:
self.read_success = True
else:
self.read_success = True
        if data.ndim > 2:
            raise IndexError(
                "PYTNG ERROR: Numpy array must be 2 dimensional, "
                "you supplied a {} dimensional ndarray".format(data.ndim))
        if shape[0] != n_atoms:
            raise IndexError(
                "PYTNG ERROR: First axis must be n_atoms long, "
                "you supplied {}".format(shape[0]))
        if shape[1] != n_values_per_frame:
            raise IndexError(
                "PYTNG ERROR: Second axis must be n_values_per_frame long, "
                "you supplied {}".format(shape[1]))
cdef int64_t n_vals = n_atoms * n_values_per_frame
        if datatype == TNG_FLOAT_DATA:
            if dtype != np.float32:
                raise TypeError(
                    "PYTNG ERROR: dtype of array {} does not match TNG dtype float".format(dtype))
            _float_view = <np.float32_t[:n_vals]> (<float*> values)
            data[:, :] = np.asarray(_float_view, dtype=np.float32).reshape(
                n_atoms, n_values_per_frame)
        elif datatype == TNG_INT_DATA:
            if dtype != np.int64:
                raise TypeError(
                    "PYTNG ERROR: dtype of array {} does not match TNG dtype int64_t".format(dtype))
            _int64_t_view = <int64_t[:n_vals]> (<int64_t*> values)
            data[:, :] = np.asarray(_int64_t_view, dtype=np.int64).reshape(
                n_atoms, n_values_per_frame)
        elif datatype == TNG_DOUBLE_DATA:
            if dtype != np.float64:
                raise TypeError(
                    "PYTNG ERROR: dtype of array {} does not match TNG dtype double".format(dtype))
            _double_view = <double[:n_vals]> (<double*> values)
            data[:, :] = np.asarray(_double_view, dtype=np.float64).reshape(
                n_atoms, n_values_per_frame)
        else:
            raise IOError("PYTNG ERROR: block datatype not understood")
return data
cdef tng_function_status _get_data_current_step(self, int64_t block_id,
int64_t step,
void ** values,
int64_t * n_values_per_frame,
int64_t * n_atoms,
double * prec,
char * datatype,
bint debug) nogil:
"""Gets the frame data off disk and into C level arrays
Parameters
----------
block_id : int64_t
block id to read
step : int64_t
integrator step to read
values : void **
NULL void pointer to hook the data onto
n_values_per_frame : int64_t *
set to the number of values per frame for the block
n_atoms : int64_t *
set to the number of atoms or 1 if particle dependent
prec : double *
set to the precision of the block
datatype : char *
set to the datatype of the block
debug : bint
debug the block read
Notes
-----
This function is private. Additionally, this function is marked nogil
and called without the GIL so cannot contain python or python
exceptions. Instead failure is marked by returning
:data:`TNG_CRITICAL`. Success is indicated by returning
        :data:`TNG_SUCCESS`. Cleanup is then done by the calling
        code in :meth:`get_blockid` so the user should not have to deal with
C level exceptions
"""
cdef tng_function_status stat = TNG_CRITICAL
cdef int64_t codec_id
cdef int block_dependency
cdef void * data = NULL
cdef double local_prec
cdef int64_t stride_length
# is this a particle dependent block?
stat = tng_data_block_dependency_get(self._traj, block_id,
& block_dependency)
        if stat != TNG_SUCCESS:
return TNG_CRITICAL
if block_dependency & TNG_PARTICLE_DEPENDENT: # bitwise & due to enums
tng_num_particles_get(self._traj, n_atoms)
# read particle data off disk with hash checking
stat = tng_gen_data_vector_interval_get(self._traj,
block_id,
TNG_TRUE,
self.step,
self.step,
TNG_USE_HASH,
values,
n_atoms,
& stride_length,
n_values_per_frame,
datatype)
else:
n_atoms[0] = 1 # still used for some allocs
# read non particle data off disk with hash checking
stat = tng_gen_data_vector_interval_get(self._traj,
block_id,
TNG_FALSE,
self.step,
self.step,
TNG_USE_HASH,
values,
NULL,
& stride_length,
n_values_per_frame,
datatype)
        if stat != TNG_SUCCESS:
            return TNG_CRITICAL
        # get the compression of the current frame
        stat = tng_util_frame_current_compression_get(self._traj,
                                                      block_id,
                                                      & codec_id,
                                                      & local_prec)
        if stat != TNG_SUCCESS:
            return TNG_CRITICAL
        if codec_id != TNG_TNG_COMPRESSION:
            prec[0] = -1.0
        else:
            prec[0] = local_prec
        # possible blank read, but nothing complained; we will warn in caller
        if self.step % stride_length != 0:
            return TNG_FAILURE
        # if we reached here the read was successful
        return TNG_SUCCESS
    cdef tng_function_status _get_step_time(self, double * step_time):
        stat = tng_util_time_of_frame_get(self._traj, self.step, step_time)
        if stat != TNG_SUCCESS:
            return TNG_CRITICAL
        return TNG_SUCCESS
block_dictionary = {}
# group 1
block_dictionary[TNG_GENERAL_INFO] = "TNG_GENERAL_INFO"
block_dictionary[TNG_MOLECULES] = "TNG_MOLECULES"
block_dictionary[TNG_TRAJECTORY_FRAME_SET] = "TNG_TRAJECTORY_FRAME_SET"
block_dictionary[TNG_PARTICLE_MAPPING] = "TNG_PARTICLE_MAPPING"
# group 2
block_dictionary[TNG_TRAJ_BOX_SHAPE] = "TNG_TRAJ_BOX_SHAPE"
block_dictionary[TNG_TRAJ_POSITIONS] = "TNG_TRAJ_POSITIONS"
block_dictionary[TNG_TRAJ_VELOCITIES] = "TNG_TRAJ_VELOCITIES"
block_dictionary[TNG_TRAJ_FORCES] = "TNG_TRAJ_FORCES"
block_dictionary[TNG_TRAJ_PARTIAL_CHARGES] = "TNG_TRAJ_PARTIAL_CHARGES"
block_dictionary[TNG_TRAJ_FORMAL_CHARGES] = "TNG_TRAJ_FORMAL_CHARGES"
block_dictionary[TNG_TRAJ_B_FACTORS] = "TNG_TRAJ_B_FACTORS"
block_dictionary[TNG_TRAJ_ANISOTROPIC_B_FACTORS] = "TNG_TRAJ_ANISOTROPIC_B_FACTORS"
block_dictionary[TNG_TRAJ_OCCUPANCY] = "TNG_TRAJ_OCCUPANCY"
block_dictionary[TNG_TRAJ_GENERAL_COMMENTS] = "TNG_TRAJ_GENERAL_COMMENTS"
block_dictionary[TNG_TRAJ_MASSES] = "TNG_TRAJ_MASSES"
# group 3
block_dictionary[TNG_GMX_LAMBDA] = "TNG_GMX_LAMBDA"
block_dictionary[TNG_GMX_ENERGY_ANGLE] = "TNG_GMX_ENERGY_ANGLE"
block_dictionary[TNG_GMX_ENERGY_RYCKAERT_BELL] = "TNG_GMX_ENERGY_RYCKAERT_BELL"
block_dictionary[TNG_GMX_ENERGY_LJ_14] = "TNG_GMX_ENERGY_LJ_14"
block_dictionary[TNG_GMX_ENERGY_COULOMB_14] = "TNG_GMX_ENERGY_COULOMB_14"
block_dictionary[TNG_GMX_ENERGY_LJ_SR] = "TNG_GMX_ENERGY_LJ_SR"
block_dictionary[TNG_GMX_ENERGY_COULOMB_SR] = "TNG_GMX_ENERGY_COULOMB_SR"
block_dictionary[TNG_GMX_ENERGY_COUL_RECIP] = "TNG_GMX_ENERGY_COUL_RECIP"
block_dictionary[TNG_GMX_ENERGY_POTENTIAL] = "TNG_GMX_ENERGY_POTENTIAL"
block_dictionary[TNG_GMX_ENERGY_KINETIC_EN] = "TNG_GMX_ENERGY_KINETIC_EN"
block_dictionary[TNG_GMX_ENERGY_TOTAL_ENERGY] = "TNG_GMX_ENERGY_TOTAL_ENERGY"
block_dictionary[TNG_GMX_ENERGY_TEMPERATURE] = "TNG_GMX_ENERGY_TEMPERATURE"
block_dictionary[TNG_GMX_ENERGY_PRESSURE] = "TNG_GMX_ENERGY_PRESSURE"
block_dictionary[TNG_GMX_ENERGY_CONSTR_RMSD] = "TNG_GMX_ENERGY_CONSTR_RMSD"
block_dictionary[TNG_GMX_ENERGY_CONSTR2_RMSD] = "TNG_GMX_ENERGY_CONSTR2_RMSD"
block_dictionary[TNG_GMX_ENERGY_BOX_X] = "TNG_GMX_ENERGY_BOX_X"
block_dictionary[TNG_GMX_ENERGY_BOX_Y] = "TNG_GMX_ENERGY_BOX_Y"
block_dictionary[TNG_GMX_ENERGY_BOX_Z] = "TNG_GMX_ENERGY_BOX_Z"
block_dictionary[TNG_GMX_ENERGY_BOXXX] = "TNG_GMX_ENERGY_BOXXX"
block_dictionary[TNG_GMX_ENERGY_BOXYY] = "TNG_GMX_ENERGY_BOXYY"
block_dictionary[TNG_GMX_ENERGY_BOXZZ] = "TNG_GMX_ENERGY_BOXZZ"
block_dictionary[TNG_GMX_ENERGY_BOXYX] = "TNG_GMX_ENERGY_BOXYX"
block_dictionary[TNG_GMX_ENERGY_BOXZX] = "TNG_GMX_ENERGY_BOXZX"
block_dictionary[TNG_GMX_ENERGY_BOXZY] = "TNG_GMX_ENERGY_BOXZY"
block_dictionary[TNG_GMX_ENERGY_BOXVELXX] = "TNG_GMX_ENERGY_BOXVELXX"
block_dictionary[TNG_GMX_ENERGY_BOXVELYY] = "TNG_GMX_ENERGY_BOXVELYY"
block_dictionary[TNG_GMX_ENERGY_BOXVELZZ] = "TNG_GMX_ENERGY_BOXVELZZ"
block_dictionary[TNG_GMX_ENERGY_BOXVELYX] = "TNG_GMX_ENERGY_BOXVELYX"
block_dictionary[TNG_GMX_ENERGY_BOXVELZX] = "TNG_GMX_ENERGY_BOXVELZX"
block_dictionary[TNG_GMX_ENERGY_BOXVELZY] = "TNG_GMX_ENERGY_BOXVELZY"
block_dictionary[TNG_GMX_ENERGY_VOLUME] = "TNG_GMX_ENERGY_VOLUME"
block_dictionary[TNG_GMX_ENERGY_DENSITY] = "TNG_GMX_ENERGY_DENSITY"
block_dictionary[TNG_GMX_ENERGY_PV] = "TNG_GMX_ENERGY_PV"
block_dictionary[TNG_GMX_ENERGY_ENTHALPY] = "TNG_GMX_ENERGY_ENTHALPY"
block_dictionary[TNG_GMX_ENERGY_VIR_XX] = "TNG_GMX_ENERGY_VIR_XX"
block_dictionary[TNG_GMX_ENERGY_VIR_XY] = "TNG_GMX_ENERGY_VIR_XY"
block_dictionary[TNG_GMX_ENERGY_VIR_XZ] = "TNG_GMX_ENERGY_VIR_XZ"
block_dictionary[TNG_GMX_ENERGY_VIR_YX] = "TNG_GMX_ENERGY_VIR_YX"
block_dictionary[TNG_GMX_ENERGY_VIR_YY] = "TNG_GMX_ENERGY_VIR_YY"
block_dictionary[TNG_GMX_ENERGY_VIR_YZ] = "TNG_GMX_ENERGY_VIR_YZ"
block_dictionary[TNG_GMX_ENERGY_VIR_ZX] = "TNG_GMX_ENERGY_VIR_ZX"
block_dictionary[TNG_GMX_ENERGY_VIR_ZY] = "TNG_GMX_ENERGY_VIR_ZY"
block_dictionary[TNG_GMX_ENERGY_VIR_ZZ] = "TNG_GMX_ENERGY_VIR_ZZ"
block_dictionary[TNG_GMX_ENERGY_SHAKEVIR_XX] = "TNG_GMX_ENERGY_SHAKEVIR_XX"
block_dictionary[TNG_GMX_ENERGY_SHAKEVIR_XY] = "TNG_GMX_ENERGY_SHAKEVIR_XY"
block_dictionary[TNG_GMX_ENERGY_SHAKEVIR_XZ] = "TNG_GMX_ENERGY_SHAKEVIR_XZ"
block_dictionary[TNG_GMX_ENERGY_SHAKEVIR_YX] = "TNG_GMX_ENERGY_SHAKEVIR_YX"
block_dictionary[TNG_GMX_ENERGY_SHAKEVIR_YY] = "TNG_GMX_ENERGY_SHAKEVIR_YY"
block_dictionary[TNG_GMX_ENERGY_SHAKEVIR_YZ] = "TNG_GMX_ENERGY_SHAKEVIR_YZ"
block_dictionary[TNG_GMX_ENERGY_SHAKEVIR_ZX] = "TNG_GMX_ENERGY_SHAKEVIR_ZX"
block_dictionary[TNG_GMX_ENERGY_SHAKEVIR_ZY] = "TNG_GMX_ENERGY_SHAKEVIR_ZY"
block_dictionary[TNG_GMX_ENERGY_SHAKEVIR_ZZ] = "TNG_GMX_ENERGY_SHAKEVIR_ZZ"
block_dictionary[TNG_GMX_ENERGY_FORCEVIR_XX] = "TNG_GMX_ENERGY_FORCEVIR_XX"
block_dictionary[TNG_GMX_ENERGY_FORCEVIR_XY] = "TNG_GMX_ENERGY_FORCEVIR_XY"
block_dictionary[TNG_GMX_ENERGY_FORCEVIR_XZ] = "TNG_GMX_ENERGY_FORCEVIR_XZ"
block_dictionary[TNG_GMX_ENERGY_FORCEVIR_YX] = "TNG_GMX_ENERGY_FORCEVIR_YX"
block_dictionary[TNG_GMX_ENERGY_FORCEVIR_YY] = "TNG_GMX_ENERGY_FORCEVIR_YY"
block_dictionary[TNG_GMX_ENERGY_FORCEVIR_YZ] = "TNG_GMX_ENERGY_FORCEVIR_YZ"
block_dictionary[TNG_GMX_ENERGY_FORCEVIR_ZX] = "TNG_GMX_ENERGY_FORCEVIR_ZX"
block_dictionary[TNG_GMX_ENERGY_FORCEVIR_ZY] = "TNG_GMX_ENERGY_FORCEVIR_ZY"
block_dictionary[TNG_GMX_ENERGY_FORCEVIR_ZZ] = "TNG_GMX_ENERGY_FORCEVIR_ZZ"
block_dictionary[TNG_GMX_ENERGY_PRES_XX] = "TNG_GMX_ENERGY_PRES_XX"
block_dictionary[TNG_GMX_ENERGY_PRES_XY] = "TNG_GMX_ENERGY_PRES_XY"
block_dictionary[TNG_GMX_ENERGY_PRES_XZ] = "TNG_GMX_ENERGY_PRES_XZ"
block_dictionary[TNG_GMX_ENERGY_PRES_YX] = "TNG_GMX_ENERGY_PRES_YX"
block_dictionary[TNG_GMX_ENERGY_PRES_YY] = "TNG_GMX_ENERGY_PRES_YY"
block_dictionary[TNG_GMX_ENERGY_PRES_YZ] = "TNG_GMX_ENERGY_PRES_YZ"
block_dictionary[TNG_GMX_ENERGY_PRES_ZX] = "TNG_GMX_ENERGY_PRES_ZX"
block_dictionary[TNG_GMX_ENERGY_PRES_ZY] = "TNG_GMX_ENERGY_PRES_ZY"
block_dictionary[TNG_GMX_ENERGY_PRES_ZZ] = "TNG_GMX_ENERGY_PRES_ZZ"
block_dictionary[TNG_GMX_ENERGY_SURFXSURFTEN] = "TNG_GMX_ENERGY_SURFXSURFTEN"
block_dictionary[TNG_GMX_ENERGY_MUX] = "TNG_GMX_ENERGY_MUX"
block_dictionary[TNG_GMX_ENERGY_MUY] = "TNG_GMX_ENERGY_MUY"
block_dictionary[TNG_GMX_ENERGY_MUZ] = "TNG_GMX_ENERGY_MUZ"
block_dictionary[TNG_GMX_ENERGY_VCOS] = "TNG_GMX_ENERGY_VCOS"
block_dictionary[TNG_GMX_ENERGY_VISC] = "TNG_GMX_ENERGY_VISC"
block_dictionary[TNG_GMX_ENERGY_BAROSTAT] = "TNG_GMX_ENERGY_BAROSTAT"
block_dictionary[TNG_GMX_ENERGY_T_SYSTEM] = "TNG_GMX_ENERGY_T_SYSTEM"
block_dictionary[TNG_GMX_ENERGY_LAMB_SYSTEM] = "TNG_GMX_ENERGY_LAMB_SYSTEM"
block_dictionary[TNG_GMX_SELECTION_GROUP_NAMES] = "TNG_GMX_SELECTION_GROUP_NAMES"
block_dictionary[TNG_GMX_ATOM_SELECTION_GROUP] = "TNG_GMX_ATOM_SELECTION_GROUP"
# reverse the mapping
block_id_dictionary = {v: k for k, v in block_dictionary.items()}
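# Illustrative lookups (both directions of the mapping):
#   block_dictionary[TNG_TRAJ_POSITIONS] == "TNG_TRAJ_POSITIONS"
#   block_id_dictionary["TNG_TRAJ_POSITIONS"] == TNG_TRAJ_POSITIONS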
<|end_of_text|>"""
Cython wrapper around the NASA TRMM RSL library.
"""
cimport pyart.io._rsl_h as _rsl_h
# the next line is required so that RSL_F_LIST and RSL_INVF_LIST can be
# properly wrapped as Cython does not export the typedef from _rsl_h
ctypedef unsigned short Range
import numpy as np
cimport numpy as np
from datetime import datetime, timedelta
_RSL_VERSION_STR = _rsl_h._RSL_VERSION_STR
cpdef copy_volume(_RslVolume volume):
"""
copy_volume(volume)
Return a copy of a _RslVolume object.
Parameters
----------
volume : _RslVolume
_RslVolume object to create a copy of.
Returns
-------
nvolume : _RslVolume
Copy of volume.
"""
volume_copy = _rsl_h.RSL_copy_volume(volume._Volume)
rslvolume = _RslVolume()
rslvolume.load(volume_copy)
rslvolume._dealloc = 1
return rslvolume
cpdef create_volume(
np.ndarray[np.float32_t, ndim=2] arr,
np.ndarray[np.int32_t, ndim=1] rays_per_sweep,
int vol_num=1):
"""
create_volume(arr, rays_per_sweep, vol_num=1)
Create a _RslVolume object from a 2D float32 array.
    No header parameters except nsweeps, nrays and nbins are set in the
    resulting _RslVolume object.
Parameters
----------
arr : array, 2D, float32
Two dimensional float32 array.
rays_per_sweep: array, 1D, int32
Array listing number of rays in each sweep.
vol_num : int
Volume number used to set f and invf in the header. The default is
for velocity fields. Useful values are 0 for reflectivity and
1 for velocity.
Returns
-------
volumes : _RslVolume
        _RslVolume containing array data.
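
    A minimal sketch: two sweeps of 10 rays with 100 bins each; the zero
    values are placeholders:

    .. code-block:: pycon

        >>> import numpy as np
        >>> arr = np.zeros((20, 100), dtype=np.float32)
        >>> rays_per_sweep = np.array([10, 10], dtype=np.int32)
        >>> vol = create_volume(arr, rays_per_sweep, vol_num=1)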
"""
# these variables can be moved to the module level if used elsewhere
cdef (float (*)(_rsl_h.Range) noexcept) * RSL_F_LIST = [
_rsl_h.DZ_F, _rsl_h.VR_F, _rsl_h.SW_F, _rsl_h.CZ_F, _rsl_h.ZT_F,
_rsl_h.DR_F, _rsl_h.LR_F, _rsl_h.ZD_F, _rsl_h.DM_F, _rsl_h.RH_F,
_rsl_h.PH_F, _rsl_h.XZ_F, _rsl_h.CD_F, _rsl_h.MZ_F, _rsl_h.MD_F,
_rsl_h.ZE_F, _rsl_h.VE_F, _rsl_h.KD_F, _rsl_h.TI_F, _rsl_h.DX_F,
_rsl_h.CH_F, _rsl_h.AH_F, _rsl_h.CV_F, _rsl_h.AV_F, _rsl_h.SQ_F,
_rsl_h.VS_F, _rsl_h.VL_F, _rsl_h.VG_F, _rsl_h.VT_F, _rsl_h.NP_F,
_rsl_h.HC_F, _rsl_h.VC_F, _rsl_h.VR_F, _rsl_h.SW_F, _rsl_h.VR_F,
_rsl_h.SW_F, _rsl_h.DZ_F, _rsl_h.CZ_F, _rsl_h.PH_F, _rsl_h.SD_F,
_rsl_h.DZ_F, _rsl_h.DZ_F]
cdef (_rsl_h.Range (*)(float) noexcept) * RSL_INVF_LIST = [
_rsl_h.DZ_INVF, _rsl_h.VR_INVF, _rsl_h.SW_INVF, _rsl_h.CZ_INVF,
_rsl_h.ZT_INVF, _rsl_h.DR_INVF, _rsl_h.LR_INVF, _rsl_h.ZD_INVF,
_rsl_h.DM_INVF, _rsl_h.RH_INVF, _rsl_h.PH_INVF, _rsl_h.XZ_INVF,
_rsl_h.CD_INVF, _rsl_h.MZ_INVF, _rsl_h.MD_INVF, _rsl_h.ZE_INVF,
_rsl_h.VE_INVF, _rsl_h.KD_INVF, _rsl_h.TI_INVF, _rsl_h.DX_INVF,
_rsl_h.CH_INVF, _rsl_h.AH_INVF, _rsl_h.CV_INVF, _rsl_h.AV_INVF,
_rsl_h.SQ_INVF, _rsl_h.VS_INVF, _rsl_h.VL_INVF, _rsl_h.VG_INVF,
_rsl_h.VT_INVF, _rsl_h.NP_INVF, _rsl_h.HC_INVF, _rsl_h.VC_INVF,
_rsl_h.VR_INVF, _rsl_h.SW_INVF, _rsl_h.VR_INVF, _rsl_h.SW_INVF,
_rsl_h.DZ_INVF, _rsl_h.CZ_INVF, _rsl_h.PH_INVF, _rsl_h.SD_INVF,
_rsl_h.DZ_INVF, _rsl_h.DZ_INVF]
cdef _rsl_h.Volume * volume
cdef _rsl_h.Sweep * sweep
cdef _rsl_h.Ray * ray
cdef int ray_i
nsweeps = rays_per_sweep.shape[0]
nbins = arr.shape[1]
volume = _rsl_h.RSL_new_volume(nsweeps)
volume.h.nsweeps = nsweeps
ray_index = 0
for nsweep in range(nsweeps):
nrays = rays_per_sweep[nsweep]
sweep = _rsl_h.RSL_new_sweep(nrays)
volume.sweep[nsweep] = sweep
sweep.h.nrays = nrays
for nray in range(nrays):
ray = _rsl_h.RSL_new_ray(nbins)
sweep.ray[nray] = ray
ray.h.nbins = nbins
ray.h.f = RSL_F_LIST[vol_num]
ray.h.invf = RSL_INVF_LIST[vol_num]
for nbin in range(nbins):
ray.range[nbin] = ray.h.invf(arr[ray_index, nbin])
ray_index += 1
rslvolume = _RslVolume()
rslvolume.load(volume)
rslvolume._dealloc = 1
return rslvolume
cpdef _label_volume(_RslVolume volume, radar):
"""
_label_volume(volume, radar)
Add labels for dealiasing to a _RslVolume object from a radar object.
This function does not set all parameter in the _RslVolume suitable for
writing out the volume, rather it set those parameters which must be set
prior to using :py:func:`pyart.correct._fourdd_interface.fourdd_dealias`.
Parameters
----------
volume : _RslVolume
Volume object to which parameters will be set as needed prior to
dealiasing. Object is manipulated in-place.
radar : Radar
Radar object from which parameters are taken.
"""
cdef _rsl_h.Sweep * sweep
cdef _rsl_h.Ray * ray
cdef int ray_index
vol = volume._Volume
nsweeps = vol.h.nsweeps
nbins = vol.sweep[0].ray[0].h.nbins
gate_size = int(radar.range['meters_between_gates'])
range_bin1 = int(radar.range['meters_to_center_of_first_gate'])
    if 'shape' in dir(radar.altitude['data']):
if radar.altitude['data'].shape == ():
alt = float(radar.altitude['data'])
else:
alt = radar.altitude['data'][0]
else:
alt = radar.altitude['data']
nyq_vels = radar.instrument_parameters['nyquist_velocity']['data']
azimuths = radar.azimuth['data']
elevs = radar.elevation['data']
# label the volume
ray_index = 0
vol.h.nsweeps = nsweeps
for nsweep in range(nsweeps):
sweep = vol.sweep[nsweep]
nrays = sweep.h.nrays
for nray in range(nrays):
ray = sweep.ray[nray]
ray.h.azimuth = azimuths[ray_index]
ray.h.elev = elevs[ray_index]
ray.h.nyq_vel = nyq_vels[ray_index]
ray.h.range_bin1 = range_bin1
ray.h.gate_size = gate_size
ray.h.alt = alt
ray_index += 1
return
cdef class _RslRay:
"""
    An object for accessing RSL Ray data and header information.
    This class should not be initialized from within Python. _RslRay objects
    are returned from the :py:func:`_RslSweep.get_ray` method.
Attributes
----------
month : int
Date for this ray, month (1-12).
day : int
Date for this ray, day (1-31).
year : int
Date for this ray, year (eg. 1993).
hour : int
Time for this ray, hour (0-23).
minute : int
Time for this ray, minute (0-59).
sec : float
        Time for this ray, second + fraction of second.
unam_rng : float
Unambiguous range in km.
azimuth : float
Mean azimuth angle in degrees for the ray, must be positive.
0 for north, 90 for east, 270 for west.
ray_num : int
Ray number within a scan.
elev : float
Elevation angle in degrees.
elev_num : int
Elevation number within the volume scan.
range_bin1 : int
Range to first gate in meters.
gate_size : int
Gate size in meters.
vel_res : float
Doppler velocity resolution.
sweep_rate : float
Sweep rate, full sweeps / minute.
prf : int
Pulse repetition frequency in Hz.
prf2 : int
        Second pulse repetition frequency for dual PRF data.
azim_rate : float
Sweep rate in degrees / second.
fix_angle : float
Elevation angle for the sweep in degrees.
pitch : float
Pitch angle.
roll : float
Roll angle.
heading : float
Heading.
pitch_rate : float
Pitch rate in angle / sec.
roll_rate : float
Roll rate in angle / sec.
heading_rate : float
Heading rate in angle / sec.
lat : float
Latitude in degrees.
    lon : float
Longitude in degrees.
alt : int
Altitude in meters.
rvs : float
Radial velocity correction in meters / second.
vel_east : float
Platform velocity to the east in meters / second. Negative values for
velocity to the west.
vel_north : float
Platform velocity to the north in meters / second. Negative values for
velocity to the south.
vel_up : float
Platform velocity upward in meters / second. Negative values for
velocity downward.
pulse_count : int
Pulses used in a single dwell time.
pulse_width : float
Pulse width in microseconds.
beam_width : float
Beamwidth in degrees.
frequency : float
Carrier frequency in GHz.
wavelength : float
Wavelength in meters.
nyq_vel : float
Nyquist velocity in meters / second.
nbins : int
Number of array elements in ray data.
"""
cdef _rsl_h.Ray * _Ray
cdef int _dealloc
def __dealloc__(self):
if self._dealloc == 1:
_rsl_h.RSL_free_ray(self._Ray)
cdef load(self, _rsl_h.Ray * Ray):
""" Load the _RslRay object, must be called after creation. """
if Ray is NULL:
raise ValueError('cannot load _RslRay with NULL')
self._Ray = Ray
self._dealloc = 0
def get_datetime(self):
"""
get_datetime()
Return a datetime describing the date and time of the ray.
"""
s = self
full_seconds, fractional_seconds = divmod(s.sec, 1)
microseconds = int(fractional_seconds * 1e6)
        # Some UF writers incorrectly specify midnight as 24:00:00 rather
        # than 00:00:00. Handle this case explicitly by building the last
        # second of the day and rolling over into the next day.
        if s.hour == 24:
            dt = datetime(s.year, s.month, s.day, 23, 59, 59, microseconds)
            return dt + timedelta(seconds=1)
        return datetime(s.year, s.month, s.day, s.hour, s.minute,
                        int(full_seconds), microseconds)
def get_data(self):
"""
get_data()
Return the one-dimensional data contained in the ray.
"""
cdef _rsl_h.Range raw
cdef np.ndarray[np.float32_t, ndim = 1] data
shape = (self._Ray.h.nbins)
data = np.zeros(shape, dtype='float32') + 1.31072000e+05
for nbin in range(self._Ray.h.nbins):
raw = self._Ray.range[nbin]
data[nbin] = self._Ray.h.f(raw)
return data
# header properties mapped to class attributes.
property month:
def __get__(self):
return self._Ray.h.month
def __set__(self, int month):
self._Ray.h.month = month
property day:
def __get__(self):
return self._Ray.h.day
def __set__(self, int day):
self._Ray.h.day = day
property year:
def __get__(self):
return self._Ray.h.year
def __set__(self, int year):
self._Ray.h.year = year
property hour:
def __get__(self):
return self._Ray.h.hour
def __set__(self, int hour):
self._Ray.h.hour = hour
property minute:
def __get__(self):
return self._Ray.h.minute
def __set__(self, int minute):
self._Ray.h.minute = minute
property sec:
def __get__(self):
return self._Ray.h.sec
def __set__(self, float sec):
self._Ray.h.sec = sec
property unam_rng:
def __get__(self):
return self._Ray.h.unam_rng
def __set__(self, float unam_rng):
self._Ray.h.unam_rng = unam_rng
property azimuth:
def __get__(self):
return self._Ray.h.azimuth
def __set__(self, float azimuth):
self._Ray.h.azimuth = azimuth
property ray_num:
def __get__(self):
return self._Ray.h.ray_num
def __set__(self, int ray_num):
self._Ray.h.ray_num = ray_num
property elev:
def __get__(self):
return self._Ray.h.elev
def __set__(self, float elev):
self._Ray.h.elev = elev
property elev_num:
def __get__(self):
return self._Ray.h.elev_num
def __set__(self, int elev_num):
self._Ray.h.elev_num = elev_num
property range_bin1:
def __get__(self):
return self._Ray.h.range_bin1
def __set__(self, int range_bin1):
self._Ray.h.range_bin1 = range_bin1
property gate_size:
def __get__(self):
return self._Ray.h.gate_size
def __set__(self, int gate_size):
self._Ray.h.gate_size = gate_size
property vel_res:
def __get__(self):
return self._Ray.h.vel_res
def __set__(self, float vel_res):
self._Ray.h.vel_res = vel_res
property sweep_rate:
def __get__(self):
return self._Ray.h.sweep_rate
def __set__(self, float sweep_rate):
self._Ray.h.sweep_rate = sweep_rate
property prf:
def __get__(self):
return self._Ray.h.prf
def __set__(self, int prf):
self._Ray.h.prf = prf
property prf2:
def __get__(self):
return self._Ray.h.prf2
def __set__(self, int prf2):
self._Ray.h.prf2 = prf2
property azim_rate:
def __get__(self):
return self._Ray.h.azim_rate
def __set__(self, float azim_rate):
self._Ray.h.azim_rate = azim_rate
property fix_angle:
def __get__(self):
return self._Ray.h.fix_angle
def __set__(self, float fix_angle):
self._Ray.h.fix_angle = fix_angle
property pitch:
def __get__(self):
return self._Ray.h.pitch
def __set__(self, float pitch):
self._Ray.h.pitch = pitch
property roll:
def __get__(self):
return self._Ray.h.roll
def __set__(self, float roll):
self._Ray.h.roll = roll
property heading:
def __get__(self):
return self._Ray.h.heading
def __set__(self, float heading):
self._Ray.h.heading = heading
property pitch_rate:
def __get__(self):
return self._Ray.h.pitch_rate
def __set__(self, float pitch_rate):
self._Ray.h.pitch_rate = pitch_rate
property roll_rate:
def __get__(self):
return self._Ray.h.roll_rate
def __set__(self, float roll_rate):
self._Ray.h.roll_rate = roll_rate
property heading_rate:
def __get__(self):
return self._Ray.h.heading_rate
def __set__(self, float heading_rate):
self._Ray.h.heading_rate = heading_rate
property lat:
def __get__(self):
return self._Ray.h.lat
def __set__(self, float lat):
self._Ray.h.lat = lat
property lon:
def __get__(self):
return self._Ray.h.lon
def __set__(self, float lon):
self._Ray.h.lon = lon
property alt:
def __get__(self):
return self._Ray.h.alt
def __set__(self, int alt):
self._Ray.h.alt = alt
property rvc:
def __get__(self):
return self._Ray.h.rvc
def __set__(self, float rvc):
self._Ray.h.rvc = rvc
property vel_east:
def __get__(self):
return self._Ray.h.vel_east
def __set__(self, float vel_east):
self._Ray.h.vel_east = vel_east
property vel_north:
def __get__(self):
return self._Ray.h.vel_north
def __set__(self, float vel_north):
self._Ray.h.vel_north = vel_north
property vel_up:
def __get__(self):
return self._Ray.h.vel_up
def __set__(self, float vel_up):
self._Ray.h.vel_up = vel_up
property pulse_count:
def __get__(self):
return self._Ray.h.pulse_count
def __set__(self, int pulse_count):
self._Ray.h.pulse_count = pulse_count
property pulse_width:
        def __get__(self):
return self._Ray.h.pulse_width
def __set__(self, float pulse_width):
self._Ray.h.pulse_width = pulse_width
property beam_width:
def __get__(self):
return self._Ray.h.beam_width
def __set__(self, float beam_width):
self._Ray.h.beam_width = beam_width
property frequency:
def __get__(self):
return self._Ray.h.frequency
def __set__(self, float frequency):
self._Ray.h.frequency = frequency
property wavelength:
def __get__(self):
return self._Ray.h.wavelength
def __set__(self, float wavelength):
self._Ray.h.wavelength = wavelength
property nyq_vel:
def __get__(self):
return self._Ray.h.nyq_vel
def __set__(self, float nyq_vel):
self._Ray.h.nyq_vel = nyq_vel
property nbins:
def __get__(self):
return self._Ray.h.nbins
def __set__(self, int nbins):
self._Ray.h.nbins = nbins
cdef class _RslSweep:
"""
    An object for accessing RSL Sweep data and header information.
    This class should not be initialized from within Python. _RslSweep objects
    are returned from the :py:func:`_RslVolume.get_sweep` method.
Attributes
----------
    sweep_num : int
        Integer sweep number.
    elev : float
        Mean elevation angle for the sweep. -999.0 for RHI sweeps.
    azimuth : float
        Azimuth for the sweep. -999.0 for PPI scans.
beam_width : float
Beam width in degrees. Can also be found in _RslRay objects.
vert_half_bw : float
Vertical beam width divided by 2.
horz_half_bw : float
Horizontal beam width divided by 2.
nrays : int
Number of rays in the sweep.
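
    A usage sketch (``volume`` is a hypothetical _RslVolume):

    .. code-block:: pycon

        >>> sweep = volume.get_sweep(0)
        >>> ray = sweep.get_ray(0)
        >>> data = sweep.get_data()  # (nrays, nbins) float32 array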
"""
cdef _rsl_h.Sweep * _Sweep
cdef int _dealloc
def __dealloc__(self):
if self._dealloc == 1:
_rsl_h.RSL_free_sweep(self._Sweep)
cdef load(self, _rsl_h.Sweep * Sweep):
""" Load the _RslSweep object, must be called after creation. """
if Sweep is NULL:
raise ValueError("cannot load _RslSweep with NULL")
self._Sweep = Sweep
self._dealloc = 0
def get_ray(self, int ray_number):
"""
get_ray(ray_number)
Return a _RslRay for a given ray.
Parameters
----------
ray_number : int
Ray number to retrieve
Returns
-------
ray : _RslRay
_RslRay object containing the requested ray.
"""
if ray_number < 0 or ray_number >= self._Sweep.h.nrays:
raise ValueError('invalid ray_number')
rslray = _RslRay()
rslray.load(self._Sweep.ray[ray_number])
return rslray
def get_data(self):
"""
get_data()
Return the two-dimensional data contained in the sweep.
        If a given ray has fewer bins than the first ray, the missing bins
        will be filled with 131072.0.
"""
cdef _rsl_h.Range raw
cdef _rsl_h.Ray * ray
cdef np.ndarray[np.float32_t, ndim = 2] data
sweep = self._Sweep
nrays = sweep.h.nrays
nbins = sweep.ray[0].h.nbins
shape = (nrays, nbins)
data = np.zeros(shape, dtype='float32') + 1.31072000e+05
for nray in range(nrays):
ray = sweep.ray[nray]
assert ray is not NULL
nbins = ray.h.nbins
for nbin in range(nbins):
raw = ray.range[nbin]
data[nray, nbin] = ray.h.f(raw)
return data
# header properties mapped to class attributes.
property sweep_num:
def __get__(self):
return self._Sweep.h.sweep_num
def __set__(self, int sweep_num):
self._Sweep.h.sweep_num = sweep_num
property elev:
def __get__(self):
return self._Sweep.h.elev
def __set__(self, float elev):
self._Sweep.h.elev = elev
property azimuth:
def __get__(self):
return self._Sweep.h.azimuth
def __set__(self, float azimuth):
self._Sweep.h.azimuth = azimuth
property beam_width:
def __get__(self):
return self._Sweep.h.beam_width
def __set__(self, float beam_width):
self._Sweep.h.beam_width = beam_width
property vert_half_bw:
def __get__(self):
return self._Sweep.h.vert_half_bw
def __set__(self, float vert_half_bw):
self._Sweep.h.vert_half_bw = vert_half_bw
property horz_half_bw:
def __get__(self):
return self._Sweep.h.horz_half_bw
def __set__(self, float horz_half_bw):
self._Sweep.h.horz_half_bw = horz_half_bw
property nrays:
def __get__(self):
return self._Sweep.h.nrays
def __set__(self, int nrays):
self._Sweep.h.nrays = nrays
cdef class _RslVolume:
"""
    An object for accessing RSL Volume data and header information.
    This class should not be initialized from within Python. _RslVolume
objects are returned from the :py:func:`RslFile.get_volume` and other
functions/methods.
Attributes
----------
    nsweeps : int
        Number of sweeps in the volume.
calibr_const : float
Calibration constant.
"""
def __dealloc__(self):
if self._dealloc == 1:
_rsl_h.RSL_free_volume(self._Volume)
cdef load(self, _rsl_h.Volume * Volume):
""" Load the _RslVolume object, must be called after creation. """
if Volume is NULL:
raise ValueError('cannot load _RslVolume with NULL')
self._Volume = Volume
self._dealloc = 0
def total_rays(self):
"""
total_rays()
Return the total number of rays present in all sweeps of the volume.
"""
return np.sum(self.get_nray_array())
def get_nray_array(self):
"""
get_nray_array()
Return an array of the number of rays for each sweep.
"""
cdef _rsl_h.Sweep * sweep
nrays = np.empty((self.nsweeps), dtype='int32')
for i in range(self.nsweeps):
sweep = self._Volume.sweep[i]
assert sweep is not NULL
nrays[i] = sweep.h.nrays
return nrays
def get_sweep(self, int sweep_number):
"""
get_sweep(sweep_number)
Return a _RslSweep for a given sweep number.
Parameters
----------
sweep_number : int
Sweep number to retrieve
Returns
-------
sweep : _RslSweep
_RslSweep object containing the requested sweep.
"""
if sweep_number < 0 or sweep_number >= self._Volume.h.nsweeps:
raise ValueError('invalid sweep_number')
rslsweep = _RslSweep()
rslsweep.load(self._Volume.sweep[sweep_number])
return rslsweep
def get_azimuth_and_elev_array(self):
"""
get_azimuth_and_elev_array()
Return azimuth and elevation array for each sweep and ray.
"""
cdef int nrays = self._Volume.sweep[0].h.nrays
cdef int ray_count
cdef _rsl_h.Sweep * sweep
cdef _rsl_h.Ray * ray
# create empty azimuth and elev output arrays
total_rays = self.total_rays()
azimuth = np.empty([total_rays], dtype='float32')
elev = np.empty([total_rays], dtype='float32')
# loop over the sweeps and rays storing azimuth and elev
ray_count = 0
for i in range(self.nsweeps):
sweep = self._Volume.sweep[i]
assert sweep is not NULL
nrays = sweep.h.nrays
for j in range(nrays):
ray = sweep.ray[j]
assert ray is not NULL
azimuth[ray_count + j] = ray.h.azimuth
elev[ray_count + j] = ray.h.elev
ray_count += nrays
return azimuth, elev
def get_sweep_fix_angles(self):
"""
get_sweep_fix_angles()
Return array of fix angles for each sweep.
Angles determined from the first ray in each sweep.
"""
cdef _rsl_h.Sweep * sweep
cdef _rsl_h.Ray * ray
fix_angles = np.empty((self.nsweeps), dtype='float32')
for i in range(self.nsweeps):
sweep = self._Volume.sweep[i]
assert sweep is not NULL
ray = sweep.ray[0]
assert ray is not NULL
fix_angles[i] = ray.h.fix_angle
return fix_angles
def get_sweep_azimuths(self):
"""
get_sweep_azimuths()
Return azimuth array for each sweep.
"""
cdef _rsl_h.Sweep * sweep
azimuth = np.empty((self.nsweeps), dtype='float32')
for i in range(self.nsweeps):
sweep = self._Volume.sweep[i]
assert sweep is not NULL
azimuth[i] = sweep.h.azimuth
return azimuth
def get_sweep_elevs(self):
"""
get_sweep_elevs()
Return elevation array for each sweep.
"""
cdef _rsl_h.Sweep * sweep
elev = np.empty((self.nsweeps), dtype='float32')
for i in range(self.nsweeps):
sweep = self._Volume.sweep[i]
assert sweep is not NULL
elev[i] = sweep.h.elev
return elev
def get_instr_params(self):
"""
get_instr_params()
Return the instrument parameters for the volume.
Returns
-------
pm_data : array, (nsweeps)
Array of prt modes.
nv_data : array, (total_rays)
Array of nyquist velocities.
pr_data : array, (total_rays)
Array of pulse repetition times in seconds (the reciprocal of the PRF).
ur_data : array, (total_rays)
Array of unambiguous ranges in meters.
"""
cdef int nrays = self._Volume.sweep[0].h.nrays
cdef int ray_count
cdef _rsl_h.Sweep * sweep
cdef _rsl_h.Ray * ray
# calculate the total number of rays in the volume
# initialize empty instrument parameter arrays
total_rays = self.total_rays()
nyq_vel = self._Volume.sweep[0].ray[0].h.nyq_vel
valid_nyq_vel = abs(nyq_vel) > 0.1
pm_data = np.empty(self.nsweeps, dtype='|S24')
nv_data = np.empty((total_rays), dtype='float32')
pr_data = np.empty((total_rays), dtype='float32')
ur_data = np.empty((total_rays), dtype='float32')
# loop over sweeps and rays storing instrument parameters
ray_count = 0
for i in range(self.nsweeps):
sweep = self._Volume.sweep[i]
assert sweep is not NULL
nrays = sweep.h.nrays
for j in range(nrays):
ray = sweep.ray[j]
assert ray is not NULL
if j == 0:
pm_data[i] = self._prtmode(ray.h)
if valid_nyq_vel:
nv_data[ray_count + j] = ray.h.nyq_vel
else:
nv_data[ray_count + j] = (ray.h.wavelength *
ray.h.prf / 4.0)
if ray.h.prf == 0:
pr_data[ray_count + j] = -999.
else:
pr_data[ray_count + j] = 1. / ray.h.prf
ur_data[ray_count + j] = ray.h.unam_rng * 1000.0
ray_count += nrays
return pm_data, nv_data, pr_data, ur_data
def get_data(self):
"""
get_data()
Return the two-dimensional data contained in the volume.
If a given ray has fewer bins than the first ray, the missing bins
will be filled with 131072.0.
"""
cdef _rsl_h.Range raw
cdef _rsl_h.Sweep * sweep
cdef _rsl_h.Ray * ray
cdef int ray_count, nsweeps, nrays, nbins, nray, nsweep, nbin
cdef np.ndarray[np.float32_t, ndim = 2] data
vol = self._Volume
nbins = vol.sweep[0].ray[0].h.nbins
total_rays = self.total_rays()
shape = (total_rays, nbins)
data = np.zeros(shape, dtype='float32') + 1.31072000e+05
ray_count = 0
nsweeps = vol.h.nsweeps
for nsweep in range(nsweeps):
sweep = vol.sweep[nsweep]
assert sweep is not NULL
nrays = sweep.h.nrays
for nray in range(nrays):
ray = sweep.ray[nray]
assert ray is not NULL
nbins = ray.h.nbins
for nbin in range(nbins):
raw = ray.range[nbin]
data[ray_count + nray, nbin] = ray.h.f(raw)
ray_count += nrays
return data
def is_range_bins_uniform(self):
"""
is_range_bins_uniform()
Return True if the locations of the range bins are identical for all
rays, False if they change in one or more rays.
"""
cdef int nrays = self._Volume.sweep[0].h.nrays
cdef _rsl_h.Sweep * sweep
cdef _rsl_h.Ray * ray
# loop over the sweeps and rays checking that the gate_size and
# range_bin1 are the same as those in the first ray
sweep = self._Volume.sweep[0]
assert sweep is not NULL
ray = sweep.ray[0]
assert ray is not NULL
ref_gate_size = ray.h.gate_size
ref_range_bin1 = ray.h.range_bin1
for i in range(self.nsweeps):
sweep = self._Volume.sweep[i]
assert sweep is not NULL
nrays = sweep.h.nrays
for j in range(nrays):
ray = sweep.ray[j]
assert ray is not NULL
if ray.h.gate_size != ref_gate_size:
return False
if ray.h.range_bin1 != ref_range_bin1:
return False
return True
cdef _prtmode(self, _rsl_h.Ray_header h):
""" Return the prt mode of a given Ray header. """
# TODO need to add additional logic here
if h.prf2 != h.prf:
mode = 'dual '
else:
mode = 'fixed '
return mode
# header properties mapped to class attributes.
property nsweeps:
def __get__(self):
return self._Volume.h.nsweeps
def __set__(self, int nsweeps):
self._Volume.h.nsweeps = nsweeps
property calibr_const:
def __get__(self):
return self._Volume.h.calibr_const
def __set__(self, float calibr_const):
self._Volume.h.calibr_const = calibr_const
cdef class RslFile:
"""
RslFile(filename)
An object for accessing radar data and parameters using the RSL library.
Parameters
----------
filename : str
Radar file to read.
Attributes
----------
month : int
Date, month (1-12).
day : int
Date, day (1-31).
year : int
Date, year (e.g. 1993).
hour : int
Time, hour (0-23).
minute : int
Time, minute (0-59).
sec : float
Time, second + fractions of second.
nvolumes : int
Number of volume slots in the file.
number : int
Arbitrary number for this radar site.
latd, latm, lats : int
Latitude degrees, minutes and seconds for the site.
lond, lonm, lons : int
Longitude degrees, minutes and seconds for the site.
height : int
Height of site in meters above sea level.
spulse : int
Length of short pulse in ns.
lpulse : int
Length of long pulse in ns.
scan_mode : int
Scan mode, 0 for PPI, 1 for RHI.
vcp : int
Volume coverage pattern, WSR-88D only.
"""
cdef _rsl_h.Radar * _Radar
cdef _rsl_h.Volume * _Volume
cdef _rsl_h.Sweep * _Sweep
cdef _rsl_h.Ray * _Ray
def __cinit__(self, filename, radar_format=None, callid=None):
""" Initalize the _RslFile object. """
if radar_format == 'wsr88d':
if callid is None:
raise ValueError('callid must be provided.')
self._Radar = _rsl_h.RSL_wsr88d_to_radar(filename, callid)
elif radar_format is None:
self._Radar = _rsl_h.RSL_anyformat_to_radar(filename)
else:
raise ValueError('invalid radar_format:', radar_format)
if self._Radar is NULL:
raise IOError('file cannot be read.')
def __dealloc__(self):
""" Free memory used by object. """
_rsl_h.RSL_free_radar(self._Radar)
def get_volume(self, int volume_number):
"""
get_volume(volume_number)
Return a _RslVolume for a given volume number.
Parameters
----------
volume_number : int
Volume number to retrieve
Returns
-------
volume : _RslVolume
_RslVolume object containing requested volume.
"""
if volume_number < 0 or volume_number >= self._Radar.h.nvolumes:
raise ValueError('invalid volume_number')
rslvolume = _RslVolume()
rslvolume.load(self._Radar.v[volume_number])
return rslvolume
def available_moments(self):
"""
available_moments()
Return a list of available volume moments.
"""
av = []
for i in range(self._Radar.h.nvolumes):
if self._Radar.v[i] is not NULL:
av.append(i)
return av
def get_radar_header(self):
"""
get_radar_header()
Return a dictionary of radar header parameters.
"""
return self._Radar.h
def get_volume_array(self, int volume_num):
"""
get_volume_array(volume_number)
Return the three-dimensional data contained in a given volume.
Parameters
----------
volume_number : int
Volume number to retrieve.
Returns
-------
volume : array (nsweep, nrays, nbins), float32
Array containing data for the given volume.
"""
return self.get_volume(volume_num).get_data()
# header properties mapped to class attributes.
property month:
def __get__(self):
return self._Radar.h.month
def __set__(self, int month):
self._Radar.h.month = month
property day:
def __get__(self):
return self._Radar.h.day
def __set__(self, int day):
self._Radar.h.day = day
property year:
def __get__(self):
return self._Radar.h.year
def __set__(self, int year):
self._Radar.h.year = year
property hour:
def __get__(self):
return self._Radar.h.hour
def __set__(self, int hour):
self._Radar.h.hour = hour
property minute:
def __get__(self):
return self._Radar.h.minute
def __set__(self, int minute):
self._Radar.h.minute = minute
property sec:
def __get__(self):
return self._Radar.h.sec
def __set__(self, float sec):
self._Radar.h.sec = sec
property nvolumes:
def __get__(self):
return self._Radar.h.nvolumes
def __set__(self, int nvolumes):
self._Radar.h.nvolumes = nvolumes
property number:
def __get__(self):
return self._Radar.h.number
def __set__(self, int number):
self._Radar.h.number = number
property latd:
def __get__(self):
return self._Radar.h.latd
def __set__(self, int latd):
self._Radar.h.latd = latd
property latm:
def __get__(self):
return self._Radar.h.latm
def __set__(self, int latm):
self._Radar.h.latm = latm
property lats:
def __get__(self):
return self._Radar.h.lats
def __set__(self, int lats):
self._Radar.h.lats = lats
property lond:
def __get__(self):
return self._Radar.h.lond
def __set__(self, int lond):
self._Radar.h.lond = lond
property lonm:
def __get__(self):
return self._Radar.h.lonm
def __set__(self, int lonm):
self._Radar.h.lonm = lonm
property lons:
def __get__(self):
return self._Radar.h.lons
def __set__(self, int lons):
self._Radar.h.lons = lons
property height:
def __get__(self):
return self._Radar.h.height
def __set__(self, int height):
self._Radar.h.height = height
property spulse:
def __get__(self):
return self._Radar.h.spulse
def __set__(self, int spulse):
self._Radar.h.spulse = spulse
property lpulse:
def __get__(self):
return self._Radar.h.lpulse
def __set__(self, int lpulse):
self._Radar.h.lpulse = lpulse
property scan_mode:
def __get__(self):
return self._Radar.h.scan_mode
def __set__(self, int scan_mode):
self._Radar.h.scan_mode = scan_mode
property vcp:
def __get__(self):
return self._Radar.h.vcp
def __set__(self, int vcp):
self._Radar.h.vcp = vcp
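# ---------------------------------------------------------------------------
# Hypothetical usage sketch -- not part of the wrapper itself. It assumes the
# extension is compiled under the name `_rsl_interface` and that an
# RSL-readable file (UF, Sigmet, WSR-88D, ...) exists at the given path; the
# module and file names are illustrative assumptions.
#
#     from _rsl_interface import RslFile
#
#     rfile = RslFile(b'radar_volume.uf')          # bytes: passed to a C char*
#     print(rfile.year, rfile.month, rfile.day)    # header attributes
#     for vol_idx in rfile.available_moments():
#         volume = rfile.get_volume(vol_idx)       # -> _RslVolume
#         data = volume.get_data()                 # (total_rays, nbins) float32
# ---------------------------------------------------------------------------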
# -*- coding: utf-8 -*-
import numpy as np
cimport numpy as np
import scipy.sparse as ss
from libc.stdlib cimport malloc, free
from libcpp.pair cimport pair
from libcpp.vector cimport vector
cdef extern from "wrapper/xgboost_wrapper.h":
ctypedef unsigned long bst_ulong
void *XGDMatrixCreateFromMat(
const float *data,
bst_ulong nrow,
bst_ulong ncol,
float missing)
void *XGDMatrixCreateFromCSR(
const bst_ulong *indptr,
const unsigned *indices,
const float *data,
bst_ulong nindptr,
bst_ulong nelem)
void XGDMatrixSetFloatInfo(
void *handle,
const char *field,
const float *array,
bst_ulong length)
void XGDMatrixFree(void *handle)
void XGBoosterSetParam(void *handle, const char *k, const char *v)
void *XGBoosterCreate(void *dmats[], bst_ulong length)
void XGBoosterFree(void *handle)
void XGBoosterUpdateOneIter(void *handle, int it, void *dtrain)
const char *XGBoosterEvalOneIter(
void *handle,
int iteration,
void *dmats[],
const char *evnames[],
bst_ulong length)
const float *XGBoosterPredict(
void *handle,
void *dmat,
int option_mask,
unsigned ntree_limit,
bst_ulong *length)
cdef class XGBoost:
cdef void *booster
cdef object params
cdef void *dmats[2] # 0: train, 1: test
cdef object num_round
cdef object y_test
def __cinit__(self, **cons_params):
self.params = cons_params
self.num_round = cons_params.get('num_round', 10)
self.y_test = None
def __dealloc__(self):
# free the C handles when the object is collected; __dealloc__ (not
# __del__) is what Cython guarantees to run for cdef classes. Skip
# handles that were never created.
if self.booster is not NULL:
XGBoosterFree(self.booster)
if self.dmats[0] is not NULL:
XGDMatrixFree(self.dmats[0])
if self.dmats[1] is not NULL:
XGDMatrixFree(self.dmats[1])
def load_nd(self, X, y, idx, missing=0.0):
cdef np.ndarray[float, ndim=1, mode='c'] data = np.array(
X.reshape(X.size), dtype='float32')
cdef void *dmat = XGDMatrixCreateFromMat(
&data[0], X.shape[0], X.shape[1], missing)
cdef np.ndarray[float, ndim=1, mode='c'] labels = np.array(
y.reshape(y.size), dtype='float32')
XGDMatrixSetFloatInfo(dmat, "label", &labels[0], y.size)
self.dmats[idx] = dmat
def load_ss(self, X, y, idx):
assert len(X.indices) == len(X.data)
nindptr = len(X.indptr)
nelem = len(X.data)
cdef bst_ulong *col_ptr = <bst_ulong*>malloc(len(X.indptr) * sizeof(bst_ulong))
for i in range(len(X.indptr)): col_ptr[i] = X.indptr[i]
cdef np.ndarray[unsigned, ndim=1, mode='c'] indices = np.array(
X.indices, dtype='uint32')
cdef np.ndarray[float, ndim=1, mode='c'] data = np.array(
X.data, dtype='float32')
cdef void *dmat = XGDMatrixCreateFromCSR(
col_ptr, &indices[0], &data[0], nindptr, nelem)
free(col_ptr)  # the DMatrix keeps its own copy of the CSR structure
cdef np.ndarray[float, ndim=1, mode='c'] labels = np.array(y, dtype='float32')
XGDMatrixSetFloatInfo(dmat, "label", &labels[0], y.size)
self.dmats[idx] = dmat
def set_test_label(self, y):
self.y_test = y
def set_param(self, k_str, v_str):
k_byte_string = k_str.encode('utf-8')
v_byte_string = v_str.encode('utf-8')
cdef const char* param_k = k_byte_string
cdef const char* param_v = v_byte_string
XGBoosterSetParam(self.booster, param_k, param_v)
def set_params(self):
if isinstance(self.params, dict):
for k, v in self.params.items():
self.set_param(str(k), str(v))
def setup_cache(self, X_tr, y_tr, X_ts):
if isinstance(X_tr, np.ndarray):
self.load_nd(X_tr, y_tr, 0)
elif isinstance(X_tr, ss.csr_matrix):
self.load_ss(X_tr, y_tr, 0)
else:
raise NotImplementedError("Unsupported data type")
y_ts = np.zeros(X_ts.shape[0])
if self.y_test is not None:
y_ts = self.y_test
if isinstance(X_ts, np.ndarray):
self.load_nd(X_ts, y_ts, 1)
elif isinstance(X_ts, ss.csr_matrix):
self.load_ss(X_ts, y_ts, 1)
else:
raise NotImplementedError("Unsupported data type")
self.booster = XGBoosterCreate(self.dmats, 2)
self.set_param('seed', '0')
self.set_params()
def eval_set(self, it):
k_byte_string = "train".encode('utf-8')
v_byte_string = "test".encode('utf-8')
cdef const char* param_k = k_byte_string
cdef const char* param_v = v_byte_string
cdef const char* setnames[2]
setnames[0] = param_k
setnames[1] = param_v
length = 2
if self.y_test is None:
length = 1
s = XGBoosterEvalOneIter(
self.booster,
it,
self.dmats,
setnames,
length)
print(s.decode('utf-8','strict'))
def fit_predict(self, X_tr, y_tr, X_ts):
self.setup_cache(X_tr, y_tr, X_ts)
for i in range(self.num_round):
XGBoosterUpdateOneIter(self.booster, i, self.dmats[0])
if int(self.params.get('silent', 1)) < 2:
self.eval_set(i)
# Options
ntree_limit = 0
option_mask = 0x00
cdef const float* preds_raw
cdef bst_ulong length
preds_raw = XGBoosterPredict(
self.booster, self.dmats[1], option_mask,
ntree_limit, &length)
preds = np.array([preds_raw[i] for i in range(length)])
num_class = self.params.get('num_class', 1)
n_samples = length // num_class  # integer division keeps the reshape dims integral
return preds.reshape((n_samples, num_class))
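# ---------------------------------------------------------------------------
# Hypothetical usage sketch (assumption: the extension compiles as
# `xgboost_cy`). The keyword arguments mirror standard xgboost configuration
# keys; `num_round` is consumed by the wrapper itself.
#
#     import numpy as np
#     from xgboost_cy import XGBoost
#
#     X_tr = np.random.rand(100, 5)
#     y_tr = (X_tr[:, 0] > 0.5).astype('float64')
#     X_ts = np.random.rand(20, 5)
#
#     clf = XGBoost(num_round=20, objective='binary:logistic', silent=1)
#     preds = clf.fit_predict(X_tr, y_tr, X_ts)    # shape (20, 1)
# ---------------------------------------------------------------------------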
cdef extern int performWTP(unsigned char *inputFilename,
unsigned char *outputFilename,
int image_width,
int image_height,
int to_extend_image,
int extend_by_pixels,
int use_FFT,
int wavelet_type,
int ridge_alg,
double starting_scale,
double scale_step,
double ending_scale,
double Morlet_sigma)
def perform_WTP(unsigned char *inputFilename,
unsigned char *outputFilename,
int image_width,
int image_height,
int to_extend_image,
int extend_by_pixels,
int use_FFT,
int wavelet_type,
int ridge_alg,
double starting_scale,
double scale_step,
double ending_scale,
double Morlet_sigma):
# propagate the C routine's integer status code to the caller
return performWTP(inputFilename,
outputFilename,
image_width,
image_height,
to_extend_image,
extend_by_pixels,
use_FFT,
wavelet_type,
ridge_alg,
starting_scale,
scale_step,
ending_scale,
Morlet_sigma)
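# ---------------------------------------------------------------------------
# Hypothetical usage sketch: the filename parameters are C `unsigned char *`,
# so byte strings must be passed from Python 3. The numeric values below are
# illustrative assumptions, not defaults of the underlying C library; the
# call yields the integer status code of performWTP.
#
#     status = perform_WTP(b'fringes.raw', b'unwrapped.raw',
#                          640, 480,        # image_width, image_height
#                          1, 16,           # to_extend_image, extend_by_pixels
#                          1, 0, 0,         # use_FFT, wavelet_type, ridge_alg
#                          2.0, 0.5, 30.0,  # starting/step/ending scale
#                          6.0)             # Morlet_sigma
# ---------------------------------------------------------------------------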
from cec2006 import CFunction
import numpy as np
import scipy.stats as ss
import random
import operator
from libc.math cimport sin, cos, pi, e, exp, sqrt, log, tan
from libc.stdlib cimport rand, RAND_MAX, srand
from libc.time cimport time
cimport cython
##generate a random number by normal distribution
cdef double rand_normal(double mu, double sigma):
cdef:
double z, u1, u2
# Box-Muller transform: two independent uniforms supply radius and angle
u1 = 1.0 - random.random()  # in (0, 1], keeps log() finite
u2 = random.random()
z = sqrt(- 2.0 * log(u1)) * sin(2.0 * pi * u2)
z = mu + sigma * z
return z
##generate a random number by cauchy distribution
cdef double rand_cauchy(double mu, double gamma):
cdef:
double z, uniform
uniform = random.random()
z = mu + gamma * tan(pi * (uniform - 0.5))
return z
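# Sanity-check sketch for the two samplers above. They are C-level `cdef`
# functions, so exercising them from Python assumes a small cpdef shim like
# the hypothetical one below:
#
#     cpdef double py_rand_normal(double mu, double sigma):
#         return rand_normal(mu, sigma)
#
# With ~10^5 draws, the sample mean of py_rand_normal(0.0, 1.0) should sit
# near 0 and the sample standard deviation near 1. For rand_cauchy, compare
# the sample *median* against mu instead: the Cauchy distribution has no mean.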
cdef class EECO:
params = {'H': 5, #historical memory size
'num_stg': 4 #number of strategies
}
cdef public:
list lb, ub #lower, upper bounds
o, M, M1, M2
tuple mat
def __init__(self, benchID, D):
self.lb = CFunction().get_LB(benchID, D)
self.ub = CFunction().get_UB(benchID, D)
mat = CFunction().load_mat(benchID, D)
self.o, self.M, self.M1, self.M2 = (mat[_] for _ in range(4))
##Initialize population P
cdef void init_P(self, double[:, ::1] P_X, int NP, int benchID, int D):
for i in range(NP):
CFunction().evaluate(benchID, D, P_X[i, :], self.o, self.M, self.M1, self.M2)
##Initialize population Q
cdef list init_Q(self, double[:, ::1] P_X, double[:, ::1] Q_X, int NP, int Lambda, int benchID, int D):
cdef:
list sel_idx
sel_idx = random.sample(range(NP), Lambda)
for i in range(len(sel_idx)):
Q_X[i, :] = P_X[sel_idx[i], :]
return sel_idx
##Initialize memory for F and CR
cdef tuple initMemory(self):
cdef:
int H, n, j, num_stg
list M_CR, M_F, Temp_1, Temp_2
H = self.params['H']
num_stg = self.params['num_stg']
M_CR = []
M_F = []
for i in range(num_stg):
Temp_1, Temp_2 = [], []
for j in range(H):
Temp_1.append(0.5)
M_CR.append(Temp_1)
for j in range(H):
Temp_2.append(0.5)
M_F.append(Temp_2)
return M_CR, M_F
##strategy decision
cdef int chooseStrategy(self, list ql, list Num_Success_n):
cdef:
int n_sum, k, l, Strategy
double wheel
n_sum = 0
Strategy = 0
for k in range(4):
n_sum += Num_Success_n[k] + 2
if n_sum != 0:
for k in range(4):
ql[k] = <double>(Num_Success_n[k] + 2) / n_sum
for k in range(4):
if ql[k] < 0.05:
for l in range(4):
ql[l] = 0.25
Num_Success_n[l] = 0
break
wheel = random.random()
if wheel <= ql[0]:
Strategy = 0
elif wheel <= sum(ql[:2]) and wheel > ql[0]:
Strategy = 1
elif wheel <= sum(ql[:3]) and wheel > sum(ql[:2]):
Strategy = 2
elif wheel <= sum(ql[:4]) and wheel > sum(ql[:3]):
Strategy = 3
return Strategy
##generate F and CR in DE
cdef tuple generate_F_CR(self, int Stg, list Memory_CR, list Memory_F, list Success_CR, list CR, list F):
cdef:
int H, i, j, ri, num_stg
double cr, f
list muCR, muF
H = self.params['H']
num_stg = self.params['num_stg']
ri = random.randint(0, H - 1)
muCR = []
for i in range(num_stg):
muCR.append(0.5)
muF = []
for i in range(num_stg):
muF.append(0.5)
if Success_CR[Stg] != []:
if muCR[Stg] == -1.0 or max(Success_CR[Stg]) == 0.0:
muCR[Stg] = 0.0
else:
muCR[Stg] = Memory_CR[Stg][ri]
else:
muCR[Stg] = Memory_CR[Stg][ri]
muF[Stg] = Memory_F[Stg][ri]
cr = rand_normal(muCR[Stg], 0.1)
f = rand_cauchy(muF[Stg], 0.1)
if cr < 0.0:
cr = 0.0
elif cr > 1.0:
cr = 1.0
while f <= 0.0:
f = rand_cauchy(muF[Stg], 0.1)
if f > 1.0:
f = 1.0
CR.append(cr)
F.append(f)
return cr, f
##current-to-Qbest/1
cdef void mutation_1(self, double[:, ::1] QParent, double[:, ::1] QChild, list A, int Lambda, int D, double f, int idx, int bestEqIndex, double grate):
cdef:
int x_r1, x_r2
x_r1 = random.randint(0, Lambda - 1)
x_r2 = random.randint(0, Lambda + <int>len(A) - 1)
while x_r1 == idx:
x_r1 = random.randint(0, Lambda - 1)
while x_r2 == x_r1 or x_r2 == idx:
x_r2 = random.randint(0, Lambda + <int>len(A) - 1)
for j in range(D):
if x_r2 < Lambda:
QChild[idx, j] = QParent[idx, j] + f * (QParent[bestEqIndex, j] - QParent[idx, j]) + f * (QParent[x_r1, j] - QParent[x_r2, j])
else:
QChild[idx, j] = QParent[idx, j] + f * (QParent[bestEqIndex, j] - QParent[idx, j]) + f * (QParent[x_r1, j] - A[x_r2 - Lambda][j])
if QChild[idx, j] < self.lb[j]:
QChild[idx, j] = min(self.ub[j], 2 * self.lb[j] - QChild[idx, j])
elif QChild[idx, j] > self.ub[j]:
QChild[idx, j] = max(self.lb[j], 2 * self.ub[j] - QChild[idx, j])
##randr1/1
cdef void mutation_2(self, double[:, ::1] QParent, double[:, ::1] QChild, int Lambda, int D, double f, int idx):
cdef:
int x_1, x_2, x_3
x_1 = random.randint(0, Lambda - 1)
x_2 = random.randint(0, Lambda - 1)
x_3 = random.randint(0, Lambda - 1)
while x_1 == x_2:
x_2 = random.randint(0, Lambda - 1)
while x_1 == x_3 or x_2 == x_3:
x_3 = random.randint(0, Lambda - 1)
for j in range(D):
QChild[idx, j] = QParent[x_1, j] + f * (QParent[x_2, j] - QParent[x_3, j])
if QChild[idx, j] < self.lb[j]:
QChild[idx, j] = min(self.ub[j], 2 * self.lb[j] - QChild[idx, j])
elif QChild[idx, j] > self.ub[j]:
QChild[idx, j] = max(self.lb[j], 2 * self.ub[j] - QChild[idx, j])
##binomial crossover
cdef void crossover_1(self, double[:, ::1] QParent, double[:, ::1] QChild, int D, double cr, int idx):
cdef:
int jRand, j
jRand = random.randint(0, D - 1)
for j in range(D):
if jRand != j and random.random() <= cr:
QChild[idx, j] = QParent[idx, j]
##exponential crossover
cdef void crossover_2(self, double[:, ::1] QParent, double[:, ::1] QChild, int D, double cr, int idx):
cdef:
int n, L
n = random.randint(0, D - 1)
L = 0
while random.random() <= cr and L < D:
QChild[idx, (n + L) % D] = QParent[idx, (n + L) % D]
L += 1
##DE
cdef void DE(self, double[:, ::1] QParent, double[:, ::1] QChild, list A, int Lambda, int D, int Stg, int idx, int bestEqIndex, double f, double cr, double grate):
if Stg == 0:
self.mutation_1(QParent, QChild, A, Lambda, D, f, idx, bestEqIndex, grate)
self.crossover_1(QParent, QChild, D, cr, idx)
elif Stg == 1:
self.mutation_1(QParent, QChild, A, Lambda, D, f, idx, bestEqIndex, grate)
self.crossover_2(QParent, QChild, D, cr, idx)
elif Stg == 2:
self.mutation_2(QParent, QChild, Lambda, D, f, idx)
self.crossover_1(QParent, QChild, D, cr, idx)
elif Stg == 3:
self.mutation_2(QParent, QChild, Lambda, D, f, idx)
self.crossover_2(QParent, QChild, D, cr, idx)
##equivalent function - feasible rule
cdef void Equ_FR(self, double[:, ::1] Q, int Lambda, int D):
cdef:
int jugg, len_Q
double Fea_max
jugg = 1
Fea_max = -RAND_MAX
len_Q = Q.shape[0]
for i in range(len_Q):
if Q[i, D + 1] > 0.0:
jugg = -1
break
if jugg == -1:
for i in range(len_Q):
if Q[i, D + 1] == 0.0:
jugg = 0
break
if jugg == 1:
for i in range(len_Q):
Q[i, D + 2] = Q[i, D]
elif jugg == -1:
for i in range(len_Q):
Q[i, D + 2] = Q[i, D + 1]
else:
for i in range(len_Q):
if Q[i, D + 1] == 0.0:
if Fea_max < Q[i, D]:  # track the largest feasible objective value
Fea_max = Q[i, D]
for i in range(len_Q):
if Q[i, D + 1] > 0.0:
Q[i, D + 2] = Fea_max + Q[i, D + 1]
else:
Q[i, D + 2] = Q[i, D]
##new equivalent function
cdef void Equ_New(self, double[:, ::1] Q, int Lambda, int D):
len_Q = Q.shape[0]
bestIndex = self.find_FR_Best(Q, len_Q, D)
for i in range(len_Q):
Q[i, D + 2] = abs(Q[i, D] - Q[bestIndex, D])
##calculate fi(x) = w1 * e(x) + w2 * v(x) + w3 * f(x)
cdef void Hl_Eq(self, double[:, ::1] Q, int Lambda, int D, int idx, double para, double gamma):
cdef:
double f_max, f_min, v_max, v_min, eq_max, eq_min, w_t, n, w_i, w1, w2, w3
int len_Q
##normalization
f_max = self.np_max(Q, D)
f_min = self.np_min(Q, D)
v_max = self.np_max(Q, D + 1)
v_min = self.np_min(Q, D + 1)
# self.Equ_FR(Q, Lambda, D)
self.Equ_New(Q, Lambda, D)
eq_max = self.np_max(Q, D + 2)
eq_min = self.np_min(Q, D + 2)
w_t = para
w_i = (<double>idx + 1) / Lambda
w1 = w_t * w_i
w2 = w_t * w_i + gamma
w3 = (1.0 - w_t) * (1.0 - w_i)
len_Q = Q.shape[0]
for _ in range(len_Q):
Q[_, D + 3] = w1 * (Q[_, D + 2] - eq_min) / (eq_max - eq_min + 10.0**-100)
Q[_, D + 3] += w2 * (Q[_, D + 1] - v_min) / (v_max - v_min + 10.0**-100)
Q[_, D + 3] += w3 * (Q[_, D] - f_min) / (f_max - f_min + 10.0**-100)
##minimum value of e, v or f with axis_n
cdef double np_min(self, double[:, ::1] P, int axis_n):
cdef:
double min_value
Py_ssize_t bound
min_value = P[0, axis_n]
bound = P.shape[0]
for i in range(bound):
if min_value > P[i, axis_n]:
min_value = P[i, axis_n]
return min_value
##maximum value of e, v or f with axis_n
cdef double np_max(self, double[:, ::1] P, int axis_n):
cdef:
double max_value
Py_ssize_t bound
max_value = P[0, axis_n]
bound = P.shape[0]
for i in range(bound):
if max_value < P[i, axis_n]:
max_value = P[i, axis_n]
return max_value
##selection/updating subpopulation Q
cdef void selection(self, double[:, ::1] QSum, double[:, ::1] QParent, double[:, ::1] QChild, list A, int Lambda, int D, int idx, int stg, list CR, list F, list Success_cr, list Success_F, list fit_improve, list Num_Success_n):
if QSum[idx, D + 3] > QSum[Lambda, D + 3]:
Success_cr[stg].append(CR[len(CR) - 1])
Success_F[stg].append(F[len(F) - 1])
Num_Success_n[stg] += 1
fit_improve[stg].append(QSum[idx, D + 3] - QSum[Lambda, D + 3])
A.append(QParent[idx, :])
QParent[idx, :] = QChild[idx, :]
##|A| < |A|_max
cdef void stableA(self, list A, int ASize):
if len(A) > ASize:
for i in range(<int>len(A) - ASize):
A.remove(A[random.randint(0, <int>len(A) - 1)])
##Updating memory
cdef void UpdateMemory(self, list Memory_cr, list Memory_F, list Success_cr, list Success_F, list fit_improve, int H, list pos):
cdef:
int n, k, num_Scr, num_SF, num_stg
double f1, f3, f4, weight_1, weight_2, meanScr, meanSF
num_stg = self.params['num_stg']
for k in range(num_stg):
if Success_cr[k] != [] and Success_F[k] != []:
num_Scr = <int>len(Success_cr[k])
num_SF = <int>len(Success_F[k])
meanScr = 0.0
meanSF = 0.0
weight_1 = 0.0
f1 = 0.0
for i in range(num_Scr):
weight_1 += abs(fit_improve[k][i])
for i in range(num_Scr):
f1 += abs(fit_improve[k][i]) / (weight_1 + 10.0**-100) * (Success_cr[k][i])
meanScr = f1
# Memory_cr[k][pos[k]] = (meanScr + Success_cr[k][num_Scr - 1]) / 2
Memory_cr[k][pos[k]] = meanScr
weight_2 = 0.0
f3 = 0.0
f4 = 0.0
for i in range(num_SF):
weight_2 += abs(fit_improve[k][i])
for i in range(num_SF):
f3 += abs(fit_improve[k][i]) / (weight_2 + 10.0**-100) * np.power(Success_F[k][i], 2)
f4 += abs(fit_improve[k][i]) / (weight_2 + 10.0**-100) * Success_F[k][i]
meanSF = f3 / (f4 + 10.0**-100)
# Memory_F[k][pos[k]] = (meanSF + Success_F[k][num_SF - 1]) / 2
Memory_F[k][pos[k]] = meanSF
pos[k] = pos[k] + 1
if pos[k] > H - 1:
pos[k] = 0
##the index of best solution of feasibility rule
cdef int find_FR_Best(self, double[:, ::1] P, int NP, int D):
cdef:
int bestIndex
bestIndex = 0
for i in range(NP):
if P[bestIndex, D + 1] > P[i, D + 1]:
bestIndex = i
elif P[bestIndex, D + 1] == P[i, D + 1] and P[bestIndex, D] > P[i, D]:
bestIndex = i
return bestIndex
##the index of best solution of e, v or f or fi
cdef int findBest(self, double[:, ::1] Q, int Lambda, int D, int axis_n):
cdef:
int bestIndex
bestIndex = 0
for i in range(Lambda):
if Q[bestIndex, D + axis_n] > Q[i, D + axis_n]:
bestIndex = i
return bestIndex
##the main process of HECO-DE
cdef tuple _optimize(self, int benchID, int D, int Lambda, double gamma):
cdef:
int NP, NPinit, NPmin, NPlast, H, num_stg, FES, FES_MAX, gen_count, Srun, stg, bestEqIndex, bestIndex, bestFIndex, worstIndex, worstFIndex, Success
list sel_idx, A, M_CR, M_F, CR, F, Num_Success_n, pos, ql, S_CR, S_F, fit_improve
double para, cr, f
double[::1] narr_eq, narr_f, narr_v
double[:, ::1] P, QParent, QChild
tuple Init_M
NPinit = 450
NP = NPinit ##the size of P
NPmin = Lambda
NPlast = 0
H = self.params['H']
num_stg = self.params['num_stg']
FES = NP
gen_count = 1
Srun = 0 ##the FES used when the success-run condition is first satisfied
Success = 0
FES_MAX = 500000
##population P, 0~D-1 are the solutions, D~D+7 are solution's value of f, v, e, fi, c1, c2 and c3 respectively
narr_P = np.zeros((NP, D + 7), dtype = np.float64)
P = narr_P ##initialize solutions
for i in range(NP):
for j in range(D):
P[i, j] = self.lb[j] + random.random() * (self.ub[j] - self.lb[j])
self.init_P(P, NP, benchID, D)
narr_QParent = np.zeros((Lambda, D + 7), dtype = np.float64)
narr_QChild = np.zeros((Lambda, D + 7), dtype = np.float64)
narr_QSum = np.zeros((Lambda + 1, D + 7), dtype = np.float64)
QParent = narr_QParent
QChild = narr_QChild
QSum = narr_QSum ##QSum is Q + i_th child
bestEqIndex = 0
A = [] ## archive A
Init_M = self.initMemory()
M_CR = Init_M[0]
M_F = Init_M[1]
CR = []
F = []
Num_Success_n = []
pos = []
ql = []
for i in range(num_stg):
Num_Success_n.append(0)
pos.append(0)
ql.append(0.25)
while FES < FES_MAX:
ASize = round(4.0 * NP)
S_CR = []
S_F = []
fit_improve = []
for i in range(num_stg):
S_CR.append([])
S_F.append([])
fit_improve.append([])
sel_idx = self.init_Q(P, QParent, NP, Lambda, benchID, D)
para = <double>FES / FES_MAX ##t/T_MAX
for idx in range(Lambda):
stg = self.chooseStrategy(ql, Num_Success_n)
f_cr = self.generate_F_CR(stg, M_CR, M_F, S_CR, CR, F)
cr = f_cr[0]
f = f_cr[1]
self.Hl_Eq(QParent, Lambda, D, idx, para, gamma)
bestEqIndex = self.findBest(QParent, Lambda, D, 3)
self.DE(QParent, QChild, A, Lambda, D, stg, idx, bestEqIndex, f, cr, para)
CFunction().evaluate(benchID, D, QChild[idx, :], self.o, self.M, self.M1, self.M2)
QSum[:Lambda, :] = QParent[:, :]
QSum[Lambda, :] = QChild[idx, :]
self.Hl_Eq(QSum, Lambda, D, idx, para, gamma)
self.selection(QSum, QParent, QChild, A, Lambda, D, idx, stg, CR, F, S_CR, S_F, fit_improve, Num_Success_n)
self.stableA(A, ASize)
FES += 1
for i in range(len(sel_idx)):
P[sel_idx[i], :] = QParent[i, :]
self.UpdateMemory(M_CR, M_F, S_CR, S_F, fit_improve, H, pos)
gen_count += 1
##record the FES at which the success condition is first reached
bestIndex = self.find_FR_Best(P, NP, D)
if P[bestIndex, D] < 0.0001 and P[bestIndex, D + 1] == 0.0 and Success == 0:
Srun = FES
Success = 1
##population size reduction
NPlast = narr_P.shape[0]
if NP > Lambda:
NP = round(<double>(NPmin - NPinit) / (FES_MAX) * FES + NPinit)
if NP < NPlast and NP >= Lambda:
for i in range(NPlast - NP):
r = random.randint(0, narr_P.shape[0] - 1)
while r == bestIndex:
r = random.randint(0, narr_P.shape[0] - 1)
narr_P = np.delete(narr_P, r, 0)
P = narr_P
S_CR.clear()
S_F.clear()
fit_improve.clear()
return P[bestIndex, D], P[bestIndex, D + 1], <int>P[bestIndex, D + 4], <int>P[bestIndex, D + 5], <int>P[bestIndex, D + 6], Srun
def optimize(self, benchID, D, Lambda, gamma):
return self._optimize(benchID, D, Lambda, gamma)
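# ---------------------------------------------------------------------------
# Hypothetical usage sketch (assumptions: this module compiles as `eeco` and
# the cec2006 benchmark data used by CFunction is importable). CEC2006
# problem g01 has D = 13 decision variables.
#
#     from eeco import EECO
#
#     solver = EECO(1, 13)            # benchID=1, D=13
#     f_best, violation, c1, c2, c3, fes = solver.optimize(1, 13, 50, 0.0)
#     # f_best    : best objective value found
#     # violation : total constraint violation of that solution
#     # fes       : evaluations used when the success condition was first met
# ---------------------------------------------------------------------------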
from libc.stdlib cimport malloc, free
from cpython cimport array
from pyjkstra cimport dijkstra as c_dijkstra
from pyjkstra cimport graph_t, createGraph, freeGraph
from pyjkstra cimport printGraph, printNode, addEdge, INT_MAX
cdef class c_Graph:
'''Cython class that exposes a graph'''
cdef graph_t * thisptr
def __cinit__(self, int n):
''' Initialises a C pointer to the graph structure.'''
self.thisptr = createGraph(n)
if self.thisptr is NULL:
raise MemoryError
def __dealloc__(self):
''' Free the malloced memory. '''
if self.thisptr is not NULL:
freeGraph(self.thisptr)
def __str__(self):
''' Print a representation of self.'''
# Bad hack, it prints but returns an empty string …
printGraph(self.thisptr)
return ""
@property
def nb_vertices(self):
return self.thisptr.nb_vertices
def get(self, int n):
printNode(self.thisptr, n)
def addEdge(self, int src, int dest, double weight):
addEdge(self.thisptr, src, dest, weight)
def dijkstra(self, int s):
''' Converts the Python objects to and from C variables and
call the algorithm. '''
cdef int l = self.thisptr.nb_vertices
# Malloc arrays for return values of the dijkstra algorithm
cdef int* prev_arg = <int*>malloc(sizeof(int)*l)
cdef double* dist_arg = <double*>malloc(sizeof(double)*l)
# Call the algorithm
c_dijkstra(self.thisptr, s, prev_arg, dist_arg)
prev_out = []
dist_out = []
# Convert back from C-types to python object
for i in range(l):
if (prev_arg[i] == INT_MAX):
val = None
else:
val = prev_arg[i]
prev_out.append(val)
if (dist_arg[i] == INT_MAX):
val = None
else:
val = dist_arg[i]
dist_out.append(val)
# Free C arrays
free(dist_arg)
free(prev_arg)
return (prev_out, dist_out)
# This is the pure Python class that implements all "methods" of our graph
class Graph:
''' A graph represented as an adjacency list.'''
c_graph = None
def __init__(self, int n):
''' n is the number of vertices.'''
self.c_graph = c_Graph(n)
def __del__(self):
del self.c_graph
def __str__(self):
return self.c_graph.__str__()
@property
def nb_vertices(self):
return self.c_graph.nb_vertices
def addEdge(self, int src, int dest, double weight):
''' Adds an edge to the graph from `src` to `dest` with weight `weight`.'''
self.c_graph.addEdge(src, dest, weight)
def get(self, int n):
return self.c_graph.get(n)
def dijkstra(self, int s):
''' Implement the dijkstra path-finding algorithm.
Parameters:
G dijkstra.Graph a python representation of a graph
s int the starting point of the algorithm
Returns:
(prev, dist) with prev the antecedent in the path and dist the distance of
each node from the start
'''
return self.c_graph.dijkstra(s)
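# ---------------------------------------------------------------------------
# Usage sketch, grounded in the classes above: build a small weighted digraph
# and run Dijkstra from vertex 0. Unreachable vertices come back as None in
# both returned lists.
#
#     g = Graph(4)
#     g.addEdge(0, 1, 1.0)
#     g.addEdge(1, 2, 2.5)
#     g.addEdge(0, 2, 5.0)
#     prev, dist = g.dijkstra(0)
#     # dist[2] == 3.5 (path 0 -> 1 -> 2); vertex 3 is unreachable -> None
# ---------------------------------------------------------------------------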
# -*- coding: utf-8 -*-
"""
`p`-adic distributions spaces
This module implements p-adic distributions, a `p`-adic Banach
space dual to locally analytic functions on a disc.
EXAMPLES::
sage: D = OverconvergentDistributions(5, 7, 15)
sage: v = D([7,14,21,28,35]); v
(7 + O(7^5), 2*7 + O(7^4), 3*7 + O(7^3), 4*7 + O(7^2), O(7))
REFERENCES:
.. [PS] Overconvergent modular symbols and p-adic L-functions
Robert Pollack, Glenn Stevens
Annales Scientifiques de l'Ecole Normale Superieure, serie 4, 44 fascicule 1 (2011), 1--42.
"""
#*****************************************************************************
# Copyright (C) 2012 Robert Pollack <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import print_function
from sage.structure.sage_object import SageObject
from sage.rings.integer_ring import ZZ
from sage.rings.rational_field import QQ
from sage.rings.power_series_ring import PowerSeriesRing
from sage.rings.finite_rings.integer_mod_ring import Zmod
from sage.arith.all import binomial, bernoulli
from sage.modules.free_module_element import vector, zero_vector
from sage.matrix.matrix cimport Matrix
from sage.matrix.matrix_space import MatrixSpace
from sage.matrix.all import matrix
from sage.misc.prandom import random
from sage.functions.other import floor
from sage.structure.element cimport RingElement, Element
import operator
from sage.rings.padics.padic_generic import pAdicGeneric
from sage.rings.padics.padic_capped_absolute_element cimport pAdicCappedAbsoluteElement
from sage.rings.padics.padic_capped_relative_element cimport pAdicCappedRelativeElement
from sage.rings.padics.padic_fixed_mod_element cimport pAdicFixedModElement
from sage.rings.integer cimport Integer
from sage.rings.rational cimport Rational
from sage.misc.misc import verbose, cputime
from sage.rings.infinity import Infinity
include "sage/ext/cdefs.pxi"
include "cysignals/signals.pxi"
include "sage/ext/stdsage.pxi"
from sage.libs.flint.nmod_poly cimport (nmod_poly_init2_preinv,
nmod_poly_set_coeff_ui,
nmod_poly_inv_series,
nmod_poly_mullow,
nmod_poly_pow_trunc,
nmod_poly_get_coeff_ui, nmod_poly_t)
#from sage.libs.flint.ulong_extras cimport *
from sigma0 import Sigma0
cdef long overflow = 1 << (4 * sizeof(long) - 1)
cdef long underflow = -overflow
cdef long maxordp = (1L << (sizeof(long) * 8 - 2)) - 1
def get_dist_classes(p, prec_cap, base, symk, implementation):
r"""
Determine the element and action classes to be used for given inputs.
INPUT:
- ``p`` -- prime
- ``prec_cap`` -- The `p`-adic precision cap
- ``base`` -- The base ring
- ``symk`` -- An element of Symk
- ``implementation`` - string - If not None, override the
automatic choice of implementation. May be 'long' or 'vector',
otherwise raise a ``NotImplementedError``
OUTPUT:
- Either a Dist_vector and WeightKAction_vector, or a Dist_vector_long
and WeightKAction_vector_long
EXAMPLES::
sage: D = OverconvergentDistributions(2, 3, 5); D # indirect doctest
Space of 3-adic distributions with k=2 action and precision cap 5
"""
if implementation is not None:
if implementation == 'long':
raise NotImplementedError('The optimized implementation -using longs- has been disabled and may return wrong results.')
#if base.is_field():
# raise NotImplementedError('The implementation "long" does'
# 'not support fields as base rings')
#if (isinstance(base, pAdicGeneric) and base.degree() > 1):
# raise NotImplementedError('The implementation "long" does not '
# 'support extensions of p-adics')
#if p is None:
# raise NotImplementedError('The implementation "long" supports'
# 'only p-adic rings')
#return Dist_long, WeightKAction_long
elif implementation == 'vector':
return Dist_vector, WeightKAction_vector
else:
raise NotImplementedError('The implementation "%s" does not exist yet' % (implementation))
return Dist_vector, WeightKAction_vector
# We return always the "slow" (but safe) implementation.
# if symk or p is None or base.is_field() or (isinstance(base, pAdicGeneric) and base.degree() > 1):
# return Dist_vector, WeightKAction_vector
# if 7 * p ** (prec_cap) < ZZ(2) ** (4 * sizeof(long) - 1):
# return Dist_long, WeightKAction_long
# else:
# return Dist_vector, WeightKAction_vector
cdef class Dist(ModuleElement):
r"""
The main `p`-adic distribution class, implemented as per the paper [PS]_.
"""
def moment(self, n):
r"""
Return the `n`-th moment.
INPUT:
- ``n`` -- an integer or slice, to be passed on to moments.
OUTPUT:
- the `n`-th moment, or a list of moments in the case that `n`
is a slice.
EXAMPLES::
sage: D = OverconvergentDistributions(4, 7, 10)
sage: v = D([7,14,21,28,35]);
sage: v.moment(3)
4*7 + O(7^2)
sage: v.moment(0)
7 + O(7^5)
"""
return self.parent().prime() ** (self.ordp) * self._unscaled_moment(n)
def moments(self):
r"""
Return the vector of moments.
OUTPUT:
- the vector of moments
EXAMPLES::
sage: D = OverconvergentDistributions(4, 5, 10, base = Qp(5));
sage: v = D([1,7,4,2,-1])
sage: v = 1/5^3 * v
sage: v
5^-3 * (1 + O(5^5), 2 + 5 + O(5^4), 4 + O(5^3), 2 + O(5^2), 4 + O(5))
sage: v.moments()
(5^-3 + O(5^2), 2*5^-3 + 5^-2 + O(5), 4*5^-3 + O(5^0), 2*5^-3 + O(5^-1), 4*5^-3 + O(5^-2))
"""
return self.parent().prime() ** (self.ordp) * self._moments
cpdef normalize(self):
r"""
Normalize so that the precision of the `i`-th moment is `n-i`,
where `n` is the number of moments.
OUTPUT:
- Normalized entries of the distribution
EXAMPLES::
sage: D = OverconvergentDistributions(5, 7, 15); D
Space of 7-adic distributions with k=5 action and precision cap 15
sage: v = D([1,2,3,4,5]); v
(1 + O(7^5), 2 + O(7^4), 3 + O(7^3), 4 + O(7^2), 5 + O(7))
sage: v.normalize()
(1 + O(7^5), 2 + O(7^4), 3 + O(7^3), 4 + O(7^2), 5 + O(7))
"""
raise NotImplementedError
cdef long _relprec(self):
raise NotImplementedError
cdef _unscaled_moment(self, long i):
raise NotImplementedError
cpdef long _ord_p(self):
r"""
Return power of `p` by which the moments are shifted.
.. NOTE::
This is not necessarily the same as the valuation,
since the moments could all be divisible by `p`.
EXAMPLES::
sage: D = OverconvergentDistributions(5, 7, 15)
sage: v = D([7,14,21,28,35]); v
(7 + O(7^5), 2*7 + O(7^4), 3*7 + O(7^3), 4*7 + O(7^2), O(7))
sage: v._ord_p()
0
"""
return self.ordp
def scale(self, left):
r"""
Scale the moments of the distribution by ``left``
INPUT:
- ``left`` -- scalar
OUTPUT:
- Scales the moments by ``left``
EXAMPLES::
sage: D = OverconvergentDistributions(5, 7, 15)
sage: v = D([1,2,3,4,5]); v
(1 + O(7^5), 2 + O(7^4), 3 + O(7^3), 4 + O(7^2), 5 + O(7))
sage: v.scale(2)
(2 + O(7^5), 4 + O(7^4), 6 + O(7^3), 1 + 7 + O(7^2), 3 + O(7))
"""
# if isinstance(self, Dist_long) and isinstance(left, (Integer, pAdicCappedRelativeElement, pAdicCappedAbsoluteElement, pAdicFixedModElement)):
# return self._lmul_(left)
R = left.parent()
base = self.parent().base_ring()
if base is R:
return self._lmul_(left)
elif base.has_coerce_map_from(R):
return self._lmul_(base(left))
else:
from sage.categories.pushout import pushout
new_base = pushout(base, R)
V = self.parent().change_ring(new_base)
scalar = new_base(left)
return V([scalar * new_base(self.moment(i)) for i in range(self.precision_absolute())])
def is_zero(self, p=None, M=None):
r"""
Return True if the `i`-th moment is zero for all `i` (case ``M`` is None)
or zero modulo `p^{M-i}` for all `i` (when ``M`` is not None).
Note that some moments are not known to precision ``M``, in which
case they are only checked to be equal to zero modulo the
precision to which they are defined.
INPUT:
- ``p`` -- prime
- ``M`` -- precision
OUTPUT:
- True/False
EXAMPLES::
sage: D = OverconvergentDistributions(5, 7, 15)
sage: v = D([1,2,3,4,5]); v
(1 + O(7^5), 2 + O(7^4), 3 + O(7^3), 4 + O(7^2), 5 + O(7))
sage: v.is_zero()
False
sage: v = D(5*[0])
sage: v.is_zero()
True
::
sage: D = Symk(0)
sage: v = D([0])
sage: v.is_zero(5,3)
True
"""
n = self.precision_relative()
aprec = self.precision_absolute()
if M is None:
M = n
# elif M > aprec: # DEBUG
# return False
elif M < aprec:
n -= (aprec - M)
M -= self.ordp
if p is None:
p = self.parent().prime()
cdef bint use_arg = True
if n == 0:
return True
else:
try:
z = self._unscaled_moment(0).is_zero(M)
except TypeError:
z = self._unscaled_moment(0).is_zero()
use_arg = False
if not z:
return False
for a in xrange(1, n):
if use_arg:
try:
z = self._unscaled_moment(a).is_zero(M - a)
except TypeError:
z = self._unscaled_moment(a).is_zero()
use_arg = False
else:
z = self._unscaled_moment(a).is_zero()
if not z:
return False
return True
def find_scalar(self, _other, p, M=None, check=True):
r"""
Return an ``alpha`` with ``other = self * alpha``, or raises
a ``ValueError``.
It will also raise a ``ValueError`` if this distribution is zero.
INPUT:
- ``other`` -- another distribution
- ``p`` -- an integral prime (only used if the parent is not a Symk)
- ``M`` -- (default: None) an integer, the relative precision
to which the scalar must be determined
- ``check`` -- (default: True) boolean, whether to validate
that ``other`` is actually a multiple of this element.
OUTPUT:
- A scalar ``alpha`` with ``other = self * alpha``.
EXAMPLES::
sage: D = OverconvergentDistributions(5, 7, 15)
sage: v = D([1,2,3,4,5])
sage: w = D([3,6,9,12,15])
sage: v.find_scalar(w,p=7)
3 + O(7^5)
sage: v.find_scalar(w,p=7,M=4)
3 + O(7^4)
sage: u = D([1,4,9,16,25])
sage: v.find_scalar(u,p=7)
Traceback (most recent call last):
...
ValueError: not a scalar multiple
"""
cdef Dist other = _other
i = 0
n = self.precision_relative()
other_pr = other.precision_relative()
if n == 0:
raise ValueError("self is zero")
verbose("n = %s" % n, level = 2)
verbose("moment 0", level = 2)
a = self._unscaled_moment(i)
verbose("a = %s" % a, level = 2)
padic = isinstance(a.parent(), pAdicGeneric)
if self.parent().is_symk():
while a == 0:
if other._unscaled_moment(i) != 0:
raise ValueError("not a scalar multiple")
i += 1
verbose("moment %s" % i, level = 2)
try:
a = self._unscaled_moment(i)
except IndexError:
raise ValueError("self is zero")
alpha = other._unscaled_moment(i) / a
if check:
i += 1
while i < n:
verbose("comparing moment %s" % i, level = 2)
if alpha * self._unscaled_moment(i) != other._unscaled_moment(i):
raise ValueError("not a scalar multiple")
i += 1
else:
p = self.parent().prime()
v = a.valuation(p)
while v >= n - i:
i += 1
verbose("p moment %s" % i, level = 2)
try:
a = self._unscaled_moment(i)
except IndexError:
raise ValueError("self is zero")
v = a.valuation(p)
relprec = n - i - v
# verbose("p=%s, n-i=%s\nself.moment=%s, other.moment=%s" % (p, n-i, a, other._unscaled_moment(i)),level=2)
## RP: This code was crashing because other may have too few moments -- so I added this bound with other's relative precision
if padic:
if i < other_pr:
alpha = (other._unscaled_moment(i) / a).add_bigoh(n - i)
else:
alpha = (0 * a).add_bigoh(other_pr - i)
else:
if i < other_pr:
alpha = (other._unscaled_moment(i) / a) % p ** (n - i)
else:
alpha = 0
verbose("alpha = %s" % alpha, level = 2)
## RP: This code was crashing because other may have too few moments -- so I added this bound with other's relative precision
while i < other_pr - 1:
i += 1
verbose("comparing p moment %s" % i, level = 2)
a = self._unscaled_moment(i)
if check:
# verbose("self.moment=%s, other.moment=%s" % (a, other._unscaled_moment(i)))
if (padic and other._unscaled_moment(i) != alpha * a) or \
(not padic and other._unscaled_moment(i) % p ** (n - i) != alpha * a % p ** (n - i)):
raise ValueError("not a scalar multiple")
v = a.valuation(p)
if n - i - v > relprec:
verbose("Reseting alpha: relprec=%s, n-i=%s, v=%s" % (relprec, n - i, v), level = 2)
relprec = n - i - v
if padic:
alpha = (other._unscaled_moment(i) / a).add_bigoh(n - i)
else:
alpha = (other._unscaled_moment(i) / a) % p ** (n - i)
verbose("alpha=%s" % alpha, level = 2)
if relprec < M:
raise ValueError("result not determined to high enough precision")
alpha = alpha * self.parent().prime() ** (other.ordp - self.ordp)
verbose("alpha=%s" % alpha, level = 2)
try:
alpha = self.parent().base_ring()(alpha)
if M is not None:
alpha = alpha.add_bigoh(M)
except (ValueError, AttributeError):
pass
return alpha
def find_scalar_from_zeroth_moment(self, _other, p, M=None, check=True):
r"""
Return an ``alpha`` with ``other = self * alpha`` using only
the zeroth moment, or raises a ``ValueError``.
It will also raise a ``ValueError`` if the zeroth moment of the
distribution is zero.
INPUT:
- ``other`` -- another distribution
- ``p`` -- an integral prime (only used if the parent is not a Symk)
- ``M`` -- (default: None) an integer, the relative precision
to which the scalar must be determined
- ``check`` -- (default: True) boolean, whether to validate
that ``other`` is actually a multiple of this element.
OUTPUT:
- A scalar ``alpha`` with ``other = self * alpha``.
EXAMPLES::
sage: D = OverconvergentDistributions(5, 7, 15)
sage: v = D([1,2,3,4,5])
sage: w = D([3,6,9,12,15])
sage: v.find_scalar_from_zeroth_moment(w,p=7)
3 + O(7^5)
sage: v.find_scalar_from_zeroth_moment(w,p=7,M=4)
3 + O(7^4)
sage: u = D([1,4,9,16,25])
sage: v.find_scalar_from_zeroth_moment(u,p=7)
Traceback (most recent call last):
...
ValueError: not a scalar multiple
"""
cdef Dist other = _other
n = self.precision_relative()
other_pr = other.precision_relative()
if n == 0:
raise ValueError("zeroth moment is zero")
verbose("n = %s" % n, level = 2)
a = self.moment(0)
if a.is_zero():
raise ValueError("zeroth moment is zero")
padic = isinstance(a.parent(), pAdicGeneric)
alpha = other.moment(0) / a
if check:
for i in range(1, n):
verbose("comparing moment %s" % i, level = 2)
if alpha * self.moment(i) != other.moment(i):
raise ValueError("not a scalar multiple")
alpha = self.parent().base_ring()(alpha)
if M is not None:
try:
absprec = alpha.precision_absolute()
if absprec < M:
raise ValueError("result not determined to high "
"enough precision")
verbose("alpha=%s" % (alpha), level = 2)
alpha = alpha.add_bigoh(M)
except AttributeError:
pass
return alpha
cpdef int _cmp_(_left, _right) except -2:
r"""
Comparison.
EXAMPLES:
Equality of two distributions::
sage: D = OverconvergentDistributions(0, 5, 10)
sage: D([1, 2]) == D([1])
True
sage: D([1]) == D([1, 2])
True
sage: v = D([1+O(5^3),2+O(5^2),3+O(5)])
sage: w = D([1+O(5^2),2+O(5)])
sage: v == w
True
sage: D = Symk(0,Qp(5,5))
sage: v = 5 * D([4*5^-1+3+O(5^2)])
sage: w = D([4+3*5+O(5^2)])
sage: v == w
True
"""
cdef Dist left = _left
cdef Dist right = _right
left.normalize()
right.normalize()
cdef long rprec = min(left._relprec(), right._relprec())
cdef long i, c
p = left.parent().prime()
if left.ordp > right.ordp:
shift = p ** (left.ordp - right.ordp)
for i in range(rprec):
c = cmp(shift * left._unscaled_moment(i), right._unscaled_moment(i))
if c:
return c
elif left.ordp < right.ordp:
shift = p ** (right.ordp - left.ordp)
for i in range(rprec):
c = cmp(left._unscaled_moment(i), shift * right._unscaled_moment(i))
if c:
return c
else:
for i in range(rprec):
c = cmp(left.moment(i), right.moment(i))
if c:
return c
return 0
def diagonal_valuation(self, p=None):
"""
Return the largest `m` so that this distribution lies in `Fil^m`.
INPUT:
- ``p`` -- (default: None) a positive integral prime
OUTPUT:
- the largest integer `m` so that `p^m` divides the `0`-th
moment, `p^{m-1}` divides the first moment, etc.
EXAMPLES::
sage: D = OverconvergentDistributions(8, 7, 15)
sage: v = D([7^(5-i) for i in range(1,5)])
sage: v
(O(7^4), O(7^3), O(7^2), O(7))
sage: v.diagonal_valuation(7)
4
"""
if p is None:
p = self.parent()._p
n = self.precision_relative()
return self.ordp + min([n] + [a + self._unscaled_moment(a).valuation(p) for a in range(n)])
def valuation(self, p=None):
"""
Return the minimum valuation of any moment.
INPUT:
- ``p`` -- (default: None) a positive integral prime
OUTPUT:
- an integer
.. WARNING::
Since only finitely many moments are computed, this valuation may
be larger than the actual valuation of this distribution.
Moreover, this valuation may be smaller than the actual
valuation if all entries are zero to the known precision.
EXAMPLES::
sage: D = OverconvergentDistributions(8, 7, 15)
sage: v = D([7^(5-i) for i in range(1,5)])
sage: v
(O(7^4), O(7^3), O(7^2), O(7))
sage: v.valuation(7)
4
"""
if p is None:
p = self.parent()._p
n = self.precision_relative()
if self.parent().is_symk():
return self.ordp + min([self._unscaled_moment(a).valuation(p) for a in range(n)])
else:
return self.ordp + min([n] + [self._unscaled_moment(a).valuation(p) for a in range(n) if not self._unscaled_moment(a).is_zero()])
def specialize(self, new_base_ring=None):
"""
Return the image of this overconvergent distribution under
the canonical projection from distributions of weight `k` to
`Sym^k`.
INPUT:
- ``new_base_ring`` -- (default: None) a ring giving the
desired base ring of the result.
OUTPUT:
- An element of `Sym^k(K)`, where `K` is the specified base ring.
EXAMPLES::
sage: D = OverconvergentDistributions(4, 13)
sage: d = D([0,2,4,6,8,10,12])
sage: d.specialize()
(O(13^7), 2 + O(13^6), 4 + O(13^5), 6 + O(13^4), 8 + O(13^3))
"""
# self.normalize() # This method should not change self
k = self.parent()._k
if k < 0:
raise ValueError("negative weight")
if self.precision_absolute() < k + 1:
raise ValueError("not enough moments")
V = self.parent().specialize(new_base_ring)
new_base_ring = V.base_ring()
if self.precision_relative() == 0:
return V.zero()
return V([new_base_ring.coerce(self.moment(j)) for j in range(k + 1)])
def lift(self, p=None, M=None, new_base_ring=None):
r"""
Lift a distribution or element of `Sym^k` to an overconvergent distribution.
INPUT:
- ``p`` -- (default: None) a positive integral prime. If None
then ``p`` must be available in the parent.
- ``M`` -- (default: None) a positive integer giving the
desired number of moments. If None, returns a distribution having one
more moment than this one.
- ``new_base_ring`` -- (default: None) a ring giving the desired base
ring of the result. If None, a base ring is chosen automatically.
OUTPUT:
- An overconvergent distribution with `M` moments whose image
under the specialization map is this element.
EXAMPLES::
sage: V = Symk(0)
sage: x = V(1/4)
sage: y = x.lift(17, 5)
sage: y
(13 + 12*17 + 12*17^2 + 12*17^3 + 12*17^4 + O(17^5), O(17^4), O(17^3), O(17^2), O(17))
sage: y.specialize()._moments == x._moments
True
"""
V = self.parent().lift(p, M, new_base_ring)
k = V._k
p = V.prime()
M = V.precision_cap()
R = V.base_ring()
moments = [R(self.moment(j)) for j in range(k + 1)]
zero = R(0)
moments.extend([zero] * (M - k - 1))
mu = V(moments)
#val = mu.valuation()
#if val < 0:
# # This seems unnatural
# print("scaling by ", p, "^", -val, " to keep things integral")
# mu *= p**(-val)
return mu
def _is_malformed(self):
r"""
Check that the precision of ``self`` is sensible.
EXAMPLE::
sage: D = sage.modular.pollack_stevens.distributions.Symk(2, base=Qp(5))
sage: v = D([1, 2, 3])
sage: v._is_malformed()
False
sage: v = D([1 + O(5), 2, 3])
sage: v._is_malformed()
True
"""
n = self.precision_absolute()
for i in range(n):
if self.moment(i).precision_absolute() < n - i:
return True
return False
def act_right(self, gamma):
r"""
The image of this element under the right action by a
`2 \times 2` matrix.
INPUT:
- ``gamma`` -- the matrix by which to act
OUTPUT:
- ``self | gamma``
.. NOTE::
You may also just use multiplication ``self * gamma``.
EXAMPLES::
sage: D = OverconvergentDistributions(4, 7, 10)
sage: v = D([98,49,21,28,35])
sage: M = matrix([[1,0], [7,1]])
sage: v.act_right(M)
(2*7^2 + 7^3 + 5*7^4 + O(7^5), 3*7^2 + 6*7^3 + O(7^4), 3*7 + 7^2 + O(7^3), 4*7 + O(7^2), O(7))
"""
return self.parent()._act(self, gamma)
cdef class Dist_vector(Dist):
r"""
A distribution is stored as a vector whose `j`-th entry is the `j`-th moment of the distribution.
The `j`-th entry is stored modulo `p^{N-j}` where `N` is the total number of moments.
(This is the accuracy that is maintained after acting by `\Gamma_0(p)`.)
INPUT:
- ``moments`` -- the list of moments. If ``check == False`` it
must be a vector in the appropriate approximation module.
- ``parent`` -- a :class:`distributions.OverconvergentDistributions_class` or
:class:`distributions.Symk_class` instance
- ``ordp`` -- an integer. This MUST be zero in the case of Symk
of an exact ring.
- ``check`` -- (default: True) boolean, whether to validate input
EXAMPLES::
sage: D = OverconvergentDistributions(3,5,6) # indirect doctest
sage: v = D([1,1,1])
"""
def __init__(self, moments, parent, ordp=0, check=True):
"""
Initialization.
TESTS::
sage: Symk(4)(0)
(0, 0, 0, 0, 0)
"""
# if not hasattr(parent,'Element'):
# parent, moments = moments, parent
Dist.__init__(self, parent)
if check:
# case 1: input is a distribution already
if isinstance(moments, Dist):
ordp = moments._ord_p()
moments = moments._moments.change_ring(parent.base_ring())
# case 2: input is a vector, or something with a len
elif hasattr(moments, '__len__'):
M = len(moments)
moments = parent.approx_module(M)(moments)
# case 3: input is zero
elif moments == 0:
moments = parent.approx_module(parent.precision_cap())(moments)
# case 4: everything else
else:
moments = parent.approx_module(1)([moments])
# TODO: This is not quite right if the input is an inexact zero.
if ordp != 0 and parent.prime() == 0:
raise ValueError("can not specify a valuation shift for an exact ring")
self._moments = moments
self.ordp = ordp
self.normalize() # DEBUG
def __reduce__(self):
r"""
Used for pickling.
EXAMPLE::
sage: D = sage.modular.pollack_stevens.distributions.Symk(2)
sage: x = D([2,3,4])
sage: x.__reduce__()
(<type 'sage.modular.pollack_stevens.dist.Dist_vector'>, ((2, 3, 4), Sym^2 Q^2, 0, False))
"""
return (self.__class__, (self._moments, self.parent(), self.ordp, False))
cdef Dist_vector _new_c(self):
r"""
Creates an empty distribution.
Note that you MUST fill in the ordp attribute on the resulting distribution.
OUTPUT:
- A distribution with no moments. The moments are then filled
in by the calling function.
EXAMPLES::
sage: D = OverconvergentDistributions(3,5,4) # indirect doctest
sage: v = D([1,1,1])
"""
cdef Dist_vector ans = PY_NEW(Dist_vector)
ans._parent = self._parent
return ans
def _repr_(self):
r"""
String representation.
EXAMPLES::
sage: D = OverconvergentDistributions(5, 7, 15)
sage: v = D([1,2,3,4,5]); v
(1 + O(7^5), 2 + O(7^4), 3 + O(7^3), 4 + O(7^2), 5 + O(7))
sage: repr(v)
'(1 + O(7^5), 2 + O(7^4), 3 + O(7^3), 4 + O(7^2), 5 + O(7))'
"""
# self.normalize() # Should normalize only when absolutely needed.
valstr = ""
if self.ordp == 1:
valstr = "%s * " % (self.parent().prime())
elif self.ordp != 0:
valstr = "%s^%s * " % (self.parent().prime(), self.ordp)
if len(self._moments) == 1:
return valstr + repr(self._moments[0])
else:
return valstr + repr(self._moments)
def _rational_(self):
"""
Convert to a rational number.
EXAMPLES::
sage: D = Symk(0); d = D(4/3); d
4/3
sage: QQ(d)
4/3
We get a TypeError if there is more than 1 moment::
sage: D = Symk(1); d = D([1,2]); d
(1, 2)
sage: QQ(d)
Traceback (most recent call last):
...
TypeError: k must be 0
"""
if len(self._moments) == 1:
return QQ(self.moment(0))
raise TypeError("k must be 0")
cdef long _relprec(self):
"""
Return the number of moments.
EXAMPLES::
sage: D = Symk(4)
sage: d = D([1,2,3,4,5]); e = D([2,3,4,5,6])
sage: d == e # indirect doctest
False
"""
return len(self._moments)
cdef _unscaled_moment(self, long n):
r"""
Return the `n`-th moment, unscaled by the overall power of `p`
stored in ``self.ordp``.
EXAMPLES::
sage: D = OverconvergentDistributions(4,3,5)
sage: d = D([3,3,3,3,3])
sage: d.moment(2) # indirect doctest
3 + O(3^3)
"""
return self._moments[n]
cdef Dist_vector _addsub(self, Dist_vector right, bint negate):
r"""
Common code for the sum and the difference of two distributions
EXAMPLES::
sage: D = Symk(2)
sage: u = D([1,2,3]); v = D([4,5,6])
sage: u + v # indirect doctest
(5, 7, 9)
sage: u - v # indirect doctest
(-3, -3, -3)
"""
cdef Dist_vector ans = self._new_c()
cdef long aprec = min(self.ordp + len(self._moments), right.ordp + len(right._moments))
ans.ordp = min(self.ordp, right.ordp)
cdef long rprec = aprec - ans.ordp
# In the case of symk, rprec will always be k
V = ans.parent().approx_module(rprec)
R = V.base_ring()
smoments = self._moments
rmoments = right._moments
# We truncate if the moments are too long; extend by zero if too short
if smoments.parent() is not V:
vec = smoments.list(copy=False)[:rprec] + ([R(0)] * (rprec - len(smoments)) if rprec > len(smoments) else [])
smoments = V(vec)
if rmoments.parent() is not V:
            vec = rmoments.list(copy=False)[:rprec] + ([R(0)] * (rprec - len(rmoments)) if rprec > len(rmoments) else [])
rmoments = V(vec)
# We multiply by the relative power of p
if self.ordp > right.ordp:
smoments *= self.parent().prime() ** (self.ordp - right.ordp)
elif self.ordp < right.ordp:
rmoments *= self.parent().prime() ** (right.ordp - self.ordp)
if negate:
rmoments = -rmoments
ans._moments = smoments + rmoments
return ans
cpdef _add_(self, _right):
r"""
Sum of two distributions.
EXAMPLES::
sage: D = OverconvergentDistributions(5, 7, 15)
sage: v = D([1,2,3,4,5]); w = D([3,6,9,12,15])
sage: v+w
(4 + O(7^5), 1 + 7 + O(7^4), 5 + 7 + O(7^3), 2 + 2*7 + O(7^2), 6 + O(7))
"""
return self._addsub(<Dist_vector>_right, False)
cpdef _sub_(self, _right):
r"""
Difference of two distributions.
EXAMPLES::
sage: D = OverconvergentDistributions(5, 7, 15)
sage: v = D([1,2,3,4,5]); w = D([1,1,1,8,8])
sage: v-w
(O(7^5), 1 + O(7^4), 2 + O(7^3), 3 + 6*7 + O(7^2), 4 + O(7))
"""
return self._addsub(<Dist_vector>_right, True)
cpdef _lmul_(self, RingElement right):
r"""
Scalar product of a distribution with a ring element that coerces into the base ring.
EXAMPLES::
sage: D = OverconvergentDistributions(5, 7, 15)
sage: v = D([1,2,3,4,5]); v
(1 + O(7^5), 2 + O(7^4), 3 + O(7^3), 4 + O(7^2), 5 + O(7))
sage: 3*v; 7*v
(3 + O(7^5), 6 + O(7^4), 2 + 7 + O(7^3), 5 + 7 + O(7^2), 1 + O(7))
7 * (1 + O(7^5), 2 + O(7^4), 3 + O(7^3), 4 + O(7^2), 5 + O(7))
sage: v*3; v*7
(3 + O(7^5), 6 + O(7^4), 2 + 7 + O(7^3), 5 + 7 + O(7^2), 1 + O(7))
7 * (1 + O(7^5), 2 + O(7^4), 3 + O(7^3), 4 + O(7^2), 5 + O(7))
"""
cdef Dist_vector ans = self._new_c()
p = self.parent().prime()
if p == 0:
ans._moments = self._moments * right
ans.ordp = self.ordp
elif right.valuation(p) == Infinity:
ans._moments = self.parent().approx_module(0)([])
ans.ordp += self.precision_relative()
else:
try:
v, u = right.val_unit(p)
except TypeError: # bug in p-adics: they should accept p here
v, u = right.val_unit()
ans._moments = self._moments * u
ans.ordp = self.ordp + v
# if the relative precision of u is less than that of self, ans may not be normalized.
return ans
def precision_relative(self):
r"""
Return the relative precision of this distribution.
The precision is just the number of moments stored, which is
also `k+1` in the case of `Sym^k(R)`. For overconvergent
distributions, the precision is the integer `m` so that the
sequence of moments is known modulo `Fil^m`.
OUTPUT:
- An integer giving the number of moments.
EXAMPLES::
sage: D = OverconvergentDistributions(2, 11, 15)
sage: v = D([1,1,10,9,6,15])
sage: v.precision_relative()
6
sage: v = v.reduce_precision(4); v.precision_relative()
4
sage: D = Symk(10)
sage: v = D.random_element()
sage: v.precision_relative()
11
"""
return Integer(len(self._moments))
def precision_absolute(self):
r"""
Return the absolute precision of this distribution.
The absolute precision is the sum of the relative precision
(number of moments) and the valuation.
EXAMPLES::
sage: D = OverconvergentDistributions(3, 7, base = Qp(7))
sage: v = D([3,1,10,0])
sage: v.precision_absolute()
4
sage: v *= 7
sage: v.precision_absolute()
5
sage: v = 1/7^10 * v
sage: v.precision_absolute()
-5
"""
return Integer(len(self._moments) + self.ordp)
cpdef normalize(self, include_zeroth_moment = True):
r"""
Normalize by reducing modulo `Fil^N`, where `N` is the number of moments.
If the parent is Symk, then normalize has no effect. If the
parent is a space of distributions, then normalize reduces the
`i`-th moment modulo `p^{N-i}`.
OUTPUT:
        - this distribution, after normalizing.
.. WARNING::
This function modifies the distribution in place as well as returning it.
EXAMPLES::
sage: D = OverconvergentDistributions(3,7,10)
sage: v = D([1,2,3,4,5]) ; v
(1 + O(7^5), 2 + O(7^4), 3 + O(7^3), 4 + O(7^2), 5 + O(7))
sage: w = v.reduce_precision(3) ; w
(1 + O(7^5), 2 + O(7^4), 3 + O(7^3))
sage: w.normalize()
(1 + O(7^3), 2 + O(7^2), 3 + O(7))
sage: w
(1 + O(7^3), 2 + O(7^2), 3 + O(7))
sage: v.reduce_precision(3).normalize(include_zeroth_moment=False)
(1 + O(7^5), 2 + O(7^2), 3 + O(7))
"""
        if not self.parent().is_symk() and self._moments != 0:  # non-classical
if len(self._moments) == 0:
return self
V = self._moments.parent()
R = V.base_ring()
n = self.precision_relative()
p = self.parent()._p
shift = self.ordp
            if include_zeroth_moment:
                if isinstance(R, pAdicGeneric):
                    self._moments = V([self._moments[i].add_bigoh(n - shift - i) for i in range(n)])
                else:
                    self._moments = V([self._moments[i] % (p ** (n - shift - i)) for i in range(n)])
            else:
                if isinstance(R, pAdicGeneric):
                    self._moments = V([self._moments[0]] + [self._moments[i].add_bigoh(n - shift - i) for i in range(1, n)])  # Don't normalize the zeroth moment
                else:
                    self._moments = V([self._moments[0]] + [self._moments[i] % (p ** (n - shift - i)) for i in range(1, n)])  # Don't normalize the zeroth moment
return self
def reduce_precision(self, M):
r"""
Only hold on to `M` moments.
INPUT:
- ``M`` -- a positive integer less than the precision of this
distribution.
OUTPUT:
- a new distribution with `M` moments equal to the first `M`
moments of this distribution.
EXAMPLES::
sage: D = OverconvergentDistributions(3,7,10)
sage: v = D([3,4,5])
sage: v
(3 + O(7^3), 4 + O(7^2), 5 + O(7))
sage: v.reduce_precision(2)
(3 + O(7^3), 4 + O(7^2))
"""
assert M <= self.precision_relative(), "not enough moments"
cdef Dist_vector ans = self._new_c()
ans._moments = self._moments[:M]
ans.ordp = self.ordp
return ans
def solve_difference_equation(self):
r"""
        Solve the difference equation `self = v | \Delta`, where `\Delta = [1, 1; 0, 1] - 1`.
See Theorem 4.5 and Lemma 4.4 of [PS]_.
OUTPUT:
        - a distribution `v` so that `self = v | \Delta`, assuming ``self.moment(0) == 0``.
Otherwise solves the difference equation for ``self - (self.moment(0),0,...,0)``.
EXAMPLES::
sage: D = OverconvergentDistributions(5,7,15)
sage: v = D(([0,2,3,4,5]))
sage: g = D._act.actor()(Matrix(ZZ,2,2,[1,1,0,1]))
sage: w = v.solve_difference_equation()
sage: v - (w*g - w)
(O(7^4), O(7^3), O(7^2), O(7))
sage: v = D(([7,2,3,4,5]))
sage: w = v.solve_difference_equation()
sage: v - (w*g - w)
(7 + O(7^4), O(7^3), O(7^2), O(7))
"""
# assert self._moments[0][0]==0, "not total measure zero"
# print("result accurate modulo p^",self.moment(0).valuation(self.p) )
#v=[0 for j in range(0,i)]+[binomial(j,i)*bernoulli(j-i) for j in range(i,M)]
M = self.precision_relative()
R = self.parent().base_ring()
K = R.fraction_field()
V = self._moments.parent()
v = [K(0) for i in range(M)]
bern = [bernoulli(i) for i in range(0, M, 2)]
minhalf = ~K(-2)
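        # The loop below implements the classical solution of the difference
        # equation in terms of Bernoulli numbers (cf. [PS] Lemma 4.4): writing
        # `mu_m` for the `m`-th moment of self, the `j`-th moment of the
        # solution is
        #     v_j = sum_m binomial(j, m-1) * B_{j-m+1} * mu_m / m,
        # where only even-index Bernoulli numbers contribute, apart from
        # B_1 = -1/2, which is handled separately through ``minhalf``.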
for m in range(1, M):
scalar = K(self.moment(m) / m)
# bernoulli(1) = -1/2; the only nonzero odd bernoulli number
v[m] += m * minhalf * scalar
for j in range(m - 1, M, 2):
v[j] += binomial(j, m - 1) * bern[(j - m + 1) // 2] * scalar
p = self.parent().prime()
cdef Dist_vector ans
if p == 0:
if R.is_field():
ans = self._new_c()
ans.ordp = 0
ans._moments = V(v)
else:
newparent = self.parent().change_ring(K)
ans = newparent(v)
else:
ans = self._new_c()
try:
ans.ordp = min(a.valuation(p) for a in v)
except TypeError:
ans.ordp = 0
if ans.ordp < 0:
scalar = K(p) ** (-ans.ordp)
ans._moments = V([R(a * scalar) for a in v])
elif ans.ordp > 0:
scalar = K(p) ** ans.ordp
ans._moments = V([R(a // scalar) for a in v])
else:
ans._moments = V([R(a) for a in v])
v = ans._moments
N = len(ans._moments)
prec_loss = max([N - j - v[j].precision_absolute()
for j in range(N)])
# print("precision loss = ", prec_loss)
if prec_loss > 0:
ans._moments = ans._moments[:(N - prec_loss)]
return ans
# cdef class Dist_long(Dist):
# r"""
# A class for distributions implemented using a C array of longs.
# INPUT:
# - ``moments`` -- the list of moments. If ``check == False`` it
# must be a vector in the appropriate approximation module.
# - ``parent`` -- a :class:`distributions.OverconvergentDistributions_class` or
# :class:`distributions.Symk_class` instance
# - ``check`` -- (default: True) boolean, whether to validate input
# EXAMPLES::
# sage: from sage.modular.pollack_stevens.distributions import OverconvergentDistributions, Symk
# """
# def __init__(self, moments, parent, ordp=0, check=True):
# """
# Initialization.
# TESTS::
# sage: from sage.modular.pollack_stevens.distributions import OverconvergentDistributions, Symk
# """
# # if not hasattr(parent,'Element'):
# # parent, moments = moments, parent
# Dist.__init__(self, parent)
# p = parent._p
# cdef int i
# if check:
# # case 1: input is a distribution already
# if isinstance(moments, Dist):
# M = len(moments)
# moments = [ZZ(moments.moment(i)) for i in range(M)]
# # case 2: input is a vector, or something with a len
# elif hasattr(moments, '__len__'):
# M = len(moments)
# moments = [ZZ(a) for a in parent.approx_module(M)(moments)]
# # case 3: input is zero
# elif moments == 0:
# M = parent.precision_cap()
# moments = [ZZ(0)] * M
# else:
# M = 1
# moments = [ZZ(moments)]
# if M > 100 or 7 * p ** M > ZZ(2) ** (4 * sizeof(long) - 1): # 6 is so that we don't overflow on gathers
# raise ValueError("moments too long")
# else:
# M = len(moments)
# for i in range(len(moments)):
# self._moments[i] = moments[i]
# self.relprec = M
# self.prime_pow = <PowComputer_class?>parent.prime_pow
# self.normalize()
# cdef Dist_long _new_c(self):
# r"""
# OUTPUT:
# -
# EXAMPLES::
# sage: from sage.modular.pollack_stevens.distributions import OverconvergentDistributions, Symk
# """
# cdef Dist_long ans = PY_NEW(Dist_long)
# ans._parent = self._parent
# ans.prime_pow = self.prime_pow
# return ans
# def _repr_(self):
# r"""
# OUTPUT:
# -
# EXAMPLES::
# sage: from sage.modular.pollack_stevens.distributions import OverconvergentDistributions, Symk
# """
# valstr = ""
# if self.ordp == 1:
# valstr = "%s * " % (self.prime_pow.prime)
# elif self.ordp!= 0:
# valstr = "%s^%s * " % (self.prime_pow.prime, self.ordp)
# if self.relprec == 1:
# return valstr + repr(self._moments[0])
# else:
# return valstr + "(" + ", ".join([repr(self._moments[i]) for i in range(self.relprec)]) + ")"
# cdef int quasi_normalize(self) except -1:
# r"""
# OUTPUT:
# -
# EXAMPLES::
# sage: from sage.modular.pollack_stevens.distributions import OverconvergentDistributions, Symk
# """
# cdef int i
# for i in range(self.relprec):
# if self._moments[i] > overflow:
# self._moments[i] = self._moments[i] % self.prime_pow(self.relprec - i)
# elif self._moments[i] < underflow:
# self._moments[i] = self._moments[i] % self.prime_pow(self.relprec - i)
# self._moments[i] += self.prime_pow(self.relprec - i)
# cpdef normalize(self):
# r"""
# OUTPUT:
# -
# EXAMPLES::
# sage: from sage.modular.pollack_stevens.distributions import OverconvergentDistributions, Symk
# """
# cdef int i
# for i in range(1, self.relprec): # Don't normalize the zeroth moment
# if self._moments[i] < 0:
# self._moments[i] = self._moments[i] % self.prime_pow(self.relprec - i)
# self._moments[i] += self.prime_pow(self.relprec - i)
# elif self._moments[i] >= self.prime_pow(self.relprec - i):
# self._moments[i] = self._moments[i] % self.prime_pow(self.relprec - i)
# return self
# cdef long _relprec(self):
# return self.relprec
# cdef _unscaled_moment(self, long _n):
# r"""
# INPUT:
# - ``_n`` -- an integer or slice giving an index into the
# moments.
# OUTPUT:
# -
# EXAMPLES::
# sage: from sage.modular.pollack_stevens.distributions import OverconvergentDistributions, Symk
# """
# if isinstance(_n, slice):
# a, b, c = _n.indices(self.relprec)
# return [self.moment(i) for i in range(a, b, c)]
# cdef int n = _n
# if n < 0:
# n += self.relprec
# if n < 0 or n >= self.relprec:
# raise IndexError("list index out of range")
# return self._moments[n]
# cdef Dist_long _addsub(self, Dist_long right, bint negate):
# r"""
# Common code for the sum and the difference of two distributions
# """
# cdef Dist_long ans = self._new_c()
# cdef long aprec = min(self.ordp + self.relprec, right.ordp + right.relprec)
# ans.ordp = min(self.ordp, right.ordp)
# ans.relprec = aprec - ans.ordp
# # In the case of symk, rprec will always be k
# cdef int i, n
# cdef long diff, cutoff
# # The following COULD overflow, but it would require 2^32
# # additions (on a 64-bit machine), since we restrict p^k to be
# # less than 2^31/7.
# if self.ordp == right.ordp:
# n = min(self.relprec, right.relprec)
# for i in range(n):
# ans._moments[i] = self._moments[i] - right._moments[i] if negate else self._moments[i] + right._moments[i]
# if self.relprec < ans.relprec:
# for i in range(n, ans.relprec):
# ans._moments[i] = -right._moments[i] if negate else right._moments[i]
# elif ans.relprec < self.relprec:
# for i in range(n, ans.relprec):
# ans._moments[i] = self._moments[i]
# elif self.ordp < right.ordp:
# diff = right.ordp - self.ordp
# n = min(right.relprec, ans.relprec - diff)
# for i in range(n):
# ans._moments[i] = self.prime_pow(diff) * (right._moments[i] % self.prime_pow(ans.relprec - diff - i))
# ans._moments[i] = self._moments[i] - ans._moments[i] if negate else self._moments[i] + ans._moments[i]
# if n < ans.relprec:
# for i in range(n, ans.relprec):
# ans._moments[i] = self._moments[i]
# else: # self.ordp > right.ordp
# diff = self.ordp - right.ordp
# n = min(self.relprec, ans.relprec - diff)
# for i in range(n):
# ans._moments[i] = self.prime_pow(diff) * (self._moments[i] % self.prime_pow(ans.relprec - diff - i))
# ans._moments[i] += -right._moments[i] if negate else right._moments[i]
# if n < ans.relprec:
# for i in range(n, ans.relprec):
# ans._moments[i] = -right._moments[i] if negate else right._moments[i]
# return ans
# cpdef _add_(self, ModuleElement right):
# r"""
# EXAMPLES::
# sage: from sage.modular.pollack_stevens.distributions import OverconvergentDistributions, Symk
# """
# return self._addsub(<Dist_long?> right, False)
# cpdef _sub_(self, ModuleElement right):
# r"""
# EXAMPLES::
# sage: from sage.modular.pollack_stevens.distributions import OverconvergentDistributions, Symk
# """
# return self._addsub(<Dist_long?> right, True)
# cpdef _lmul_(self, RingElement _right):
# r"""
# EXAMPLES::
# sage: from sage.modular.pollack_stevens.distributions import OverconvergentDistributions, Symk
# """
# cdef Dist_long ans = self._new_c()
# ans.relprec = self.relprec
# self.quasi_normalize()
# cdef long scalar, absprec, ordp
# cdef Integer iright, unit, ppow, p = self.prime_pow.prime
# cdef Rational qright, qunit
# cdef pAdicCappedAbsoluteElement pcaright
# cdef pAdicCappedRelativeElement pcrright
# cdef pAdicFixedModElement pfmright
# if isinstance(_right, Integer):
# iright = <Integer>_right
# if mpz_sgn(iright.value) == 0:
# ans.ordp = maxordp
# ans.relprec = 0
# return ans
# unit = PY_NEW(Integer)
# ordp = mpz_remove(unit.value, iright.value, p.value)
# if mpz_fits_slong_p(unit.value):
# scalar = mpz_get_si(iright.value) % self.prime_pow(self.relprec)
# else:
# scalar = mpz_fdiv_ui(iright.value, self.prime_pow(self.relprec))
# elif isinstance(_right, Rational):
# qright = <Rational>_right
# if mpq_sgn(qright.value) == 0:
# ans.ordp = maxordp
# ans.relprec = 0
# return ans
# qunit = PY_NEW(Rational)
# ordp = mpz_remove(mpq_numref(qunit.value), mpq_numref(qright.value), p.value)
# if ordp == 0:
# ordp = -mpz_remove(mpq_denref(qunit.value), mpq_denref(qright.value), p.value)
# else:
# mpz_set(mpq_denref(qunit.value), mpq_denref(qright.value))
# ppow = PY_NEW(Integer)
# mpz_set_ui(ppow.value, self.prime_pow(self.relprec))
# # We reuse the pointers inside qunit, since we're going to discard it.
# mpz_invert(mpq_denref(qunit.value), mpq_denref(qunit.value), ppow.value)
# mpz_mul(mpq_numref(qunit.value), mpq_numref(qunit.value), mpq_denref(qunit.value))
# scalar = mpz_fdiv_ui(mpq_numref(qunit.value), self.prime_pow(self.relprec))
# # qunit should not be used now (it's unnormalized)
# elif isinstance(_right, pAdicCappedAbsoluteElement):
# pcaright = <pAdicCappedAbsoluteElement>_right
# unit = PY_NEW(Integer)
# ordp = mpz_remove(unit.value, pcaright.value, p.value)
# if pcaright.absprec - ordp <= self.relprec:
# ans.relprec = pcaright.absprec - ordp
# scalar = mpz_get_si(unit.value)
# else:
# scalar = mpz_fdiv_ui(unit.value, self.prime_pow(self.relprec))
# elif isinstance(_right, pAdicCappedRelativeElement):
# pcrright = <pAdicCappedRelativeElement>_right
# ordp = pcrright.ordp
# if pcrright.relprec <= self.relprec:
# ans.relprec = pcrright.relprec
# scalar = mpz_get_si(pcrright.unit)
# else:
# scalar = mpz_fdiv_ui(pcrright.unit, self.prime_pow(self.relprec))
# elif isinstance(_right, pAdicFixedModElement):
# pfmright = <pAdicFixedModElement>_right
# scalar = mpz_get_si(pfmright.value)
# ordp = 0
# cdef int i
# for i in range(self.relprec):
# ans._moments[i] = self._moments[i] * scalar
# ans.ordp = self.ordp + ordp
# ans.quasi_normalize()
# return ans
# def precision_relative(self):
# return Integer(self.relprec)
# def precision_absolute(self):
# r"""
# OUTPUT:
# -
# EXAMPLES::
# sage: from sage.modular.pollack_stevens.distributions import OverconvergentDistributions, Symk
# """
# return Integer(self.relprec + self.ordp)
# def reduce_precision(self, M):
# r"""
# INPUT:
# - ``M`` -- a positive integer less than the precision of this
# distribution.
# OUTPUT:
# -
# EXAMPLES::
# sage: from sage.modular.pollack_stevens.distributions import OverconvergentDistributions, Symk
# """
# if M > self.relprec:
# raise ValueError("not enough moments")
# if M < 0:
# raise ValueError("precision must be non-negative")
# cdef Dist_long ans = self._new_c()
# ans.relprec = M
# cdef int i
# for i in range(ans.relprec):
# ans._moments[i] = self._moments[i]
# ans.ordp = self.ordp
# return ans
# def solve_diff_eqn(self):
# r"""
# OUTPUT:
# -
# EXAMPLES::
# sage: from sage.modular.pollack_stevens.distributions import OverconvergentDistributions, Symk
# """
# raise NotImplementedError
# def __reduce__(self):
# r"""
# Used in pickling.
# EXAMPLE::
# sage: D = OverconvergentDistributions(0, 5, 10)
# sage: D([1,2,3,4]).__reduce__()
# (<type'sage.modular.pollack_stevens.dist.Dist_long'>, ([1, 2, 3, 4], Space of 5-adic distributions with k=0 action and precision cap 10, 0, False))
# """
# return (self.__class__, ([self._moments[i]
# for i in xrange(self.relprec)],
# self.parent(), self.ordp, False))
cdef class WeightKAction(Action):
r"""
Encode the action of the monoid `\Sigma_0(N)` on the space of distributions.
INPUT:
- ``Dk`` -- a space of distributions
- ``character`` -- data specifying a Dirichlet character to apply to
the top right corner, and a power of the determinant by which to scale.
See the documentation of
:class:`sage.modular.pollack_stevens.distributions.OverconvergentDistributions_factory`
for more details.
- ``adjuster`` -- a callable object that turns matrices into 4-tuples.
- ``on_left`` -- whether this action should be on the left.
- ``dettwist`` -- a power of the determinant to twist by
- ``padic`` -- if True, define an action of `p`-adic matrices (not just integer ones)
EXAMPLES::
sage: D = OverconvergentDistributions(4,5,10,base = Qp(5,20)); D
Space of 5-adic distributions with k=4 action and precision cap 10
sage: D._act
Right action by Monoid Sigma0(5) with coefficients in 5-adic Field with capped relative precision 20 on Space of 5-adic distributions with k=4 action and precision cap 10
"""
def __init__(self, Dk, character, adjuster, on_left, dettwist, padic=False):
r"""
Initialization.
TESTS::
sage: D = OverconvergentDistributions(4,5,10,base = Qp(5,20)); D # indirect doctest
Space of 5-adic distributions with k=4 action and precision cap 10
sage: D = Symk(10) # indirect doctest
"""
self._k = Dk._k
# if self._k < 0: raise ValueError("k must not be negative")
self._adjuster = adjuster
self._character = character
self._dettwist = dettwist
self._p = Dk._p
self._symk = Dk.is_symk()
self._actmat = {}
self._maxprecs = {}
if character is None:
self._Np = ZZ(1) # all of M2Z acts
else:
self._Np = character.modulus()
if not self._symk:
self._Np = self._Np.lcm(self._p)
if padic:
self._Sigma0 = Sigma0(self._Np, base_ring=Dk.base_ring(), adjuster=self._adjuster)
else:
self._Sigma0 = Sigma0(self._Np, base_ring=ZZ, adjuster=self._adjuster)
Action.__init__(self, self._Sigma0, Dk, on_left, operator.mul)
def clear_cache(self):
r"""
Clear the cached matrices which define the action of `U_p`
(these depend on the desired precision) and the
dictionary that stores the maximum precisions computed so far.
EXAMPLES::
sage: D = OverconvergentDistributions(4,5,4)
sage: D([1,2,5,3]) * D._act.actor()(Matrix(ZZ,2,2,[1,1,0,1]))
(1 + O(5^4), 3 + O(5^3), 2*5 + O(5^2), O(5))
sage: D._act.clear_cache()
"""
self._actmat = {}
self._maxprecs = {}
cpdef acting_matrix(self, g, M):
r"""
The matrix defining the action of ``g`` at precision ``M``.
INPUT:
- ``g`` -- an instance of
:class:`sage.matrix.matrix_generic_dense.Matrix_generic_dense`
- ``M`` -- a positive integer giving the precision at which
``g`` should act.
OUTPUT:
- An `M \times M` matrix so that the action of `g` on a
distribution with `M` moments is given by a vector-matrix
multiplication.
.. NOTE::
This function caches its results. To clear the cache use
:meth:`clear_cache`.
EXAMPLES::
sage: D = Symk(3)
sage: v = D([5,2,7,1])
sage: g = Matrix(ZZ,2,2,[1,3,0,1])
sage: v * D._act.actor()(g) # indirect doctest
(5, 17, 64, 253)
"""
g = g.matrix()
        if g not in self._maxprecs:
A = self._compute_acting_matrix(g, M)
self._actmat[g] = {M: A}
self._maxprecs[g] = M
return A
else:
mats = self._actmat[g]
if M in mats:
return mats[M]
maxprec = self._maxprecs[g]
if M < maxprec:
A = mats[maxprec][:M, :M] # submatrix; might want to reduce precisions
mats[M] = A
return A
if M < 30: # This should not be hard-coded
maxprec = max([M, 2 * maxprec]) # This may be wasting memory
else:
maxprec = M
self._maxprecs[g] = maxprec
mats[maxprec] = self._compute_acting_matrix(g, maxprec) # could lift from current maxprec
if M == maxprec:
return mats[maxprec]
A = mats[maxprec][:M, :M] # submatrix; might want to reduce precisions
mats[M] = A
return A
cpdef _compute_acting_matrix(self, g, M):
r"""
Compute the matrix defining the action of ``g`` at precision ``M``.
INPUT:
- ``g`` -- a `2 \times 2` instance of
          :class:`sage.matrix.matrix_integer_dense.Matrix_integer_dense`
- ``M`` -- a positive integer giving the precision at which
``g`` should act.
OUTPUT:
        - ``G`` -- an `M \times M` matrix. If `v` is the vector of moments of a
distribution `\mu`, then `v*G` is the vector of moments of `\mu|[a,b;c,d]`
EXAMPLES::
sage: D = Symk(3)
sage: v = D([5,2,7,1])
sage: g = Matrix(ZZ,2,2,[-2,1,-1,0])
sage: v * D._act.actor()(g) # indirect doctest
        (-107, 35, -12, 5)
"""
raise NotImplementedError
cdef class WeightKAction_vector(WeightKAction):
cpdef _compute_acting_matrix(self, g, M):
r"""
Compute the matrix defining the action of ``g`` at precision ``M``.
INPUT:
- ``g`` -- a `2 \times 2` instance of
:class:`sage.matrix.matrix_generic_dense.Matrix_generic_dense`
- ``M`` -- a positive integer giving the precision at which
``g`` should act.
OUTPUT:
- ``G`` -- an `M \times M` matrix. If `v` is the vector of moments of a
distribution `\mu`, then `v*G` is the vector of moments of `\mu|[a,b;c,d]`
EXAMPLES::
sage: D = Symk(3)
sage: v = D([5,2,7,1])
sage: g = Matrix(ZZ,2,2,[-2,1,-1,0])
sage: v * D._act.actor()(g) # indirect doctest
(-107, 35, -12, 5)
"""
#tim = verbose("Starting")
a, b, c, d = self._adjuster(g)
# if g.parent().base_ring().is_exact():
# self._check_mat(a, b, c, d)
k = self._k
if g.parent().base_ring() is ZZ:
if self._symk:
base_ring = QQ
else:
base_ring = Zmod(self._p ** M)
else:
base_ring = self.underlying_set().base_ring()
cdef Matrix B = matrix(base_ring, M, M)
if M == 0:
return B.change_ring(self.codomain().base_ring())
R = PowerSeriesRing(base_ring, 'y', default_prec=M)
y = R.gen()
#tim = verbose("Checked, made R",tim)
# special case for small precision, large weight
scale = (b + d * y) / (a + c * y)
t = (a + c * y) ** k # will already have precision M
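        # Column ``col`` of ``B`` will hold the first M coefficients of the
        # power series (a + c*y)^k * ((b + d*y)/(a + c*y))^col, so acting on a
        # moment vector is a single vector-matrix multiplication.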
cdef long row, col
#tim = verbose("Made matrix",tim)
for col in range(M):
for row in range(M):
B.set_unsafe(row, col, t[row])
t *= scale
#verbose("Finished loop",tim)
# the changering here is annoying, but otherwise we have to
# change ring each time we multiply
B = B.change_ring(self.codomain().base_ring())
if self._character is not None:
B *= self._character(a)
if self._dettwist is not None:
B *= (a * d - b * c) ** (self._dettwist)
return B
cpdef _call_(self, _v, g):
r"""
The right action of ``g`` on a distribution.
INPUT:
- ``_v`` -- a :class:`Dist_vector` instance, the distribution
on which to act.
- ``g`` -- a `2 \times 2` instance of
:class:`sage.matrix.matrix_integer_dense.Matrix_integer_dense`.
OUTPUT:
- the distribution ``_v * g``.
EXAMPLES::
sage: D = sage.modular.pollack_stevens.distributions.Symk(2)
sage: v = D([2,3,4])
sage: g = Matrix(ZZ,2,2,[3,-1,1,0])
sage: v * D._act.actor()(g) # indirect doctest
(40, -9, 2)
"""
# if g is a matrix it needs to be immutable
# hashing on arithmetic_subgroup_elements is by str
if self.is_left():
_v, g = g, _v
if g == 1:
return _v
cdef Dist_vector v = <Dist_vector?>_v
cdef Dist_vector ans = v._new_c()
try:
g.set_immutable()
except AttributeError:
pass
coeffmodule = v._moments.parent()
v_moments = v._moments
ans._moments = v_moments * self.acting_matrix(g, len(v_moments))
ans.ordp = v.ordp
ans.normalize()
return ans
# cdef inline long mymod(long a, unsigned long pM):
# """
# Returns the remainder ``a % pM``.
# INPUT:
# - ``a`` -- a long
# - ``pM`` -- an unsigned long
# OUTPUT:
# - ``a % pM`` as a positive integer.
# """
# a = a % pM
# if a < 0:
# a += pM
# return a
# cdef class SimpleMat(SageObject):
# r"""
# A simple class emulating a square matrix that holds its values as
# a C array of longs.
# INPUT:
# - ``M`` -- a positive integer, the dimension of the matrix
# EXAMPLES::
# sage: from sage.modular.pollack_stevens.distributions import OverconvergentDistributions, Symk
# """
# def __cinit__(self, unsigned long M):
# r"""
# Memory initialization.
# TESTS::
# sage: from sage.modular.pollack_stevens.distributions import OverconvergentDistributions, Symk
# """
# self._inited = False
# self.M = M
# self._mat = <long*>sage_malloc(M * M * sizeof(long))
# if self._mat == NULL:
# raise MemoryError
# self._inited = True
# def __getitem__(self, i):
# r"""
# INPUT:
# - ``i`` -- a tuple containing two slices, each from `0` to `M'` for some `M' < M`
# OUTPUT:
# - A new SimpleMat of size `M'` with the top left `M' \times
# M'` block of values copied over.
# EXAMPLES::
# sage: from sage.modular.pollack_stevens.distributions import OverconvergentDistributions, Symk
# """
# cdef Py_ssize_t r, c, Mnew, Morig = self.M
# cdef SimpleMat ans
# if isinstance(i,tuple) and len(i) == 2:
# a, b = i
# if isinstance(a, slice) and isinstance(b, slice):
# r0, r1, rs = a.indices(Morig)
# c0, c1, cs = b.indices(Morig)
# if r0!= 0 or c0!= 0 or rs!= 1 or cs!= 1:
# raise NotImplementedError
# Mr = r1
# Mc = c1
# if Mr!= Mc:
# raise ValueError("result not square")
# Mnew = Mr
# if Mnew > Morig:
# raise IndexError("index out of range")
# ans = SimpleMat(Mnew)
# for r in range(Mnew):
# for c in range(Mnew):
# ans._mat[Mnew * c + r] = self._mat[Morig * c + r]
# return ans
# raise NotImplementedError
# def __dealloc__(self):
# r"""
# Deallocation.
# TESTS::
# sage: from sage.modular.pollack_stevens.distributions import OverconvergentDistributions, Symk
# """
# sage_free(self._mat)
# cdef class WeightKAction_long(WeightKAction):
# cpdef _compute_acting_matrix(self, g, _M):
# r"""
# INPUT:
# - ``g`` -- a `2 \times 2` instance of
# :class:`sage.matrices.matrix_integer_dense.Matrix_integer_dense`
# - ``_M`` -- a positive integer giving the precision at which
# ``g`` should act.
# OUTPUT:
# - A :class:`SimpleMat` that gives the action of ``g`` at
# precision ``_M`` in the sense that the moments of the result
# are obtained from the moments of the input by a
# vector-matrix multiplication.
# EXAMPLES::
# sage: from sage.modular.pollack_stevens.distributions import OverconvergentDistributions, Symk
# """
# _a, _b, _c, _d = self._adjuster(g)
# #if self._character is not None: raise NotImplementedError
# # self._check_mat(_a, _b, _c, _d)
# cdef long k = self._k
# cdef Py_ssize_t row, col, M = _M
# cdef nmod_poly_t t, scale, xM, bdy
# cdef mp_limb_t pM = self._p ** M # unsigned long
# cdef long a, b, c, d
# a = mymod(ZZ(_a), pM)
# b = mymod(ZZ(_b), pM)
# c = mymod(ZZ(_c), pM)
# d = mymod(ZZ(_d), pM)
# cdef mp_limb_t pMinv = 1 / pM # n_preinvert_limb(pM) # DEBUG!!! was pM
# nmod_poly_init2_preinv(t, pM, pMinv, M)
# nmod_poly_init2_preinv(scale, pM, pMinv, M)
# nmod_poly_init2_preinv(xM, pM, pMinv, M)
# nmod_poly_init2_preinv(bdy, pM, pMinv, 2)
# nmod_poly_set_coeff_ui(xM, M, 1)
# nmod_poly_set_coeff_ui(t, 0, a)
# nmod_poly_set_coeff_ui(t, 1, c)
# nmod_poly_inv_series(scale, t, M)
# nmod_poly_set_coeff_ui(bdy, 0, b)
# nmod_poly_set_coeff_ui(bdy, 1, d)
# nmod_poly_mullow(scale, scale, bdy, M) # scale = (b+dy)/(a+cy)
# nmod_poly_pow_trunc(t, t, k, M) # t = (a+cy)^k
# cdef SimpleMat B = SimpleMat(M)
# for col in range(M):
# for row in range(M):
# B._mat[M * col + row] = nmod_poly_get_coeff_ui(t, row)
# if col < M - 1:
# nmod_poly_mullow(t, t, scale, M)
# if self._character is not None:
# B = B * self._character(_a, _b, _c, _d)
# return B
# cpdef _call_(self, _v, g):
# r"""
# Application of the action.
# INPUT:
# - ``_v`` -- a :class:`Dist_long` instance, the distribution on
# which to act.
# - ``g`` -- a `2 \times 2` instance of
# :class:`sage.matrix.matrix_integer_dense.Matrix_integer_dense`.
# OUTPUT:
# - The image of ``_v`` under the action of ``g``.
# EXAMPLES::
# sage: from sage.modular.pollack_stevens.distributions import OverconvergentDistributions, Symk
# """
# if self.is_left():
# _v, g = g, _v
# cdef Dist_long v = <Dist_long?>_v
# cdef Dist_long ans = v._new_c()
# ans.relprec = v.relprec
# ans.ordp = v.ordp
# cdef long pM = self._p ** ans.relprec
# cdef SimpleMat B = <SimpleMat>self.acting_matrix(g, ans.relprec)
# cdef long row, col, entry = 0
# for col in range(ans.relprec):
# ans._moments[col] = 0
# for row in range(ans.relprec):
# mom = v._moments[row]
# # DEBUG BELOW
# # if not mom.parent().base_ring().is_exact():
# # try:
# # mom = mom.apply_map(operator.methodcaller('lift'))
# # except AttributeError:
# # pass
# ans._moments[col] += mymod(B._mat[entry] * mom, pM)
# entry += 1
# ans.normalize()
# return ans
# -*- coding: utf-8 -*-
"""Random kit wrapper class.
Implementation of a basic Cython wrapper class around the 'Random kit' library
by Jean-Sebastien Roy. Intended for use in other Cython modules as a more
robust replacement for C stdlib rand().
"""
cimport randomkit_wrapper
from libc cimport stdlib
cdef class RandomKit:
"""
Basic wrapper around 'Random kit' for pseudorandom number generation.
Intended for use in other Cython modules as a more robust replacement for
C stdlib rand().
"""
def __cinit__(RandomKit self):
self.state = <rk_state*> stdlib.malloc(sizeof(rk_state))
if (self.state == NULL):
raise MemoryError
def __init__(RandomKit self, unsigned long seed):
rk_seed(seed, self.state)
def __dealloc__(RandomKit self):
if self.state:
stdlib.free(self.state)
cdef unsigned long integer(RandomKit self, unsigned long maximum):
"""
        Returns a random integer between 0 and maximum inclusive.
Parameters
----------
maximum : unsigned long
Maximum of integer range to sample from.
"""
return rk_interval(maximum, self.state)
cdef double uniform(RandomKit self):
"""
Returns a sample from a uniform distribution over [0,1].
"""
return rk_double(self.state)
cdef double gaussian(RandomKit self):
"""
Returns a sample from a zero-mean unit-variance Gaussian distribution.
"""
return rk_gauss(self.state)
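
# A minimal usage sketch (assuming the accompanying 'randomkit_wrapper.pxd'
# exposes RandomKit and its cdef methods to other Cython modules; the seed
# and arguments below are arbitrary):
#
#     from randomkit_wrapper cimport RandomKit
#
#     cdef RandomKit rng = RandomKit(12345)
#     cdef double u = rng.uniform()          # uniform sample from [0, 1]
#     cdef double g = rng.gaussian()         # standard normal sample
#     cdef unsigned long k = rng.integer(9)  # integer in 0..9 inclusive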
import numpy as np
cimport numpy as np
cimport cython
ctypedef fused DOUBLE_TYPES:
float
double
cdef extern from "cpp/central_difference.h":
void central_difference[T](const T* input, const Py_ssize_t rows,
const Py_ssize_t cols, const Py_ssize_t n_channels,
T* output)
@cython.boundscheck(False)
@cython.wraparound(False)
cpdef gradient_cython(np.ndarray[DOUBLE_TYPES, ndim=3] input):
cdef Py_ssize_t n_channels = input.shape[0]
cdef Py_ssize_t rows = input.shape[1]
cdef Py_ssize_t cols = input.shape[2]
# Maintain the dtype that was passed in (float or double)
dtype = input.dtype
cdef np.ndarray[DOUBLE_TYPES, ndim=3] output = np.zeros((n_channels * 2,
rows, cols),
dtype=dtype)
central_difference(&input[0,0,0], rows, cols, n_channels,
&output[0,0,0])
return output
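
# A minimal usage sketch (shapes are illustrative; the extension must be
# compiled against cpp/central_difference.h):
#
#     import numpy as np
#     img = np.random.rand(3, 64, 64)   # (n_channels, rows, cols), float64
#     grads = gradient_cython(img)      # (2 * n_channels, rows, cols)
#     # each channel contributes two derivative maps, in whatever order
#     # cpp/central_difference.h writes them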
# coding:utf-8
# TODO: make sure each batch contains a well-balanced sample of the data.
# TODO: this file only defines objects, so running it directly does nothing; run it from main.
# When the SGF reader reaches a new game record, reset the board.
import pyximport;
pyximport.install()
import sgf # Please install "sgf 0.5". You can install it by using the command of "pip install sgf".
import re
import os  # import the os module
from game import Player
from game import State
from search import MontecarloSearch
from go import Go
from input_plane import MakeInputPlane
import tensorflow as tf
import math
from go import GoVariable
from go import GoStateObject
from numpy import *
import traceback
# How should passes be handled? The best move found by a forward pass might itself be a pass.
import sys
import datetime
import numpy as np
import tensorflow as tf
import pickle
#from tensorflow.python import control_flow_ops
# Print the default character encoding
# from guppy import hpy
# h = hpy()
class Train(GoVariable):
character_list = [chr(i) for i in range(97, 97 + 26)]
def __init__(self):
# self.train()
self.train()
def reshape_board(self, board_array):
reshaped_boards = []
        for i in range(len(board_array)):
reshaped_boards.append(reshape(board_array[i], 361))
return reshaped_boards
def reshape_answer_board(self, board_array):
return reshape(board_array, 361)
    def invert_board_input(self, board_array):
        for i in range(len(board_array)):
            board_array[i] = board_array[i][::-1]
        return board_array

    def invert_board_answer(self, board_array):
        return board_array[::-1]
    def rotate90_input(self, board_array):
        for i in range(len(board_array)):
            board_array[i] = rot90(board_array[i])
        return board_array

    def rotate90_answer(self, board_array):
        # To rotate by 90 degrees it is easier to keep the array 2-D; the
        # input shape should likewise be (None, 1, 361).
        return rot90(board_array)
def make_rotated_train_batch(self, xTrain, yTrain, input_board, answer_board):
xTrain.append(self.reshape_board(input_board))
yTrain.append(self.reshape_answer_board(answer_board))
        # print(self.reshape_answer_board(answer_board))
input_board2 = self.rotate90_input(input_board)
answer_board2 = self.rotate90_answer(answer_board)
xTrain.append(self.reshape_board(input_board2))
yTrain.append(self.reshape_answer_board(answer_board2))
input_board3 = self.rotate90_input(input_board2)
        answer_board3 = self.rotate90_answer(answer_board2)
xTrain.append(self.reshape_board(input_board3))
yTrain.append(self.reshape_answer_board(answer_board3))
input_board4 = self.rotate90_input(input_board3)
answer_board4 = self.rotate90_answer(answer_board3)
xTrain.append(self.reshape_board(input_board4))
yTrain.append(self.reshape_answer_board(answer_board4))
input_board5 = self.invert_board_input(input_board4)
answer_board5 = self.invert_board_answer(answer_board4)
xTrain.append(self.reshape_board(input_board5))
yTrain.append(self.reshape_answer_board(answer_board5))
input_board6 = self.rotate90_input(input_board5)
answer_board6 = self.rotate90_answer(answer_board5)
xTrain.append(self.reshape_board(input_board6))
yTrain.append(self.reshape_answer_board(answer_board6))
input_board7 = self.rotate90_input(input_board6)
answer_board7 = self.rotate90_answer(answer_board6)
xTrain.append(self.reshape_board(input_board7))
yTrain.append(self.reshape_answer_board(answer_board7))
input_board8 = self.rotate90_input(input_board7)
answer_board8 = self.rotate90_answer(answer_board7)
xTrain.append(self.reshape_board(input_board8))
yTrain.append(self.reshape_answer_board(answer_board8))
return xTrain, yTrain
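
    # The eight (input, answer) pairs appended above are the dihedral (D4)
    # symmetries of the position: four rotations, then four rotations of the
    # mirrored board. A compact, behaviour-equivalent sketch using a
    # hypothetical helper (not used elsewhere in this file):
    #
    #     def iter_symmetries(board, answer, rot_in, rot_ans, inv_in, inv_ans):
    #         for _ in range(2):              # original, then mirrored
    #             for _ in range(4):          # four 90-degree rotations
    #                 yield board, answer
    #                 board, answer = rot_in(board), rot_ans(answer)
    #             board, answer = inv_in(board), inv_ans(answer)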
def train(self):
players = [Player(0.0, 'human'), Player(1.0, 'human')]
players[0].next_player = players[1]
players[1].next_player = players[0]
# player = players[0]
rule = Go()
files = os.listdir(os.getcwd() + "/../../kifu")
print("kifu loaded!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
init = tf.global_variables_initializer()
xTrain = []
yTrain = []
num = 0
batch_count_num = 0
train_count_num = 0
make_input = MakeInputPlane()
step = 0
ckpt_num = 100
batch_count_sum_all = 0
character_list = [chr(i) for i in range(97, 97 + 26)]
print("kifu passed")
continue_kifu_num = 0
for file_name in files:
# print h.heap()
continue_kifu_num += 1
if continue_kifu_num < 150:
continue
step += 1
with open(os.getcwd() + "/../../kifu/" + file_name) as f:
try:
collection = sgf.parse(f.read())
flag = False
except:
continue
try:
go_state_obj = GoStateObject()
# print "通過"
for game in collection:
for node in game:
if flag == False:
flag = True
continue
position = list(node.properties.values())[0]
xpos = character_list.index(position[0][0])
ypos = character_list.index(position[0][1])
pos_tuple = (xpos, ypos)
# print xpos,ypos
#print(pos_tuple)
color = list(node.properties.keys())[0]
if color == "B":
current_player = players[0]
elif color == 'W':
current_player = players[1]
# print "move ends"
num += 1
if num > 90:
input_board = make_input.generate_input(go_state_obj, current_player)
answer_board = make_input.generate_answer(pos_tuple)
xTrain, yTrain = self.make_rotated_train_batch(xTrain, yTrain, input_board,
answer_board)
num = 0
batch_count_num += 1
                        # Note: build the input planes before calling move.
                        go_state_obj = rule.move(go_state_obj, current_player, pos_tuple)
if batch_count_num > 50:
np.savez_compressed('./npzkifu/kifu' + str(train_count_num) + '.npz', x=xTrain, y=yTrain)
#train_step.run(feed_dict={x_input: xTrain, y_: yTrain, keep_prob: 0.5})
batch_count_sum_all += 1
batch_count_num = 0
train_count_num += 1
xTrain = []
yTrain = []
print(train_count_num)
except:
traceback.print_exc()
f.close()
pass
from ._results cimport Result
cdef:
Result _clausen(double) nogil
cdef class ChanFactory:
cdef:
dict single_chans, vector_chans
def __init__(self):
self.single_chans = {
'double': DChan,
'int': IChan,
'int32': IChan
}
self.vector_chans = {
'str': StrChan,
}
def create_chan(self, name, dtype, dsize, **kwargs):
if dtype not in cx_dtype_map:
return None
if dsize == 1:
if dtype in self.single_chans:
return self.single_chans[dtype](name, **kwargs)
else:
return Chan(name, cx_dtype_map[dtype])
elif dtype in self.vector_chans:
return self.vector_chans[dtype](name, dsize)
return VChan(name, cx_dtype_map[dtype], dsize, **kwargs)
cfactory = ChanFactory()
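
# A minimal usage sketch (channel names are hypothetical; DChan, VChan,
# StrChan and cx_dtype_map are assumed to be defined elsewhere in this
# package):
#
#     chan = cfactory.create_chan('beam.current', 'double', 1)    # -> DChan
#     wave = cfactory.create_chan('beam.profile', 'double', 128)  # -> VChan
#     name = cfactory.create_chan('beam.name', 'str', 64)         # -> StrChan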
# distutils: language = c++
# cython: language_level=2
from libcpp.memory cimport unique_ptr
from cython.operator cimport dereference as deref
cimport numpy as np
from rfbp cimport focusingBP
from rfbp cimport nonbayes_test
from Patterns cimport _Patterns
from FocusingProtocol cimport _FocusingProtocol
from MagP64 cimport MagP64
from MagT64 cimport MagT64
from misc cimport double_pointers_for_cython
from enum import Enum
class Mag (int, Enum):
MagP64 = 0
MagT64 = 1
def _rfbp (int mag, _Patterns pattern, _FocusingProtocol protocol,
long int hidden=3, long int max_iter=1000, long int max_steps=101, double randfact=1e-1,
double damping=5e-1, double epsil=1e-1, accuracy=(b'accurate', b'exact'),
long int seed=135, long int nth=1):
acc1, acc2 = accuracy
cdef long int ** weights
if mag == Mag.MagP64:
weights = focusingBP[MagP64](hidden, deref(pattern.thisptr.get()), max_iter, max_steps, seed, damping, acc1, acc2, randfact, deref(protocol.thisptr.get()), epsil, nth)
elif mag == Mag.MagT64:
weights = focusingBP[MagT64](hidden, deref(pattern.thisptr.get()), max_iter, max_steps, seed, damping, acc1, acc2, randfact, deref(protocol.thisptr.get()), epsil, nth)
else:
raise TypeError('Invalid Magnetization given. Possible values are stored in Mag Enum {magP, magT}')
return [[int(weights[i][j]) for j in range(pattern.Ncol)] for i in range(hidden)]
def _nonbayes_test (long int[::1] weights, long int row_size, long int column_size, _Patterns pattern, long int K):
nlabel = pattern.Nrow
cdef long int ** c_weights = double_pointers_for_cython['long int', 'long int'](&weights[0], row_size, column_size)
cdef long int * predicted_labels = nonbayes_test(c_weights, pattern.thisptr.get()[0], K)
return [int(predicted_labels[i]) for i in range(nlabel)]
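
# A minimal usage sketch (all values are illustrative; Patterns and
# FocusingProtocol objects must be built through their own Cython wrappers,
# and the flattened weights must match the hidden x Ncol layout):
#
#     # pattern = _Patterns(...); protocol = _FocusingProtocol(...)
#     # weights = _rfbp(Mag.MagP64, pattern, protocol, hidden=3)
#     # flat = np.asarray(weights, dtype=np.int64).ravel()
#     # labels = _nonbayes_test(flat, 3, pattern.Ncol, pattern, 3)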
###############################################################################
##
## Copyright (C) 2009-2012 Kyle Lutz <[email protected]>
## All rights reserved.
##
## This file is a part of the chemkit project. For more information
## see <http://www.chemkit.org>.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions
## are met:
##
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## * Neither the name of the chemkit project nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
###############################################################################
import cython
from libcpp cimport bool
from libcpp.vector cimport vector
from variant cimport _Variant
from molecule cimport _Molecule
from shared_ptr cimport shared_ptr
cdef class Molecule:
"""The Molecule class represents a molecule."""
cdef _Molecule *_molecule
cdef shared_ptr[_Molecule] *_moleculePointer
### Construction and Destruction ##########################################
def __cinit__(self):
self._molecule = NULL
self._moleculePointer = NULL
def __init__(self, char *formula = NULL, char *format = NULL):
"""Creates a new molecule."""
if formula and format:
self._moleculePointer = new shared_ptr[_Molecule](new _Molecule(formula, format))
else:
self._moleculePointer = new shared_ptr[_Molecule](new _Molecule())
self._molecule = self._moleculePointer.get()
def __dealloc__(self):
"""Destroys the molecule object."""
        if self._moleculePointer != NULL:
del self._moleculePointer
### Properties ############################################################
def setName(self, char *name):
"""Sets the name for the molecule."""
self._molecule.setName(name)
def name(self):
"""Returns the name of the molecule."""
return self._molecule.name().c_str()
def formula(self, char *format = NULL):
"""Returns the formula for the molecule."""
if format:
return self._molecule.formula(format).c_str()
else:
return self._molecule.formula().c_str()
def descriptor(self, char *name):
"""Returns the value of the molecule descriptor given by name."""
cdef _Variant value = self._molecule.descriptor(name)
return value.toDouble()
def size(self):
"""Returns the number of atoms in the molecule."""
return self._molecule.size()
def isEmpty(self):
"""Returns True if the molecule is empty."""
return self._molecule.isEmpty()
def mass(self):
"""Returns the molecular mass of the molecule."""
return self._molecule.mass()
def data(self, char *name):
"""Returns the molecule data for name."""
cdef _Variant value = self._molecule.data(name)
return value.toString().c_str()
### Structure #############################################################
def addAtom(self, element):
"""Adds a new atom to the molecule."""
e = Element(element)
if not e.isValid():
return None
cdef _Atom *_atom = self._molecule.addAtom(cython.operator.dereference(e._element))
return Atom_fromPointer(_atom)
def removeAtom(self, Atom atom):
"""Removes the atom from the molecule."""
self._molecule.removeAtom(atom._atom)
def atom(self, int index):
"""Returns the atom at index in the molecule."""
cdef _Atom *_atom = self._molecule.atom(index)
return Atom_fromPointer(_atom)
def atoms(self):
"""Returns a list of the atoms in the molecule."""
atoms = []
for i in range(self.atomCount()):
atoms.append(self.atom(i))
return atoms
def atomCount(self):
"""Returns the number of atoms in the molecule."""
return self._molecule.atomCount()
def addBond(self, Atom a, Atom b, int order = Bond.Single):
"""Adds and returns a new bond between atoms a and b with order."""
cdef _Bond *_bond = self._molecule.addBond(a._atom, b._atom, order)
return Bond_fromPointer(_bond)
def removeBond(self, Bond bond):
"""Removes bond from the molecule."""
self._molecule.removeBond(bond._bond)
def bond(self, int index):
"""Returns the bond at index in the molecule."""
cdef _Bond *_bond = self._molecule.bond(index)
return Bond_fromPointer(_bond)
def bonds(self):
"""Returns a list of the bonds in the molecule."""
bonds = []
for i in range(self.bondCount()):
bonds.append(self.bond(i))
return bonds
def bondCount(self):
"""Returns the number of bonds in the molecule."""
return self._molecule.bondCount()
### Ring Perception #######################################################
def ring(self, int index):
"""Returns the ring at index in the molecule."""
return Ring_fromPointer(self._molecule.ring(index))
def rings(self):
"""Returns a list of rings in the molecule."""
rings = []
for i in range(self.ringCount()):
rings.append(self.ring(i))
return rings
def ringCount(self):
"""Returns the number of rings in the molecule."""
return self._molecule.ringCount()
### Fragment Perception ###################################################
def fragment(self, int index):
"""Returns the fragment at index in the molecule."""
return Fragment_fromPointer(self._molecule.fragment(index))
def fragments(self):
"""Returns a list of all the fragments in the molecule."""
fragments = []
for i in range(self.fragmentCount()):
fragments.append(self.fragment(i))
return fragments
def fragmentCount(self):
"""Returns the number of fragments in the molecule."""
return self._molecule.fragmentCount()
def isFragmented(self):
"""Returns True if the molecule contains more than one fragment."""
return self._molecule.isFragmented()
def removeFragment(self, Fragment fragment):
"""Removes the fragment from the molecule."""
self._molecule.removeFragment(fragment._fragment)
cdef Molecule Molecule_fromPointer(_Molecule *_molecule):
cdef Molecule molecule = Molecule.__new__(Molecule)
molecule._molecule = _molecule
molecule._moleculePointer = NULL
return molecule
cdef Molecule Molecule_fromSharedPointer(shared_ptr[_Molecule] *_molecule):
cdef Molecule molecule = Molecule.__new__(Molecule)
molecule._molecule = _molecule.get()
molecule._moleculePointer = _molecule
return molecule
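
# A minimal usage sketch (water built atom by atom; the element symbols are
# passed through the Element constructor used by addAtom above):
#
#     mol = Molecule()
#     o = mol.addAtom('O')
#     h1 = mol.addAtom('H')
#     h2 = mol.addAtom('H')
#     mol.addBond(o, h1)
#     mol.addBond(o, h2)
#     print(mol.formula())   # a formula string such as 'H2O'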
# distutils: language = c
# cython: cdivision = True
# cython: boundscheck = False
# cython: wraparound = False
# cython: profile = True
import numpy as np
cdef void c_hash_argmin(const double * d, const long n, const long m,
const npy_intp * idx, const long el, const long size,
long * out) nogil:
cdef npy_intp i, j, k, r = 0
cdef long argmin
cdef double best, current
for i in range(n):
j = 0
while j < el:
best = INFINITY
argmin = 0
for k in range(size):
current = d[i * m + idx[j]]
if current < best:
best = current
argmin = k
j += 1
out[r] = argmin
r += 1
cdef void c_encode_by_bits(const npy_intp n, const npy_intp m, const long * a,
const long bitshift, unsigned long * out) nogil:
cdef npy_intp i, j
cdef long shift
cdef unsigned long value
for i in range(n):
value = 0
shift = 0
for j in range(m):
value += a[i * m + j] << shift
shift += bitshift
out[i] = value
cdef void c_decode_by_bits(const npy_intp n, const npy_intp m,
const unsigned long * a, const long bitshift,
long * out) nogil:
cdef npy_intp i, j, k
cdef long shift, mask = (1 << bitshift) - 1
for i in range(n):
shift = 0
for j in range(m):
out[i * m + j] = (a[i] >> shift) & mask
shift += bitshift
cdef void c_encode_by_place(const npy_intp n, const npy_intp m, const long * a,
const long width, unsigned long * out) nogil:
cdef npy_intp i, j
cdef long place, value, x, k
for i in range(n):
value = 0
place = 1
for j in range(m):
x = a[i * m + j]
x = 2 * x if x > 0 else -2 * x - 1
k = width
while x > 0 and k > 0:
value += (x % 10) * place
x //= 10
place *= 10
k -= 1
while k > 0:
place *= 10
k -= 1
out[i] = value
cdef void c_decode_by_place(const npy_intp n, const npy_intp m,
const unsigned long * a, const long width,
long * out) nogil:
cdef npy_intp i, j
    cdef long place, value, x, k
for i in range(n):
shift = 0
x = a[i]
while x > 0:
j = m - 1
k = 0
place = 1
value = 0
while k < width:
value += (x % 10) * place
x //= 10
place *= 10
k += 1
value = value // 2 if value % 2 == 0 else -(value + 1) // 2
out[i * m + j] = value
j -= 1
cdef char c_count_set_bits(unsigned long n) nogil:
    cdef char count = 0
while n > 0:
n &= n - 1
count += 1
return count
cdef void c_hash_dist(npy_intp n, const unsigned long * h,
const unsigned long ref, const long b, char * out) nogil:
cdef npy_intp i
cdef unsigned long tmp
cdef unsigned long mask
cdef char x
if b == 1:
for i in range(n):
out[i] = c_count_set_bits(h[i] ^ ref)
else:
mask = (2 << b) - 1
for i in range(n):
tmp = h[i] ^ ref
x = 0
while tmp > 0:
                if (tmp & mask) != 0:
x += 1
tmp >>= b
out[i] = x
def hash_argmin(const double[:,::1] d, const npy_intp[::1] idx, const long size,
long[::1] out):
with nogil:
c_hash_argmin(&d[0,0], d.shape[0], d.shape[1], &idx[0], idx.shape[0],
size, &out[0])
return np.asarray(out)
def encode_by_bits(const long[:,::1] a, const long bitshift,
unsigned long[::1] out):
cdef npy_intp n = a.shape[0]
cdef npy_intp m = a.shape[1]
with nogil:
c_encode_by_bits(n, m, &a[0,0], bitshift, &out[0])
return np.asarray(out)
def encode_by_place(const long[:,::1] a, const long width,
unsigned long[::1] out):
cdef npy_intp n = a.shape[0]
cdef npy_intp m = a.shape[1]
with nogil:
c_encode_by_place(n, m, &a[0,0], width, &out[0])
return np.asarray(out)
def decode_by_bits(const unsigned long[::1] a, const npy_intp m,
const long bitshift, long[:,::1] out):
cdef npy_intp n = a.shape[0]
with nogil:
c_decode_by_bits(n, m, &a[0], bitshift, &out[0,0])
return np.asarray(out)
def decode_by_place(const unsigned long[::1] a, const npy_intp m,
const long width, long[:,::1] out):
cdef npy_intp n = a.shape[0]
with nogil:
c_decode_by_place(n, m, &a[0], width, &out[0,0])
return np.asarray(out)
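
# A minimal round-trip sketch for the bit packing (dtypes assume a 64-bit
# C 'long'; with bitshift = 4 every entry must fit in 4 bits):
#
#     import numpy as np
#     a = np.array([[1, 2, 3]], dtype=np.int64)
#     packed = encode_by_bits(a, 4, np.empty(1, dtype=np.uint64))
#     unpacked = decode_by_bits(packed, 3, 4, np.empty((1, 3), dtype=np.int64))
#     assert (a == unpacked).all()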
def get_hash_distances(const unsigned long hashkey,
const unsigned long[::1] hashlist,
const long bitshift_or_width, char[::1] out=None):
if out is None:
out = np.empty(hashlist.shape[0], dtype=np.int8)
    elif out.shape[0] != hashlist.shape[0]:
raise ValueError("Output array has incorrect shape.")
with nogil:
c_hash_dist(hashlist.shape[0], &hashlist[0], hashkey,
bitshift_or_width, &out[0])
    return np.asarray(out)

'''
Core qldate to pydate utilities from bg c++ library, including IMM date handling
'''
import datetime
from bgtools.utils.dates import parse_date
from cython.operator cimport dereference as deref
from libcpp cimport bool as bool
from libcpp.string cimport string
from pybg.version import version
# Date Interfaces
cdef object _pydate_from_qldate(_qldate.Date qdate):
"""c++ QuantLib::Date to python datetime.time
"""
cdef int m = qdate.month()
cdef int d = qdate.dayOfMonth()
cdef int y = qdate.year()
return datetime.date(y, m, d)
cpdef object pydate_from_qldate(qldate.Date qdate):
"""cython Date to python datetime.time
"""
cdef int m = qdate.month
cdef int d = qdate.day
cdef int y = qdate.year
return datetime.date(y, m, d)
cdef _qldate.Date _qldate_from_pydate(object pydate):
pydate = parse_date(pydate)
cdef qldate.Date qdate_ref
if not pydate:
raise ValueError
else:
qdate_ref = qldate.Date.from_datetime(pydate)
cdef _qldate.Date* date_ref = <_qldate.Date*>qdate_ref._thisptr.get()
return deref(date_ref)
cpdef qldate.Date qldate_from_pydate(object pydate):
pydate = parse_date(pydate)
cdef qldate.Date qdate_ref
if not pydate:
raise ValueError
else:
qdate_ref = qldate.Date.from_datetime(pydate)
return qdate_ref
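
# A minimal round-trip sketch (the date is arbitrary):
#
#     import datetime
#     qd = qldate_from_pydate(datetime.date(2013, 6, 19))
#     assert pydate_from_qldate(qd) == datetime.date(2013, 6, 19)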
# Markov Logic Networks -- Inference
#
# (C) 2006-2013 by Daniel Nyga ([email protected])
# Dominik Jain ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from dnutils import logs
from dnutils.console import barstr
from ...logic.common import Logic
from ..database import Database
from ..constants import ALL
from ..mrfvars import MutexVariable, SoftMutexVariable, FuzzyVariable
from ..util import StopWatch, elapsed_time_str, headline, tty, edict
import sys
from ..errors import NoSuchPredicateError
from ..mlnpreds import SoftFunctionalPredicate, FunctionalPredicate
from functools import reduce
logger = logs.getlogger(__name__)
cdef class Inference():
"""
Represents a super class for all inference methods.
Also provides some convenience methods for collecting statistics
about the inference process and nicely outputting results.
:param mrf: the MRF inference is being applied to.
:param queries: a query or list of queries, can be either instances of
:class:`pracmln.logic.common.Logic` or string representations of them,
or predicate names that get expanded to all of their ground atoms.
If `ALL`, all ground atoms are subject to inference.
Additional keyword parameters:
:param cw: (bool) if `True`, the closed-world assumption will be applied
to all but the query atoms.
"""
def __init__(self, mrf, queries=ALL, **params):
self.mrf = mrf
#print(self.mrf)
self.mln = mrf.mln
self._params = edict(params)
if not queries:
self.queries = [self.mln.logic.gnd_lit(ga, negated=False, mln=self.mln) for ga in self.mrf.gndatoms if self.mrf.evidence[ga.idx] is None]
else:
# check for single/multiple query and expand
if type(queries) is not list:
queries = [queries]
self.queries = self._expand_queries(queries)
# fill in the missing truth values of variables that have only one remaining value
for variable in self.mrf.variables:
if variable.valuecount(self.mrf.evidence_dicti()) == 1: # the var is fully determined by the evidence
                for _, value in variable.itervalues(self.mrf.evidence): break
self.mrf.set_evidence(variable.value2dict(value), erase=False)
# apply the closed world assumptions to the explicitly specified predicates
if self.cwpreds:
for pred in self.cwpreds:
if isinstance(self.mln.predicate(pred), SoftFunctionalPredicate):
if self.verbose: logger.warning('Closed world assumption will be applied to soft functional predicate %s' % pred)
elif isinstance(self.mln.predicate(pred), FunctionalPredicate):
raise Exception('Closed world assumption is inapplicable to functional predicate %s' % pred)
for gndatom in self.mrf.gndatoms:
                    if gndatom.predname != pred: continue
if self.mrf.evidence[gndatom.idx] is None:
self.mrf.evidence[gndatom.idx] = 0
# apply the closed world assumption to all remaining ground atoms that are not in the queries
if self.closedworld:
qpreds = set()
for q in self.queries:
qpreds.update(q.prednames())
for gndatom in self.mrf.gndatoms:
if isinstance(self.mln.predicate(gndatom.predname), FunctionalPredicate) \
or isinstance(self.mln.predicate(gndatom.predname), SoftFunctionalPredicate):
continue
if gndatom.predname not in qpreds and self.mrf.evidence[gndatom.idx] is None:
self.mrf.evidence[gndatom.idx] = 0
for var in self.mrf.variables:
if isinstance(var, FuzzyVariable):
var.consistent(self.mrf.evidence, strict=True)
self._watch = StopWatch()
@property
def verbose(self):
return self._params.get('verbose', False)
@property
def results(self):
if self._results is None:
raise Exception('No results available. Run the inference first.')
else:
return self._results
@property
def elapsedtime(self):
return self._watch['inference'].elapsedtime
@property
def multicore(self):
return self._params.get('multicore')
@property
def resultdb(self):
if '_resultdb' in self.__dict__:
return self._resultdb
db = Database(self.mrf.mln)
for atom in sorted(self.results, key=str):
db[str(atom)] = self.results[atom]
return db
@resultdb.setter
def resultdb(self, db):
self._resultdb = db
@property
def closedworld(self):
return self._params.get('cw', False)
@property
def cwpreds(self):
return self._params.get('cw_preds', [])
def _expand_queries(self, queries):
"""
Expands the list of queries where necessary, e.g. queries that are
just predicate names are expanded to the corresponding list of atoms.
"""
equeries = []
for query in queries:
if type(query) == str:
prevLen = len(equeries)
if '(' in query: # a fully or partially grounded formula
f = self.mln.logic.parse_formula(query)
for gf in f.itergroundings(self.mrf):
equeries.append(gf)
else: # just a predicate name
if query not in self.mln.prednames:
raise NoSuchPredicateError('Unsupported query: %s is not among the admissible predicates.' % (query))
#continue
for gndatom in self.mln.predicate(query).groundatoms(self.mln, self.mrf.domains):
equeries.append(self.mln.logic.gnd_lit(self.mrf.gndatom(gndatom), negated=False, mln=self.mln))
if len(equeries) - prevLen == 0:
raise Exception("String query '%s' could not be expanded." % query)
elif isinstance(query, Logic.Formula):
equeries.append(query)
else:
raise Exception("Received query of unsupported type '%s'" % str(type(query)))
return equeries
def _run(self):
raise Exception('%s does not implement _run()' % self.__class__.__name__)
def run(self):
"""
Starts the inference process.
"""
# perform actual inference (polymorphic)
if self.verbose: print('Inference engine: %s' % self.__class__.__name__)
self._watch.tag('inference', verbose=self.verbose)
_weights_backup = list(self.mln.weights)
self._results = self._run()
self.mln.weights = _weights_backup
self._watch.finish('inference')
return self
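    # A minimal driver sketch (hedged): 'ExactInference' is a hypothetical
    # concrete subclass implementing _run(); any real subclass is used the
    # same way.
    #
    #   inf = ExactInference(mrf, queries='foo(x)', cw=True, verbose=True)
    #   inf.run()                  # tags the 'inference' stopwatch
    #   inf.write(sys.stdout)      # one probability bar per query atom
    #   inf.write_elapsed_time()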
def write(self, stream=sys.stdout, color=None, sort='prob', group=True, reverse=True):
cdef int barwidth = 30
if tty(stream) and color is None:
color = 'yellow'
if sort not in ('alpha', 'prob'):
raise Exception('Unknown sorting: %s' % sort)
results = dict(self.results)
cdef bint wrote_results
if group:
wrote_results = False
for var in sorted(self.mrf.variables, key=str):
res = dict([(atom, prob) for atom, prob in results.items() if atom in list(map(str, var.gndatoms))])
if not res: continue
if isinstance(var, MutexVariable) or isinstance(var, SoftMutexVariable):
stream.write('%s:\n' % var)
if sort == 'prob':
res = sorted(res, key=self.results.__getitem__, reverse=reverse)
elif sort == 'alpha':
res = sorted(res, key=str)
for atom in res:
stream.write('%s %s\n' % (barstr(barwidth, self.results[atom], color=color), atom))
wrote_results = True
if not wrote_results:
max_len = max([len(str(q)) for q, p in list(results.items())])
result_tuples = list(results.items())
result_tuples.sort(key=lambda pair: pair[1], reverse=True)
str_results = [("{:" + str(max_len) + "s} {:7.2f}").format(str(q), p) for q, p in result_tuples]
stream.write(reduce(lambda a,b: a + "\n" + b, str_results, ""))
return
# first sort wrt to probability
results = sorted(results, key=self.results.__getitem__, reverse=reverse)
# then wrt gnd atoms
results = sorted(results, key=str)
for q in results:
stream.write('%s %s\n' % (barstr(barwidth, self.results[q], color=color), q))
self._watch.printSteps()
def write_elapsed_time(self, stream=sys.stdout, color=None):
if stream is sys.stdout and color is None:
color = True
elif color is None:
color = False
if color: col = 'blue'
else: col = None
total = float(self._watch['inference'].elapsedtime)
stream.write(headline('INFERENCE RUNTIME STATISTICS'))
        stream.write('\n')
self._watch.finish()
for t in sorted(list(self._watch.tags.values()), key=lambda t: t.elapsedtime, reverse=True):
stream.write('%s %s %s\n' % (barstr(width=30, percent=t.elapsedtime / total, color=col), elapsed_time_str(t.elapsedtime), t.label))
def primes(k_max):
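    # Trial division: p[] stores the primes found so far, and a candidate n
    # is prime iff no stored prime divides it. The static buffer caps the
    # result at 1000 primes.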
    cdef int n, k, i
    cdef int p[1000]
    result = []
    if k_max > 1000:
        k_max = 1000
    k = 0
    n = 2
    while k < k_max:
        i = 0
        while i < k and n % p[i] != 0:
            i = i + 1
        if i == k:
            p[k] = n
            k = k + 1
            result.append(n)
        n = n + 1
    return result
cimport cyminheap # import the declaration in cyminheap.pxd
from libc.stdint cimport uint32_t # import the integer type from C
# define a struct that would be stored in the heap.
# NOTE THAT structs defined in pyx files are not exported
# with this module. Rather, since good_t only serves as the parameter type
# of the exported class (MinHeap in this case), a python dictionary with
# matching keys can be passed wherever it is required,
# e.g. heap.push({"id": 1, "price": 10.0})
cdef struct good_t:
int id
float price
# define the required C functions for comparison, copy and swap of good_t
cdef int good_cmp(void *self, void *other):
cdef good_t *pself = <good_t *>self
cdef good_t *pother = <good_t *>other
# NOTE THAT <*type*> is the special form for coercion in cython
if pself.price > pother.price:
return 1
elif pself.price < pother.price:
return -1
else:
return 0
cdef void *good_copy(void *self, void *other):
    # NOTE THAT cython does NOT support C's unary dereference operator *;
    # use some_pointer[0] instead of *some_pointer to dereference
    # the pointer
(<good_t *>self)[0] = (<good_t *>other)[0]
return self
cdef void good_swap(void *self, void *other):
if self == other:
return
cdef good_t temp = (<good_t *>self)[0]
(<good_t *>self)[0] = (<good_t *>other)[0]
(<good_t *>other)[0] = temp
return
cdef class MinHeap:
""" Minimum heap container, a wrapper based on an implementation in C.
    >>> from minheap import MinHeap
    >>> heap = MinHeap()
    >>> heap.push({"id": 1, "price": 1.0})
    >>> heap.peek()
    {'id': 1, 'price': 1.0}
    >>> item = heap.pop()   # item == {'id': 1, 'price': 1.0}
    >>> heap.peek()         # raises IndexError once the heap is empty
    >>> heap.pop()          # raises IndexError once the heap is empty
"""
cdef cyminheap.minheap_t *_c_minheap
    # cython guarantees that __cinit__ is called when a new
    # MinHeap object is instantiated. Likewise, when the object
    # is no longer referenced by anything else, __dealloc__ is
    # called before it is reclaimed by the python runtime
def __cinit__(self):
cdef uint32_t initial_number = 0
self._c_minheap = cyminheap.minheap_create(initial_number,
sizeof(good_t),
<cyminheap.compare>good_cmp,
<cyminheap.copy>good_copy,
<cyminheap.swap>good_swap)
if self._c_minheap is NULL:
raise MemoryError()
def __dealloc__(self):
if self._c_minheap is not NULL:
cyminheap.minheap_free(self._c_minheap)
# since __cinit__ has already created the heap itself, no further
# operations need to be done in __init__
def __init__(self):
pass
cdef void _c_push(self, void *item):
cyminheap.minheap_push(self._c_minheap, item)
cpdef push(self, good_t item):
self._c_push(&item)
cdef good_t * _c_pop(self):
return <good_t*>cyminheap.minheap_pop(self._c_minheap)
    # NOTE THAT a function defined through 'def' is a regular python
    # function, so any exception raised while it runs is automatically
    # propagated to the caller by cython. However, for functions defined
    # through 'cdef' or 'cpdef', exception propagation must be declared
    # explicitly with an 'except?/*/-1' clause; otherwise the exception is
    # ignored by default and merely posts a warning at runtime
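    # For instance (a sketch, not used by this class), a cdef method that
    # must propagate errors would declare an error-return sentinel:
    #
    #   cdef int _checked_pop(self) except -1:
    #       if cyminheap.minheap_pop(self._c_minheap) is NULL:
    #           raise IndexError    # now reaches the Python caller
    #       return 0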
def pop(self):
cdef good_t *data = self._c_pop()
if data is NULL:
raise IndexError("Can not pop from an empty heap.")
else:
return data[0]
cdef good_t * _c_peek(self):
return <good_t*>cyminheap.minheap_min(self._c_minheap)
def peek(self):
cdef good_t *data = self._c_peek()
if data is NULL:
raise IndexError("Can not peek from an empty heap.")
else:
return data[0]
# cython: language_level = 3, boundscheck = False
cdef extern from "../../../src/objects/tests/ism.h":
unsigned short test_ism_initialize()
unsigned short test_ism_free()
#cython: language_level=3, boundscheck=False, wraparound=False, nonecheck=False, cdivision=True
#distutils: define_macros=NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION
#distutils: language = c++
# -*- coding: utf-8 -*-
__version__="0.2"
__doc__='''
############## CYTHON RASTERIZATION & UTILS #############################
### Author: Pedro Henrique A. Hasselmann, Dr. (OBSPM-Lesia)
### Last Modified: April, 2021
###
### requirements:
### Cython, cpp compiler
###
### Main Functions:
### rebase_c
###
################################################################################
'''
# Cythonize
import cython
cimport cython
from cython.parallel import prange, parallel
from cython cimport view as cview
from cpython cimport array
from libc.math cimport ceil
from libc.string cimport memcpy, memset
#from libcpp.vector cimport vector
import numpy as np
cimport numpy as np
from numpy import ndarray, uint8, uint16, uint32, uint64, int32, float32, float64, uint8
from numpy cimport ndarray, uint8_t, uint16_t, uint32_t, int32_t, int64_t, float32_t, float64_t
from numpy.math cimport INFINITY
np.import_array()
# NUMBER OF THREADS
cdef short thrn = 4
################
### FUNCTIONS ##
################
cpdef rebase_c(double[:,::1] v, # Vertices N columns
unsigned int[:,::1] fc, # Facet indexes
):
cdef:
ssize_t i, j
ssize_t F = fc.shape[0]
double[:,:,::1] R = np.empty((F,3,v.shape[1]), order='C', dtype=float64)
double[:,::1] t = np.empty((3,v.shape[1]), order='C', dtype=float64)
with nogil:
for i in range(F):
j = fc[i][0]
R[i][0,...] = v[j,...]
j = fc[i][1]
R[i][1,...] = v[j,...]
j = fc[i][2]
R[i][2,...] = v[j,...]
return np.asarray(R)
cpdef weighted_average_c(float[:,::1] fpa, # Y X Facet Area
float[::1] X, # Values to be broadcast
):
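    # Rows of fpa are assumed grouped by pixel: consecutive rows sharing the
    # same (Y, X) coordinates are merged into one output row that holds the
    # facet-area-weighted average of X, normalised by the accumulated area.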
cdef:
ssize_t i
unsigned int f=0
double value=0., a=0.
ssize_t N = fpa.shape[0]
float[:,::1] X_pix = np.empty((N,3), dtype=float32, order='C')
X_pix[:,:] = -999.
    #with nogil:
a = fpa[0,3]
#f = int(fpa[0,2])
value = a*X[0]
for i in range(1,N):
if (fpa[i,0] == fpa[i-1,0]) and (fpa[i,1] == fpa[i-1,1]):
#f = int(fpa[i,2])
value += fpa[i,3]*X[i]
a += fpa[i,3]
else:
#if a>0.99:
# value = value/a
X_pix[i,2] = value/a
X_pix[i,1] = fpa[i-1,1]
X_pix[i,0] = fpa[i-1,0]
#print(i, N, np.asarray(X_pix[i,:]))
a = fpa[i,3]
#f = int(fpa[i,2])
value = a*X[i]
return np.unique(np.asarray(X_pix), axis=0)
####################################################################
#################### RASTERIZATION #################################
####################################################################
cdef float[:,::1] vc = np.empty((2,3), dtype=float32, order='C')
cpdef raster_c((ssize_t, ssize_t) it,
float[:,:,::1] V,
unsigned int[::1] f,
(short, short) frame,
unicode mode=u'pixelation'):
'''
Rasterization.
Fill-in the area inside vertices (skimage.draw.polygon) -- DONE
https://stackoverflow.com/questions/26807177/overlapping-polygons-in-python-pil
https://stackoverflow.com/questions/42349587/cython-slow-numpy-arrays
https://www.scratchapixel.com/lessons/3d-basic-rendering/rasterization-practical-implementation
Parameters
==========
    it : start & stop iteration (multiprocessing.pool.map input)
    V : 2x3xN numpy.array, vertices triplet per facet
    f : N numpy.array, original facet id
frame : frame size in pixels
mode : 'pixelation' or 'occultation'
Output
======
image : 2D int32 numpy.array
facet_pix : 3xN numpy.array, X and Y pixel-coordinates alongside with facetid.
'''
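    # Mode note: 'pixelation' superimposes facet ids (a later facet
    # overwrites earlier ones in each pixel), while 'occultation' accumulates
    # a value of 1 per covering facet, i.e. a facet-count coverage map.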
#from skimage.draw import polygon, polygon_perimeter
cdef:
ssize_t j
pa = TriangleAccumulator(frame[1], frame[0], mem_fac=4)
#with nogil:
if mode == 'pixelation':
for j in range(it[0],it[1]):
            # Triangular facet area projection (polygon filling)
            vc[...] = V[...,j]
            pa.superimpose_polygon(vc[0,:], vc[1,:], f[j])
#print('v', j)
#print('f', f[j])
#print('n', pa.n)
#r = np.asarray(v[0,:], dtype=int32)
#c = np.asarray(v[1,:], dtype=int32)
#print(rr_area)
#print(cc_area)
#print(r)
#print(c)
#image[rr_area, cc_area]=n+1
#image[rr_peri, cc_peri]=n+1
#image[r,c]=n+1
#raw_input(image[np.min(r)-3:np.max(r)+3,np.min(c)-3:np.max(c)+3])
if mode == 'occultation':
for j in range(it[0],it[1]):
            # Triangular facet area projection (polygon filling)
            vc[...] = V[...,j]
            pa.add_polygon(vc[0,:], vc[1,:], 1)
return np.asarray(pa.image), np.stack((pa.rr, pa.cc, pa.ff), axis=-1).astype(uint32)
##############################
## DRAW.POLYGON ACCUMULATOR ##
##############################
# https://stackoverflow.com/questions/2049582/how-to-determine-if-a-point-is-in-a-2d-triangle
cdef bint point_in_triangle(float[::1] xp,
float[::1] yp,
float x,
float y) nogil:
cdef:
float dX = x - xp[0]
float dY = y - yp[0]
float dX20 = xp[2] - xp[0]
float dY20 = yp[2] - yp[0]
float dX10 = xp[1] - xp[0]
float dY10 = yp[1] - yp[0]
float s = (dY20*dX) - (dX20*dY)
float t = (dX10*dY) - (dY10*dX)
float D = (dX10*dY20) - (dY10*dX20)
if D<0:
return (s<=0) & (t<=0) & ((s + t)>=D)
else:
return (s>=0) & (t>=0) & ((s + t)<=D)
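# Geometric note: s and t above are unnormalised barycentric coordinates of
# (x, y), scaled by D, the signed doubled area of the triangle; the point is
# inside iff s, t and s + t all lie between 0 and D, with the inequalities
# flipped when the winding makes D negative.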
cdef (float, float) minmax(float[::1] arr) nogil:
cdef float min = INFINITY
cdef float max = -INFINITY
cdef ssize_t i, L = arr.shape[0]
for i in range(L):
if arr[i] < min:
min = arr[i]
if arr[i] > max:
max = arr[i]
return min, max
#https://stackoverflow.com/questions/26807177/overlapping-polygons-in-python-pil
cdef class TriangleAccumulator:
cdef:
short width, height
int n
unsigned int[:, ::1] image
unsigned short[::1] rr, cc
unsigned int[::1] ff
def __cinit__(self, short width,
short height,
short mem_fac):
self.n = 0
self.width = width
self.height = height
shape = (height, width)
self.image = np.zeros(shape, dtype=uint32)
self.rr = np.empty(mem_fac*(self.width-1)*(self.height-1), dtype=uint16, order='C')
self.cc = np.empty(mem_fac*(self.width-1)*(self.height-1), dtype=uint16, order='C')
self.ff = np.empty(mem_fac*(self.width-1)*(self.height-1), dtype=uint32, order='C')
def reset(self):
self.image[:, :] = 0
# Rasterization of triangles in self.image are **summed** over the previous values
cdef void add_polygon(self,
float[::1] ya,
float[::1] xa,
unsigned int value) nogil:
cdef float minya, maxya, minxa, maxxa
minya, maxya = minmax(ya)
minxa, maxxa = minmax(xa)
cdef:
ssize_t minr = int(max(0, minya))
ssize_t maxr = int(ceil(maxya))
ssize_t minc = int(max(0, minxa))
ssize_t maxc = int(ceil(maxxa))
unsigned short r, c
        maxr = min(self.height - 1, maxr)
        maxc = min(self.width - 1, maxc)
#with nogil:
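        # Coverage test: a pixel is counted if its centre or any of the eight
        # half-pixel offsets around it falls inside the triangle, which keeps
        # thin sliver triangles from dropping out of the raster entirely.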
for r in range(minr, maxr+1):
for c in range(minc, maxc+1):
if (point_in_triangle(xa, ya, c, r+0.5) or
point_in_triangle(xa, ya, c+0.5, r) or
point_in_triangle(xa, ya, c-0.5, r) or
point_in_triangle(xa, ya, c, r-0.5) or
point_in_triangle(xa, ya, c+0.5, r+0.5) or
point_in_triangle(xa, ya, c-0.5, r-0.5) or
point_in_triangle(xa, ya, c-0.5, r+0.5) or
point_in_triangle(xa, ya, c+0.5, r-0.5) or
point_in_triangle(xa, ya, c, r)
):
self.image[r, c] += value
self.rr[self.n] = r
self.cc[self.n] = c
self.ff[self.n] = value
                    self.n += 1
#return rr, cc
# Rasterization of triangles in self.image are **superimposed** over the previous values
#@cython.boundscheck(False) # Deactivate bounds checking
#@cython.wraparound(False) # Deactivate negative indexing.
#@cython.nonecheck(False)
#@cython.cdivision(True)
cdef void superimpose_polygon(self,
float[::1] ya,
float[::1] xa,
unsigned int value) nogil:
cdef float minya, maxya, minxa, maxxa
minya, maxya = minmax(ya)
minxa, maxxa = minmax(xa)
cdef:
ssize_t minr = int(max(0, minya))
ssize_t maxr = int(ceil(maxya))
ssize_t minc = int(max(0, minxa))
ssize_t maxc = int(ceil(maxxa))
unsigned short r, c
        maxr = min(self.height - 1, maxr)
        maxc = min(self.width - 1, maxc)
#with nogil:
for r in range(minr, maxr+1):
for c in range(minc, maxc+1):
if (point_in_triangle(xa, ya, c, r+0.5) or
point_in_triangle(xa, ya, c+0.5, r) or
point_in_triangle(xa, ya, c-0.5, r) or
point_in_triangle(xa, ya, c, r-0.5) or
point_in_triangle(xa, ya, c+0.5, r+0.5) or
point_in_triangle(xa, ya, c-0.5, r-0.5) or
point_in_triangle(xa, ya, c-0.5, r+0.5) or
point_in_triangle(xa, ya, c+0.5, r-0.5) or
point_in_triangle(xa, ya, c, r)
):
self.image[r, c] = value
self.rr[self.n] = r
self.cc[self.n] = c
self.ff[self.n] = value
                    self.n += 1
#print('sizes')
#print(self.n)
#print(maxc-minc+1)
#print(maxr-minr+1)
#print('pixel')
#print(value)
#print(r, self.rr[self.n-1])
#print(self.rr[self.n])
#input('raster pixel enter')
#return rr, cc
# END
from cpython cimport bool
from libc.stdint cimport int64_t, int32_t
from libc.stdlib cimport calloc, free
from libc.string cimport memcpy, memcmp
from collections import namedtuple
cdef extern from "src/sophia.h" nogil:
cdef void *sp_env()
cdef void *sp_document(void *)
cdef int sp_setstring(void*, const char*, const void*, int)
cdef int sp_setint(void*, const char*, int64_t)
cdef void *sp_getobject(void*, const char*)
cdef void *sp_getstring(void*, const char*, int*)
cdef int64_t sp_getint(void*, const char*)
cdef int sp_open(void *)
cdef int sp_destroy(void *)
cdef int sp_set(void*, void*)
    cdef int sp_upsert(void*, void*)
cdef int sp_delete(void*, void*)
cdef void *sp_get(void*, void*)
cdef void *sp_cursor(void*)
cdef void *sp_begin(void *)
cdef int sp_prepare(void *)
cdef int sp_commit(void *)
class SophiaError(Exception): pass
class SophiaClosed(SophiaError): pass
class DocumentClosed(SophiaClosed): pass
class BadQuery(SophiaError): pass
class TransactionError(SophiaError): pass
class TransactionRollback(TransactionError): pass
class TransactionLocked(TransactionError): pass
IndexType = namedtuple('IndexType', ('value', 'is_bytes'))
cdef class Types:
string = IndexType(b'string', True)
u64 = IndexType(b'u64', False)
u32 = IndexType(b'u32', False)
u16 = IndexType(b'u16', False)
u8 = IndexType(b'u8', False)
u64_rev = IndexType(b'u64_rev', False)
u32_rev = IndexType(b'u32_rev', False)
u16_rev = IndexType(b'u16_rev', False)
u8_rev = IndexType(b'u8_rev', False)
cdef class cstring:
""" Simple lazy string on dynamic memory """
cdef readonly char *c_str
cdef readonly size_t size
@classmethod
def from_string(cls, str string):
return cls(string.encode())
def __cinit__(self, bytes value):
cdef char* cvalue = value
self.size = len(value)
with nogil:
self.c_str = <char*> calloc(self.size + 1, sizeof(char))
memcpy(<void*> self.c_str, <void*> cvalue, self.size)
def __dealloc__(self):
        if self.c_str != NULL:
free(self.c_str)
def __str__(self):
return "%r" % self.value
def __repr__(self):
return self.__str__()
@property
def value(self):
return self.c_str[:self.size]
def __eq__(self, cstring other):
cdef int result
        if self.size != other.size:
return False
with nogil:
result = memcmp(self.c_str, other.c_str, self.size)
return result == 0
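# Usage sketch (hedged, for illustration only):
#   key = cstring.from_string('sophia.path')   # owns a NUL-terminated copy
#   key.size == 11 and key.value == b'sophia.path'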
cdef class Environment(object):
cdef void *env
cdef readonly bool _closed
cdef readonly Configuration _configuration
def __check_error(self, int rc):
        if rc != -1:
return rc
try:
error = self.get_string('sophia.error').decode('utf-8', 'ignore')
except KeyError:
error = 'unknown error occurred.'
raise SophiaError(error)
def check_closed(self):
if self._closed:
raise SophiaClosed("Environment closed")
def __cinit__(self):
self.env = sp_env()
self._closed = None
self._configuration = Configuration(self)
@property
def configuration(self) -> dict:
return dict(self._configuration)
@property
def is_closed(self):
return self._closed
@property
def is_opened(self):
return self._closed is not None
def open(self) -> int:
self.check_closed()
cdef int rc
with nogil:
rc = sp_open(self.env)
return self.__check_error(rc)
def close(self) -> int:
self.check_closed()
cdef int rc
        if self.is_opened and self.env != NULL:
rc = sp_destroy(self.env)
self._closed = True
return self.__check_error(rc)
def __dealloc__(self):
if not self._closed:
self.close()
def get_string(self, str key) -> bytes:
self.check_closed()
cdef char* buf
cdef int nlen
cdef cstring ckey = cstring.from_string(key)
with nogil:
buf = <char *>sp_getstring(self.env, ckey.c_str, &nlen)
if buf == NULL:
raise KeyError("Key %r not found in document" % key)
value = buf[:nlen]
return value
def get_int(self, str key) -> int:
self.check_closed()
cdef cstring ckey = cstring.from_string(key)
cdef int64_t result
with nogil:
result = sp_getint(self.env, ckey.c_str)
return result
def set_string(self, str key, bytes value) -> int:
self.check_closed()
cdef int rc
cdef cstring ckey = cstring.from_string(key)
cdef cstring cvalue = cstring(value)
with nogil:
rc = sp_setstring(self.env, ckey.c_str, cvalue.c_str, cvalue.size)
self.__check_error(rc)
return rc
def set_int(self, str key, int value) -> int:
self.check_closed()
cdef cstring ckey = cstring.from_string(key)
cdef int64_t cvalue = value
cdef int rc
with nogil:
rc = sp_setint(self.env, ckey.c_str, cvalue)
self.__check_error(rc)
return rc
def get_object(self, str name) -> Database:
self.check_closed()
cdef cstring cname = cstring.from_string(name)
db = Database(self, name)
with nogil:
db.db = sp_getobject(self.env, cname.c_str)
if db.db == NULL:
self.__check_error(-1)
return db
def transaction(self) -> Transaction:
return Transaction(self)
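# Typical lifecycle sketch (hedged; the configuration keys follow the sophia
# engine's documented naming and are assumptions of this example):
#
#   env = Environment()
#   env.set_string('sophia.path', b'./storage')
#   env.set_string('db', b'test')
#   env.open()
#   db = env.get_object('db.test')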
cdef class Configuration:
cdef readonly Environment env
def __cinit__(self, Environment env):
self.env = env
def __iter__(self):
self.env.check_closed()
cdef void *cursor
with nogil:
cursor = sp_getobject(self.env.env, NULL)
if cursor == NULL:
try:
error = self.env.get_string('sophia.error').decode('utf-8', 'ignore')
except KeyError:
error = 'unknown error occurred.'
raise SophiaError(error)
cdef char *key, *value
cdef int key_len, value_len
        cdef void* obj = NULL  # sophia cursor protocol: pass NULL to the first sp_get
try:
while True:
with nogil:
obj = sp_get(cursor, obj)
if obj == NULL:
raise StopIteration
with nogil:
key = <char*> sp_getstring(obj, 'key', &key_len)
value = <char*> sp_getstring(obj, 'value', &value_len)
if key_len > 0:
key_len -= 1
if value_len > 0:
value_len -= 1
k = key[:key_len].decode()
v = value[:value_len].decode()
key_len = 0
value_len = 0
if v.isdigit():
v = int(v)
yield k, v
finally:
            if cursor != NULL:
with nogil:
sp_destroy(cursor)
cdef class Transaction:
cdef void* tx
cdef readonly Environment env
cdef readonly bool closed
cdef readonly list __refs
def __check_error(self, int rc):
        if rc != -1:
return rc
try:
error = self.env.get_string('sophia.error').decode('utf-8', 'ignore')
except KeyError:
error = 'unknown error occurred.'
raise SophiaError(error)
def __check_closed(self):
if self.closed:
raise TransactionError('Transaction closed')
self.env.check_closed()
def __cinit__(self, Environment env):
self.closed = True
self.env = env
with nogil:
self.tx = sp_begin(env.env)
if not self.tx:
self.__check_error(-1)
self.closed = False
self.__refs = []
def set(self, Document document) -> int:
self.__check_closed()
if document.closed:
raise DocumentClosed
cdef int rc
with nogil:
rc = sp_set(self.tx, document.obj)
document.obj = NULL
self.__check_error(rc)
        self.__refs.append(document)
return rc
def get(self, Document query) -> Document:
cdef void* result_ptr = NULL
cdef Database db = query.db
with nogil:
result_ptr = sp_get(self.tx, query.obj)
# sp_get destroy object inside
query.obj = NULL
if result_ptr == NULL:
raise LookupError
result = Document(db, external=True, readonly=True)
result.obj = result_ptr
result.external = False
return result
def delete(self, Document query):
cdef int rc
with nogil:
rc = sp_delete(self.tx, query.obj)
query.obj = NULL
return self.__check_error(rc)
def commit(self) -> int:
self.__check_closed()
cdef int rc
with nogil:
rc = sp_commit(self.tx)
self.__check_error(rc)
self.closed = True
self.tx = NULL
if rc == 0:
return 0
elif rc == 1:
raise TransactionRollback
        elif rc == 2:
raise TransactionLocked
def rollback(self) -> int:
self.__check_closed()
        if self.tx != NULL:
with nogil:
sp_destroy(self.tx)
self.tx = NULL
self.closed = True
def __enter__(self):
self.__check_closed()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
self.rollback()
return
self.commit()
cdef class Database:
cdef readonly str name
cdef readonly Environment env
cdef void* db
def __cinit__(self, Environment env, str name):
self.name = name
self.env = env
def __check_error(self, int rc):
        if rc != -1:
return rc
try:
error = self.env.get_string('sophia.error').decode('utf-8', 'ignore')
except KeyError:
error = 'unknown error occurred.'
raise SophiaError(error)
def document(self) -> Document:
doc = Document(self)
with nogil:
doc.obj = sp_document(self.db)
return doc
def get(self, Document query) -> Document:
cdef void* result_ptr = NULL
with nogil:
result_ptr = sp_get(self.db, query.obj)
# sp_get destroy object inside
query.obj = NULL
if result_ptr == NULL:
raise LookupError
result = Document(self, external=True, readonly=True)
result.obj = result_ptr
result.external = False
return result
def set(self, Document document) -> int:
cdef int rc
if document.closed:
raise DocumentClosed
with nogil:
rc = sp_set(self.db, document.obj)
document.obj = NULL
return self.__check_error(rc)
def delete(self, Document document) -> int:
cdef int rc
with nogil:
rc = sp_delete(self.db, document.obj)
document.obj = NULL
return self.__check_error(rc)
cdef int32_t get_length(self) nogil:
cdef void* obj
cdef void* cursor
cdef size_t result = 0
obj = sp_document(self.db)
if obj == NULL:
return -1
cursor = sp_cursor(self.env.env)
if not cursor:
return -1
while True:
obj = sp_get(cursor, obj)
            if obj != NULL:
result += 1
else:
break
        if cursor != NULL:
sp_destroy(cursor)
return result
def __len__(self) -> int:
cdef int32_t result = 0
with nogil:
result = self.get_length()
return self.__check_error(result)
def cursor(self, dict query) -> Cursor:
return Cursor(self.env, query, self)
def transaction(self) -> Transaction:
self.env.check_closed()
return self.env.transaction()
def delete_many(self, **query):
query.setdefault('order', '>=')
if query['order'] not in ('>=', '<=', '>', '<'):
raise ValueError('Invalid order')
cdef void* obj
with nogil:
obj = sp_document(self.db)
if obj == NULL:
self.__check_error(-1)
prefix = '%s.scheme.' % self.name
key_fields = []
for key, value in self.env._configuration:
if not key.startswith(prefix):
continue
if isinstance(value, int):
continue
if ',key(' not in value:
continue
key_fields.append((
key.replace(prefix, '').encode(),
'string' in value
))
cdef size_t key_num = len(key_fields)
        cdef char **keys = <char**> calloc(key_num, sizeof(char*))
        cdef char *str_key = <char*> calloc(key_num, sizeof(char))
for idx, item in enumerate(key_fields):
key, is_str = item
keys[idx] = key
str_key[idx] = is_str
document = Document(self, external=True)
document.obj = obj
for key, value in query.items():
if not isinstance(key, str):
raise BadQuery("Bad key. Key must be str %r %r" % (
key, type(key)
))
if isinstance(value, int):
document.set_int(key, value)
elif isinstance(value, bytes):
document.set_string(key, value)
elif isinstance(value, str):
document.set_string(key, value.encode())
else:
raise BadQuery(
"Bad value. Value must be bytes or int not %r %r" % (
value, type(value)
)
)
document.obj = NULL
cdef void* tx
cdef void* cursor
with nogil:
cursor = sp_cursor(self.env.env)
tx = sp_begin(self.env.env)
if tx == NULL or cursor == NULL:
self.__check_error(-1)
cdef size_t result = 0
cdef void *rm_obj
cdef char* str_v
cdef int64_t int_v
cdef int nlen
with nogil:
while True:
obj = sp_get(cursor, obj)
if obj == NULL:
                    if cursor != NULL:
sp_destroy(cursor)
break
rm_obj = sp_document(self.db)
for i in range(key_num):
k = keys[i]
if str_key[i]:
str_v = <char *>sp_getstring(obj, keys[i], &nlen)
sp_setstring(rm_obj, keys[i], str_v, nlen)
nlen = 0
else:
int_v = sp_getint(obj, keys[i])
sp_setint(rm_obj, keys[i], int_v)
str_v = b''
int_v = 0
sp_delete(tx, rm_obj)
result += 1
sp_commit(tx)
free(str_key)
free(keys)
return result
cdef class Cursor:
cdef readonly Environment env
cdef readonly Database db
cdef readonly dict query
def __raise_error(self):
try:
error = self.env.get_string(
'sophia.error'
).decode('utf-8', 'ignore')
except KeyError:
error = 'unknown error occurred.'
raise SophiaError(error)
def __cinit__(self, Environment env, dict query, Database db):
self.db = db
self.env = env
self.query = query
def __init__(self, Environment env, dict query, Database db):
self.query.setdefault('order', '>=')
if self.query['order'] not in ('>=', '<=', '>', '<'):
raise ValueError('Invalid order')
def __iter__(self):
document = Document(self.db, external=True)
cdef void* obj
with nogil:
obj = sp_document(self.db.db)
if obj == NULL:
self.__raise_error()
cdef void* cursor
with nogil:
cursor = sp_cursor(self.env.env)
if not cursor:
self.__raise_error()
document.obj = obj
for key, value in self.query.items():
if not isinstance(key, str):
raise BadQuery("Bad key. Key must be str %r %r" % (
key, type(key)
))
if isinstance(value, int):
document.set_int(key, value)
elif isinstance(value, bytes):
document.set_string(key, value)
elif isinstance(value, str):
document.set_string(key, value.encode())
else:
raise BadQuery(
"Bad value. Value must be bytes or int not %r %r" % (
value, type(value)
)
)
try:
while True:
with nogil:
obj = sp_get(cursor, obj)
if obj == NULL:
raise StopIteration
else:
document.obj = obj
yield document
document.obj = NULL
finally:
            if cursor != NULL:
with nogil:
sp_destroy(cursor)
cdef class Document:
cdef void* obj
cdef readonly Database db
cdef char external
cdef readonly list __refs
cdef readonly bool readonly
def __check_closed(self):
if self.closed:
raise DocumentClosed
if self.db.env.is_closed:
raise SophiaClosed
def __cinit__(self, Database db, external=False, readonly=False):
self.db = db
self.external = 1 if external else 0
self.__refs = []
self.readonly = readonly
if not self.external:
with nogil:
self.obj = sp_document(db.db)
if self.obj == NULL:
self.__check_error(-1)
def __dealloc__(self):
        if self.obj != NULL and not self.external:
with nogil:
sp_destroy(self.obj)
self.__refs[:] = []
self.obj = NULL
@property
def closed(self) -> bool:
return self.obj == NULL
def __check_error(self, int rc):
        if rc != -1:
return
try:
error = self.db.env.get_string(
'sophia.error'
).decode('utf-8', 'ignore')
except KeyError:
            error = 'unknown error occurred.'
raise SophiaError(error)
def get_string(self, str key) -> bytes:
self.__check_closed()
cdef char* buf
cdef int nlen
cdef bytes bkey
cdef cstring ckey = cstring.from_string(key)
with nogil:
buf = <char *>sp_getstring(self.obj, ckey.c_str, &nlen)
if buf == NULL:
raise KeyError('Key %r not found in the document' % key)
cdef bytes value = buf[:nlen]
return value
def get_int(self, str key) -> int:
self.__check_closed()
cdef cstring ckey = cstring.from_string(key)
cdef int64_t result
with nogil:
result = sp_getint(self.obj, ckey.c_str)
return result
def set_string(self, str key, bytes value) -> int:
if self.readonly:
raise RuntimeError('read-only document')
self.__check_closed()
cdef int rc
cdef cstring ckey = cstring.from_string(key)
cdef cstring cvalue = cstring(value)
with nogil:
rc = sp_setstring(self.obj, ckey.c_str, cvalue.c_str, cvalue.size)
self.__check_error(rc)
self.__refs.append(ckey)
self.__refs.append(cvalue)
return rc
def set_int(self, str key, int value) -> int:
if self.readonly:
raise RuntimeError('read-only document')
self.__check_closed()
cdef int rc
cdef cstring ckey = cstring.from_string(key)
cdef int64_t cvalue = value
with nogil:
rc = sp_setint(self.obj, ckey.c_str, cvalue)
return self.__check_error(rc)
# -*- coding: utf-8 -*-
# distutils: language=c
# cython: initializedcheck=False
# cython: nonecheck=False
# cython: overflowcheck=False
# cython: boundscheck=False
from myawesomelib.types cimport data_t
cimport numpy as cnp
cimport libc.math
cdef data_t _sigmoid(data_t x) nogil:
return 1. / (1. + libc.math.exp(-x))
cdef data_t _logit(data_t x) nogil:
return libc.math.log(x / (1. - x))
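# Note: _logit is the inverse of _sigmoid on (0, 1), so
# _logit(_sigmoid(x)) == x up to floating-point error.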
# cython: infer_types=True, profile=True
from typing import Any, Callable, Dict, Iterable
import srsly
from cpython.exc cimport PyErr_SetFromErrno
from libc.stdint cimport int32_t, int64_t
from libc.stdio cimport fclose, feof, fopen, fread, fseek, fwrite
from libcpp.vector cimport vector
from preshed.maps cimport PreshMap
import warnings
from pathlib import Path
from ..tokens import Span
from ..typedefs cimport hash_t
from .. import util
from ..errors import Errors, Warnings
from ..util import SimpleFrozenList, ensure_path
from ..vocab cimport Vocab
from .kb cimport KnowledgeBase
from .candidate import Candidate as Candidate
cdef class InMemoryLookupKB(KnowledgeBase):
"""An `InMemoryLookupKB` instance stores unique identifiers for entities
and their textual aliases, to support entity linking of named entities to
real-world concepts.
DOCS: https://spacy.io/api/inmemorylookupkb
"""
def __init__(self, Vocab vocab, entity_vector_length):
"""Create an InMemoryLookupKB."""
super().__init__(vocab, entity_vector_length)
self._entry_index = PreshMap()
self._alias_index = PreshMap()
self._create_empty_vectors(dummy_hash=self.vocab.strings[""])
def _initialize_entities(self, int64_t nr_entities):
self._entry_index = PreshMap(nr_entities + 1)
self._entries = entry_vec(nr_entities + 1)
def _initialize_vectors(self, int64_t nr_entities):
self._vectors_table = float_matrix(nr_entities + 1)
def _initialize_aliases(self, int64_t nr_aliases):
self._alias_index = PreshMap(nr_aliases + 1)
self._aliases_table = alias_vec(nr_aliases + 1)
def is_empty(self):
return len(self) == 0
def __len__(self):
return self.get_size_entities()
def get_size_entities(self):
return len(self._entry_index)
def get_entity_strings(self):
return [self.vocab.strings[x] for x in self._entry_index]
def get_size_aliases(self):
return len(self._alias_index)
def get_alias_strings(self):
return [self.vocab.strings[x] for x in self._alias_index]
def add_entity(self, str entity, float freq, vector[float] entity_vector):
"""
Add an entity to the KB, optionally specifying its log probability
based on corpus frequency.
Return the hash of the entity ID/name at the end.
"""
cdef hash_t entity_hash = self.vocab.strings.add(entity)
# Return if this entity was added before
if entity_hash in self._entry_index:
warnings.warn(Warnings.W018.format(entity=entity))
return
# Raise an error if the provided entity vector is not of the correct length
        if len(entity_vector) != self.entity_vector_length:
raise ValueError(
Errors.E141.format(
found=len(entity_vector), required=self.entity_vector_length
)
)
vector_index = self.c_add_vector(entity_vector=entity_vector)
new_index = self.c_add_entity(
entity_hash=entity_hash,
freq=freq,
vector_index=vector_index,
feats_row=-1
) # Features table currently not implemented
self._entry_index[entity_hash] = new_index
return entity_hash
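    # Usage sketch (hedged): the vector length must match the KB's
    # entity_vector_length fixed at construction time, e.g.
    #
    #   kb.add_entity("Q42", 12.0, [0.0] * kb.entity_vector_length)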
cpdef set_entities(self, entity_list, freq_list, vector_list):
        if len(entity_list) != len(freq_list) or len(entity_list) != len(vector_list):
raise ValueError(Errors.E140)
nr_entities = len(set(entity_list))
self._initialize_entities(nr_entities)
self._initialize_vectors(nr_entities)
i = 0
cdef KBEntryC entry
cdef hash_t entity_hash
while i < len(entity_list):
# only process this entity if its unique ID hadn't been added before
entity_hash = self.vocab.strings.add(entity_list[i])
if entity_hash in self._entry_index:
warnings.warn(Warnings.W018.format(entity=entity_list[i]))
else:
entity_vector = vector_list[i]
                if len(entity_vector) != self.entity_vector_length:
raise ValueError(
Errors.E141.format(
found=len(entity_vector),
required=self.entity_vector_length
)
)
entry.entity_hash = entity_hash
entry.freq = freq_list[i]
self._vectors_table[i] = entity_vector
entry.vector_index = i
entry.feats_row = -1 # Features table currently not implemented
self._entries[i+1] = entry
self._entry_index[entity_hash] = i+1
i += 1
def contains_entity(self, str entity):
cdef hash_t entity_hash = self.vocab.strings.add(entity)
return entity_hash in self._entry_index
def contains_alias(self, str alias):
cdef hash_t alias_hash = self.vocab.strings.add(alias)
return alias_hash in self._alias_index
def add_alias(self, str alias, entities, probabilities):
"""
        For a given alias, add its potential entities and prior probabilities to the KB.
Return the alias_hash at the end
"""
if alias is None or len(alias) == 0:
raise ValueError(Errors.E890.format(alias=alias))
previous_alias_nr = self.get_size_aliases()
# Throw an error if the length of entities and probabilities are not the same
if not len(entities) == len(probabilities):
raise ValueError(
Errors.E132.format(
alias=alias,
entities_length=len(entities),
probabilities_length=len(probabilities))
)
# Throw an error if the probabilities sum up to more than 1 (allow for
# some rounding errors)
prob_sum = sum(probabilities)
if prob_sum > 1.00001:
raise ValueError(Errors.E133.format(alias=alias, sum=prob_sum))
cdef hash_t alias_hash = self.vocab.strings.add(alias)
# Check whether this alias was added before
if alias_hash in self._alias_index:
warnings.warn(Warnings.W017.format(alias=alias))
return
cdef vector[int64_t] entry_indices
cdef vector[float] probs
for entity, prob in zip(entities, probabilities):
entity_hash = self.vocab.strings[entity]
if entity_hash not in self._entry_index:
raise ValueError(Errors.E134.format(entity=entity))
entry_index = <int64_t>self._entry_index.get(entity_hash)
entry_indices.push_back(int(entry_index))
probs.push_back(float(prob))
new_index = self.c_add_aliases(
alias_hash=alias_hash, entry_indices=entry_indices, probs=probs
)
self._alias_index[alias_hash] = new_index
        if previous_alias_nr + 1 != self.get_size_aliases():
raise RuntimeError(Errors.E891.format(alias=alias))
return alias_hash
    def append_alias(
self, str alias, str entity, float prior_prob, ignore_warnings=False
):
"""
For an alias already existing in the KB, extend its potential entities
with one more.
Throw a warning if either the alias or the entity is unknown,
or when the combination is already previously recorded.
Throw an error if this entity+prior prob would exceed the sum of 1.
For efficiency, it's best to use the method `add_alias` as much as
possible instead of this one.
"""
# Check if the alias exists in the KB
cdef hash_t alias_hash = self.vocab.strings[alias]
if alias_hash not in self._alias_index:
raise ValueError(Errors.E176.format(alias=alias))
# Check if the entity exists in the KB
cdef hash_t entity_hash = self.vocab.strings[entity]
if entity_hash not in self._entry_index:
raise ValueError(Errors.E134.format(entity=entity))
entry_index = <int64_t>self._entry_index.get(entity_hash)
# Throw an error if the prior probabilities (including the new one)
# sum up to more than 1
alias_index = <int64_t>self._alias_index.get(alias_hash)
alias_entry = self._aliases_table[alias_index]
current_sum = sum([p for p in alias_entry.probs])
new_sum = current_sum + prior_prob
if new_sum > 1.00001:
raise ValueError(Errors.E133.format(alias=alias, sum=new_sum))
entry_indices = alias_entry.entry_indices
is_present = False
for i in range(entry_indices.size()):
if entry_indices[i] == int(entry_index):
is_present = True
if is_present:
if not ignore_warnings:
warnings.warn(Warnings.W024.format(entity=entity, alias=alias))
else:
entry_indices.push_back(int(entry_index))
alias_entry.entry_indices = entry_indices
probs = alias_entry.probs
probs.push_back(float(prior_prob))
alias_entry.probs = probs
self._aliases_table[alias_index] = alias_entry
def get_candidates(self, mention: Span) -> Iterable[Candidate]:
return self.get_alias_candidates(mention.text) # type: ignore
def get_alias_candidates(self, str alias) -> Iterable[Candidate]:
"""
Return candidate entities for an alias. Each candidate defines the
entity, the original alias, and the prior probability of that alias
resolving to that entity.
        If the alias is not known in the KB, an empty list is returned.
"""
cdef hash_t alias_hash = self.vocab.strings[alias]
if alias_hash not in self._alias_index:
return []
alias_index = <int64_t>self._alias_index.get(alias_hash)
alias_entry = self._aliases_table[alias_index]
return [Candidate(kb=self,
entity_hash=self._entries[entry_index].entity_hash,
entity_freq=self._entries[entry_index].freq,
entity_vector=self._vectors_table[
self._entries[entry_index].vector_index
],
alias_hash=alias_hash,
prior_prob=prior_prob)
for (entry_index, prior_prob) in zip(
alias_entry.entry_indices, alias_entry.probs
)
if entry_index!= 0]
def get_vector(self, str entity):
cdef hash_t entity_hash = self.vocab.strings[entity]
# Return an empty list if this entity is unknown in this KB
if entity_hash not in self._entry_index:
return [0] * self.entity_vector_length
entry_index = self._entry_index[entity_hash]
return self._vectors_table[self._entries[entry_index].vector_index]
def get_prior_prob(self, str entity, str alias):
""" Return the prior probability of a given alias being linked to a
given entity, or return 0.0 when this combination is not known in the
knowledge base."""
cdef hash_t alias_hash = self.vocab.strings[alias]
cdef hash_t entity_hash = self.vocab.strings[entity]
if entity_hash not in self._entry_index or alias_hash not in self._alias_index:
return 0.0
alias_index = <int64_t>self._alias_index.get(alias_hash)
entry_index = self._entry_index[entity_hash]
alias_entry = self._aliases_table[alias_index]
for (entry_index, prior_prob) in zip(
alias_entry.entry_indices, alias_entry.probs
):
if self._entries[entry_index].entity_hash == entity_hash:
return prior_prob
return 0.0
def to_bytes(self, **kwargs):
"""Serialize the current state to a binary string.
"""
def serialize_header():
header = (
self.get_size_entities(),
self.get_size_aliases(),
self.entity_vector_length
)
return srsly.json_dumps(header)
def serialize_entries():
i = 1
tuples = []
for entry_hash, entry_index in sorted(
self._entry_index.items(), key=lambda x: x[1]
):
entry = self._entries[entry_index]
assert entry.entity_hash == entry_hash
assert entry_index == i
tuples.append((entry.entity_hash, entry.freq, entry.vector_index))
i = i + 1
return srsly.json_dumps(tuples)
def serialize_aliases():
i = 1
headers = []
indices_lists = []
probs_lists = []
for alias_hash, alias_index in sorted(
self._alias_index.items(), key=lambda x: x[1]
):
alias = self._aliases_table[alias_index]
assert alias_index == i
candidate_length = len(alias.entry_indices)
headers.append((alias_hash, candidate_length))
indices_lists.append(alias.entry_indices)
probs_lists.append(alias.probs)
i = i + 1
headers_dump = srsly.json_dumps(headers)
indices_dump = srsly.json_dumps(indices_lists)
probs_dump = srsly.json_dumps(probs_lists)
return srsly.json_dumps((headers_dump, indices_dump, probs_dump))
serializers = {
"header": serialize_header,
"entity_vectors": lambda: srsly.json_dumps(self._vectors_table),
"entries": serialize_entries,
"aliases": serialize_aliases,
}
return util.to_bytes(serializers, [])
def from_bytes(self, bytes_data, *, exclude=tuple()):
"""Load state from a binary string.
"""
def deserialize_header(b):
header = srsly.json_loads(b)
nr_entities = header[0]
nr_aliases = header[1]
entity_vector_length = header[2]
self._initialize_entities(nr_entities)
self._initialize_vectors(nr_entities)
self._initialize_aliases(nr_aliases)
self.entity_vector_length = entity_vector_length
def deserialize_vectors(b):
self._vectors_table = srsly.json_loads(b)
def deserialize_entries(b):
cdef KBEntryC entry
tuples = srsly.json_loads(b)
i = 1
for (entity_hash, freq, vector_index) in tuples:
entry.entity_hash = entity_hash
entry.freq = freq
entry.vector_index = vector_index
entry.feats_row = -1 # Features table currently not implemented
self._entries[i] = entry
self._entry_index[entity_hash] = i
i += 1
def deserialize_aliases(b):
cdef AliasC alias
i = 1
all_data = srsly.json_loads(b)
headers = srsly.json_loads(all_data[0])
indices = srsly.json_loads(all_data[1])
probs = srsly.json_loads(all_data[2])
            for header, entry_idx_list, prob_list in zip(headers, indices, probs):
                alias_hash, _candidate_length = header
                alias.entry_indices = entry_idx_list
                alias.probs = prob_list
self._aliases_table[i] = alias
self._alias_index[alias_hash] = i
i += 1
setters = {
"header": deserialize_header,
"entity_vectors": deserialize_vectors,
"entries": deserialize_entries,
"aliases": deserialize_aliases,
}
util.from_bytes(bytes_data, setters, exclude)
return self
def to_disk(self, path, exclude: Iterable[str] = SimpleFrozenList()):
path = ensure_path(path)
if not path.exists():
path.mkdir(parents=True)
if not path.is_dir():
raise ValueError(Errors.E928.format(loc=path))
serialize = {}
serialize["contents"] = lambda p: self.write_contents(p)
serialize["strings.json"] = lambda p: self.vocab.strings.to_disk(p)
util.to_disk(path, serialize, exclude)
def from_disk(self, path, exclude: Iterable[str] = SimpleFrozenList()):
path = ensure_path(path)
if not path.exists():
raise ValueError(Errors.E929.format(loc=path))
if not path.is_dir():
raise ValueError(Errors.E928.format(loc=path))
deserialize: Dict[str, Callable[[Any], Any]] = {}
deserialize["contents"] = lambda p: self.read_contents(p)
deserialize["strings.json"] = lambda p: self.vocab.strings.from_disk(p)
util.from_disk(path, deserialize, exclude)
def write_contents(self, file_path):
        cdef Writer writer = Writer(file_path)
writer.write_header(self.get_size_entities(), self.entity_vector_length)
# dumping the entity vectors in their original order
i = 0
for entity_vector in self._vectors_table:
for element in entity_vector:
writer.write_vector_element(element)
i = i+1
# dumping the entry records in the order in which they are in the
# _entries vector.
# index 0 is a dummy object not stored in the _entry_index and can
# be ignored.
i = 1
for entry_hash, entry_index in sorted(
self._entry_index.items(), key=lambda x: x[1]
):
entry = self._entries[entry_index]
assert entry.entity_hash == entry_hash
assert entry_index == i
writer.write_entry(entry.entity_hash, entry.freq, entry.vector_index)
i = i+1
writer.write_alias_length(self.get_size_aliases())
# dumping the aliases in the order in which they are in the _alias_index vector.
# index 0 is a dummy object not stored in the _aliases_table and can be ignored.
i = 1
for alias_hash, alias_index in sorted(
self._alias_index.items(), key=lambda x: x[1]
):
alias = self._aliases_table[alias_index]
assert alias_index == i
candidate_length = len(alias.entry_indices)
writer.write_alias_header(alias_hash, candidate_length)
for j in range(0, candidate_length):
writer.write_alias(alias.entry_indices[j], alias.probs[j])
i = i+1
writer.close()
def read_contents(self, file_path):
cdef hash_t entity_hash
cdef hash_t alias_hash
cdef int64_t entry_index
cdef float freq, prob
cdef int32_t vector_index
cdef KBEntryC entry
cdef AliasC alias
cdef float vector_element
cdef Reader reader = Reader(file_path)
# STEP 0: load header and initialize KB
cdef int64_t nr_entities
cdef int64_t entity_vector_length
reader.read_header(&nr_entities, &entity_vector_length)
self._initialize_entities(nr_entities)
self._initialize_vectors(nr_entities)
self.entity_vector_length = entity_vector_length
# STEP 1: load entity vectors
cdef int i = 0
cdef int j = 0
while i < nr_entities:
entity_vector = float_vec(entity_vector_length)
j = 0
while j < entity_vector_length:
reader.read_vector_element(&vector_element)
entity_vector[j] = vector_element
j = j+1
self._vectors_table[i] = entity_vector
i = i+1
# STEP 2: load entities
# we assume that the entity data was written in sequence
# index 0 is a dummy object not stored in the _entry_index and can be ignored.
i = 1
while i <= nr_entities:
reader.read_entry(&entity_hash, &freq, &vector_index)
entry.entity_hash = entity_hash
entry.freq = freq
entry.vector_index = vector_index
entry.feats_row = -1 # Features table currently not implemented
self._entries[i] = entry
self._entry_index[entity_hash] = i
i += 1
# check that all entities were read in properly
assert nr_entities == self.get_size_entities()
# STEP 3: load aliases
cdef int64_t nr_aliases
reader.read_alias_length(&nr_aliases)
self._initialize_aliases(nr_aliases)
cdef int64_t nr_candidates
cdef vector[int64_t] entry_indices
cdef vector[float] probs
i = 1
# we assume the alias data was written in sequence
# index 0 is a dummy object not stored in the _entry_index and can be ignored.
while i <= nr_aliases:
reader.read_alias_header(&alias_hash, &nr_candidates)
entry_indices = vector[int64_t](nr_candidates)
probs = vector[float](nr_candidates)
for j in range(0, nr_candidates):
reader.read_alias(&entry_index, &prob)
entry_indices[j] = entry_index
probs[j] = prob
alias.entry_indices = entry_indices
alias.probs = probs
self._aliases_table[i] = alias
self._alias_index[alias_hash] = i
i += 1
# check that all aliases were read in properly
assert nr_aliases == self.get_size_aliases()
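# On-disk layout written by Writer and consumed by Reader, in order:
#   1. header: nr_entries (int64), entity_vector_length (int64)
#   2. nr_entries * entity_vector_length vector elements (C float)
#   3. nr_entries entries: entity_hash (hash_t), freq (float), vector_index (int32)
#   4. nr_aliases (int64), then per alias: alias_hash (hash_t),
#      candidate_length (int64), and that many (entry_index, prob) pairs.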
cdef class Writer:
def __init__(self, path):
assert isinstance(path, Path)
content = bytes(path)
cdef bytes bytes_loc = content.encode('utf8') \
if type(content) == str else content
self._fp = fopen(<char*>bytes_loc, 'wb')
if not self._fp:
raise IOError(Errors.E146.format(path=path))
fseek(self._fp, 0, 0)
def close(self):
cdef size_t status = fclose(self._fp)
assert status == 0
cdef int write_header(
self, int64_t nr_entries, int64_t entity_vector_length
) except -1:
self._write(&nr_entries, sizeof(nr_entries))
self._write(&entity_vector_length, sizeof(entity_vector_length))
cdef int write_vector_element(self, float element) except -1:
self._write(&element, sizeof(element))
cdef int write_entry(
self, hash_t entry_hash, float entry_freq, int32_t vector_index
) except -1:
self._write(&entry_hash, sizeof(entry_hash))
self._write(&entry_freq, sizeof(entry_freq))
self._write(&vector_index, sizeof(vector_index))
# Features table currently not implemented and not written to file
cdef int write_alias_length(self, int64_t alias_length) except -1:
self._write(&alias_length, sizeof(alias_length))
cdef int write_alias_header(
self, hash_t alias_hash, int64_t candidate_length
) except -1:
self._write(&alias_hash, sizeof(alias_hash))
self._write(&candidate_length, sizeof(candidate_length))
cdef int write_alias(self, int64_t entry_index, float prob) except -1:
self._write(&entry_index, sizeof(entry_index))
self._write(&prob, sizeof(prob))
cdef int _write(self, void* value, size_t size) except -1:
status = fwrite(value, size, 1, self._fp)
assert status == 1, status
cdef class Reader:
def __init__(self, path):
content = bytes(path)
cdef bytes bytes_loc = content.encode('utf8') \
if type(content) == str else content
self._fp = fopen(<char*>bytes_loc, 'rb')
if not self._fp:
PyErr_SetFromErrno(IOError)
fseek(self._fp, 0, 0) # this can be 0 if there is no header
def __dealloc__(self):
fclose(self._fp)
cdef int read_header(
self, int64_t* nr_entries, int64_t* entity_vector_length
) except -1:
status = self._read(nr_entries, sizeof(int64_t))
if status < 1:
if feof(self._fp):
return 0 # end of file
raise IOError(Errors.E145.format(param="header"))
status = self._read(entity_vector_length, sizeof(int64_t))
if status < 1:
if feof(self._fp):
return 0 # end of file
raise IOError(Errors.E145.format(param="vector length"))
cdef int read_vector_element(self, float* element) except -1:
status = self._read(element, sizeof(float))
if status < 1:
if feof(self._fp):
return 0 # end of file
raise IOError(Errors.E145.format(param="vector element"))
cdef int read_entry(
self, hash_t* entity_hash, float* freq, int32_t* vector_index
) except -1:
status = self._read(entity_hash, sizeof(hash_t))
if status < 1:
if feof(self._fp):
return 0 # end of file
raise IOError(Errors.E145.format(param="entity hash"))
status = self._read(freq, sizeof(float))
if status < 1:
if feof(self._fp):
return 0 # end of file
raise IOError(Errors.E145.format(param="entity freq"))
status = self._read(vector_index, sizeof(int32_t))
if status < 1:
if feof(self._fp):
return 0 # end of file
raise IOError(Errors.E145.format(param="vector index"))
if feof(self._fp):
return 0
else:
return 1
cdef int read_alias_length(self, int64_t* alias_length) except -1:
status = self._read(alias_length, sizeof(int64_t))
if status < 1:
if feof(self._fp):
return 0 # end of file
raise IOError(Errors.E145.format(param | Cython |