/* | |
Copyright (c) 2008 - 2022, Ilan Schnell; All Rights Reserved | |
bitarray is published under the PSF license. | |
Author: Ilan Schnell | |
*/ | |
/* Compatibility with Visual Studio 2013 and older which don't support | |
the inline keyword in C (only in C++): use __inline instead. | |
(copied from pythoncapi_compat.h) */ | |
/* --- definitions specific to Python --- */ | |
/* Py_UNREACHABLE was introduced in Python 3.7 */ | |
/* the Py_MIN and Py_MAX macros were introduced in Python 3.3 */ | |
/* --- bitarrayobject --- */
/* The bitarray object.  Note that .ob_size (from PyObject_VAR_HEAD) is the
   buffer size in bytes, NOT the number of elements; the number of elements
   (bits) is .nbits. */
typedef struct {
    PyObject_VAR_HEAD
    char *ob_item; /* buffer holding the bits, 8 per byte */
    Py_ssize_t allocated; /* allocated buffer size (in bytes) */
    Py_ssize_t nbits; /* length of bitarray, i.e. number of elements */
    int endian; /* bit endianness of bitarray */
    int ob_exports; /* number of active buffer exports */
    PyObject *weakreflist; /* list of weak references */
    Py_buffer *buffer; /* set (only) when importing a foreign buffer */
    int readonly; /* non-zero when the buffer is read-only */
} bitarrayobject;
/* --- bit endianness --- */ | |
/* the endianness string */ | |
/* number of bytes necessary to store given bits */ | |
/* we're not using bitmask_table here, as it is actually slower */ | |
/* assert that .nbits is in agreement with .ob_size */ | |
/* assert byte index is in range */ | |
/* ------------ low level access to bits in bitarrayobject ------------- */ | |
static inline int | |
getbit(bitarrayobject *self, Py_ssize_t i) | |
{ | |
assert_nbits(self); | |
assert(0 <= i && i < self->nbits); | |
return self->ob_item[i >> 3] & BITMASK(self, i) ? 1 : 0; | |
} | |
static inline void | |
setbit(bitarrayobject *self, Py_ssize_t i, int vi) | |
{ | |
char *cp, mask; | |
assert_nbits(self); | |
assert(0 <= i && i < self->nbits); | |
assert(self->readonly == 0); | |
mask = BITMASK(self, i); | |
cp = self->ob_item + (i >> 3); | |
if (vi) | |
*cp |= mask; | |
else | |
*cp &= ~mask; | |
} | |
/* Single-bit masks, indexed by [endianness][bit position within a byte].
   Row 0 is little endian (bit 0 is the least significant bit), row 1 is
   big endian (bit 0 is the most significant bit).  NOTE(review): the
   BITMASK macro is defined elsewhere; presumably it indexes this table or
   computes the equivalent shift - confirm against its definition. */
static const char bitmask_table[2][8] = {
    {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80}, /* little endian */
    {0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01}, /* big endian */
};
/* A byte with n leading ones is ones_table[endian][n], where "leading"
   follows the bit order of the given endianness: the n lowest bits for
   little endian, the n highest bits for big endian.  Used to mask off the
   pad bits of the last buffer byte. */
static const char ones_table[2][8] = {
    {0x00, 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f}, /* little endian */
    {0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe}, /* big endian */
};
/* Return last byte in buffer with pad bits zeroed out. The the number of | |
bits in the bitarray must not be a multiple of 8. */ | |
static inline char | |
zeroed_last_byte(bitarrayobject *self) | |
{ | |
const int r = self->nbits % 8; /* index into mask table */ | |
assert(r > 0); | |
assert_nbits(self); | |
return ones_table[IS_BE(self)][r] & self->ob_item[Py_SIZE(self) - 1]; | |
} | |
/* Unless buffer is readonly, zero out pad bits. | |
Always return the number of pad bits - leave self->nbits unchanged */ | |
static inline int | |
setunused(bitarrayobject *self) | |
{ | |
const int r = self->nbits % 8; | |
if (r == 0) | |
return 0; | |
if (self->readonly == 0) | |
self->ob_item[Py_SIZE(self) - 1] = zeroed_last_byte(self); | |
return 8 - r; | |
} | |
/* bitcount_lookup[b] is the number of set bits in the byte value b.
   NOTE(review): B6 is a macro defined elsewhere in this file; presumably
   B6(n) expands to 64 comma-separated popcount entries offset by n, so
   the four invocations cover all 256 byte values - confirm against the
   macro definition. */
static const unsigned char bitcount_lookup[256] = {
    B6(0), B6(1), B6(1), B6(2)
};
/* adjust index a manner consistent with the handling of normal slices */ | |
static inline void | |
adjust_index(Py_ssize_t length, Py_ssize_t *i, Py_ssize_t step) | |
{ | |
if (*i < 0) { | |
*i += length; | |
if (*i < 0) | |
*i = (step < 0) ? -1 : 0; | |
} | |
else if (*i >= length) { | |
*i = (step < 0) ? length - 1 : length; | |
} | |
} | |
/* same as PySlice_AdjustIndices() which was introduced in Python 3.6.1 */ | |
static inline Py_ssize_t | |
adjust_indices(Py_ssize_t length, Py_ssize_t *start, Py_ssize_t *stop, | |
Py_ssize_t step) | |
{ | |
return PySlice_AdjustIndices(length, start, stop, step); | |
assert(step != 0); | |
adjust_index(length, start, step); | |
adjust_index(length, stop, step); | |
/* | |
a / b does integer division. If either a or b is negative, the result | |
depends on the compiler (rounding can go toward 0 or negative infinity). | |
Therefore, we are careful that both a and b are always positive. | |
*/ | |
if (step < 0) { | |
if (*stop < *start) | |
return (*start - *stop - 1) / (-step) + 1; | |
} | |
else { | |
if (*start < *stop) | |
return (*stop - *start - 1) / step + 1; | |
} | |
return 0; | |
} | |
/* adjust slice parameters such that step is always positive; produces | |
simpler loops over elements when their order is irrelevant */ | |
static inline void | |
adjust_step_positive(Py_ssize_t slicelength, | |
Py_ssize_t *start, Py_ssize_t *stop, Py_ssize_t *step) | |
{ | |
if (*step < 0) { | |
*stop = *start + 1; | |
*start = *stop + *step * (slicelength - 1) - 1; | |
*step = -(*step); | |
} | |
assert(*start >= 0 && *stop >= 0 && *step > 0 && slicelength >= 0); | |
/* slicelength == 0 implies stop <= start */ | |
assert(slicelength != 0 || *stop <= *start); | |
/* step == 1 and slicelength != 0 implies stop - start == slicelength */ | |
assert(*step != 1 || slicelength == 0 || *stop - *start == slicelength); | |
} | |
/* Interpret a PyObject (usually PyLong or PyBool) as a bit, return 0 or 1. | |
On error, return -1 and set error message. */ | |
static inline int | |
pybit_as_int(PyObject *value) | |
{ | |
Py_ssize_t x; | |
x = PyNumber_AsSsize_t(value, NULL); | |
if (x == -1 && PyErr_Occurred()) | |
return -1; | |
if (x < 0 || x > 1) { | |
PyErr_Format(PyExc_ValueError, "bit must be 0 or 1, got %zd", x); | |
return -1; | |
} | |
return (int) x; | |
} | |