repo
stringlengths 1
152
⌀ | file
stringlengths 15
205
| code
stringlengths 0
41.6M
| file_length
int64 0
41.6M
| avg_line_length
float64 0
1.81M
| max_line_length
int64 0
12.7M
| extension_type
stringclasses 90
values |
---|---|---|---|---|---|---|
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx512f.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_avx512f.h"
#include "memcpy_memset.h"
/*
 * memmove_mov32x64b -- copy 32 cache lines (2 KiB) from src to dest with
 * temporal AVX-512 stores, then flush each written line.
 *
 * All 32 lines are read before any is written, so the copy stays correct
 * when the source and destination ranges overlap inside this window.
 * src may be unaligned (unaligned loads); dest must be 64-byte aligned
 * (aligned stores), which the callers guarantee via their prologue.
 */
static force_inline void
memmove_mov32x64b(char *dest, const char *src)
{
	__m512i line[32];
	int i;

	/* phase 1: read everything */
	for (i = 0; i < 32; ++i)
		line[i] = _mm512_loadu_si512((const __m512i *)src + i);

	/* phase 2: write everything */
	for (i = 0; i < 32; ++i)
		_mm512_store_si512((__m512i *)dest + i, line[i]);

	/* phase 3: flush each written cache line (pmem persistence helper) */
	for (i = 0; i < 32; ++i)
		flush64b(dest + (size_t)i * 64);
}
/*
 * memmove_mov16x64b -- copy 16 cache lines (1 KiB) from src to dest with
 * temporal AVX-512 stores, then flush each written line.
 *
 * Loads all 16 lines before storing any, so overlapping src/dest ranges
 * within this window are handled correctly.  dest must be 64-byte
 * aligned (callers guarantee this); src may be unaligned.
 */
static force_inline void
memmove_mov16x64b(char *dest, const char *src)
{
	__m512i line[16];
	int i;

	for (i = 0; i < 16; ++i)
		line[i] = _mm512_loadu_si512((const __m512i *)src + i);

	for (i = 0; i < 16; ++i)
		_mm512_store_si512((__m512i *)dest + i, line[i]);

	/* flush the written lines (pmem persistence helper) */
	for (i = 0; i < 16; ++i)
		flush64b(dest + (size_t)i * 64);
}
/*
 * memmove_mov8x64b -- copy 8 cache lines (512 B) from src to dest with
 * temporal AVX-512 stores, then flush each written line.
 *
 * Read-all-then-write-all keeps the copy correct for overlapping
 * ranges.  dest must be 64-byte aligned; src may be unaligned.
 */
static force_inline void
memmove_mov8x64b(char *dest, const char *src)
{
	__m512i line[8];
	int i;

	for (i = 0; i < 8; ++i)
		line[i] = _mm512_loadu_si512((const __m512i *)src + i);

	for (i = 0; i < 8; ++i)
		_mm512_store_si512((__m512i *)dest + i, line[i]);

	for (i = 0; i < 8; ++i)
		flush64b(dest + (size_t)i * 64);
}
/*
 * memmove_mov4x64b -- copy 4 cache lines (256 B) from src to dest with
 * temporal AVX-512 stores, then flush each written line.  All loads
 * happen before any store (overlap safety); dest must be 64-byte
 * aligned.
 */
static force_inline void
memmove_mov4x64b(char *dest, const char *src)
{
	__m512i line[4];
	int i;

	for (i = 0; i < 4; ++i)
		line[i] = _mm512_loadu_si512((const __m512i *)src + i);

	for (i = 0; i < 4; ++i)
		_mm512_store_si512((__m512i *)dest + i, line[i]);

	for (i = 0; i < 4; ++i)
		flush64b(dest + (size_t)i * 64);
}
/*
 * memmove_mov2x64b -- copy 2 cache lines (128 B) from src to dest with
 * temporal AVX-512 stores, then flush both lines.  Both loads precede
 * both stores (overlap safety); dest must be 64-byte aligned.
 */
static force_inline void
memmove_mov2x64b(char *dest, const char *src)
{
	__m512i lo = _mm512_loadu_si512((const __m512i *)src + 0);
	__m512i hi = _mm512_loadu_si512((const __m512i *)src + 1);

	_mm512_store_si512((__m512i *)dest + 0, lo);
	_mm512_store_si512((__m512i *)dest + 1, hi);

	flush64b(dest);
	flush64b(dest + 64);
}
/*
 * memmove_mov1x64b -- copy a single cache line (64 B) from src to dest
 * with one temporal AVX-512 store, then flush it.  dest must be 64-byte
 * aligned; src may be unaligned.
 */
static force_inline void
memmove_mov1x64b(char *dest, const char *src)
{
	__m512i line = _mm512_loadu_si512((const __m512i *)src);

	_mm512_store_si512((__m512i *)dest, line);

	flush64b(dest);
}
/*
 * memmove_mov_avx512f_fw -- forward (ascending-address) copy of len bytes
 * from src to dest using temporal AVX-512 stores.
 *
 * Layout: an unaligned head (copied via memmove_small_avx512f) brings dest
 * to a 64-byte boundary, then progressively smaller aligned bulk copies
 * (32/16/8/4/2/1 cache lines) consume the middle, and any sub-line tail
 * is again handled by memmove_small_avx512f.
 *
 * NOTE(review): flush64b/memmove_small_avx512f are project helpers defined
 * elsewhere; presumed to flush a cache line / do a small pmem-safe copy.
 */
static force_inline void
memmove_mov_avx512f_fw(char *dest, const char *src, size_t len)
{
/* bytes needed to reach the next 64-byte boundary of dest */
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
cnt = 64 - cnt;
/* head may be the whole job when len is tiny */
if (cnt > len)
cnt = len;
memmove_small_avx512f(dest, src, cnt);
dest += cnt;
src += cnt;
len -= cnt;
}
/* dest is now 64-byte aligned; drain in descending block sizes */
while (len >= 32 * 64) {
memmove_mov32x64b(dest, src);
dest += 32 * 64;
src += 32 * 64;
len -= 32 * 64;
}
if (len >= 16 * 64) {
memmove_mov16x64b(dest, src);
dest += 16 * 64;
src += 16 * 64;
len -= 16 * 64;
}
if (len >= 8 * 64) {
memmove_mov8x64b(dest, src);
dest += 8 * 64;
src += 8 * 64;
len -= 8 * 64;
}
if (len >= 4 * 64) {
memmove_mov4x64b(dest, src);
dest += 4 * 64;
src += 4 * 64;
len -= 4 * 64;
}
if (len >= 2 * 64) {
memmove_mov2x64b(dest, src);
dest += 2 * 64;
src += 2 * 64;
len -= 2 * 64;
}
if (len >= 1 * 64) {
memmove_mov1x64b(dest, src);
dest += 1 * 64;
src += 1 * 64;
len -= 1 * 64;
}
/* sub-cache-line tail */
if (len)
memmove_small_avx512f(dest, src, len);
}
/*
 * memmove_mov_avx512f_bw -- backward (descending-address) copy of len
 * bytes from src to dest using temporal AVX-512 stores; used when the
 * ranges overlap with dest inside src.
 *
 * Mirrors the forward variant: pointers start at the END of both ranges,
 * an unaligned head brings dest down to a 64-byte boundary, then aligned
 * bulk copies of decreasing size walk toward the start, and the final
 * sub-line remainder is copied last.
 */
static force_inline void
memmove_mov_avx512f_bw(char *dest, const char *src, size_t len)
{
/* work from one-past-the-end toward the start */
dest += len;
src += len;
/* distance back to the previous 64-byte boundary of dest */
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
if (cnt > len)
cnt = len;
dest -= cnt;
src -= cnt;
len -= cnt;
memmove_small_avx512f(dest, src, cnt);
}
/* dest is now 64-byte aligned; pointers are decremented BEFORE each
 * block copy so every helper writes the block it just stepped over */
while (len >= 32 * 64) {
dest -= 32 * 64;
src -= 32 * 64;
len -= 32 * 64;
memmove_mov32x64b(dest, src);
}
if (len >= 16 * 64) {
dest -= 16 * 64;
src -= 16 * 64;
len -= 16 * 64;
memmove_mov16x64b(dest, src);
}
if (len >= 8 * 64) {
dest -= 8 * 64;
src -= 8 * 64;
len -= 8 * 64;
memmove_mov8x64b(dest, src);
}
if (len >= 4 * 64) {
dest -= 4 * 64;
src -= 4 * 64;
len -= 4 * 64;
memmove_mov4x64b(dest, src);
}
if (len >= 2 * 64) {
dest -= 2 * 64;
src -= 2 * 64;
len -= 2 * 64;
memmove_mov2x64b(dest, src);
}
if (len >= 1 * 64) {
dest -= 1 * 64;
src -= 1 * 64;
len -= 1 * 64;
memmove_mov1x64b(dest, src);
}
/* remaining head of the buffers (len < 64 bytes at the very start) */
if (len)
memmove_small_avx512f(dest - len, src - len, len);
}
/*
 * EXPORTED_SYMBOL -- memmove entry point (name chosen by the including
 * translation unit).  Picks copy direction with an unsigned-wraparound
 * test: (uintptr_t)dest - (uintptr_t)src wraps to a huge value when
 * dest < src, so the forward path is taken both when dest precedes src
 * and when the gap is at least len (no overlap hazard); only a true
 * forward-overlap (src < dest < src + len) goes backward.
 */
void
EXPORTED_SYMBOL(char *dest, const char *src, size_t len)
{
	if ((uintptr_t)dest - (uintptr_t)src < len)
		memmove_mov_avx512f_bw(dest, src, len);
	else
		memmove_mov_avx512f_fw(dest, src, len);

	/* leave the SSE/AVX transition penalty behind us */
	avx_zeroupper();
}
| 12,825 | 30.131068 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_avx.h"
#include "memcpy_memset.h"
/*
 * memmove_mov8x64b -- copy 8 cache lines (512 B) from src to dest with
 * temporal AVX stores (two 32-byte ymm chunks per line), then flush each
 * written line.  All 16 loads happen before any store, so the copy is
 * correct for overlapping ranges.  dest must be 64-byte aligned.
 */
static force_inline void
memmove_mov8x64b(char *dest, const char *src)
{
	__m256i chunk[16];
	int i;

	for (i = 0; i < 16; ++i)
		chunk[i] = _mm256_loadu_si256((const __m256i *)src + i);

	for (i = 0; i < 16; ++i)
		_mm256_store_si256((__m256i *)dest + i, chunk[i]);

	/* one flush per 64-byte line, i.e. per pair of ymm stores */
	for (i = 0; i < 8; ++i)
		flush64b(dest + (size_t)i * 64);
}
/*
 * memmove_mov4x64b -- copy 4 cache lines (256 B) from src to dest with
 * temporal AVX stores (two ymm per line), then flush each written line.
 * Loads all 8 chunks before storing any (overlap safety); dest must be
 * 64-byte aligned.
 */
static force_inline void
memmove_mov4x64b(char *dest, const char *src)
{
	__m256i chunk[8];
	int i;

	for (i = 0; i < 8; ++i)
		chunk[i] = _mm256_loadu_si256((const __m256i *)src + i);

	for (i = 0; i < 8; ++i)
		_mm256_store_si256((__m256i *)dest + i, chunk[i]);

	for (i = 0; i < 4; ++i)
		flush64b(dest + (size_t)i * 64);
}
/*
 * memmove_mov2x64b -- copy 2 cache lines (128 B) from src to dest with
 * temporal AVX stores (two ymm per line), then flush both lines.  All
 * loads precede all stores (overlap safety); dest must be 64-byte
 * aligned.
 */
static force_inline void
memmove_mov2x64b(char *dest, const char *src)
{
	__m256i chunk[4];
	int i;

	for (i = 0; i < 4; ++i)
		chunk[i] = _mm256_loadu_si256((const __m256i *)src + i);

	for (i = 0; i < 4; ++i)
		_mm256_store_si256((__m256i *)dest + i, chunk[i]);

	flush64b(dest);
	flush64b(dest + 64);
}
/*
 * memmove_mov1x64b -- copy one cache line (64 B) from src to dest as two
 * 32-byte temporal AVX stores, then flush the line.  Both halves are
 * loaded before either is stored; dest must be 64-byte aligned.
 */
static force_inline void
memmove_mov1x64b(char *dest, const char *src)
{
	__m256i lo = _mm256_loadu_si256((const __m256i *)src + 0);
	__m256i hi = _mm256_loadu_si256((const __m256i *)src + 1);

	_mm256_store_si256((__m256i *)dest + 0, lo);
	_mm256_store_si256((__m256i *)dest + 1, hi);

	flush64b(dest);
}
/*
 * memmove_mov_avx_fw -- forward (ascending-address) copy of len bytes
 * from src to dest using temporal AVX stores.
 *
 * An unaligned head (via memmove_small_avx) brings dest to a 64-byte
 * boundary, then aligned bulk copies of 8/4/2/1 cache lines drain the
 * middle, and any sub-line tail goes back through memmove_small_avx.
 */
static force_inline void
memmove_mov_avx_fw(char *dest, const char *src, size_t len)
{
/* bytes needed to reach the next 64-byte boundary of dest */
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
cnt = 64 - cnt;
/* the head may cover the entire request for small len */
if (cnt > len)
cnt = len;
memmove_small_avx(dest, src, cnt);
dest += cnt;
src += cnt;
len -= cnt;
}
/* dest is now 64-byte aligned; drain in descending block sizes */
while (len >= 8 * 64) {
memmove_mov8x64b(dest, src);
dest += 8 * 64;
src += 8 * 64;
len -= 8 * 64;
}
if (len >= 4 * 64) {
memmove_mov4x64b(dest, src);
dest += 4 * 64;
src += 4 * 64;
len -= 4 * 64;
}
if (len >= 2 * 64) {
memmove_mov2x64b(dest, src);
dest += 2 * 64;
src += 2 * 64;
len -= 2 * 64;
}
if (len >= 1 * 64) {
memmove_mov1x64b(dest, src);
dest += 1 * 64;
src += 1 * 64;
len -= 1 * 64;
}
/* sub-cache-line tail */
if (len)
memmove_small_avx(dest, src, len);
}
/*
 * memmove_mov_avx_bw -- backward (descending-address) copy of len bytes
 * from src to dest using temporal AVX stores; used when dest overlaps
 * the tail of src.
 *
 * Pointers start at the END of both ranges; an unaligned head brings
 * dest down to a 64-byte boundary, then aligned bulk copies walk toward
 * the start, decrementing the pointers BEFORE each block copy.
 */
static force_inline void
memmove_mov_avx_bw(char *dest, const char *src, size_t len)
{
dest += len;
src += len;
/* distance back to the previous 64-byte boundary of dest */
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
if (cnt > len)
cnt = len;
dest -= cnt;
src -= cnt;
len -= cnt;
memmove_small_avx(dest, src, cnt);
}
while (len >= 8 * 64) {
dest -= 8 * 64;
src -= 8 * 64;
len -= 8 * 64;
memmove_mov8x64b(dest, src);
}
if (len >= 4 * 64) {
dest -= 4 * 64;
src -= 4 * 64;
len -= 4 * 64;
memmove_mov4x64b(dest, src);
}
if (len >= 2 * 64) {
dest -= 2 * 64;
src -= 2 * 64;
len -= 2 * 64;
memmove_mov2x64b(dest, src);
}
if (len >= 1 * 64) {
dest -= 1 * 64;
src -= 1 * 64;
len -= 1 * 64;
memmove_mov1x64b(dest, src);
}
/* remaining head of the buffers (len < 64 bytes at the very start) */
if (len)
memmove_small_avx(dest - len, src - len, len);
}
/*
 * EXPORTED_SYMBOL -- memmove entry point (name chosen by the including
 * translation unit).  The unsigned subtraction wraps to a huge value
 * when dest < src, so the forward path runs both when dest precedes src
 * and when the ranges are at least len apart; only a true forward
 * overlap (src < dest < src + len) takes the backward path.
 */
void
EXPORTED_SYMBOL(char *dest, const char *src, size_t len)
{
	if ((uintptr_t)dest - (uintptr_t)src < len)
		memmove_mov_avx_bw(dest, src, len);
	else
		memmove_mov_avx_fw(dest, src, len);

	/* avoid the SSE/AVX transition penalty in subsequent code */
	avx_zeroupper();
}
| 7,378 | 27.937255 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_avx512f.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef PMEM_MEMCPY_AVX512F_H
#define PMEM_MEMCPY_AVX512F_H
#include <stddef.h>
#include "memcpy_avx.h"
/*
 * memmove_small_avx512f -- small-copy helper for the AVX-512 memmove
 * variants (used for unaligned heads and sub-cache-line tails).
 * Delegates to the AVX implementation unchanged.
 */
static force_inline void
memmove_small_avx512f(char *dest, const char *src, size_t len)
{
/* We can't do better than AVX here. */
memmove_small_avx(dest, src, len);
}
#endif
| 1,886 | 38.3125 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx512f.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_avx512f.h"
#include "memcpy_memset.h"
#include "libpmem.h"
#include "valgrind_internal.h"
/*
 * memmove_movnt32x64b -- copy 32 cache lines (2 KiB) from src to dest
 * using non-temporal AVX-512 stores.
 *
 * All 32 lines are loaded before any store (overlap safety).  The
 * streaming stores bypass the cache, so no flush64b is needed; only
 * Valgrind is told about the writes.  dest must be 64-byte aligned.
 */
static force_inline void
memmove_movnt32x64b(char *dest, const char *src)
{
	__m512i line[32];
	int i;

	for (i = 0; i < 32; ++i)
		line[i] = _mm512_loadu_si512((const __m512i *)src + i);

	for (i = 0; i < 32; ++i)
		_mm512_stream_si512((__m512i *)dest + i, line[i]);

	VALGRIND_DO_FLUSH(dest, 32 * 64);
}
/*
 * memmove_movnt16x64b -- copy 16 cache lines (1 KiB) from src to dest
 * using non-temporal AVX-512 stores.  Loads all lines before storing
 * any (overlap safety); streaming stores need no explicit flush.
 * dest must be 64-byte aligned.
 */
static force_inline void
memmove_movnt16x64b(char *dest, const char *src)
{
	__m512i line[16];
	int i;

	for (i = 0; i < 16; ++i)
		line[i] = _mm512_loadu_si512((const __m512i *)src + i);

	for (i = 0; i < 16; ++i)
		_mm512_stream_si512((__m512i *)dest + i, line[i]);

	VALGRIND_DO_FLUSH(dest, 16 * 64);
}
/*
 * memmove_movnt8x64b -- copy 8 cache lines (512 B) from src to dest
 * using non-temporal AVX-512 stores.  Read-all-then-write-all keeps
 * overlapping ranges correct; dest must be 64-byte aligned.
 */
static force_inline void
memmove_movnt8x64b(char *dest, const char *src)
{
	__m512i line[8];
	int i;

	for (i = 0; i < 8; ++i)
		line[i] = _mm512_loadu_si512((const __m512i *)src + i);

	for (i = 0; i < 8; ++i)
		_mm512_stream_si512((__m512i *)dest + i, line[i]);

	VALGRIND_DO_FLUSH(dest, 8 * 64);
}
/*
 * memmove_movnt4x64b -- copy 4 cache lines (256 B) from src to dest
 * using non-temporal AVX-512 stores.  All loads precede all stores
 * (overlap safety); dest must be 64-byte aligned.
 */
static force_inline void
memmove_movnt4x64b(char *dest, const char *src)
{
	__m512i line[4];
	int i;

	for (i = 0; i < 4; ++i)
		line[i] = _mm512_loadu_si512((const __m512i *)src + i);

	for (i = 0; i < 4; ++i)
		_mm512_stream_si512((__m512i *)dest + i, line[i]);

	VALGRIND_DO_FLUSH(dest, 4 * 64);
}
/*
 * memmove_movnt2x64b -- copy 2 cache lines (128 B) from src to dest
 * using non-temporal AVX-512 stores.  Both loads precede both stores;
 * dest must be 64-byte aligned.
 */
static force_inline void
memmove_movnt2x64b(char *dest, const char *src)
{
	__m512i lo = _mm512_loadu_si512((const __m512i *)src + 0);
	__m512i hi = _mm512_loadu_si512((const __m512i *)src + 1);

	_mm512_stream_si512((__m512i *)dest + 0, lo);
	_mm512_stream_si512((__m512i *)dest + 1, hi);

	VALGRIND_DO_FLUSH(dest, 2 * 64);
}
/*
 * memmove_movnt1x64b -- copy one cache line (64 B) from src to dest
 * with a single non-temporal AVX-512 store.  dest must be 64-byte
 * aligned; src may be unaligned.
 */
static force_inline void
memmove_movnt1x64b(char *dest, const char *src)
{
	__m512i line = _mm512_loadu_si512((const __m512i *)src);

	_mm512_stream_si512((__m512i *)dest, line);

	VALGRIND_DO_FLUSH(dest, 64);
}
/*
 * memmove_movnt1x32b -- copy exactly 32 bytes with one non-temporal ymm
 * store (used for the power-of-two tail of a copy).  dest must be
 * 32-byte aligned for the streaming store.
 */
static force_inline void
memmove_movnt1x32b(char *dest, const char *src)
{
	/* local renamed from the misleading "zmm0" -- this is a ymm value */
	__m256i ymm = _mm256_loadu_si256((const __m256i *)src);

	_mm256_stream_si256((__m256i *)dest, ymm);

	VALGRIND_DO_FLUSH(dest, 32);
}
/*
 * memmove_movnt1x16b -- copy exactly 16 bytes with one non-temporal xmm
 * store (power-of-two tail case).  dest must be 16-byte aligned for the
 * streaming store.
 */
static force_inline void
memmove_movnt1x16b(char *dest, const char *src)
{
	/* local renamed from the misleading "ymm0" -- this is an xmm value */
	__m128i xmm = _mm_loadu_si128((const __m128i *)src);

	_mm_stream_si128((__m128i *)dest, xmm);

	VALGRIND_DO_FLUSH(dest, 16);
}
/*
 * memmove_movnt1x8b -- copy exactly 8 bytes with a non-temporal 64-bit
 * store (power-of-two tail case).  Both pointers are assumed suitably
 * aligned for the plain load and the streaming store.
 */
static force_inline void
memmove_movnt1x8b(char *dest, const char *src)
{
	long long qword = *(const long long *)src;

	_mm_stream_si64((long long *)dest, qword);

	VALGRIND_DO_FLUSH(dest, 8);
}
/*
 * memmove_movnt1x4b -- copy exactly 4 bytes with a non-temporal 32-bit
 * store (power-of-two tail case).  Both pointers are assumed suitably
 * aligned for the plain load and the streaming store.
 */
static force_inline void
memmove_movnt1x4b(char *dest, const char *src)
{
	int dword = *(const int *)src;

	_mm_stream_si32((int *)dest, dword);

	VALGRIND_DO_FLUSH(dest, 4);
}
/*
 * memmove_movnt_avx512f_fw -- forward (low-to-high address) memmove
 * using non-temporal AVX-512F stores. Aligns dest to a cache line with
 * a regular-store copy, then drains len with successively smaller
 * power-of-two non-temporal chunks.
 */
static force_inline void
memmove_movnt_avx512f_fw(char *dest, const char *src, size_t len)
{
	/* bytes needed to bring dest up to 64-byte alignment */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		/* unaligned head is copied with regular stores */
		memmove_small_avx512f(dest, src, cnt);
		dest += cnt;
		src += cnt;
		len -= cnt;
	}
	/* main loop: 2 KiB (32 cache lines) per iteration */
	while (len >= 32 * 64) {
		memmove_movnt32x64b(dest, src);
		dest += 32 * 64;
		src += 32 * 64;
		len -= 32 * 64;
	}
	/* then at most one pass each of 16/8/4/2/1 cache lines */
	if (len >= 16 * 64) {
		memmove_movnt16x64b(dest, src);
		dest += 16 * 64;
		src += 16 * 64;
		len -= 16 * 64;
	}
	if (len >= 8 * 64) {
		memmove_movnt8x64b(dest, src);
		dest += 8 * 64;
		src += 8 * 64;
		len -= 8 * 64;
	}
	if (len >= 4 * 64) {
		memmove_movnt4x64b(dest, src);
		dest += 4 * 64;
		src += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memmove_movnt2x64b(dest, src);
		dest += 2 * 64;
		src += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memmove_movnt1x64b(dest, src);
		dest += 1 * 64;
		src += 1 * 64;
		len -= 1 * 64;
	}
	if (len == 0)
		goto end;
	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32)
			memmove_movnt1x32b(dest, src);
		else if (len == 16)
			memmove_movnt1x16b(dest, src);
		else if (len == 8)
			memmove_movnt1x8b(dest, src);
		else if (len == 4)
			memmove_movnt1x4b(dest, src);
		else
			goto nonnt;
		goto end;
	}
	/* falls through here when len is a pow2 smaller than 4 */
nonnt:
	/* non-power-of-two tail: regular stores */
	memmove_small_avx512f(dest, src, len);
end:
	/* leave AVX state clean to avoid SSE/AVX transition penalties */
	avx_zeroupper();
}
/*
 * memmove_movnt_avx512f_bw -- backward (high-to-low address) memmove
 * using non-temporal AVX-512F stores; used when dest overlaps src from
 * above. Mirrors the forward variant: pointers start one past the end
 * and are decremented before each copy.
 */
static force_inline void
memmove_movnt_avx512f_bw(char *dest, const char *src, size_t len)
{
	dest += len;
	src += len;
	/* bytes above the highest 64-byte boundary inside the range */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		if (cnt > len)
			cnt = len;
		dest -= cnt;
		src -= cnt;
		len -= cnt;
		/* unaligned tail copied first, with regular stores */
		memmove_small_avx512f(dest, src, cnt);
	}
	/* main loop: 2 KiB (32 cache lines) per iteration, moving down */
	while (len >= 32 * 64) {
		dest -= 32 * 64;
		src -= 32 * 64;
		len -= 32 * 64;
		memmove_movnt32x64b(dest, src);
	}
	if (len >= 16 * 64) {
		dest -= 16 * 64;
		src -= 16 * 64;
		len -= 16 * 64;
		memmove_movnt16x64b(dest, src);
	}
	if (len >= 8 * 64) {
		dest -= 8 * 64;
		src -= 8 * 64;
		len -= 8 * 64;
		memmove_movnt8x64b(dest, src);
	}
	if (len >= 4 * 64) {
		dest -= 4 * 64;
		src -= 4 * 64;
		len -= 4 * 64;
		memmove_movnt4x64b(dest, src);
	}
	if (len >= 2 * 64) {
		dest -= 2 * 64;
		src -= 2 * 64;
		len -= 2 * 64;
		memmove_movnt2x64b(dest, src);
	}
	if (len >= 1 * 64) {
		dest -= 1 * 64;
		src -= 1 * 64;
		len -= 1 * 64;
		memmove_movnt1x64b(dest, src);
	}
	if (len == 0)
		goto end;
	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32) {
			dest -= 32;
			src -= 32;
			memmove_movnt1x32b(dest, src);
		} else if (len == 16) {
			dest -= 16;
			src -= 16;
			memmove_movnt1x16b(dest, src);
		} else if (len == 8) {
			dest -= 8;
			src -= 8;
			memmove_movnt1x8b(dest, src);
		} else if (len == 4) {
			dest -= 4;
			src -= 4;
			memmove_movnt1x4b(dest, src);
		} else {
			goto nonnt;
		}
		goto end;
	}
nonnt:
	/* non-power-of-two remainder: regular stores */
	dest -= len;
	src -= len;
	memmove_small_avx512f(dest, src, len);
end:
	/* leave AVX state clean to avoid SSE/AVX transition penalties */
	avx_zeroupper();
}
/*
 * EXPORTED_SYMBOL -- non-temporal AVX-512F memmove entry point; the
 * actual symbol name is injected by the file that includes this header.
 */
void
EXPORTED_SYMBOL(char *dest, const char *src, size_t len)
{
	/*
	 * Single-branch overlap test relying on unsigned wraparound:
	 * a forward copy is safe unless dest lies inside [src, src + len).
	 */
	if ((uintptr_t)dest - (uintptr_t)src >= len)
		memmove_movnt_avx512f_fw(dest, src, len);
	else
		memmove_movnt_avx512f_bw(dest, src, len);
	/* fence the non-temporal stores if the variant requires it */
	maybe_barrier();
}
| 13,191 | 28.446429 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_sse2.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memcpy_sse2.h"
#include "valgrind_internal.h"
/*
 * memmove_movnt4x64b -- copy 4 cache lines (256 B) with non-temporal
 * SSE2 stores; all 16 loads are issued before the first store.
 */
static force_inline void
memmove_movnt4x64b(char *dest, const char *src)
{
	__m128i x[16];
	int i;

	for (i = 0; i < 16; ++i)
		x[i] = _mm_loadu_si128((__m128i *)src + i);

	for (i = 0; i < 16; ++i)
		_mm_stream_si128((__m128i *)dest + i, x[i]);

	VALGRIND_DO_FLUSH(dest, 4 * 64);
}
/*
 * memmove_movnt2x64b -- copy 2 cache lines (128 B) with non-temporal
 * SSE2 stores; all 8 loads are issued before the first store.
 */
static force_inline void
memmove_movnt2x64b(char *dest, const char *src)
{
	__m128i x[8];
	int i;

	for (i = 0; i < 8; ++i)
		x[i] = _mm_loadu_si128((__m128i *)src + i);

	for (i = 0; i < 8; ++i)
		_mm_stream_si128((__m128i *)dest + i, x[i]);

	VALGRIND_DO_FLUSH(dest, 2 * 64);
}
/*
 * memmove_movnt1x64b -- copy one cache line (64 B) with non-temporal
 * SSE2 stores; all 4 loads are issued before the first store.
 */
static force_inline void
memmove_movnt1x64b(char *dest, const char *src)
{
	__m128i x[4];
	int i;

	for (i = 0; i < 4; ++i)
		x[i] = _mm_loadu_si128((__m128i *)src + i);

	for (i = 0; i < 4; ++i)
		_mm_stream_si128((__m128i *)dest + i, x[i]);

	VALGRIND_DO_FLUSH(dest, 64);
}
/*
 * memmove_movnt1x32b -- copy 32 bytes with two non-temporal SSE2
 * stores; both loads happen before either store.
 */
static force_inline void
memmove_movnt1x32b(char *dest, const char *src)
{
	__m128i lo = _mm_loadu_si128((__m128i *)src);
	__m128i hi = _mm_loadu_si128((__m128i *)src + 1);

	_mm_stream_si128((__m128i *)dest, lo);
	_mm_stream_si128((__m128i *)dest + 1, hi);

	VALGRIND_DO_FLUSH(dest, 32);
}
/*
 * memmove_movnt1x16b -- copy 16 bytes with a single non-temporal
 * SSE store.
 */
static force_inline void
memmove_movnt1x16b(char *dest, const char *src)
{
	__m128i xmm0 = _mm_loadu_si128((__m128i *)src);
	_mm_stream_si128((__m128i *)dest, xmm0);
	VALGRIND_DO_FLUSH(dest, 16);
}
/*
 * memmove_movnt1x8b -- copy 8 bytes with a non-temporal 64-bit store.
 */
static force_inline void
memmove_movnt1x8b(char *dest, const char *src)
{
	_mm_stream_si64((long long *)dest, *(long long *)src);
	VALGRIND_DO_FLUSH(dest, 8);
}
/*
 * memmove_movnt1x4b -- copy 4 bytes with a non-temporal 32-bit store.
 */
static force_inline void
memmove_movnt1x4b(char *dest, const char *src)
{
	_mm_stream_si32((int *)dest, *(int *)src);
	VALGRIND_DO_FLUSH(dest, 4);
}
/*
 * memmove_movnt_sse_fw -- forward (low-to-high address) memmove using
 * non-temporal SSE2 stores. Aligns dest to a cache line, then drains
 * len with successively smaller power-of-two non-temporal chunks.
 */
static force_inline void
memmove_movnt_sse_fw(char *dest, const char *src, size_t len)
{
	/* bytes needed to bring dest up to 64-byte alignment */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		/* unaligned head is copied with regular stores */
		memmove_small_sse2(dest, src, cnt);
		dest += cnt;
		src += cnt;
		len -= cnt;
	}
	/* main loop: 256 B (4 cache lines) per iteration */
	while (len >= 4 * 64) {
		memmove_movnt4x64b(dest, src);
		dest += 4 * 64;
		src += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memmove_movnt2x64b(dest, src);
		dest += 2 * 64;
		src += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memmove_movnt1x64b(dest, src);
		dest += 1 * 64;
		src += 1 * 64;
		len -= 1 * 64;
	}
	if (len == 0)
		return;
	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32)
			memmove_movnt1x32b(dest, src);
		else if (len == 16)
			memmove_movnt1x16b(dest, src);
		else if (len == 8)
			memmove_movnt1x8b(dest, src);
		else if (len == 4)
			memmove_movnt1x4b(dest, src);
		else
			goto nonnt;
		return;
	}
nonnt:
	/* non-power-of-two tail: regular stores */
	memmove_small_sse2(dest, src, len);
}
/*
 * memmove_movnt_sse_bw -- backward (high-to-low address) memmove using
 * non-temporal SSE2 stores; used when dest overlaps src from above.
 * Pointers start one past the end and are decremented before each copy.
 */
static force_inline void
memmove_movnt_sse_bw(char *dest, const char *src, size_t len)
{
	dest += len;
	src += len;
	/* bytes above the highest 64-byte boundary inside the range */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		if (cnt > len)
			cnt = len;
		dest -= cnt;
		src -= cnt;
		len -= cnt;
		/* unaligned tail copied first, with regular stores */
		memmove_small_sse2(dest, src, cnt);
	}
	/* main loop: 256 B (4 cache lines) per iteration, moving down */
	while (len >= 4 * 64) {
		dest -= 4 * 64;
		src -= 4 * 64;
		len -= 4 * 64;
		memmove_movnt4x64b(dest, src);
	}
	if (len >= 2 * 64) {
		dest -= 2 * 64;
		src -= 2 * 64;
		len -= 2 * 64;
		memmove_movnt2x64b(dest, src);
	}
	if (len >= 1 * 64) {
		dest -= 1 * 64;
		src -= 1 * 64;
		len -= 1 * 64;
		memmove_movnt1x64b(dest, src);
	}
	if (len == 0)
		return;
	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32) {
			dest -= 32;
			src -= 32;
			memmove_movnt1x32b(dest, src);
		} else if (len == 16) {
			dest -= 16;
			src -= 16;
			memmove_movnt1x16b(dest, src);
		} else if (len == 8) {
			dest -= 8;
			src -= 8;
			memmove_movnt1x8b(dest, src);
		} else if (len == 4) {
			dest -= 4;
			src -= 4;
			memmove_movnt1x4b(dest, src);
		} else {
			goto nonnt;
		}
		return;
	}
nonnt:
	/* non-power-of-two remainder: regular stores */
	dest -= len;
	src -= len;
	memmove_small_sse2(dest, src, len);
}
/*
 * EXPORTED_SYMBOL -- non-temporal SSE2 memmove entry point; the actual
 * symbol name is injected by the file that includes this header.
 */
void
EXPORTED_SYMBOL(char *dest, const char *src, size_t len)
{
	/*
	 * Single-branch overlap test relying on unsigned wraparound:
	 * a forward copy is safe unless dest lies inside [src, src + len).
	 */
	if ((uintptr_t)dest - (uintptr_t)src >= len)
		memmove_movnt_sse_fw(dest, src, len);
	else
		memmove_movnt_sse_bw(dest, src, len);
	/* fence the non-temporal stores if the variant requires it */
	maybe_barrier();
}
| 8,204 | 25.813725 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx.h | /*
* Copyright 2017-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_avx.h"
#include "memcpy_memset.h"
#include "valgrind_internal.h"
/*
 * memmove_movnt8x64b -- copy 8 cache lines (512 B) with non-temporal
 * AVX stores; all 16 loads are issued before the first store.
 */
static force_inline void
memmove_movnt8x64b(char *dest, const char *src)
{
	__m256i y[16];
	int i;

	for (i = 0; i < 16; ++i)
		y[i] = _mm256_loadu_si256((__m256i *)src + i);

	for (i = 0; i < 16; ++i)
		_mm256_stream_si256((__m256i *)dest + i, y[i]);

	VALGRIND_DO_FLUSH(dest, 8 * 64);
}
/*
 * memmove_movnt4x64b -- copy 4 cache lines (256 B) with non-temporal
 * AVX stores; all 8 loads are issued before the first store.
 */
static force_inline void
memmove_movnt4x64b(char *dest, const char *src)
{
	__m256i y[8];
	int i;

	for (i = 0; i < 8; ++i)
		y[i] = _mm256_loadu_si256((__m256i *)src + i);

	for (i = 0; i < 8; ++i)
		_mm256_stream_si256((__m256i *)dest + i, y[i]);

	VALGRIND_DO_FLUSH(dest, 4 * 64);
}
/*
 * memmove_movnt2x64b -- copy 2 cache lines (128 B) with non-temporal
 * AVX stores; all 4 loads are issued before the first store.
 */
static force_inline void
memmove_movnt2x64b(char *dest, const char *src)
{
	__m256i y[4];
	int i;

	for (i = 0; i < 4; ++i)
		y[i] = _mm256_loadu_si256((__m256i *)src + i);

	for (i = 0; i < 4; ++i)
		_mm256_stream_si256((__m256i *)dest + i, y[i]);

	VALGRIND_DO_FLUSH(dest, 2 * 64);
}
/*
 * memmove_movnt1x64b -- copy one cache line (64 B) with two
 * non-temporal AVX stores; both loads happen before either store.
 */
static force_inline void
memmove_movnt1x64b(char *dest, const char *src)
{
	__m256i lo = _mm256_loadu_si256((__m256i *)src);
	__m256i hi = _mm256_loadu_si256((__m256i *)src + 1);

	_mm256_stream_si256((__m256i *)dest, lo);
	_mm256_stream_si256((__m256i *)dest + 1, hi);

	VALGRIND_DO_FLUSH(dest, 64);
}
/*
 * memmove_movnt1x32b -- copy 32 bytes with a single non-temporal
 * AVX store.
 */
static force_inline void
memmove_movnt1x32b(char *dest, const char *src)
{
	__m256i chunk = _mm256_loadu_si256((__m256i *)src);

	_mm256_stream_si256((__m256i *)dest, chunk);

	VALGRIND_DO_FLUSH(dest, 32);
}
/*
 * memmove_movnt1x16b -- copy 16 bytes with a single non-temporal
 * SSE store.
 */
static force_inline void
memmove_movnt1x16b(char *dest, const char *src)
{
	__m128i xmm0 = _mm_loadu_si128((__m128i *)src);
	_mm_stream_si128((__m128i *)dest, xmm0);
	VALGRIND_DO_FLUSH(dest, 16);
}
/*
 * memmove_movnt1x8b -- copy 8 bytes with a non-temporal 64-bit store.
 */
static force_inline void
memmove_movnt1x8b(char *dest, const char *src)
{
	_mm_stream_si64((long long *)dest, *(long long *)src);
	VALGRIND_DO_FLUSH(dest, 8);
}
/*
 * memmove_movnt1x4b -- copy 4 bytes with a non-temporal 32-bit store.
 */
static force_inline void
memmove_movnt1x4b(char *dest, const char *src)
{
	_mm_stream_si32((int *)dest, *(int *)src);
	VALGRIND_DO_FLUSH(dest, 4);
}
/*
 * memmove_movnt_avx_fw -- forward (low-to-high address) memmove using
 * non-temporal AVX stores. Aligns dest to a cache line, then drains
 * len with successively smaller power-of-two non-temporal chunks.
 */
static force_inline void
memmove_movnt_avx_fw(char *dest, const char *src, size_t len)
{
	/* bytes needed to bring dest up to 64-byte alignment */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		cnt = 64 - cnt;
		if (cnt > len)
			cnt = len;
		/* unaligned head is copied with regular stores */
		memmove_small_avx(dest, src, cnt);
		dest += cnt;
		src += cnt;
		len -= cnt;
	}
	/* main loop: 512 B (8 cache lines) per iteration */
	while (len >= 8 * 64) {
		memmove_movnt8x64b(dest, src);
		dest += 8 * 64;
		src += 8 * 64;
		len -= 8 * 64;
	}
	if (len >= 4 * 64) {
		memmove_movnt4x64b(dest, src);
		dest += 4 * 64;
		src += 4 * 64;
		len -= 4 * 64;
	}
	if (len >= 2 * 64) {
		memmove_movnt2x64b(dest, src);
		dest += 2 * 64;
		src += 2 * 64;
		len -= 2 * 64;
	}
	if (len >= 1 * 64) {
		memmove_movnt1x64b(dest, src);
		dest += 1 * 64;
		src += 1 * 64;
		len -= 1 * 64;
	}
	if (len == 0)
		goto end;
	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32)
			memmove_movnt1x32b(dest, src);
		else if (len == 16)
			memmove_movnt1x16b(dest, src);
		else if (len == 8)
			memmove_movnt1x8b(dest, src);
		else if (len == 4)
			memmove_movnt1x4b(dest, src);
		else
			goto nonnt;
		goto end;
	}
nonnt:
	/* non-power-of-two tail: regular stores */
	memmove_small_avx(dest, src, len);
end:
	/* leave AVX state clean to avoid SSE/AVX transition penalties */
	avx_zeroupper();
}
/*
 * memmove_movnt_avx_bw -- backward (high-to-low address) memmove using
 * non-temporal AVX stores; used when dest overlaps src from above.
 * Pointers start one past the end and are decremented before each copy.
 */
static force_inline void
memmove_movnt_avx_bw(char *dest, const char *src, size_t len)
{
	dest += len;
	src += len;
	/* bytes above the highest 64-byte boundary inside the range */
	size_t cnt = (uint64_t)dest & 63;
	if (cnt > 0) {
		if (cnt > len)
			cnt = len;
		dest -= cnt;
		src -= cnt;
		len -= cnt;
		/* unaligned tail copied first, with regular stores */
		memmove_small_avx(dest, src, cnt);
	}
	/* main loop: 512 B (8 cache lines) per iteration, moving down */
	while (len >= 8 * 64) {
		dest -= 8 * 64;
		src -= 8 * 64;
		len -= 8 * 64;
		memmove_movnt8x64b(dest, src);
	}
	if (len >= 4 * 64) {
		dest -= 4 * 64;
		src -= 4 * 64;
		len -= 4 * 64;
		memmove_movnt4x64b(dest, src);
	}
	if (len >= 2 * 64) {
		dest -= 2 * 64;
		src -= 2 * 64;
		len -= 2 * 64;
		memmove_movnt2x64b(dest, src);
	}
	if (len >= 1 * 64) {
		dest -= 1 * 64;
		src -= 1 * 64;
		len -= 1 * 64;
		memmove_movnt1x64b(dest, src);
	}
	if (len == 0)
		goto end;
	/* There's no point in using more than 1 nt store for 1 cache line. */
	if (util_is_pow2(len)) {
		if (len == 32) {
			dest -= 32;
			src -= 32;
			memmove_movnt1x32b(dest, src);
		} else if (len == 16) {
			dest -= 16;
			src -= 16;
			memmove_movnt1x16b(dest, src);
		} else if (len == 8) {
			dest -= 8;
			src -= 8;
			memmove_movnt1x8b(dest, src);
		} else if (len == 4) {
			dest -= 4;
			src -= 4;
			memmove_movnt1x4b(dest, src);
		} else {
			goto nonnt;
		}
		goto end;
	}
nonnt:
	/* non-power-of-two remainder: regular stores */
	dest -= len;
	src -= len;
	memmove_small_avx(dest, src, len);
end:
	/* leave AVX state clean to avoid SSE/AVX transition penalties */
	avx_zeroupper();
}
/*
 * EXPORTED_SYMBOL -- non-temporal AVX memmove entry point; the actual
 * symbol name is injected by the file that includes this header.
 */
void
EXPORTED_SYMBOL(char *dest, const char *src, size_t len)
{
	/*
	 * Single-branch overlap test relying on unsigned wraparound:
	 * a forward copy is safe unless dest lies inside [src, src + len).
	 */
	if ((uintptr_t)dest - (uintptr_t)src >= len)
		memmove_movnt_avx_fw(dest, src, len);
	else
		memmove_movnt_avx_bw(dest, src, len);
	/* fence the non-temporal stores if the variant requires it */
	maybe_barrier();
}
| 8,883 | 25.519403 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/aarch64/flush.h | /*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef ARM64_FLUSH_H
#define ARM64_FLUSH_H
#include <stdint.h>
#include "arm_cacheops.h"
#include "util.h"
#define FLUSH_ALIGN ((uintptr_t)64)
/*
* flush_clflushopt_nolog -- flush the CPU cache, using
* arm_clean_and_invalidate_va_to_poc (see arm_cacheops.h) {DC CIVAC}
*/
static force_inline void
flush_dcache_invalidate_opt_nolog(const void *addr, size_t len)
{
	uintptr_t uptr;
	/* order prior stores before the cache maintenance ops */
	arm_data_memory_barrier();
	/* clean+invalidate every cache line overlapping [addr, addr+len) */
	for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
		uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) {
		arm_clean_and_invalidate_va_to_poc((char *)uptr);
	}
	/* make the maintenance ops visible before subsequent accesses */
	arm_data_memory_barrier();
}
/*
* flush_dcache_nolog -- flush the CPU cache, using DC CVAC
*/
static force_inline void
flush_dcache_nolog(const void *addr, size_t len)
{
	uintptr_t uptr;
	/*
	 * Loop through cache-line-size (typically 64B) aligned chunks
	 * covering the given range.
	 */
	/* NOTE: no barriers here, unlike the _invalidate_opt variant --
	 * presumably the caller issues the fence; confirm at call sites. */
	for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
		uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) {
		arm_clean_va_to_poc((char *)uptr);
	}
}
#endif
| 2,631 | 32.74359 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/aarch64/arm_cacheops.h | /*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ARM inline assembly to flush and invalidate caches
* clwb => dc cvac
* clflush | clflushopt => dc civac
* fence => dmb ish
*/
#ifndef AARCH64_CACHEOPS_H
#define AARCH64_CACHEOPS_H
#include <stdlib.h>
/* clean (write back) one cache line to the point of coherency: DC CVAC */
static inline void
arm_clean_va_to_poc(void const *p __attribute__((unused)))
{
	asm volatile("dc cvac, %0" : : "r" (p) : "memory");
}
/* full data memory barrier, inner-shareable domain: DMB ISH */
static inline void
arm_data_memory_barrier(void)
{
	asm volatile("dmb ish" : : : "memory");
}
/* clean and invalidate one cache line to the point of coherency: DC CIVAC */
static inline void
arm_clean_and_invalidate_va_to_poc(const void *addr)
{
	asm volatile("dc civac, %0" : : "r" (addr) : "memory");
}
#endif
| 2,185 | 34.258065 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libvmem/vmem.h | /*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* vmem.h -- internal definitions for libvmem
*/
#ifndef VMEM_H
#define VMEM_H 1
#include <stddef.h>
#include "pool_hdr.h"
#ifdef __cplusplus
extern "C" {
#endif
#define VMEM_LOG_PREFIX "libvmem"
#define VMEM_LOG_LEVEL_VAR "VMEM_LOG_LEVEL"
#define VMEM_LOG_FILE_VAR "VMEM_LOG_FILE"
/* attributes of the vmem memory pool format for the pool header */
#define VMEM_HDR_SIG "VMEM " /* must be 8 bytes including '\0' */
#define VMEM_FORMAT_MAJOR 1
/* runtime state of a single vmem memory pool */
struct vmem {
	struct pool_hdr hdr; /* memory pool header */
	void *addr; /* mapped region */
	size_t size; /* size of mapped region */
	int caller_mapped; /* nonzero if mapping was supplied by caller -- TODO confirm */
};
void vmem_construct(void);
#ifdef __cplusplus
}
#endif
#endif
| 2,284 | 31.183099 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/getopt/getopt.h | /*
* *Copyright (c) 2012, Kim Gräsman
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Kim Gräsman nor the
* names of contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL KIM GRÄSMAN BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef INCLUDED_GETOPT_PORT_H
#define INCLUDED_GETOPT_PORT_H
#if defined(__cplusplus)
extern "C" {
#endif
#define no_argument 0
#define required_argument 1
#define optional_argument 2
extern char* optarg;
extern int optind, opterr, optopt;
/* long-option descriptor for getopt_long() (POSIX-compatible layout) */
struct option {
  const char* name; /* option name, without leading dashes */
  int has_arg;      /* no_argument / required_argument / optional_argument */
  int* flag;        /* if non-NULL, *flag is set to val and 0 is returned */
  int val;          /* value returned (or stored via flag) when matched */
};
int getopt(int argc, char* const argv[], const char* optstring);
int getopt_long(int argc, char* const argv[],
const char* optstring, const struct option* longopts, int* longindex);
#if defined(__cplusplus)
}
#endif
#endif // INCLUDED_GETOPT_PORT_H
| 2,137 | 35.237288 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_defs.h | /* ./../windows/jemalloc_gen/include/jemalloc/jemalloc_defs.h. Generated from jemalloc_defs.h.in by configure. */
/* Defined if __attribute__((...)) syntax is supported. */
/* #undef JEMALLOC_HAVE_ATTR */
/* Defined if alloc_size attribute is supported. */
/* #undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE */
/* Defined if format(gnu_printf, ...) attribute is supported. */
/* #undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF */
/* Defined if format(printf, ...) attribute is supported. */
/* #undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF */
/*
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
*/
/* #undef JEMALLOC_OVERRIDE_MEMALIGN */
/* #undef JEMALLOC_OVERRIDE_VALLOC */
/*
* At least Linux omits the "const" in:
*
* size_t malloc_usable_size(const void *ptr);
*
* Match the operating system's prototype.
*/
#define JEMALLOC_USABLE_SIZE_CONST const
/*
* If defined, specify throw() for the public function prototypes when compiling
* with C++. The only justification for this is to match the prototypes that
* glibc defines.
*/
/* #undef JEMALLOC_USE_CXX_THROW */
#ifdef _MSC_VER
# ifdef _WIN64
# define LG_SIZEOF_PTR_WIN 3
# else
# define LG_SIZEOF_PTR_WIN 2
# endif
#endif
/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#define LG_SIZEOF_PTR LG_SIZEOF_PTR_WIN
| 1,327 | 27.255319 | 115 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_mangle_jet.h | /*
* By default application code must explicitly refer to mangled symbol names,
* so that it is possible to use jemalloc in conjunction with another allocator
* in the same application. Define JEMALLOC_MANGLE in order to cause automatic
* name mangling that matches the API prefixing that happened as a result of
* --with-mangling and/or --with-jemalloc-prefix configuration settings.
*/
#ifdef JEMALLOC_MANGLE
# ifndef JEMALLOC_NO_DEMANGLE
# define JEMALLOC_NO_DEMANGLE
# endif
# define pool_create jet_pool_create
# define pool_delete jet_pool_delete
# define pool_malloc jet_pool_malloc
# define pool_calloc jet_pool_calloc
# define pool_ralloc jet_pool_ralloc
# define pool_aligned_alloc jet_pool_aligned_alloc
# define pool_free jet_pool_free
# define pool_malloc_usable_size jet_pool_malloc_usable_size
# define pool_malloc_stats_print jet_pool_malloc_stats_print
# define pool_extend jet_pool_extend
# define pool_set_alloc_funcs jet_pool_set_alloc_funcs
# define pool_check jet_pool_check
# define malloc_conf jet_malloc_conf
# define malloc_message jet_malloc_message
# define malloc jet_malloc
# define calloc jet_calloc
# define posix_memalign jet_posix_memalign
# define aligned_alloc jet_aligned_alloc
# define realloc jet_realloc
# define free jet_free
# define mallocx jet_mallocx
# define rallocx jet_rallocx
# define xallocx jet_xallocx
# define sallocx jet_sallocx
# define dallocx jet_dallocx
# define nallocx jet_nallocx
# define mallctl jet_mallctl
# define mallctlnametomib jet_mallctlnametomib
# define mallctlbymib jet_mallctlbymib
# define navsnprintf jet_navsnprintf
# define malloc_stats_print jet_malloc_stats_print
# define malloc_usable_size jet_malloc_usable_size
#endif
/*
* The jet_* macros can be used as stable alternative names for the
* public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
* meant for use in jemalloc itself, but it can be used by application code to
* provide isolation from the name mangling specified via --with-mangling
* and/or --with-jemalloc-prefix.
*/
#ifndef JEMALLOC_NO_DEMANGLE
# undef jet_pool_create
# undef jet_pool_delete
# undef jet_pool_malloc
# undef jet_pool_calloc
# undef jet_pool_ralloc
# undef jet_pool_aligned_alloc
# undef jet_pool_free
# undef jet_pool_malloc_usable_size
# undef jet_pool_malloc_stats_print
# undef jet_pool_extend
# undef jet_pool_set_alloc_funcs
# undef jet_pool_check
# undef jet_malloc_conf
# undef jet_malloc_message
# undef jet_malloc
# undef jet_calloc
# undef jet_posix_memalign
# undef jet_aligned_alloc
# undef jet_realloc
# undef jet_free
# undef jet_mallocx
# undef jet_rallocx
# undef jet_xallocx
# undef jet_sallocx
# undef jet_dallocx
# undef jet_nallocx
# undef jet_mallctl
# undef jet_mallctlnametomib
# undef jet_mallctlbymib
# undef jet_navsnprintf
# undef jet_malloc_stats_print
# undef jet_malloc_usable_size
#endif
| 2,939 | 32.793103 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_protos_jet.h | /*
* The jet_ prefix on the following public symbol declarations is an artifact
* of namespace management, and should be omitted in application code unless
* JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle@[email protected]).
*/
extern JEMALLOC_EXPORT const char *jet_malloc_conf;
extern JEMALLOC_EXPORT void (*jet_malloc_message)(void *cbopaque,
const char *s);
typedef struct pool_s pool_t;
JEMALLOC_EXPORT pool_t *jet_pool_create(void *addr, size_t size, int zeroed);
JEMALLOC_EXPORT int jet_pool_delete(pool_t *pool);
JEMALLOC_EXPORT size_t jet_pool_extend(pool_t *pool, void *addr,
size_t size, int zeroed);
JEMALLOC_EXPORT void *jet_pool_malloc(pool_t *pool, size_t size);
JEMALLOC_EXPORT void *jet_pool_calloc(pool_t *pool, size_t nmemb, size_t size);
JEMALLOC_EXPORT void *jet_pool_ralloc(pool_t *pool, void *ptr, size_t size);
JEMALLOC_EXPORT void *jet_pool_aligned_alloc(pool_t *pool, size_t alignment, size_t size);
JEMALLOC_EXPORT void jet_pool_free(pool_t *pool, void *ptr);
JEMALLOC_EXPORT size_t jet_pool_malloc_usable_size(pool_t *pool, void *ptr);
JEMALLOC_EXPORT void jet_pool_malloc_stats_print(pool_t *pool,
void (*write_cb)(void *, const char *),
void *cbopaque, const char *opts);
JEMALLOC_EXPORT void jet_pool_set_alloc_funcs(void *(*malloc_func)(size_t),
void (*free_func)(void *));
JEMALLOC_EXPORT int jet_pool_check(pool_t *pool);
JEMALLOC_EXPORT void *jet_malloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *jet_calloc(size_t num, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT int jet_posix_memalign(void **memptr, size_t alignment,
size_t size) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT void *jet_aligned_alloc(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *jet_realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void jet_free(void *ptr);
JEMALLOC_EXPORT void *jet_mallocx(size_t size, int flags);
JEMALLOC_EXPORT void *jet_rallocx(void *ptr, size_t size, int flags);
JEMALLOC_EXPORT size_t jet_xallocx(void *ptr, size_t size, size_t extra,
int flags);
JEMALLOC_EXPORT size_t jet_sallocx(const void *ptr, int flags);
JEMALLOC_EXPORT void jet_dallocx(void *ptr, int flags);
JEMALLOC_EXPORT size_t jet_nallocx(size_t size, int flags);
JEMALLOC_EXPORT int jet_mallctl(const char *name, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int jet_mallctlnametomib(const char *name, size_t *mibp,
size_t *miblenp);
JEMALLOC_EXPORT int jet_mallctlbymib(const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void jet_malloc_stats_print(void (*write_cb)(void *,
const char *), void *jet_cbopaque, const char *opts);
JEMALLOC_EXPORT size_t jet_malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr);
JEMALLOC_EXPORT int jet_navsnprintf(char *str, size_t size,
const char *format, va_list ap);
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void * jet_memalign(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT void * jet_valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
| 3,176 | 45.043478 | 91 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_rename.h | /*
* Name mangling for public symbols is controlled by --with-mangling and
* --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
* these macro definitions.
*/
#ifndef JEMALLOC_NO_RENAME
# define je_pool_create je_vmem_pool_create
# define je_pool_delete je_vmem_pool_delete
# define je_pool_malloc je_vmem_pool_malloc
# define je_pool_calloc je_vmem_pool_calloc
# define je_pool_ralloc je_vmem_pool_ralloc
# define je_pool_aligned_alloc je_vmem_pool_aligned_alloc
# define je_pool_free je_vmem_pool_free
# define je_pool_malloc_usable_size je_vmem_pool_malloc_usable_size
# define je_pool_malloc_stats_print je_vmem_pool_malloc_stats_print
# define je_pool_extend je_vmem_pool_extend
# define je_pool_set_alloc_funcs je_vmem_pool_set_alloc_funcs
# define je_pool_check je_vmem_pool_check
# define je_malloc_conf je_vmem_malloc_conf
# define je_malloc_message je_vmem_malloc_message
# define je_malloc je_vmem_malloc
# define je_calloc je_vmem_calloc
# define je_posix_memalign je_vmem_posix_memalign
# define je_aligned_alloc je_vmem_aligned_alloc
# define je_realloc je_vmem_realloc
# define je_free je_vmem_free
# define je_mallocx je_vmem_mallocx
# define je_rallocx je_vmem_rallocx
# define je_xallocx je_vmem_xallocx
# define je_sallocx je_vmem_sallocx
# define je_dallocx je_vmem_dallocx
# define je_nallocx je_vmem_nallocx
# define je_mallctl je_vmem_mallctl
# define je_mallctlnametomib je_vmem_mallctlnametomib
# define je_mallctlbymib je_vmem_mallctlbymib
# define je_navsnprintf je_vmem_navsnprintf
# define je_malloc_stats_print je_vmem_malloc_stats_print
# define je_malloc_usable_size je_vmem_malloc_usable_size
#endif
| 1,694 | 41.375 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_mangle.h | /*
* By default application code must explicitly refer to mangled symbol names,
* so that it is possible to use jemalloc in conjunction with another allocator
* in the same application. Define JEMALLOC_MANGLE in order to cause automatic
* name mangling that matches the API prefixing that happened as a result of
* --with-mangling and/or --with-jemalloc-prefix configuration settings.
*/
#ifdef JEMALLOC_MANGLE
# ifndef JEMALLOC_NO_DEMANGLE
# define JEMALLOC_NO_DEMANGLE
# endif
# define pool_create je_pool_create
# define pool_delete je_pool_delete
# define pool_malloc je_pool_malloc
# define pool_calloc je_pool_calloc
# define pool_ralloc je_pool_ralloc
# define pool_aligned_alloc je_pool_aligned_alloc
# define pool_free je_pool_free
# define pool_malloc_usable_size je_pool_malloc_usable_size
# define pool_malloc_stats_print je_pool_malloc_stats_print
# define pool_extend je_pool_extend
# define pool_set_alloc_funcs je_pool_set_alloc_funcs
# define pool_check je_pool_check
# define malloc_conf je_malloc_conf
# define malloc_message je_malloc_message
# define malloc je_malloc
# define calloc je_calloc
# define posix_memalign je_posix_memalign
# define aligned_alloc je_aligned_alloc
# define realloc je_realloc
# define free je_free
# define mallocx je_mallocx
# define rallocx je_rallocx
# define xallocx je_xallocx
# define sallocx je_sallocx
# define dallocx je_dallocx
# define nallocx je_nallocx
# define mallctl je_mallctl
# define mallctlnametomib je_mallctlnametomib
# define mallctlbymib je_mallctlbymib
# define navsnprintf je_navsnprintf
# define malloc_stats_print je_malloc_stats_print
# define malloc_usable_size je_malloc_usable_size
#endif
/*
* The je_* macros can be used as stable alternative names for the
* public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
* meant for use in jemalloc itself, but it can be used by application code to
* provide isolation from the name mangling specified via --with-mangling
* and/or --with-jemalloc-prefix.
*/
#ifndef JEMALLOC_NO_DEMANGLE
# undef je_pool_create
# undef je_pool_delete
# undef je_pool_malloc
# undef je_pool_calloc
# undef je_pool_ralloc
# undef je_pool_aligned_alloc
# undef je_pool_free
# undef je_pool_malloc_usable_size
# undef je_pool_malloc_stats_print
# undef je_pool_extend
# undef je_pool_set_alloc_funcs
# undef je_pool_check
# undef je_malloc_conf
# undef je_malloc_message
# undef je_malloc
# undef je_calloc
# undef je_posix_memalign
# undef je_aligned_alloc
# undef je_realloc
# undef je_free
# undef je_mallocx
# undef je_rallocx
# undef je_xallocx
# undef je_sallocx
# undef je_dallocx
# undef je_nallocx
# undef je_mallctl
# undef je_mallctlnametomib
# undef je_mallctlbymib
# undef je_navsnprintf
# undef je_malloc_stats_print
# undef je_malloc_usable_size
#endif
| 2,874 | 32.045977 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc.h | #ifndef JEMALLOC_H_
#define JEMALLOC_H_
#ifdef __cplusplus
extern "C" {
#endif
/* Defined if __attribute__((...)) syntax is supported. */
/* #undef JEMALLOC_HAVE_ATTR */
/* Defined if alloc_size attribute is supported. */
/* #undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE */
/* Defined if format(gnu_printf, ...) attribute is supported. */
/* #undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF */
/* Defined if format(printf, ...) attribute is supported. */
/* #undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF */
/*
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
*/
/* #undef JEMALLOC_OVERRIDE_MEMALIGN */
/* #undef JEMALLOC_OVERRIDE_VALLOC */
/*
* At least Linux omits the "const" in:
*
* size_t malloc_usable_size(const void *ptr);
*
* Match the operating system's prototype.
*/
#define JEMALLOC_USABLE_SIZE_CONST const
/*
* If defined, specify throw() for the public function prototypes when compiling
* with C++. The only justification for this is to match the prototypes that
* glibc defines.
*/
/* #undef JEMALLOC_USE_CXX_THROW */
#ifdef _MSC_VER
# ifdef _WIN64
# define LG_SIZEOF_PTR_WIN 3
# else
# define LG_SIZEOF_PTR_WIN 2
# endif
#endif
/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
#define LG_SIZEOF_PTR LG_SIZEOF_PTR_WIN
/*
* Name mangling for public symbols is controlled by --with-mangling and
* --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
* these macro definitions.
*/
#ifndef JEMALLOC_NO_RENAME
# define je_pool_create je_vmem_pool_create
# define je_pool_delete je_vmem_pool_delete
# define je_pool_malloc je_vmem_pool_malloc
# define je_pool_calloc je_vmem_pool_calloc
# define je_pool_ralloc je_vmem_pool_ralloc
# define je_pool_aligned_alloc je_vmem_pool_aligned_alloc
# define je_pool_free je_vmem_pool_free
# define je_pool_malloc_usable_size je_vmem_pool_malloc_usable_size
# define je_pool_malloc_stats_print je_vmem_pool_malloc_stats_print
# define je_pool_extend je_vmem_pool_extend
# define je_pool_set_alloc_funcs je_vmem_pool_set_alloc_funcs
# define je_pool_check je_vmem_pool_check
# define je_malloc_conf je_vmem_malloc_conf
# define je_malloc_message je_vmem_malloc_message
# define je_malloc je_vmem_malloc
# define je_calloc je_vmem_calloc
# define je_posix_memalign je_vmem_posix_memalign
# define je_aligned_alloc je_vmem_aligned_alloc
# define je_realloc je_vmem_realloc
# define je_free je_vmem_free
# define je_mallocx je_vmem_mallocx
# define je_rallocx je_vmem_rallocx
# define je_xallocx je_vmem_xallocx
# define je_sallocx je_vmem_sallocx
# define je_dallocx je_vmem_dallocx
# define je_nallocx je_vmem_nallocx
# define je_mallctl je_vmem_mallctl
# define je_mallctlnametomib je_vmem_mallctlnametomib
# define je_mallctlbymib je_vmem_mallctlbymib
# define je_navsnprintf je_vmem_navsnprintf
# define je_malloc_stats_print je_vmem_malloc_stats_print
# define je_malloc_usable_size je_vmem_malloc_usable_size
#endif
#include <limits.h>
#include <strings.h>
#include <stdbool.h>
#include <stdarg.h>
#define JEMALLOC_VERSION ""
#define JEMALLOC_VERSION_MAJOR
#define JEMALLOC_VERSION_MINOR
#define JEMALLOC_VERSION_BUGFIX
#define JEMALLOC_VERSION_NREV
#define JEMALLOC_VERSION_GID ""
# define MALLOCX_LG_ALIGN(la) (la)
# if LG_SIZEOF_PTR == 2
# define MALLOCX_ALIGN(a) (ffs(a)-1)
# else
# define MALLOCX_ALIGN(a) \
(((a) < (size_t)INT_MAX) ? ffs(a)-1 : ffs((a)>>32)+31)
# endif
# define MALLOCX_ZERO ((int)0x40)
/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */
# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8))
#ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
#elif _MSC_VER
# define JEMALLOC_ATTR(s)
# ifndef JEMALLOC_EXPORT
# ifdef DLLEXPORT
# define JEMALLOC_EXPORT __declspec(dllexport)
# else
# define JEMALLOC_EXPORT __declspec(dllimport)
# endif
# endif
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_SECTION(s) __declspec(allocate(s))
# define JEMALLOC_NOINLINE __declspec(noinline)
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_EXPORT
# define JEMALLOC_ALIGNED(s)
# define JEMALLOC_SECTION(s)
# define JEMALLOC_NOINLINE
#endif
/*
* The je_ prefix on the following public symbol declarations is an artifact
* of namespace management, and should be omitted in application code unless
* JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
*/
extern JEMALLOC_EXPORT const char *je_malloc_conf;
extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
const char *s);
typedef struct pool_s pool_t;
JEMALLOC_EXPORT pool_t *je_pool_create(void *addr, size_t size, int zeroed, int empty);
JEMALLOC_EXPORT int je_pool_delete(pool_t *pool);
JEMALLOC_EXPORT size_t je_pool_extend(pool_t *pool, void *addr,
size_t size, int zeroed);
JEMALLOC_EXPORT void *je_pool_malloc(pool_t *pool, size_t size);
JEMALLOC_EXPORT void *je_pool_calloc(pool_t *pool, size_t nmemb, size_t size);
JEMALLOC_EXPORT void *je_pool_ralloc(pool_t *pool, void *ptr, size_t size);
JEMALLOC_EXPORT void *je_pool_aligned_alloc(pool_t *pool, size_t alignment, size_t size);
JEMALLOC_EXPORT void je_pool_free(pool_t *pool, void *ptr);
JEMALLOC_EXPORT size_t je_pool_malloc_usable_size(pool_t *pool, void *ptr);
JEMALLOC_EXPORT void je_pool_malloc_stats_print(pool_t *pool,
void (*write_cb)(void *, const char *),
void *cbopaque, const char *opts);
JEMALLOC_EXPORT void je_pool_set_alloc_funcs(void *(*malloc_func)(size_t),
void (*free_func)(void *));
JEMALLOC_EXPORT int je_pool_check(pool_t *pool);
JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment,
size_t size) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void je_free(void *ptr);
JEMALLOC_EXPORT void *je_mallocx(size_t size, int flags);
JEMALLOC_EXPORT void *je_rallocx(void *ptr, size_t size, int flags);
JEMALLOC_EXPORT size_t je_xallocx(void *ptr, size_t size, size_t extra,
int flags);
JEMALLOC_EXPORT size_t je_sallocx(const void *ptr, int flags);
JEMALLOC_EXPORT void je_dallocx(void *ptr, int flags);
JEMALLOC_EXPORT size_t je_nallocx(size_t size, int flags);
JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp,
size_t *miblenp);
JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
const char *), void *je_cbopaque, const char *opts);
JEMALLOC_EXPORT size_t je_malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr);
JEMALLOC_EXPORT int je_navsnprintf(char *str, size_t size,
const char *format, va_list ap);
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, unsigned, pool_t *);
typedef bool (chunk_dalloc_t)(void *, size_t, unsigned, pool_t *);
/*
* By default application code must explicitly refer to mangled symbol names,
* so that it is possible to use jemalloc in conjunction with another allocator
* in the same application. Define JEMALLOC_MANGLE in order to cause automatic
* name mangling that matches the API prefixing that happened as a result of
* --with-mangling and/or --with-jemalloc-prefix configuration settings.
*/
#ifdef JEMALLOC_MANGLE
# ifndef JEMALLOC_NO_DEMANGLE
# define JEMALLOC_NO_DEMANGLE
# endif
# define pool_create je_pool_create
# define pool_delete je_pool_delete
# define pool_malloc je_pool_malloc
# define pool_calloc je_pool_calloc
# define pool_ralloc je_pool_ralloc
# define pool_aligned_alloc je_pool_aligned_alloc
# define pool_free je_pool_free
# define pool_malloc_usable_size je_pool_malloc_usable_size
# define pool_malloc_stats_print je_pool_malloc_stats_print
# define pool_extend je_pool_extend
# define pool_set_alloc_funcs je_pool_set_alloc_funcs
# define pool_check je_pool_check
# define malloc_conf je_malloc_conf
# define malloc_message je_malloc_message
# define malloc je_malloc
# define calloc je_calloc
# define posix_memalign je_posix_memalign
# define aligned_alloc je_aligned_alloc
# define realloc je_realloc
# define free je_free
# define mallocx je_mallocx
# define rallocx je_rallocx
# define xallocx je_xallocx
# define sallocx je_sallocx
# define dallocx je_dallocx
# define nallocx je_nallocx
# define mallctl je_mallctl
# define mallctlnametomib je_mallctlnametomib
# define mallctlbymib je_mallctlbymib
# define navsnprintf je_navsnprintf
# define malloc_stats_print je_malloc_stats_print
# define malloc_usable_size je_malloc_usable_size
#endif
/*
* The je_* macros can be used as stable alternative names for the
* public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
* meant for use in jemalloc itself, but it can be used by application code to
* provide isolation from the name mangling specified via --with-mangling
* and/or --with-jemalloc-prefix.
*/
#ifndef JEMALLOC_NO_DEMANGLE
# undef je_pool_create
# undef je_pool_delete
# undef je_pool_malloc
# undef je_pool_calloc
# undef je_pool_ralloc
# undef je_pool_aligned_alloc
# undef je_pool_free
# undef je_pool_malloc_usable_size
# undef je_pool_malloc_stats_print
# undef je_pool_extend
# undef je_pool_set_alloc_funcs
# undef je_pool_check
# undef je_malloc_conf
# undef je_malloc_message
# undef je_malloc
# undef je_calloc
# undef je_posix_memalign
# undef je_aligned_alloc
# undef je_realloc
# undef je_free
# undef je_mallocx
# undef je_rallocx
# undef je_xallocx
# undef je_sallocx
# undef je_dallocx
# undef je_nallocx
# undef je_mallctl
# undef je_mallctlnametomib
# undef je_mallctlbymib
# undef je_navsnprintf
# undef je_malloc_stats_print
# undef je_malloc_usable_size
#endif
#ifdef __cplusplus
}
#endif
#endif /* JEMALLOC_H_ */
| 10,674 | 34 | 90 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_protos.h | /*
* The je_ prefix on the following public symbol declarations is an artifact
* of namespace management, and should be omitted in application code unless
* JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
*/
extern JEMALLOC_EXPORT const char *je_malloc_conf;
extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
const char *s);
typedef struct pool_s pool_t;
JEMALLOC_EXPORT pool_t *je_pool_create(void *addr, size_t size, int zeroed);
JEMALLOC_EXPORT int je_pool_delete(pool_t *pool);
JEMALLOC_EXPORT size_t je_pool_extend(pool_t *pool, void *addr,
size_t size, int zeroed);
JEMALLOC_EXPORT void *je_pool_malloc(pool_t *pool, size_t size);
JEMALLOC_EXPORT void *je_pool_calloc(pool_t *pool, size_t nmemb, size_t size);
JEMALLOC_EXPORT void *je_pool_ralloc(pool_t *pool, void *ptr, size_t size);
JEMALLOC_EXPORT void *je_pool_aligned_alloc(pool_t *pool, size_t alignment, size_t size);
JEMALLOC_EXPORT void je_pool_free(pool_t *pool, void *ptr);
JEMALLOC_EXPORT size_t je_pool_malloc_usable_size(pool_t *pool, void *ptr);
JEMALLOC_EXPORT void je_pool_malloc_stats_print(pool_t *pool,
void (*write_cb)(void *, const char *),
void *cbopaque, const char *opts);
JEMALLOC_EXPORT void je_pool_set_alloc_funcs(void *(*malloc_func)(size_t),
void (*free_func)(void *));
JEMALLOC_EXPORT int je_pool_check(pool_t *pool);
JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment,
size_t size) JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void je_free(void *ptr);
JEMALLOC_EXPORT void *je_mallocx(size_t size, int flags);
JEMALLOC_EXPORT void *je_rallocx(void *ptr, size_t size, int flags);
JEMALLOC_EXPORT size_t je_xallocx(void *ptr, size_t size, size_t extra,
int flags);
JEMALLOC_EXPORT size_t je_sallocx(const void *ptr, int flags);
JEMALLOC_EXPORT void je_dallocx(void *ptr, int flags);
JEMALLOC_EXPORT size_t je_nallocx(size_t size, int flags);
JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp,
size_t *miblenp);
JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen);
JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
const char *), void *je_cbopaque, const char *opts);
JEMALLOC_EXPORT size_t je_malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr);
JEMALLOC_EXPORT int je_navsnprintf(char *str, size_t size,
const char *format, va_list ap);
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
| 3,124 | 44.289855 | 90 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_typedefs.h | typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, unsigned, pool_t *);
typedef bool (chunk_dalloc_t)(void *, size_t, unsigned, pool_t *);
| 150 | 49.333333 | 82 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_macros.h | #include <limits.h>
#include <strings.h>
#include <stdbool.h>
#include <stdarg.h>
#define JEMALLOC_VERSION ""
#define JEMALLOC_VERSION_MAJOR
#define JEMALLOC_VERSION_MINOR
#define JEMALLOC_VERSION_BUGFIX
#define JEMALLOC_VERSION_NREV
#define JEMALLOC_VERSION_GID ""
# define MALLOCX_LG_ALIGN(la) (la)
# if LG_SIZEOF_PTR == 2
# define MALLOCX_ALIGN(a) (ffs(a)-1)
# else
# define MALLOCX_ALIGN(a) \
(((a) < (size_t)INT_MAX) ? ffs(a)-1 : ffs((a)>>32)+31)
# endif
# define MALLOCX_ZERO ((int)0x40)
/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */
# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8))
#ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
#elif _MSC_VER
# define JEMALLOC_ATTR(s)
# ifdef DLLEXPORT
# define JEMALLOC_EXPORT __declspec(dllexport)
# else
# define JEMALLOC_EXPORT __declspec(dllimport)
# endif
# define JEMALLOC_ALIGNED(s) __declspec(align(s))
# define JEMALLOC_SECTION(s) __declspec(allocate(s))
# define JEMALLOC_NOINLINE __declspec(noinline)
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_EXPORT
# define JEMALLOC_ALIGNED(s)
# define JEMALLOC_SECTION(s)
# define JEMALLOC_NOINLINE
#endif
| 1,426 | 29.361702 | 76 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/size_classes.h | /* This file was automatically generated by size_classes.sh. */
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* This header requires LG_SIZEOF_PTR, LG_TINY_MIN, LG_QUANTUM, and LG_PAGE to
* be defined prior to inclusion, and it in turn defines:
*
* LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling.
* SIZE_CLASSES: Complete table of
* SC(index, lg_delta, size, bin, lg_delta_lookup) tuples.
* index: Size class index.
* lg_grp: Lg group base size (no deltas added).
* lg_delta: Lg delta to previous size class.
* ndelta: Delta multiplier. size == 1<<lg_grp + ndelta<<lg_delta
* bin: 'yes' if a small bin size class, 'no' otherwise.
* lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no'
* otherwise.
* NTBINS: Number of tiny bins.
* NLBINS: Number of bins supported by the lookup table.
* NBINS: Number of small size class bins.
* LG_TINY_MAXCLASS: Lg of maximum tiny size class.
* LOOKUP_MAXCLASS: Maximum size class included in lookup table.
* SMALL_MAXCLASS: Maximum small size class.
*/
#define LG_SIZE_CLASS_GROUP 2
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 12)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 3, 3, 0, yes, 3) \
SC( 1, 3, 3, 1, yes, 3) \
SC( 2, 3, 3, 2, yes, 3) \
SC( 3, 3, 3, 3, yes, 3) \
\
SC( 4, 5, 3, 1, yes, 3) \
SC( 5, 5, 3, 2, yes, 3) \
SC( 6, 5, 3, 3, yes, 3) \
SC( 7, 5, 3, 4, yes, 3) \
\
SC( 8, 6, 4, 1, yes, 4) \
SC( 9, 6, 4, 2, yes, 4) \
SC( 10, 6, 4, 3, yes, 4) \
SC( 11, 6, 4, 4, yes, 4) \
\
SC( 12, 7, 5, 1, yes, 5) \
SC( 13, 7, 5, 2, yes, 5) \
SC( 14, 7, 5, 3, yes, 5) \
SC( 15, 7, 5, 4, yes, 5) \
\
SC( 16, 8, 6, 1, yes, 6) \
SC( 17, 8, 6, 2, yes, 6) \
SC( 18, 8, 6, 3, yes, 6) \
SC( 19, 8, 6, 4, yes, 6) \
\
SC( 20, 9, 7, 1, yes, 7) \
SC( 21, 9, 7, 2, yes, 7) \
SC( 22, 9, 7, 3, yes, 7) \
SC( 23, 9, 7, 4, yes, 7) \
\
SC( 24, 10, 8, 1, yes, 8) \
SC( 25, 10, 8, 2, yes, 8) \
SC( 26, 10, 8, 3, yes, 8) \
SC( 27, 10, 8, 4, yes, 8) \
\
SC( 28, 11, 9, 1, yes, 9) \
SC( 29, 11, 9, 2, yes, 9) \
SC( 30, 11, 9, 3, yes, 9) \
SC( 31, 11, 9, 4, no, 9) \
\
SC( 32, 12, 10, 1, no, no) \
SC( 33, 12, 10, 2, no, no) \
SC( 34, 12, 10, 3, no, no) \
SC( 35, 12, 10, 4, no, no) \
\
SC( 36, 13, 11, 1, no, no) \
SC( 37, 13, 11, 2, no, no) \
SC( 38, 13, 11, 3, no, no) \
SC( 39, 13, 11, 4, no, no) \
\
SC( 40, 14, 12, 1, no, no) \
SC( 41, 14, 12, 2, no, no) \
SC( 42, 14, 12, 3, no, no) \
SC( 43, 14, 12, 4, no, no) \
\
SC( 44, 15, 13, 1, no, no) \
SC( 45, 15, 13, 2, no, no) \
SC( 46, 15, 13, 3, no, no) \
SC( 47, 15, 13, 4, no, no) \
\
SC( 48, 16, 14, 1, no, no) \
SC( 49, 16, 14, 2, no, no) \
SC( 50, 16, 14, 3, no, no) \
SC( 51, 16, 14, 4, no, no) \
\
SC( 52, 17, 15, 1, no, no) \
SC( 53, 17, 15, 2, no, no) \
SC( 54, 17, 15, 3, no, no) \
SC( 55, 17, 15, 4, no, no) \
\
SC( 56, 18, 16, 1, no, no) \
SC( 57, 18, 16, 2, no, no) \
SC( 58, 18, 16, 3, no, no) \
SC( 59, 18, 16, 4, no, no) \
\
SC( 60, 19, 17, 1, no, no) \
SC( 61, 19, 17, 2, no, no) \
SC( 62, 19, 17, 3, no, no) \
SC( 63, 19, 17, 4, no, no) \
\
SC( 64, 20, 18, 1, no, no) \
SC( 65, 20, 18, 2, no, no) \
SC( 66, 20, 18, 3, no, no) \
SC( 67, 20, 18, 4, no, no) \
\
SC( 68, 21, 19, 1, no, no) \
SC( 69, 21, 19, 2, no, no) \
SC( 70, 21, 19, 3, no, no) \
SC( 71, 21, 19, 4, no, no) \
\
SC( 72, 22, 20, 1, no, no) \
SC( 73, 22, 20, 2, no, no) \
SC( 74, 22, 20, 3, no, no) \
SC( 75, 22, 20, 4, no, no) \
\
SC( 76, 23, 21, 1, no, no) \
SC( 77, 23, 21, 2, no, no) \
SC( 78, 23, 21, 3, no, no) \
SC( 79, 23, 21, 4, no, no) \
\
SC( 80, 24, 22, 1, no, no) \
SC( 81, 24, 22, 2, no, no) \
SC( 82, 24, 22, 3, no, no) \
SC( 83, 24, 22, 4, no, no) \
\
SC( 84, 25, 23, 1, no, no) \
SC( 85, 25, 23, 2, no, no) \
SC( 86, 25, 23, 3, no, no) \
SC( 87, 25, 23, 4, no, no) \
\
SC( 88, 26, 24, 1, no, no) \
SC( 89, 26, 24, 2, no, no) \
SC( 90, 26, 24, 3, no, no) \
SC( 91, 26, 24, 4, no, no) \
\
SC( 92, 27, 25, 1, no, no) \
SC( 93, 27, 25, 2, no, no) \
SC( 94, 27, 25, 3, no, no) \
SC( 95, 27, 25, 4, no, no) \
\
SC( 96, 28, 26, 1, no, no) \
SC( 97, 28, 26, 2, no, no) \
SC( 98, 28, 26, 3, no, no) \
SC( 99, 28, 26, 4, no, no) \
\
SC(100, 29, 27, 1, no, no) \
SC(101, 29, 27, 2, no, no) \
SC(102, 29, 27, 3, no, no) \
SC(103, 29, 27, 4, no, no) \
\
SC(104, 30, 28, 1, no, no) \
SC(105, 30, 28, 2, no, no) \
SC(106, 30, 28, 3, no, no) \
SC(107, 30, 28, 4, no, no) \
\
SC(108, 31, 29, 1, no, no) \
SC(109, 31, 29, 2, no, no) \
SC(110, 31, 29, 3, no, no) \
#define SIZE_CLASSES_DEFINED
#define NTBINS 0
#define NLBINS 32
#define NBINS 31
#define LG_TINY_MAXCLASS "NA"
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 11) + (((size_t)3) << 9))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 13)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 3, 3, 0, yes, 3) \
SC( 1, 3, 3, 1, yes, 3) \
SC( 2, 3, 3, 2, yes, 3) \
SC( 3, 3, 3, 3, yes, 3) \
\
SC( 4, 5, 3, 1, yes, 3) \
SC( 5, 5, 3, 2, yes, 3) \
SC( 6, 5, 3, 3, yes, 3) \
SC( 7, 5, 3, 4, yes, 3) \
\
SC( 8, 6, 4, 1, yes, 4) \
SC( 9, 6, 4, 2, yes, 4) \
SC( 10, 6, 4, 3, yes, 4) \
SC( 11, 6, 4, 4, yes, 4) \
\
SC( 12, 7, 5, 1, yes, 5) \
SC( 13, 7, 5, 2, yes, 5) \
SC( 14, 7, 5, 3, yes, 5) \
SC( 15, 7, 5, 4, yes, 5) \
\
SC( 16, 8, 6, 1, yes, 6) \
SC( 17, 8, 6, 2, yes, 6) \
SC( 18, 8, 6, 3, yes, 6) \
SC( 19, 8, 6, 4, yes, 6) \
\
SC( 20, 9, 7, 1, yes, 7) \
SC( 21, 9, 7, 2, yes, 7) \
SC( 22, 9, 7, 3, yes, 7) \
SC( 23, 9, 7, 4, yes, 7) \
\
SC( 24, 10, 8, 1, yes, 8) \
SC( 25, 10, 8, 2, yes, 8) \
SC( 26, 10, 8, 3, yes, 8) \
SC( 27, 10, 8, 4, yes, 8) \
\
SC( 28, 11, 9, 1, yes, 9) \
SC( 29, 11, 9, 2, yes, 9) \
SC( 30, 11, 9, 3, yes, 9) \
SC( 31, 11, 9, 4, yes, 9) \
\
SC( 32, 12, 10, 1, yes, no) \
SC( 33, 12, 10, 2, yes, no) \
SC( 34, 12, 10, 3, yes, no) \
SC( 35, 12, 10, 4, no, no) \
\
SC( 36, 13, 11, 1, no, no) \
SC( 37, 13, 11, 2, no, no) \
SC( 38, 13, 11, 3, no, no) \
SC( 39, 13, 11, 4, no, no) \
\
SC( 40, 14, 12, 1, no, no) \
SC( 41, 14, 12, 2, no, no) \
SC( 42, 14, 12, 3, no, no) \
SC( 43, 14, 12, 4, no, no) \
\
SC( 44, 15, 13, 1, no, no) \
SC( 45, 15, 13, 2, no, no) \
SC( 46, 15, 13, 3, no, no) \
SC( 47, 15, 13, 4, no, no) \
\
SC( 48, 16, 14, 1, no, no) \
SC( 49, 16, 14, 2, no, no) \
SC( 50, 16, 14, 3, no, no) \
SC( 51, 16, 14, 4, no, no) \
\
SC( 52, 17, 15, 1, no, no) \
SC( 53, 17, 15, 2, no, no) \
SC( 54, 17, 15, 3, no, no) \
SC( 55, 17, 15, 4, no, no) \
\
SC( 56, 18, 16, 1, no, no) \
SC( 57, 18, 16, 2, no, no) \
SC( 58, 18, 16, 3, no, no) \
SC( 59, 18, 16, 4, no, no) \
\
SC( 60, 19, 17, 1, no, no) \
SC( 61, 19, 17, 2, no, no) \
SC( 62, 19, 17, 3, no, no) \
SC( 63, 19, 17, 4, no, no) \
\
SC( 64, 20, 18, 1, no, no) \
SC( 65, 20, 18, 2, no, no) \
SC( 66, 20, 18, 3, no, no) \
SC( 67, 20, 18, 4, no, no) \
\
SC( 68, 21, 19, 1, no, no) \
SC( 69, 21, 19, 2, no, no) \
SC( 70, 21, 19, 3, no, no) \
SC( 71, 21, 19, 4, no, no) \
\
SC( 72, 22, 20, 1, no, no) \
SC( 73, 22, 20, 2, no, no) \
SC( 74, 22, 20, 3, no, no) \
SC( 75, 22, 20, 4, no, no) \
\
SC( 76, 23, 21, 1, no, no) \
SC( 77, 23, 21, 2, no, no) \
SC( 78, 23, 21, 3, no, no) \
SC( 79, 23, 21, 4, no, no) \
\
SC( 80, 24, 22, 1, no, no) \
SC( 81, 24, 22, 2, no, no) \
SC( 82, 24, 22, 3, no, no) \
SC( 83, 24, 22, 4, no, no) \
\
SC( 84, 25, 23, 1, no, no) \
SC( 85, 25, 23, 2, no, no) \
SC( 86, 25, 23, 3, no, no) \
SC( 87, 25, 23, 4, no, no) \
\
SC( 88, 26, 24, 1, no, no) \
SC( 89, 26, 24, 2, no, no) \
SC( 90, 26, 24, 3, no, no) \
SC( 91, 26, 24, 4, no, no) \
\
SC( 92, 27, 25, 1, no, no) \
SC( 93, 27, 25, 2, no, no) \
SC( 94, 27, 25, 3, no, no) \
SC( 95, 27, 25, 4, no, no) \
\
SC( 96, 28, 26, 1, no, no) \
SC( 97, 28, 26, 2, no, no) \
SC( 98, 28, 26, 3, no, no) \
SC( 99, 28, 26, 4, no, no) \
\
SC(100, 29, 27, 1, no, no) \
SC(101, 29, 27, 2, no, no) \
SC(102, 29, 27, 3, no, no) \
SC(103, 29, 27, 4, no, no) \
\
SC(104, 30, 28, 1, no, no) \
SC(105, 30, 28, 2, no, no) \
SC(106, 30, 28, 3, no, no) \
SC(107, 30, 28, 4, no, no) \
\
SC(108, 31, 29, 1, no, no) \
SC(109, 31, 29, 2, no, no) \
SC(110, 31, 29, 3, no, no)

/* Macro must end on the line above: a trailing '\' here would splice the
 * following #define into the SIZE_CLASSES replacement list, and
 * SIZE_CLASSES_DEFINED would never actually be defined. */
#define SIZE_CLASSES_DEFINED
#define NTBINS 0
#define NLBINS 32
#define NBINS 35
#define LG_TINY_MAXCLASS "NA"
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 12) + (((size_t)3) << 10))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 16)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 3, 3, 0, yes, 3) \
SC( 1, 3, 3, 1, yes, 3) \
SC( 2, 3, 3, 2, yes, 3) \
SC( 3, 3, 3, 3, yes, 3) \
\
SC( 4, 5, 3, 1, yes, 3) \
SC( 5, 5, 3, 2, yes, 3) \
SC( 6, 5, 3, 3, yes, 3) \
SC( 7, 5, 3, 4, yes, 3) \
\
SC( 8, 6, 4, 1, yes, 4) \
SC( 9, 6, 4, 2, yes, 4) \
SC( 10, 6, 4, 3, yes, 4) \
SC( 11, 6, 4, 4, yes, 4) \
\
SC( 12, 7, 5, 1, yes, 5) \
SC( 13, 7, 5, 2, yes, 5) \
SC( 14, 7, 5, 3, yes, 5) \
SC( 15, 7, 5, 4, yes, 5) \
\
SC( 16, 8, 6, 1, yes, 6) \
SC( 17, 8, 6, 2, yes, 6) \
SC( 18, 8, 6, 3, yes, 6) \
SC( 19, 8, 6, 4, yes, 6) \
\
SC( 20, 9, 7, 1, yes, 7) \
SC( 21, 9, 7, 2, yes, 7) \
SC( 22, 9, 7, 3, yes, 7) \
SC( 23, 9, 7, 4, yes, 7) \
\
SC( 24, 10, 8, 1, yes, 8) \
SC( 25, 10, 8, 2, yes, 8) \
SC( 26, 10, 8, 3, yes, 8) \
SC( 27, 10, 8, 4, yes, 8) \
\
SC( 28, 11, 9, 1, yes, 9) \
SC( 29, 11, 9, 2, yes, 9) \
SC( 30, 11, 9, 3, yes, 9) \
SC( 31, 11, 9, 4, yes, 9) \
\
SC( 32, 12, 10, 1, yes, no) \
SC( 33, 12, 10, 2, yes, no) \
SC( 34, 12, 10, 3, yes, no) \
SC( 35, 12, 10, 4, yes, no) \
\
SC( 36, 13, 11, 1, yes, no) \
SC( 37, 13, 11, 2, yes, no) \
SC( 38, 13, 11, 3, yes, no) \
SC( 39, 13, 11, 4, yes, no) \
\
SC( 40, 14, 12, 1, yes, no) \
SC( 41, 14, 12, 2, yes, no) \
SC( 42, 14, 12, 3, yes, no) \
SC( 43, 14, 12, 4, yes, no) \
\
SC( 44, 15, 13, 1, yes, no) \
SC( 45, 15, 13, 2, yes, no) \
SC( 46, 15, 13, 3, yes, no) \
SC( 47, 15, 13, 4, no, no) \
\
SC( 48, 16, 14, 1, no, no) \
SC( 49, 16, 14, 2, no, no) \
SC( 50, 16, 14, 3, no, no) \
SC( 51, 16, 14, 4, no, no) \
\
SC( 52, 17, 15, 1, no, no) \
SC( 53, 17, 15, 2, no, no) \
SC( 54, 17, 15, 3, no, no) \
SC( 55, 17, 15, 4, no, no) \
\
SC( 56, 18, 16, 1, no, no) \
SC( 57, 18, 16, 2, no, no) \
SC( 58, 18, 16, 3, no, no) \
SC( 59, 18, 16, 4, no, no) \
\
SC( 60, 19, 17, 1, no, no) \
SC( 61, 19, 17, 2, no, no) \
SC( 62, 19, 17, 3, no, no) \
SC( 63, 19, 17, 4, no, no) \
\
SC( 64, 20, 18, 1, no, no) \
SC( 65, 20, 18, 2, no, no) \
SC( 66, 20, 18, 3, no, no) \
SC( 67, 20, 18, 4, no, no) \
\
SC( 68, 21, 19, 1, no, no) \
SC( 69, 21, 19, 2, no, no) \
SC( 70, 21, 19, 3, no, no) \
SC( 71, 21, 19, 4, no, no) \
\
SC( 72, 22, 20, 1, no, no) \
SC( 73, 22, 20, 2, no, no) \
SC( 74, 22, 20, 3, no, no) \
SC( 75, 22, 20, 4, no, no) \
\
SC( 76, 23, 21, 1, no, no) \
SC( 77, 23, 21, 2, no, no) \
SC( 78, 23, 21, 3, no, no) \
SC( 79, 23, 21, 4, no, no) \
\
SC( 80, 24, 22, 1, no, no) \
SC( 81, 24, 22, 2, no, no) \
SC( 82, 24, 22, 3, no, no) \
SC( 83, 24, 22, 4, no, no) \
\
SC( 84, 25, 23, 1, no, no) \
SC( 85, 25, 23, 2, no, no) \
SC( 86, 25, 23, 3, no, no) \
SC( 87, 25, 23, 4, no, no) \
\
SC( 88, 26, 24, 1, no, no) \
SC( 89, 26, 24, 2, no, no) \
SC( 90, 26, 24, 3, no, no) \
SC( 91, 26, 24, 4, no, no) \
\
SC( 92, 27, 25, 1, no, no) \
SC( 93, 27, 25, 2, no, no) \
SC( 94, 27, 25, 3, no, no) \
SC( 95, 27, 25, 4, no, no) \
\
SC( 96, 28, 26, 1, no, no) \
SC( 97, 28, 26, 2, no, no) \
SC( 98, 28, 26, 3, no, no) \
SC( 99, 28, 26, 4, no, no) \
\
SC(100, 29, 27, 1, no, no) \
SC(101, 29, 27, 2, no, no) \
SC(102, 29, 27, 3, no, no) \
SC(103, 29, 27, 4, no, no) \
\
SC(104, 30, 28, 1, no, no) \
SC(105, 30, 28, 2, no, no) \
SC(106, 30, 28, 3, no, no) \
SC(107, 30, 28, 4, no, no) \
\
SC(108, 31, 29, 1, no, no) \
SC(109, 31, 29, 2, no, no) \
SC(110, 31, 29, 3, no, no)

/* Macro must end on the line above: a trailing '\' here would splice the
 * following #define into the SIZE_CLASSES replacement list, and
 * SIZE_CLASSES_DEFINED would never actually be defined. */
#define SIZE_CLASSES_DEFINED
#define NTBINS 0
#define NLBINS 32
#define NBINS 47
#define LG_TINY_MAXCLASS "NA"
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 12)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 3, 3, 0, yes, 3) \
\
SC( 1, 3, 3, 1, yes, 3) \
SC( 2, 4, 4, 1, yes, 4) \
SC( 3, 4, 4, 2, yes, 4) \
SC( 4, 4, 4, 3, yes, 4) \
\
SC( 5, 6, 4, 1, yes, 4) \
SC( 6, 6, 4, 2, yes, 4) \
SC( 7, 6, 4, 3, yes, 4) \
SC( 8, 6, 4, 4, yes, 4) \
\
SC( 9, 7, 5, 1, yes, 5) \
SC( 10, 7, 5, 2, yes, 5) \
SC( 11, 7, 5, 3, yes, 5) \
SC( 12, 7, 5, 4, yes, 5) \
\
SC( 13, 8, 6, 1, yes, 6) \
SC( 14, 8, 6, 2, yes, 6) \
SC( 15, 8, 6, 3, yes, 6) \
SC( 16, 8, 6, 4, yes, 6) \
\
SC( 17, 9, 7, 1, yes, 7) \
SC( 18, 9, 7, 2, yes, 7) \
SC( 19, 9, 7, 3, yes, 7) \
SC( 20, 9, 7, 4, yes, 7) \
\
SC( 21, 10, 8, 1, yes, 8) \
SC( 22, 10, 8, 2, yes, 8) \
SC( 23, 10, 8, 3, yes, 8) \
SC( 24, 10, 8, 4, yes, 8) \
\
SC( 25, 11, 9, 1, yes, 9) \
SC( 26, 11, 9, 2, yes, 9) \
SC( 27, 11, 9, 3, yes, 9) \
SC( 28, 11, 9, 4, no, 9) \
\
SC( 29, 12, 10, 1, no, no) \
SC( 30, 12, 10, 2, no, no) \
SC( 31, 12, 10, 3, no, no) \
SC( 32, 12, 10, 4, no, no) \
\
SC( 33, 13, 11, 1, no, no) \
SC( 34, 13, 11, 2, no, no) \
SC( 35, 13, 11, 3, no, no) \
SC( 36, 13, 11, 4, no, no) \
\
SC( 37, 14, 12, 1, no, no) \
SC( 38, 14, 12, 2, no, no) \
SC( 39, 14, 12, 3, no, no) \
SC( 40, 14, 12, 4, no, no) \
\
SC( 41, 15, 13, 1, no, no) \
SC( 42, 15, 13, 2, no, no) \
SC( 43, 15, 13, 3, no, no) \
SC( 44, 15, 13, 4, no, no) \
\
SC( 45, 16, 14, 1, no, no) \
SC( 46, 16, 14, 2, no, no) \
SC( 47, 16, 14, 3, no, no) \
SC( 48, 16, 14, 4, no, no) \
\
SC( 49, 17, 15, 1, no, no) \
SC( 50, 17, 15, 2, no, no) \
SC( 51, 17, 15, 3, no, no) \
SC( 52, 17, 15, 4, no, no) \
\
SC( 53, 18, 16, 1, no, no) \
SC( 54, 18, 16, 2, no, no) \
SC( 55, 18, 16, 3, no, no) \
SC( 56, 18, 16, 4, no, no) \
\
SC( 57, 19, 17, 1, no, no) \
SC( 58, 19, 17, 2, no, no) \
SC( 59, 19, 17, 3, no, no) \
SC( 60, 19, 17, 4, no, no) \
\
SC( 61, 20, 18, 1, no, no) \
SC( 62, 20, 18, 2, no, no) \
SC( 63, 20, 18, 3, no, no) \
SC( 64, 20, 18, 4, no, no) \
\
SC( 65, 21, 19, 1, no, no) \
SC( 66, 21, 19, 2, no, no) \
SC( 67, 21, 19, 3, no, no) \
SC( 68, 21, 19, 4, no, no) \
\
SC( 69, 22, 20, 1, no, no) \
SC( 70, 22, 20, 2, no, no) \
SC( 71, 22, 20, 3, no, no) \
SC( 72, 22, 20, 4, no, no) \
\
SC( 73, 23, 21, 1, no, no) \
SC( 74, 23, 21, 2, no, no) \
SC( 75, 23, 21, 3, no, no) \
SC( 76, 23, 21, 4, no, no) \
\
SC( 77, 24, 22, 1, no, no) \
SC( 78, 24, 22, 2, no, no) \
SC( 79, 24, 22, 3, no, no) \
SC( 80, 24, 22, 4, no, no) \
\
SC( 81, 25, 23, 1, no, no) \
SC( 82, 25, 23, 2, no, no) \
SC( 83, 25, 23, 3, no, no) \
SC( 84, 25, 23, 4, no, no) \
\
SC( 85, 26, 24, 1, no, no) \
SC( 86, 26, 24, 2, no, no) \
SC( 87, 26, 24, 3, no, no) \
SC( 88, 26, 24, 4, no, no) \
\
SC( 89, 27, 25, 1, no, no) \
SC( 90, 27, 25, 2, no, no) \
SC( 91, 27, 25, 3, no, no) \
SC( 92, 27, 25, 4, no, no) \
\
SC( 93, 28, 26, 1, no, no) \
SC( 94, 28, 26, 2, no, no) \
SC( 95, 28, 26, 3, no, no) \
SC( 96, 28, 26, 4, no, no) \
\
SC( 97, 29, 27, 1, no, no) \
SC( 98, 29, 27, 2, no, no) \
SC( 99, 29, 27, 3, no, no) \
SC(100, 29, 27, 4, no, no) \
\
SC(101, 30, 28, 1, no, no) \
SC(102, 30, 28, 2, no, no) \
SC(103, 30, 28, 3, no, no) \
SC(104, 30, 28, 4, no, no) \
\
SC(105, 31, 29, 1, no, no) \
SC(106, 31, 29, 2, no, no) \
SC(107, 31, 29, 3, no, no)

/* Macro must end on the line above: a trailing '\' here would splice the
 * following #define into the SIZE_CLASSES replacement list, and
 * SIZE_CLASSES_DEFINED would never actually be defined. */
#define SIZE_CLASSES_DEFINED
#define NTBINS 1
#define NLBINS 29
#define NBINS 28
#define LG_TINY_MAXCLASS 3
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 11) + (((size_t)3) << 9))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 13)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 3, 3, 0, yes, 3) \
\
SC( 1, 3, 3, 1, yes, 3) \
SC( 2, 4, 4, 1, yes, 4) \
SC( 3, 4, 4, 2, yes, 4) \
SC( 4, 4, 4, 3, yes, 4) \
\
SC( 5, 6, 4, 1, yes, 4) \
SC( 6, 6, 4, 2, yes, 4) \
SC( 7, 6, 4, 3, yes, 4) \
SC( 8, 6, 4, 4, yes, 4) \
\
SC( 9, 7, 5, 1, yes, 5) \
SC( 10, 7, 5, 2, yes, 5) \
SC( 11, 7, 5, 3, yes, 5) \
SC( 12, 7, 5, 4, yes, 5) \
\
SC( 13, 8, 6, 1, yes, 6) \
SC( 14, 8, 6, 2, yes, 6) \
SC( 15, 8, 6, 3, yes, 6) \
SC( 16, 8, 6, 4, yes, 6) \
\
SC( 17, 9, 7, 1, yes, 7) \
SC( 18, 9, 7, 2, yes, 7) \
SC( 19, 9, 7, 3, yes, 7) \
SC( 20, 9, 7, 4, yes, 7) \
\
SC( 21, 10, 8, 1, yes, 8) \
SC( 22, 10, 8, 2, yes, 8) \
SC( 23, 10, 8, 3, yes, 8) \
SC( 24, 10, 8, 4, yes, 8) \
\
SC( 25, 11, 9, 1, yes, 9) \
SC( 26, 11, 9, 2, yes, 9) \
SC( 27, 11, 9, 3, yes, 9) \
SC( 28, 11, 9, 4, yes, 9) \
\
SC( 29, 12, 10, 1, yes, no) \
SC( 30, 12, 10, 2, yes, no) \
SC( 31, 12, 10, 3, yes, no) \
SC( 32, 12, 10, 4, no, no) \
\
SC( 33, 13, 11, 1, no, no) \
SC( 34, 13, 11, 2, no, no) \
SC( 35, 13, 11, 3, no, no) \
SC( 36, 13, 11, 4, no, no) \
\
SC( 37, 14, 12, 1, no, no) \
SC( 38, 14, 12, 2, no, no) \
SC( 39, 14, 12, 3, no, no) \
SC( 40, 14, 12, 4, no, no) \
\
SC( 41, 15, 13, 1, no, no) \
SC( 42, 15, 13, 2, no, no) \
SC( 43, 15, 13, 3, no, no) \
SC( 44, 15, 13, 4, no, no) \
\
SC( 45, 16, 14, 1, no, no) \
SC( 46, 16, 14, 2, no, no) \
SC( 47, 16, 14, 3, no, no) \
SC( 48, 16, 14, 4, no, no) \
\
SC( 49, 17, 15, 1, no, no) \
SC( 50, 17, 15, 2, no, no) \
SC( 51, 17, 15, 3, no, no) \
SC( 52, 17, 15, 4, no, no) \
\
SC( 53, 18, 16, 1, no, no) \
SC( 54, 18, 16, 2, no, no) \
SC( 55, 18, 16, 3, no, no) \
SC( 56, 18, 16, 4, no, no) \
\
SC( 57, 19, 17, 1, no, no) \
SC( 58, 19, 17, 2, no, no) \
SC( 59, 19, 17, 3, no, no) \
SC( 60, 19, 17, 4, no, no) \
\
SC( 61, 20, 18, 1, no, no) \
SC( 62, 20, 18, 2, no, no) \
SC( 63, 20, 18, 3, no, no) \
SC( 64, 20, 18, 4, no, no) \
\
SC( 65, 21, 19, 1, no, no) \
SC( 66, 21, 19, 2, no, no) \
SC( 67, 21, 19, 3, no, no) \
SC( 68, 21, 19, 4, no, no) \
\
SC( 69, 22, 20, 1, no, no) \
SC( 70, 22, 20, 2, no, no) \
SC( 71, 22, 20, 3, no, no) \
SC( 72, 22, 20, 4, no, no) \
\
SC( 73, 23, 21, 1, no, no) \
SC( 74, 23, 21, 2, no, no) \
SC( 75, 23, 21, 3, no, no) \
SC( 76, 23, 21, 4, no, no) \
\
SC( 77, 24, 22, 1, no, no) \
SC( 78, 24, 22, 2, no, no) \
SC( 79, 24, 22, 3, no, no) \
SC( 80, 24, 22, 4, no, no) \
\
SC( 81, 25, 23, 1, no, no) \
SC( 82, 25, 23, 2, no, no) \
SC( 83, 25, 23, 3, no, no) \
SC( 84, 25, 23, 4, no, no) \
\
SC( 85, 26, 24, 1, no, no) \
SC( 86, 26, 24, 2, no, no) \
SC( 87, 26, 24, 3, no, no) \
SC( 88, 26, 24, 4, no, no) \
\
SC( 89, 27, 25, 1, no, no) \
SC( 90, 27, 25, 2, no, no) \
SC( 91, 27, 25, 3, no, no) \
SC( 92, 27, 25, 4, no, no) \
\
SC( 93, 28, 26, 1, no, no) \
SC( 94, 28, 26, 2, no, no) \
SC( 95, 28, 26, 3, no, no) \
SC( 96, 28, 26, 4, no, no) \
\
SC( 97, 29, 27, 1, no, no) \
SC( 98, 29, 27, 2, no, no) \
SC( 99, 29, 27, 3, no, no) \
SC(100, 29, 27, 4, no, no) \
\
SC(101, 30, 28, 1, no, no) \
SC(102, 30, 28, 2, no, no) \
SC(103, 30, 28, 3, no, no) \
SC(104, 30, 28, 4, no, no) \
\
SC(105, 31, 29, 1, no, no) \
SC(106, 31, 29, 2, no, no) \
SC(107, 31, 29, 3, no, no)

/* Macro must end on the line above: a trailing '\' here would splice the
 * following #define into the SIZE_CLASSES replacement list, and
 * SIZE_CLASSES_DEFINED would never actually be defined. */
#define SIZE_CLASSES_DEFINED
#define NTBINS 1
#define NLBINS 29
#define NBINS 32
#define LG_TINY_MAXCLASS 3
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 12) + (((size_t)3) << 10))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 16)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 3, 3, 0, yes, 3) \
\
SC( 1, 3, 3, 1, yes, 3) \
SC( 2, 4, 4, 1, yes, 4) \
SC( 3, 4, 4, 2, yes, 4) \
SC( 4, 4, 4, 3, yes, 4) \
\
SC( 5, 6, 4, 1, yes, 4) \
SC( 6, 6, 4, 2, yes, 4) \
SC( 7, 6, 4, 3, yes, 4) \
SC( 8, 6, 4, 4, yes, 4) \
\
SC( 9, 7, 5, 1, yes, 5) \
SC( 10, 7, 5, 2, yes, 5) \
SC( 11, 7, 5, 3, yes, 5) \
SC( 12, 7, 5, 4, yes, 5) \
\
SC( 13, 8, 6, 1, yes, 6) \
SC( 14, 8, 6, 2, yes, 6) \
SC( 15, 8, 6, 3, yes, 6) \
SC( 16, 8, 6, 4, yes, 6) \
\
SC( 17, 9, 7, 1, yes, 7) \
SC( 18, 9, 7, 2, yes, 7) \
SC( 19, 9, 7, 3, yes, 7) \
SC( 20, 9, 7, 4, yes, 7) \
\
SC( 21, 10, 8, 1, yes, 8) \
SC( 22, 10, 8, 2, yes, 8) \
SC( 23, 10, 8, 3, yes, 8) \
SC( 24, 10, 8, 4, yes, 8) \
\
SC( 25, 11, 9, 1, yes, 9) \
SC( 26, 11, 9, 2, yes, 9) \
SC( 27, 11, 9, 3, yes, 9) \
SC( 28, 11, 9, 4, yes, 9) \
\
SC( 29, 12, 10, 1, yes, no) \
SC( 30, 12, 10, 2, yes, no) \
SC( 31, 12, 10, 3, yes, no) \
SC( 32, 12, 10, 4, yes, no) \
\
SC( 33, 13, 11, 1, yes, no) \
SC( 34, 13, 11, 2, yes, no) \
SC( 35, 13, 11, 3, yes, no) \
SC( 36, 13, 11, 4, yes, no) \
\
SC( 37, 14, 12, 1, yes, no) \
SC( 38, 14, 12, 2, yes, no) \
SC( 39, 14, 12, 3, yes, no) \
SC( 40, 14, 12, 4, yes, no) \
\
SC( 41, 15, 13, 1, yes, no) \
SC( 42, 15, 13, 2, yes, no) \
SC( 43, 15, 13, 3, yes, no) \
SC( 44, 15, 13, 4, no, no) \
\
SC( 45, 16, 14, 1, no, no) \
SC( 46, 16, 14, 2, no, no) \
SC( 47, 16, 14, 3, no, no) \
SC( 48, 16, 14, 4, no, no) \
\
SC( 49, 17, 15, 1, no, no) \
SC( 50, 17, 15, 2, no, no) \
SC( 51, 17, 15, 3, no, no) \
SC( 52, 17, 15, 4, no, no) \
\
SC( 53, 18, 16, 1, no, no) \
SC( 54, 18, 16, 2, no, no) \
SC( 55, 18, 16, 3, no, no) \
SC( 56, 18, 16, 4, no, no) \
\
SC( 57, 19, 17, 1, no, no) \
SC( 58, 19, 17, 2, no, no) \
SC( 59, 19, 17, 3, no, no) \
SC( 60, 19, 17, 4, no, no) \
\
SC( 61, 20, 18, 1, no, no) \
SC( 62, 20, 18, 2, no, no) \
SC( 63, 20, 18, 3, no, no) \
SC( 64, 20, 18, 4, no, no) \
\
SC( 65, 21, 19, 1, no, no) \
SC( 66, 21, 19, 2, no, no) \
SC( 67, 21, 19, 3, no, no) \
SC( 68, 21, 19, 4, no, no) \
\
SC( 69, 22, 20, 1, no, no) \
SC( 70, 22, 20, 2, no, no) \
SC( 71, 22, 20, 3, no, no) \
SC( 72, 22, 20, 4, no, no) \
\
SC( 73, 23, 21, 1, no, no) \
SC( 74, 23, 21, 2, no, no) \
SC( 75, 23, 21, 3, no, no) \
SC( 76, 23, 21, 4, no, no) \
\
SC( 77, 24, 22, 1, no, no) \
SC( 78, 24, 22, 2, no, no) \
SC( 79, 24, 22, 3, no, no) \
SC( 80, 24, 22, 4, no, no) \
\
SC( 81, 25, 23, 1, no, no) \
SC( 82, 25, 23, 2, no, no) \
SC( 83, 25, 23, 3, no, no) \
SC( 84, 25, 23, 4, no, no) \
\
SC( 85, 26, 24, 1, no, no) \
SC( 86, 26, 24, 2, no, no) \
SC( 87, 26, 24, 3, no, no) \
SC( 88, 26, 24, 4, no, no) \
\
SC( 89, 27, 25, 1, no, no) \
SC( 90, 27, 25, 2, no, no) \
SC( 91, 27, 25, 3, no, no) \
SC( 92, 27, 25, 4, no, no) \
\
SC( 93, 28, 26, 1, no, no) \
SC( 94, 28, 26, 2, no, no) \
SC( 95, 28, 26, 3, no, no) \
SC( 96, 28, 26, 4, no, no) \
\
SC( 97, 29, 27, 1, no, no) \
SC( 98, 29, 27, 2, no, no) \
SC( 99, 29, 27, 3, no, no) \
SC(100, 29, 27, 4, no, no) \
\
SC(101, 30, 28, 1, no, no) \
SC(102, 30, 28, 2, no, no) \
SC(103, 30, 28, 3, no, no) \
SC(104, 30, 28, 4, no, no) \
\
SC(105, 31, 29, 1, no, no) \
SC(106, 31, 29, 2, no, no) \
SC(107, 31, 29, 3, no, no)

/* Macro must end on the line above: a trailing '\' here would splice the
 * following #define into the SIZE_CLASSES replacement list, and
 * SIZE_CLASSES_DEFINED would never actually be defined. */
#define SIZE_CLASSES_DEFINED
#define NTBINS 1
#define NLBINS 29
#define NBINS 44
#define LG_TINY_MAXCLASS 3
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 12)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 4, 4, 0, yes, 4) \
SC( 1, 4, 4, 1, yes, 4) \
SC( 2, 4, 4, 2, yes, 4) \
SC( 3, 4, 4, 3, yes, 4) \
\
SC( 4, 6, 4, 1, yes, 4) \
SC( 5, 6, 4, 2, yes, 4) \
SC( 6, 6, 4, 3, yes, 4) \
SC( 7, 6, 4, 4, yes, 4) \
\
SC( 8, 7, 5, 1, yes, 5) \
SC( 9, 7, 5, 2, yes, 5) \
SC( 10, 7, 5, 3, yes, 5) \
SC( 11, 7, 5, 4, yes, 5) \
\
SC( 12, 8, 6, 1, yes, 6) \
SC( 13, 8, 6, 2, yes, 6) \
SC( 14, 8, 6, 3, yes, 6) \
SC( 15, 8, 6, 4, yes, 6) \
\
SC( 16, 9, 7, 1, yes, 7) \
SC( 17, 9, 7, 2, yes, 7) \
SC( 18, 9, 7, 3, yes, 7) \
SC( 19, 9, 7, 4, yes, 7) \
\
SC( 20, 10, 8, 1, yes, 8) \
SC( 21, 10, 8, 2, yes, 8) \
SC( 22, 10, 8, 3, yes, 8) \
SC( 23, 10, 8, 4, yes, 8) \
\
SC( 24, 11, 9, 1, yes, 9) \
SC( 25, 11, 9, 2, yes, 9) \
SC( 26, 11, 9, 3, yes, 9) \
SC( 27, 11, 9, 4, no, 9) \
\
SC( 28, 12, 10, 1, no, no) \
SC( 29, 12, 10, 2, no, no) \
SC( 30, 12, 10, 3, no, no) \
SC( 31, 12, 10, 4, no, no) \
\
SC( 32, 13, 11, 1, no, no) \
SC( 33, 13, 11, 2, no, no) \
SC( 34, 13, 11, 3, no, no) \
SC( 35, 13, 11, 4, no, no) \
\
SC( 36, 14, 12, 1, no, no) \
SC( 37, 14, 12, 2, no, no) \
SC( 38, 14, 12, 3, no, no) \
SC( 39, 14, 12, 4, no, no) \
\
SC( 40, 15, 13, 1, no, no) \
SC( 41, 15, 13, 2, no, no) \
SC( 42, 15, 13, 3, no, no) \
SC( 43, 15, 13, 4, no, no) \
\
SC( 44, 16, 14, 1, no, no) \
SC( 45, 16, 14, 2, no, no) \
SC( 46, 16, 14, 3, no, no) \
SC( 47, 16, 14, 4, no, no) \
\
SC( 48, 17, 15, 1, no, no) \
SC( 49, 17, 15, 2, no, no) \
SC( 50, 17, 15, 3, no, no) \
SC( 51, 17, 15, 4, no, no) \
\
SC( 52, 18, 16, 1, no, no) \
SC( 53, 18, 16, 2, no, no) \
SC( 54, 18, 16, 3, no, no) \
SC( 55, 18, 16, 4, no, no) \
\
SC( 56, 19, 17, 1, no, no) \
SC( 57, 19, 17, 2, no, no) \
SC( 58, 19, 17, 3, no, no) \
SC( 59, 19, 17, 4, no, no) \
\
SC( 60, 20, 18, 1, no, no) \
SC( 61, 20, 18, 2, no, no) \
SC( 62, 20, 18, 3, no, no) \
SC( 63, 20, 18, 4, no, no) \
\
SC( 64, 21, 19, 1, no, no) \
SC( 65, 21, 19, 2, no, no) \
SC( 66, 21, 19, 3, no, no) \
SC( 67, 21, 19, 4, no, no) \
\
SC( 68, 22, 20, 1, no, no) \
SC( 69, 22, 20, 2, no, no) \
SC( 70, 22, 20, 3, no, no) \
SC( 71, 22, 20, 4, no, no) \
\
SC( 72, 23, 21, 1, no, no) \
SC( 73, 23, 21, 2, no, no) \
SC( 74, 23, 21, 3, no, no) \
SC( 75, 23, 21, 4, no, no) \
\
SC( 76, 24, 22, 1, no, no) \
SC( 77, 24, 22, 2, no, no) \
SC( 78, 24, 22, 3, no, no) \
SC( 79, 24, 22, 4, no, no) \
\
SC( 80, 25, 23, 1, no, no) \
SC( 81, 25, 23, 2, no, no) \
SC( 82, 25, 23, 3, no, no) \
SC( 83, 25, 23, 4, no, no) \
\
SC( 84, 26, 24, 1, no, no) \
SC( 85, 26, 24, 2, no, no) \
SC( 86, 26, 24, 3, no, no) \
SC( 87, 26, 24, 4, no, no) \
\
SC( 88, 27, 25, 1, no, no) \
SC( 89, 27, 25, 2, no, no) \
SC( 90, 27, 25, 3, no, no) \
SC( 91, 27, 25, 4, no, no) \
\
SC( 92, 28, 26, 1, no, no) \
SC( 93, 28, 26, 2, no, no) \
SC( 94, 28, 26, 3, no, no) \
SC( 95, 28, 26, 4, no, no) \
\
SC( 96, 29, 27, 1, no, no) \
SC( 97, 29, 27, 2, no, no) \
SC( 98, 29, 27, 3, no, no) \
SC( 99, 29, 27, 4, no, no) \
\
SC(100, 30, 28, 1, no, no) \
SC(101, 30, 28, 2, no, no) \
SC(102, 30, 28, 3, no, no) \
SC(103, 30, 28, 4, no, no) \
\
SC(104, 31, 29, 1, no, no) \
SC(105, 31, 29, 2, no, no) \
SC(106, 31, 29, 3, no, no)

/* Macro must end on the line above: a trailing '\' here would splice the
 * following #define into the SIZE_CLASSES replacement list, and
 * SIZE_CLASSES_DEFINED would never actually be defined. */
#define SIZE_CLASSES_DEFINED
#define NTBINS 0
#define NLBINS 28
#define NBINS 27
#define LG_TINY_MAXCLASS "NA"
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 11) + (((size_t)3) << 9))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 13)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 4, 4, 0, yes, 4) \
SC( 1, 4, 4, 1, yes, 4) \
SC( 2, 4, 4, 2, yes, 4) \
SC( 3, 4, 4, 3, yes, 4) \
\
SC( 4, 6, 4, 1, yes, 4) \
SC( 5, 6, 4, 2, yes, 4) \
SC( 6, 6, 4, 3, yes, 4) \
SC( 7, 6, 4, 4, yes, 4) \
\
SC( 8, 7, 5, 1, yes, 5) \
SC( 9, 7, 5, 2, yes, 5) \
SC( 10, 7, 5, 3, yes, 5) \
SC( 11, 7, 5, 4, yes, 5) \
\
SC( 12, 8, 6, 1, yes, 6) \
SC( 13, 8, 6, 2, yes, 6) \
SC( 14, 8, 6, 3, yes, 6) \
SC( 15, 8, 6, 4, yes, 6) \
\
SC( 16, 9, 7, 1, yes, 7) \
SC( 17, 9, 7, 2, yes, 7) \
SC( 18, 9, 7, 3, yes, 7) \
SC( 19, 9, 7, 4, yes, 7) \
\
SC( 20, 10, 8, 1, yes, 8) \
SC( 21, 10, 8, 2, yes, 8) \
SC( 22, 10, 8, 3, yes, 8) \
SC( 23, 10, 8, 4, yes, 8) \
\
SC( 24, 11, 9, 1, yes, 9) \
SC( 25, 11, 9, 2, yes, 9) \
SC( 26, 11, 9, 3, yes, 9) \
SC( 27, 11, 9, 4, yes, 9) \
\
SC( 28, 12, 10, 1, yes, no) \
SC( 29, 12, 10, 2, yes, no) \
SC( 30, 12, 10, 3, yes, no) \
SC( 31, 12, 10, 4, no, no) \
\
SC( 32, 13, 11, 1, no, no) \
SC( 33, 13, 11, 2, no, no) \
SC( 34, 13, 11, 3, no, no) \
SC( 35, 13, 11, 4, no, no) \
\
SC( 36, 14, 12, 1, no, no) \
SC( 37, 14, 12, 2, no, no) \
SC( 38, 14, 12, 3, no, no) \
SC( 39, 14, 12, 4, no, no) \
\
SC( 40, 15, 13, 1, no, no) \
SC( 41, 15, 13, 2, no, no) \
SC( 42, 15, 13, 3, no, no) \
SC( 43, 15, 13, 4, no, no) \
\
SC( 44, 16, 14, 1, no, no) \
SC( 45, 16, 14, 2, no, no) \
SC( 46, 16, 14, 3, no, no) \
SC( 47, 16, 14, 4, no, no) \
\
SC( 48, 17, 15, 1, no, no) \
SC( 49, 17, 15, 2, no, no) \
SC( 50, 17, 15, 3, no, no) \
SC( 51, 17, 15, 4, no, no) \
\
SC( 52, 18, 16, 1, no, no) \
SC( 53, 18, 16, 2, no, no) \
SC( 54, 18, 16, 3, no, no) \
SC( 55, 18, 16, 4, no, no) \
\
SC( 56, 19, 17, 1, no, no) \
SC( 57, 19, 17, 2, no, no) \
SC( 58, 19, 17, 3, no, no) \
SC( 59, 19, 17, 4, no, no) \
\
SC( 60, 20, 18, 1, no, no) \
SC( 61, 20, 18, 2, no, no) \
SC( 62, 20, 18, 3, no, no) \
SC( 63, 20, 18, 4, no, no) \
\
SC( 64, 21, 19, 1, no, no) \
SC( 65, 21, 19, 2, no, no) \
SC( 66, 21, 19, 3, no, no) \
SC( 67, 21, 19, 4, no, no) \
\
SC( 68, 22, 20, 1, no, no) \
SC( 69, 22, 20, 2, no, no) \
SC( 70, 22, 20, 3, no, no) \
SC( 71, 22, 20, 4, no, no) \
\
SC( 72, 23, 21, 1, no, no) \
SC( 73, 23, 21, 2, no, no) \
SC( 74, 23, 21, 3, no, no) \
SC( 75, 23, 21, 4, no, no) \
\
SC( 76, 24, 22, 1, no, no) \
SC( 77, 24, 22, 2, no, no) \
SC( 78, 24, 22, 3, no, no) \
SC( 79, 24, 22, 4, no, no) \
\
SC( 80, 25, 23, 1, no, no) \
SC( 81, 25, 23, 2, no, no) \
SC( 82, 25, 23, 3, no, no) \
SC( 83, 25, 23, 4, no, no) \
\
SC( 84, 26, 24, 1, no, no) \
SC( 85, 26, 24, 2, no, no) \
SC( 86, 26, 24, 3, no, no) \
SC( 87, 26, 24, 4, no, no) \
\
SC( 88, 27, 25, 1, no, no) \
SC( 89, 27, 25, 2, no, no) \
SC( 90, 27, 25, 3, no, no) \
SC( 91, 27, 25, 4, no, no) \
\
SC( 92, 28, 26, 1, no, no) \
SC( 93, 28, 26, 2, no, no) \
SC( 94, 28, 26, 3, no, no) \
SC( 95, 28, 26, 4, no, no) \
\
SC( 96, 29, 27, 1, no, no) \
SC( 97, 29, 27, 2, no, no) \
SC( 98, 29, 27, 3, no, no) \
SC( 99, 29, 27, 4, no, no) \
\
SC(100, 30, 28, 1, no, no) \
SC(101, 30, 28, 2, no, no) \
SC(102, 30, 28, 3, no, no) \
SC(103, 30, 28, 4, no, no) \
\
SC(104, 31, 29, 1, no, no) \
SC(105, 31, 29, 2, no, no) \
SC(106, 31, 29, 3, no, no)

/* Macro must end on the line above: a trailing '\' here would splice the
 * following #define into the SIZE_CLASSES replacement list, and
 * SIZE_CLASSES_DEFINED would never actually be defined. */
#define SIZE_CLASSES_DEFINED
#define NTBINS 0
#define NLBINS 28
#define NBINS 31
#define LG_TINY_MAXCLASS "NA"
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 12) + (((size_t)3) << 10))
#endif
#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 16)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 4, 4, 0, yes, 4) \
SC( 1, 4, 4, 1, yes, 4) \
SC( 2, 4, 4, 2, yes, 4) \
SC( 3, 4, 4, 3, yes, 4) \
\
SC( 4, 6, 4, 1, yes, 4) \
SC( 5, 6, 4, 2, yes, 4) \
SC( 6, 6, 4, 3, yes, 4) \
SC( 7, 6, 4, 4, yes, 4) \
\
SC( 8, 7, 5, 1, yes, 5) \
SC( 9, 7, 5, 2, yes, 5) \
SC( 10, 7, 5, 3, yes, 5) \
SC( 11, 7, 5, 4, yes, 5) \
\
SC( 12, 8, 6, 1, yes, 6) \
SC( 13, 8, 6, 2, yes, 6) \
SC( 14, 8, 6, 3, yes, 6) \
SC( 15, 8, 6, 4, yes, 6) \
\
SC( 16, 9, 7, 1, yes, 7) \
SC( 17, 9, 7, 2, yes, 7) \
SC( 18, 9, 7, 3, yes, 7) \
SC( 19, 9, 7, 4, yes, 7) \
\
SC( 20, 10, 8, 1, yes, 8) \
SC( 21, 10, 8, 2, yes, 8) \
SC( 22, 10, 8, 3, yes, 8) \
SC( 23, 10, 8, 4, yes, 8) \
\
SC( 24, 11, 9, 1, yes, 9) \
SC( 25, 11, 9, 2, yes, 9) \
SC( 26, 11, 9, 3, yes, 9) \
SC( 27, 11, 9, 4, yes, 9) \
\
SC( 28, 12, 10, 1, yes, no) \
SC( 29, 12, 10, 2, yes, no) \
SC( 30, 12, 10, 3, yes, no) \
SC( 31, 12, 10, 4, yes, no) \
\
SC( 32, 13, 11, 1, yes, no) \
SC( 33, 13, 11, 2, yes, no) \
SC( 34, 13, 11, 3, yes, no) \
SC( 35, 13, 11, 4, yes, no) \
\
SC( 36, 14, 12, 1, yes, no) \
SC( 37, 14, 12, 2, yes, no) \
SC( 38, 14, 12, 3, yes, no) \
SC( 39, 14, 12, 4, yes, no) \
\
SC( 40, 15, 13, 1, yes, no) \
SC( 41, 15, 13, 2, yes, no) \
SC( 42, 15, 13, 3, yes, no) \
SC( 43, 15, 13, 4, no, no) \
\
SC( 44, 16, 14, 1, no, no) \
SC( 45, 16, 14, 2, no, no) \
SC( 46, 16, 14, 3, no, no) \
SC( 47, 16, 14, 4, no, no) \
\
SC( 48, 17, 15, 1, no, no) \
SC( 49, 17, 15, 2, no, no) \
SC( 50, 17, 15, 3, no, no) \
SC( 51, 17, 15, 4, no, no) \
\
SC( 52, 18, 16, 1, no, no) \
SC( 53, 18, 16, 2, no, no) \
SC( 54, 18, 16, 3, no, no) \
SC( 55, 18, 16, 4, no, no) \
\
SC( 56, 19, 17, 1, no, no) \
SC( 57, 19, 17, 2, no, no) \
SC( 58, 19, 17, 3, no, no) \
SC( 59, 19, 17, 4, no, no) \
\
SC( 60, 20, 18, 1, no, no) \
SC( 61, 20, 18, 2, no, no) \
SC( 62, 20, 18, 3, no, no) \
SC( 63, 20, 18, 4, no, no) \
\
SC( 64, 21, 19, 1, no, no) \
SC( 65, 21, 19, 2, no, no) \
SC( 66, 21, 19, 3, no, no) \
SC( 67, 21, 19, 4, no, no) \
\
SC( 68, 22, 20, 1, no, no) \
SC( 69, 22, 20, 2, no, no) \
SC( 70, 22, 20, 3, no, no) \
SC( 71, 22, 20, 4, no, no) \
\
SC( 72, 23, 21, 1, no, no) \
SC( 73, 23, 21, 2, no, no) \
SC( 74, 23, 21, 3, no, no) \
SC( 75, 23, 21, 4, no, no) \
\
SC( 76, 24, 22, 1, no, no) \
SC( 77, 24, 22, 2, no, no) \
SC( 78, 24, 22, 3, no, no) \
SC( 79, 24, 22, 4, no, no) \
\
SC( 80, 25, 23, 1, no, no) \
SC( 81, 25, 23, 2, no, no) \
SC( 82, 25, 23, 3, no, no) \
SC( 83, 25, 23, 4, no, no) \
\
SC( 84, 26, 24, 1, no, no) \
SC( 85, 26, 24, 2, no, no) \
SC( 86, 26, 24, 3, no, no) \
SC( 87, 26, 24, 4, no, no) \
\
SC( 88, 27, 25, 1, no, no) \
SC( 89, 27, 25, 2, no, no) \
SC( 90, 27, 25, 3, no, no) \
SC( 91, 27, 25, 4, no, no) \
\
SC( 92, 28, 26, 1, no, no) \
SC( 93, 28, 26, 2, no, no) \
SC( 94, 28, 26, 3, no, no) \
SC( 95, 28, 26, 4, no, no) \
\
SC( 96, 29, 27, 1, no, no) \
SC( 97, 29, 27, 2, no, no) \
SC( 98, 29, 27, 3, no, no) \
SC( 99, 29, 27, 4, no, no) \
\
SC(100, 30, 28, 1, no, no) \
SC(101, 30, 28, 2, no, no) \
SC(102, 30, 28, 3, no, no) \
SC(103, 30, 28, 4, no, no) \
\
SC(104, 31, 29, 1, no, no) \
SC(105, 31, 29, 2, no, no) \
SC(106, 31, 29, 3, no, no)

/* Macro must end on the line above: a trailing '\' here would splice the
 * following #define into the SIZE_CLASSES replacement list, and
 * SIZE_CLASSES_DEFINED would never actually be defined. */
#define SIZE_CLASSES_DEFINED
#define NTBINS 0
#define NLBINS 28
#define NBINS 43
#define LG_TINY_MAXCLASS "NA"
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13))
#endif
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 12)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 3, 3, 0, yes, 3) \
SC( 1, 3, 3, 1, yes, 3) \
SC( 2, 3, 3, 2, yes, 3) \
SC( 3, 3, 3, 3, yes, 3) \
\
SC( 4, 5, 3, 1, yes, 3) \
SC( 5, 5, 3, 2, yes, 3) \
SC( 6, 5, 3, 3, yes, 3) \
SC( 7, 5, 3, 4, yes, 3) \
\
SC( 8, 6, 4, 1, yes, 4) \
SC( 9, 6, 4, 2, yes, 4) \
SC( 10, 6, 4, 3, yes, 4) \
SC( 11, 6, 4, 4, yes, 4) \
\
SC( 12, 7, 5, 1, yes, 5) \
SC( 13, 7, 5, 2, yes, 5) \
SC( 14, 7, 5, 3, yes, 5) \
SC( 15, 7, 5, 4, yes, 5) \
\
SC( 16, 8, 6, 1, yes, 6) \
SC( 17, 8, 6, 2, yes, 6) \
SC( 18, 8, 6, 3, yes, 6) \
SC( 19, 8, 6, 4, yes, 6) \
\
SC( 20, 9, 7, 1, yes, 7) \
SC( 21, 9, 7, 2, yes, 7) \
SC( 22, 9, 7, 3, yes, 7) \
SC( 23, 9, 7, 4, yes, 7) \
\
SC( 24, 10, 8, 1, yes, 8) \
SC( 25, 10, 8, 2, yes, 8) \
SC( 26, 10, 8, 3, yes, 8) \
SC( 27, 10, 8, 4, yes, 8) \
\
SC( 28, 11, 9, 1, yes, 9) \
SC( 29, 11, 9, 2, yes, 9) \
SC( 30, 11, 9, 3, yes, 9) \
SC( 31, 11, 9, 4, no, 9) \
\
SC( 32, 12, 10, 1, no, no) \
SC( 33, 12, 10, 2, no, no) \
SC( 34, 12, 10, 3, no, no) \
SC( 35, 12, 10, 4, no, no) \
\
SC( 36, 13, 11, 1, no, no) \
SC( 37, 13, 11, 2, no, no) \
SC( 38, 13, 11, 3, no, no) \
SC( 39, 13, 11, 4, no, no) \
\
SC( 40, 14, 12, 1, no, no) \
SC( 41, 14, 12, 2, no, no) \
SC( 42, 14, 12, 3, no, no) \
SC( 43, 14, 12, 4, no, no) \
\
SC( 44, 15, 13, 1, no, no) \
SC( 45, 15, 13, 2, no, no) \
SC( 46, 15, 13, 3, no, no) \
SC( 47, 15, 13, 4, no, no) \
\
SC( 48, 16, 14, 1, no, no) \
SC( 49, 16, 14, 2, no, no) \
SC( 50, 16, 14, 3, no, no) \
SC( 51, 16, 14, 4, no, no) \
\
SC( 52, 17, 15, 1, no, no) \
SC( 53, 17, 15, 2, no, no) \
SC( 54, 17, 15, 3, no, no) \
SC( 55, 17, 15, 4, no, no) \
\
SC( 56, 18, 16, 1, no, no) \
SC( 57, 18, 16, 2, no, no) \
SC( 58, 18, 16, 3, no, no) \
SC( 59, 18, 16, 4, no, no) \
\
SC( 60, 19, 17, 1, no, no) \
SC( 61, 19, 17, 2, no, no) \
SC( 62, 19, 17, 3, no, no) \
SC( 63, 19, 17, 4, no, no) \
\
SC( 64, 20, 18, 1, no, no) \
SC( 65, 20, 18, 2, no, no) \
SC( 66, 20, 18, 3, no, no) \
SC( 67, 20, 18, 4, no, no) \
\
SC( 68, 21, 19, 1, no, no) \
SC( 69, 21, 19, 2, no, no) \
SC( 70, 21, 19, 3, no, no) \
SC( 71, 21, 19, 4, no, no) \
\
SC( 72, 22, 20, 1, no, no) \
SC( 73, 22, 20, 2, no, no) \
SC( 74, 22, 20, 3, no, no) \
SC( 75, 22, 20, 4, no, no) \
\
SC( 76, 23, 21, 1, no, no) \
SC( 77, 23, 21, 2, no, no) \
SC( 78, 23, 21, 3, no, no) \
SC( 79, 23, 21, 4, no, no) \
\
SC( 80, 24, 22, 1, no, no) \
SC( 81, 24, 22, 2, no, no) \
SC( 82, 24, 22, 3, no, no) \
SC( 83, 24, 22, 4, no, no) \
\
SC( 84, 25, 23, 1, no, no) \
SC( 85, 25, 23, 2, no, no) \
SC( 86, 25, 23, 3, no, no) \
SC( 87, 25, 23, 4, no, no) \
\
SC( 88, 26, 24, 1, no, no) \
SC( 89, 26, 24, 2, no, no) \
SC( 90, 26, 24, 3, no, no) \
SC( 91, 26, 24, 4, no, no) \
\
SC( 92, 27, 25, 1, no, no) \
SC( 93, 27, 25, 2, no, no) \
SC( 94, 27, 25, 3, no, no) \
SC( 95, 27, 25, 4, no, no) \
\
SC( 96, 28, 26, 1, no, no) \
SC( 97, 28, 26, 2, no, no) \
SC( 98, 28, 26, 3, no, no) \
SC( 99, 28, 26, 4, no, no) \
\
SC(100, 29, 27, 1, no, no) \
SC(101, 29, 27, 2, no, no) \
SC(102, 29, 27, 3, no, no) \
SC(103, 29, 27, 4, no, no) \
\
SC(104, 30, 28, 1, no, no) \
SC(105, 30, 28, 2, no, no) \
SC(106, 30, 28, 3, no, no) \
SC(107, 30, 28, 4, no, no) \
\
SC(108, 31, 29, 1, no, no) \
SC(109, 31, 29, 2, no, no) \
SC(110, 31, 29, 3, no, no) \
SC(111, 31, 29, 4, no, no) \
\
SC(112, 32, 30, 1, no, no) \
SC(113, 32, 30, 2, no, no) \
SC(114, 32, 30, 3, no, no) \
SC(115, 32, 30, 4, no, no) \
\
SC(116, 33, 31, 1, no, no) \
SC(117, 33, 31, 2, no, no) \
SC(118, 33, 31, 3, no, no) \
SC(119, 33, 31, 4, no, no) \
\
SC(120, 34, 32, 1, no, no) \
SC(121, 34, 32, 2, no, no) \
SC(122, 34, 32, 3, no, no) \
SC(123, 34, 32, 4, no, no) \
\
SC(124, 35, 33, 1, no, no) \
SC(125, 35, 33, 2, no, no) \
SC(126, 35, 33, 3, no, no) \
SC(127, 35, 33, 4, no, no) \
\
SC(128, 36, 34, 1, no, no) \
SC(129, 36, 34, 2, no, no) \
SC(130, 36, 34, 3, no, no) \
SC(131, 36, 34, 4, no, no) \
\
SC(132, 37, 35, 1, no, no) \
SC(133, 37, 35, 2, no, no) \
SC(134, 37, 35, 3, no, no) \
SC(135, 37, 35, 4, no, no) \
\
SC(136, 38, 36, 1, no, no) \
SC(137, 38, 36, 2, no, no) \
SC(138, 38, 36, 3, no, no) \
SC(139, 38, 36, 4, no, no) \
\
SC(140, 39, 37, 1, no, no) \
SC(141, 39, 37, 2, no, no) \
SC(142, 39, 37, 3, no, no) \
SC(143, 39, 37, 4, no, no) \
\
SC(144, 40, 38, 1, no, no) \
SC(145, 40, 38, 2, no, no) \
SC(146, 40, 38, 3, no, no) \
SC(147, 40, 38, 4, no, no) \
\
SC(148, 41, 39, 1, no, no) \
SC(149, 41, 39, 2, no, no) \
SC(150, 41, 39, 3, no, no) \
SC(151, 41, 39, 4, no, no) \
\
SC(152, 42, 40, 1, no, no) \
SC(153, 42, 40, 2, no, no) \
SC(154, 42, 40, 3, no, no) \
SC(155, 42, 40, 4, no, no) \
\
SC(156, 43, 41, 1, no, no) \
SC(157, 43, 41, 2, no, no) \
SC(158, 43, 41, 3, no, no) \
SC(159, 43, 41, 4, no, no) \
\
SC(160, 44, 42, 1, no, no) \
SC(161, 44, 42, 2, no, no) \
SC(162, 44, 42, 3, no, no) \
SC(163, 44, 42, 4, no, no) \
\
SC(164, 45, 43, 1, no, no) \
SC(165, 45, 43, 2, no, no) \
SC(166, 45, 43, 3, no, no) \
SC(167, 45, 43, 4, no, no) \
\
SC(168, 46, 44, 1, no, no) \
SC(169, 46, 44, 2, no, no) \
SC(170, 46, 44, 3, no, no) \
SC(171, 46, 44, 4, no, no) \
\
SC(172, 47, 45, 1, no, no) \
SC(173, 47, 45, 2, no, no) \
SC(174, 47, 45, 3, no, no) \
SC(175, 47, 45, 4, no, no) \
\
SC(176, 48, 46, 1, no, no) \
SC(177, 48, 46, 2, no, no) \
SC(178, 48, 46, 3, no, no) \
SC(179, 48, 46, 4, no, no) \
\
SC(180, 49, 47, 1, no, no) \
SC(181, 49, 47, 2, no, no) \
SC(182, 49, 47, 3, no, no) \
SC(183, 49, 47, 4, no, no) \
\
SC(184, 50, 48, 1, no, no) \
SC(185, 50, 48, 2, no, no) \
SC(186, 50, 48, 3, no, no) \
SC(187, 50, 48, 4, no, no) \
\
SC(188, 51, 49, 1, no, no) \
SC(189, 51, 49, 2, no, no) \
SC(190, 51, 49, 3, no, no) \
SC(191, 51, 49, 4, no, no) \
\
SC(192, 52, 50, 1, no, no) \
SC(193, 52, 50, 2, no, no) \
SC(194, 52, 50, 3, no, no) \
SC(195, 52, 50, 4, no, no) \
\
SC(196, 53, 51, 1, no, no) \
SC(197, 53, 51, 2, no, no) \
SC(198, 53, 51, 3, no, no) \
SC(199, 53, 51, 4, no, no) \
\
SC(200, 54, 52, 1, no, no) \
SC(201, 54, 52, 2, no, no) \
SC(202, 54, 52, 3, no, no) \
SC(203, 54, 52, 4, no, no) \
\
SC(204, 55, 53, 1, no, no) \
SC(205, 55, 53, 2, no, no) \
SC(206, 55, 53, 3, no, no) \
SC(207, 55, 53, 4, no, no) \
\
SC(208, 56, 54, 1, no, no) \
SC(209, 56, 54, 2, no, no) \
SC(210, 56, 54, 3, no, no) \
SC(211, 56, 54, 4, no, no) \
\
SC(212, 57, 55, 1, no, no) \
SC(213, 57, 55, 2, no, no) \
SC(214, 57, 55, 3, no, no) \
SC(215, 57, 55, 4, no, no) \
\
SC(216, 58, 56, 1, no, no) \
SC(217, 58, 56, 2, no, no) \
SC(218, 58, 56, 3, no, no) \
SC(219, 58, 56, 4, no, no) \
\
SC(220, 59, 57, 1, no, no) \
SC(221, 59, 57, 2, no, no) \
SC(222, 59, 57, 3, no, no) \
SC(223, 59, 57, 4, no, no) \
\
SC(224, 60, 58, 1, no, no) \
SC(225, 60, 58, 2, no, no) \
SC(226, 60, 58, 3, no, no) \
SC(227, 60, 58, 4, no, no) \
\
SC(228, 61, 59, 1, no, no) \
SC(229, 61, 59, 2, no, no) \
SC(230, 61, 59, 3, no, no) \
SC(231, 61, 59, 4, no, no) \
\
SC(232, 62, 60, 1, no, no) \
SC(233, 62, 60, 2, no, no) \
SC(234, 62, 60, 3, no, no) \
SC(235, 62, 60, 4, no, no) \
\
SC(236, 63, 61, 1, no, no) \
SC(237, 63, 61, 2, no, no) \
SC(238, 63, 61, 3, no, no)

/* Macro must end on the line above: a trailing '\' here would splice the
 * following #define into the SIZE_CLASSES replacement list, and
 * SIZE_CLASSES_DEFINED would never actually be defined. */
#define SIZE_CLASSES_DEFINED
#define NTBINS 0
#define NLBINS 32
#define NBINS 31
#define LG_TINY_MAXCLASS "NA"
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 11) + (((size_t)3) << 9))
#endif
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 13)
/*
 * Size-class table: 64-bit pointers, no tiny classes, 8-byte quantum,
 * 8 KiB pages.  Auto-generated data (jemalloc's size_classes.sh); do not
 * hand-edit individual SC() rows.
 * Columns: SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup);
 * class size = (1 << lg_grp) + ndelta * (1 << lg_delta).
 * NOTE: the final SC() row must NOT end with a '\' line continuation,
 * otherwise the following "#define SIZE_CLASSES_DEFINED" is absorbed into
 * the macro body and never defined (fixed here).
 */
#define SIZE_CLASSES \
    /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
    SC(  0,  3,  3, 0, yes,  3) \
    SC(  1,  3,  3, 1, yes,  3) \
    SC(  2,  3,  3, 2, yes,  3) \
    SC(  3,  3,  3, 3, yes,  3) \
    \
    SC(  4,  5,  3, 1, yes,  3) \
    SC(  5,  5,  3, 2, yes,  3) \
    SC(  6,  5,  3, 3, yes,  3) \
    SC(  7,  5,  3, 4, yes,  3) \
    \
    SC(  8,  6,  4, 1, yes,  4) \
    SC(  9,  6,  4, 2, yes,  4) \
    SC( 10,  6,  4, 3, yes,  4) \
    SC( 11,  6,  4, 4, yes,  4) \
    \
    SC( 12,  7,  5, 1, yes,  5) \
    SC( 13,  7,  5, 2, yes,  5) \
    SC( 14,  7,  5, 3, yes,  5) \
    SC( 15,  7,  5, 4, yes,  5) \
    \
    SC( 16,  8,  6, 1, yes,  6) \
    SC( 17,  8,  6, 2, yes,  6) \
    SC( 18,  8,  6, 3, yes,  6) \
    SC( 19,  8,  6, 4, yes,  6) \
    \
    SC( 20,  9,  7, 1, yes,  7) \
    SC( 21,  9,  7, 2, yes,  7) \
    SC( 22,  9,  7, 3, yes,  7) \
    SC( 23,  9,  7, 4, yes,  7) \
    \
    SC( 24, 10,  8, 1, yes,  8) \
    SC( 25, 10,  8, 2, yes,  8) \
    SC( 26, 10,  8, 3, yes,  8) \
    SC( 27, 10,  8, 4, yes,  8) \
    \
    SC( 28, 11,  9, 1, yes,  9) \
    SC( 29, 11,  9, 2, yes,  9) \
    SC( 30, 11,  9, 3, yes,  9) \
    SC( 31, 11,  9, 4, yes,  9) \
    \
    SC( 32, 12, 10, 1, yes, no) \
    SC( 33, 12, 10, 2, yes, no) \
    SC( 34, 12, 10, 3, yes, no) \
    SC( 35, 12, 10, 4, no, no) \
    \
    SC( 36, 13, 11, 1, no, no) \
    SC( 37, 13, 11, 2, no, no) \
    SC( 38, 13, 11, 3, no, no) \
    SC( 39, 13, 11, 4, no, no) \
    \
    SC( 40, 14, 12, 1, no, no) \
    SC( 41, 14, 12, 2, no, no) \
    SC( 42, 14, 12, 3, no, no) \
    SC( 43, 14, 12, 4, no, no) \
    \
    SC( 44, 15, 13, 1, no, no) \
    SC( 45, 15, 13, 2, no, no) \
    SC( 46, 15, 13, 3, no, no) \
    SC( 47, 15, 13, 4, no, no) \
    \
    SC( 48, 16, 14, 1, no, no) \
    SC( 49, 16, 14, 2, no, no) \
    SC( 50, 16, 14, 3, no, no) \
    SC( 51, 16, 14, 4, no, no) \
    \
    SC( 52, 17, 15, 1, no, no) \
    SC( 53, 17, 15, 2, no, no) \
    SC( 54, 17, 15, 3, no, no) \
    SC( 55, 17, 15, 4, no, no) \
    \
    SC( 56, 18, 16, 1, no, no) \
    SC( 57, 18, 16, 2, no, no) \
    SC( 58, 18, 16, 3, no, no) \
    SC( 59, 18, 16, 4, no, no) \
    \
    SC( 60, 19, 17, 1, no, no) \
    SC( 61, 19, 17, 2, no, no) \
    SC( 62, 19, 17, 3, no, no) \
    SC( 63, 19, 17, 4, no, no) \
    \
    SC( 64, 20, 18, 1, no, no) \
    SC( 65, 20, 18, 2, no, no) \
    SC( 66, 20, 18, 3, no, no) \
    SC( 67, 20, 18, 4, no, no) \
    \
    SC( 68, 21, 19, 1, no, no) \
    SC( 69, 21, 19, 2, no, no) \
    SC( 70, 21, 19, 3, no, no) \
    SC( 71, 21, 19, 4, no, no) \
    \
    SC( 72, 22, 20, 1, no, no) \
    SC( 73, 22, 20, 2, no, no) \
    SC( 74, 22, 20, 3, no, no) \
    SC( 75, 22, 20, 4, no, no) \
    \
    SC( 76, 23, 21, 1, no, no) \
    SC( 77, 23, 21, 2, no, no) \
    SC( 78, 23, 21, 3, no, no) \
    SC( 79, 23, 21, 4, no, no) \
    \
    SC( 80, 24, 22, 1, no, no) \
    SC( 81, 24, 22, 2, no, no) \
    SC( 82, 24, 22, 3, no, no) \
    SC( 83, 24, 22, 4, no, no) \
    \
    SC( 84, 25, 23, 1, no, no) \
    SC( 85, 25, 23, 2, no, no) \
    SC( 86, 25, 23, 3, no, no) \
    SC( 87, 25, 23, 4, no, no) \
    \
    SC( 88, 26, 24, 1, no, no) \
    SC( 89, 26, 24, 2, no, no) \
    SC( 90, 26, 24, 3, no, no) \
    SC( 91, 26, 24, 4, no, no) \
    \
    SC( 92, 27, 25, 1, no, no) \
    SC( 93, 27, 25, 2, no, no) \
    SC( 94, 27, 25, 3, no, no) \
    SC( 95, 27, 25, 4, no, no) \
    \
    SC( 96, 28, 26, 1, no, no) \
    SC( 97, 28, 26, 2, no, no) \
    SC( 98, 28, 26, 3, no, no) \
    SC( 99, 28, 26, 4, no, no) \
    \
    SC(100, 29, 27, 1, no, no) \
    SC(101, 29, 27, 2, no, no) \
    SC(102, 29, 27, 3, no, no) \
    SC(103, 29, 27, 4, no, no) \
    \
    SC(104, 30, 28, 1, no, no) \
    SC(105, 30, 28, 2, no, no) \
    SC(106, 30, 28, 3, no, no) \
    SC(107, 30, 28, 4, no, no) \
    \
    SC(108, 31, 29, 1, no, no) \
    SC(109, 31, 29, 2, no, no) \
    SC(110, 31, 29, 3, no, no) \
    SC(111, 31, 29, 4, no, no) \
    \
    SC(112, 32, 30, 1, no, no) \
    SC(113, 32, 30, 2, no, no) \
    SC(114, 32, 30, 3, no, no) \
    SC(115, 32, 30, 4, no, no) \
    \
    SC(116, 33, 31, 1, no, no) \
    SC(117, 33, 31, 2, no, no) \
    SC(118, 33, 31, 3, no, no) \
    SC(119, 33, 31, 4, no, no) \
    \
    SC(120, 34, 32, 1, no, no) \
    SC(121, 34, 32, 2, no, no) \
    SC(122, 34, 32, 3, no, no) \
    SC(123, 34, 32, 4, no, no) \
    \
    SC(124, 35, 33, 1, no, no) \
    SC(125, 35, 33, 2, no, no) \
    SC(126, 35, 33, 3, no, no) \
    SC(127, 35, 33, 4, no, no) \
    \
    SC(128, 36, 34, 1, no, no) \
    SC(129, 36, 34, 2, no, no) \
    SC(130, 36, 34, 3, no, no) \
    SC(131, 36, 34, 4, no, no) \
    \
    SC(132, 37, 35, 1, no, no) \
    SC(133, 37, 35, 2, no, no) \
    SC(134, 37, 35, 3, no, no) \
    SC(135, 37, 35, 4, no, no) \
    \
    SC(136, 38, 36, 1, no, no) \
    SC(137, 38, 36, 2, no, no) \
    SC(138, 38, 36, 3, no, no) \
    SC(139, 38, 36, 4, no, no) \
    \
    SC(140, 39, 37, 1, no, no) \
    SC(141, 39, 37, 2, no, no) \
    SC(142, 39, 37, 3, no, no) \
    SC(143, 39, 37, 4, no, no) \
    \
    SC(144, 40, 38, 1, no, no) \
    SC(145, 40, 38, 2, no, no) \
    SC(146, 40, 38, 3, no, no) \
    SC(147, 40, 38, 4, no, no) \
    \
    SC(148, 41, 39, 1, no, no) \
    SC(149, 41, 39, 2, no, no) \
    SC(150, 41, 39, 3, no, no) \
    SC(151, 41, 39, 4, no, no) \
    \
    SC(152, 42, 40, 1, no, no) \
    SC(153, 42, 40, 2, no, no) \
    SC(154, 42, 40, 3, no, no) \
    SC(155, 42, 40, 4, no, no) \
    \
    SC(156, 43, 41, 1, no, no) \
    SC(157, 43, 41, 2, no, no) \
    SC(158, 43, 41, 3, no, no) \
    SC(159, 43, 41, 4, no, no) \
    \
    SC(160, 44, 42, 1, no, no) \
    SC(161, 44, 42, 2, no, no) \
    SC(162, 44, 42, 3, no, no) \
    SC(163, 44, 42, 4, no, no) \
    \
    SC(164, 45, 43, 1, no, no) \
    SC(165, 45, 43, 2, no, no) \
    SC(166, 45, 43, 3, no, no) \
    SC(167, 45, 43, 4, no, no) \
    \
    SC(168, 46, 44, 1, no, no) \
    SC(169, 46, 44, 2, no, no) \
    SC(170, 46, 44, 3, no, no) \
    SC(171, 46, 44, 4, no, no) \
    \
    SC(172, 47, 45, 1, no, no) \
    SC(173, 47, 45, 2, no, no) \
    SC(174, 47, 45, 3, no, no) \
    SC(175, 47, 45, 4, no, no) \
    \
    SC(176, 48, 46, 1, no, no) \
    SC(177, 48, 46, 2, no, no) \
    SC(178, 48, 46, 3, no, no) \
    SC(179, 48, 46, 4, no, no) \
    \
    SC(180, 49, 47, 1, no, no) \
    SC(181, 49, 47, 2, no, no) \
    SC(182, 49, 47, 3, no, no) \
    SC(183, 49, 47, 4, no, no) \
    \
    SC(184, 50, 48, 1, no, no) \
    SC(185, 50, 48, 2, no, no) \
    SC(186, 50, 48, 3, no, no) \
    SC(187, 50, 48, 4, no, no) \
    \
    SC(188, 51, 49, 1, no, no) \
    SC(189, 51, 49, 2, no, no) \
    SC(190, 51, 49, 3, no, no) \
    SC(191, 51, 49, 4, no, no) \
    \
    SC(192, 52, 50, 1, no, no) \
    SC(193, 52, 50, 2, no, no) \
    SC(194, 52, 50, 3, no, no) \
    SC(195, 52, 50, 4, no, no) \
    \
    SC(196, 53, 51, 1, no, no) \
    SC(197, 53, 51, 2, no, no) \
    SC(198, 53, 51, 3, no, no) \
    SC(199, 53, 51, 4, no, no) \
    \
    SC(200, 54, 52, 1, no, no) \
    SC(201, 54, 52, 2, no, no) \
    SC(202, 54, 52, 3, no, no) \
    SC(203, 54, 52, 4, no, no) \
    \
    SC(204, 55, 53, 1, no, no) \
    SC(205, 55, 53, 2, no, no) \
    SC(206, 55, 53, 3, no, no) \
    SC(207, 55, 53, 4, no, no) \
    \
    SC(208, 56, 54, 1, no, no) \
    SC(209, 56, 54, 2, no, no) \
    SC(210, 56, 54, 3, no, no) \
    SC(211, 56, 54, 4, no, no) \
    \
    SC(212, 57, 55, 1, no, no) \
    SC(213, 57, 55, 2, no, no) \
    SC(214, 57, 55, 3, no, no) \
    SC(215, 57, 55, 4, no, no) \
    \
    SC(216, 58, 56, 1, no, no) \
    SC(217, 58, 56, 2, no, no) \
    SC(218, 58, 56, 3, no, no) \
    SC(219, 58, 56, 4, no, no) \
    \
    SC(220, 59, 57, 1, no, no) \
    SC(221, 59, 57, 2, no, no) \
    SC(222, 59, 57, 3, no, no) \
    SC(223, 59, 57, 4, no, no) \
    \
    SC(224, 60, 58, 1, no, no) \
    SC(225, 60, 58, 2, no, no) \
    SC(226, 60, 58, 3, no, no) \
    SC(227, 60, 58, 4, no, no) \
    \
    SC(228, 61, 59, 1, no, no) \
    SC(229, 61, 59, 2, no, no) \
    SC(230, 61, 59, 3, no, no) \
    SC(231, 61, 59, 4, no, no) \
    \
    SC(232, 62, 60, 1, no, no) \
    SC(233, 62, 60, 2, no, no) \
    SC(234, 62, 60, 3, no, no) \
    SC(235, 62, 60, 4, no, no) \
    \
    SC(236, 63, 61, 1, no, no) \
    SC(237, 63, 61, 2, no, no) \
    SC(238, 63, 61, 3, no, no)
#define SIZE_CLASSES_DEFINED
#define NTBINS 0 /* no tiny size classes below the quantum */
#define NLBINS 32 /* classes reachable via the small-size lookup table */
#define NBINS 35 /* small (binned) size classes */
#define LG_TINY_MAXCLASS "NA"
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) /* 4096 */
#define SMALL_MAXCLASS ((((size_t)1) << 12) + (((size_t)3) << 10)) /* 7168 */
#endif
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 16)
/*
 * Size-class table: 64-bit pointers, no tiny classes, 8-byte quantum,
 * 64 KiB pages.  Auto-generated data (jemalloc's size_classes.sh); do not
 * hand-edit individual SC() rows.
 * Columns: SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup);
 * class size = (1 << lg_grp) + ndelta * (1 << lg_delta).
 * NOTE: the final SC() row must NOT end with a '\' line continuation,
 * otherwise the following "#define SIZE_CLASSES_DEFINED" is absorbed into
 * the macro body and never defined (fixed here).
 */
#define SIZE_CLASSES \
    /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
    SC(  0,  3,  3, 0, yes,  3) \
    SC(  1,  3,  3, 1, yes,  3) \
    SC(  2,  3,  3, 2, yes,  3) \
    SC(  3,  3,  3, 3, yes,  3) \
    \
    SC(  4,  5,  3, 1, yes,  3) \
    SC(  5,  5,  3, 2, yes,  3) \
    SC(  6,  5,  3, 3, yes,  3) \
    SC(  7,  5,  3, 4, yes,  3) \
    \
    SC(  8,  6,  4, 1, yes,  4) \
    SC(  9,  6,  4, 2, yes,  4) \
    SC( 10,  6,  4, 3, yes,  4) \
    SC( 11,  6,  4, 4, yes,  4) \
    \
    SC( 12,  7,  5, 1, yes,  5) \
    SC( 13,  7,  5, 2, yes,  5) \
    SC( 14,  7,  5, 3, yes,  5) \
    SC( 15,  7,  5, 4, yes,  5) \
    \
    SC( 16,  8,  6, 1, yes,  6) \
    SC( 17,  8,  6, 2, yes,  6) \
    SC( 18,  8,  6, 3, yes,  6) \
    SC( 19,  8,  6, 4, yes,  6) \
    \
    SC( 20,  9,  7, 1, yes,  7) \
    SC( 21,  9,  7, 2, yes,  7) \
    SC( 22,  9,  7, 3, yes,  7) \
    SC( 23,  9,  7, 4, yes,  7) \
    \
    SC( 24, 10,  8, 1, yes,  8) \
    SC( 25, 10,  8, 2, yes,  8) \
    SC( 26, 10,  8, 3, yes,  8) \
    SC( 27, 10,  8, 4, yes,  8) \
    \
    SC( 28, 11,  9, 1, yes,  9) \
    SC( 29, 11,  9, 2, yes,  9) \
    SC( 30, 11,  9, 3, yes,  9) \
    SC( 31, 11,  9, 4, yes,  9) \
    \
    SC( 32, 12, 10, 1, yes, no) \
    SC( 33, 12, 10, 2, yes, no) \
    SC( 34, 12, 10, 3, yes, no) \
    SC( 35, 12, 10, 4, yes, no) \
    \
    SC( 36, 13, 11, 1, yes, no) \
    SC( 37, 13, 11, 2, yes, no) \
    SC( 38, 13, 11, 3, yes, no) \
    SC( 39, 13, 11, 4, yes, no) \
    \
    SC( 40, 14, 12, 1, yes, no) \
    SC( 41, 14, 12, 2, yes, no) \
    SC( 42, 14, 12, 3, yes, no) \
    SC( 43, 14, 12, 4, yes, no) \
    \
    SC( 44, 15, 13, 1, yes, no) \
    SC( 45, 15, 13, 2, yes, no) \
    SC( 46, 15, 13, 3, yes, no) \
    SC( 47, 15, 13, 4, no, no) \
    \
    SC( 48, 16, 14, 1, no, no) \
    SC( 49, 16, 14, 2, no, no) \
    SC( 50, 16, 14, 3, no, no) \
    SC( 51, 16, 14, 4, no, no) \
    \
    SC( 52, 17, 15, 1, no, no) \
    SC( 53, 17, 15, 2, no, no) \
    SC( 54, 17, 15, 3, no, no) \
    SC( 55, 17, 15, 4, no, no) \
    \
    SC( 56, 18, 16, 1, no, no) \
    SC( 57, 18, 16, 2, no, no) \
    SC( 58, 18, 16, 3, no, no) \
    SC( 59, 18, 16, 4, no, no) \
    \
    SC( 60, 19, 17, 1, no, no) \
    SC( 61, 19, 17, 2, no, no) \
    SC( 62, 19, 17, 3, no, no) \
    SC( 63, 19, 17, 4, no, no) \
    \
    SC( 64, 20, 18, 1, no, no) \
    SC( 65, 20, 18, 2, no, no) \
    SC( 66, 20, 18, 3, no, no) \
    SC( 67, 20, 18, 4, no, no) \
    \
    SC( 68, 21, 19, 1, no, no) \
    SC( 69, 21, 19, 2, no, no) \
    SC( 70, 21, 19, 3, no, no) \
    SC( 71, 21, 19, 4, no, no) \
    \
    SC( 72, 22, 20, 1, no, no) \
    SC( 73, 22, 20, 2, no, no) \
    SC( 74, 22, 20, 3, no, no) \
    SC( 75, 22, 20, 4, no, no) \
    \
    SC( 76, 23, 21, 1, no, no) \
    SC( 77, 23, 21, 2, no, no) \
    SC( 78, 23, 21, 3, no, no) \
    SC( 79, 23, 21, 4, no, no) \
    \
    SC( 80, 24, 22, 1, no, no) \
    SC( 81, 24, 22, 2, no, no) \
    SC( 82, 24, 22, 3, no, no) \
    SC( 83, 24, 22, 4, no, no) \
    \
    SC( 84, 25, 23, 1, no, no) \
    SC( 85, 25, 23, 2, no, no) \
    SC( 86, 25, 23, 3, no, no) \
    SC( 87, 25, 23, 4, no, no) \
    \
    SC( 88, 26, 24, 1, no, no) \
    SC( 89, 26, 24, 2, no, no) \
    SC( 90, 26, 24, 3, no, no) \
    SC( 91, 26, 24, 4, no, no) \
    \
    SC( 92, 27, 25, 1, no, no) \
    SC( 93, 27, 25, 2, no, no) \
    SC( 94, 27, 25, 3, no, no) \
    SC( 95, 27, 25, 4, no, no) \
    \
    SC( 96, 28, 26, 1, no, no) \
    SC( 97, 28, 26, 2, no, no) \
    SC( 98, 28, 26, 3, no, no) \
    SC( 99, 28, 26, 4, no, no) \
    \
    SC(100, 29, 27, 1, no, no) \
    SC(101, 29, 27, 2, no, no) \
    SC(102, 29, 27, 3, no, no) \
    SC(103, 29, 27, 4, no, no) \
    \
    SC(104, 30, 28, 1, no, no) \
    SC(105, 30, 28, 2, no, no) \
    SC(106, 30, 28, 3, no, no) \
    SC(107, 30, 28, 4, no, no) \
    \
    SC(108, 31, 29, 1, no, no) \
    SC(109, 31, 29, 2, no, no) \
    SC(110, 31, 29, 3, no, no) \
    SC(111, 31, 29, 4, no, no) \
    \
    SC(112, 32, 30, 1, no, no) \
    SC(113, 32, 30, 2, no, no) \
    SC(114, 32, 30, 3, no, no) \
    SC(115, 32, 30, 4, no, no) \
    \
    SC(116, 33, 31, 1, no, no) \
    SC(117, 33, 31, 2, no, no) \
    SC(118, 33, 31, 3, no, no) \
    SC(119, 33, 31, 4, no, no) \
    \
    SC(120, 34, 32, 1, no, no) \
    SC(121, 34, 32, 2, no, no) \
    SC(122, 34, 32, 3, no, no) \
    SC(123, 34, 32, 4, no, no) \
    \
    SC(124, 35, 33, 1, no, no) \
    SC(125, 35, 33, 2, no, no) \
    SC(126, 35, 33, 3, no, no) \
    SC(127, 35, 33, 4, no, no) \
    \
    SC(128, 36, 34, 1, no, no) \
    SC(129, 36, 34, 2, no, no) \
    SC(130, 36, 34, 3, no, no) \
    SC(131, 36, 34, 4, no, no) \
    \
    SC(132, 37, 35, 1, no, no) \
    SC(133, 37, 35, 2, no, no) \
    SC(134, 37, 35, 3, no, no) \
    SC(135, 37, 35, 4, no, no) \
    \
    SC(136, 38, 36, 1, no, no) \
    SC(137, 38, 36, 2, no, no) \
    SC(138, 38, 36, 3, no, no) \
    SC(139, 38, 36, 4, no, no) \
    \
    SC(140, 39, 37, 1, no, no) \
    SC(141, 39, 37, 2, no, no) \
    SC(142, 39, 37, 3, no, no) \
    SC(143, 39, 37, 4, no, no) \
    \
    SC(144, 40, 38, 1, no, no) \
    SC(145, 40, 38, 2, no, no) \
    SC(146, 40, 38, 3, no, no) \
    SC(147, 40, 38, 4, no, no) \
    \
    SC(148, 41, 39, 1, no, no) \
    SC(149, 41, 39, 2, no, no) \
    SC(150, 41, 39, 3, no, no) \
    SC(151, 41, 39, 4, no, no) \
    \
    SC(152, 42, 40, 1, no, no) \
    SC(153, 42, 40, 2, no, no) \
    SC(154, 42, 40, 3, no, no) \
    SC(155, 42, 40, 4, no, no) \
    \
    SC(156, 43, 41, 1, no, no) \
    SC(157, 43, 41, 2, no, no) \
    SC(158, 43, 41, 3, no, no) \
    SC(159, 43, 41, 4, no, no) \
    \
    SC(160, 44, 42, 1, no, no) \
    SC(161, 44, 42, 2, no, no) \
    SC(162, 44, 42, 3, no, no) \
    SC(163, 44, 42, 4, no, no) \
    \
    SC(164, 45, 43, 1, no, no) \
    SC(165, 45, 43, 2, no, no) \
    SC(166, 45, 43, 3, no, no) \
    SC(167, 45, 43, 4, no, no) \
    \
    SC(168, 46, 44, 1, no, no) \
    SC(169, 46, 44, 2, no, no) \
    SC(170, 46, 44, 3, no, no) \
    SC(171, 46, 44, 4, no, no) \
    \
    SC(172, 47, 45, 1, no, no) \
    SC(173, 47, 45, 2, no, no) \
    SC(174, 47, 45, 3, no, no) \
    SC(175, 47, 45, 4, no, no) \
    \
    SC(176, 48, 46, 1, no, no) \
    SC(177, 48, 46, 2, no, no) \
    SC(178, 48, 46, 3, no, no) \
    SC(179, 48, 46, 4, no, no) \
    \
    SC(180, 49, 47, 1, no, no) \
    SC(181, 49, 47, 2, no, no) \
    SC(182, 49, 47, 3, no, no) \
    SC(183, 49, 47, 4, no, no) \
    \
    SC(184, 50, 48, 1, no, no) \
    SC(185, 50, 48, 2, no, no) \
    SC(186, 50, 48, 3, no, no) \
    SC(187, 50, 48, 4, no, no) \
    \
    SC(188, 51, 49, 1, no, no) \
    SC(189, 51, 49, 2, no, no) \
    SC(190, 51, 49, 3, no, no) \
    SC(191, 51, 49, 4, no, no) \
    \
    SC(192, 52, 50, 1, no, no) \
    SC(193, 52, 50, 2, no, no) \
    SC(194, 52, 50, 3, no, no) \
    SC(195, 52, 50, 4, no, no) \
    \
    SC(196, 53, 51, 1, no, no) \
    SC(197, 53, 51, 2, no, no) \
    SC(198, 53, 51, 3, no, no) \
    SC(199, 53, 51, 4, no, no) \
    \
    SC(200, 54, 52, 1, no, no) \
    SC(201, 54, 52, 2, no, no) \
    SC(202, 54, 52, 3, no, no) \
    SC(203, 54, 52, 4, no, no) \
    \
    SC(204, 55, 53, 1, no, no) \
    SC(205, 55, 53, 2, no, no) \
    SC(206, 55, 53, 3, no, no) \
    SC(207, 55, 53, 4, no, no) \
    \
    SC(208, 56, 54, 1, no, no) \
    SC(209, 56, 54, 2, no, no) \
    SC(210, 56, 54, 3, no, no) \
    SC(211, 56, 54, 4, no, no) \
    \
    SC(212, 57, 55, 1, no, no) \
    SC(213, 57, 55, 2, no, no) \
    SC(214, 57, 55, 3, no, no) \
    SC(215, 57, 55, 4, no, no) \
    \
    SC(216, 58, 56, 1, no, no) \
    SC(217, 58, 56, 2, no, no) \
    SC(218, 58, 56, 3, no, no) \
    SC(219, 58, 56, 4, no, no) \
    \
    SC(220, 59, 57, 1, no, no) \
    SC(221, 59, 57, 2, no, no) \
    SC(222, 59, 57, 3, no, no) \
    SC(223, 59, 57, 4, no, no) \
    \
    SC(224, 60, 58, 1, no, no) \
    SC(225, 60, 58, 2, no, no) \
    SC(226, 60, 58, 3, no, no) \
    SC(227, 60, 58, 4, no, no) \
    \
    SC(228, 61, 59, 1, no, no) \
    SC(229, 61, 59, 2, no, no) \
    SC(230, 61, 59, 3, no, no) \
    SC(231, 61, 59, 4, no, no) \
    \
    SC(232, 62, 60, 1, no, no) \
    SC(233, 62, 60, 2, no, no) \
    SC(234, 62, 60, 3, no, no) \
    SC(235, 62, 60, 4, no, no) \
    \
    SC(236, 63, 61, 1, no, no) \
    SC(237, 63, 61, 2, no, no) \
    SC(238, 63, 61, 3, no, no)
#define SIZE_CLASSES_DEFINED
#define NTBINS 0 /* no tiny size classes below the quantum */
#define NLBINS 32 /* classes reachable via the small-size lookup table */
#define NBINS 47 /* small (binned) size classes */
#define LG_TINY_MAXCLASS "NA"
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) /* 4096 */
#define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13)) /* 57344 */
#endif
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 12)
/*
 * Size-class table: 64-bit pointers, one tiny class (8 bytes), 16-byte
 * quantum, 4 KiB pages.  Auto-generated data (jemalloc's size_classes.sh);
 * do not hand-edit individual SC() rows.
 * Columns: SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup);
 * class size = (1 << lg_grp) + ndelta * (1 << lg_delta).
 * NOTE: the final SC() row must NOT end with a '\' line continuation,
 * otherwise the following "#define SIZE_CLASSES_DEFINED" is absorbed into
 * the macro body and never defined (fixed here).
 */
#define SIZE_CLASSES \
    /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
    SC(  0,  3,  3, 0, yes,  3) \
    \
    SC(  1,  3,  3, 1, yes,  3) \
    SC(  2,  4,  4, 1, yes,  4) \
    SC(  3,  4,  4, 2, yes,  4) \
    SC(  4,  4,  4, 3, yes,  4) \
    \
    SC(  5,  6,  4, 1, yes,  4) \
    SC(  6,  6,  4, 2, yes,  4) \
    SC(  7,  6,  4, 3, yes,  4) \
    SC(  8,  6,  4, 4, yes,  4) \
    \
    SC(  9,  7,  5, 1, yes,  5) \
    SC( 10,  7,  5, 2, yes,  5) \
    SC( 11,  7,  5, 3, yes,  5) \
    SC( 12,  7,  5, 4, yes,  5) \
    \
    SC( 13,  8,  6, 1, yes,  6) \
    SC( 14,  8,  6, 2, yes,  6) \
    SC( 15,  8,  6, 3, yes,  6) \
    SC( 16,  8,  6, 4, yes,  6) \
    \
    SC( 17,  9,  7, 1, yes,  7) \
    SC( 18,  9,  7, 2, yes,  7) \
    SC( 19,  9,  7, 3, yes,  7) \
    SC( 20,  9,  7, 4, yes,  7) \
    \
    SC( 21, 10,  8, 1, yes,  8) \
    SC( 22, 10,  8, 2, yes,  8) \
    SC( 23, 10,  8, 3, yes,  8) \
    SC( 24, 10,  8, 4, yes,  8) \
    \
    SC( 25, 11,  9, 1, yes,  9) \
    SC( 26, 11,  9, 2, yes,  9) \
    SC( 27, 11,  9, 3, yes,  9) \
    SC( 28, 11,  9, 4, no,  9) \
    \
    SC( 29, 12, 10, 1, no, no) \
    SC( 30, 12, 10, 2, no, no) \
    SC( 31, 12, 10, 3, no, no) \
    SC( 32, 12, 10, 4, no, no) \
    \
    SC( 33, 13, 11, 1, no, no) \
    SC( 34, 13, 11, 2, no, no) \
    SC( 35, 13, 11, 3, no, no) \
    SC( 36, 13, 11, 4, no, no) \
    \
    SC( 37, 14, 12, 1, no, no) \
    SC( 38, 14, 12, 2, no, no) \
    SC( 39, 14, 12, 3, no, no) \
    SC( 40, 14, 12, 4, no, no) \
    \
    SC( 41, 15, 13, 1, no, no) \
    SC( 42, 15, 13, 2, no, no) \
    SC( 43, 15, 13, 3, no, no) \
    SC( 44, 15, 13, 4, no, no) \
    \
    SC( 45, 16, 14, 1, no, no) \
    SC( 46, 16, 14, 2, no, no) \
    SC( 47, 16, 14, 3, no, no) \
    SC( 48, 16, 14, 4, no, no) \
    \
    SC( 49, 17, 15, 1, no, no) \
    SC( 50, 17, 15, 2, no, no) \
    SC( 51, 17, 15, 3, no, no) \
    SC( 52, 17, 15, 4, no, no) \
    \
    SC( 53, 18, 16, 1, no, no) \
    SC( 54, 18, 16, 2, no, no) \
    SC( 55, 18, 16, 3, no, no) \
    SC( 56, 18, 16, 4, no, no) \
    \
    SC( 57, 19, 17, 1, no, no) \
    SC( 58, 19, 17, 2, no, no) \
    SC( 59, 19, 17, 3, no, no) \
    SC( 60, 19, 17, 4, no, no) \
    \
    SC( 61, 20, 18, 1, no, no) \
    SC( 62, 20, 18, 2, no, no) \
    SC( 63, 20, 18, 3, no, no) \
    SC( 64, 20, 18, 4, no, no) \
    \
    SC( 65, 21, 19, 1, no, no) \
    SC( 66, 21, 19, 2, no, no) \
    SC( 67, 21, 19, 3, no, no) \
    SC( 68, 21, 19, 4, no, no) \
    \
    SC( 69, 22, 20, 1, no, no) \
    SC( 70, 22, 20, 2, no, no) \
    SC( 71, 22, 20, 3, no, no) \
    SC( 72, 22, 20, 4, no, no) \
    \
    SC( 73, 23, 21, 1, no, no) \
    SC( 74, 23, 21, 2, no, no) \
    SC( 75, 23, 21, 3, no, no) \
    SC( 76, 23, 21, 4, no, no) \
    \
    SC( 77, 24, 22, 1, no, no) \
    SC( 78, 24, 22, 2, no, no) \
    SC( 79, 24, 22, 3, no, no) \
    SC( 80, 24, 22, 4, no, no) \
    \
    SC( 81, 25, 23, 1, no, no) \
    SC( 82, 25, 23, 2, no, no) \
    SC( 83, 25, 23, 3, no, no) \
    SC( 84, 25, 23, 4, no, no) \
    \
    SC( 85, 26, 24, 1, no, no) \
    SC( 86, 26, 24, 2, no, no) \
    SC( 87, 26, 24, 3, no, no) \
    SC( 88, 26, 24, 4, no, no) \
    \
    SC( 89, 27, 25, 1, no, no) \
    SC( 90, 27, 25, 2, no, no) \
    SC( 91, 27, 25, 3, no, no) \
    SC( 92, 27, 25, 4, no, no) \
    \
    SC( 93, 28, 26, 1, no, no) \
    SC( 94, 28, 26, 2, no, no) \
    SC( 95, 28, 26, 3, no, no) \
    SC( 96, 28, 26, 4, no, no) \
    \
    SC( 97, 29, 27, 1, no, no) \
    SC( 98, 29, 27, 2, no, no) \
    SC( 99, 29, 27, 3, no, no) \
    SC(100, 29, 27, 4, no, no) \
    \
    SC(101, 30, 28, 1, no, no) \
    SC(102, 30, 28, 2, no, no) \
    SC(103, 30, 28, 3, no, no) \
    SC(104, 30, 28, 4, no, no) \
    \
    SC(105, 31, 29, 1, no, no) \
    SC(106, 31, 29, 2, no, no) \
    SC(107, 31, 29, 3, no, no) \
    SC(108, 31, 29, 4, no, no) \
    \
    SC(109, 32, 30, 1, no, no) \
    SC(110, 32, 30, 2, no, no) \
    SC(111, 32, 30, 3, no, no) \
    SC(112, 32, 30, 4, no, no) \
    \
    SC(113, 33, 31, 1, no, no) \
    SC(114, 33, 31, 2, no, no) \
    SC(115, 33, 31, 3, no, no) \
    SC(116, 33, 31, 4, no, no) \
    \
    SC(117, 34, 32, 1, no, no) \
    SC(118, 34, 32, 2, no, no) \
    SC(119, 34, 32, 3, no, no) \
    SC(120, 34, 32, 4, no, no) \
    \
    SC(121, 35, 33, 1, no, no) \
    SC(122, 35, 33, 2, no, no) \
    SC(123, 35, 33, 3, no, no) \
    SC(124, 35, 33, 4, no, no) \
    \
    SC(125, 36, 34, 1, no, no) \
    SC(126, 36, 34, 2, no, no) \
    SC(127, 36, 34, 3, no, no) \
    SC(128, 36, 34, 4, no, no) \
    \
    SC(129, 37, 35, 1, no, no) \
    SC(130, 37, 35, 2, no, no) \
    SC(131, 37, 35, 3, no, no) \
    SC(132, 37, 35, 4, no, no) \
    \
    SC(133, 38, 36, 1, no, no) \
    SC(134, 38, 36, 2, no, no) \
    SC(135, 38, 36, 3, no, no) \
    SC(136, 38, 36, 4, no, no) \
    \
    SC(137, 39, 37, 1, no, no) \
    SC(138, 39, 37, 2, no, no) \
    SC(139, 39, 37, 3, no, no) \
    SC(140, 39, 37, 4, no, no) \
    \
    SC(141, 40, 38, 1, no, no) \
    SC(142, 40, 38, 2, no, no) \
    SC(143, 40, 38, 3, no, no) \
    SC(144, 40, 38, 4, no, no) \
    \
    SC(145, 41, 39, 1, no, no) \
    SC(146, 41, 39, 2, no, no) \
    SC(147, 41, 39, 3, no, no) \
    SC(148, 41, 39, 4, no, no) \
    \
    SC(149, 42, 40, 1, no, no) \
    SC(150, 42, 40, 2, no, no) \
    SC(151, 42, 40, 3, no, no) \
    SC(152, 42, 40, 4, no, no) \
    \
    SC(153, 43, 41, 1, no, no) \
    SC(154, 43, 41, 2, no, no) \
    SC(155, 43, 41, 3, no, no) \
    SC(156, 43, 41, 4, no, no) \
    \
    SC(157, 44, 42, 1, no, no) \
    SC(158, 44, 42, 2, no, no) \
    SC(159, 44, 42, 3, no, no) \
    SC(160, 44, 42, 4, no, no) \
    \
    SC(161, 45, 43, 1, no, no) \
    SC(162, 45, 43, 2, no, no) \
    SC(163, 45, 43, 3, no, no) \
    SC(164, 45, 43, 4, no, no) \
    \
    SC(165, 46, 44, 1, no, no) \
    SC(166, 46, 44, 2, no, no) \
    SC(167, 46, 44, 3, no, no) \
    SC(168, 46, 44, 4, no, no) \
    \
    SC(169, 47, 45, 1, no, no) \
    SC(170, 47, 45, 2, no, no) \
    SC(171, 47, 45, 3, no, no) \
    SC(172, 47, 45, 4, no, no) \
    \
    SC(173, 48, 46, 1, no, no) \
    SC(174, 48, 46, 2, no, no) \
    SC(175, 48, 46, 3, no, no) \
    SC(176, 48, 46, 4, no, no) \
    \
    SC(177, 49, 47, 1, no, no) \
    SC(178, 49, 47, 2, no, no) \
    SC(179, 49, 47, 3, no, no) \
    SC(180, 49, 47, 4, no, no) \
    \
    SC(181, 50, 48, 1, no, no) \
    SC(182, 50, 48, 2, no, no) \
    SC(183, 50, 48, 3, no, no) \
    SC(184, 50, 48, 4, no, no) \
    \
    SC(185, 51, 49, 1, no, no) \
    SC(186, 51, 49, 2, no, no) \
    SC(187, 51, 49, 3, no, no) \
    SC(188, 51, 49, 4, no, no) \
    \
    SC(189, 52, 50, 1, no, no) \
    SC(190, 52, 50, 2, no, no) \
    SC(191, 52, 50, 3, no, no) \
    SC(192, 52, 50, 4, no, no) \
    \
    SC(193, 53, 51, 1, no, no) \
    SC(194, 53, 51, 2, no, no) \
    SC(195, 53, 51, 3, no, no) \
    SC(196, 53, 51, 4, no, no) \
    \
    SC(197, 54, 52, 1, no, no) \
    SC(198, 54, 52, 2, no, no) \
    SC(199, 54, 52, 3, no, no) \
    SC(200, 54, 52, 4, no, no) \
    \
    SC(201, 55, 53, 1, no, no) \
    SC(202, 55, 53, 2, no, no) \
    SC(203, 55, 53, 3, no, no) \
    SC(204, 55, 53, 4, no, no) \
    \
    SC(205, 56, 54, 1, no, no) \
    SC(206, 56, 54, 2, no, no) \
    SC(207, 56, 54, 3, no, no) \
    SC(208, 56, 54, 4, no, no) \
    \
    SC(209, 57, 55, 1, no, no) \
    SC(210, 57, 55, 2, no, no) \
    SC(211, 57, 55, 3, no, no) \
    SC(212, 57, 55, 4, no, no) \
    \
    SC(213, 58, 56, 1, no, no) \
    SC(214, 58, 56, 2, no, no) \
    SC(215, 58, 56, 3, no, no) \
    SC(216, 58, 56, 4, no, no) \
    \
    SC(217, 59, 57, 1, no, no) \
    SC(218, 59, 57, 2, no, no) \
    SC(219, 59, 57, 3, no, no) \
    SC(220, 59, 57, 4, no, no) \
    \
    SC(221, 60, 58, 1, no, no) \
    SC(222, 60, 58, 2, no, no) \
    SC(223, 60, 58, 3, no, no) \
    SC(224, 60, 58, 4, no, no) \
    \
    SC(225, 61, 59, 1, no, no) \
    SC(226, 61, 59, 2, no, no) \
    SC(227, 61, 59, 3, no, no) \
    SC(228, 61, 59, 4, no, no) \
    \
    SC(229, 62, 60, 1, no, no) \
    SC(230, 62, 60, 2, no, no) \
    SC(231, 62, 60, 3, no, no) \
    SC(232, 62, 60, 4, no, no) \
    \
    SC(233, 63, 61, 1, no, no) \
    SC(234, 63, 61, 2, no, no) \
    SC(235, 63, 61, 3, no, no)
#define SIZE_CLASSES_DEFINED
#define NTBINS 1 /* one tiny size class (8 bytes) below the quantum */
#define NLBINS 29 /* classes reachable via the small-size lookup table */
#define NBINS 28 /* small (binned) size classes */
#define LG_TINY_MAXCLASS 3
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) /* 4096 */
#define SMALL_MAXCLASS ((((size_t)1) << 11) + (((size_t)3) << 9)) /* 3584 */
#endif
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 13)
/*
 * Size-class table: 64-bit pointers, one tiny class (8 bytes), 16-byte
 * quantum, 8 KiB pages.  Auto-generated data (jemalloc's size_classes.sh);
 * do not hand-edit individual SC() rows.
 * Columns: SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup);
 * class size = (1 << lg_grp) + ndelta * (1 << lg_delta).
 * NOTE: the final SC() row must NOT end with a '\' line continuation,
 * otherwise the following "#define SIZE_CLASSES_DEFINED" is absorbed into
 * the macro body and never defined (fixed here).
 */
#define SIZE_CLASSES \
    /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
    SC(  0,  3,  3, 0, yes,  3) \
    \
    SC(  1,  3,  3, 1, yes,  3) \
    SC(  2,  4,  4, 1, yes,  4) \
    SC(  3,  4,  4, 2, yes,  4) \
    SC(  4,  4,  4, 3, yes,  4) \
    \
    SC(  5,  6,  4, 1, yes,  4) \
    SC(  6,  6,  4, 2, yes,  4) \
    SC(  7,  6,  4, 3, yes,  4) \
    SC(  8,  6,  4, 4, yes,  4) \
    \
    SC(  9,  7,  5, 1, yes,  5) \
    SC( 10,  7,  5, 2, yes,  5) \
    SC( 11,  7,  5, 3, yes,  5) \
    SC( 12,  7,  5, 4, yes,  5) \
    \
    SC( 13,  8,  6, 1, yes,  6) \
    SC( 14,  8,  6, 2, yes,  6) \
    SC( 15,  8,  6, 3, yes,  6) \
    SC( 16,  8,  6, 4, yes,  6) \
    \
    SC( 17,  9,  7, 1, yes,  7) \
    SC( 18,  9,  7, 2, yes,  7) \
    SC( 19,  9,  7, 3, yes,  7) \
    SC( 20,  9,  7, 4, yes,  7) \
    \
    SC( 21, 10,  8, 1, yes,  8) \
    SC( 22, 10,  8, 2, yes,  8) \
    SC( 23, 10,  8, 3, yes,  8) \
    SC( 24, 10,  8, 4, yes,  8) \
    \
    SC( 25, 11,  9, 1, yes,  9) \
    SC( 26, 11,  9, 2, yes,  9) \
    SC( 27, 11,  9, 3, yes,  9) \
    SC( 28, 11,  9, 4, yes,  9) \
    \
    SC( 29, 12, 10, 1, yes, no) \
    SC( 30, 12, 10, 2, yes, no) \
    SC( 31, 12, 10, 3, yes, no) \
    SC( 32, 12, 10, 4, no, no) \
    \
    SC( 33, 13, 11, 1, no, no) \
    SC( 34, 13, 11, 2, no, no) \
    SC( 35, 13, 11, 3, no, no) \
    SC( 36, 13, 11, 4, no, no) \
    \
    SC( 37, 14, 12, 1, no, no) \
    SC( 38, 14, 12, 2, no, no) \
    SC( 39, 14, 12, 3, no, no) \
    SC( 40, 14, 12, 4, no, no) \
    \
    SC( 41, 15, 13, 1, no, no) \
    SC( 42, 15, 13, 2, no, no) \
    SC( 43, 15, 13, 3, no, no) \
    SC( 44, 15, 13, 4, no, no) \
    \
    SC( 45, 16, 14, 1, no, no) \
    SC( 46, 16, 14, 2, no, no) \
    SC( 47, 16, 14, 3, no, no) \
    SC( 48, 16, 14, 4, no, no) \
    \
    SC( 49, 17, 15, 1, no, no) \
    SC( 50, 17, 15, 2, no, no) \
    SC( 51, 17, 15, 3, no, no) \
    SC( 52, 17, 15, 4, no, no) \
    \
    SC( 53, 18, 16, 1, no, no) \
    SC( 54, 18, 16, 2, no, no) \
    SC( 55, 18, 16, 3, no, no) \
    SC( 56, 18, 16, 4, no, no) \
    \
    SC( 57, 19, 17, 1, no, no) \
    SC( 58, 19, 17, 2, no, no) \
    SC( 59, 19, 17, 3, no, no) \
    SC( 60, 19, 17, 4, no, no) \
    \
    SC( 61, 20, 18, 1, no, no) \
    SC( 62, 20, 18, 2, no, no) \
    SC( 63, 20, 18, 3, no, no) \
    SC( 64, 20, 18, 4, no, no) \
    \
    SC( 65, 21, 19, 1, no, no) \
    SC( 66, 21, 19, 2, no, no) \
    SC( 67, 21, 19, 3, no, no) \
    SC( 68, 21, 19, 4, no, no) \
    \
    SC( 69, 22, 20, 1, no, no) \
    SC( 70, 22, 20, 2, no, no) \
    SC( 71, 22, 20, 3, no, no) \
    SC( 72, 22, 20, 4, no, no) \
    \
    SC( 73, 23, 21, 1, no, no) \
    SC( 74, 23, 21, 2, no, no) \
    SC( 75, 23, 21, 3, no, no) \
    SC( 76, 23, 21, 4, no, no) \
    \
    SC( 77, 24, 22, 1, no, no) \
    SC( 78, 24, 22, 2, no, no) \
    SC( 79, 24, 22, 3, no, no) \
    SC( 80, 24, 22, 4, no, no) \
    \
    SC( 81, 25, 23, 1, no, no) \
    SC( 82, 25, 23, 2, no, no) \
    SC( 83, 25, 23, 3, no, no) \
    SC( 84, 25, 23, 4, no, no) \
    \
    SC( 85, 26, 24, 1, no, no) \
    SC( 86, 26, 24, 2, no, no) \
    SC( 87, 26, 24, 3, no, no) \
    SC( 88, 26, 24, 4, no, no) \
    \
    SC( 89, 27, 25, 1, no, no) \
    SC( 90, 27, 25, 2, no, no) \
    SC( 91, 27, 25, 3, no, no) \
    SC( 92, 27, 25, 4, no, no) \
    \
    SC( 93, 28, 26, 1, no, no) \
    SC( 94, 28, 26, 2, no, no) \
    SC( 95, 28, 26, 3, no, no) \
    SC( 96, 28, 26, 4, no, no) \
    \
    SC( 97, 29, 27, 1, no, no) \
    SC( 98, 29, 27, 2, no, no) \
    SC( 99, 29, 27, 3, no, no) \
    SC(100, 29, 27, 4, no, no) \
    \
    SC(101, 30, 28, 1, no, no) \
    SC(102, 30, 28, 2, no, no) \
    SC(103, 30, 28, 3, no, no) \
    SC(104, 30, 28, 4, no, no) \
    \
    SC(105, 31, 29, 1, no, no) \
    SC(106, 31, 29, 2, no, no) \
    SC(107, 31, 29, 3, no, no) \
    SC(108, 31, 29, 4, no, no) \
    \
    SC(109, 32, 30, 1, no, no) \
    SC(110, 32, 30, 2, no, no) \
    SC(111, 32, 30, 3, no, no) \
    SC(112, 32, 30, 4, no, no) \
    \
    SC(113, 33, 31, 1, no, no) \
    SC(114, 33, 31, 2, no, no) \
    SC(115, 33, 31, 3, no, no) \
    SC(116, 33, 31, 4, no, no) \
    \
    SC(117, 34, 32, 1, no, no) \
    SC(118, 34, 32, 2, no, no) \
    SC(119, 34, 32, 3, no, no) \
    SC(120, 34, 32, 4, no, no) \
    \
    SC(121, 35, 33, 1, no, no) \
    SC(122, 35, 33, 2, no, no) \
    SC(123, 35, 33, 3, no, no) \
    SC(124, 35, 33, 4, no, no) \
    \
    SC(125, 36, 34, 1, no, no) \
    SC(126, 36, 34, 2, no, no) \
    SC(127, 36, 34, 3, no, no) \
    SC(128, 36, 34, 4, no, no) \
    \
    SC(129, 37, 35, 1, no, no) \
    SC(130, 37, 35, 2, no, no) \
    SC(131, 37, 35, 3, no, no) \
    SC(132, 37, 35, 4, no, no) \
    \
    SC(133, 38, 36, 1, no, no) \
    SC(134, 38, 36, 2, no, no) \
    SC(135, 38, 36, 3, no, no) \
    SC(136, 38, 36, 4, no, no) \
    \
    SC(137, 39, 37, 1, no, no) \
    SC(138, 39, 37, 2, no, no) \
    SC(139, 39, 37, 3, no, no) \
    SC(140, 39, 37, 4, no, no) \
    \
    SC(141, 40, 38, 1, no, no) \
    SC(142, 40, 38, 2, no, no) \
    SC(143, 40, 38, 3, no, no) \
    SC(144, 40, 38, 4, no, no) \
    \
    SC(145, 41, 39, 1, no, no) \
    SC(146, 41, 39, 2, no, no) \
    SC(147, 41, 39, 3, no, no) \
    SC(148, 41, 39, 4, no, no) \
    \
    SC(149, 42, 40, 1, no, no) \
    SC(150, 42, 40, 2, no, no) \
    SC(151, 42, 40, 3, no, no) \
    SC(152, 42, 40, 4, no, no) \
    \
    SC(153, 43, 41, 1, no, no) \
    SC(154, 43, 41, 2, no, no) \
    SC(155, 43, 41, 3, no, no) \
    SC(156, 43, 41, 4, no, no) \
    \
    SC(157, 44, 42, 1, no, no) \
    SC(158, 44, 42, 2, no, no) \
    SC(159, 44, 42, 3, no, no) \
    SC(160, 44, 42, 4, no, no) \
    \
    SC(161, 45, 43, 1, no, no) \
    SC(162, 45, 43, 2, no, no) \
    SC(163, 45, 43, 3, no, no) \
    SC(164, 45, 43, 4, no, no) \
    \
    SC(165, 46, 44, 1, no, no) \
    SC(166, 46, 44, 2, no, no) \
    SC(167, 46, 44, 3, no, no) \
    SC(168, 46, 44, 4, no, no) \
    \
    SC(169, 47, 45, 1, no, no) \
    SC(170, 47, 45, 2, no, no) \
    SC(171, 47, 45, 3, no, no) \
    SC(172, 47, 45, 4, no, no) \
    \
    SC(173, 48, 46, 1, no, no) \
    SC(174, 48, 46, 2, no, no) \
    SC(175, 48, 46, 3, no, no) \
    SC(176, 48, 46, 4, no, no) \
    \
    SC(177, 49, 47, 1, no, no) \
    SC(178, 49, 47, 2, no, no) \
    SC(179, 49, 47, 3, no, no) \
    SC(180, 49, 47, 4, no, no) \
    \
    SC(181, 50, 48, 1, no, no) \
    SC(182, 50, 48, 2, no, no) \
    SC(183, 50, 48, 3, no, no) \
    SC(184, 50, 48, 4, no, no) \
    \
    SC(185, 51, 49, 1, no, no) \
    SC(186, 51, 49, 2, no, no) \
    SC(187, 51, 49, 3, no, no) \
    SC(188, 51, 49, 4, no, no) \
    \
    SC(189, 52, 50, 1, no, no) \
    SC(190, 52, 50, 2, no, no) \
    SC(191, 52, 50, 3, no, no) \
    SC(192, 52, 50, 4, no, no) \
    \
    SC(193, 53, 51, 1, no, no) \
    SC(194, 53, 51, 2, no, no) \
    SC(195, 53, 51, 3, no, no) \
    SC(196, 53, 51, 4, no, no) \
    \
    SC(197, 54, 52, 1, no, no) \
    SC(198, 54, 52, 2, no, no) \
    SC(199, 54, 52, 3, no, no) \
    SC(200, 54, 52, 4, no, no) \
    \
    SC(201, 55, 53, 1, no, no) \
    SC(202, 55, 53, 2, no, no) \
    SC(203, 55, 53, 3, no, no) \
    SC(204, 55, 53, 4, no, no) \
    \
    SC(205, 56, 54, 1, no, no) \
    SC(206, 56, 54, 2, no, no) \
    SC(207, 56, 54, 3, no, no) \
    SC(208, 56, 54, 4, no, no) \
    \
    SC(209, 57, 55, 1, no, no) \
    SC(210, 57, 55, 2, no, no) \
    SC(211, 57, 55, 3, no, no) \
    SC(212, 57, 55, 4, no, no) \
    \
    SC(213, 58, 56, 1, no, no) \
    SC(214, 58, 56, 2, no, no) \
    SC(215, 58, 56, 3, no, no) \
    SC(216, 58, 56, 4, no, no) \
    \
    SC(217, 59, 57, 1, no, no) \
    SC(218, 59, 57, 2, no, no) \
    SC(219, 59, 57, 3, no, no) \
    SC(220, 59, 57, 4, no, no) \
    \
    SC(221, 60, 58, 1, no, no) \
    SC(222, 60, 58, 2, no, no) \
    SC(223, 60, 58, 3, no, no) \
    SC(224, 60, 58, 4, no, no) \
    \
    SC(225, 61, 59, 1, no, no) \
    SC(226, 61, 59, 2, no, no) \
    SC(227, 61, 59, 3, no, no) \
    SC(228, 61, 59, 4, no, no) \
    \
    SC(229, 62, 60, 1, no, no) \
    SC(230, 62, 60, 2, no, no) \
    SC(231, 62, 60, 3, no, no) \
    SC(232, 62, 60, 4, no, no) \
    \
    SC(233, 63, 61, 1, no, no) \
    SC(234, 63, 61, 2, no, no) \
    SC(235, 63, 61, 3, no, no)
#define SIZE_CLASSES_DEFINED
#define NTBINS 1 /* one tiny size class (8 bytes) below the quantum */
#define NLBINS 29 /* classes reachable via the small-size lookup table */
#define NBINS 32 /* small (binned) size classes */
#define LG_TINY_MAXCLASS 3
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) /* 4096 */
#define SMALL_MAXCLASS ((((size_t)1) << 12) + (((size_t)3) << 10)) /* 7168 */
#endif
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 16)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 3, 3, 0, yes, 3) \
\
SC( 1, 3, 3, 1, yes, 3) \
SC( 2, 4, 4, 1, yes, 4) \
SC( 3, 4, 4, 2, yes, 4) \
SC( 4, 4, 4, 3, yes, 4) \
\
SC( 5, 6, 4, 1, yes, 4) \
SC( 6, 6, 4, 2, yes, 4) \
SC( 7, 6, 4, 3, yes, 4) \
SC( 8, 6, 4, 4, yes, 4) \
\
SC( 9, 7, 5, 1, yes, 5) \
SC( 10, 7, 5, 2, yes, 5) \
SC( 11, 7, 5, 3, yes, 5) \
SC( 12, 7, 5, 4, yes, 5) \
\
SC( 13, 8, 6, 1, yes, 6) \
SC( 14, 8, 6, 2, yes, 6) \
SC( 15, 8, 6, 3, yes, 6) \
SC( 16, 8, 6, 4, yes, 6) \
\
SC( 17, 9, 7, 1, yes, 7) \
SC( 18, 9, 7, 2, yes, 7) \
SC( 19, 9, 7, 3, yes, 7) \
SC( 20, 9, 7, 4, yes, 7) \
\
SC( 21, 10, 8, 1, yes, 8) \
SC( 22, 10, 8, 2, yes, 8) \
SC( 23, 10, 8, 3, yes, 8) \
SC( 24, 10, 8, 4, yes, 8) \
\
SC( 25, 11, 9, 1, yes, 9) \
SC( 26, 11, 9, 2, yes, 9) \
SC( 27, 11, 9, 3, yes, 9) \
SC( 28, 11, 9, 4, yes, 9) \
\
SC( 29, 12, 10, 1, yes, no) \
SC( 30, 12, 10, 2, yes, no) \
SC( 31, 12, 10, 3, yes, no) \
SC( 32, 12, 10, 4, yes, no) \
\
SC( 33, 13, 11, 1, yes, no) \
SC( 34, 13, 11, 2, yes, no) \
SC( 35, 13, 11, 3, yes, no) \
SC( 36, 13, 11, 4, yes, no) \
\
SC( 37, 14, 12, 1, yes, no) \
SC( 38, 14, 12, 2, yes, no) \
SC( 39, 14, 12, 3, yes, no) \
SC( 40, 14, 12, 4, yes, no) \
\
SC( 41, 15, 13, 1, yes, no) \
SC( 42, 15, 13, 2, yes, no) \
SC( 43, 15, 13, 3, yes, no) \
SC( 44, 15, 13, 4, no, no) \
\
SC( 45, 16, 14, 1, no, no) \
SC( 46, 16, 14, 2, no, no) \
SC( 47, 16, 14, 3, no, no) \
SC( 48, 16, 14, 4, no, no) \
\
SC( 49, 17, 15, 1, no, no) \
SC( 50, 17, 15, 2, no, no) \
SC( 51, 17, 15, 3, no, no) \
SC( 52, 17, 15, 4, no, no) \
\
SC( 53, 18, 16, 1, no, no) \
SC( 54, 18, 16, 2, no, no) \
SC( 55, 18, 16, 3, no, no) \
SC( 56, 18, 16, 4, no, no) \
\
SC( 57, 19, 17, 1, no, no) \
SC( 58, 19, 17, 2, no, no) \
SC( 59, 19, 17, 3, no, no) \
SC( 60, 19, 17, 4, no, no) \
\
SC( 61, 20, 18, 1, no, no) \
SC( 62, 20, 18, 2, no, no) \
SC( 63, 20, 18, 3, no, no) \
SC( 64, 20, 18, 4, no, no) \
\
SC( 65, 21, 19, 1, no, no) \
SC( 66, 21, 19, 2, no, no) \
SC( 67, 21, 19, 3, no, no) \
SC( 68, 21, 19, 4, no, no) \
\
SC( 69, 22, 20, 1, no, no) \
SC( 70, 22, 20, 2, no, no) \
SC( 71, 22, 20, 3, no, no) \
SC( 72, 22, 20, 4, no, no) \
\
SC( 73, 23, 21, 1, no, no) \
SC( 74, 23, 21, 2, no, no) \
SC( 75, 23, 21, 3, no, no) \
SC( 76, 23, 21, 4, no, no) \
\
SC( 77, 24, 22, 1, no, no) \
SC( 78, 24, 22, 2, no, no) \
SC( 79, 24, 22, 3, no, no) \
SC( 80, 24, 22, 4, no, no) \
\
SC( 81, 25, 23, 1, no, no) \
SC( 82, 25, 23, 2, no, no) \
SC( 83, 25, 23, 3, no, no) \
SC( 84, 25, 23, 4, no, no) \
\
SC( 85, 26, 24, 1, no, no) \
SC( 86, 26, 24, 2, no, no) \
SC( 87, 26, 24, 3, no, no) \
SC( 88, 26, 24, 4, no, no) \
\
SC( 89, 27, 25, 1, no, no) \
SC( 90, 27, 25, 2, no, no) \
SC( 91, 27, 25, 3, no, no) \
SC( 92, 27, 25, 4, no, no) \
\
SC( 93, 28, 26, 1, no, no) \
SC( 94, 28, 26, 2, no, no) \
SC( 95, 28, 26, 3, no, no) \
SC( 96, 28, 26, 4, no, no) \
\
SC( 97, 29, 27, 1, no, no) \
SC( 98, 29, 27, 2, no, no) \
SC( 99, 29, 27, 3, no, no) \
SC(100, 29, 27, 4, no, no) \
\
SC(101, 30, 28, 1, no, no) \
SC(102, 30, 28, 2, no, no) \
SC(103, 30, 28, 3, no, no) \
SC(104, 30, 28, 4, no, no) \
\
SC(105, 31, 29, 1, no, no) \
SC(106, 31, 29, 2, no, no) \
SC(107, 31, 29, 3, no, no) \
SC(108, 31, 29, 4, no, no) \
\
SC(109, 32, 30, 1, no, no) \
SC(110, 32, 30, 2, no, no) \
SC(111, 32, 30, 3, no, no) \
SC(112, 32, 30, 4, no, no) \
\
SC(113, 33, 31, 1, no, no) \
SC(114, 33, 31, 2, no, no) \
SC(115, 33, 31, 3, no, no) \
SC(116, 33, 31, 4, no, no) \
\
SC(117, 34, 32, 1, no, no) \
SC(118, 34, 32, 2, no, no) \
SC(119, 34, 32, 3, no, no) \
SC(120, 34, 32, 4, no, no) \
\
SC(121, 35, 33, 1, no, no) \
SC(122, 35, 33, 2, no, no) \
SC(123, 35, 33, 3, no, no) \
SC(124, 35, 33, 4, no, no) \
\
SC(125, 36, 34, 1, no, no) \
SC(126, 36, 34, 2, no, no) \
SC(127, 36, 34, 3, no, no) \
SC(128, 36, 34, 4, no, no) \
\
SC(129, 37, 35, 1, no, no) \
SC(130, 37, 35, 2, no, no) \
SC(131, 37, 35, 3, no, no) \
SC(132, 37, 35, 4, no, no) \
\
SC(133, 38, 36, 1, no, no) \
SC(134, 38, 36, 2, no, no) \
SC(135, 38, 36, 3, no, no) \
SC(136, 38, 36, 4, no, no) \
\
SC(137, 39, 37, 1, no, no) \
SC(138, 39, 37, 2, no, no) \
SC(139, 39, 37, 3, no, no) \
SC(140, 39, 37, 4, no, no) \
\
SC(141, 40, 38, 1, no, no) \
SC(142, 40, 38, 2, no, no) \
SC(143, 40, 38, 3, no, no) \
SC(144, 40, 38, 4, no, no) \
\
SC(145, 41, 39, 1, no, no) \
SC(146, 41, 39, 2, no, no) \
SC(147, 41, 39, 3, no, no) \
SC(148, 41, 39, 4, no, no) \
\
SC(149, 42, 40, 1, no, no) \
SC(150, 42, 40, 2, no, no) \
SC(151, 42, 40, 3, no, no) \
SC(152, 42, 40, 4, no, no) \
\
SC(153, 43, 41, 1, no, no) \
SC(154, 43, 41, 2, no, no) \
SC(155, 43, 41, 3, no, no) \
SC(156, 43, 41, 4, no, no) \
\
SC(157, 44, 42, 1, no, no) \
SC(158, 44, 42, 2, no, no) \
SC(159, 44, 42, 3, no, no) \
SC(160, 44, 42, 4, no, no) \
\
SC(161, 45, 43, 1, no, no) \
SC(162, 45, 43, 2, no, no) \
SC(163, 45, 43, 3, no, no) \
SC(164, 45, 43, 4, no, no) \
\
SC(165, 46, 44, 1, no, no) \
SC(166, 46, 44, 2, no, no) \
SC(167, 46, 44, 3, no, no) \
SC(168, 46, 44, 4, no, no) \
\
SC(169, 47, 45, 1, no, no) \
SC(170, 47, 45, 2, no, no) \
SC(171, 47, 45, 3, no, no) \
SC(172, 47, 45, 4, no, no) \
\
SC(173, 48, 46, 1, no, no) \
SC(174, 48, 46, 2, no, no) \
SC(175, 48, 46, 3, no, no) \
SC(176, 48, 46, 4, no, no) \
\
SC(177, 49, 47, 1, no, no) \
SC(178, 49, 47, 2, no, no) \
SC(179, 49, 47, 3, no, no) \
SC(180, 49, 47, 4, no, no) \
\
SC(181, 50, 48, 1, no, no) \
SC(182, 50, 48, 2, no, no) \
SC(183, 50, 48, 3, no, no) \
SC(184, 50, 48, 4, no, no) \
\
SC(185, 51, 49, 1, no, no) \
SC(186, 51, 49, 2, no, no) \
SC(187, 51, 49, 3, no, no) \
SC(188, 51, 49, 4, no, no) \
\
SC(189, 52, 50, 1, no, no) \
SC(190, 52, 50, 2, no, no) \
SC(191, 52, 50, 3, no, no) \
SC(192, 52, 50, 4, no, no) \
\
SC(193, 53, 51, 1, no, no) \
SC(194, 53, 51, 2, no, no) \
SC(195, 53, 51, 3, no, no) \
SC(196, 53, 51, 4, no, no) \
\
SC(197, 54, 52, 1, no, no) \
SC(198, 54, 52, 2, no, no) \
SC(199, 54, 52, 3, no, no) \
SC(200, 54, 52, 4, no, no) \
\
SC(201, 55, 53, 1, no, no) \
SC(202, 55, 53, 2, no, no) \
SC(203, 55, 53, 3, no, no) \
SC(204, 55, 53, 4, no, no) \
\
SC(205, 56, 54, 1, no, no) \
SC(206, 56, 54, 2, no, no) \
SC(207, 56, 54, 3, no, no) \
SC(208, 56, 54, 4, no, no) \
\
SC(209, 57, 55, 1, no, no) \
SC(210, 57, 55, 2, no, no) \
SC(211, 57, 55, 3, no, no) \
SC(212, 57, 55, 4, no, no) \
\
SC(213, 58, 56, 1, no, no) \
SC(214, 58, 56, 2, no, no) \
SC(215, 58, 56, 3, no, no) \
SC(216, 58, 56, 4, no, no) \
\
SC(217, 59, 57, 1, no, no) \
SC(218, 59, 57, 2, no, no) \
SC(219, 59, 57, 3, no, no) \
SC(220, 59, 57, 4, no, no) \
\
SC(221, 60, 58, 1, no, no) \
SC(222, 60, 58, 2, no, no) \
SC(223, 60, 58, 3, no, no) \
SC(224, 60, 58, 4, no, no) \
\
SC(225, 61, 59, 1, no, no) \
SC(226, 61, 59, 2, no, no) \
SC(227, 61, 59, 3, no, no) \
SC(228, 61, 59, 4, no, no) \
\
SC(229, 62, 60, 1, no, no) \
SC(230, 62, 60, 2, no, no) \
SC(231, 62, 60, 3, no, no) \
SC(232, 62, 60, 4, no, no) \
\
SC(233, 63, 61, 1, no, no) \
SC(234, 63, 61, 2, no, no) \
SC(235, 63, 61, 3, no, no) \
#define SIZE_CLASSES_DEFINED
#define NTBINS 1
#define NLBINS 29
#define NBINS 44
#define LG_TINY_MAXCLASS 3
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13))
#endif
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 12)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 4, 4, 0, yes, 4) \
SC( 1, 4, 4, 1, yes, 4) \
SC( 2, 4, 4, 2, yes, 4) \
SC( 3, 4, 4, 3, yes, 4) \
\
SC( 4, 6, 4, 1, yes, 4) \
SC( 5, 6, 4, 2, yes, 4) \
SC( 6, 6, 4, 3, yes, 4) \
SC( 7, 6, 4, 4, yes, 4) \
\
SC( 8, 7, 5, 1, yes, 5) \
SC( 9, 7, 5, 2, yes, 5) \
SC( 10, 7, 5, 3, yes, 5) \
SC( 11, 7, 5, 4, yes, 5) \
\
SC( 12, 8, 6, 1, yes, 6) \
SC( 13, 8, 6, 2, yes, 6) \
SC( 14, 8, 6, 3, yes, 6) \
SC( 15, 8, 6, 4, yes, 6) \
\
SC( 16, 9, 7, 1, yes, 7) \
SC( 17, 9, 7, 2, yes, 7) \
SC( 18, 9, 7, 3, yes, 7) \
SC( 19, 9, 7, 4, yes, 7) \
\
SC( 20, 10, 8, 1, yes, 8) \
SC( 21, 10, 8, 2, yes, 8) \
SC( 22, 10, 8, 3, yes, 8) \
SC( 23, 10, 8, 4, yes, 8) \
\
SC( 24, 11, 9, 1, yes, 9) \
SC( 25, 11, 9, 2, yes, 9) \
SC( 26, 11, 9, 3, yes, 9) \
SC( 27, 11, 9, 4, no, 9) \
\
SC( 28, 12, 10, 1, no, no) \
SC( 29, 12, 10, 2, no, no) \
SC( 30, 12, 10, 3, no, no) \
SC( 31, 12, 10, 4, no, no) \
\
SC( 32, 13, 11, 1, no, no) \
SC( 33, 13, 11, 2, no, no) \
SC( 34, 13, 11, 3, no, no) \
SC( 35, 13, 11, 4, no, no) \
\
SC( 36, 14, 12, 1, no, no) \
SC( 37, 14, 12, 2, no, no) \
SC( 38, 14, 12, 3, no, no) \
SC( 39, 14, 12, 4, no, no) \
\
SC( 40, 15, 13, 1, no, no) \
SC( 41, 15, 13, 2, no, no) \
SC( 42, 15, 13, 3, no, no) \
SC( 43, 15, 13, 4, no, no) \
\
SC( 44, 16, 14, 1, no, no) \
SC( 45, 16, 14, 2, no, no) \
SC( 46, 16, 14, 3, no, no) \
SC( 47, 16, 14, 4, no, no) \
\
SC( 48, 17, 15, 1, no, no) \
SC( 49, 17, 15, 2, no, no) \
SC( 50, 17, 15, 3, no, no) \
SC( 51, 17, 15, 4, no, no) \
\
SC( 52, 18, 16, 1, no, no) \
SC( 53, 18, 16, 2, no, no) \
SC( 54, 18, 16, 3, no, no) \
SC( 55, 18, 16, 4, no, no) \
\
SC( 56, 19, 17, 1, no, no) \
SC( 57, 19, 17, 2, no, no) \
SC( 58, 19, 17, 3, no, no) \
SC( 59, 19, 17, 4, no, no) \
\
SC( 60, 20, 18, 1, no, no) \
SC( 61, 20, 18, 2, no, no) \
SC( 62, 20, 18, 3, no, no) \
SC( 63, 20, 18, 4, no, no) \
\
SC( 64, 21, 19, 1, no, no) \
SC( 65, 21, 19, 2, no, no) \
SC( 66, 21, 19, 3, no, no) \
SC( 67, 21, 19, 4, no, no) \
\
SC( 68, 22, 20, 1, no, no) \
SC( 69, 22, 20, 2, no, no) \
SC( 70, 22, 20, 3, no, no) \
SC( 71, 22, 20, 4, no, no) \
\
SC( 72, 23, 21, 1, no, no) \
SC( 73, 23, 21, 2, no, no) \
SC( 74, 23, 21, 3, no, no) \
SC( 75, 23, 21, 4, no, no) \
\
SC( 76, 24, 22, 1, no, no) \
SC( 77, 24, 22, 2, no, no) \
SC( 78, 24, 22, 3, no, no) \
SC( 79, 24, 22, 4, no, no) \
\
SC( 80, 25, 23, 1, no, no) \
SC( 81, 25, 23, 2, no, no) \
SC( 82, 25, 23, 3, no, no) \
SC( 83, 25, 23, 4, no, no) \
\
SC( 84, 26, 24, 1, no, no) \
SC( 85, 26, 24, 2, no, no) \
SC( 86, 26, 24, 3, no, no) \
SC( 87, 26, 24, 4, no, no) \
\
SC( 88, 27, 25, 1, no, no) \
SC( 89, 27, 25, 2, no, no) \
SC( 90, 27, 25, 3, no, no) \
SC( 91, 27, 25, 4, no, no) \
\
SC( 92, 28, 26, 1, no, no) \
SC( 93, 28, 26, 2, no, no) \
SC( 94, 28, 26, 3, no, no) \
SC( 95, 28, 26, 4, no, no) \
\
SC( 96, 29, 27, 1, no, no) \
SC( 97, 29, 27, 2, no, no) \
SC( 98, 29, 27, 3, no, no) \
SC( 99, 29, 27, 4, no, no) \
\
SC(100, 30, 28, 1, no, no) \
SC(101, 30, 28, 2, no, no) \
SC(102, 30, 28, 3, no, no) \
SC(103, 30, 28, 4, no, no) \
\
SC(104, 31, 29, 1, no, no) \
SC(105, 31, 29, 2, no, no) \
SC(106, 31, 29, 3, no, no) \
SC(107, 31, 29, 4, no, no) \
\
SC(108, 32, 30, 1, no, no) \
SC(109, 32, 30, 2, no, no) \
SC(110, 32, 30, 3, no, no) \
SC(111, 32, 30, 4, no, no) \
\
SC(112, 33, 31, 1, no, no) \
SC(113, 33, 31, 2, no, no) \
SC(114, 33, 31, 3, no, no) \
SC(115, 33, 31, 4, no, no) \
\
SC(116, 34, 32, 1, no, no) \
SC(117, 34, 32, 2, no, no) \
SC(118, 34, 32, 3, no, no) \
SC(119, 34, 32, 4, no, no) \
\
SC(120, 35, 33, 1, no, no) \
SC(121, 35, 33, 2, no, no) \
SC(122, 35, 33, 3, no, no) \
SC(123, 35, 33, 4, no, no) \
\
SC(124, 36, 34, 1, no, no) \
SC(125, 36, 34, 2, no, no) \
SC(126, 36, 34, 3, no, no) \
SC(127, 36, 34, 4, no, no) \
\
SC(128, 37, 35, 1, no, no) \
SC(129, 37, 35, 2, no, no) \
SC(130, 37, 35, 3, no, no) \
SC(131, 37, 35, 4, no, no) \
\
SC(132, 38, 36, 1, no, no) \
SC(133, 38, 36, 2, no, no) \
SC(134, 38, 36, 3, no, no) \
SC(135, 38, 36, 4, no, no) \
\
SC(136, 39, 37, 1, no, no) \
SC(137, 39, 37, 2, no, no) \
SC(138, 39, 37, 3, no, no) \
SC(139, 39, 37, 4, no, no) \
\
SC(140, 40, 38, 1, no, no) \
SC(141, 40, 38, 2, no, no) \
SC(142, 40, 38, 3, no, no) \
SC(143, 40, 38, 4, no, no) \
\
SC(144, 41, 39, 1, no, no) \
SC(145, 41, 39, 2, no, no) \
SC(146, 41, 39, 3, no, no) \
SC(147, 41, 39, 4, no, no) \
\
SC(148, 42, 40, 1, no, no) \
SC(149, 42, 40, 2, no, no) \
SC(150, 42, 40, 3, no, no) \
SC(151, 42, 40, 4, no, no) \
\
SC(152, 43, 41, 1, no, no) \
SC(153, 43, 41, 2, no, no) \
SC(154, 43, 41, 3, no, no) \
SC(155, 43, 41, 4, no, no) \
\
SC(156, 44, 42, 1, no, no) \
SC(157, 44, 42, 2, no, no) \
SC(158, 44, 42, 3, no, no) \
SC(159, 44, 42, 4, no, no) \
\
SC(160, 45, 43, 1, no, no) \
SC(161, 45, 43, 2, no, no) \
SC(162, 45, 43, 3, no, no) \
SC(163, 45, 43, 4, no, no) \
\
SC(164, 46, 44, 1, no, no) \
SC(165, 46, 44, 2, no, no) \
SC(166, 46, 44, 3, no, no) \
SC(167, 46, 44, 4, no, no) \
\
SC(168, 47, 45, 1, no, no) \
SC(169, 47, 45, 2, no, no) \
SC(170, 47, 45, 3, no, no) \
SC(171, 47, 45, 4, no, no) \
\
SC(172, 48, 46, 1, no, no) \
SC(173, 48, 46, 2, no, no) \
SC(174, 48, 46, 3, no, no) \
SC(175, 48, 46, 4, no, no) \
\
SC(176, 49, 47, 1, no, no) \
SC(177, 49, 47, 2, no, no) \
SC(178, 49, 47, 3, no, no) \
SC(179, 49, 47, 4, no, no) \
\
SC(180, 50, 48, 1, no, no) \
SC(181, 50, 48, 2, no, no) \
SC(182, 50, 48, 3, no, no) \
SC(183, 50, 48, 4, no, no) \
\
SC(184, 51, 49, 1, no, no) \
SC(185, 51, 49, 2, no, no) \
SC(186, 51, 49, 3, no, no) \
SC(187, 51, 49, 4, no, no) \
\
SC(188, 52, 50, 1, no, no) \
SC(189, 52, 50, 2, no, no) \
SC(190, 52, 50, 3, no, no) \
SC(191, 52, 50, 4, no, no) \
\
SC(192, 53, 51, 1, no, no) \
SC(193, 53, 51, 2, no, no) \
SC(194, 53, 51, 3, no, no) \
SC(195, 53, 51, 4, no, no) \
\
SC(196, 54, 52, 1, no, no) \
SC(197, 54, 52, 2, no, no) \
SC(198, 54, 52, 3, no, no) \
SC(199, 54, 52, 4, no, no) \
\
SC(200, 55, 53, 1, no, no) \
SC(201, 55, 53, 2, no, no) \
SC(202, 55, 53, 3, no, no) \
SC(203, 55, 53, 4, no, no) \
\
SC(204, 56, 54, 1, no, no) \
SC(205, 56, 54, 2, no, no) \
SC(206, 56, 54, 3, no, no) \
SC(207, 56, 54, 4, no, no) \
\
SC(208, 57, 55, 1, no, no) \
SC(209, 57, 55, 2, no, no) \
SC(210, 57, 55, 3, no, no) \
SC(211, 57, 55, 4, no, no) \
\
SC(212, 58, 56, 1, no, no) \
SC(213, 58, 56, 2, no, no) \
SC(214, 58, 56, 3, no, no) \
SC(215, 58, 56, 4, no, no) \
\
SC(216, 59, 57, 1, no, no) \
SC(217, 59, 57, 2, no, no) \
SC(218, 59, 57, 3, no, no) \
SC(219, 59, 57, 4, no, no) \
\
SC(220, 60, 58, 1, no, no) \
SC(221, 60, 58, 2, no, no) \
SC(222, 60, 58, 3, no, no) \
SC(223, 60, 58, 4, no, no) \
\
SC(224, 61, 59, 1, no, no) \
SC(225, 61, 59, 2, no, no) \
SC(226, 61, 59, 3, no, no) \
SC(227, 61, 59, 4, no, no) \
\
SC(228, 62, 60, 1, no, no) \
SC(229, 62, 60, 2, no, no) \
SC(230, 62, 60, 3, no, no) \
SC(231, 62, 60, 4, no, no) \
\
SC(232, 63, 61, 1, no, no) \
SC(233, 63, 61, 2, no, no) \
SC(234, 63, 61, 3, no, no) \
#define SIZE_CLASSES_DEFINED
#define NTBINS 0
#define NLBINS 28
#define NBINS 27
#define LG_TINY_MAXCLASS "NA"
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 11) + (((size_t)3) << 9))
#endif
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 13)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 4, 4, 0, yes, 4) \
SC( 1, 4, 4, 1, yes, 4) \
SC( 2, 4, 4, 2, yes, 4) \
SC( 3, 4, 4, 3, yes, 4) \
\
SC( 4, 6, 4, 1, yes, 4) \
SC( 5, 6, 4, 2, yes, 4) \
SC( 6, 6, 4, 3, yes, 4) \
SC( 7, 6, 4, 4, yes, 4) \
\
SC( 8, 7, 5, 1, yes, 5) \
SC( 9, 7, 5, 2, yes, 5) \
SC( 10, 7, 5, 3, yes, 5) \
SC( 11, 7, 5, 4, yes, 5) \
\
SC( 12, 8, 6, 1, yes, 6) \
SC( 13, 8, 6, 2, yes, 6) \
SC( 14, 8, 6, 3, yes, 6) \
SC( 15, 8, 6, 4, yes, 6) \
\
SC( 16, 9, 7, 1, yes, 7) \
SC( 17, 9, 7, 2, yes, 7) \
SC( 18, 9, 7, 3, yes, 7) \
SC( 19, 9, 7, 4, yes, 7) \
\
SC( 20, 10, 8, 1, yes, 8) \
SC( 21, 10, 8, 2, yes, 8) \
SC( 22, 10, 8, 3, yes, 8) \
SC( 23, 10, 8, 4, yes, 8) \
\
SC( 24, 11, 9, 1, yes, 9) \
SC( 25, 11, 9, 2, yes, 9) \
SC( 26, 11, 9, 3, yes, 9) \
SC( 27, 11, 9, 4, yes, 9) \
\
SC( 28, 12, 10, 1, yes, no) \
SC( 29, 12, 10, 2, yes, no) \
SC( 30, 12, 10, 3, yes, no) \
SC( 31, 12, 10, 4, no, no) \
\
SC( 32, 13, 11, 1, no, no) \
SC( 33, 13, 11, 2, no, no) \
SC( 34, 13, 11, 3, no, no) \
SC( 35, 13, 11, 4, no, no) \
\
SC( 36, 14, 12, 1, no, no) \
SC( 37, 14, 12, 2, no, no) \
SC( 38, 14, 12, 3, no, no) \
SC( 39, 14, 12, 4, no, no) \
\
SC( 40, 15, 13, 1, no, no) \
SC( 41, 15, 13, 2, no, no) \
SC( 42, 15, 13, 3, no, no) \
SC( 43, 15, 13, 4, no, no) \
\
SC( 44, 16, 14, 1, no, no) \
SC( 45, 16, 14, 2, no, no) \
SC( 46, 16, 14, 3, no, no) \
SC( 47, 16, 14, 4, no, no) \
\
SC( 48, 17, 15, 1, no, no) \
SC( 49, 17, 15, 2, no, no) \
SC( 50, 17, 15, 3, no, no) \
SC( 51, 17, 15, 4, no, no) \
\
SC( 52, 18, 16, 1, no, no) \
SC( 53, 18, 16, 2, no, no) \
SC( 54, 18, 16, 3, no, no) \
SC( 55, 18, 16, 4, no, no) \
\
SC( 56, 19, 17, 1, no, no) \
SC( 57, 19, 17, 2, no, no) \
SC( 58, 19, 17, 3, no, no) \
SC( 59, 19, 17, 4, no, no) \
\
SC( 60, 20, 18, 1, no, no) \
SC( 61, 20, 18, 2, no, no) \
SC( 62, 20, 18, 3, no, no) \
SC( 63, 20, 18, 4, no, no) \
\
SC( 64, 21, 19, 1, no, no) \
SC( 65, 21, 19, 2, no, no) \
SC( 66, 21, 19, 3, no, no) \
SC( 67, 21, 19, 4, no, no) \
\
SC( 68, 22, 20, 1, no, no) \
SC( 69, 22, 20, 2, no, no) \
SC( 70, 22, 20, 3, no, no) \
SC( 71, 22, 20, 4, no, no) \
\
SC( 72, 23, 21, 1, no, no) \
SC( 73, 23, 21, 2, no, no) \
SC( 74, 23, 21, 3, no, no) \
SC( 75, 23, 21, 4, no, no) \
\
SC( 76, 24, 22, 1, no, no) \
SC( 77, 24, 22, 2, no, no) \
SC( 78, 24, 22, 3, no, no) \
SC( 79, 24, 22, 4, no, no) \
\
SC( 80, 25, 23, 1, no, no) \
SC( 81, 25, 23, 2, no, no) \
SC( 82, 25, 23, 3, no, no) \
SC( 83, 25, 23, 4, no, no) \
\
SC( 84, 26, 24, 1, no, no) \
SC( 85, 26, 24, 2, no, no) \
SC( 86, 26, 24, 3, no, no) \
SC( 87, 26, 24, 4, no, no) \
\
SC( 88, 27, 25, 1, no, no) \
SC( 89, 27, 25, 2, no, no) \
SC( 90, 27, 25, 3, no, no) \
SC( 91, 27, 25, 4, no, no) \
\
SC( 92, 28, 26, 1, no, no) \
SC( 93, 28, 26, 2, no, no) \
SC( 94, 28, 26, 3, no, no) \
SC( 95, 28, 26, 4, no, no) \
\
SC( 96, 29, 27, 1, no, no) \
SC( 97, 29, 27, 2, no, no) \
SC( 98, 29, 27, 3, no, no) \
SC( 99, 29, 27, 4, no, no) \
\
SC(100, 30, 28, 1, no, no) \
SC(101, 30, 28, 2, no, no) \
SC(102, 30, 28, 3, no, no) \
SC(103, 30, 28, 4, no, no) \
\
SC(104, 31, 29, 1, no, no) \
SC(105, 31, 29, 2, no, no) \
SC(106, 31, 29, 3, no, no) \
SC(107, 31, 29, 4, no, no) \
\
SC(108, 32, 30, 1, no, no) \
SC(109, 32, 30, 2, no, no) \
SC(110, 32, 30, 3, no, no) \
SC(111, 32, 30, 4, no, no) \
\
SC(112, 33, 31, 1, no, no) \
SC(113, 33, 31, 2, no, no) \
SC(114, 33, 31, 3, no, no) \
SC(115, 33, 31, 4, no, no) \
\
SC(116, 34, 32, 1, no, no) \
SC(117, 34, 32, 2, no, no) \
SC(118, 34, 32, 3, no, no) \
SC(119, 34, 32, 4, no, no) \
\
SC(120, 35, 33, 1, no, no) \
SC(121, 35, 33, 2, no, no) \
SC(122, 35, 33, 3, no, no) \
SC(123, 35, 33, 4, no, no) \
\
SC(124, 36, 34, 1, no, no) \
SC(125, 36, 34, 2, no, no) \
SC(126, 36, 34, 3, no, no) \
SC(127, 36, 34, 4, no, no) \
\
SC(128, 37, 35, 1, no, no) \
SC(129, 37, 35, 2, no, no) \
SC(130, 37, 35, 3, no, no) \
SC(131, 37, 35, 4, no, no) \
\
SC(132, 38, 36, 1, no, no) \
SC(133, 38, 36, 2, no, no) \
SC(134, 38, 36, 3, no, no) \
SC(135, 38, 36, 4, no, no) \
\
SC(136, 39, 37, 1, no, no) \
SC(137, 39, 37, 2, no, no) \
SC(138, 39, 37, 3, no, no) \
SC(139, 39, 37, 4, no, no) \
\
SC(140, 40, 38, 1, no, no) \
SC(141, 40, 38, 2, no, no) \
SC(142, 40, 38, 3, no, no) \
SC(143, 40, 38, 4, no, no) \
\
SC(144, 41, 39, 1, no, no) \
SC(145, 41, 39, 2, no, no) \
SC(146, 41, 39, 3, no, no) \
SC(147, 41, 39, 4, no, no) \
\
SC(148, 42, 40, 1, no, no) \
SC(149, 42, 40, 2, no, no) \
SC(150, 42, 40, 3, no, no) \
SC(151, 42, 40, 4, no, no) \
\
SC(152, 43, 41, 1, no, no) \
SC(153, 43, 41, 2, no, no) \
SC(154, 43, 41, 3, no, no) \
SC(155, 43, 41, 4, no, no) \
\
SC(156, 44, 42, 1, no, no) \
SC(157, 44, 42, 2, no, no) \
SC(158, 44, 42, 3, no, no) \
SC(159, 44, 42, 4, no, no) \
\
SC(160, 45, 43, 1, no, no) \
SC(161, 45, 43, 2, no, no) \
SC(162, 45, 43, 3, no, no) \
SC(163, 45, 43, 4, no, no) \
\
SC(164, 46, 44, 1, no, no) \
SC(165, 46, 44, 2, no, no) \
SC(166, 46, 44, 3, no, no) \
SC(167, 46, 44, 4, no, no) \
\
SC(168, 47, 45, 1, no, no) \
SC(169, 47, 45, 2, no, no) \
SC(170, 47, 45, 3, no, no) \
SC(171, 47, 45, 4, no, no) \
\
SC(172, 48, 46, 1, no, no) \
SC(173, 48, 46, 2, no, no) \
SC(174, 48, 46, 3, no, no) \
SC(175, 48, 46, 4, no, no) \
\
SC(176, 49, 47, 1, no, no) \
SC(177, 49, 47, 2, no, no) \
SC(178, 49, 47, 3, no, no) \
SC(179, 49, 47, 4, no, no) \
\
SC(180, 50, 48, 1, no, no) \
SC(181, 50, 48, 2, no, no) \
SC(182, 50, 48, 3, no, no) \
SC(183, 50, 48, 4, no, no) \
\
SC(184, 51, 49, 1, no, no) \
SC(185, 51, 49, 2, no, no) \
SC(186, 51, 49, 3, no, no) \
SC(187, 51, 49, 4, no, no) \
\
SC(188, 52, 50, 1, no, no) \
SC(189, 52, 50, 2, no, no) \
SC(190, 52, 50, 3, no, no) \
SC(191, 52, 50, 4, no, no) \
\
SC(192, 53, 51, 1, no, no) \
SC(193, 53, 51, 2, no, no) \
SC(194, 53, 51, 3, no, no) \
SC(195, 53, 51, 4, no, no) \
\
SC(196, 54, 52, 1, no, no) \
SC(197, 54, 52, 2, no, no) \
SC(198, 54, 52, 3, no, no) \
SC(199, 54, 52, 4, no, no) \
\
SC(200, 55, 53, 1, no, no) \
SC(201, 55, 53, 2, no, no) \
SC(202, 55, 53, 3, no, no) \
SC(203, 55, 53, 4, no, no) \
\
SC(204, 56, 54, 1, no, no) \
SC(205, 56, 54, 2, no, no) \
SC(206, 56, 54, 3, no, no) \
SC(207, 56, 54, 4, no, no) \
\
SC(208, 57, 55, 1, no, no) \
SC(209, 57, 55, 2, no, no) \
SC(210, 57, 55, 3, no, no) \
SC(211, 57, 55, 4, no, no) \
\
SC(212, 58, 56, 1, no, no) \
SC(213, 58, 56, 2, no, no) \
SC(214, 58, 56, 3, no, no) \
SC(215, 58, 56, 4, no, no) \
\
SC(216, 59, 57, 1, no, no) \
SC(217, 59, 57, 2, no, no) \
SC(218, 59, 57, 3, no, no) \
SC(219, 59, 57, 4, no, no) \
\
SC(220, 60, 58, 1, no, no) \
SC(221, 60, 58, 2, no, no) \
SC(222, 60, 58, 3, no, no) \
SC(223, 60, 58, 4, no, no) \
\
SC(224, 61, 59, 1, no, no) \
SC(225, 61, 59, 2, no, no) \
SC(226, 61, 59, 3, no, no) \
SC(227, 61, 59, 4, no, no) \
\
SC(228, 62, 60, 1, no, no) \
SC(229, 62, 60, 2, no, no) \
SC(230, 62, 60, 3, no, no) \
SC(231, 62, 60, 4, no, no) \
\
SC(232, 63, 61, 1, no, no) \
SC(233, 63, 61, 2, no, no) \
SC(234, 63, 61, 3, no, no) \
#define SIZE_CLASSES_DEFINED
#define NTBINS 0
#define NLBINS 28
#define NBINS 31
#define LG_TINY_MAXCLASS "NA"
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 12) + (((size_t)3) << 10))
#endif
#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 16)
#define SIZE_CLASSES \
/* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \
SC( 0, 4, 4, 0, yes, 4) \
SC( 1, 4, 4, 1, yes, 4) \
SC( 2, 4, 4, 2, yes, 4) \
SC( 3, 4, 4, 3, yes, 4) \
\
SC( 4, 6, 4, 1, yes, 4) \
SC( 5, 6, 4, 2, yes, 4) \
SC( 6, 6, 4, 3, yes, 4) \
SC( 7, 6, 4, 4, yes, 4) \
\
SC( 8, 7, 5, 1, yes, 5) \
SC( 9, 7, 5, 2, yes, 5) \
SC( 10, 7, 5, 3, yes, 5) \
SC( 11, 7, 5, 4, yes, 5) \
\
SC( 12, 8, 6, 1, yes, 6) \
SC( 13, 8, 6, 2, yes, 6) \
SC( 14, 8, 6, 3, yes, 6) \
SC( 15, 8, 6, 4, yes, 6) \
\
SC( 16, 9, 7, 1, yes, 7) \
SC( 17, 9, 7, 2, yes, 7) \
SC( 18, 9, 7, 3, yes, 7) \
SC( 19, 9, 7, 4, yes, 7) \
\
SC( 20, 10, 8, 1, yes, 8) \
SC( 21, 10, 8, 2, yes, 8) \
SC( 22, 10, 8, 3, yes, 8) \
SC( 23, 10, 8, 4, yes, 8) \
\
SC( 24, 11, 9, 1, yes, 9) \
SC( 25, 11, 9, 2, yes, 9) \
SC( 26, 11, 9, 3, yes, 9) \
SC( 27, 11, 9, 4, yes, 9) \
\
SC( 28, 12, 10, 1, yes, no) \
SC( 29, 12, 10, 2, yes, no) \
SC( 30, 12, 10, 3, yes, no) \
SC( 31, 12, 10, 4, yes, no) \
\
SC( 32, 13, 11, 1, yes, no) \
SC( 33, 13, 11, 2, yes, no) \
SC( 34, 13, 11, 3, yes, no) \
SC( 35, 13, 11, 4, yes, no) \
\
SC( 36, 14, 12, 1, yes, no) \
SC( 37, 14, 12, 2, yes, no) \
SC( 38, 14, 12, 3, yes, no) \
SC( 39, 14, 12, 4, yes, no) \
\
SC( 40, 15, 13, 1, yes, no) \
SC( 41, 15, 13, 2, yes, no) \
SC( 42, 15, 13, 3, yes, no) \
SC( 43, 15, 13, 4, no, no) \
\
SC( 44, 16, 14, 1, no, no) \
SC( 45, 16, 14, 2, no, no) \
SC( 46, 16, 14, 3, no, no) \
SC( 47, 16, 14, 4, no, no) \
\
SC( 48, 17, 15, 1, no, no) \
SC( 49, 17, 15, 2, no, no) \
SC( 50, 17, 15, 3, no, no) \
SC( 51, 17, 15, 4, no, no) \
\
SC( 52, 18, 16, 1, no, no) \
SC( 53, 18, 16, 2, no, no) \
SC( 54, 18, 16, 3, no, no) \
SC( 55, 18, 16, 4, no, no) \
\
SC( 56, 19, 17, 1, no, no) \
SC( 57, 19, 17, 2, no, no) \
SC( 58, 19, 17, 3, no, no) \
SC( 59, 19, 17, 4, no, no) \
\
SC( 60, 20, 18, 1, no, no) \
SC( 61, 20, 18, 2, no, no) \
SC( 62, 20, 18, 3, no, no) \
SC( 63, 20, 18, 4, no, no) \
\
SC( 64, 21, 19, 1, no, no) \
SC( 65, 21, 19, 2, no, no) \
SC( 66, 21, 19, 3, no, no) \
SC( 67, 21, 19, 4, no, no) \
\
SC( 68, 22, 20, 1, no, no) \
SC( 69, 22, 20, 2, no, no) \
SC( 70, 22, 20, 3, no, no) \
SC( 71, 22, 20, 4, no, no) \
\
SC( 72, 23, 21, 1, no, no) \
SC( 73, 23, 21, 2, no, no) \
SC( 74, 23, 21, 3, no, no) \
SC( 75, 23, 21, 4, no, no) \
\
SC( 76, 24, 22, 1, no, no) \
SC( 77, 24, 22, 2, no, no) \
SC( 78, 24, 22, 3, no, no) \
SC( 79, 24, 22, 4, no, no) \
\
SC( 80, 25, 23, 1, no, no) \
SC( 81, 25, 23, 2, no, no) \
SC( 82, 25, 23, 3, no, no) \
SC( 83, 25, 23, 4, no, no) \
\
SC( 84, 26, 24, 1, no, no) \
SC( 85, 26, 24, 2, no, no) \
SC( 86, 26, 24, 3, no, no) \
SC( 87, 26, 24, 4, no, no) \
\
SC( 88, 27, 25, 1, no, no) \
SC( 89, 27, 25, 2, no, no) \
SC( 90, 27, 25, 3, no, no) \
SC( 91, 27, 25, 4, no, no) \
\
SC( 92, 28, 26, 1, no, no) \
SC( 93, 28, 26, 2, no, no) \
SC( 94, 28, 26, 3, no, no) \
SC( 95, 28, 26, 4, no, no) \
\
SC( 96, 29, 27, 1, no, no) \
SC( 97, 29, 27, 2, no, no) \
SC( 98, 29, 27, 3, no, no) \
SC( 99, 29, 27, 4, no, no) \
\
SC(100, 30, 28, 1, no, no) \
SC(101, 30, 28, 2, no, no) \
SC(102, 30, 28, 3, no, no) \
SC(103, 30, 28, 4, no, no) \
\
SC(104, 31, 29, 1, no, no) \
SC(105, 31, 29, 2, no, no) \
SC(106, 31, 29, 3, no, no) \
SC(107, 31, 29, 4, no, no) \
\
SC(108, 32, 30, 1, no, no) \
SC(109, 32, 30, 2, no, no) \
SC(110, 32, 30, 3, no, no) \
SC(111, 32, 30, 4, no, no) \
\
SC(112, 33, 31, 1, no, no) \
SC(113, 33, 31, 2, no, no) \
SC(114, 33, 31, 3, no, no) \
SC(115, 33, 31, 4, no, no) \
\
SC(116, 34, 32, 1, no, no) \
SC(117, 34, 32, 2, no, no) \
SC(118, 34, 32, 3, no, no) \
SC(119, 34, 32, 4, no, no) \
\
SC(120, 35, 33, 1, no, no) \
SC(121, 35, 33, 2, no, no) \
SC(122, 35, 33, 3, no, no) \
SC(123, 35, 33, 4, no, no) \
\
SC(124, 36, 34, 1, no, no) \
SC(125, 36, 34, 2, no, no) \
SC(126, 36, 34, 3, no, no) \
SC(127, 36, 34, 4, no, no) \
\
SC(128, 37, 35, 1, no, no) \
SC(129, 37, 35, 2, no, no) \
SC(130, 37, 35, 3, no, no) \
SC(131, 37, 35, 4, no, no) \
\
SC(132, 38, 36, 1, no, no) \
SC(133, 38, 36, 2, no, no) \
SC(134, 38, 36, 3, no, no) \
SC(135, 38, 36, 4, no, no) \
\
SC(136, 39, 37, 1, no, no) \
SC(137, 39, 37, 2, no, no) \
SC(138, 39, 37, 3, no, no) \
SC(139, 39, 37, 4, no, no) \
\
SC(140, 40, 38, 1, no, no) \
SC(141, 40, 38, 2, no, no) \
SC(142, 40, 38, 3, no, no) \
SC(143, 40, 38, 4, no, no) \
\
SC(144, 41, 39, 1, no, no) \
SC(145, 41, 39, 2, no, no) \
SC(146, 41, 39, 3, no, no) \
SC(147, 41, 39, 4, no, no) \
\
SC(148, 42, 40, 1, no, no) \
SC(149, 42, 40, 2, no, no) \
SC(150, 42, 40, 3, no, no) \
SC(151, 42, 40, 4, no, no) \
\
SC(152, 43, 41, 1, no, no) \
SC(153, 43, 41, 2, no, no) \
SC(154, 43, 41, 3, no, no) \
SC(155, 43, 41, 4, no, no) \
\
SC(156, 44, 42, 1, no, no) \
SC(157, 44, 42, 2, no, no) \
SC(158, 44, 42, 3, no, no) \
SC(159, 44, 42, 4, no, no) \
\
SC(160, 45, 43, 1, no, no) \
SC(161, 45, 43, 2, no, no) \
SC(162, 45, 43, 3, no, no) \
SC(163, 45, 43, 4, no, no) \
\
SC(164, 46, 44, 1, no, no) \
SC(165, 46, 44, 2, no, no) \
SC(166, 46, 44, 3, no, no) \
SC(167, 46, 44, 4, no, no) \
\
SC(168, 47, 45, 1, no, no) \
SC(169, 47, 45, 2, no, no) \
SC(170, 47, 45, 3, no, no) \
SC(171, 47, 45, 4, no, no) \
\
SC(172, 48, 46, 1, no, no) \
SC(173, 48, 46, 2, no, no) \
SC(174, 48, 46, 3, no, no) \
SC(175, 48, 46, 4, no, no) \
\
SC(176, 49, 47, 1, no, no) \
SC(177, 49, 47, 2, no, no) \
SC(178, 49, 47, 3, no, no) \
SC(179, 49, 47, 4, no, no) \
\
SC(180, 50, 48, 1, no, no) \
SC(181, 50, 48, 2, no, no) \
SC(182, 50, 48, 3, no, no) \
SC(183, 50, 48, 4, no, no) \
\
SC(184, 51, 49, 1, no, no) \
SC(185, 51, 49, 2, no, no) \
SC(186, 51, 49, 3, no, no) \
SC(187, 51, 49, 4, no, no) \
\
SC(188, 52, 50, 1, no, no) \
SC(189, 52, 50, 2, no, no) \
SC(190, 52, 50, 3, no, no) \
SC(191, 52, 50, 4, no, no) \
\
SC(192, 53, 51, 1, no, no) \
SC(193, 53, 51, 2, no, no) \
SC(194, 53, 51, 3, no, no) \
SC(195, 53, 51, 4, no, no) \
\
SC(196, 54, 52, 1, no, no) \
SC(197, 54, 52, 2, no, no) \
SC(198, 54, 52, 3, no, no) \
SC(199, 54, 52, 4, no, no) \
\
SC(200, 55, 53, 1, no, no) \
SC(201, 55, 53, 2, no, no) \
SC(202, 55, 53, 3, no, no) \
SC(203, 55, 53, 4, no, no) \
\
SC(204, 56, 54, 1, no, no) \
SC(205, 56, 54, 2, no, no) \
SC(206, 56, 54, 3, no, no) \
SC(207, 56, 54, 4, no, no) \
\
SC(208, 57, 55, 1, no, no) \
SC(209, 57, 55, 2, no, no) \
SC(210, 57, 55, 3, no, no) \
SC(211, 57, 55, 4, no, no) \
\
SC(212, 58, 56, 1, no, no) \
SC(213, 58, 56, 2, no, no) \
SC(214, 58, 56, 3, no, no) \
SC(215, 58, 56, 4, no, no) \
\
SC(216, 59, 57, 1, no, no) \
SC(217, 59, 57, 2, no, no) \
SC(218, 59, 57, 3, no, no) \
SC(219, 59, 57, 4, no, no) \
\
SC(220, 60, 58, 1, no, no) \
SC(221, 60, 58, 2, no, no) \
SC(222, 60, 58, 3, no, no) \
SC(223, 60, 58, 4, no, no) \
\
SC(224, 61, 59, 1, no, no) \
SC(225, 61, 59, 2, no, no) \
SC(226, 61, 59, 3, no, no) \
SC(227, 61, 59, 4, no, no) \
\
SC(228, 62, 60, 1, no, no) \
SC(229, 62, 60, 2, no, no) \
SC(230, 62, 60, 3, no, no) \
SC(231, 62, 60, 4, no, no) \
\
SC(232, 63, 61, 1, no, no) \
SC(233, 63, 61, 2, no, no) \
SC(234, 63, 61, 3, no, no) \
#define SIZE_CLASSES_DEFINED
#define NTBINS 0
#define NLBINS 28
#define NBINS 43
#define LG_TINY_MAXCLASS "NA"
#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9))
#define SMALL_MAXCLASS ((((size_t)1) << 15) + (((size_t)3) << 13))
#endif
#ifndef SIZE_CLASSES_DEFINED
# error "No size class definitions match configuration"
#endif
#undef SIZE_CLASSES_DEFINED
/*
* The small_size2bin lookup table uses uint8_t to encode each bin index, so we
* cannot support more than 256 small size classes. Further constrain NBINS to
* 255 since all small size classes, plus a "not small" size class must be
* stored in 8 bits of arena_chunk_map_t's bits field.
*/
#if (NBINS > 255)
# error "Too many small size classes"
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 199,875 | 46.931894 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/private_namespace.h | #define a0calloc JEMALLOC_N(a0calloc)
#define a0free JEMALLOC_N(a0free)
#define a0malloc JEMALLOC_N(a0malloc)
#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
#define arena_bin_index JEMALLOC_N(arena_bin_index)
#define arena_bin_info JEMALLOC_N(arena_bin_info)
#define arena_boot JEMALLOC_N(arena_boot)
#define arena_chunk_alloc_huge JEMALLOC_N(arena_chunk_alloc_huge)
#define arena_chunk_dalloc_huge JEMALLOC_N(arena_chunk_dalloc_huge)
#define arena_dalloc JEMALLOC_N(arena_dalloc)
#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
#define arena_dalloc_bin_locked JEMALLOC_N(arena_dalloc_bin_locked)
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked)
#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
#define arena_malloc JEMALLOC_N(arena_malloc)
#define arena_malloc_large JEMALLOC_N(arena_malloc_large)
#define arena_malloc_small JEMALLOC_N(arena_malloc_small)
#define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get)
#define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get)
#define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get)
#define arena_mapbits_get JEMALLOC_N(arena_mapbits_get)
#define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set)
#define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get)
#define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set)
#define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get)
#define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get)
#define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set)
#define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set)
#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get)
#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set)
#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get)
#define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set)
#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get)
#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read)
#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write)
#define arena_mapelm_to_pageind JEMALLOC_N(arena_mapelm_to_pageind)
#define arena_mapp_get JEMALLOC_N(arena_mapp_get)
#define arena_maxclass JEMALLOC_N(arena_maxclass)
#define arena_new JEMALLOC_N(arena_new)
#define arena_palloc JEMALLOC_N(arena_palloc)
#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
#define arena_prefork JEMALLOC_N(arena_prefork)
#define arena_prof_accum JEMALLOC_N(arena_prof_accum)
#define arena_prof_accum_impl JEMALLOC_N(arena_prof_accum_impl)
#define arena_prof_accum_locked JEMALLOC_N(arena_prof_accum_locked)
#define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get)
#define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set)
#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted)
#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get)
#define arena_purge_all JEMALLOC_N(arena_purge_all)
#define arena_quarantine_junk_small JEMALLOC_N(arena_quarantine_junk_small)
#define arena_ralloc JEMALLOC_N(arena_ralloc)
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
#define arena_run_regind JEMALLOC_N(arena_run_regind)
#define arena_runs_avail_tree_iter JEMALLOC_N(arena_runs_avail_tree_iter)
#define arena_salloc JEMALLOC_N(arena_salloc)
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
#define arenas JEMALLOC_N(arenas)
#define pools JEMALLOC_N(pools)
#define arenas_booted JEMALLOC_N(arenas_booted)
#define arenas_cleanup JEMALLOC_N(arenas_cleanup)
#define arenas_extend JEMALLOC_N(arenas_extend)
#define arenas_initialized JEMALLOC_N(arenas_initialized)
#define arenas_lock JEMALLOC_N(arenas_lock)
#define arenas_tls JEMALLOC_N(arenas_tls)
#define arenas_tsd JEMALLOC_N(arenas_tsd)
#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot)
#define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper)
#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get)
#define arenas_tsd_get_wrapper JEMALLOC_N(arenas_tsd_get_wrapper)
#define arenas_tsd_init_head JEMALLOC_N(arenas_tsd_init_head)
#define arenas_tsd_set JEMALLOC_N(arenas_tsd_set)
#define atomic_add_u JEMALLOC_N(atomic_add_u)
#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32)
#define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64)
#define atomic_add_z JEMALLOC_N(atomic_add_z)
#define atomic_sub_u JEMALLOC_N(atomic_sub_u)
#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32)
#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64)
#define atomic_sub_z JEMALLOC_N(atomic_sub_z)
#define base_alloc JEMALLOC_N(base_alloc)
#define base_boot JEMALLOC_N(base_boot)
#define base_calloc JEMALLOC_N(base_calloc)
#define base_free_fn JEMALLOC_N(base_free_fn)
#define base_malloc_fn JEMALLOC_N(base_malloc_fn)
#define base_node_alloc JEMALLOC_N(base_node_alloc)
#define base_node_dalloc JEMALLOC_N(base_node_dalloc)
#define base_pool JEMALLOC_N(base_pool)
#define base_postfork_child JEMALLOC_N(base_postfork_child)
#define base_postfork_parent JEMALLOC_N(base_postfork_parent)
#define base_prefork JEMALLOC_N(base_prefork)
#define bitmap_full JEMALLOC_N(bitmap_full)
#define bitmap_get JEMALLOC_N(bitmap_get)
#define bitmap_info_init JEMALLOC_N(bitmap_info_init)
#define bitmap_info_ngroups JEMALLOC_N(bitmap_info_ngroups)
#define bitmap_init JEMALLOC_N(bitmap_init)
#define bitmap_set JEMALLOC_N(bitmap_set)
#define bitmap_sfu JEMALLOC_N(bitmap_sfu)
#define bitmap_size JEMALLOC_N(bitmap_size)
#define bitmap_unset JEMALLOC_N(bitmap_unset)
#define bt_init JEMALLOC_N(bt_init)
#define buferror JEMALLOC_N(buferror)
#define choose_arena JEMALLOC_N(choose_arena)
#define choose_arena_hard JEMALLOC_N(choose_arena_hard)
#define chunk_alloc_arena JEMALLOC_N(chunk_alloc_arena)
#define chunk_alloc_base JEMALLOC_N(chunk_alloc_base)
#define chunk_alloc_default JEMALLOC_N(chunk_alloc_default)
#define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss)
#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
#define chunk_global_boot JEMALLOC_N(chunk_global_boot)
#define chunk_boot JEMALLOC_N(chunk_boot)
#define chunk_dalloc_default JEMALLOC_N(chunk_dalloc_default)
#define chunk_dalloc_mmap JEMALLOC_N(chunk_dalloc_mmap)
#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
#define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child)
#define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent)
#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get)
#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set)
#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
#define chunk_in_dss JEMALLOC_N(chunk_in_dss)
#define chunk_npages JEMALLOC_N(chunk_npages)
#define chunk_postfork_child JEMALLOC_N(chunk_postfork_child)
#define chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent)
#define chunk_prefork JEMALLOC_N(chunk_prefork)
#define chunk_unmap JEMALLOC_N(chunk_unmap)
#define chunk_record JEMALLOC_N(chunk_record)
#define chunks_mtx JEMALLOC_N(chunks_mtx)
#define chunks_rtree JEMALLOC_N(chunks_rtree)
#define chunksize JEMALLOC_N(chunksize)
#define chunksize_mask JEMALLOC_N(chunksize_mask)
#define ckh_bucket_search JEMALLOC_N(ckh_bucket_search)
#define ckh_count JEMALLOC_N(ckh_count)
#define ckh_delete JEMALLOC_N(ckh_delete)
#define ckh_evict_reloc_insert JEMALLOC_N(ckh_evict_reloc_insert)
#define ckh_insert JEMALLOC_N(ckh_insert)
#define ckh_isearch JEMALLOC_N(ckh_isearch)
#define ckh_iter JEMALLOC_N(ckh_iter)
#define ckh_new JEMALLOC_N(ckh_new)
#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash)
#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp)
#define ckh_rebuild JEMALLOC_N(ckh_rebuild)
#define ckh_remove JEMALLOC_N(ckh_remove)
#define ckh_search JEMALLOC_N(ckh_search)
#define ckh_string_hash JEMALLOC_N(ckh_string_hash)
#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
#define ckh_try_bucket_insert JEMALLOC_N(ckh_try_bucket_insert)
#define ckh_try_insert JEMALLOC_N(ckh_try_insert)
#define ctl_boot JEMALLOC_N(ctl_boot)
#define ctl_bymib JEMALLOC_N(ctl_bymib)
#define ctl_byname JEMALLOC_N(ctl_byname)
#define ctl_nametomib JEMALLOC_N(ctl_nametomib)
#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
#define ctl_prefork JEMALLOC_N(ctl_prefork)
#define dss_prec_names JEMALLOC_N(dss_prec_names)
#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter)
#define extent_tree_ad_iter_recurse JEMALLOC_N(extent_tree_ad_iter_recurse)
#define extent_tree_ad_iter_start JEMALLOC_N(extent_tree_ad_iter_start)
#define extent_tree_ad_last JEMALLOC_N(extent_tree_ad_last)
#define extent_tree_ad_new JEMALLOC_N(extent_tree_ad_new)
#define extent_tree_ad_next JEMALLOC_N(extent_tree_ad_next)
#define extent_tree_ad_nsearch JEMALLOC_N(extent_tree_ad_nsearch)
#define extent_tree_ad_prev JEMALLOC_N(extent_tree_ad_prev)
#define extent_tree_ad_psearch JEMALLOC_N(extent_tree_ad_psearch)
#define extent_tree_ad_remove JEMALLOC_N(extent_tree_ad_remove)
#define extent_tree_ad_reverse_iter JEMALLOC_N(extent_tree_ad_reverse_iter)
#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse)
#define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start)
#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search)
#define extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first)
#define extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert)
#define extent_tree_szad_iter JEMALLOC_N(extent_tree_szad_iter)
#define extent_tree_szad_iter_recurse JEMALLOC_N(extent_tree_szad_iter_recurse)
#define extent_tree_szad_iter_start JEMALLOC_N(extent_tree_szad_iter_start)
#define extent_tree_szad_last JEMALLOC_N(extent_tree_szad_last)
#define extent_tree_szad_new JEMALLOC_N(extent_tree_szad_new)
#define extent_tree_szad_next JEMALLOC_N(extent_tree_szad_next)
#define extent_tree_szad_nsearch JEMALLOC_N(extent_tree_szad_nsearch)
#define extent_tree_szad_prev JEMALLOC_N(extent_tree_szad_prev)
#define extent_tree_szad_psearch JEMALLOC_N(extent_tree_szad_psearch)
#define extent_tree_szad_remove JEMALLOC_N(extent_tree_szad_remove)
#define extent_tree_szad_reverse_iter JEMALLOC_N(extent_tree_szad_reverse_iter)
#define extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse)
#define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start)
#define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search)
#define get_errno JEMALLOC_N(get_errno)
#define hash JEMALLOC_N(hash)
#define hash_fmix_32 JEMALLOC_N(hash_fmix_32)
#define hash_fmix_64 JEMALLOC_N(hash_fmix_64)
#define hash_get_block_32 JEMALLOC_N(hash_get_block_32)
#define hash_get_block_64 JEMALLOC_N(hash_get_block_64)
#define hash_rotl_32 JEMALLOC_N(hash_rotl_32)
#define hash_rotl_64 JEMALLOC_N(hash_rotl_64)
#define hash_x64_128 JEMALLOC_N(hash_x64_128)
#define hash_x86_128 JEMALLOC_N(hash_x86_128)
#define hash_x86_32 JEMALLOC_N(hash_x86_32)
#define huge_allocated JEMALLOC_N(huge_allocated)
#define huge_boot JEMALLOC_N(huge_boot)
#define huge_dalloc JEMALLOC_N(huge_dalloc)
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
#define huge_malloc JEMALLOC_N(huge_malloc)
#define huge_ndalloc JEMALLOC_N(huge_ndalloc)
#define huge_nmalloc JEMALLOC_N(huge_nmalloc)
#define huge_palloc JEMALLOC_N(huge_palloc)
#define huge_postfork_child JEMALLOC_N(huge_postfork_child)
#define huge_postfork_parent JEMALLOC_N(huge_postfork_parent)
#define huge_prefork JEMALLOC_N(huge_prefork)
#define huge_prof_ctx_get JEMALLOC_N(huge_prof_ctx_get)
#define huge_prof_ctx_set JEMALLOC_N(huge_prof_ctx_set)
#define huge_ralloc JEMALLOC_N(huge_ralloc)
#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move)
#define huge_salloc JEMALLOC_N(huge_salloc)
#define icalloc JEMALLOC_N(icalloc)
#define icalloct JEMALLOC_N(icalloct)
#define idalloc JEMALLOC_N(idalloc)
#define idalloct JEMALLOC_N(idalloct)
#define imalloc JEMALLOC_N(imalloc)
#define imalloct JEMALLOC_N(imalloct)
#define in_valgrind JEMALLOC_N(in_valgrind)
#define ipalloc JEMALLOC_N(ipalloc)
#define ipalloct JEMALLOC_N(ipalloct)
#define iqalloc JEMALLOC_N(iqalloc)
#define iqalloct JEMALLOC_N(iqalloct)
#define iralloc JEMALLOC_N(iralloc)
#define iralloct JEMALLOC_N(iralloct)
#define iralloct_realign JEMALLOC_N(iralloct_realign)
#define isalloc JEMALLOC_N(isalloc)
#define isthreaded JEMALLOC_N(isthreaded)
#define ivsalloc JEMALLOC_N(ivsalloc)
#define ixalloc JEMALLOC_N(ixalloc)
#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
#define lg_floor JEMALLOC_N(lg_floor)
#define malloc_cprintf JEMALLOC_N(malloc_cprintf)
#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock)
#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child)
#define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent)
#define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork)
#define malloc_mutex_unlock JEMALLOC_N(malloc_mutex_unlock)
#define malloc_rwlock_init JEMALLOC_N(malloc_rwlock_init)
#define malloc_rwlock_postfork_child JEMALLOC_N(malloc_rwlock_postfork_child)
#define malloc_rwlock_postfork_parent JEMALLOC_N(malloc_rwlock_postfork_parent)
#define malloc_rwlock_prefork JEMALLOC_N(malloc_rwlock_prefork)
#define malloc_rwlock_rdlock JEMALLOC_N(malloc_rwlock_rdlock)
#define malloc_rwlock_wrlock JEMALLOC_N(malloc_rwlock_wrlock)
#define malloc_rwlock_unlock JEMALLOC_N(malloc_rwlock_unlock)
#define malloc_rwlock_destroy JEMALLOC_N(malloc_rwlock_destroy)
#define malloc_printf JEMALLOC_N(malloc_printf)
#define malloc_snprintf JEMALLOC_N(malloc_snprintf)
#define malloc_strtoumax JEMALLOC_N(malloc_strtoumax)
#define malloc_tsd_boot JEMALLOC_N(malloc_tsd_boot)
#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register)
#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc)
#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc)
#define malloc_tsd_no_cleanup JEMALLOC_N(malloc_tsd_no_cleanup)
#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf)
#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
#define malloc_write JEMALLOC_N(malloc_write)
#define map_bias JEMALLOC_N(map_bias)
#define mb_write JEMALLOC_N(mb_write)
#define mutex_boot JEMALLOC_N(mutex_boot)
#define narenas_auto JEMALLOC_N(narenas_auto)
#define narenas_total JEMALLOC_N(narenas_total)
#define narenas_total_get JEMALLOC_N(narenas_total_get)
#define ncpus JEMALLOC_N(ncpus)
#define nhbins JEMALLOC_N(nhbins)
#define npools JEMALLOC_N(npools)
#define npools_cnt JEMALLOC_N(npools_cnt)
#define opt_abort JEMALLOC_N(opt_abort)
#define opt_dss JEMALLOC_N(opt_dss)
#define opt_junk JEMALLOC_N(opt_junk)
#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk)
#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult)
#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval)
#define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample)
#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max)
#define opt_narenas JEMALLOC_N(opt_narenas)
#define opt_prof JEMALLOC_N(opt_prof)
#define opt_prof_accum JEMALLOC_N(opt_prof_accum)
#define opt_prof_active JEMALLOC_N(opt_prof_active)
#define opt_prof_final JEMALLOC_N(opt_prof_final)
#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
#define opt_prof_leak JEMALLOC_N(opt_prof_leak)
#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix)
#define opt_quarantine JEMALLOC_N(opt_quarantine)
#define opt_redzone JEMALLOC_N(opt_redzone)
#define opt_stats_print JEMALLOC_N(opt_stats_print)
#define opt_tcache JEMALLOC_N(opt_tcache)
#define opt_utrace JEMALLOC_N(opt_utrace)
#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
#define opt_zero JEMALLOC_N(opt_zero)
#define p2rz JEMALLOC_N(p2rz)
#define pages_purge JEMALLOC_N(pages_purge)
#define pools_shared_data_initialized JEMALLOC_N(pools_shared_data_initialized)
#define pow2_ceil JEMALLOC_N(pow2_ceil)
#define prof_backtrace JEMALLOC_N(prof_backtrace)
#define prof_boot0 JEMALLOC_N(prof_boot0)
#define prof_boot1 JEMALLOC_N(prof_boot1)
#define prof_boot2 JEMALLOC_N(prof_boot2)
#define prof_bt_count JEMALLOC_N(prof_bt_count)
#define prof_ctx_get JEMALLOC_N(prof_ctx_get)
#define prof_ctx_set JEMALLOC_N(prof_ctx_set)
#define prof_dump_open JEMALLOC_N(prof_dump_open)
#define prof_free JEMALLOC_N(prof_free)
#define prof_gdump JEMALLOC_N(prof_gdump)
#define prof_idump JEMALLOC_N(prof_idump)
#define prof_interval JEMALLOC_N(prof_interval)
#define prof_lookup JEMALLOC_N(prof_lookup)
#define prof_malloc JEMALLOC_N(prof_malloc)
#define prof_malloc_record_object JEMALLOC_N(prof_malloc_record_object)
#define prof_mdump JEMALLOC_N(prof_mdump)
#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
#define prof_prefork JEMALLOC_N(prof_prefork)
#define prof_realloc JEMALLOC_N(prof_realloc)
#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
#define prof_tdata_booted JEMALLOC_N(prof_tdata_booted)
#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
#define prof_tdata_get JEMALLOC_N(prof_tdata_get)
#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
#define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized)
#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls)
#define prof_tdata_tsd JEMALLOC_N(prof_tdata_tsd)
#define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot)
#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper)
#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get)
#define prof_tdata_tsd_get_wrapper JEMALLOC_N(prof_tdata_tsd_get_wrapper)
#define prof_tdata_tsd_init_head JEMALLOC_N(prof_tdata_tsd_init_head)
#define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set)
#define quarantine JEMALLOC_N(quarantine)
#define quarantine_alloc_hook JEMALLOC_N(quarantine_alloc_hook)
#define quarantine_boot JEMALLOC_N(quarantine_boot)
#define quarantine_booted JEMALLOC_N(quarantine_booted)
#define quarantine_cleanup JEMALLOC_N(quarantine_cleanup)
#define quarantine_init JEMALLOC_N(quarantine_init)
#define quarantine_tls JEMALLOC_N(quarantine_tls)
#define quarantine_tsd JEMALLOC_N(quarantine_tsd)
#define quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot)
#define quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper)
#define quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get)
#define quarantine_tsd_get_wrapper JEMALLOC_N(quarantine_tsd_get_wrapper)
#define quarantine_tsd_init_head JEMALLOC_N(quarantine_tsd_init_head)
#define quarantine_tsd_set JEMALLOC_N(quarantine_tsd_set)
#define register_zone JEMALLOC_N(register_zone)
#define rtree_delete JEMALLOC_N(rtree_delete)
#define rtree_get JEMALLOC_N(rtree_get)
#define rtree_get_locked JEMALLOC_N(rtree_get_locked)
#define rtree_new JEMALLOC_N(rtree_new)
#define rtree_postfork_child JEMALLOC_N(rtree_postfork_child)
#define rtree_postfork_parent JEMALLOC_N(rtree_postfork_parent)
#define rtree_prefork JEMALLOC_N(rtree_prefork)
#define rtree_set JEMALLOC_N(rtree_set)
#define s2u JEMALLOC_N(s2u)
#define sa2u JEMALLOC_N(sa2u)
#define set_errno JEMALLOC_N(set_errno)
#define small_bin2size JEMALLOC_N(small_bin2size)
#define small_bin2size_compute JEMALLOC_N(small_bin2size_compute)
#define small_bin2size_lookup JEMALLOC_N(small_bin2size_lookup)
#define small_bin2size_tab JEMALLOC_N(small_bin2size_tab)
#define small_s2u JEMALLOC_N(small_s2u)
#define small_s2u_compute JEMALLOC_N(small_s2u_compute)
#define small_s2u_lookup JEMALLOC_N(small_s2u_lookup)
#define small_size2bin JEMALLOC_N(small_size2bin)
#define small_size2bin_compute JEMALLOC_N(small_size2bin_compute)
#define small_size2bin_lookup JEMALLOC_N(small_size2bin_lookup)
#define small_size2bin_tab JEMALLOC_N(small_size2bin_tab)
#define stats_cactive JEMALLOC_N(stats_cactive)
#define stats_cactive_add JEMALLOC_N(stats_cactive_add)
#define stats_cactive_get JEMALLOC_N(stats_cactive_get)
#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub)
#define stats_chunks JEMALLOC_N(stats_chunks)
#define stats_print JEMALLOC_N(stats_print)
#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy)
#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large)
#define tcache_alloc_small JEMALLOC_N(tcache_alloc_small)
#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate)
#define tcache_arena_dissociate JEMALLOC_N(tcache_arena_dissociate)
#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
#define tcache_bin_info JEMALLOC_N(tcache_bin_info)
#define tcache_boot0 JEMALLOC_N(tcache_boot0)
#define tcache_boot1 JEMALLOC_N(tcache_boot1)
#define tcache_booted JEMALLOC_N(tcache_booted)
#define tcache_create JEMALLOC_N(tcache_create)
#define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large)
#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small)
#define tcache_destroy JEMALLOC_N(tcache_destroy)
#define tcache_enabled_booted JEMALLOC_N(tcache_enabled_booted)
#define tcache_enabled_get JEMALLOC_N(tcache_enabled_get)
#define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized)
#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
#define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls)
#define tcache_enabled_tsd JEMALLOC_N(tcache_enabled_tsd)
#define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot)
#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper)
#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
#define tcache_enabled_tsd_get_wrapper JEMALLOC_N(tcache_enabled_tsd_get_wrapper)
#define tcache_enabled_tsd_init_head JEMALLOC_N(tcache_enabled_tsd_init_head)
#define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set)
#define tcache_event JEMALLOC_N(tcache_event)
#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
#define tcache_flush JEMALLOC_N(tcache_flush)
#define tcache_get JEMALLOC_N(tcache_get)
#define tcache_get_hard JEMALLOC_N(tcache_get_hard)
#define tcache_initialized JEMALLOC_N(tcache_initialized)
#define tcache_maxclass JEMALLOC_N(tcache_maxclass)
#define tcache_salloc JEMALLOC_N(tcache_salloc)
#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
#define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup)
#define tcache_tls JEMALLOC_N(tcache_tls)
#define tcache_tsd JEMALLOC_N(tcache_tsd)
#define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot)
#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper)
#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get)
#define tcache_tsd_get_wrapper JEMALLOC_N(tcache_tsd_get_wrapper)
#define tcache_tsd_init_head JEMALLOC_N(tcache_tsd_init_head)
#define tcache_tsd_set JEMALLOC_N(tcache_tsd_set)
#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted)
#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized)
#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls)
#define thread_allocated_tsd JEMALLOC_N(thread_allocated_tsd)
#define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot)
#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper)
#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get)
#define thread_allocated_tsd_get_wrapper JEMALLOC_N(thread_allocated_tsd_get_wrapper)
#define thread_allocated_tsd_init_head JEMALLOC_N(thread_allocated_tsd_init_head)
#define thread_allocated_tsd_set JEMALLOC_N(thread_allocated_tsd_set)
#define tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion)
#define tsd_init_finish JEMALLOC_N(tsd_init_finish)
#define u2rz JEMALLOC_N(u2rz)
#define valgrind_freelike_block JEMALLOC_N(valgrind_freelike_block)
#define valgrind_make_mem_defined JEMALLOC_N(valgrind_make_mem_defined)
#define valgrind_make_mem_noaccess JEMALLOC_N(valgrind_make_mem_noaccess)
#define valgrind_make_mem_undefined JEMALLOC_N(valgrind_make_mem_undefined)
#define pool_new JEMALLOC_N(pool_new)
#define pool_destroy JEMALLOC_N(pool_destroy)
#define pools_lock JEMALLOC_N(pools_lock)
#define pool_base_lock JEMALLOC_N(pool_base_lock)
#define pool_prefork JEMALLOC_N(pool_prefork)
#define pool_postfork_parent JEMALLOC_N(pool_postfork_parent)
#define pool_postfork_child JEMALLOC_N(pool_postfork_child)
#define pool_alloc JEMALLOC_N(pool_alloc)
#define vec_get JEMALLOC_N(vec_get)
#define vec_set JEMALLOC_N(vec_set)
#define vec_delete JEMALLOC_N(vec_delete)
| 25,252 | 53.778742 | 95 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/jemalloc_internal_defs.h | /* ./../windows/jemalloc_gen/include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */
#ifndef JEMALLOC_INTERNAL_DEFS_H_
#define JEMALLOC_INTERNAL_DEFS_H_
/*
* If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
* public APIs to be prefixed. This makes it possible, with some care, to use
* multiple allocators simultaneously.
*/
#define JEMALLOC_PREFIX "je_vmem_"
#define JEMALLOC_CPREFIX "JE_VMEM_"
/*
* JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
* For shared libraries, symbol visibility mechanisms prevent these symbols
* from being exported, but for static libraries, naming collisions are a real
* possibility.
*/
#define JEMALLOC_PRIVATE_NAMESPACE je_vmem_je_
/*
* Hyper-threaded CPUs may need a special instruction inside spin loops in
* order to yield to another virtual CPU.
*/
#define CPU_SPINWAIT
/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */
/* #undef JEMALLOC_ATOMIC9 */
/*
* Defined if OSAtomic*() functions are available, as provided by Darwin, and
* documented in the atomic(3) manual page.
*/
/* #undef JEMALLOC_OSATOMIC */
/*
* Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and
* __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the
* functions are defined in libgcc instead of being inlines)
*/
/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */
/*
* Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and
* __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite
* __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the
* functions are defined in libgcc instead of being inlines)
*/
/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */
/*
* Defined if __builtin_clz() and __builtin_clzl() are available.
*/
/* #undef JEMALLOC_HAVE_BUILTIN_CLZ */
/*
* Defined if madvise(2) is available.
*/
/* #undef JEMALLOC_HAVE_MADVISE */
/*
* Defined if OSSpin*() functions are available, as provided by Darwin, and
* documented in the spinlock(3) manual page.
*/
/* #undef JEMALLOC_OSSPIN */
/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
* bootstrapping will cause recursion into the pthreads library. Therefore, if
* _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
* malloc_tsd.
*/
/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */
/*
* Defined if threaded initialization is known to be safe on this platform.
* Among other things, it must be possible to initialize a mutex without
* triggering allocation in order for threaded allocation to be safe.
*/
/* #undef JEMALLOC_THREADED_INIT */
/*
* Defined if the pthreads implementation defines
* _pthread_mutex_init_calloc_cb(), in which case the function is used in order
* to avoid recursive allocation during mutex initialization.
*/
/* #undef JEMALLOC_MUTEX_INIT_CB */
/* Non-empty if the tls_model attribute is supported. */
#define JEMALLOC_TLS_MODEL
/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */
#define JEMALLOC_CC_SILENCE
/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */
/* #undef JEMALLOC_CODE_COVERAGE */
/*
* JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
* inline functions.
*/
/* #undef JEMALLOC_DEBUG */
/* JEMALLOC_STATS enables statistics calculation. */
#define JEMALLOC_STATS
/* JEMALLOC_PROF enables allocation profiling. */
/* #undef JEMALLOC_PROF */
/* Use libunwind for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBUNWIND */
/* Use libgcc for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_LIBGCC */
/* Use gcc intrinsics for profile backtracing if defined. */
/* #undef JEMALLOC_PROF_GCC */
/*
* JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
* This makes it possible to allocate/deallocate objects without any locking
* when the cache is in the steady state.
*/
#define JEMALLOC_TCACHE
/*
* JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage
* segment (DSS).
*/
/* #undef JEMALLOC_DSS */
/* Support memory filling (junk/zero/quarantine/redzone). */
#define JEMALLOC_FILL
/* Support utrace(2)-based tracing. */
/* #undef JEMALLOC_UTRACE */
/* Support Valgrind. */
/* #undef JEMALLOC_VALGRIND */
/* Support optional abort() on OOM. */
/* #undef JEMALLOC_XMALLOC */
/* Support lazy locking (avoid locking unless a second thread is launched). */
/* #undef JEMALLOC_LAZY_LOCK */
/* One page is 2^STATIC_PAGE_SHIFT bytes. */
#define STATIC_PAGE_SHIFT 12
/*
* If defined, use munmap() to unmap freed chunks, rather than storing them for
* later reuse. This is disabled by default on Linux because common sequences
* of mmap()/munmap() calls will cause virtual memory map holes.
*/
/* #undef JEMALLOC_MUNMAP */
/* TLS is used to map arenas and magazine caches to threads. */
/* #undef JEMALLOC_TLS */
/*
* ffs()/ffsl() functions to use for bitmapping. Don't use these directly;
* instead, use jemalloc_ffs() or jemalloc_ffsl() from util.h.
*/
#define JEMALLOC_INTERNAL_FFSL ffsl
#define JEMALLOC_INTERNAL_FFS ffs
/*
* JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside
* within jemalloc-owned chunks before dereferencing them.
*/
/* #undef JEMALLOC_IVSALLOC */
/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
/* #undef JEMALLOC_ZONE */
/* #undef JEMALLOC_ZONE_VERSION */
/*
* Methods for purging unused pages differ between operating systems.
*
* madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages,
* such that new pages will be demand-zeroed if
* the address region is later touched.
* madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being
* unused, such that they will be discarded rather
* than swapped out.
*/
/* #undef JEMALLOC_PURGE_MADVISE_DONTNEED */
/* #undef JEMALLOC_PURGE_MADVISE_FREE */
/*
* Define if operating system has alloca.h header.
*/
/* #undef JEMALLOC_HAS_ALLOCA_H */
/* C99 restrict keyword supported. */
/* #undef JEMALLOC_HAS_RESTRICT */
/* For use by hash code. */
/* #undef JEMALLOC_BIG_ENDIAN */
/* sizeof(int) == 2^LG_SIZEOF_INT. */
#define LG_SIZEOF_INT 2
/* sizeof(long) == 2^LG_SIZEOF_LONG. */
#define LG_SIZEOF_LONG 2
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#define LG_SIZEOF_INTMAX_T 3
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
| 6,731 | 30.457944 | 142 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/public_unnamespace.h | #undef je_pool_create
#undef je_pool_delete
#undef je_pool_malloc
#undef je_pool_calloc
#undef je_pool_ralloc
#undef je_pool_aligned_alloc
#undef je_pool_free
#undef je_pool_malloc_usable_size
#undef je_pool_malloc_stats_print
#undef je_pool_extend
#undef je_pool_set_alloc_funcs
#undef je_pool_check
#undef je_malloc_conf
#undef je_malloc_message
#undef je_malloc
#undef je_calloc
#undef je_posix_memalign
#undef je_aligned_alloc
#undef je_realloc
#undef je_free
#undef je_mallocx
#undef je_rallocx
#undef je_xallocx
#undef je_sallocx
#undef je_dallocx
#undef je_nallocx
#undef je_mallctl
#undef je_mallctlnametomib
#undef je_mallctlbymib
#undef je_navsnprintf
#undef je_malloc_stats_print
#undef je_malloc_usable_size
| 720 | 20.848485 | 33 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/public_namespace.h | #define je_pool_create JEMALLOC_N(pool_create)
/*
 * Map each public je_* symbol through JEMALLOC_N() so the library can be
 * built with a configurable symbol prefix (see JEMALLOC_N in
 * jemalloc_internal.h).  (The matching #define for je_pool_create
 * precedes this list.)
 */
#define je_pool_delete JEMALLOC_N(pool_delete)
#define je_pool_malloc JEMALLOC_N(pool_malloc)
#define je_pool_calloc JEMALLOC_N(pool_calloc)
#define je_pool_ralloc JEMALLOC_N(pool_ralloc)
#define je_pool_aligned_alloc JEMALLOC_N(pool_aligned_alloc)
#define je_pool_free JEMALLOC_N(pool_free)
#define je_pool_malloc_usable_size JEMALLOC_N(pool_malloc_usable_size)
#define je_pool_malloc_stats_print JEMALLOC_N(pool_malloc_stats_print)
#define je_pool_extend JEMALLOC_N(pool_extend)
#define je_pool_set_alloc_funcs JEMALLOC_N(pool_set_alloc_funcs)
#define je_pool_check JEMALLOC_N(pool_check)
#define je_malloc_conf JEMALLOC_N(malloc_conf)
#define je_malloc_message JEMALLOC_N(malloc_message)
#define je_malloc JEMALLOC_N(malloc)
#define je_calloc JEMALLOC_N(calloc)
#define je_posix_memalign JEMALLOC_N(posix_memalign)
#define je_aligned_alloc JEMALLOC_N(aligned_alloc)
#define je_realloc JEMALLOC_N(realloc)
#define je_free JEMALLOC_N(free)
#define je_mallocx JEMALLOC_N(mallocx)
#define je_rallocx JEMALLOC_N(rallocx)
#define je_xallocx JEMALLOC_N(xallocx)
#define je_sallocx JEMALLOC_N(sallocx)
#define je_dallocx JEMALLOC_N(dallocx)
#define je_nallocx JEMALLOC_N(nallocx)
#define je_mallctl JEMALLOC_N(mallctl)
#define je_mallctlnametomib JEMALLOC_N(mallctlnametomib)
#define je_mallctlbymib JEMALLOC_N(mallctlbymib)
#define je_navsnprintf JEMALLOC_N(navsnprintf)
#define je_malloc_stats_print JEMALLOC_N(malloc_stats_print)
#define je_malloc_usable_size JEMALLOC_N(malloc_usable_size)
| 1,536 | 45.575758 | 70 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/jemalloc_internal.h | #ifndef JEMALLOC_INTERNAL_H
#define JEMALLOC_INTERNAL_H
#include "jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"
#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif
#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
# define JEMALLOC_N(n) jet_##n
# include "jemalloc/internal/public_namespace.h"
# define JEMALLOC_NO_RENAME
# include "jemalloc/jemalloc.h"
# undef JEMALLOC_NO_RENAME
#else
# define JEMALLOC_N(n) je_vmem_je_##n
# include "jemalloc/jemalloc.h"
#endif
#include "jemalloc/internal/private_namespace.h"
/*
 * Compile-time feature flags.  Each config_* constant mirrors whether the
 * corresponding JEMALLOC_* macro was defined at configure time, so feature
 * tests can be written as plain C conditionals and the optimizer can
 * eliminate dead branches.
 */
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
true
#else
false
#endif
;
static const bool have_dss =
#ifdef JEMALLOC_DSS
true
#else
false
#endif
;
static const bool config_fill =
#ifdef JEMALLOC_FILL
true
#else
false
#endif
;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
true
#else
false
#endif
;
static const bool config_prof =
#ifdef JEMALLOC_PROF
true
#else
false
#endif
;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
true
#else
false
#endif
;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
true
#else
false
#endif
;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
true
#else
false
#endif
;
static const bool config_stats =
#ifdef JEMALLOC_STATS
true
#else
false
#endif
;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
true
#else
false
#endif
;
static const bool config_tls =
#ifdef JEMALLOC_TLS
true
#else
false
#endif
;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
true
#else
false
#endif
;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
true
#else
false
#endif
;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
true
#else
false
#endif
;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
true
#else
false
#endif
;
#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif
#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif
#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif
#define RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"
/*
* jemalloc can conceptually be broken into components (arena, tcache, etc.),
* but there are circular dependencies that cannot be broken without
* substantial performance degradation. In order to reduce the effect on
* visual code flow, read the header files in multiple passes, with one of the
* following cpp variables defined during each pass:
*
* JEMALLOC_H_TYPES : Preprocessor-defined constants and psuedo-opaque data
* types.
* JEMALLOC_H_STRUCTS : Data structures.
* JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
* JEMALLOC_H_INLINES : Inline functions.
*/
/******************************************************************************/
#define JEMALLOC_H_TYPES
#include "jemalloc/internal/jemalloc_internal_macros.h"
#define MALLOCX_LG_ALIGN_MASK ((int)0x3f)
/* Smallest size class to support. */
#define LG_TINY_MIN 3
#define TINY_MIN (1U << LG_TINY_MIN)
/*
* Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size
* classes).
*/
#ifndef LG_QUANTUM
# if (defined(__i386__) || defined(_M_IX86))
# define LG_QUANTUM 4
# endif
# ifdef __ia64__
# define LG_QUANTUM 4
# endif
# ifdef __alpha__
# define LG_QUANTUM 4
# endif
# ifdef __sparc64__
# define LG_QUANTUM 4
# endif
# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
# define LG_QUANTUM 4
# endif
# ifdef __arm__
# define LG_QUANTUM 3
# endif
# ifdef __aarch64__
# define LG_QUANTUM 4
# endif
# ifdef __hppa__
# define LG_QUANTUM 4
# endif
# ifdef __mips__
# define LG_QUANTUM 3
# endif
# ifdef __powerpc__
# define LG_QUANTUM 4
# endif
# ifdef __s390__
# define LG_QUANTUM 4
# endif
# ifdef __SH4__
# define LG_QUANTUM 4
# endif
# ifdef __tile__
# define LG_QUANTUM 4
# endif
# ifdef __le32__
# define LG_QUANTUM 4
# endif
# ifndef LG_QUANTUM
# error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS"
# endif
#endif
#define QUANTUM ((size_t)(1U << LG_QUANTUM))
#define QUANTUM_MASK (QUANTUM - 1)
/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a) \
(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
#define LONG ((size_t)(1U << LG_SIZEOF_LONG))
#define LONG_MASK (LONG - 1)
/* Return the smallest long multiple that is >= a. */
#define LONG_CEILING(a) \
(((a) + LONG_MASK) & ~LONG_MASK)
#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
#define PTR_MASK (SIZEOF_PTR - 1)
/* Return the smallest (void *) multiple that is >= a. */
#define PTR_CEILING(a) \
(((a) + PTR_MASK) & ~PTR_MASK)
/*
* Maximum size of L1 cache line. This is used to avoid cache line aliasing.
* In addition, this controls the spacing of cacheline-spaced size classes.
*
* CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
* only handle raw constants.
*/
#define LG_CACHELINE 6
#define CACHELINE 64
#define CACHELINE_MASK (CACHELINE - 1)
/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s) \
(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
/* Page size. STATIC_PAGE_SHIFT is determined by the configure script. */
#ifdef PAGE_MASK
# undef PAGE_MASK
#endif
#define LG_PAGE STATIC_PAGE_SHIFT
#define PAGE ((size_t)(1U << STATIC_PAGE_SHIFT))
#define PAGE_MASK ((size_t)(PAGE - 1))
/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
(((s) + PAGE_MASK) & ~PAGE_MASK)
/* Return the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2BASE(a, alignment) \
((void *)((uintptr_t)(a) & (-(alignment))))
/* Return the offset between a and the nearest aligned address at or below a. */
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
((size_t)((uintptr_t)(a) & ((alignment) - 1)))
/* Return the smallest alignment multiple that is >= s. */
#define ALIGNMENT_CEILING(s, alignment) \
(((s) + ((alignment) - 1)) & (-(alignment)))
/* Declare a variable length array */
#if __STDC_VERSION__ < 199901L
# ifdef _MSC_VER
# include <malloc.h>
#ifndef alloca
# define alloca _alloca
#endif
# else
# ifdef JEMALLOC_HAS_ALLOCA_H
# include <alloca.h>
# else
# include <stdlib.h>
# endif
# endif
# define VARIABLE_ARRAY(type, name, count) \
type *name = alloca(sizeof(type) * (count))
#else
# define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#include "jemalloc/internal/pool.h"
#include "jemalloc/internal/vector.h"
#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define JEMALLOC_H_STRUCTS
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#include "jemalloc/internal/pool.h"
#include "jemalloc/internal/vector.h"
/* Per-thread running byte counters, kept via the thread_allocated TSD. */
typedef struct {
	uint64_t allocated;	/* Total bytes this thread has allocated. */
	uint64_t deallocated;	/* Total bytes this thread has freed. */
} thread_allocated_t;
/*
* The JEMALLOC_ARG_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
* argument.
*/
#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_ARG_CONCAT({0, 0})
#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS
extern bool opt_abort;
extern bool opt_junk;
extern size_t opt_quarantine;
extern bool opt_redzone;
extern bool opt_utrace;
extern bool opt_xmalloc;
extern bool opt_zero;
extern size_t opt_narenas;
extern bool in_valgrind;
/* Number of CPUs. */
extern unsigned ncpus;
extern unsigned npools;
extern unsigned npools_cnt;
extern pool_t base_pool;
extern pool_t **pools;
extern malloc_mutex_t pools_lock;
extern void *(*base_malloc_fn)(size_t);
extern void (*base_free_fn)(void *);
extern bool pools_shared_data_create(void);
arena_t *arenas_extend(pool_t *pool, unsigned ind);
bool arenas_tsd_extend(tsd_pool_t *tsd, unsigned len);
void arenas_cleanup(void *arg);
arena_t *choose_arena_hard(pool_t *pool);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#include "jemalloc/internal/pool.h"
#include "jemalloc/internal/vector.h"
#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define JEMALLOC_H_INLINES
#include "jemalloc/internal/pool.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
/*
* Include arena.h the first time in order to provide inline functions for this
* header's inlines.
*/
#define JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, tsd_pool_t)
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
unsigned narenas_total_get(pool_t *pool);
arena_t *choose_arena(arena_t *arena);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
/*
* Map of pthread_self() --> arenas[???], used for selecting an arena to use
* for allocations.
*/
malloc_tsd_externs(arenas, tsd_pool_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, tsd_pool_t, {0},
arenas_cleanup)
/*
 * Tell whether the given arena is a dummy placeholder (carries only a
 * pool pointer), identified by the ARENA_DUMMY_IND index.
 */
JEMALLOC_ALWAYS_INLINE bool
is_arena_dummy(arena_t *arena) {
	return (ARENA_DUMMY_IND == arena->ind);
}
/*
 * Compute the usable size that an allocation request of `size` bytes
 * would occupy, i.e. round `size` up to its containing size class
 * (small bin, page multiple, or chunk multiple).
 */
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{
	if (size > arena_maxclass)
		return (CHUNK_CEILING(size));
	if (size > SMALL_MAXCLASS)
		return (PAGE_CEILING(size));
	return (small_s2u(size));
}
/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 *
 * alignment must be a non-zero power of two.  Returns 0 on size_t
 * overflow, which callers must treat as an allocation failure.
 */
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
	size_t usize;

	/* Power-of-two check: exactly one bit set. */
	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size. For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |                32
	 *    144 | 10100000 |                32
	 *    192 | 11000000 |                64
	 */
	usize = ALIGNMENT_CEILING(size, alignment);
	/*
	 * (usize < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (usize < size) {
		/* size_t overflow. */
		return (0);
	}

	if (usize <= arena_maxclass && alignment <= PAGE) {
		/* Ordinary small/large request; page alignment suffices. */
		if (usize <= SMALL_MAXCLASS)
			return (small_s2u(usize));
		return (PAGE_CEILING(usize));
	} else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		usize = PAGE_CEILING(size);
		/*
		 * (usize < size) protects against very large sizes within
		 * PAGE of SIZE_T_MAX.
		 *
		 * (usize + alignment < usize) protects against the
		 * combination of maximal alignment and usize large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * usize value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (usize < size || usize + alignment < usize) {
			/* size_t overflow. */
			return (0);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 * If the run wouldn't fit within a chunk, round up to a huge
		 * allocation size.
		 */
		run_size = usize + alignment - PAGE;
		if (run_size <= arena_maxclass)
			return (PAGE_CEILING(usize));
		return (CHUNK_CEILING(usize));
	}
}
/* Read the pool's arena count under its arenas read lock. */
JEMALLOC_INLINE unsigned
narenas_total_get(pool_t *pool)
{
	unsigned n;

	malloc_rwlock_rdlock(&pool->arenas_lock);
	n = pool->narenas_total;
	malloc_rwlock_unlock(&pool->arenas_lock);

	return (n);
}
/*
 * Choose an arena based on a per-thread value.
 * Arena pointer must be either a valid arena pointer or a dummy arena with
 * pool field filled.
 *
 * Returns NULL only if the per-thread arenas array cannot be extended.
 */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
{
	arena_t *ret;
	tsd_pool_t *tsd;
	pool_t *pool;

	/* A real arena was supplied; use it directly. */
	if (!is_arena_dummy(arena))
		return (arena);

	pool = arena->pool;
	tsd = arenas_tsd_get();

	/* expand arenas array if necessary */
	if ((tsd->npools <= pool->pool_id) &&
	    arenas_tsd_extend(tsd, pool->pool_id)) {
		return (NULL);
	}

	/*
	 * A stale seqno means the pool slot was recycled since this thread
	 * cached its arena; in that case (or on first use) pick one the
	 * slow way.
	 */
	if ( (tsd->seqno[pool->pool_id] != pool->seqno) ||
	    (ret = tsd->arenas[pool->pool_id]) == NULL) {
		ret = choose_arena_hard(pool);
		assert(ret != NULL);
	}

	return (ret);
}
#endif
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
/*
* Include arena.h the second and third times in order to resolve circular
* dependencies with tcache.h.
*/
#define JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/tcache.h"
#define JEMALLOC_ARENA_INLINE_C
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_C
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#ifndef JEMALLOC_ENABLE_INLINE
void *imalloct(size_t size, bool try_tcache, arena_t *arena);
void *imalloc(size_t size);
void *pool_imalloc(pool_t *pool, size_t size);
void *icalloct(size_t size, bool try_tcache, arena_t *arena);
void *icalloc(size_t size);
void *pool_icalloc(pool_t *pool, size_t size);
void *ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena);
void *ipalloc(size_t usize, size_t alignment, bool zero);
void *pool_ipalloc(pool_t *pool, size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr, bool demote);
size_t pool_isalloc(pool_t *pool, const void *ptr, bool demote);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
void idalloct(void *ptr, bool try_tcache);
void pool_idalloct(pool_t *pool, void *ptr, bool try_tcache);
void idalloc(void *ptr);
void iqalloct(void *ptr, bool try_tcache);
void pool_iqalloct(pool_t *pool, void *ptr, bool try_tcache);
void iqalloc(void *ptr);
void *iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
arena_t *arena);
void *iralloct(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero);
void *pool_iralloc(pool_t *pool, void *ptr, size_t size, size_t extra,
size_t alignment, bool zero);
bool ixalloc(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero);
int msc_clz(unsigned int val);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
# ifdef _MSC_VER
/*
 * Count leading zeros for MSVC builds (stand-in for __builtin_clz).
 * Returns the bit width of int (32 or 64) when val == 0.
 *
 * Fix: the index out-parameter of _BitScanReverse/_BitScanReverse64 is
 * declared `unsigned long *`, not `unsigned int *`; the previous
 * `unsigned int res` was a pointer-type mismatch under MSVC.
 */
JEMALLOC_ALWAYS_INLINE int
msc_clz(unsigned int val)
{
	unsigned long res = 0;
# if LG_SIZEOF_INT == 2
	if (_BitScanReverse(&res, val)) {
		return 31 - (int)res;
	}
	else {
		return 32;
	}
# elif LG_SIZEOF_INT == 3
	if (_BitScanReverse64(&res, val)) {
		return 63 - (int)res;
	}
	else {
		return 64;
	}
# else
#  error "Unsupported clz function for that size of int"
# endif
}
#endif
/*
 * Allocate `size` uninitialized bytes from `arena` (or from the pool
 * carried by a dummy arena).  Requests above arena_maxclass take the
 * huge-allocation path.
 */
JEMALLOC_ALWAYS_INLINE void *
imalloct(size_t size, bool try_tcache, arena_t *arena)
{
	assert(size != 0);

	if (size > arena_maxclass)
		return (huge_malloc(arena, size, false));
	return (arena_malloc(arena, size, false, try_tcache));
}

/* imalloct() from the base pool, via a dummy arena. */
JEMALLOC_ALWAYS_INLINE void *
imalloc(size_t size)
{
	arena_t dummy;

	DUMMY_ARENA_INITIALIZE(dummy, &base_pool);
	return (imalloct(size, true, &dummy));
}

/* imalloct() from an explicit pool, via a dummy arena. */
JEMALLOC_ALWAYS_INLINE void *
pool_imalloc(pool_t *pool, size_t size)
{
	arena_t dummy;

	DUMMY_ARENA_INITIALIZE(dummy, pool);
	return (imalloct(size, true, &dummy));
}
/* Like imalloct(), but the returned memory is zeroed. */
JEMALLOC_ALWAYS_INLINE void *
icalloct(size_t size, bool try_tcache, arena_t *arena)
{
	if (size > arena_maxclass)
		return (huge_malloc(arena, size, true));
	return (arena_malloc(arena, size, true, try_tcache));
}

/* icalloct() from the base pool, via a dummy arena. */
JEMALLOC_ALWAYS_INLINE void *
icalloc(size_t size)
{
	arena_t dummy;

	DUMMY_ARENA_INITIALIZE(dummy, &base_pool);
	return (icalloct(size, true, &dummy));
}

/* icalloct() from an explicit pool, via a dummy arena. */
JEMALLOC_ALWAYS_INLINE void *
pool_icalloc(pool_t *pool, size_t size)
{
	arena_t dummy;

	DUMMY_ARENA_INITIALIZE(dummy, pool);
	return (icalloct(size, true, &dummy));
}
/*
 * Aligned allocation.  usize must be non-zero and already a usable size
 * for the requested alignment (i.e. usize == sa2u(usize, alignment)).
 */
JEMALLOC_ALWAYS_INLINE void *
ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment));

	if (usize <= arena_maxclass && alignment <= PAGE)
		/* Size classes already guarantee <= page alignment. */
		ret = arena_malloc(arena, usize, zero, try_tcache);
	else {
		if (usize <= arena_maxclass) {
			/* Super-page alignment: explicit aligned arena run. */
			ret = arena_palloc(choose_arena(arena), usize,
			    alignment, zero);
		} else if (alignment <= chunksize)
			/* Huge allocations are chunk-aligned by construction. */
			ret = huge_malloc(arena, usize, zero);
		else
			ret = huge_palloc(arena, usize, alignment, zero);
	}

	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	return (ret);
}
/* ipalloct() from the base pool, via a dummy arena. */
JEMALLOC_ALWAYS_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
	arena_t dummy;

	DUMMY_ARENA_INITIALIZE(dummy, &base_pool);
	return (ipalloct(usize, alignment, zero, true, &dummy));
}

/* ipalloct() from an explicit pool, via a dummy arena. */
JEMALLOC_ALWAYS_INLINE void *
pool_ipalloc(pool_t *pool, size_t usize, size_t alignment, bool zero)
{
	arena_t dummy;

	DUMMY_ARENA_INITIALIZE(dummy, pool);
	return (ipalloct(usize, alignment, zero, true, &dummy));
}
/*
 * Return the usable size of the allocation at ptr.
 *
 * Typical usage:
 *	size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || demote == false);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	/* ptr at its chunk base means a huge allocation. */
	if (chunk == ptr)
		return (huge_salloc(ptr));
	return (arena_salloc(ptr, demote));
}

/*
 * Pool-aware variant of isalloc(); only the huge-object lookup needs the
 * pool.
 *
 * Typical usage:
 *	size_t sz = pool_isalloc(pool, ptr, config_prof);
 */
JEMALLOC_ALWAYS_INLINE size_t
pool_isalloc(pool_t *pool, const void *ptr, bool demote)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || demote == false);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk == ptr)
		return (huge_pool_salloc(pool, ptr));
	return (arena_salloc(ptr, demote));
}
/*
 * Validated isalloc(): return the usable size of ptr, or 0 if ptr is not
 * within any chunk managed by jemalloc (checked against every pool's
 * chunk rtree under pools_lock).
 */
JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{
	size_t i;

	malloc_mutex_lock(&pools_lock);
	/* Snapshot the pool count while holding the lock. */
	unsigned n = npools;
	for (i = 0; i < n; ++i) {
		pool_t *pool = pools[i];
		if (pool == NULL)
			continue;
		/* Return 0 if ptr is not within a chunk managed by jemalloc. */
		if (rtree_get(pool->chunks_rtree,
		    (uintptr_t)CHUNK_ADDR2BASE(ptr)) != 0)
			break;
	}
	malloc_mutex_unlock(&pools_lock);
	/* i == n: no pool claimed the chunk containing ptr. */
	if (i == n)
		return 0;

	return (isalloc(ptr, demote));
}
/*
 * Redzone size attached to an allocation of usable size `usize`; only
 * small size classes carry redzones.
 */
JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
	size_t binind;

	if (usize > SMALL_MAXCLASS)
		return (0);
	binind = small_size2bin(usize);
	assert(binind < NBINS);
	return (arena_bin_info[binind].redzone_size);
}

/* Redzone size of the allocation at ptr. */
JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
	return (u2rz(isalloc(ptr, false)));
}
/*
 * Immediate deallocation of ptr (no quarantine).  Huge objects are
 * released through the base pool, everything else through the arena.
 */
JEMALLOC_ALWAYS_INLINE void
idalloct(void *ptr, bool try_tcache)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk == ptr)
		huge_dalloc(&base_pool, ptr);
	else
		arena_dalloc(chunk, ptr, try_tcache);
}

/* idalloct() for an allocation owned by an explicit pool. */
JEMALLOC_ALWAYS_INLINE void
pool_idalloct(pool_t *pool, void *ptr, bool try_tcache)
{
	arena_chunk_t *chunk;

	assert(ptr != NULL);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk == ptr)
		huge_dalloc(pool, ptr);
	else
		arena_dalloc(chunk, ptr, try_tcache);
}

/* idalloct() with tcache enabled. */
JEMALLOC_ALWAYS_INLINE void
idalloc(void *ptr)
{
	idalloct(ptr, true);
}
/*
 * Quarantine-aware deallocation: route ptr through the quarantine when
 * junk filling with quarantine is configured, otherwise free immediately.
 */
JEMALLOC_ALWAYS_INLINE void
iqalloct(void *ptr, bool try_tcache)
{
	if (!(config_fill && opt_quarantine)) {
		idalloct(ptr, try_tcache);
		return;
	}
	quarantine(ptr);
}

/* iqalloct() for an allocation owned by an explicit pool. */
JEMALLOC_ALWAYS_INLINE void
pool_iqalloct(pool_t *pool, void *ptr, bool try_tcache)
{
	if (!(config_fill && opt_quarantine)) {
		pool_idalloct(pool, ptr, try_tcache);
		return;
	}
	quarantine(ptr);
}

/* iqalloct() with tcache enabled. */
JEMALLOC_ALWAYS_INLINE void
iqalloc(void *ptr)
{
	iqalloct(ptr, true);
}
/*
 * Reallocate by allocate-copy-free, used when the existing object cannot
 * satisfy the requested alignment in place.  Tries size+extra first, then
 * falls back to size alone.  On success ownership of ptr is released
 * (freed into its pool); on failure ptr is left untouched and NULL is
 * returned.
 */
JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
    arena_t *arena)
{
	void *p;
	size_t usize, copysize;

	usize = sa2u(size + extra, alignment);
	if (usize == 0)
		return (NULL);
	p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
	if (p == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, without extra this time. */
		usize = sa2u(size, alignment);
		if (usize == 0)
			return (NULL);
		p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
		if (p == NULL)
			return (NULL);
	}
	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	memcpy(p, ptr, copysize);
	pool_iqalloct(arena->pool, ptr, try_tcache_dalloc);
	return (p);
}
/*
 * Reallocate ptr to size (+ optional extra), honoring alignment, within
 * the pool carried by `arena`.  Returns the (possibly moved) pointer, or
 * NULL on failure, in which case ptr remains valid.
 */
JEMALLOC_ALWAYS_INLINE void *
iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
    bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	oldsize = isalloc(ptr, config_prof);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		return (iralloct_realign(ptr, oldsize, size, extra, alignment,
		    zero, try_tcache_alloc, try_tcache_dalloc, arena));
	}

	if (size + extra <= arena_maxclass) {
		void *ret;

		ret = arena_ralloc(arena, ptr, oldsize, size, extra,
		    alignment, zero, try_tcache_alloc,
		    try_tcache_dalloc);
		if ((ret != NULL) || (size + extra > oldsize))
			return (ret);

		/*
		 * arena_ralloc() failed on a shrinking request; if the old
		 * object is huge, first shrink it to one chunk via
		 * huge_ralloc(), then retry the arena-sized reallocation.
		 */
		if (oldsize > chunksize) {
			size_t old_usize JEMALLOC_CC_SILENCE_INIT(0);
			UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
			if (config_valgrind && in_valgrind) {
				old_usize = isalloc(ptr, config_prof);
				old_rzsize = config_prof ?
				    p2rz(ptr) : u2rz(old_usize);
			}
			ret = huge_ralloc(arena, ptr, oldsize, chunksize, 0,
			    alignment, zero, try_tcache_dalloc);
			JEMALLOC_VALGRIND_REALLOC(true, ret, s2u(chunksize),
			    true, ptr, old_usize, old_rzsize, true, false);
			if (ret != NULL) {
				/* Now, it should succeed... */
				return arena_ralloc(arena, ret, chunksize, size,
				    extra, alignment, zero, try_tcache_alloc,
				    try_tcache_dalloc);
			}
		}
		return NULL;
	} else {
		/* Target size is huge regardless of the old size. */
		return (huge_ralloc(arena, ptr, oldsize, size, extra,
		    alignment, zero, try_tcache_dalloc));
	}
}
/* iralloct() within the base pool, via a dummy arena. */
JEMALLOC_ALWAYS_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
{
	arena_t dummy;

	DUMMY_ARENA_INITIALIZE(dummy, &base_pool);
	return (iralloct(ptr, size, extra, alignment, zero, true, true, &dummy));
}

/* iralloct() within an explicit pool, via a dummy arena. */
JEMALLOC_ALWAYS_INLINE void *
pool_iralloc(pool_t *pool, void *ptr, size_t size, size_t extra,
    size_t alignment, bool zero)
{
	arena_t dummy;

	DUMMY_ARENA_INITIALIZE(dummy, pool);
	return (iralloct(ptr, size, extra, alignment, zero, true, true, &dummy));
}
/*
 * Attempt an in-place resize of ptr to size (+ optional extra).
 * Returns true on failure (resize impossible without moving), false on
 * success.
 */
JEMALLOC_ALWAYS_INLINE bool
ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
{
	size_t oldsize;

	assert(ptr != NULL);
	assert(size != 0);

	if (alignment != 0 &&
	    ((uintptr_t)ptr & ((uintptr_t)alignment - 1)) != 0) {
		/* Existing object alignment is inadequate. */
		return (true);
	}

	oldsize = isalloc(ptr, config_prof);
	if (size > arena_maxclass) {
		return (huge_ralloc_no_move(&base_pool, ptr, oldsize, size,
		    extra, zero));
	}
	return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
}
malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t,
THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
#endif
#include "jemalloc/internal/prof.h"
#undef JEMALLOC_H_INLINES
#ifdef _WIN32
#define __builtin_clz(x) msc_clz(x)
#endif
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */
| 27,780 | 24.095754 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/internal/private_unnamespace.h | #undef a0calloc
#undef a0free
#undef a0malloc
#undef arena_alloc_junk_small
#undef arena_bin_index
#undef arena_bin_info
#undef arena_boot
#undef arena_chunk_alloc_huge
#undef arena_chunk_dalloc_huge
#undef arena_dalloc
#undef arena_dalloc_bin
#undef arena_dalloc_bin_locked
#undef arena_dalloc_junk_large
#undef arena_dalloc_junk_small
#undef arena_dalloc_large
#undef arena_dalloc_large_locked
#undef arena_dalloc_small
#undef arena_dss_prec_get
#undef arena_dss_prec_set
#undef arena_malloc
#undef arena_malloc_large
#undef arena_malloc_small
#undef arena_mapbits_allocated_get
#undef arena_mapbits_binind_get
#undef arena_mapbits_dirty_get
#undef arena_mapbits_get
#undef arena_mapbits_large_binind_set
#undef arena_mapbits_large_get
#undef arena_mapbits_large_set
#undef arena_mapbits_large_size_get
#undef arena_mapbits_small_runind_get
#undef arena_mapbits_small_set
#undef arena_mapbits_unallocated_set
#undef arena_mapbits_unallocated_size_get
#undef arena_mapbits_unallocated_size_set
#undef arena_mapbits_unzeroed_get
#undef arena_mapbits_unzeroed_set
#undef arena_mapbitsp_get
#undef arena_mapbitsp_read
#undef arena_mapbitsp_write
#undef arena_mapelm_to_pageind
#undef arena_mapp_get
#undef arena_maxclass
#undef arena_new
#undef arena_palloc
#undef arena_postfork_child
#undef arena_postfork_parent
#undef arena_prefork
#undef arena_prof_accum
#undef arena_prof_accum_impl
#undef arena_prof_accum_locked
#undef arena_prof_ctx_get
#undef arena_prof_ctx_set
#undef arena_prof_promoted
#undef arena_ptr_small_binind_get
#undef arena_purge_all
#undef arena_quarantine_junk_small
#undef arena_ralloc
#undef arena_ralloc_junk_large
#undef arena_ralloc_no_move
#undef arena_redzone_corruption
#undef arena_run_regind
#undef arena_runs_avail_tree_iter
#undef arena_salloc
#undef arena_stats_merge
#undef arena_tcache_fill_small
#undef arenas
#undef pools
#undef arenas_booted
#undef arenas_cleanup
#undef arenas_extend
#undef arenas_initialized
#undef arenas_lock
#undef arenas_tls
#undef arenas_tsd
#undef arenas_tsd_boot
#undef arenas_tsd_cleanup_wrapper
#undef arenas_tsd_get
#undef arenas_tsd_get_wrapper
#undef arenas_tsd_init_head
#undef arenas_tsd_set
#undef atomic_add_u
#undef atomic_add_uint32
#undef atomic_add_uint64
#undef atomic_add_z
#undef atomic_sub_u
#undef atomic_sub_uint32
#undef atomic_sub_uint64
#undef atomic_sub_z
#undef base_alloc
#undef base_boot
#undef base_calloc
#undef base_free_fn
#undef base_malloc_fn
#undef base_node_alloc
#undef base_node_dalloc
#undef base_pool
#undef base_postfork_child
#undef base_postfork_parent
#undef base_prefork
#undef bitmap_full
#undef bitmap_get
#undef bitmap_info_init
#undef bitmap_info_ngroups
#undef bitmap_init
#undef bitmap_set
#undef bitmap_sfu
#undef bitmap_size
#undef bitmap_unset
#undef bt_init
#undef buferror
#undef choose_arena
#undef choose_arena_hard
#undef chunk_alloc_arena
#undef chunk_alloc_base
#undef chunk_alloc_default
#undef chunk_alloc_dss
#undef chunk_alloc_mmap
#undef chunk_global_boot
#undef chunk_boot
#undef chunk_dalloc_default
#undef chunk_dalloc_mmap
#undef chunk_dss_boot
#undef chunk_dss_postfork_child
#undef chunk_dss_postfork_parent
#undef chunk_dss_prec_get
#undef chunk_dss_prec_set
#undef chunk_dss_prefork
#undef chunk_in_dss
#undef chunk_npages
#undef chunk_postfork_child
#undef chunk_postfork_parent
#undef chunk_prefork
#undef chunk_unmap
#undef chunk_record
#undef chunks_mtx
#undef chunks_rtree
#undef chunksize
#undef chunksize_mask
#undef ckh_bucket_search
#undef ckh_count
#undef ckh_delete
#undef ckh_evict_reloc_insert
#undef ckh_insert
#undef ckh_isearch
#undef ckh_iter
#undef ckh_new
#undef ckh_pointer_hash
#undef ckh_pointer_keycomp
#undef ckh_rebuild
#undef ckh_remove
#undef ckh_search
#undef ckh_string_hash
#undef ckh_string_keycomp
#undef ckh_try_bucket_insert
#undef ckh_try_insert
#undef ctl_boot
#undef ctl_bymib
#undef ctl_byname
#undef ctl_nametomib
#undef ctl_postfork_child
#undef ctl_postfork_parent
#undef ctl_prefork
#undef dss_prec_names
#undef extent_tree_ad_first
#undef extent_tree_ad_insert
#undef extent_tree_ad_iter
#undef extent_tree_ad_iter_recurse
#undef extent_tree_ad_iter_start
#undef extent_tree_ad_last
#undef extent_tree_ad_new
#undef extent_tree_ad_next
#undef extent_tree_ad_nsearch
#undef extent_tree_ad_prev
#undef extent_tree_ad_psearch
#undef extent_tree_ad_remove
#undef extent_tree_ad_reverse_iter
#undef extent_tree_ad_reverse_iter_recurse
#undef extent_tree_ad_reverse_iter_start
#undef extent_tree_ad_search
#undef extent_tree_szad_first
#undef extent_tree_szad_insert
#undef extent_tree_szad_iter
#undef extent_tree_szad_iter_recurse
#undef extent_tree_szad_iter_start
#undef extent_tree_szad_last
#undef extent_tree_szad_new
#undef extent_tree_szad_next
#undef extent_tree_szad_nsearch
#undef extent_tree_szad_prev
#undef extent_tree_szad_psearch
#undef extent_tree_szad_remove
#undef extent_tree_szad_reverse_iter
#undef extent_tree_szad_reverse_iter_recurse
#undef extent_tree_szad_reverse_iter_start
#undef extent_tree_szad_search
#undef get_errno
#undef hash
#undef hash_fmix_32
#undef hash_fmix_64
#undef hash_get_block_32
#undef hash_get_block_64
#undef hash_rotl_32
#undef hash_rotl_64
#undef hash_x64_128
#undef hash_x86_128
#undef hash_x86_32
#undef huge_allocated
#undef huge_boot
#undef huge_dalloc
#undef huge_dalloc_junk
#undef huge_malloc
#undef huge_ndalloc
#undef huge_nmalloc
#undef huge_palloc
#undef huge_postfork_child
#undef huge_postfork_parent
#undef huge_prefork
#undef huge_prof_ctx_get
#undef huge_prof_ctx_set
#undef huge_ralloc
#undef huge_ralloc_no_move
#undef huge_salloc
#undef icalloc
#undef icalloct
#undef idalloc
#undef idalloct
#undef imalloc
#undef imalloct
#undef in_valgrind
#undef ipalloc
#undef ipalloct
#undef iqalloc
#undef iqalloct
#undef iralloc
#undef iralloct
#undef iralloct_realign
#undef isalloc
#undef isthreaded
#undef ivsalloc
#undef ixalloc
#undef jemalloc_postfork_child
#undef jemalloc_postfork_parent
#undef jemalloc_prefork
#undef lg_floor
#undef malloc_cprintf
#undef malloc_mutex_init
#undef malloc_mutex_lock
#undef malloc_mutex_postfork_child
#undef malloc_mutex_postfork_parent
#undef malloc_mutex_prefork
#undef malloc_mutex_unlock
#undef malloc_rwlock_init
#undef malloc_rwlock_postfork_child
#undef malloc_rwlock_postfork_parent
#undef malloc_rwlock_prefork
#undef malloc_rwlock_rdlock
#undef malloc_rwlock_wrlock
#undef malloc_rwlock_unlock
#undef malloc_rwlock_destroy
#undef malloc_printf
#undef malloc_snprintf
#undef malloc_strtoumax
#undef malloc_tsd_boot
#undef malloc_tsd_cleanup_register
#undef malloc_tsd_dalloc
#undef malloc_tsd_malloc
#undef malloc_tsd_no_cleanup
#undef malloc_vcprintf
#undef malloc_vsnprintf
#undef malloc_write
#undef map_bias
#undef mb_write
#undef mutex_boot
#undef narenas_auto
#undef narenas_total
#undef narenas_total_get
#undef ncpus
#undef nhbins
#undef npools
#undef npools_cnt
#undef opt_abort
#undef opt_dss
#undef opt_junk
#undef opt_lg_chunk
#undef opt_lg_dirty_mult
#undef opt_lg_prof_interval
#undef opt_lg_prof_sample
#undef opt_lg_tcache_max
#undef opt_narenas
#undef opt_prof
#undef opt_prof_accum
#undef opt_prof_active
#undef opt_prof_final
#undef opt_prof_gdump
#undef opt_prof_leak
#undef opt_prof_prefix
#undef opt_quarantine
#undef opt_redzone
#undef opt_stats_print
#undef opt_tcache
#undef opt_utrace
#undef opt_xmalloc
#undef opt_zero
#undef p2rz
#undef pages_purge
#undef pools_shared_data_initialized
#undef pow2_ceil
#undef prof_backtrace
#undef prof_boot0
#undef prof_boot1
#undef prof_boot2
#undef prof_bt_count
#undef prof_ctx_get
#undef prof_ctx_set
#undef prof_dump_open
#undef prof_free
#undef prof_gdump
#undef prof_idump
#undef prof_interval
#undef prof_lookup
#undef prof_malloc
#undef prof_malloc_record_object
#undef prof_mdump
#undef prof_postfork_child
#undef prof_postfork_parent
#undef prof_prefork
#undef prof_realloc
#undef prof_sample_accum_update
#undef prof_sample_threshold_update
#undef prof_tdata_booted
#undef prof_tdata_cleanup
#undef prof_tdata_get
#undef prof_tdata_init
#undef prof_tdata_initialized
#undef prof_tdata_tls
#undef prof_tdata_tsd
#undef prof_tdata_tsd_boot
#undef prof_tdata_tsd_cleanup_wrapper
#undef prof_tdata_tsd_get
#undef prof_tdata_tsd_get_wrapper
#undef prof_tdata_tsd_init_head
#undef prof_tdata_tsd_set
#undef quarantine
#undef quarantine_alloc_hook
#undef quarantine_boot
#undef quarantine_booted
#undef quarantine_cleanup
#undef quarantine_init
#undef quarantine_tls
#undef quarantine_tsd
#undef quarantine_tsd_boot
#undef quarantine_tsd_cleanup_wrapper
#undef quarantine_tsd_get
#undef quarantine_tsd_get_wrapper
#undef quarantine_tsd_init_head
#undef quarantine_tsd_set
#undef register_zone
#undef rtree_delete
#undef rtree_get
#undef rtree_get_locked
#undef rtree_new
#undef rtree_postfork_child
#undef rtree_postfork_parent
#undef rtree_prefork
#undef rtree_set
#undef s2u
#undef sa2u
#undef set_errno
#undef small_bin2size
#undef small_bin2size_compute
#undef small_bin2size_lookup
#undef small_bin2size_tab
#undef small_s2u
#undef small_s2u_compute
#undef small_s2u_lookup
#undef small_size2bin
#undef small_size2bin_compute
#undef small_size2bin_lookup
#undef small_size2bin_tab
#undef stats_cactive
#undef stats_cactive_add
#undef stats_cactive_get
#undef stats_cactive_sub
#undef stats_chunks
#undef stats_print
#undef tcache_alloc_easy
#undef tcache_alloc_large
#undef tcache_alloc_small
#undef tcache_alloc_small_hard
#undef tcache_arena_associate
#undef tcache_arena_dissociate
#undef tcache_bin_flush_large
#undef tcache_bin_flush_small
#undef tcache_bin_info
#undef tcache_boot0
#undef tcache_boot1
#undef tcache_booted
#undef tcache_create
#undef tcache_dalloc_large
#undef tcache_dalloc_small
#undef tcache_destroy
#undef tcache_enabled_booted
#undef tcache_enabled_get
#undef tcache_enabled_initialized
#undef tcache_enabled_set
#undef tcache_enabled_tls
#undef tcache_enabled_tsd
#undef tcache_enabled_tsd_boot
#undef tcache_enabled_tsd_cleanup_wrapper
#undef tcache_enabled_tsd_get
#undef tcache_enabled_tsd_get_wrapper
#undef tcache_enabled_tsd_init_head
#undef tcache_enabled_tsd_set
#undef tcache_event
#undef tcache_event_hard
#undef tcache_flush
#undef tcache_get
#undef tcache_get_hard
#undef tcache_initialized
#undef tcache_maxclass
#undef tcache_salloc
#undef tcache_stats_merge
#undef tcache_thread_cleanup
#undef tcache_tls
#undef tcache_tsd
#undef tcache_tsd_boot
#undef tcache_tsd_cleanup_wrapper
#undef tcache_tsd_get
#undef tcache_tsd_get_wrapper
#undef tcache_tsd_init_head
#undef tcache_tsd_set
#undef thread_allocated_booted
#undef thread_allocated_initialized
#undef thread_allocated_tls
#undef thread_allocated_tsd
#undef thread_allocated_tsd_boot
#undef thread_allocated_tsd_cleanup_wrapper
#undef thread_allocated_tsd_get
#undef thread_allocated_tsd_get_wrapper
#undef thread_allocated_tsd_init_head
#undef thread_allocated_tsd_set
#undef tsd_init_check_recursion
#undef tsd_init_finish
#undef u2rz
#undef valgrind_freelike_block
#undef valgrind_make_mem_defined
#undef valgrind_make_mem_noaccess
#undef valgrind_make_mem_undefined
#undef pool_new
#undef pool_destroy
#undef pools_lock
#undef pool_base_lock
#undef pool_prefork
#undef pool_postfork_parent
#undef pool_postfork_child
#undef pool_alloc
#undef vec_get
#undef vec_set
#undef vec_delete
| 11,246 | 23.396963 | 44 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/err.h | /*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* err.h - error and warning messages
*/
#ifndef ERR_H
#define ERR_H 1
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
/*
* err - windows implementation of unix err function
*/
__declspec(noreturn) static void
err(int eval, const char *fmt, ...)
{
va_list vl;
va_start(vl, fmt);
vfprintf(stderr, fmt, vl);
va_end(vl);
exit(eval);
}
/*
* warn - windows implementation of unix warn function
*/
static void
warn(const char *fmt, ...)
{
va_list vl;
va_start(vl, fmt);
fprintf(stderr, "Warning: ");
vfprintf(stderr, fmt, vl);
va_end(vl);
}
#endif /* ERR_H */
| 2,190 | 29.859155 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sched.h | /*
* Copyright 2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake sched.h
*/
| 1,620 | 44.027778 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/win_mmap.h | /*
* Copyright 2015-2018, Intel Corporation
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* win_mmap.h -- (internal) tracks the regions mapped by mmap
*/
#ifndef WIN_MMAP_H
#define WIN_MMAP_H 1
#include "queue.h"
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define rounddown(x, y) (((x) / (y)) * (y))
void win_mmap_init(void);
void win_mmap_fini(void);
/* allocation/mmap granularity */
extern unsigned long long Mmap_align;
typedef enum FILE_MAPPING_TRACKER_FLAGS {
FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED = 0x0001,
/*
* This should hold the value of all flags ORed for debug purpose.
*/
FILE_MAPPING_TRACKER_FLAGS_MASK =
FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED
} FILE_MAPPING_TRACKER_FLAGS;
/*
* this structure tracks the file mappings outstanding per file handle
*/
typedef struct FILE_MAPPING_TRACKER {
SORTEDQ_ENTRY(FILE_MAPPING_TRACKER) ListEntry;
HANDLE FileHandle;
HANDLE FileMappingHandle;
void *BaseAddress;
void *EndAddress;
DWORD Access;
os_off_t Offset;
size_t FileLen;
FILE_MAPPING_TRACKER_FLAGS Flags;
} FILE_MAPPING_TRACKER, *PFILE_MAPPING_TRACKER;
extern SRWLOCK FileMappingQLock;
extern SORTEDQ_HEAD(FMLHead, FILE_MAPPING_TRACKER) FileMappingQHead;
#endif /* WIN_MMAP_H */
| 2,817 | 33.790123 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/platform.h | /*
* Copyright 2015-2018, Intel Corporation
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* platform.h -- dirty hacks to compile Linux code on Windows using VC++
*
* This is included to each source file using "/FI" (forced include) option.
*
* XXX - it is a subject for refactoring
*/
#ifndef PLATFORM_H
#define PLATFORM_H 1
#pragma warning(disable : 4996)
#pragma warning(disable : 4200) /* allow flexible array member */
#pragma warning(disable : 4819) /* non unicode characteres */
#ifdef __cplusplus
extern "C" {
#endif
/* Prevent PMDK compilation for 32-bit platforms */
#if defined(_WIN32) && !defined(_WIN64)
#error "32-bit builds of PMDK are not supported!"
#endif
#define _CRT_RAND_S /* rand_s() */
#include <windows.h>
#include <stdint.h>
#include <time.h>
#include <io.h>
#include <process.h>
#include <fcntl.h>
#include <sys/types.h>
#include <malloc.h>
#include <signal.h>
#include <intrin.h>
#include <direct.h>
/* use uuid_t definition from util.h */
#ifdef uuid_t
#undef uuid_t
#endif
/* a few trivial substitutions */
#define PATH_MAX MAX_PATH
#define __thread __declspec(thread)
#define __func__ __FUNCTION__
#ifdef _DEBUG
#define DEBUG
#endif
/*
* The inline keyword is available only in VC++.
* https://msdn.microsoft.com/en-us/library/bw1hbe6y.aspx
*/
#ifndef __cplusplus
#define inline __inline
#endif
/* XXX - no equivalents in VC++ */
#define __attribute__(a)
#define __builtin_constant_p(cnd) 0
/*
* missing definitions
*/
/* errno.h */
#define ELIBACC 79 /* cannot access a needed shared library */
/* sys/stat.h */
#define S_IRUSR S_IREAD
#define S_IWUSR S_IWRITE
#define S_IRGRP S_IRUSR
#define S_IWGRP S_IWUSR
#define O_SYNC 0
typedef int mode_t;
#define fchmod(fd, mode) 0 /* XXX - dummy */
#define setlinebuf(fp) setvbuf(fp, NULL, _IOLBF, BUFSIZ);
/* unistd.h */
typedef long long os_off_t;
typedef long long ssize_t;
int setenv(const char *name, const char *value, int overwrite);
int unsetenv(const char *name);
/* fcntl.h */
int posix_fallocate(int fd, os_off_t offset, os_off_t len);
/* string.h */
#define strtok_r strtok_s
/* time.h */
#define CLOCK_MONOTONIC 1
#define CLOCK_REALTIME 2
int clock_gettime(int id, struct timespec *ts);
/* signal.h */
typedef unsigned long long sigset_t; /* one bit for each signal */
C_ASSERT(NSIG <= sizeof(sigset_t) * 8);
struct sigaction {
void (*sa_handler) (int signum);
/* void (*sa_sigaction)(int, siginfo_t *, void *); */
sigset_t sa_mask;
int sa_flags;
void (*sa_restorer) (void);
};
__inline int
sigemptyset(sigset_t *set)
{
*set = 0;
return 0;
}
__inline int
sigfillset(sigset_t *set)
{
*set = ~0;
return 0;
}
__inline int
sigaddset(sigset_t *set, int signum)
{
if (signum <= 0 || signum >= NSIG) {
errno = EINVAL;
return -1;
}
*set |= (1ULL << (signum - 1));
return 0;
}
__inline int
sigdelset(sigset_t *set, int signum)
{
if (signum <= 0 || signum >= NSIG) {
errno = EINVAL;
return -1;
}
*set &= ~(1ULL << (signum - 1));
return 0;
}
__inline int
sigismember(const sigset_t *set, int signum)
{
if (signum <= 0 || signum >= NSIG) {
errno = EINVAL;
return -1;
}
return ((*set & (1ULL << (signum - 1))) ? 1 : 0);
}
/* sched.h */
/*
* sched_yield -- yield the processor
*/
__inline int
sched_yield(void)
{
SwitchToThread();
return 0; /* always succeeds */
}
/*
* helper macros for library ctor/dtor function declarations
*/
#define MSVC_CONSTR(func) \
void func(void); \
__pragma(comment(linker, "/include:_" #func)) \
__pragma(section(".CRT$XCU", read)) \
__declspec(allocate(".CRT$XCU")) \
const void (WINAPI *_##func)(void) = (const void (WINAPI *)(void))func;
#define MSVC_DESTR(func) \
void func(void); \
static void _##func##_reg(void) { atexit(func); }; \
MSVC_CONSTR(_##func##_reg)
#ifdef __cplusplus
}
#endif
#endif /* PLATFORM_H */
| 5,389 | 22.744493 | 76 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/libgen.h | /*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake libgen.h
*/
| 1,621 | 44.055556 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/endian.h | /*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* endian.h -- convert values between host and big-/little-endian byte order
*/
#ifndef ENDIAN_H
#define ENDIAN_H 1
/*
* XXX: On Windows we can assume little-endian architecture
*/
#include <intrin.h>
#define htole16(a) (a)
#define htole32(a) (a)
#define htole64(a) (a)
#define le16toh(a) (a)
#define le32toh(a) (a)
#define le64toh(a) (a)
#define htobe16(x) _byteswap_ushort(x)
#define htobe32(x) _byteswap_ulong(x)
#define htobe64(x) _byteswap_uint64(x)
#define be16toh(x) _byteswap_ushort(x)
#define be32toh(x) _byteswap_ulong(x)
#define be64toh(x) _byteswap_uint64(x)
#endif /* ENDIAN_H */
| 2,211 | 34.677419 | 76 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/features.h | /*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake features.h
*/
| 1,623 | 44.111111 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/unistd.h | /*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* unistd.h -- compatibility layer for POSIX operating system API
*/
#ifndef UNISTD_H
#define UNISTD_H 1
#include <stdio.h>
#define _SC_PAGESIZE 0
#define _SC_NPROCESSORS_ONLN 1
#define R_OK 04
#define W_OK 02
#define X_OK 00 /* execute permission doesn't exist on Windows */
#define F_OK 00
/*
* sysconf -- get configuration information at run time
*/
static __inline long
sysconf(int p)
{
SYSTEM_INFO si;
int ret = 0;
switch (p) {
case _SC_PAGESIZE:
GetSystemInfo(&si);
return si.dwPageSize;
case _SC_NPROCESSORS_ONLN:
for (int i = 0; i < GetActiveProcessorGroupCount(); i++) {
ret += GetActiveProcessorCount(i);
}
return ret;
default:
return 0;
}
}
#define getpid _getpid
/*
* pread -- read from a file descriptor at given offset
*/
static ssize_t
pread(int fd, void *buf, size_t count, os_off_t offset)
{
__int64 position = _lseeki64(fd, 0, SEEK_CUR);
_lseeki64(fd, offset, SEEK_SET);
int ret = _read(fd, buf, (unsigned)count);
_lseeki64(fd, position, SEEK_SET);
return ret;
}
/*
* pwrite -- write to a file descriptor at given offset
*/
static ssize_t
pwrite(int fd, const void *buf, size_t count, os_off_t offset)
{
__int64 position = _lseeki64(fd, 0, SEEK_CUR);
_lseeki64(fd, offset, SEEK_SET);
int ret = _write(fd, buf, (unsigned)count);
_lseeki64(fd, position, SEEK_SET);
return ret;
}
#define S_ISBLK(x) 0 /* BLK devices not exist on Windows */
/*
* basename -- parse pathname and return filename component
*/
static char *
basename(char *path)
{
char fname[_MAX_FNAME];
char ext[_MAX_EXT];
_splitpath(path, NULL, NULL, fname, ext);
sprintf(path, "%s%s", fname, ext);
return path;
}
/*
* dirname -- parse pathname and return directory component
*/
static char *
dirname(char *path)
{
if (path == NULL)
return ".";
size_t len = strlen(path);
if (len == 0)
return ".";
char *end = path + len;
/* strip trailing forslashes and backslashes */
while ((--end) > path) {
if (*end != '\\' && *end != '/') {
*(end + 1) = '\0';
break;
}
}
/* strip basename */
while ((--end) > path) {
if (*end == '\\' || *end == '/') {
*end = '\0';
break;
}
}
if (end != path) {
return path;
/* handle edge cases */
} else if (*end == '\\' || *end == '/') {
*(end + 1) = '\0';
} else {
*end++ = '.';
*end = '\0';
}
return path;
}
#endif /* UNISTD_H */
| 3,962 | 22.873494 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/strings.h | /*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake strings.h
*/
| 1,627 | 44.222222 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/dirent.h | /*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake dirent.h
*/
| 1,626 | 44.194444 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/uio.h | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sys/uio.h -- definition of iovec structure
*/
#ifndef SYS_UIO_H
#define SYS_UIO_H 1
#include <pmemcompat.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
 * POSIX-style gather write: writes iovcnt buffers described by iov to fd.
 * struct iovec is expected to come from <pmemcompat.h> (included above);
 * the implementation lives elsewhere in the project.
 */
ssize_t writev(int fd, const struct iovec *iov, int iovcnt);
#ifdef __cplusplus
}
#endif
#endif /* SYS_UIO_H */
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/file.h | /*
* Copyright 2015-2018, Intel Corporation
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sys/file.h -- file locking
*/
| 1,706 | 45.135135 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/statvfs.h | /*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake statvfs.h
*/
| 1,622 | 44.083333 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/param.h | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sys/param.h -- a few useful macros
*/
#ifndef SYS_PARAM_H
#define SYS_PARAM_H 1
/* Round x up to the nearest multiple of y (y need not be a power of two). */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
/* Ceiling division: number of y-sized groups needed to hold x items. */
#define howmany(x, y) (((x) + ((y) - 1)) / (y))
#define BPB 8 /* bits per byte */
/* Bit-array helpers: b is a byte array, i is a bit index. */
#define setbit(b, i) ((b)[(i) / BPB] |= 1 << ((i) % BPB))
#define isset(b, i) ((b)[(i) / BPB] & (1 << ((i) % BPB)))
#define isclr(b, i) (((b)[(i) / BPB] & (1 << ((i) % BPB))) == 0)
/* NOTE: both arguments are evaluated more than once -- avoid side effects. */
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif /* SYS_PARAM_H */
| 2,127 | 39.150943 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/mount.h | /*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake sys/mount.h
*/
| 1,629 | 44.277778 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/mman.h | /*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* sys/mman.h -- memory-mapped files for Windows
*/
#ifndef SYS_MMAN_H
#define SYS_MMAN_H 1
#ifdef __cplusplus
extern "C" {
#endif
/* Page-protection flags (POSIX values). */
#define PROT_NONE 0x0
#define PROT_READ 0x1
#define PROT_WRITE 0x2
#define PROT_EXEC 0x4
/* Mapping flags. */
#define MAP_SHARED 0x1
#define MAP_PRIVATE 0x2
#define MAP_FIXED 0x10
#define MAP_ANONYMOUS 0x20
#define MAP_ANON MAP_ANONYMOUS
#define MAP_NORESERVE 0x04000
/* msync() flags. */
#define MS_ASYNC 1
#define MS_SYNC 4
#define MS_INVALIDATE 2
/* Error return value of mmap(). */
#define MAP_FAILED ((void *)(-1))
/*
 * POSIX-style memory-mapped file API; Windows implementations are provided
 * elsewhere in the project.  os_off_t presumably comes from pmemcompat --
 * this header does not include it itself (callers must).
 */
void *mmap(void *addr, size_t len, int prot, int flags,
	int fd, os_off_t offset);
int munmap(void *addr, size_t len);
int msync(void *addr, size_t len, int flags);
int mprotect(void *addr, size_t len, int prot);
#ifdef __cplusplus
}
#endif
#endif /* SYS_MMAN_H */
| 2,357 | 30.026316 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/resource.h | /*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake sys/resource.h
*/
| 1,627 | 44.222222 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/sys/wait.h | /*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* fake sys/wait.h
*/
| 1,628 | 44.25 | 74 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/include/linux/limits.h | /*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* linux/limits.h -- fake header file
*/
/*
* XXX - The only purpose of this empty file is to avoid preprocessor
* errors when including a Linux-specific header file that has no equivalent
* on Windows. With this cheap trick, we don't need a lot of preprocessor
* conditionals in all the source code files.
*
* In the future, this will be addressed in some other way.
*/
| 1,986 | 43.155556 | 76 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/coverage.sh | #!/bin/sh
set -e

objdir=$1
suffix=$2
shift 2

# Remaining arguments are the object files to run gcov against.  Pass them
# as "$@" so each path stays a single word (the old `objs=$@` flattened them
# into one string and relied on unquoted word splitting, which breaks paths
# containing spaces or glob characters).
gcov -b -p -f -o "${objdir}" "$@"

# Move gcov outputs so that subsequent gcov invocations won't clobber results
# for the same sources with different compilation flags.
find . -maxdepth 1 -type f -name '*.gcov' | while IFS= read -r f; do
  mv "${f}" "${f}.${suffix}"
done
| 321 | 17.941176 | 77 | sh |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/autogen.sh | #!/bin/sh
# Regenerate the configure script with autoconf, then run it with
# --enable-autogen, forwarding any extra arguments.
#
# Capture the exit status into a variable before testing it: the `[ ... ]`
# test itself resets $?, so the old `echo "Error $? in $i"` always printed
# "Error 0" instead of the real failure code.
for i in autoconf; do
	echo "$i"
	$i
	ret=$?
	if [ $ret -ne 0 ]; then
		echo "Error $ret in $i"
		exit 1
	fi
done

echo "./configure --enable-autogen $@"
./configure --enable-autogen "$@"
ret=$?
if [ $ret -ne 0 ]; then
	echo "Error $ret in ./configure"
	exit 1
fi
| 266 | 13.833333 | 38 | sh |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/win_autogen.sh | #!/bin/sh
# Copyright 2016, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Out-of-tree generation directory and path to the jemalloc sources.
JEMALLOC_GEN=./../windows/jemalloc_gen
AC_PATH=./../../jemalloc

# Regenerate the configure script from configure.ac.
# Capture the exit status before testing it: `[ ... ]` resets $?, so the
# old `echo "Error $? in $i"` printed "Error 0" -- and $i was never set in
# this script (the message was copied from autogen.sh).
autoconf
ret=$?
if [ $ret -ne 0 ]; then
	echo "Error $ret in autoconf"
	exit 1
fi

if [ ! -d "$JEMALLOC_GEN" ]; then
	echo Creating... $JEMALLOC_GEN
	mkdir "$JEMALLOC_GEN"
fi

cd $JEMALLOC_GEN

# Configure jemalloc for the libvmem build with MSVC (cl).
echo "Run configure..."
$AC_PATH/configure \
	--enable-autogen \
	CC=cl \
	--enable-lazy-lock=no \
	--without-export \
	--with-jemalloc-prefix=je_vmem_ \
	--with-private-namespace=je_vmem_ \
	--disable-xmalloc \
	--disable-munmap \
	EXTRA_CFLAGS="-DJEMALLOC_LIBVMEM"
ret=$?
if [ $ret -ne 0 ]; then
	echo "Error $ret in $AC_PATH/configure"
	exit 1
fi
| 2,161 | 32.261538 | 73 | sh |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/prof_accum.h | #include "test/jemalloc_test.h"
#define NTHREADS 4
#define NALLOCS_PER_THREAD 50
#define DUMP_INTERVAL 1
#define BT_COUNT_CHECK_INTERVAL 5
/*
 * Emits the prototype void *alloc_<n>(unsigned bits).  The two generated
 * functions below are mutually recursive, so both prototypes are needed
 * before either definition.
 */
#define alloc_n_proto(n) \
void *alloc_##n(unsigned bits);
alloc_n_proto(0)
alloc_n_proto(1)
/*
 * Generates alloc_<n>(): consumes "bits" one bit at a time, recursing into
 * alloc_0() or alloc_1() depending on the low bit until bits reaches 0,
 * where mallocx(1, 0) is finally called.  Presumably each distinct "bits"
 * value therefore produces a distinct call chain (backtrace) for profiling
 * -- note the deliberate tail-call sabotage below, which keeps the frames
 * on the stack.
 */
#define alloc_n_gen(n) \
void * \
alloc_##n(unsigned bits) \
{ \
	void *p; \
 \
	if (bits == 0) \
		p = mallocx(1, 0); \
	else { \
		switch (bits & 0x1U) { \
		case 0: \
			p = (alloc_0(bits >> 1)); \
			break; \
		case 1: \
			p = (alloc_1(bits >> 1)); \
			break; \
		default: not_reached(); \
		} \
	} \
	/* Intentionally sabotage tail call optimization. */ \
	assert_ptr_not_null(p, "Unexpected mallocx() failure"); \
	return (p); \
}
| 794 | 21.083333 | 59 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/unit/pool.h | #include "test/jemalloc_test.h"
#define TEST_POOL_SIZE (16L * 1024L * 1024L)
#define TEST_TOO_SMALL_POOL_SIZE (2L * 1024L * 1024L)
#define TEST_VALUE 123456
#define TEST_MALLOC_FREE_LOOPS 2
#define TEST_MALLOC_SIZE 1024
#define TEST_ALLOCS_SIZE (TEST_POOL_SIZE / 8)
#define TEST_BUFFOR_CMP_SIZE (4L * 1024L * 1024L)
/* Static backing buffers for the pools under test (no heap involved). */
static char mem_pool[TEST_POOL_SIZE];
static char mem_extend_ok[TEST_POOL_SIZE];
/* Scratch array of allocation results (used by test_pool_check_memory). */
static void* allocs[TEST_ALLOCS_SIZE];
/*
 * Balance counter; every test asserts it ends at 0 ("memory leak when
 * using custom allocator") -- presumably bumped by an allocator hook
 * defined elsewhere.
 */
static int custom_allocs;
pool_t *pool;
memset(mem_pool, 1, TEST_POOL_SIZE);
pool = pool_create(mem_pool, 0, 0, 1);
assert_ptr_null(pool, "pool_create() should return NULL for size 0");
pool = pool_create(NULL, TEST_POOL_SIZE, 0, 1);
assert_ptr_null(pool, "pool_create() should return NULL for input addr NULL");
}
TEST_END
/* A pool is created in place: the returned handle aliases the buffer. */
TEST_BEGIN(test_pool_create) {
	pool_t *pool;
	custom_allocs = 0;
	memset(mem_pool, 0, TEST_POOL_SIZE);
	pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
	assert_ptr_eq(pool, mem_pool, "pool_create() should return addr with valid input");
	pool_delete(pool);
	assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
/*
 * pool_malloc() returns a writable pointer that lies strictly inside the
 * pool's backing buffer.
 */
TEST_BEGIN(test_pool_malloc) {
	pool_t *pool;
	custom_allocs = 0;
	memset(mem_pool, 0, TEST_POOL_SIZE);
	pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
	int *test = pool_malloc(pool, sizeof(int));
	assert_ptr_not_null(test, "pool_malloc should return valid ptr");
	*test = TEST_VALUE;
	assert_x_eq(*test, TEST_VALUE, "ptr should be usable");
	assert_lu_gt((uintptr_t)test, (uintptr_t)mem_pool,
		"pool_malloc() should return pointer to memory from pool");
	assert_lu_lt((uintptr_t)test, (uintptr_t)mem_pool+TEST_POOL_SIZE,
		"pool_malloc() should return pointer to memory from pool");
	pool_free(pool, test);
	pool_delete(pool);
	assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
/*
 * Repeatedly fill the pool with fixed-size allocations and free them all.
 * The count of successful allocations must be identical on every pass,
 * which proves that pool_free() actually returns chunks for reuse.
 */
TEST_BEGIN(test_pool_free) {
	pool_t *pool;
	int i, j, s = 0, prev_s = 0;
	int allocs = TEST_POOL_SIZE/TEST_MALLOC_SIZE;
	void *arr[allocs];
	custom_allocs = 0;
	memset(mem_pool, 0, TEST_POOL_SIZE);
	pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
	for (i = 0; i < TEST_MALLOC_FREE_LOOPS; ++i) {
		/* Allocate until the array is full or the pool runs dry. */
		for (j = 0; j < allocs; ++j) {
			arr[j] = pool_malloc(pool, TEST_MALLOC_SIZE);
			if (arr[j] != NULL) {
				s++;
			}
		}
		for (j = 0; j < allocs; ++j) {
			if (arr[j] != NULL) {
				pool_free(pool, arr[j]);
			}
		}
		/* prev_s == 0 only on the first pass; nothing to compare yet. */
		if (prev_s != 0) {
			assert_x_eq(s, prev_s,
				"pool_free() should record back used chunks");
		}
		prev_s = s;
		s = 0;
	}
	pool_delete(pool);
	assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
/*
 * pool_calloc() must return zeroed memory; the pool buffer is pre-filled
 * with 1s so zeroing by pool_calloc() is actually observable.
 */
TEST_BEGIN(test_pool_calloc) {
	pool_t *pool;
	custom_allocs = 0;
	memset(mem_pool, 1, TEST_POOL_SIZE);
	pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1);
	int *test = pool_calloc(pool, 1, sizeof(int));
	assert_ptr_not_null(test, "pool_calloc should return valid ptr");
	assert_x_eq(*test, 0, "pool_calloc should return zeroed memory");
	pool_free(pool, test);
	pool_delete(pool);
	assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
/*
 * pool_ralloc() with a NULL old pointer behaves like malloc; growing an
 * existing allocation yields a (possibly moved) block whose new tail is
 * writable and readable.
 */
TEST_BEGIN(test_pool_realloc) {
	pool_t *pool;
	custom_allocs = 0;
	memset(mem_pool, 0, TEST_POOL_SIZE);
	pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
	int *test = pool_ralloc(pool, NULL, sizeof(int));
	assert_ptr_not_null(test, "pool_ralloc with NULL addr should return valid ptr");
	int *test2 = pool_ralloc(pool, test, sizeof(int)*2);
	/*
	 * Check and use the reallocation result, not the old pointer:
	 * "test" may be stale after a successful pool_ralloc() (the
	 * original code asserted and dereferenced "test" here).
	 */
	assert_ptr_not_null(test2, "pool_ralloc should return valid ptr");
	test2[0] = TEST_VALUE;
	test2[1] = TEST_VALUE;
	assert_x_eq(test2[1], TEST_VALUE, "ptr should be usable");
	pool_free(pool, test2);
	pool_delete(pool);
	assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
/*
 * pool_aligned_alloc() returns a usable, 1024-byte-aligned pointer that
 * lies inside the pool buffer.
 */
TEST_BEGIN(test_pool_aligned_alloc) {
	pool_t *pool;
	custom_allocs = 0;
	memset(mem_pool, 0, TEST_POOL_SIZE);
	pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
	int *test = pool_aligned_alloc(pool, 1024, 1024);
	assert_ptr_not_null(test, "pool_aligned_alloc should return valid ptr");
	/* Low 10 address bits must be clear for 1024-byte alignment. */
	assert_x_eq(((uintptr_t)(test) & 1023), 0, "ptr should be aligned");
	assert_lu_gt((uintptr_t)test, (uintptr_t)mem_pool,
		"pool_aligned_alloc() should return pointer to memory from pool");
	assert_lu_lt((uintptr_t)test, (uintptr_t)mem_pool+TEST_POOL_SIZE,
		"pool_aligned_alloc() should return pointer to memory from pool");
	*test = TEST_VALUE;
	assert_x_eq(*test, TEST_VALUE, "ptr should be usable");
	pool_free(pool, test);
	pool_delete(pool);
	assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
/*
 * Create and destroy a pool on the same buffer 100 times; each round
 * threads a singly linked list through the allocations themselves and
 * then walks it to free everything, so stale pool state from a prior
 * round would show up as corruption.
 */
TEST_BEGIN(test_pool_reuse_pool) {
	pool_t *pool;
	size_t pool_num = 0;
	custom_allocs = 0;
	/* create and destroy pool multiple times */
	for (; pool_num<100; ++pool_num) {
		pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1);
		assert_ptr_not_null(pool, "Can not create pool!!!");
		if (pool == NULL) {
			break;
		}
		void *prev = NULL;
		size_t i = 0;
		/* allocate memory from pool */
		for (; i<100; ++i) {
			void **next = pool_malloc(pool, sizeof (void *));
			assert_lu_gt((uintptr_t)next, (uintptr_t)mem_pool,
				"pool_malloc() should return pointer to memory from pool");
			assert_lu_lt((uintptr_t)next, (uintptr_t)mem_pool+TEST_POOL_SIZE,
				"pool_malloc() should return pointer to memory from pool");
			/* each cell stores the address of the previous one */
			*next = prev;
			prev = next;
		}
		/* free all allocated memory from pool */
		while (prev != NULL) {
			void **act = prev;
			prev = *act;
			pool_free(pool, act);
		}
		pool_delete(pool);
	}
	assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
/*
 * For object sizes from 8 bytes up to TEST_BUFFOR_CMP_SIZE (doubling each
 * round), fill the pool with allocations, stamp each one with a unique
 * byte value, then verify no allocation was overwritten -- i.e. the pool
 * never hands out overlapping memory.
 */
TEST_BEGIN(test_pool_check_memory) {
	pool_t *pool;
	size_t pool_size = POOL_MINIMAL_SIZE;
	assert_lu_lt(POOL_MINIMAL_SIZE, TEST_POOL_SIZE, "Too small pool size");
	size_t object_size;
	size_t size_allocated;
	size_t i;
	size_t j;
	for (object_size = 8; object_size <= TEST_BUFFOR_CMP_SIZE ; object_size *= 2) {
		custom_allocs = 0;
		pool = pool_create(mem_pool, pool_size, 0, 1);
		assert_ptr_not_null(pool, "Can not create pool!!!");
		size_allocated = 0;
		memset(allocs, 0, TEST_ALLOCS_SIZE * sizeof(void *));
		for (i = 0; i < TEST_ALLOCS_SIZE;++i) {
			allocs[i] = pool_malloc(pool, object_size);
			if (allocs[i] == NULL) {
				/* out of memory in pool */
				break;
			}
			assert_lu_gt((uintptr_t)allocs[i], (uintptr_t)mem_pool,
				"pool_malloc() should return pointer to memory from pool");
			assert_lu_lt((uintptr_t)allocs[i], (uintptr_t)mem_pool+pool_size,
				"pool_malloc() should return pointer to memory from pool");
			size_allocated += object_size;
			/* fill each allocation with a unique value */
			memset(allocs[i], (char)i, object_size);
		}
		assert_ptr_not_null(allocs[0], "pool_malloc should return valid ptr");
		/* the pool must run dry before the allocs[] array fills up */
		assert_lu_lt(i + 1, TEST_ALLOCS_SIZE, "All memory should be used");
		/* check for unexpected modifications of prepare data */
		for (i = 0; i < TEST_ALLOCS_SIZE && allocs[i] != NULL; ++i) {
			char *buffer = allocs[i];
			for (j = 0; j < object_size; ++j)
				if (buffer[j] != (char)i) {
					assert_true(0, "Content of data object was modified unexpectedly"
						" for object size: %zu, id: %zu", object_size, j);
					break;
				}
		}
		pool_delete(pool);
		assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
	}
}
TEST_END
/*
 * Allocate pointer-sized cells until the pool is exhausted, chaining them
 * into a linked list, then free them all.  Verifies that at least some
 * memory was obtainable and that every pointer stayed inside the pool.
 */
TEST_BEGIN(test_pool_use_all_memory) {
	pool_t *pool;
	size_t size = 0;
	size_t pool_size = POOL_MINIMAL_SIZE;
	assert_lu_lt(POOL_MINIMAL_SIZE, TEST_POOL_SIZE, "Too small pool size");
	custom_allocs = 0;
	pool = pool_create(mem_pool, pool_size, 0, 1);
	assert_ptr_not_null(pool, "Can not create pool!!!");
	void *prev = NULL;
	for (;;) {
		void **next = pool_malloc(pool, sizeof (void *));
		if (next == NULL) {
			/* Out of memory in pool, test end */
			break;
		}
		size += sizeof (void *);
		assert_ptr_not_null(next, "pool_malloc should return valid ptr");
		assert_lu_gt((uintptr_t)next, (uintptr_t)mem_pool,
			"pool_malloc() should return pointer to memory from pool");
		assert_lu_lt((uintptr_t)next, (uintptr_t)mem_pool+pool_size,
			"pool_malloc() should return pointer to memory from pool");
		*next = prev;
		assert_x_eq((uintptr_t)(*next), (uintptr_t)(prev), "ptr should be usable");
		prev = next;
	}
	assert_lu_gt(size, 0, "Can not alloc any memory from pool");
	/* Free all allocated memory from pool */
	while (prev != NULL) {
		void **act = prev;
		prev = *act;
		pool_free(pool, act);
	}
	pool_delete(pool);
	assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
/* pool_extend() must refuse a region smaller than the chunk size. */
TEST_BEGIN(test_pool_extend_errors) {
	pool_t *pool;
	custom_allocs = 0;
	memset(mem_pool, 0, TEST_POOL_SIZE);
	pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
	memset(mem_extend_ok, 0, TEST_TOO_SMALL_POOL_SIZE);
	size_t usable_size = pool_extend(pool, mem_extend_ok, TEST_TOO_SMALL_POOL_SIZE, 0);
	assert_zu_eq(usable_size, 0, "pool_extend() should return 0"
		" when provided with memory size smaller then chunksize");
	pool_delete(pool);
	assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
/*
 * Extending an existing pool with a sufficiently large buffer must yield
 * a non-zero usable size (the buffer after chunk alignment).
 */
TEST_BEGIN(test_pool_extend) {
	pool_t *pool;
	custom_allocs = 0;
	memset(mem_pool, 0, TEST_POOL_SIZE);
	memset(mem_extend_ok, 0, TEST_POOL_SIZE);
	pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
	size_t grown = pool_extend(pool, mem_extend_ok, TEST_POOL_SIZE, 0);
	assert_zu_ne(grown, 0, "pool_extend() should return value"
		" after alignment when provided with enough memory");
	pool_delete(pool);
	assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
/*
 * Drain the pool completely, force the internal base allocator to look
 * exhausted (base_next_addr == base_past_addr), and verify that
 * pool_extend() can still add usable space afterwards.
 */
TEST_BEGIN(test_pool_extend_after_out_of_memory) {
	pool_t *pool;
	custom_allocs = 0;
	memset(mem_pool, 0, TEST_POOL_SIZE);
	pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
	/* use the all memory from pool and from base allocator */
	while (pool_malloc(pool, sizeof (void *)));
	pool->base_next_addr = pool->base_past_addr;
	memset(mem_extend_ok, 0, TEST_POOL_SIZE);
	size_t usable_size = pool_extend(pool, mem_extend_ok, TEST_POOL_SIZE, 0);
	assert_zu_ne(usable_size, 0, "pool_extend() should return value"
		" after alignment when provided with enough memory");
	pool_delete(pool);
	assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
}
TEST_END
/*
 * print_jemalloc_messages -- custom print function, for jemalloc
 *
 * Installed as je_malloc_message by the tests below to swallow the
 * diagnostics their negative paths intentionally provoke.  Both
 * parameters are deliberately unused.
 */
static void
print_jemalloc_messages(void* ignore, const char *s)
{
}
/*
 * je_pool_check() must report a live pool as valid (1), a deleted pool as
 * invalid, and must keep reporting valid across a pool_extend().
 */
TEST_BEGIN(test_pool_check_extend) {
	/* silence jemalloc diagnostics triggered by the negative checks */
	je_malloc_message = print_jemalloc_messages;
	pool_t *pool;
	custom_allocs = 0;
	pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1);
	pool_malloc(pool, 100);
	assert_d_eq(je_pool_check(pool), 1, "je_pool_check() return error");
	pool_delete(pool);
	assert_d_ne(je_pool_check(pool), 1, "je_pool_check() not return error");
	pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1);
	assert_d_eq(je_pool_check(pool), 1, "je_pool_check() return error");
	size_t size_extend = pool_extend(pool, mem_extend_ok, TEST_POOL_SIZE, 1);
	assert_zu_ne(size_extend, 0, "pool_extend() should add some free space");
	assert_d_eq(je_pool_check(pool), 1, "je_pool_check() return error");
	pool_malloc(pool, 100);
	pool_delete(pool);
	assert_d_ne(je_pool_check(pool), 1, "je_pool_check() not return error");
	assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
	je_malloc_message = NULL;
}
TEST_END
/*
 * Deliberately record a chunk that lies outside the pool's buffer
 * (it belongs to mem_extend_ok); je_pool_check() must then flag the
 * pool as corrupted.
 */
TEST_BEGIN(test_pool_check_memory_out_of_range) {
	/* silence jemalloc diagnostics triggered by the corruption below */
	je_malloc_message = print_jemalloc_messages;
	pool_t *pool;
	custom_allocs = 0;
	pool = pool_create(mem_pool, TEST_POOL_SIZE, 0, 1);
	assert_d_eq(je_pool_check(pool), 1, "je_pool_check() return error");
	/* chunk-align the foreign buffer and trim its size accordingly */
	void *usable_addr = (void *)CHUNK_CEILING((uintptr_t)mem_extend_ok);
	size_t usable_size = (TEST_POOL_SIZE - (uintptr_t)(usable_addr -
		(void *)mem_extend_ok)) & ~chunksize_mask;
	chunk_record(pool,
		&pool->chunks_szad_mmap, &pool->chunks_ad_mmap,
		usable_addr, usable_size, 0);
	assert_d_ne(je_pool_check(pool), 1, "je_pool_check() not return error");
	pool_delete(pool);
	assert_d_ne(je_pool_check(pool), 1, "je_pool_check() return error");
	assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
	je_malloc_message = NULL;
}
TEST_END
/*
 * Create a second pool on top of memory already handed to the first pool
 * via pool_extend(); je_pool_check() must flag both pools as invalid.
 */
TEST_BEGIN(test_pool_check_memory_overlap) {
	/* silence jemalloc diagnostics triggered by the overlap below */
	je_malloc_message = print_jemalloc_messages;
	pool_t *pool;
	pool_t *pool2;
	custom_allocs = 0;
	memset(mem_pool, 0, TEST_POOL_SIZE);
	pool = pool_create(mem_pool, TEST_POOL_SIZE, 1, 1);
	size_t size_extend = pool_extend(pool, mem_extend_ok, TEST_POOL_SIZE, 1);
	assert_zu_ne(size_extend, 0, "pool_extend() should add some free space");
	assert_d_eq(je_pool_check(pool), 1, "je_pool_check() return error");
	/* create another pool in the same memory region */
	pool2 = pool_create(mem_extend_ok, TEST_POOL_SIZE, 0, 1);
	assert_d_ne(je_pool_check(pool), 1, "je_pool_check() not return error");
	assert_d_ne(je_pool_check(pool2), 1, "je_pool_check() not return error");
	pool_delete(pool2);
	pool_delete(pool);
	assert_d_eq(custom_allocs, 0, "memory leak when using custom allocator");
	je_malloc_message = NULL;
}
TEST_END
/*
 * Complete list of pool unit-test cases, in execution order; expanded by
 * the test harness's main() to run the suite.
 */
#define POOL_TEST_CASES\
test_pool_create_errors, \
test_pool_create, \
test_pool_malloc, \
test_pool_free, \
test_pool_calloc, \
test_pool_realloc, \
test_pool_aligned_alloc, \
test_pool_reuse_pool, \
test_pool_check_memory, \
test_pool_use_all_memory, \
test_pool_extend_errors, \
test_pool_extend, \
test_pool_extend_after_out_of_memory, \
test_pool_check_extend, \
test_pool_check_memory_out_of_range, \
test_pool_check_memory_overlap
| 13,511 | 27.267782 | 84 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SFMT_PARAMS_H
#define SFMT_PARAMS_H
/* MEXP (Mersenne exponent) selects the generator's period; default 19937. */
#if !defined(MEXP)
#ifdef __GNUC__
  #warning "MEXP is not defined. I assume MEXP is 19937."
#endif
#define MEXP 19937
#endif
/*-----------------
BASIC DEFINITIONS
-----------------*/
/** Mersenne Exponent. The period of the sequence
* is a multiple of 2^MEXP-1.
* #define MEXP 19937 */
/** SFMT generator has an internal state array of 128-bit integers,
* and N is its size. */
#define N (MEXP / 128 + 1)
/** N32 is the size of internal state array when regarded as an array
* of 32-bit integers.*/
#define N32 (N * 4)
/** N64 is the size of internal state array when regarded as an array
* of 64-bit integers.*/
#define N64 (N * 2)
/*----------------------
the parameters of SFMT
following definitions are in paramsXXXX.h file.
----------------------*/
/** the pick up position of the array.
#define POS1 122
*/
/** the parameter of shift left as four 32-bit registers.
#define SL1 18
*/
/** the parameter of shift left as one 128-bit register.
* The 128-bit integer is shifted by (SL2 * 8) bits.
#define SL2 1
*/
/** the parameter of shift right as four 32-bit registers.
#define SR1 11
*/
/** the parameter of shift right as one 128-bit register.
* The 128-bit integer is shifted by (SL2 * 8) bits.
#define SR2 1
*/
/** A bitmask, used in the recursion. These parameters are introduced
* to break symmetry of SIMD.
#define MSK1 0xdfffffefU
#define MSK2 0xddfecb7fU
#define MSK3 0xbffaffffU
#define MSK4 0xbffffff6U
*/
/** These definitions are part of a 128-bit period certification vector.
#define PARITY1 0x00000001U
#define PARITY2 0x00000000U
#define PARITY3 0x00000000U
#define PARITY4 0xc98e126aU
*/
/* Pull in the concrete parameter set matching the chosen MEXP. */
#if MEXP == 607
#include "test/SFMT-params607.h"
#elif MEXP == 1279
#include "test/SFMT-params1279.h"
#elif MEXP == 2281
#include "test/SFMT-params2281.h"
#elif MEXP == 4253
#include "test/SFMT-params4253.h"
#elif MEXP == 11213
#include "test/SFMT-params11213.h"
#elif MEXP == 19937
#include "test/SFMT-params19937.h"
#elif MEXP == 44497
#include "test/SFMT-params44497.h"
#elif MEXP == 86243
#include "test/SFMT-params86243.h"
#elif MEXP == 132049
#include "test/SFMT-params132049.h"
#elif MEXP == 216091
#include "test/SFMT-params216091.h"
#else
/* Unsupported exponent: fail loudly on GCC, otherwise leave MEXP undefined. */
#ifdef __GNUC__
#error "MEXP is not valid."
#undef MEXP
#else
#undef MEXP
#endif
#endif
#endif /* SFMT_PARAMS_H */
| 4,286 | 31.233083 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params4253.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SFMT_PARAMS4253_H
#define SFMT_PARAMS4253_H
/* SFMT parameter set for MEXP == 4253; selected via SFMT-params.h. */
#define POS1 17
#define SL1 20
#define SL2 1
#define SR1 7
#define SR2 1
#define MSK1 0x9f7bffffU
#define MSK2 0x9fffff5fU
#define MSK3 0x3efffffbU
#define MSK4 0xfffff7bbU
#define PARITY1 0xa8000001U
#define PARITY2 0xaf5390a3U
#define PARITY3 0xb740b3f8U
#define PARITY4 0x6c11486dU
/* PARAMETERS FOR ALTIVEC */
#if defined(__APPLE__) /* For OSX */
#define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
#define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
#define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
#define ALTI_MSK64 \
(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
#define ALTI_SL2_PERM \
(vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
#define ALTI_SL2_PERM64 \
(vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
#define ALTI_SR2_PERM \
(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
#define ALTI_SR2_PERM64 \
(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
#else /* For OTHER OSs(Linux?) */
#define ALTI_SL1 {SL1, SL1, SL1, SL1}
#define ALTI_SR1 {SR1, SR1, SR1, SR1}
#define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
#define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
#define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
#define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
#define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
#define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
#endif /* For OSX */
#define IDSTR "SFMT-4253:17-20-1-7-1:9f7bffff-9fffff5f-3efffffb-fffff7bb"
#endif /* SFMT_PARAMS4253_H */
| 3,552 | 42.329268 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params607.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SFMT_PARAMS607_H
#define SFMT_PARAMS607_H
/* SFMT parameter set for MEXP == 607; selected via SFMT-params.h. */
#define POS1 2
#define SL1 15
#define SL2 3
#define SR1 13
#define SR2 3
#define MSK1 0xfdff37ffU
#define MSK2 0xef7f3f7dU
#define MSK3 0xff777b7dU
#define MSK4 0x7ff7fb2fU
#define PARITY1 0x00000001U
#define PARITY2 0x00000000U
#define PARITY3 0x00000000U
#define PARITY4 0x5986f054U
/* PARAMETERS FOR ALTIVEC */
#if defined(__APPLE__) /* For OSX */
#define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
#define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
#define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
#define ALTI_MSK64 \
(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
#define ALTI_SL2_PERM \
(vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
#define ALTI_SL2_PERM64 \
(vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
#define ALTI_SR2_PERM \
(vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
#define ALTI_SR2_PERM64 \
(vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
#else /* For OTHER OSs(Linux?) */
#define ALTI_SL1 {SL1, SL1, SL1, SL1}
#define ALTI_SR1 {SR1, SR1, SR1, SR1}
#define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
#define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
#define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
#define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
#define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
#define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
#endif /* For OSX */
#define IDSTR "SFMT-607:2-15-3-13-3:fdff37ff-ef7f3f7d-ff777b7d-7ff7fb2f"
#endif /* SFMT_PARAMS607_H */
| 3,558 | 42.402439 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params216091.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SFMT_PARAMS216091_H
#define SFMT_PARAMS216091_H
/* SFMT parameter set for MEXP == 216091; selected via SFMT-params.h. */
#define POS1 627
#define SL1 11
#define SL2 3
#define SR1 10
#define SR2 1
#define MSK1 0xbff7bff7U
#define MSK2 0xbfffffffU
#define MSK3 0xbffffa7fU
#define MSK4 0xffddfbfbU
#define PARITY1 0xf8000001U
#define PARITY2 0x89e80709U
#define PARITY3 0x3bd2b64bU
#define PARITY4 0x0c64b1e4U
/* PARAMETERS FOR ALTIVEC */
#if defined(__APPLE__) /* For OSX */
#define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
#define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
#define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
#define ALTI_MSK64 \
(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
#define ALTI_SL2_PERM \
(vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
#define ALTI_SL2_PERM64 \
(vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
#define ALTI_SR2_PERM \
(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
#define ALTI_SR2_PERM64 \
(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
#else /* For OTHER OSs(Linux?) */
#define ALTI_SL1 {SL1, SL1, SL1, SL1}
#define ALTI_SR1 {SR1, SR1, SR1, SR1}
#define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
#define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
#define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
#define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
#define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
#define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
#endif /* For OSX */
#define IDSTR "SFMT-216091:627-11-3-10-1:bff7bff7-bfffffff-bffffa7f-ffddfbfb"
#endif /* SFMT_PARAMS216091_H */
| 3,566 | 42.5 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/mq.h | /*
* Simple templated message queue implementation that relies on only mutexes for
* synchronization (which reduces portability issues). Given the following
* setup:
*
* typedef struct mq_msg_s mq_msg_t;
* struct mq_msg_s {
* mq_msg(mq_msg_t) link;
* [message data]
* };
* mq_gen(, mq_, mq_t, mq_msg_t, link)
*
* The API is as follows:
*
* bool mq_init(mq_t *mq);
* void mq_fini(mq_t *mq);
* unsigned mq_count(mq_t *mq);
* mq_msg_t *mq_tryget(mq_t *mq);
* mq_msg_t *mq_get(mq_t *mq);
* void mq_put(mq_t *mq, mq_msg_t *msg);
*
* The message queue linkage embedded in each message is to be treated as
* externally opaque (no need to initialize or clean up externally). mq_fini()
* does not perform any cleanup of messages, since it knows nothing of their
* payloads.
*/
#define mq_msg(a_mq_msg_type) ql_elm(a_mq_msg_type)
#define mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field) \
typedef struct { \
mtx_t lock; \
ql_head(a_mq_msg_type) msgs; \
unsigned count; \
} a_mq_type; \
a_attr bool \
a_prefix##init(a_mq_type *mq) { \
\
if (mtx_init(&mq->lock)) \
return (true); \
ql_new(&mq->msgs); \
mq->count = 0; \
return (false); \
} \
a_attr void \
a_prefix##fini(a_mq_type *mq) \
{ \
\
mtx_fini(&mq->lock); \
} \
a_attr unsigned \
a_prefix##count(a_mq_type *mq) \
{ \
unsigned count; \
\
mtx_lock(&mq->lock); \
count = mq->count; \
mtx_unlock(&mq->lock); \
return (count); \
} \
a_attr a_mq_msg_type * \
a_prefix##tryget(a_mq_type *mq) \
{ \
a_mq_msg_type *msg; \
\
mtx_lock(&mq->lock); \
msg = ql_first(&mq->msgs); \
if (msg != NULL) { \
ql_head_remove(&mq->msgs, a_mq_msg_type, a_field); \
mq->count--; \
} \
mtx_unlock(&mq->lock); \
return (msg); \
} \
a_attr a_mq_msg_type * \
a_prefix##get(a_mq_type *mq) \
{ \
a_mq_msg_type *msg; \
struct timespec timeout; \
\
msg = a_prefix##tryget(mq); \
if (msg != NULL) \
return (msg); \
\
timeout.tv_sec = 0; \
timeout.tv_nsec = 1; \
while (true) { \
nanosleep(&timeout, NULL); \
msg = a_prefix##tryget(mq); \
if (msg != NULL) \
return (msg); \
if (timeout.tv_sec == 0) { \
/* Double sleep time, up to max 1 second. */ \
timeout.tv_nsec <<= 1; \
if (timeout.tv_nsec >= 1000*1000*1000) { \
timeout.tv_sec = 1; \
timeout.tv_nsec = 0; \
} \
} \
} \
} \
a_attr void \
a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) \
{ \
\
mtx_lock(&mq->lock); \
ql_elm_new(msg, a_field); \
ql_tail_insert(&mq->msgs, msg, a_field); \
mq->count++; \
mtx_unlock(&mq->lock); \
}
| 2,992 | 25.963964 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params1279.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SFMT_PARAMS1279_H
#define SFMT_PARAMS1279_H
/* SFMT parameter set for MEXP == 1279; selected via SFMT-params.h. */
#define POS1 7
#define SL1 14
#define SL2 3
#define SR1 5
#define SR2 1
#define MSK1 0xf7fefffdU
#define MSK2 0x7fefcfffU
#define MSK3 0xaff3ef3fU
#define MSK4 0xb5ffff7fU
#define PARITY1 0x00000001U
#define PARITY2 0x00000000U
#define PARITY3 0x00000000U
#define PARITY4 0x20000000U
/* PARAMETERS FOR ALTIVEC */
#if defined(__APPLE__) /* For OSX */
#define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
#define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
#define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
#define ALTI_MSK64 \
(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
#define ALTI_SL2_PERM \
(vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
#define ALTI_SL2_PERM64 \
(vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
#define ALTI_SR2_PERM \
(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
#define ALTI_SR2_PERM64 \
(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
#else /* For OTHER OSs(Linux?) */
#define ALTI_SL1 {SL1, SL1, SL1, SL1}
#define ALTI_SR1 {SR1, SR1, SR1, SR1}
#define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
#define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
#define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
#define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
#define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
#define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
#endif /* For OSX */
#define IDSTR "SFMT-1279:7-14-3-5-1:f7fefffd-7fefcfff-aff3ef3f-b5ffff7f"
#endif /* SFMT_PARAMS1279_H */
| 3,552 | 42.329268 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params11213.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SFMT_PARAMS11213_H
#define SFMT_PARAMS11213_H
/* SFMT parameter set for MEXP == 11213; selected via SFMT-params.h. */
#define POS1 68
#define SL1 14
#define SL2 3
#define SR1 7
#define SR2 3
#define MSK1 0xeffff7fbU
#define MSK2 0xffffffefU
#define MSK3 0xdfdfbfffU
#define MSK4 0x7fffdbfdU
#define PARITY1 0x00000001U
#define PARITY2 0x00000000U
#define PARITY3 0xe8148000U
#define PARITY4 0xd0c7afa3U
/* PARAMETERS FOR ALTIVEC */
#if defined(__APPLE__) /* For OSX */
#define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
#define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
#define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
#define ALTI_MSK64 \
(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
#define ALTI_SL2_PERM \
(vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
#define ALTI_SL2_PERM64 \
(vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
#define ALTI_SR2_PERM \
(vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
#define ALTI_SR2_PERM64 \
(vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
#else /* For OTHER OSs(Linux?) */
#define ALTI_SL1 {SL1, SL1, SL1, SL1}
#define ALTI_SR1 {SR1, SR1, SR1, SR1}
#define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
#define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
#define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
#define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
#define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
#define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
#endif /* For OSX */
#define IDSTR "SFMT-11213:68-14-3-7-3:effff7fb-ffffffef-dfdfbfff-7fffdbfd"
#endif /* SFMT_PARAMS11213_H */
| 3,566 | 42.5 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-sse2.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file SFMT-sse2.h
* @brief SIMD oriented Fast Mersenne Twister(SFMT) for Intel SSE2
*
* @author Mutsuo Saito (Hiroshima University)
* @author Makoto Matsumoto (Hiroshima University)
*
* @note We assume LITTLE ENDIAN in this file
*
* Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* The new BSD License is applied to this software, see LICENSE.txt
*/
#ifndef SFMT_SSE2_H
#define SFMT_SSE2_H
/**
* This function represents the recursion formula.
* @param a a 128-bit part of the interal state array
* @param b a 128-bit part of the interal state array
* @param c a 128-bit part of the interal state array
* @param d a 128-bit part of the interal state array
* @param mask 128-bit mask
* @return output
*/
JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b,
    __m128i c, __m128i d, __m128i mask) {
    __m128i state, term_a, term_b, term_c, term_d, r;

    /* Form the five contributions of the SFMT recursion. */
    state = _mm_load_si128(a);
    term_b = _mm_and_si128(_mm_srli_epi32(*b, SR1), mask);
    term_c = _mm_srli_si128(c, SR2);
    term_d = _mm_slli_epi32(d, SL1);
    term_a = _mm_slli_si128(state, SL2);
    /* Result is the XOR of all terms; XOR order is irrelevant. */
    r = _mm_xor_si128(state, term_c);
    r = _mm_xor_si128(r, term_d);
    r = _mm_xor_si128(r, term_a);
    r = _mm_xor_si128(r, term_b);
    return r;
}
/**
* This function fills the internal state array with pseudorandom
* integers.
*/
JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) {
int i;
__m128i r, r1, r2, mask;
mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1);
/* r1/r2 carry the two most recent outputs through the recursion. */
r1 = _mm_load_si128(&ctx->sfmt[N - 2].si);
r2 = _mm_load_si128(&ctx->sfmt[N - 1].si);
/* First phase: the lookahead index i + POS1 stays inside the array. */
for (i = 0; i < N - POS1; i++) {
r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2,
mask);
_mm_store_si128(&ctx->sfmt[i].si, r);
r1 = r2;
r2 = r;
}
/* Second phase: the lookahead index wraps around (i + POS1 - N). */
for (; i < N; i++) {
r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1 - N].si, r1, r2,
mask);
_mm_store_si128(&ctx->sfmt[i].si, r);
r1 = r2;
r2 = r;
}
}
/**
* This function fills the user-specified array with pseudorandom
* integers.
*
* @param array an 128-bit array to be filled by pseudorandom numbers.
* @param size number of 128-bit pesudorandom numbers to be generated.
*/
JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
int i, j;
__m128i r, r1, r2, mask;
mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1);
/* r1/r2 carry the two most recent outputs through the recursion. */
r1 = _mm_load_si128(&ctx->sfmt[N - 2].si);
r2 = _mm_load_si128(&ctx->sfmt[N - 1].si);
/* Phase 1: both operands still come from the internal state. */
for (i = 0; i < N - POS1; i++) {
r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2,
mask);
_mm_store_si128(&array[i].si, r);
r1 = r2;
r2 = r;
}
/* Phase 2: the lookahead operand now comes from freshly produced output. */
for (; i < N; i++) {
r = mm_recursion(&ctx->sfmt[i].si, &array[i + POS1 - N].si, r1, r2,
mask);
_mm_store_si128(&array[i].si, r);
r1 = r2;
r2 = r;
}
/* main loop */
for (; i < size - N; i++) {
r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2,
mask);
_mm_store_si128(&array[i].si, r);
r1 = r2;
r2 = r;
}
/* Copy the tail of the generated output back as the new internal state. */
for (j = 0; j < 2 * N - size; j++) {
r = _mm_load_si128(&array[j + size - N].si);
_mm_store_si128(&ctx->sfmt[j].si, r);
}
/* Final phase: keep generating while also refilling the internal state. */
for (; i < size; i++) {
r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2,
mask);
_mm_store_si128(&array[i].si, r);
_mm_store_si128(&ctx->sfmt[j++].si, r);
r1 = r2;
r2 = r;
}
}
#endif
| 5,215 | 32.012658 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/math.h | #ifndef JEMALLOC_ENABLE_INLINE
double ln_gamma(double x);
double i_gamma(double x, double p, double ln_gamma_p);
double pt_norm(double p);
double pt_chi2(double p, double df, double ln_gamma_df_2);
double pt_gamma(double p, double shape, double scale, double ln_gamma_shape);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(MATH_C_))
/*
* Compute the natural log of Gamma(x), accurate to 10 decimal places.
*
* This implementation is based on:
*
* Pike, M.C., I.D. Hill (1966) Algorithm 291: Logarithm of Gamma function
* [S14]. Communications of the ACM 9(9):684.
*/
JEMALLOC_INLINE double
ln_gamma(double x)
{
	double corr, z;

	assert(x > 0.0);

	/*
	 * Shift small arguments above 7 via Gamma(x+1) = x*Gamma(x),
	 * accumulating the correction term (Pike & Hill, Algorithm 291;
	 * see the comment above).
	 */
	corr = 0.0;
	if (x < 7.0) {
		double prod = 1.0;

		for (z = x; z < 7.0; z += 1.0)
			prod *= z;
		x = z;
		corr = -log(prod);
	}

	/* Series expansion at the (possibly shifted) argument. */
	z = 1.0 / (x * x);
	return (corr + (x-0.5) * log(x) - x + 0.918938533204673 +
	    (((-0.000595238095238 * z + 0.000793650793651) * z -
	    0.002777777777778) * z + 0.083333333333333) / x);
}
/*
* Compute the incomplete Gamma ratio for [0..x], where p is the shape
* parameter, and ln_gamma_p is ln_gamma(p).
*
* This implementation is based on:
*
* Bhattacharjee, G.P. (1970) Algorithm AS 32: The incomplete Gamma integral.
* Applied Statistics 19:285-287.
*/
JEMALLOC_INLINE double
i_gamma(double x, double p, double ln_gamma_p)
{
double acu, factor, oflo, gin, term, rn, a, b, an, dif;
double pn[6];
unsigned i;
assert(p > 0.0);
assert(x >= 0.0);
if (x == 0.0)
return (0.0);
/* acu: convergence tolerance; oflo: rescaling threshold for pn[]. */
acu = 1.0e-10;
oflo = 1.0e30;
gin = 0.0;
/* factor = x^p * e^-x / Gamma(p), the common prefactor of both methods. */
factor = exp(p * log(x) - x - ln_gamma_p);
if (x <= 1.0 || x < p) {
/* Calculation by series expansion. */
gin = 1.0;
term = 1.0;
rn = p;
while (true) {
rn += 1.0;
term *= x / rn;
gin += term;
if (term <= acu) {
gin *= factor / p;
return (gin);
}
}
} else {
/* Calculation by continued fraction. */
a = 1.0 - p;
b = a + x + 1.0;
term = 0.0;
/* pn[] holds the last two numerator/denominator convergents. */
pn[0] = 1.0;
pn[1] = x;
pn[2] = x + 1.0;
pn[3] = x * b;
gin = pn[2] / pn[3];
while (true) {
a += 1.0;
b += 2.0;
term += 1.0;
an = a * term;
for (i = 0; i < 2; i++)
pn[i+4] = b * pn[i+2] - an * pn[i];
if (pn[5] != 0.0) {
rn = pn[4] / pn[5];
dif = fabs(gin - rn);
/* Converged when successive convergents agree within acu. */
if (dif <= acu && dif <= acu * rn) {
gin = 1.0 - factor * gin;
return (gin);
}
gin = rn;
}
for (i = 0; i < 4; i++)
pn[i] = pn[i+2];
/* Rescale to avoid overflow of the convergent terms. */
if (fabs(pn[4]) >= oflo) {
for (i = 0; i < 4; i++)
pn[i] /= oflo;
}
}
}
}
/*
* Given a value p in [0..1] of the lower tail area of the normal distribution,
* compute the limit on the definite integral from [-inf..z] that satisfies p,
* accurate to 16 decimal places.
*
* This implementation is based on:
*
* Wichura, M.J. (1988) Algorithm AS 241: The percentage points of the normal
* distribution. Applied Statistics 37(3):477-484.
*/
JEMALLOC_INLINE double
pt_norm(double p)
{
double q, r, ret;
assert(p > 0.0 && p < 1.0);
q = p - 0.5;
/* Three rational (minimax) approximations, chosen by how far p is from 1/2. */
if (fabs(q) <= 0.425) {
/* p close to 1/2. */
r = 0.180625 - q * q;
return (q * (((((((2.5090809287301226727e3 * r +
3.3430575583588128105e4) * r + 6.7265770927008700853e4) * r
+ 4.5921953931549871457e4) * r + 1.3731693765509461125e4) *
r + 1.9715909503065514427e3) * r + 1.3314166789178437745e2)
* r + 3.3871328727963666080e0) /
(((((((5.2264952788528545610e3 * r +
2.8729085735721942674e4) * r + 3.9307895800092710610e4) * r
+ 2.1213794301586595867e4) * r + 5.3941960214247511077e3) *
r + 6.8718700749205790830e2) * r + 4.2313330701600911252e1)
* r + 1.0));
} else {
/* Work with the smaller tail probability; fix the sign at the end. */
if (q < 0.0)
r = p;
else
r = 1.0 - p;
assert(r > 0.0);
r = sqrt(-log(r));
if (r <= 5.0) {
/* p neither close to 1/2 nor 0 or 1. */
r -= 1.6;
ret = ((((((((7.74545014278341407640e-4 * r +
2.27238449892691845833e-2) * r +
2.41780725177450611770e-1) * r +
1.27045825245236838258e0) * r +
3.64784832476320460504e0) * r +
5.76949722146069140550e0) * r +
4.63033784615654529590e0) * r +
1.42343711074968357734e0) /
(((((((1.05075007164441684324e-9 * r +
5.47593808499534494600e-4) * r +
1.51986665636164571966e-2)
* r + 1.48103976427480074590e-1) * r +
6.89767334985100004550e-1) * r +
1.67638483018380384940e0) * r +
2.05319162663775882187e0) * r + 1.0));
} else {
/* p near 0 or 1. */
r -= 5.0;
ret = ((((((((2.01033439929228813265e-7 * r +
2.71155556874348757815e-5) * r +
1.24266094738807843860e-3) * r +
2.65321895265761230930e-2) * r +
2.96560571828504891230e-1) * r +
1.78482653991729133580e0) * r +
5.46378491116411436990e0) * r +
6.65790464350110377720e0) /
(((((((2.04426310338993978564e-15 * r +
1.42151175831644588870e-7) * r +
1.84631831751005468180e-5) * r +
7.86869131145613259100e-4) * r +
1.48753612908506148525e-2) * r +
1.36929880922735805310e-1) * r +
5.99832206555887937690e-1)
* r + 1.0));
}
/* Lower-tail arguments map to negative quantiles. */
if (q < 0.0)
ret = -ret;
return (ret);
}
}
/*
* Given a value p in [0..1] of the lower tail area of the Chi^2 distribution
* with df degrees of freedom, where ln_gamma_df_2 is ln_gamma(df/2.0), compute
* the upper limit on the definite integral from [0..z] that satisfies p,
* accurate to 12 decimal places.
*
* This implementation is based on:
*
* Best, D.J., D.E. Roberts (1975) Algorithm AS 91: The percentage points of
* the Chi^2 distribution. Applied Statistics 24(3):385-388.
*
* Shea, B.L. (1991) Algorithm AS R85: A remark on AS 91: The percentage
* points of the Chi^2 distribution. Applied Statistics 40(1):233-235.
*/
JEMALLOC_INLINE double
pt_chi2(double p, double df, double ln_gamma_df_2)
{
	double e, aa, xx, c, ch, a, q, p1, p2, t, x, b, s1, s2, s3, s4, s5, s6;
	unsigned i;
	assert(p >= 0.0 && p < 1.0);
	assert(df > 0.0);
	/* Relative accuracy target for the refinement loop below. */
	e = 5.0e-7;
	/* aa = ln(2), to the precision used by AS 91. */
	aa = 0.6931471805;
	xx = 0.5 * df;
	c = xx - 1.0;
	if (df < -1.24 * log(p)) {
		/* Starting approximation for small Chi^2. */
		ch = pow(p * xx * exp(ln_gamma_df_2 + xx * aa), 1.0 / xx);
		if (ch - e < 0.0)
			return (ch);
	} else {
		if (df > 0.32) {
			x = pt_norm(p);
			/*
			 * Starting approximation using Wilson and Hilferty
			 * estimate.
			 */
			p1 = 0.222222 / df;
			ch = df * pow(x * sqrt(p1) + 1.0 - p1, 3.0);
			/* Starting approximation for p tending to 1. */
			if (ch > 2.2 * df + 6.0) {
				ch = -2.0 * (log(1.0 - p) - c * log(0.5 * ch) +
				    ln_gamma_df_2);
			}
		} else {
			/* df <= 0.32: iterate a starting value (AS 91). */
			ch = 0.4;
			a = log(1.0 - p);
			while (true) {
				q = ch;
				p1 = 1.0 + ch * (4.67 + ch);
				p2 = ch * (6.73 + ch * (6.66 + ch));
				t = -0.5 + (4.67 + 2.0 * ch) / p1 - (6.73 + ch
				    * (13.32 + 3.0 * ch)) / p2;
				ch -= (1.0 - exp(a + ln_gamma_df_2 + 0.5 * ch +
				    c * aa) * p2 / p1) / t;
				/* Stop once the update is within 1%. */
				if (fabs(q / ch - 1.0) - 0.01 <= 0.0)
					break;
			}
		}
	}
	/* Refine the starting value; at most 20 correction steps. */
	for (i = 0; i < 20; i++) {
		/* Calculation of seven-term Taylor series. */
		q = ch;
		p1 = 0.5 * ch;
		if (p1 < 0.0)
			return (-1.0);
		p2 = p - i_gamma(p1, xx, ln_gamma_df_2);
		t = p2 * exp(xx * aa + ln_gamma_df_2 + p1 - c * log(ch));
		b = t / ch;
		a = 0.5 * t - b * c;
		s1 = (210.0 + a * (140.0 + a * (105.0 + a * (84.0 + a * (70.0 +
		    60.0 * a))))) / 420.0;
		s2 = (420.0 + a * (735.0 + a * (966.0 + a * (1141.0 + 1278.0 *
		    a)))) / 2520.0;
		s3 = (210.0 + a * (462.0 + a * (707.0 + 932.0 * a))) / 2520.0;
		s4 = (252.0 + a * (672.0 + 1182.0 * a) + c * (294.0 + a *
		    (889.0 + 1740.0 * a))) / 5040.0;
		s5 = (84.0 + 264.0 * a + c * (175.0 + 606.0 * a)) / 2520.0;
		s6 = (120.0 + c * (346.0 + 127.0 * c)) / 5040.0;
		ch += t * (1.0 + 0.5 * t * s1 - b * c * (s1 - b * (s2 - b * (s3
		    - b * (s4 - b * (s5 - b * s6))))));
		if (fabs(q / ch - 1.0) <= e)
			break;
	}
	return (ch);
}
/*
 * Given a value p in [0..1] and Gamma distribution shape and scale parameters,
 * compute the upper limit on the definite integral from [0..z] that satisfies
 * p.
 */
JEMALLOC_INLINE double
pt_gamma(double p, double shape, double scale, double ln_gamma_shape)
{
	/*
	 * A Gamma(shape, scale) quantile is 0.5*scale times the quantile of
	 * a Chi^2 distribution with 2*shape degrees of freedom, so this
	 * simply delegates to pt_chi2() and rescales the result.
	 */
	double chi2_quantile = pt_chi2(p, shape * 2.0, ln_gamma_shape);
	return (chi2_quantile * 0.5 * scale);
}
#endif
| 8,173 | 25.198718 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/mtx.h | /*
* mtx is a slightly simplified version of malloc_mutex. This code duplication
* is unfortunate, but there are allocator bootstrapping considerations that
* would leak into the test infrastructure if malloc_mutex were used directly
* in tests.
*/
/*
 * Lock wrapper selecting the native primitive for the build target:
 * a Win32 critical section, an OS X spinlock, or a POSIX mutex.
 */
typedef struct {
#ifdef _WIN32
CRITICAL_SECTION lock;
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLock lock;
#else
pthread_mutex_t lock;
#endif
} mtx_t;
/*
 * Initialize *mtx.  NOTE(review): presumably returns true on failure,
 * following jemalloc's bool-error convention -- confirm against mtx.c.
 */
bool mtx_init(mtx_t *mtx);
/* Destroy a mutex previously set up with mtx_init(). */
void mtx_fini(mtx_t *mtx);
/* Acquire *mtx. */
void mtx_lock(mtx_t *mtx);
/* Release *mtx. */
void mtx_unlock(mtx_t *mtx);
| 520 | 22.681818 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params2281.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SFMT_PARAMS2281_H
#define SFMT_PARAMS2281_H
#define POS1 12
#define SL1 19
#define SL2 1
#define SR1 5
#define SR2 1
#define MSK1 0xbff7ffbfU
#define MSK2 0xfdfffffeU
#define MSK3 0xf7ffef7fU
#define MSK4 0xf2f7cbbfU
#define PARITY1 0x00000001U
#define PARITY2 0x00000000U
#define PARITY3 0x00000000U
#define PARITY4 0x41dfa600U
/* PARAMETERS FOR ALTIVEC */
#if defined(__APPLE__) /* For OSX */
#define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
#define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
#define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
#define ALTI_MSK64 \
(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
#define ALTI_SL2_PERM \
(vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
#define ALTI_SL2_PERM64 \
(vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
#define ALTI_SR2_PERM \
(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
#define ALTI_SR2_PERM64 \
(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
#else /* For OTHER OSs(Linux?) */
#define ALTI_SL1 {SL1, SL1, SL1, SL1}
#define ALTI_SR1 {SR1, SR1, SR1, SR1}
#define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
#define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
#define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
#define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
#define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
#define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
#endif /* For OSX */
#define IDSTR "SFMT-2281:12-19-1-5-1:bff7ffbf-fdfffffe-f7ffef7f-f2f7cbbf"
#endif /* SFMT_PARAMS2281_H */
| 3,552 | 42.329268 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params19937.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SFMT_PARAMS19937_H
#define SFMT_PARAMS19937_H
#define POS1 122
#define SL1 18
#define SL2 1
#define SR1 11
#define SR2 1
#define MSK1 0xdfffffefU
#define MSK2 0xddfecb7fU
#define MSK3 0xbffaffffU
#define MSK4 0xbffffff6U
#define PARITY1 0x00000001U
#define PARITY2 0x00000000U
#define PARITY3 0x00000000U
#define PARITY4 0x13c9e684U
/* PARAMETERS FOR ALTIVEC */
#if defined(__APPLE__) /* For OSX */
#define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
#define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
#define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
#define ALTI_MSK64 \
(vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
#define ALTI_SL2_PERM \
(vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
#define ALTI_SL2_PERM64 \
(vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
#define ALTI_SR2_PERM \
(vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
#define ALTI_SR2_PERM64 \
(vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
#else /* For OTHER OSs(Linux?) */
#define ALTI_SL1 {SL1, SL1, SL1, SL1}
#define ALTI_SR1 {SR1, SR1, SR1, SR1}
#define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
#define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
#define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
#define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
#define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
#define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
#endif /* For OSX */
#define IDSTR "SFMT-19937:122-18-1-11-1:dfffffef-ddfecb7f-bffaffff-bffffff6"
#endif /* SFMT_PARAMS19937_H */
| 3,560 | 42.426829 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/test.h | #define ASSERT_BUFSIZE 256
#define assert_cmp(t, a, b, cmp, neg_cmp, pri, ...) do { \
t a_ = (a); \
t b_ = (b); \
if (!(a_ cmp b_)) { \
char prefix[ASSERT_BUFSIZE]; \
char message[ASSERT_BUFSIZE]; \
malloc_snprintf(prefix, sizeof(prefix), \
"%s:%s:%d: Failed assertion: " \
"(%s) "#cmp" (%s) --> " \
"%"pri" "#neg_cmp" %"pri": ", \
__func__, __FILE__, __LINE__, \
#a, #b, a_, b_); \
malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
p_test_fail(prefix, message); \
} \
} while (0)
#define assert_ptr_eq(a, b, ...) assert_cmp(void *, a, b, ==, \
!=, "p", __VA_ARGS__)
#define assert_ptr_ne(a, b, ...) assert_cmp(void *, a, b, !=, \
==, "p", __VA_ARGS__)
#define assert_ptr_null(a, ...) assert_cmp(void *, a, NULL, ==, \
!=, "p", __VA_ARGS__)
#define assert_ptr_not_null(a, ...) assert_cmp(void *, a, NULL, !=, \
==, "p", __VA_ARGS__)
#define assert_c_eq(a, b, ...) assert_cmp(char, a, b, ==, !=, "c", __VA_ARGS__)
#define assert_c_ne(a, b, ...) assert_cmp(char, a, b, !=, ==, "c", __VA_ARGS__)
#define assert_c_lt(a, b, ...) assert_cmp(char, a, b, <, >=, "c", __VA_ARGS__)
#define assert_c_le(a, b, ...) assert_cmp(char, a, b, <=, >, "c", __VA_ARGS__)
#define assert_c_ge(a, b, ...) assert_cmp(char, a, b, >=, <, "c", __VA_ARGS__)
#define assert_c_gt(a, b, ...) assert_cmp(char, a, b, >, <=, "c", __VA_ARGS__)
#define assert_x_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__)
#define assert_x_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__)
#define assert_x_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "#x", __VA_ARGS__)
#define assert_x_le(a, b, ...) assert_cmp(int, a, b, <=, >, "#x", __VA_ARGS__)
#define assert_x_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "#x", __VA_ARGS__)
#define assert_x_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "#x", __VA_ARGS__)
#define assert_d_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "d", __VA_ARGS__)
#define assert_d_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "d", __VA_ARGS__)
#define assert_d_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "d", __VA_ARGS__)
#define assert_d_le(a, b, ...) assert_cmp(int, a, b, <=, >, "d", __VA_ARGS__)
#define assert_d_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "d", __VA_ARGS__)
#define assert_d_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "d", __VA_ARGS__)
#define assert_u_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "u", __VA_ARGS__)
#define assert_u_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "u", __VA_ARGS__)
#define assert_u_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "u", __VA_ARGS__)
#define assert_u_le(a, b, ...) assert_cmp(int, a, b, <=, >, "u", __VA_ARGS__)
#define assert_u_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "u", __VA_ARGS__)
#define assert_u_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "u", __VA_ARGS__)
#define assert_ld_eq(a, b, ...) assert_cmp(long, a, b, ==, \
!=, "ld", __VA_ARGS__)
#define assert_ld_ne(a, b, ...) assert_cmp(long, a, b, !=, \
==, "ld", __VA_ARGS__)
#define assert_ld_lt(a, b, ...) assert_cmp(long, a, b, <, \
>=, "ld", __VA_ARGS__)
#define assert_ld_le(a, b, ...) assert_cmp(long, a, b, <=, \
>, "ld", __VA_ARGS__)
#define assert_ld_ge(a, b, ...) assert_cmp(long, a, b, >=, \
<, "ld", __VA_ARGS__)
#define assert_ld_gt(a, b, ...) assert_cmp(long, a, b, >, \
<=, "ld", __VA_ARGS__)
#define assert_lu_eq(a, b, ...) assert_cmp(unsigned long, \
a, b, ==, !=, "lu", __VA_ARGS__)
#define assert_lu_ne(a, b, ...) assert_cmp(unsigned long, \
a, b, !=, ==, "lu", __VA_ARGS__)
#define assert_lu_lt(a, b, ...) assert_cmp(unsigned long, \
a, b, <, >=, "lu", __VA_ARGS__)
#define assert_lu_le(a, b, ...) assert_cmp(unsigned long, \
a, b, <=, >, "lu", __VA_ARGS__)
#define assert_lu_ge(a, b, ...) assert_cmp(unsigned long, \
a, b, >=, <, "lu", __VA_ARGS__)
#define assert_lu_gt(a, b, ...) assert_cmp(unsigned long, \
a, b, >, <=, "lu", __VA_ARGS__)
#define assert_qd_eq(a, b, ...) assert_cmp(long long, a, b, ==, \
!=, "qd", __VA_ARGS__)
#define assert_qd_ne(a, b, ...) assert_cmp(long long, a, b, !=, \
==, "qd", __VA_ARGS__)
#define assert_qd_lt(a, b, ...) assert_cmp(long long, a, b, <, \
>=, "qd", __VA_ARGS__)
#define assert_qd_le(a, b, ...) assert_cmp(long long, a, b, <=, \
>, "qd", __VA_ARGS__)
#define assert_qd_ge(a, b, ...) assert_cmp(long long, a, b, >=, \
<, "qd", __VA_ARGS__)
#define assert_qd_gt(a, b, ...) assert_cmp(long long, a, b, >, \
<=, "qd", __VA_ARGS__)
#define assert_qu_eq(a, b, ...) assert_cmp(unsigned long long, \
a, b, ==, !=, "qu", __VA_ARGS__)
#define assert_qu_ne(a, b, ...) assert_cmp(unsigned long long, \
a, b, !=, ==, "qu", __VA_ARGS__)
#define assert_qu_lt(a, b, ...) assert_cmp(unsigned long long, \
a, b, <, >=, "qu", __VA_ARGS__)
#define assert_qu_le(a, b, ...) assert_cmp(unsigned long long, \
a, b, <=, >, "qu", __VA_ARGS__)
#define assert_qu_ge(a, b, ...) assert_cmp(unsigned long long, \
a, b, >=, <, "qu", __VA_ARGS__)
#define assert_qu_gt(a, b, ...) assert_cmp(unsigned long long, \
a, b, >, <=, "qu", __VA_ARGS__)
#define assert_jd_eq(a, b, ...) assert_cmp(intmax_t, a, b, ==, \
!=, "jd", __VA_ARGS__)
#define assert_jd_ne(a, b, ...) assert_cmp(intmax_t, a, b, !=, \
==, "jd", __VA_ARGS__)
#define assert_jd_lt(a, b, ...) assert_cmp(intmax_t, a, b, <, \
>=, "jd", __VA_ARGS__)
#define assert_jd_le(a, b, ...) assert_cmp(intmax_t, a, b, <=, \
>, "jd", __VA_ARGS__)
#define assert_jd_ge(a, b, ...) assert_cmp(intmax_t, a, b, >=, \
<, "jd", __VA_ARGS__)
#define assert_jd_gt(a, b, ...) assert_cmp(intmax_t, a, b, >, \
<=, "jd", __VA_ARGS__)
#define assert_ju_eq(a, b, ...) assert_cmp(uintmax_t, a, b, ==, \
!=, "ju", __VA_ARGS__)
#define assert_ju_ne(a, b, ...) assert_cmp(uintmax_t, a, b, !=, \
==, "ju", __VA_ARGS__)
#define assert_ju_lt(a, b, ...) assert_cmp(uintmax_t, a, b, <, \
>=, "ju", __VA_ARGS__)
#define assert_ju_le(a, b, ...) assert_cmp(uintmax_t, a, b, <=, \
>, "ju", __VA_ARGS__)
#define assert_ju_ge(a, b, ...) assert_cmp(uintmax_t, a, b, >=, \
<, "ju", __VA_ARGS__)
#define assert_ju_gt(a, b, ...) assert_cmp(uintmax_t, a, b, >, \
<=, "ju", __VA_ARGS__)
#define assert_zd_eq(a, b, ...) assert_cmp(ssize_t, a, b, ==, \
!=, "zd", __VA_ARGS__)
#define assert_zd_ne(a, b, ...) assert_cmp(ssize_t, a, b, !=, \
==, "zd", __VA_ARGS__)
#define assert_zd_lt(a, b, ...) assert_cmp(ssize_t, a, b, <, \
>=, "zd", __VA_ARGS__)
#define assert_zd_le(a, b, ...) assert_cmp(ssize_t, a, b, <=, \
>, "zd", __VA_ARGS__)
#define assert_zd_ge(a, b, ...) assert_cmp(ssize_t, a, b, >=, \
<, "zd", __VA_ARGS__)
#define assert_zd_gt(a, b, ...) assert_cmp(ssize_t, a, b, >, \
<=, "zd", __VA_ARGS__)
#define assert_zu_eq(a, b, ...) assert_cmp(size_t, a, b, ==, \
!=, "zu", __VA_ARGS__)
#define assert_zu_ne(a, b, ...) assert_cmp(size_t, a, b, !=, \
==, "zu", __VA_ARGS__)
#define assert_zu_lt(a, b, ...) assert_cmp(size_t, a, b, <, \
>=, "zu", __VA_ARGS__)
#define assert_zu_le(a, b, ...) assert_cmp(size_t, a, b, <=, \
>, "zu", __VA_ARGS__)
#define assert_zu_ge(a, b, ...) assert_cmp(size_t, a, b, >=, \
<, "zu", __VA_ARGS__)
#define assert_zu_gt(a, b, ...) assert_cmp(size_t, a, b, >, \
<=, "zu", __VA_ARGS__)
#define assert_d32_eq(a, b, ...) assert_cmp(int32_t, a, b, ==, \
!=, PRId32, __VA_ARGS__)
#define assert_d32_ne(a, b, ...) assert_cmp(int32_t, a, b, !=, \
==, PRId32, __VA_ARGS__)
#define assert_d32_lt(a, b, ...) assert_cmp(int32_t, a, b, <, \
>=, PRId32, __VA_ARGS__)
#define assert_d32_le(a, b, ...) assert_cmp(int32_t, a, b, <=, \
>, PRId32, __VA_ARGS__)
#define assert_d32_ge(a, b, ...) assert_cmp(int32_t, a, b, >=, \
<, PRId32, __VA_ARGS__)
#define assert_d32_gt(a, b, ...) assert_cmp(int32_t, a, b, >, \
<=, PRId32, __VA_ARGS__)
#define assert_u32_eq(a, b, ...) assert_cmp(uint32_t, a, b, ==, \
!=, PRIu32, __VA_ARGS__)
#define assert_u32_ne(a, b, ...) assert_cmp(uint32_t, a, b, !=, \
==, PRIu32, __VA_ARGS__)
#define assert_u32_lt(a, b, ...) assert_cmp(uint32_t, a, b, <, \
>=, PRIu32, __VA_ARGS__)
#define assert_u32_le(a, b, ...) assert_cmp(uint32_t, a, b, <=, \
>, PRIu32, __VA_ARGS__)
#define assert_u32_ge(a, b, ...) assert_cmp(uint32_t, a, b, >=, \
<, PRIu32, __VA_ARGS__)
#define assert_u32_gt(a, b, ...) assert_cmp(uint32_t, a, b, >, \
<=, PRIu32, __VA_ARGS__)
#define assert_d64_eq(a, b, ...) assert_cmp(int64_t, a, b, ==, \
!=, PRId64, __VA_ARGS__)
#define assert_d64_ne(a, b, ...) assert_cmp(int64_t, a, b, !=, \
==, PRId64, __VA_ARGS__)
#define assert_d64_lt(a, b, ...) assert_cmp(int64_t, a, b, <, \
>=, PRId64, __VA_ARGS__)
#define assert_d64_le(a, b, ...) assert_cmp(int64_t, a, b, <=, \
>, PRId64, __VA_ARGS__)
#define assert_d64_ge(a, b, ...) assert_cmp(int64_t, a, b, >=, \
<, PRId64, __VA_ARGS__)
#define assert_d64_gt(a, b, ...) assert_cmp(int64_t, a, b, >, \
<=, PRId64, __VA_ARGS__)
#define assert_u64_eq(a, b, ...) assert_cmp(uint64_t, a, b, ==, \
!=, PRIu64, __VA_ARGS__)
#define assert_u64_ne(a, b, ...) assert_cmp(uint64_t, a, b, !=, \
==, PRIu64, __VA_ARGS__)
#define assert_u64_lt(a, b, ...) assert_cmp(uint64_t, a, b, <, \
>=, PRIu64, __VA_ARGS__)
#define assert_u64_le(a, b, ...) assert_cmp(uint64_t, a, b, <=, \
>, PRIu64, __VA_ARGS__)
#define assert_u64_ge(a, b, ...) assert_cmp(uint64_t, a, b, >=, \
<, PRIu64, __VA_ARGS__)
#define assert_u64_gt(a, b, ...) assert_cmp(uint64_t, a, b, >, \
<=, PRIu64, __VA_ARGS__)
#define assert_b_eq(a, b, ...) do { \
bool a_ = (a); \
bool b_ = (b); \
if (!(a_ == b_)) { \
char prefix[ASSERT_BUFSIZE]; \
char message[ASSERT_BUFSIZE]; \
malloc_snprintf(prefix, sizeof(prefix), \
"%s:%s:%d: Failed assertion: " \
"(%s) == (%s) --> %s != %s: ", \
__func__, __FILE__, __LINE__, \
#a, #b, a_ ? "true" : "false", \
b_ ? "true" : "false"); \
malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
p_test_fail(prefix, message); \
} \
} while (0)
#define assert_b_ne(a, b, ...) do { \
bool a_ = (a); \
bool b_ = (b); \
if (!(a_ != b_)) { \
char prefix[ASSERT_BUFSIZE]; \
char message[ASSERT_BUFSIZE]; \
malloc_snprintf(prefix, sizeof(prefix), \
"%s:%s:%d: Failed assertion: " \
"(%s) != (%s) --> %s == %s: ", \
__func__, __FILE__, __LINE__, \
#a, #b, a_ ? "true" : "false", \
b_ ? "true" : "false"); \
malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
p_test_fail(prefix, message); \
} \
} while (0)
#define assert_true(a, ...) assert_b_eq(a, true, __VA_ARGS__)
#define assert_false(a, ...) assert_b_eq(a, false, __VA_ARGS__)
#define assert_str_eq(a, b, ...) do { \
if (strcmp((a), (b))) { \
char prefix[ASSERT_BUFSIZE]; \
char message[ASSERT_BUFSIZE]; \
malloc_snprintf(prefix, sizeof(prefix), \
"%s:%s:%d: Failed assertion: " \
"(%s) same as (%s) --> " \
"\"%s\" differs from \"%s\": ", \
__func__, __FILE__, __LINE__, #a, #b, a, b); \
malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
p_test_fail(prefix, message); \
} \
} while (0)
#define assert_str_ne(a, b, ...) do { \
if (!strcmp((a), (b))) { \
char prefix[ASSERT_BUFSIZE]; \
char message[ASSERT_BUFSIZE]; \
malloc_snprintf(prefix, sizeof(prefix), \
"%s:%s:%d: Failed assertion: " \
"(%s) differs from (%s) --> " \
"\"%s\" same as \"%s\": ", \
__func__, __FILE__, __LINE__, #a, #b, a, b); \
malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
p_test_fail(prefix, message); \
} \
} while (0)
#define assert_not_reached(...) do { \
char prefix[ASSERT_BUFSIZE]; \
char message[ASSERT_BUFSIZE]; \
malloc_snprintf(prefix, sizeof(prefix), \
"%s:%s:%d: Unreachable code reached: ", \
__func__, __FILE__, __LINE__); \
malloc_snprintf(message, sizeof(message), __VA_ARGS__); \
p_test_fail(prefix, message); \
} while (0)
/*
* If this enum changes, corresponding changes in test/test.sh.in are also
* necessary.
*/
typedef enum {
	test_status_pass = 0,
	test_status_skip = 1,
	test_status_fail = 2,
	test_status_count = 3 /* Number of status values, not a status. */
} test_status_t;
/* A test case is a plain void function built with TEST_BEGIN/TEST_END. */
typedef void (test_t)(void);
/* Open a test function named f and register it with the harness. */
#define TEST_BEGIN(f) \
static void \
f(void) \
{ \
	p_test_init(#f);
/*
 * Close a test function.  label_test_end is the common exit point that
 * test_skip_if() and assertion failures jump to before finalization.
 */
#define TEST_END \
	goto label_test_end; \
label_test_end: \
	p_test_fini(); \
}
/* Run a NULL-terminated list of initialized-runtime tests. */
#define test(...) \
	p_test(__VA_ARGS__, NULL)
/* As test(), but without initializing the allocator first. */
#define test_not_init(...) \
	p_test_not_init(__VA_ARGS__, NULL)
/* Skip the remainder of the current test if e evaluates true. */
#define test_skip_if(e) do { \
	if (e) { \
		test_skip("%s:%s:%d: Test skipped: (%s)", \
		__func__, __FILE__, __LINE__, #e); \
		goto label_test_end; \
	} \
} while (0)
/* Report a skip/failure with printf-style formatting. */
void test_skip(const char *format, ...) JEMALLOC_ATTR(format(printf, 1, 2));
void test_fail(const char *format, ...) JEMALLOC_ATTR(format(printf, 1, 2));
/* For private use by macros. */
test_status_t p_test(test_t *t, ...);
test_status_t p_test_not_init(test_t *t, ...);
void p_test_init(const char *name);
void p_test_fini(void);
void p_test_fail(const char *prefix, const char *message);
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file SFMT.h
*
* @brief SIMD oriented Fast Mersenne Twister(SFMT) pseudorandom
* number generator
*
* @author Mutsuo Saito (Hiroshima University)
* @author Makoto Matsumoto (Hiroshima University)
*
* Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* The new BSD License is applied to this software.
* see LICENSE.txt
*
* @note We assume that your system has inttypes.h. If your system
* doesn't have inttypes.h, you have to typedef uint32_t and uint64_t,
* and you have to define PRIu64 and PRIx64 in this file as follows:
* @verbatim
typedef unsigned int uint32_t
typedef unsigned long long uint64_t
#define PRIu64 "llu"
#define PRIx64 "llx"
@endverbatim
* uint32_t must be exactly 32-bit unsigned integer type (no more, no
* less), and uint64_t must be exactly 64-bit unsigned integer type.
* PRIu64 and PRIx64 are used for printf function to print 64-bit
* unsigned int and 64-bit unsigned int in hexadecimal format.
*/
#ifndef SFMT_H
#define SFMT_H
/* Opaque SFMT generator state; definition lives in the implementation. */
typedef struct sfmt_s sfmt_t;
/* Draw the next 32-/64-bit pseudorandom word from ctx. */
uint32_t gen_rand32(sfmt_t *ctx);
/* NOTE(review): presumably returns a value in [0, limit) -- confirm. */
uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit);
uint64_t gen_rand64(sfmt_t *ctx);
uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit);
/* Bulk-fill array with pseudorandom words (see get_min_array_size*()). */
void fill_array32(sfmt_t *ctx, uint32_t *array, int size);
void fill_array64(sfmt_t *ctx, uint64_t *array, int size);
/* Allocate and seed a generator from a single 32-bit seed. */
sfmt_t *init_gen_rand(uint32_t seed);
/* Allocate and seed a generator from an array-based seed. */
sfmt_t *init_by_array(uint32_t *init_key, int key_length);
/* Release a generator created by init_gen_rand()/init_by_array(). */
void fini_gen_rand(sfmt_t *ctx);
/* Identification string for the compiled SFMT parameter set. */
const char *get_idstring(void);
/* Minimum array sizes accepted by fill_array32()/fill_array64(). */
int get_min_array_size32(void);
int get_min_array_size64(void);
#ifndef JEMALLOC_ENABLE_INLINE
double to_real1(uint32_t v);
double genrand_real1(sfmt_t *ctx);
double to_real2(uint32_t v);
double genrand_real2(sfmt_t *ctx);
double to_real3(uint32_t v);
double genrand_real3(sfmt_t *ctx);
double to_res53(uint64_t v);
double to_res53_mix(uint32_t x, uint32_t y);
double genrand_res53(sfmt_t *ctx);
double genrand_res53_mix(sfmt_t *ctx);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(SFMT_C_))
/* These real versions are due to Isaku Wada */
/** Map a 32-bit word onto the closed real interval [0,1]. */
JEMALLOC_INLINE double to_real1(uint32_t v)
{
    /* Scale by 1/(2^32 - 1) so UINT32_MAX maps exactly to 1.0. */
    const double scale = 1.0/4294967295.0;
    return v * scale;
}
/** Draw a random number on the closed real interval [0,1]. */
JEMALLOC_INLINE double genrand_real1(sfmt_t *ctx)
{
    uint32_t bits = gen_rand32(ctx);
    return to_real1(bits);
}
/** Map a 32-bit word onto the half-open real interval [0,1). */
JEMALLOC_INLINE double to_real2(uint32_t v)
{
    /* Scale by 1/2^32; UINT32_MAX maps just below 1.0. */
    const double scale = 1.0/4294967296.0;
    return v * scale;
}
/** Draw a random number on the half-open real interval [0,1). */
JEMALLOC_INLINE double genrand_real2(sfmt_t *ctx)
{
    uint32_t bits = gen_rand32(ctx);
    return to_real2(bits);
}
/** Map a 32-bit word onto the open real interval (0,1). */
JEMALLOC_INLINE double to_real3(uint32_t v)
{
    /* Offsetting by 0.5 before scaling by 1/2^32 excludes both ends. */
    const double shifted = ((double)v) + 0.5;
    return shifted * (1.0/4294967296.0);
}
/** Draw a random number on the open real interval (0,1). */
JEMALLOC_INLINE double genrand_real3(sfmt_t *ctx)
{
    uint32_t bits = gen_rand32(ctx);
    return to_real3(bits);
}
/** These real versions are due to Isaku Wada */
/** Map a 64-bit word onto [0,1) at full 53-bit double resolution. */
JEMALLOC_INLINE double to_res53(uint64_t v)
{
    /* 2^-64 as a long double, matching the upstream SFMT constant. */
    const long double inv_2_64 = 1.0/18446744073709551616.0L;
    return v * inv_2_64;
}
/**
 * Map two 32-bit words onto [0,1) with 53-bit resolution; x supplies
 * the low half of the combined 64-bit word and y the high half.
 */
JEMALLOC_INLINE double to_res53_mix(uint32_t x, uint32_t y)
{
    uint64_t combined = x | ((uint64_t)y << 32);
    return to_res53(combined);
}
/** Draw a random number on [0,1) with 53-bit resolution. */
JEMALLOC_INLINE double genrand_res53(sfmt_t *ctx)
{
    uint64_t bits = gen_rand64(ctx);
    return to_res53(bits);
}
/**
 * Draw a random number on [0,1) with 53-bit resolution using two
 * 32-bit draws (first draw is the low word, second the high word).
 */
JEMALLOC_INLINE double genrand_res53_mix(sfmt_t *ctx)
{
    uint32_t lo = gen_rand32(ctx);
    uint32_t hi = gen_rand32(ctx);
    return to_res53_mix(lo, hi);
}
#endif
#endif
| 5,805 | 32.755814 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params44497.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SFMT_PARAMS44497_H
#define SFMT_PARAMS44497_H

/*
 * SFMT generation parameters for the variant with period 2^44497 - 1.
 * Parameter roles follow the reference SFMT distribution:
 *   POS1       - pickup position in the 128-bit state array
 *   SL1        - bit shift left applied within each 32-bit lane
 *   SL2        - byte shift left applied to the whole 128-bit word
 *   SR1        - bit shift right applied within each 32-bit lane
 *   SR2        - byte shift right applied to the whole 128-bit word
 *   MSK1..4    - per-lane AND masks used in the recursion
 *   PARITY1..4 - period-certification (parity check) vector
 */
#define POS1 330
#define SL1 5
#define SL2 3
#define SR1 9
#define SR2 3
#define MSK1 0xeffffffbU
#define MSK2 0xdfbebfffU
#define MSK3 0xbfbf7befU
#define MSK4 0x9ffd7bffU
#define PARITY1 0x00000001U
#define PARITY2 0x00000000U
#define PARITY3 0xa3ac4000U
#define PARITY4 0xecc1327aU

/* PARAMETERS FOR ALTIVEC */
/* The APPLE branch uses parenthesized AltiVec literal syntax; the other
 * branch uses brace-initializer syntax accepted by GCC on Linux. */
#if defined(__APPLE__) /* For OSX */
#define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
#define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
#define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
#define ALTI_MSK64 \
    (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
#define ALTI_SL2_PERM \
    (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10)
#define ALTI_SL2_PERM64 \
    (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2)
#define ALTI_SR2_PERM \
    (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12)
#define ALTI_SR2_PERM64 \
    (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12)
#else /* For OTHER OSs(Linux?) */
#define ALTI_SL1 {SL1, SL1, SL1, SL1}
#define ALTI_SR1 {SR1, SR1, SR1, SR1}
#define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
#define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
#define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10}
#define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2}
#define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12}
#define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12}
#endif /* For OSX */
/* Identification string encoding exponent, shift parameters, and masks. */
#define IDSTR "SFMT-44497:330-5-3-9-3:effffffb-dfbebfff-bfbf7bef-9ffd7bff"
#endif /* SFMT_PARAMS44497_H */
| 3,566 | 42.5 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-alti.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file SFMT-alti.h
*
* @brief SIMD oriented Fast Mersenne Twister(SFMT)
* pseudorandom number generator
*
* @author Mutsuo Saito (Hiroshima University)
* @author Makoto Matsumoto (Hiroshima University)
*
* Copyright (C) 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* The new BSD License is applied to this software.
* see LICENSE.txt
*/
#ifndef SFMT_ALTI_H
#define SFMT_ALTI_H
/**
* This function represents the recursion formula in AltiVec and BIG ENDIAN.
 * @param a a 128-bit part of the internal state array
 * @param b a 128-bit part of the internal state array
 * @param c a 128-bit part of the internal state array
 * @param d a 128-bit part of the internal state array
* @return output
*/
JEMALLOC_ALWAYS_INLINE
vector unsigned int vec_recursion(vector unsigned int a,
				    vector unsigned int b,
				    vector unsigned int c,
				    vector unsigned int d) {

    const vector unsigned int sl1 = ALTI_SL1;
    const vector unsigned int sr1 = ALTI_SR1;
#ifdef ONLY64
    /* 64-bit (big-endian 64-bit lane) variants of mask and byte permutations. */
    const vector unsigned int mask = ALTI_MSK64;
    const vector unsigned char perm_sl = ALTI_SL2_PERM64;
    const vector unsigned char perm_sr = ALTI_SR2_PERM64;
#else
    const vector unsigned int mask = ALTI_MSK;
    const vector unsigned char perm_sl = ALTI_SL2_PERM;
    const vector unsigned char perm_sr = ALTI_SR2_PERM;
#endif
    vector unsigned int v, w, x, y, z;

    /*
     * Computes
     *   a ^ (a shifted left SL2 bytes) ^ ((b >> SR1) & mask)
     *     ^ (c shifted right SR2 bytes) ^ (d << SL1)
     * where the whole-128-bit byte shifts are realized with vec_perm and
     * the per-32-bit-lane shifts with vec_sl/vec_sr.
     */
    x = vec_perm(a, (vector unsigned int)perm_sl, perm_sl); /* a << SL2 bytes */
    v = a;
    y = vec_sr(b, sr1);                                     /* b >> SR1 per lane */
    z = vec_perm(c, (vector unsigned int)perm_sr, perm_sr); /* c >> SR2 bytes */
    w = vec_sl(d, sl1);                                     /* d << SL1 per lane */
    z = vec_xor(z, w);
    y = vec_and(y, mask);
    v = vec_xor(v, x);
    z = vec_xor(z, y);
    z = vec_xor(z, v);
    return z;
}
/**
* This function fills the internal state array with pseudorandom
* integers.
*/
JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) {
    int i;
    vector unsigned int r, r1, r2;

    /* r1/r2 carry the two most recently produced state words into the
     * recursion (initially the last two entries of the old state). */
    r1 = ctx->sfmt[N - 2].s;
    r2 = ctx->sfmt[N - 1].s;
    /* While i + POS1 is still a valid index into the (old) state... */
    for (i = 0; i < N - POS1; i++) {
	r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2);
	ctx->sfmt[i].s = r;
	r1 = r2;
	r2 = r;
    }
    /* ...then wrap i + POS1 around to entries already updated above. */
    for (; i < N; i++) {
	r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1 - N].s, r1, r2);
	ctx->sfmt[i].s = r;
	r1 = r2;
	r2 = r;
    }
}
/**
* This function fills the user-specified array with pseudorandom
* integers.
*
 * @param array a 128-bit array to be filled by pseudorandom numbers.
 * @param size number of 128-bit pseudorandom numbers to be generated.
*/
JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) {
    int i, j;
    vector unsigned int r, r1, r2;

    /* r1/r2 carry the two most recently generated words into the recursion. */
    r1 = ctx->sfmt[N - 2].s;
    r2 = ctx->sfmt[N - 1].s;
    /* Phase 1: both recursion inputs still come from the internal state. */
    for (i = 0; i < N - POS1; i++) {
	r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2);
	array[i].s = r;
	r1 = r2;
	r2 = r;
    }
    /* Phase 2: the i + POS1 input wraps into the freshly filled array. */
    for (; i < N; i++) {
	r = vec_recursion(ctx->sfmt[i].s, array[i + POS1 - N].s, r1, r2);
	array[i].s = r;
	r1 = r2;
	r2 = r;
    }
    /* main loop */
    /* Phase 3: both inputs come from earlier array entries. */
    for (; i < size - N; i++) {
	r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2);
	array[i].s = r;
	r1 = r2;
	r2 = r;
    }
    /* Phase 4: copy the part of the array tail that already constitutes
     * the new internal state back into ctx. */
    for (j = 0; j < 2 * N - size; j++) {
	ctx->sfmt[j].s = array[j + size - N].s;
    }
    /* Phase 5: generate the remaining outputs, mirroring each into the
     * internal state so ctx ends holding the last N generated words. */
    for (; i < size; i++) {
	r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2);
	array[i].s = r;
	ctx->sfmt[j++].s = r;
	r1 = r2;
	r2 = r;
    }
}
#ifndef ONLY64
#if defined(__APPLE__)
#define ALTI_SWAP (vector unsigned char) \
(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11)
#else
#define ALTI_SWAP {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}
#endif
/**
* This function swaps high and low 32-bit of 64-bit integers in user
* specified array.
*
 * @param array a 128-bit array to be swapped.
 * @param size size of 128-bit array.
*/
JEMALLOC_INLINE void swap(w128_t *array, int size) {
    int i;
    const vector unsigned char perm = ALTI_SWAP;

    /* ALTI_SWAP's byte pattern (4..7,0..3,12..15,8..11) exchanges the two
     * 32-bit halves of each 64-bit lane via vec_perm. */
    for (i = 0; i < size; i++) {
	array[i].s = vec_perm(array[i].s, (vector unsigned int)perm, perm);
    }
}
#endif
#endif
| 5,921 | 30.668449 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params86243.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SFMT_PARAMS86243_H
#define SFMT_PARAMS86243_H

/*
 * SFMT generation parameters for the variant with period 2^86243 - 1.
 * Parameter roles follow the reference SFMT distribution:
 *   POS1       - pickup position in the 128-bit state array
 *   SL1        - bit shift left applied within each 32-bit lane
 *   SL2        - byte shift left applied to the whole 128-bit word
 *   SR1        - bit shift right applied within each 32-bit lane
 *   SR2        - byte shift right applied to the whole 128-bit word
 *   MSK1..4    - per-lane AND masks used in the recursion
 *   PARITY1..4 - period-certification (parity check) vector
 */
#define POS1 366
#define SL1 6
#define SL2 7
#define SR1 19
#define SR2 1
#define MSK1 0xfdbffbffU
#define MSK2 0xbff7ff3fU
#define MSK3 0xfd77efffU
#define MSK4 0xbf9ff3ffU
#define PARITY1 0x00000001U
#define PARITY2 0x00000000U
#define PARITY3 0x00000000U
#define PARITY4 0xe9528d85U

/* PARAMETERS FOR ALTIVEC */
/* The APPLE branch uses parenthesized AltiVec literal syntax; the other
 * branch uses brace-initializer syntax accepted by GCC on Linux. */
#if defined(__APPLE__) /* For OSX */
#define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
#define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
#define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
#define ALTI_MSK64 \
    (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
#define ALTI_SL2_PERM \
    (vector unsigned char)(25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6)
#define ALTI_SL2_PERM64 \
    (vector unsigned char)(7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6)
#define ALTI_SR2_PERM \
    (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
#define ALTI_SR2_PERM64 \
    (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
#else /* For OTHER OSs(Linux?) */
#define ALTI_SL1 {SL1, SL1, SL1, SL1}
#define ALTI_SR1 {SR1, SR1, SR1, SR1}
#define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
#define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
#define ALTI_SL2_PERM {25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6}
#define ALTI_SL2_PERM64 {7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6}
#define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
#define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
#endif /* For OSX */
/* Identification string encoding exponent, shift parameters, and masks. */
#define IDSTR "SFMT-86243:366-6-7-19-1:fdbffbff-bff7ff3f-fd77efff-bf9ff3ff"
#endif /* SFMT_PARAMS86243_H */
| 3,564 | 42.47561 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/thd.h | /* Abstraction layer for threading in tests */
#ifdef _WIN32
typedef HANDLE thd_t;
#else
typedef pthread_t thd_t;
#endif
void thd_create(thd_t *thd, void *(*proc)(void *), void *arg);
void thd_join(thd_t thd, void **ret);
| 223 | 21.4 | 62 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/test/include/test/SFMT-params132049.h | /*
* This file derives from SFMT 1.3.3
* (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was
* released under the terms of the following license:
*
* Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
* University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Hiroshima University nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef SFMT_PARAMS132049_H
#define SFMT_PARAMS132049_H

/*
 * SFMT generation parameters for the variant with period 2^132049 - 1.
 * Parameter roles follow the reference SFMT distribution:
 *   POS1       - pickup position in the 128-bit state array
 *   SL1        - bit shift left applied within each 32-bit lane
 *   SL2        - byte shift left applied to the whole 128-bit word
 *   SR1        - bit shift right applied within each 32-bit lane
 *   SR2        - byte shift right applied to the whole 128-bit word
 *   MSK1..4    - per-lane AND masks used in the recursion
 *   PARITY1..4 - period-certification (parity check) vector
 */
#define POS1 110
#define SL1 19
#define SL2 1
#define SR1 21
#define SR2 1
#define MSK1 0xffffbb5fU
#define MSK2 0xfb6ebf95U
#define MSK3 0xfffefffaU
#define MSK4 0xcff77fffU
#define PARITY1 0x00000001U
#define PARITY2 0x00000000U
#define PARITY3 0xcb520000U
#define PARITY4 0xc7e91c7dU

/* PARAMETERS FOR ALTIVEC */
/* The APPLE branch uses parenthesized AltiVec literal syntax; the other
 * branch uses brace-initializer syntax accepted by GCC on Linux. */
#if defined(__APPLE__) /* For OSX */
#define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1)
#define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1)
#define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4)
#define ALTI_MSK64 \
    (vector unsigned int)(MSK2, MSK1, MSK4, MSK3)
#define ALTI_SL2_PERM \
    (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8)
#define ALTI_SL2_PERM64 \
    (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0)
#define ALTI_SR2_PERM \
    (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14)
#define ALTI_SR2_PERM64 \
    (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14)
#else /* For OTHER OSs(Linux?) */
#define ALTI_SL1 {SL1, SL1, SL1, SL1}
#define ALTI_SR1 {SR1, SR1, SR1, SR1}
#define ALTI_MSK {MSK1, MSK2, MSK3, MSK4}
#define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3}
#define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8}
#define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0}
#define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14}
#define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14}
#endif /* For OSX */
/* Identification string encoding exponent, shift parameters, and masks. */
#define IDSTR "SFMT-132049:110-19-1-21-1:ffffbb5f-fb6ebf95-fffefffa-cff77fff"
#endif /* SFMT_PARAMS132049_H */
| 3,564 | 42.47561 | 79 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/msvc_compat/windows_extra.h | #ifndef MSVC_COMPAT_WINDOWS_EXTRA_H
#define MSVC_COMPAT_WINDOWS_EXTRA_H
#ifndef ENOENT
# define ENOENT ERROR_PATH_NOT_FOUND
#endif
#ifndef EINVAL
# define EINVAL ERROR_BAD_ARGUMENTS
#endif
#ifndef EAGAIN
# define EAGAIN ERROR_OUTOFMEMORY
#endif
#ifndef EPERM
# define EPERM ERROR_WRITE_FAULT
#endif
#ifndef EFAULT
# define EFAULT ERROR_INVALID_ADDRESS
#endif
#ifndef ENOMEM
# define ENOMEM ERROR_NOT_ENOUGH_MEMORY
#endif
#ifndef ERANGE
# define ERANGE ERROR_INVALID_DATA
#endif
#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */
| 529 | 18.62963 | 40 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/msvc_compat/inttypes.h | // ISO C9x compliant inttypes.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. The name of the author may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef _MSC_VER // [
#error "Use this header only with Microsoft Visual C++ compilers!"
#endif // _MSC_VER ]
#ifndef _MSC_INTTYPES_H_ // [
#define _MSC_INTTYPES_H_
#if _MSC_VER > 1000
#pragma once
#endif
#include "stdint.h"
// 7.8 Format conversion of integer types
/* Result type of imaxdiv(); mirrors C99 7.8's imaxdiv_t. */
typedef struct {
   intmax_t quot;	/* quotient */
   intmax_t rem;	/* remainder */
} imaxdiv_t;
// 7.8.1 Macros for format specifiers
#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198
#ifdef _WIN64
# define __PRI64_PREFIX "l"
# define __PRIPTR_PREFIX "l"
#else
# define __PRI64_PREFIX "ll"
# define __PRIPTR_PREFIX
#endif
// The fprintf macros for signed integers are:
#define PRId8 "d"
#define PRIi8 "i"
#define PRIdLEAST8 "d"
#define PRIiLEAST8 "i"
#define PRIdFAST8 "d"
#define PRIiFAST8 "i"
#define PRId16 "hd"
#define PRIi16 "hi"
#define PRIdLEAST16 "hd"
#define PRIiLEAST16 "hi"
#define PRIdFAST16 "hd"
#define PRIiFAST16 "hi"
#define PRId32 "d"
#define PRIi32 "i"
#define PRIdLEAST32 "d"
#define PRIiLEAST32 "i"
#define PRIdFAST32 "d"
#define PRIiFAST32 "i"
#define PRId64 __PRI64_PREFIX "d"
#define PRIi64 __PRI64_PREFIX "i"
#define PRIdLEAST64 __PRI64_PREFIX "d"
#define PRIiLEAST64 __PRI64_PREFIX "i"
#define PRIdFAST64 __PRI64_PREFIX "d"
#define PRIiFAST64 __PRI64_PREFIX "i"
#define PRIdMAX __PRI64_PREFIX "d"
#define PRIiMAX __PRI64_PREFIX "i"
#define PRIdPTR __PRIPTR_PREFIX "d"
#define PRIiPTR __PRIPTR_PREFIX "i"
// The fprintf macros for unsigned integers are:
#define PRIo8 "o"
#define PRIu8 "u"
#define PRIx8 "x"
#define PRIX8 "X"
#define PRIoLEAST8 "o"
#define PRIuLEAST8 "u"
#define PRIxLEAST8 "x"
#define PRIXLEAST8 "X"
#define PRIoFAST8 "o"
#define PRIuFAST8 "u"
#define PRIxFAST8 "x"
#define PRIXFAST8 "X"
#define PRIo16 "ho"
#define PRIu16 "hu"
#define PRIx16 "hx"
#define PRIX16 "hX"
#define PRIoLEAST16 "ho"
#define PRIuLEAST16 "hu"
#define PRIxLEAST16 "hx"
#define PRIXLEAST16 "hX"
#define PRIoFAST16 "ho"
#define PRIuFAST16 "hu"
#define PRIxFAST16 "hx"
#define PRIXFAST16 "hX"
#define PRIo32 "o"
#define PRIu32 "u"
#define PRIx32 "x"
#define PRIX32 "X"
#define PRIoLEAST32 "o"
#define PRIuLEAST32 "u"
#define PRIxLEAST32 "x"
#define PRIXLEAST32 "X"
#define PRIoFAST32 "o"
#define PRIuFAST32 "u"
#define PRIxFAST32 "x"
#define PRIXFAST32 "X"
#define PRIo64 __PRI64_PREFIX "o"
#define PRIu64 __PRI64_PREFIX "u"
#define PRIx64 __PRI64_PREFIX "x"
#define PRIX64 __PRI64_PREFIX "X"
#define PRIoLEAST64 __PRI64_PREFIX "o"
#define PRIuLEAST64 __PRI64_PREFIX "u"
#define PRIxLEAST64 __PRI64_PREFIX "x"
#define PRIXLEAST64 __PRI64_PREFIX "X"
#define PRIoFAST64 __PRI64_PREFIX "o"
#define PRIuFAST64 __PRI64_PREFIX "u"
#define PRIxFAST64 __PRI64_PREFIX "x"
#define PRIXFAST64 __PRI64_PREFIX "X"
#define PRIoMAX __PRI64_PREFIX "o"
#define PRIuMAX __PRI64_PREFIX "u"
#define PRIxMAX __PRI64_PREFIX "x"
#define PRIXMAX __PRI64_PREFIX "X"
#define PRIoPTR __PRIPTR_PREFIX "o"
#define PRIuPTR __PRIPTR_PREFIX "u"
#define PRIxPTR __PRIPTR_PREFIX "x"
#define PRIXPTR __PRIPTR_PREFIX "X"
// The fscanf macros for signed integers are:
#define SCNd8 "d"
#define SCNi8 "i"
#define SCNdLEAST8 "d"
#define SCNiLEAST8 "i"
#define SCNdFAST8 "d"
#define SCNiFAST8 "i"
#define SCNd16 "hd"
#define SCNi16 "hi"
#define SCNdLEAST16 "hd"
#define SCNiLEAST16 "hi"
#define SCNdFAST16 "hd"
#define SCNiFAST16 "hi"
#define SCNd32 "ld"
#define SCNi32 "li"
#define SCNdLEAST32 "ld"
#define SCNiLEAST32 "li"
#define SCNdFAST32 "ld"
#define SCNiFAST32 "li"
#define SCNd64 "I64d"
#define SCNi64 "I64i"
#define SCNdLEAST64 "I64d"
#define SCNiLEAST64 "I64i"
#define SCNdFAST64 "I64d"
#define SCNiFAST64 "I64i"
#define SCNdMAX "I64d"
#define SCNiMAX "I64i"
#ifdef _WIN64 // [
# define SCNdPTR "I64d"
# define SCNiPTR "I64i"
#else // _WIN64 ][
# define SCNdPTR "ld"
# define SCNiPTR "li"
#endif // _WIN64 ]
// The fscanf macros for unsigned integers are:
#define SCNo8 "o"
#define SCNu8 "u"
#define SCNx8 "x"
#define SCNX8 "X"
#define SCNoLEAST8 "o"
#define SCNuLEAST8 "u"
#define SCNxLEAST8 "x"
#define SCNXLEAST8 "X"
#define SCNoFAST8 "o"
#define SCNuFAST8 "u"
#define SCNxFAST8 "x"
#define SCNXFAST8 "X"
#define SCNo16 "ho"
#define SCNu16 "hu"
#define SCNx16 "hx"
#define SCNX16 "hX"
#define SCNoLEAST16 "ho"
#define SCNuLEAST16 "hu"
#define SCNxLEAST16 "hx"
#define SCNXLEAST16 "hX"
#define SCNoFAST16 "ho"
#define SCNuFAST16 "hu"
#define SCNxFAST16 "hx"
#define SCNXFAST16 "hX"
#define SCNo32 "lo"
#define SCNu32 "lu"
#define SCNx32 "lx"
#define SCNX32 "lX"
#define SCNoLEAST32 "lo"
#define SCNuLEAST32 "lu"
#define SCNxLEAST32 "lx"
#define SCNXLEAST32 "lX"
#define SCNoFAST32 "lo"
#define SCNuFAST32 "lu"
#define SCNxFAST32 "lx"
#define SCNXFAST32 "lX"
#define SCNo64 "I64o"
#define SCNu64 "I64u"
#define SCNx64 "I64x"
#define SCNX64 "I64X"
#define SCNoLEAST64 "I64o"
#define SCNuLEAST64 "I64u"
#define SCNxLEAST64 "I64x"
#define SCNXLEAST64 "I64X"
#define SCNoFAST64 "I64o"
#define SCNuFAST64 "I64u"
#define SCNxFAST64 "I64x"
#define SCNXFAST64 "I64X"
#define SCNoMAX "I64o"
#define SCNuMAX "I64u"
#define SCNxMAX "I64x"
#define SCNXMAX "I64X"
#ifdef _WIN64 // [
# define SCNoPTR "I64o"
# define SCNuPTR "I64u"
# define SCNxPTR "I64x"
# define SCNXPTR "I64X"
#else // _WIN64 ][
# define SCNoPTR "lo"
# define SCNuPTR "lu"
# define SCNxPTR "lx"
# define SCNXPTR "lX"
#endif // _WIN64 ]
#endif // __STDC_FORMAT_MACROS ]
// 7.8.2 Functions for greatest-width integer types
// 7.8.2.1 The imaxabs function
#define imaxabs _abs64
// 7.8.2.2 The imaxdiv function
// This is modified version of div() function from Microsoft's div.c found
// in %MSVC.NET%\crt\src\div.c
#ifdef STATIC_IMAXDIV // [
static
#else // STATIC_IMAXDIV ][
_inline
#endif // STATIC_IMAXDIV ]
// C99 7.8.2.2: compute quotient and remainder of numer / denom in one call.
// Adapted from Microsoft's div() implementation (crt/src/div.c).
imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom)
{
   imaxdiv_t qr;

   qr.quot = numer / denom;
   qr.rem = numer % denom;
   // Normalize to truncation-toward-zero semantics in case the compiler
   // rounded a negative quotient toward negative infinity.
   if (numer < 0 && qr.rem > 0) {
      qr.quot += 1;
      qr.rem -= denom;
   }
   return qr;
}
// 7.8.2.3 The strtoimax and strtoumax functions
#define strtoimax _strtoi64
#define strtoumax _strtoui64
// 7.8.2.4 The wcstoimax and wcstoumax functions
#define wcstoimax _wcstoi64
#define wcstoumax _wcstoui64
#endif // _MSC_INTTYPES_H_ ]
| 8,491 | 26.044586 | 94 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/msvc_compat/stdint.h | // ISO C9x compliant stdint.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006-2008 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. The name of the author may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef _MSC_VER // [
#error "Use this header only with Microsoft Visual C++ compilers!"
#endif // _MSC_VER ]
#ifndef _MSC_STDINT_H_ // [
#define _MSC_STDINT_H_
#if _MSC_VER > 1000
#pragma once
#endif
#include <limits.h>
// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}'
// or compiler give many errors like this:
// error C2733: second C linkage of overloaded function 'wmemchr' not allowed
#ifdef __cplusplus
extern "C" {
#endif
# include <wchar.h>
#ifdef __cplusplus
}
#endif
// Define _W64 macros to mark types changing their size, like intptr_t.
#ifndef _W64
# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
# define _W64 __w64
# else
# define _W64
# endif
#endif
// 7.18.1 Integer types
// 7.18.1.1 Exact-width integer types
// Visual Studio 6 and Embedded Visual C++ 4 doesn't
// realize that, e.g. char has the same size as __int8
// so we give up on __intX for them.
#if (_MSC_VER < 1300)
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
#else
typedef signed __int8 int8_t;
typedef signed __int16 int16_t;
typedef signed __int32 int32_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
#endif
typedef signed __int64 int64_t;
typedef unsigned __int64 uint64_t;
// 7.18.1.2 Minimum-width integer types
typedef int8_t int_least8_t;
typedef int16_t int_least16_t;
typedef int32_t int_least32_t;
typedef int64_t int_least64_t;
typedef uint8_t uint_least8_t;
typedef uint16_t uint_least16_t;
typedef uint32_t uint_least32_t;
typedef uint64_t uint_least64_t;
// 7.18.1.3 Fastest minimum-width integer types
typedef int8_t int_fast8_t;
typedef int16_t int_fast16_t;
typedef int32_t int_fast32_t;
typedef int64_t int_fast64_t;
typedef uint8_t uint_fast8_t;
typedef uint16_t uint_fast16_t;
typedef uint32_t uint_fast32_t;
typedef uint64_t uint_fast64_t;
// 7.18.1.4 Integer types capable of holding object pointers
#ifdef _WIN64 // [
typedef signed __int64 intptr_t;
typedef unsigned __int64 uintptr_t;
#else // _WIN64 ][
typedef _W64 signed int intptr_t;
typedef _W64 unsigned int uintptr_t;
#endif // _WIN64 ]
// 7.18.1.5 Greatest-width integer types
typedef int64_t intmax_t;
typedef uint64_t uintmax_t;
// 7.18.2 Limits of specified-width integer types
#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259
// 7.18.2.1 Limits of exact-width integer types
#define INT8_MIN ((int8_t)_I8_MIN)
#define INT8_MAX _I8_MAX
#define INT16_MIN ((int16_t)_I16_MIN)
#define INT16_MAX _I16_MAX
#define INT32_MIN ((int32_t)_I32_MIN)
#define INT32_MAX _I32_MAX
#define INT64_MIN ((int64_t)_I64_MIN)
#define INT64_MAX _I64_MAX
#define UINT8_MAX _UI8_MAX
#define UINT16_MAX _UI16_MAX
#define UINT32_MAX _UI32_MAX
#define UINT64_MAX _UI64_MAX
// 7.18.2.2 Limits of minimum-width integer types
#define INT_LEAST8_MIN INT8_MIN
#define INT_LEAST8_MAX INT8_MAX
#define INT_LEAST16_MIN INT16_MIN
#define INT_LEAST16_MAX INT16_MAX
#define INT_LEAST32_MIN INT32_MIN
#define INT_LEAST32_MAX INT32_MAX
#define INT_LEAST64_MIN INT64_MIN
#define INT_LEAST64_MAX INT64_MAX
#define UINT_LEAST8_MAX UINT8_MAX
#define UINT_LEAST16_MAX UINT16_MAX
#define UINT_LEAST32_MAX UINT32_MAX
#define UINT_LEAST64_MAX UINT64_MAX
// 7.18.2.3 Limits of fastest minimum-width integer types
#define INT_FAST8_MIN INT8_MIN
#define INT_FAST8_MAX INT8_MAX
#define INT_FAST16_MIN INT16_MIN
#define INT_FAST16_MAX INT16_MAX
#define INT_FAST32_MIN INT32_MIN
#define INT_FAST32_MAX INT32_MAX
#define INT_FAST64_MIN INT64_MIN
#define INT_FAST64_MAX INT64_MAX
#define UINT_FAST8_MAX UINT8_MAX
#define UINT_FAST16_MAX UINT16_MAX
#define UINT_FAST32_MAX UINT32_MAX
#define UINT_FAST64_MAX UINT64_MAX
// 7.18.2.4 Limits of integer types capable of holding object pointers
#ifdef _WIN64 // [
# define INTPTR_MIN INT64_MIN
# define INTPTR_MAX INT64_MAX
# define UINTPTR_MAX UINT64_MAX
#else // _WIN64 ][
# define INTPTR_MIN INT32_MIN
# define INTPTR_MAX INT32_MAX
# define UINTPTR_MAX UINT32_MAX
#endif // _WIN64 ]
// 7.18.2.5 Limits of greatest-width integer types
#define INTMAX_MIN INT64_MIN
#define INTMAX_MAX INT64_MAX
#define UINTMAX_MAX UINT64_MAX
// 7.18.3 Limits of other integer types
#ifdef _WIN64 // [
# define PTRDIFF_MIN _I64_MIN
# define PTRDIFF_MAX _I64_MAX
#else // _WIN64 ][
# define PTRDIFF_MIN _I32_MIN
# define PTRDIFF_MAX _I32_MAX
#endif // _WIN64 ]
#define SIG_ATOMIC_MIN INT_MIN
#define SIG_ATOMIC_MAX INT_MAX
#ifndef SIZE_MAX // [
# ifdef _WIN64 // [
# define SIZE_MAX _UI64_MAX
# else // _WIN64 ][
# define SIZE_MAX _UI32_MAX
# endif // _WIN64 ]
#endif // SIZE_MAX ]
// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
#ifndef WCHAR_MIN // [
# define WCHAR_MIN 0
#endif // WCHAR_MIN ]
#ifndef WCHAR_MAX // [
# define WCHAR_MAX _UI16_MAX
#endif // WCHAR_MAX ]
#define WINT_MIN 0
#define WINT_MAX _UI16_MAX
#endif // __STDC_LIMIT_MACROS ]
// 7.18.4 Limits of other integer types
#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
// 7.18.4.1 Macros for minimum-width integer constants
#define INT8_C(val) val##i8
#define INT16_C(val) val##i16
#define INT32_C(val) val##i32
#define INT64_C(val) val##i64
#define UINT8_C(val) val##ui8
#define UINT16_C(val) val##ui16
#define UINT32_C(val) val##ui32
#define UINT64_C(val) val##ui64
// 7.18.4.2 Macros for greatest-width integer constants
#define INTMAX_C INT64_C
#define UINTMAX_C UINT64_C
#endif // __STDC_CONSTANT_MACROS ]
#endif // _MSC_STDINT_H_ ]
| 7,728 | 30.165323 | 122 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/msvc_compat/strings.h | #ifndef strings_h
#define strings_h
/* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided
* for both */
#ifdef _MSC_VER
# include <intrin.h>
# pragma intrinsic(_BitScanForward)
static __forceinline int ffsl(long x)
{
unsigned long i;
if (_BitScanForward(&i, x))
return (i + 1);
return (0);
}
static __forceinline int ffs(int x)
{
return (ffsl(x));
}
# ifdef _M_X64
# pragma intrinsic(_BitScanForward64)
# endif
static __forceinline int ffsll(unsigned __int64 x)
{
unsigned long i;
#ifdef _M_X64
if (_BitScanForward64(&i, x))
return (i + 1);
return (0);
#else
// Fallback for 32-bit build where 64-bit version not available
// assuming little endian
union {
unsigned __int64 ll;
unsigned long l[2];
} s;
s.ll = x;
if (_BitScanForward(&i, s.l[0]))
return (i + 1);
else if(_BitScanForward(&i, s.l[1]))
return (i + 33);
return (0);
#endif
}
#else
# define ffsll(x) __builtin_ffsll(x)
# define ffsl(x) __builtin_ffsl(x)
# define ffs(x) __builtin_ffs(x)
#endif
#endif /* strings_h */
| 1,047 | 16.466667 | 72 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/msvc_compat/C99/inttypes.h | // ISO C9x compliant inttypes.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. The name of the author may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef _MSC_VER // [
#error "Use this header only with Microsoft Visual C++ compilers!"
#endif // _MSC_VER ]
#ifndef _MSC_INTTYPES_H_ // [
#define _MSC_INTTYPES_H_
#if _MSC_VER > 1000
#pragma once
#endif
#include "stdint.h"
// 7.8 Format conversion of integer types
typedef struct {
intmax_t quot;
intmax_t rem;
} imaxdiv_t;
// 7.8.1 Macros for format specifiers
#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198
#ifdef _WIN64
# define __PRI64_PREFIX "l"
# define __PRIPTR_PREFIX "l"
#else
# define __PRI64_PREFIX "ll"
# define __PRIPTR_PREFIX
#endif
// The fprintf macros for signed integers are:
#define PRId8 "d"
#define PRIi8 "i"
#define PRIdLEAST8 "d"
#define PRIiLEAST8 "i"
#define PRIdFAST8 "d"
#define PRIiFAST8 "i"
#define PRId16 "hd"
#define PRIi16 "hi"
#define PRIdLEAST16 "hd"
#define PRIiLEAST16 "hi"
#define PRIdFAST16 "hd"
#define PRIiFAST16 "hi"
#define PRId32 "d"
#define PRIi32 "i"
#define PRIdLEAST32 "d"
#define PRIiLEAST32 "i"
#define PRIdFAST32 "d"
#define PRIiFAST32 "i"
#define PRId64 __PRI64_PREFIX "d"
#define PRIi64 __PRI64_PREFIX "i"
#define PRIdLEAST64 __PRI64_PREFIX "d"
#define PRIiLEAST64 __PRI64_PREFIX "i"
#define PRIdFAST64 __PRI64_PREFIX "d"
#define PRIiFAST64 __PRI64_PREFIX "i"
#define PRIdMAX __PRI64_PREFIX "d"
#define PRIiMAX __PRI64_PREFIX "i"
#define PRIdPTR __PRIPTR_PREFIX "d"
#define PRIiPTR __PRIPTR_PREFIX "i"
// The fprintf macros for unsigned integers are:
#define PRIo8 "o"
#define PRIu8 "u"
#define PRIx8 "x"
#define PRIX8 "X"
#define PRIoLEAST8 "o"
#define PRIuLEAST8 "u"
#define PRIxLEAST8 "x"
#define PRIXLEAST8 "X"
#define PRIoFAST8 "o"
#define PRIuFAST8 "u"
#define PRIxFAST8 "x"
#define PRIXFAST8 "X"
#define PRIo16 "ho"
#define PRIu16 "hu"
#define PRIx16 "hx"
#define PRIX16 "hX"
#define PRIoLEAST16 "ho"
#define PRIuLEAST16 "hu"
#define PRIxLEAST16 "hx"
#define PRIXLEAST16 "hX"
#define PRIoFAST16 "ho"
#define PRIuFAST16 "hu"
#define PRIxFAST16 "hx"
#define PRIXFAST16 "hX"
#define PRIo32 "o"
#define PRIu32 "u"
#define PRIx32 "x"
#define PRIX32 "X"
#define PRIoLEAST32 "o"
#define PRIuLEAST32 "u"
#define PRIxLEAST32 "x"
#define PRIXLEAST32 "X"
#define PRIoFAST32 "o"
#define PRIuFAST32 "u"
#define PRIxFAST32 "x"
#define PRIXFAST32 "X"
#define PRIo64 __PRI64_PREFIX "o"
#define PRIu64 __PRI64_PREFIX "u"
#define PRIx64 __PRI64_PREFIX "x"
#define PRIX64 __PRI64_PREFIX "X"
#define PRIoLEAST64 __PRI64_PREFIX "o"
#define PRIuLEAST64 __PRI64_PREFIX "u"
#define PRIxLEAST64 __PRI64_PREFIX "x"
#define PRIXLEAST64 __PRI64_PREFIX "X"
#define PRIoFAST64 __PRI64_PREFIX "o"
#define PRIuFAST64 __PRI64_PREFIX "u"
#define PRIxFAST64 __PRI64_PREFIX "x"
#define PRIXFAST64 __PRI64_PREFIX "X"
#define PRIoMAX __PRI64_PREFIX "o"
#define PRIuMAX __PRI64_PREFIX "u"
#define PRIxMAX __PRI64_PREFIX "x"
#define PRIXMAX __PRI64_PREFIX "X"
#define PRIoPTR __PRIPTR_PREFIX "o"
#define PRIuPTR __PRIPTR_PREFIX "u"
#define PRIxPTR __PRIPTR_PREFIX "x"
#define PRIXPTR __PRIPTR_PREFIX "X"
// The fscanf macros for signed integers are:
#define SCNd8 "d"
#define SCNi8 "i"
#define SCNdLEAST8 "d"
#define SCNiLEAST8 "i"
#define SCNdFAST8 "d"
#define SCNiFAST8 "i"
#define SCNd16 "hd"
#define SCNi16 "hi"
#define SCNdLEAST16 "hd"
#define SCNiLEAST16 "hi"
#define SCNdFAST16 "hd"
#define SCNiFAST16 "hi"
#define SCNd32 "ld"
#define SCNi32 "li"
#define SCNdLEAST32 "ld"
#define SCNiLEAST32 "li"
#define SCNdFAST32 "ld"
#define SCNiFAST32 "li"
#define SCNd64 "I64d"
#define SCNi64 "I64i"
#define SCNdLEAST64 "I64d"
#define SCNiLEAST64 "I64i"
#define SCNdFAST64 "I64d"
#define SCNiFAST64 "I64i"
#define SCNdMAX "I64d"
#define SCNiMAX "I64i"
#ifdef _WIN64 // [
# define SCNdPTR "I64d"
# define SCNiPTR "I64i"
#else // _WIN64 ][
# define SCNdPTR "ld"
# define SCNiPTR "li"
#endif // _WIN64 ]
// The fscanf macros for unsigned integers are:
#define SCNo8 "o"
#define SCNu8 "u"
#define SCNx8 "x"
#define SCNX8 "X"
#define SCNoLEAST8 "o"
#define SCNuLEAST8 "u"
#define SCNxLEAST8 "x"
#define SCNXLEAST8 "X"
#define SCNoFAST8 "o"
#define SCNuFAST8 "u"
#define SCNxFAST8 "x"
#define SCNXFAST8 "X"
#define SCNo16 "ho"
#define SCNu16 "hu"
#define SCNx16 "hx"
#define SCNX16 "hX"
#define SCNoLEAST16 "ho"
#define SCNuLEAST16 "hu"
#define SCNxLEAST16 "hx"
#define SCNXLEAST16 "hX"
#define SCNoFAST16 "ho"
#define SCNuFAST16 "hu"
#define SCNxFAST16 "hx"
#define SCNXFAST16 "hX"
#define SCNo32 "lo"
#define SCNu32 "lu"
#define SCNx32 "lx"
#define SCNX32 "lX"
#define SCNoLEAST32 "lo"
#define SCNuLEAST32 "lu"
#define SCNxLEAST32 "lx"
#define SCNXLEAST32 "lX"
#define SCNoFAST32 "lo"
#define SCNuFAST32 "lu"
#define SCNxFAST32 "lx"
#define SCNXFAST32 "lX"
#define SCNo64 "I64o"
#define SCNu64 "I64u"
#define SCNx64 "I64x"
#define SCNX64 "I64X"
#define SCNoLEAST64 "I64o"
#define SCNuLEAST64 "I64u"
#define SCNxLEAST64 "I64x"
#define SCNXLEAST64 "I64X"
#define SCNoFAST64 "I64o"
#define SCNuFAST64 "I64u"
#define SCNxFAST64 "I64x"
#define SCNXFAST64 "I64X"
#define SCNoMAX "I64o"
#define SCNuMAX "I64u"
#define SCNxMAX "I64x"
#define SCNXMAX "I64X"
#ifdef _WIN64 // [
# define SCNoPTR "I64o"
# define SCNuPTR "I64u"
# define SCNxPTR "I64x"
# define SCNXPTR "I64X"
#else // _WIN64 ][
# define SCNoPTR "lo"
# define SCNuPTR "lu"
# define SCNxPTR "lx"
# define SCNXPTR "lX"
#endif // _WIN64 ]
#endif // __STDC_FORMAT_MACROS ]
// 7.8.2 Functions for greatest-width integer types
// 7.8.2.1 The imaxabs function
#define imaxabs _abs64
// 7.8.2.2 The imaxdiv function
// This is modified version of div() function from Microsoft's div.c found
// in %MSVC.NET%\crt\src\div.c
#ifdef STATIC_IMAXDIV // [
static
#else // STATIC_IMAXDIV ][
_inline
#endif // STATIC_IMAXDIV ]
/*
 * imaxdiv (C99 7.8.2.2): divide numer by denom, returning the quotient
 * and remainder together in an imaxdiv_t.  Modified from Microsoft's
 * div() (see comment above).  The fix-up branch compensates for a '/'
 * that rounds toward negative infinity; with the truncating division
 * C99 mandates, a negative numerator never leaves a positive remainder,
 * so on conforming compilers the branch is dead code kept for fidelity
 * with the original CRT source.
 * NOTE: denom == 0 and INTMAX_MIN / -1 are undefined, as for the
 * standard function.
 */
imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom)
{
   imaxdiv_t result;
   result.quot = numer / denom;
   result.rem = numer % denom;
   if (numer < 0 && result.rem > 0) {
      // did division wrong; must fix up
      ++result.quot;
      result.rem -= denom;
   }
   return result;
}
// 7.8.2.3 The strtoimax and strtoumax functions
#define strtoimax _strtoi64
#define strtoumax _strtoui64
// 7.8.2.4 The wcstoimax and wcstoumax functions
#define wcstoimax _wcstoi64
#define wcstoumax _wcstoui64
#endif // _MSC_INTTYPES_H_ ]
| 8,491 | 26.044586 | 94 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/msvc_compat/C99/stdbool.h | #ifndef stdbool_h
#define stdbool_h
#include <wtypes.h>
/* MSVC doesn't define _Bool or bool in C, but does have BOOL */
/* Note this doesn't pass autoconf's test because (bool) 0.5 != true */
/* Clang-cl uses MSVC headers, so needs msvc_compat, but has _Bool as
* a built-in type. */
#ifndef __clang__
typedef BOOL _Bool;
#endif
#define bool _Bool
#define true 1
#define false 0
#define __bool_true_false_are_defined 1
#endif /* stdbool_h */
| 449 | 20.428571 | 71 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/msvc_compat/C99/stdint.h | // ISO C9x compliant stdint.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006-2008 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. The name of the author may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef _MSC_VER // [
#error "Use this header only with Microsoft Visual C++ compilers!"
#endif // _MSC_VER ]
#ifndef _MSC_STDINT_H_ // [
#define _MSC_STDINT_H_
#if _MSC_VER > 1000
#pragma once
#endif
#include <limits.h>
// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}'
// or compiler give many errors like this:
// error C2733: second C linkage of overloaded function 'wmemchr' not allowed
#ifdef __cplusplus
extern "C" {
#endif
# include <wchar.h>
#ifdef __cplusplus
}
#endif
// Define _W64 macros to mark types changing their size, like intptr_t.
#ifndef _W64
# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
# define _W64 __w64
# else
# define _W64
# endif
#endif
// 7.18.1 Integer types
// 7.18.1.1 Exact-width integer types
// Visual Studio 6 and Embedded Visual C++ 4 doesn't
// realize that, e.g. char has the same size as __int8
// so we give up on __intX for them.
#if (_MSC_VER < 1300)
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
#else
typedef signed __int8 int8_t;
typedef signed __int16 int16_t;
typedef signed __int32 int32_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
#endif
typedef signed __int64 int64_t;
typedef unsigned __int64 uint64_t;
// 7.18.1.2 Minimum-width integer types
typedef int8_t int_least8_t;
typedef int16_t int_least16_t;
typedef int32_t int_least32_t;
typedef int64_t int_least64_t;
typedef uint8_t uint_least8_t;
typedef uint16_t uint_least16_t;
typedef uint32_t uint_least32_t;
typedef uint64_t uint_least64_t;
// 7.18.1.3 Fastest minimum-width integer types
typedef int8_t int_fast8_t;
typedef int16_t int_fast16_t;
typedef int32_t int_fast32_t;
typedef int64_t int_fast64_t;
typedef uint8_t uint_fast8_t;
typedef uint16_t uint_fast16_t;
typedef uint32_t uint_fast32_t;
typedef uint64_t uint_fast64_t;
// 7.18.1.4 Integer types capable of holding object pointers
#ifdef _WIN64 // [
typedef signed __int64 intptr_t;
typedef unsigned __int64 uintptr_t;
#else // _WIN64 ][
typedef _W64 signed int intptr_t;
typedef _W64 unsigned int uintptr_t;
#endif // _WIN64 ]
// 7.18.1.5 Greatest-width integer types
typedef int64_t intmax_t;
typedef uint64_t uintmax_t;
// 7.18.2 Limits of specified-width integer types
#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259
// 7.18.2.1 Limits of exact-width integer types
#define INT8_MIN ((int8_t)_I8_MIN)
#define INT8_MAX _I8_MAX
#define INT16_MIN ((int16_t)_I16_MIN)
#define INT16_MAX _I16_MAX
#define INT32_MIN ((int32_t)_I32_MIN)
#define INT32_MAX _I32_MAX
#define INT64_MIN ((int64_t)_I64_MIN)
#define INT64_MAX _I64_MAX
#define UINT8_MAX _UI8_MAX
#define UINT16_MAX _UI16_MAX
#define UINT32_MAX _UI32_MAX
#define UINT64_MAX _UI64_MAX
// 7.18.2.2 Limits of minimum-width integer types
#define INT_LEAST8_MIN INT8_MIN
#define INT_LEAST8_MAX INT8_MAX
#define INT_LEAST16_MIN INT16_MIN
#define INT_LEAST16_MAX INT16_MAX
#define INT_LEAST32_MIN INT32_MIN
#define INT_LEAST32_MAX INT32_MAX
#define INT_LEAST64_MIN INT64_MIN
#define INT_LEAST64_MAX INT64_MAX
#define UINT_LEAST8_MAX UINT8_MAX
#define UINT_LEAST16_MAX UINT16_MAX
#define UINT_LEAST32_MAX UINT32_MAX
#define UINT_LEAST64_MAX UINT64_MAX
// 7.18.2.3 Limits of fastest minimum-width integer types
#define INT_FAST8_MIN INT8_MIN
#define INT_FAST8_MAX INT8_MAX
#define INT_FAST16_MIN INT16_MIN
#define INT_FAST16_MAX INT16_MAX
#define INT_FAST32_MIN INT32_MIN
#define INT_FAST32_MAX INT32_MAX
#define INT_FAST64_MIN INT64_MIN
#define INT_FAST64_MAX INT64_MAX
#define UINT_FAST8_MAX UINT8_MAX
#define UINT_FAST16_MAX UINT16_MAX
#define UINT_FAST32_MAX UINT32_MAX
#define UINT_FAST64_MAX UINT64_MAX
// 7.18.2.4 Limits of integer types capable of holding object pointers
#ifdef _WIN64 // [
# define INTPTR_MIN INT64_MIN
# define INTPTR_MAX INT64_MAX
# define UINTPTR_MAX UINT64_MAX
#else // _WIN64 ][
# define INTPTR_MIN INT32_MIN
# define INTPTR_MAX INT32_MAX
# define UINTPTR_MAX UINT32_MAX
#endif // _WIN64 ]
// 7.18.2.5 Limits of greatest-width integer types
#define INTMAX_MIN INT64_MIN
#define INTMAX_MAX INT64_MAX
#define UINTMAX_MAX UINT64_MAX
// 7.18.3 Limits of other integer types
#ifdef _WIN64 // [
# define PTRDIFF_MIN _I64_MIN
# define PTRDIFF_MAX _I64_MAX
#else // _WIN64 ][
# define PTRDIFF_MIN _I32_MIN
# define PTRDIFF_MAX _I32_MAX
#endif // _WIN64 ]
#define SIG_ATOMIC_MIN INT_MIN
#define SIG_ATOMIC_MAX INT_MAX
#ifndef SIZE_MAX // [
# ifdef _WIN64 // [
# define SIZE_MAX _UI64_MAX
# else // _WIN64 ][
# define SIZE_MAX _UI32_MAX
# endif // _WIN64 ]
#endif // SIZE_MAX ]
// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
#ifndef WCHAR_MIN // [
# define WCHAR_MIN 0
#endif // WCHAR_MIN ]
#ifndef WCHAR_MAX // [
# define WCHAR_MAX _UI16_MAX
#endif // WCHAR_MAX ]
#define WINT_MIN 0
#define WINT_MAX _UI16_MAX
#endif // __STDC_LIMIT_MACROS ]
// 7.18.4 Limits of other integer types
#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
// 7.18.4.1 Macros for minimum-width integer constants
#define INT8_C(val) val##i8
#define INT16_C(val) val##i16
#define INT32_C(val) val##i32
#define INT64_C(val) val##i64
#define UINT8_C(val) val##ui8
#define UINT16_C(val) val##ui16
#define UINT32_C(val) val##ui32
#define UINT64_C(val) val##ui64
// 7.18.4.2 Macros for greatest-width integer constants
#define INTMAX_C INT64_C
#define UINTMAX_C UINT64_C
#endif // __STDC_CONSTANT_MACROS ]
#endif // _MSC_STDINT_H_ ]
| 7,728 | 30.165323 | 122 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/jemalloc_rename.sh | #!/bin/sh
# Emits (to stdout) the je_-prefix renaming header (jemalloc_rename.h).
# Argument: path to public_symbols.txt, one "name:mangled_name" per line.
public_symbols_txt=$1
cat <<EOF
/*
 * Name mangling for public symbols is controlled by --with-mangling and
 * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
 * these macro definitions.
 */
#ifndef JEMALLOC_NO_RENAME
EOF
# One "#define je_<name> <mangled>" per public symbol.
for nm in `cat ${public_symbols_txt}` ; do
  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
  m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'`
  echo "# define je_${n} ${m}"
done
cat <<EOF
#endif
EOF
| 460 | 19.043478 | 79 | sh |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/jemalloc.sh | #!/bin/sh
# Assembles the public jemalloc.h (written to stdout) by concatenating
# the generated header fragments under ${objroot}include/jemalloc/,
# wrapped in an include guard and an extern "C" block.
# Argument: the build/object root directory (with trailing slash).
objroot=$1
cat <<EOF
#ifndef JEMALLOC_H_
#define JEMALLOC_H_
#ifdef __cplusplus
extern "C" {
#endif
EOF
# Splice in each fragment, dropping the "Generated from ... by configure."
# banner and trimming trailing whitespace; a blank line separates fragments.
for hdr in jemalloc_defs.h jemalloc_rename.h jemalloc_macros.h \
           jemalloc_protos.h jemalloc_typedefs.h jemalloc_mangle.h ; do
  cat "${objroot}include/jemalloc/${hdr}" \
      | grep -v 'Generated from .* by configure\.' \
      | sed -e 's/^#define /#define /g' \
      | sed -e 's/ $//g'
  echo
done
cat <<EOF
#ifdef __cplusplus
}
#endif
#endif /* JEMALLOC_H_ */
EOF
| 498 | 16.206897 | 71 | sh |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/jemalloc_mangle.sh | #!/bin/sh
# Emits (to stdout) the name-mangling header (jemalloc_mangle.h).
# Arguments:
#   $1 - public_symbols.txt ("name:mangled_name" per line)
#   $2 - prefix to map each bare public name to (e.g. "je_")
public_symbols_txt=$1
symbol_prefix=$2
cat <<EOF
/*
 * By default application code must explicitly refer to mangled symbol names,
 * so that it is possible to use jemalloc in conjunction with another allocator
 * in the same application. Define JEMALLOC_MANGLE in order to cause automatic
 * name mangling that matches the API prefixing that happened as a result of
 * --with-mangling and/or --with-jemalloc-prefix configuration settings.
 */
#ifdef JEMALLOC_MANGLE
# ifndef JEMALLOC_NO_DEMANGLE
# define JEMALLOC_NO_DEMANGLE
# endif
EOF
# Map each bare public name onto its prefixed implementation name.
for nm in `cat ${public_symbols_txt}` ; do
  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
  echo "# define ${n} ${symbol_prefix}${n}"
done
# Note: this heredoc is deliberately unquoted, so ${symbol_prefix}
# expands inside the emitted comment text.
cat <<EOF
#endif
/*
 * The ${symbol_prefix}* macros can be used as stable alternative names for the
 * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
 * meant for use in jemalloc itself, but it can be used by application code to
 * provide isolation from the name mangling specified via --with-mangling
 * and/or --with-jemalloc-prefix.
 */
#ifndef JEMALLOC_NO_DEMANGLE
EOF
# Emit an #undef for each prefixed name (see the comment emitted above).
for nm in `cat ${public_symbols_txt}` ; do
  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
  echo "# undef ${symbol_prefix}${n}"
done
cat <<EOF
#endif
EOF
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/public_unnamespace.sh | #!/bin/sh
# Emits "#undef je_<name>" for every public symbol listed in $1
# ("name:mangled_name" per line; only the bare name before ':' is used).
for nm in `cat $1` ; do
  n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
  echo "#undef je_${n}"
done
| 111 | 15 | 46 | sh |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/mutex.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct malloc_mutex_s malloc_mutex_t;
#if (defined(_WIN32) || defined(JEMALLOC_OSSPIN)\
|| defined(JEMALLOC_MUTEX_INIT_CB)\
|| defined(JEMALLOC_DISABLE_BSD_MALLOC_HOOKS))
#define JEMALLOC_NO_RWLOCKS
typedef malloc_mutex_t malloc_rwlock_t;
#else
typedef struct malloc_rwlock_s malloc_rwlock_t;
#endif
#if (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_INITIALIZER {0}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL}
#else
# if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) && \
defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}
# else
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER}
# endif
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct malloc_mutex_s {
#ifdef _WIN32
CRITICAL_SECTION lock;
#elif (defined(JEMALLOC_OSSPIN))
OSSpinLock lock;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
pthread_mutex_t lock;
malloc_mutex_t *postponed_next;
#else
pthread_mutex_t lock;
#endif
};
#ifndef JEMALLOC_NO_RWLOCKS
struct malloc_rwlock_s {
pthread_rwlock_t lock;
};
#endif
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
#else
# undef isthreaded /* Undo private_namespace.h definition. */
# define isthreaded true
#endif
bool malloc_mutex_init(malloc_mutex_t *mutex);
void malloc_mutex_prefork(malloc_mutex_t *mutex);
void malloc_mutex_postfork_parent(malloc_mutex_t *mutex);
void malloc_mutex_postfork_child(malloc_mutex_t *mutex);
bool mutex_boot(void);
#ifdef JEMALLOC_NO_RWLOCKS
#undef malloc_rwlock_init
#undef malloc_rwlock_destroy
#define malloc_rwlock_init malloc_mutex_init
#define malloc_rwlock_destroy malloc_mutex_destroy
#endif
void malloc_rwlock_prefork(malloc_rwlock_t *rwlock);
void malloc_rwlock_postfork_parent(malloc_rwlock_t *rwlock);
void malloc_rwlock_postfork_child(malloc_rwlock_t *rwlock);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
void malloc_mutex_lock(malloc_mutex_t *mutex);
void malloc_mutex_unlock(malloc_mutex_t *mutex);
void malloc_mutex_destroy(malloc_mutex_t *mutex);
#ifndef JEMALLOC_NO_RWLOCKS
bool malloc_rwlock_init(malloc_rwlock_t *rwlock);
void malloc_rwlock_destroy(malloc_rwlock_t *rwlock);
#endif
void malloc_rwlock_rdlock(malloc_rwlock_t *rwlock);
void malloc_rwlock_wrlock(malloc_rwlock_t *rwlock);
void malloc_rwlock_unlock(malloc_rwlock_t *rwlock);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
/*
 * Acquire mutex.  No-op while !isthreaded, so single-threaded processes
 * skip locking entirely (isthreaded is hard-wired true unless
 * JEMALLOC_LAZY_LOCK is configured).  The primitive is selected at
 * compile time: Win32 critical section, Darwin OSSpinLock, or a
 * pthread mutex.
 */
JEMALLOC_INLINE void
malloc_mutex_lock(malloc_mutex_t *mutex)
{
	if (isthreaded) {
#ifdef _WIN32
		EnterCriticalSection(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
		OSSpinLockLock(&mutex->lock);
#else
		pthread_mutex_lock(&mutex->lock);
#endif
	}
}
/*
 * Release mutex.  No-op while !isthreaded; must mirror the acquisition
 * path in malloc_mutex_lock() (same compile-time primitive selection).
 */
JEMALLOC_INLINE void
malloc_mutex_unlock(malloc_mutex_t *mutex)
{
	if (isthreaded) {
#ifdef _WIN32
		LeaveCriticalSection(&mutex->lock);
#elif (defined(JEMALLOC_OSSPIN))
		OSSpinLockUnlock(&mutex->lock);
#else
		pthread_mutex_unlock(&mutex->lock);
#endif
	}
}
/*
 * Destroy mutex.  Only the plain-pthreads flavor calls
 * pthread_mutex_destroy(); for the _WIN32/OSSPIN/MUTEX_INIT_CB/JET
 * builds this compiles to an empty body.
 */
JEMALLOC_INLINE void
malloc_mutex_destroy(malloc_mutex_t *mutex)
{
#if (!defined(_WIN32) && !defined(JEMALLOC_OSSPIN)\
	&& !defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_JET))
	pthread_mutex_destroy(&mutex->lock);
#endif
}
/*
 * Acquire rwlock for reading.  No-op while !isthreaded.  When rwlocks
 * are unavailable (JEMALLOC_NO_RWLOCKS -- malloc_rwlock_t is then a
 * typedef of malloc_mutex_t, see the H_TYPES section), readers are
 * serialized through an exclusive lock instead.
 */
JEMALLOC_INLINE void
malloc_rwlock_rdlock(malloc_rwlock_t *rwlock)
{
	if (isthreaded) {
#ifdef _WIN32
		EnterCriticalSection(&rwlock->lock);
#elif (defined(JEMALLOC_OSSPIN))
		OSSpinLockLock(&rwlock->lock);
#elif (defined(JEMALLOC_NO_RWLOCKS))
		pthread_mutex_lock(&rwlock->lock);
#else
		pthread_rwlock_rdlock(&rwlock->lock);
#endif
	}
}
/*
 * Acquire rwlock for writing (exclusive).  No-op while !isthreaded.
 * Under JEMALLOC_NO_RWLOCKS the lock degenerates to a mutex, so read
 * and write acquisition are identical.
 */
JEMALLOC_INLINE void
malloc_rwlock_wrlock(malloc_rwlock_t *rwlock)
{
	if (isthreaded) {
#ifdef _WIN32
		EnterCriticalSection(&rwlock->lock);
#elif (defined(JEMALLOC_OSSPIN))
		OSSpinLockLock(&rwlock->lock);
#elif (defined(JEMALLOC_NO_RWLOCKS))
		pthread_mutex_lock(&rwlock->lock);
#else
		pthread_rwlock_wrlock(&rwlock->lock);
#endif
	}
}
/*
 * Release rwlock (pthread_rwlock_unlock releases both read and write
 * holds).  No-op while !isthreaded; must mirror the acquisition paths.
 */
JEMALLOC_INLINE void
malloc_rwlock_unlock(malloc_rwlock_t *rwlock)
{
	if (isthreaded) {
#ifdef _WIN32
		LeaveCriticalSection(&rwlock->lock);
#elif (defined(JEMALLOC_OSSPIN))
		OSSpinLockUnlock(&rwlock->lock);
#elif (defined(JEMALLOC_NO_RWLOCKS))
		pthread_mutex_unlock(&rwlock->lock);
#else
		pthread_rwlock_unlock(&rwlock->lock);
#endif
	}
}
#ifndef JEMALLOC_NO_RWLOCKS
/*
 * Initialize rwlock (pthread-rwlock flavor only; under
 * JEMALLOC_NO_RWLOCKS this name is #defined to malloc_mutex_init in
 * the H_EXTERNS section).  Returns true on error, false on success.
 * Skipped entirely while !isthreaded.
 */
JEMALLOC_INLINE bool
malloc_rwlock_init(malloc_rwlock_t *rwlock)
{
	if (isthreaded) {
		if (pthread_rwlock_init(&rwlock->lock, NULL) != 0)
			return (true);
	}
	return (false);
}
/*
 * Destroy rwlock (pthread-rwlock flavor only; mapped to
 * malloc_mutex_destroy under JEMALLOC_NO_RWLOCKS).  No-op while
 * !isthreaded, matching malloc_rwlock_init(), which never initialized
 * the lock in that case.
 */
JEMALLOC_INLINE void
malloc_rwlock_destroy(malloc_rwlock_t *rwlock)
{
	if (isthreaded) {
		pthread_rwlock_destroy(&rwlock->lock);
	}
}
#endif
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 5,281 | 24.516908 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/ctl.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct ctl_node_s ctl_node_t;
typedef struct ctl_named_node_s ctl_named_node_t;
typedef struct ctl_indexed_node_s ctl_indexed_node_t;
typedef struct ctl_arena_stats_s ctl_arena_stats_t;
typedef struct ctl_stats_s ctl_stats_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct ctl_node_s {
bool named;
};
/* Named node in the ctl (mallctl) name tree. */
struct ctl_named_node_s {
	struct ctl_node_s node;		/* Base node (named flavor). */
	const char *name;		/* Name component this node matches. */
	/* If (nchildren == 0), this is a terminal node. */
	unsigned nchildren;
	const ctl_node_t *children;	/* nchildren child nodes. */
	/*
	 * Handler for this node -- NOTE(review): presumably only set on
	 * terminal nodes; confirm against ctl.c.
	 */
	int (*ctl)(const size_t *, size_t, void *, size_t *,
	    void *, size_t);
};
struct ctl_indexed_node_s {
struct ctl_node_s node;
const ctl_named_node_t *(*index)(const size_t *, size_t, size_t);
};
/*
 * Per-arena statistics as exposed through the ctl (mallctl) machinery.
 * ctl_stats_s.arenas holds narenas + 1 of these; NOTE(review): the
 * extra slot presumably aggregates across arenas -- confirm in ctl.c.
 */
struct ctl_arena_stats_s {
	bool initialized;		/* False until this slot holds valid data -- TODO confirm. */
	unsigned nthreads;		/* Threads assigned to the arena -- TODO confirm. */
	const char *dss;		/* dss setting string -- confirm semantics in ctl.c. */
	size_t pactive;			/* Active pages. */
	size_t pdirty;			/* Dirty pages. */
	arena_stats_t astats;		/* Raw arena stats. */
	/* Aggregate stats for small size classes, based on bin stats. */
	size_t allocated_small;
	uint64_t nmalloc_small;
	uint64_t ndalloc_small;
	uint64_t nrequests_small;
	malloc_bin_stats_t bstats[NBINS];
	malloc_large_stats_t *lstats;	/* nlclasses elements. */
};
struct ctl_stats_s {
struct {
size_t current; /* stats_chunks.curchunks */
uint64_t total; /* stats_chunks.nchunks */
size_t high; /* stats_chunks.highchunks */
} chunks;
unsigned narenas;
ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t newlen);
int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);
int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
bool ctl_boot(void);
void ctl_prefork(void);
void ctl_postfork_parent(void);
void ctl_postfork_child(void);
/*
 * "x"-prefixed mallctl wrappers: identical arguments to the
 * corresponding je_mallctl*() functions, but any nonzero return is
 * treated as fatal (diagnostic printed, then abort()).
 */
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
	if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
	    != 0) { \
		malloc_printf( \
		    "<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \
		    name); \
		abort(); \
	} \
} while (0)
/* Fatal-on-failure wrapper for je_mallctlnametomib(). */
#define xmallctlnametomib(name, mibp, miblenp) do { \
	if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \
		malloc_printf("<jemalloc>: Failure in " \
		    "xmallctlnametomib(\"%s\", ...)\n", name); \
		abort(); \
	} \
} while (0)
/* Fatal-on-failure wrapper for je_mallctlbymib(). */
#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \
	if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \
	    newlen) != 0) { \
		malloc_write( \
		    "<jemalloc>: Failure in xmallctlbymib()\n"); \
		abort(); \
	} \
} while (0)
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 3,172 | 27.845455 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/vector.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct vector_s vector_t;
typedef struct vec_list_s vec_list_t;
#define VECTOR_MIN_PART_SIZE 8
/*
 * Static initializer for vector_t.  Fixed to match the actual struct
 * layout: vector_s (defined below in the H_STRUCTS section) has a
 * single `list` member, but the previous initializer named nonexistent
 * .data/.size fields and could not compile anywhere it was used.
 */
#define VECTOR_INITIALIZER JEMALLOC_ARG_CONCAT({.list = NULL})
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/*
 * One part of a vector's backing storage.  Parts form a singly linked
 * list; each part carries `length` pointer slots in the flexible array
 * member `data`.  NOTE(review): growth policy (e.g. whether part sizes
 * scale up from VECTOR_MIN_PART_SIZE) lives in vector.c -- confirm
 * there.
 */
struct vec_list_s {
	vec_list_t *next;	/* Next part, or NULL. */
	int length;		/* Number of slots in data[]. */
	void *data[];		/* Flexible array of element pointers. */
};
/*
 * Dynamically sized array of void *, accessed via vec_get()/vec_set()
 * and released with vec_delete() (declared in the H_EXTERNS section).
 * Holds only the head of the part list.
 */
struct vector_s {
	vec_list_t *list;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void *vec_get(vector_t *vector, int index);
void vec_set(vector_t *vector, int index, void *val);
void vec_delete(vector_t *vector);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 1,063 | 26.282051 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/arena.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* RUN_MAX_OVRHD indicates maximum desired run header overhead. Runs are sized
* as small as possible such that this setting is still honored, without
* violating other constraints. The goal is to make runs as small as possible
* without exceeding a per run external fragmentation threshold.
*
* We use binary fixed point math for overhead computations, where the binary
* point is implicitly RUN_BFP bits to the left.
*
* Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
* honored for some/all object sizes, since when heap profiling is enabled
* there is one pointer of header overhead per object (plus a constant). This
* constraint is relaxed (ignored) for runs that are so small that the
* per-region overhead is greater than:
*
* (RUN_MAX_OVRHD / (reg_interval << (3+RUN_BFP))
*/
#define RUN_BFP 12
/* \/ Implicit binary fixed point. */
#define RUN_MAX_OVRHD 0x0000003dU
#define RUN_MAX_OVRHD_RELAX 0x00001800U
/* Maximum number of regions in one run. */
#define LG_RUN_MAXREGS 11
#define RUN_MAXREGS (1U << LG_RUN_MAXREGS)
/*
* Minimum redzone size. Redzones may be larger than this if necessary to
* preserve region alignment.
*/
#define REDZONE_MINSIZE 16
/*
* The minimum ratio of active:dirty pages per arena is computed as:
*
* (nactive >> opt_lg_dirty_mult) >= ndirty
*
* So, supposing that opt_lg_dirty_mult is 3, there can be no less than 8 times
* as many active pages as dirty pages.
*/
#define LG_DIRTY_MULT_DEFAULT 3
typedef struct arena_chunk_map_s arena_chunk_map_t;
typedef struct arena_chunk_s arena_chunk_t;
typedef struct arena_run_s arena_run_t;
typedef struct arena_bin_info_s arena_bin_info_t;
typedef struct arena_bin_s arena_bin_t;
typedef struct arena_s arena_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/* Each element of the chunk map corresponds to one page within the chunk. */
struct arena_chunk_map_s {
#ifndef JEMALLOC_PROF
/*
* Overlay prof_ctx in order to allow it to be referenced by dead code.
* Such antics aren't warranted for per arena data structures, but
* chunk map overhead accounts for a percentage of memory, rather than
* being just a fixed cost.
*/
union {
#endif
union {
/*
* Linkage for run trees. There are two disjoint uses:
*
* 1) arena_t's runs_avail tree.
* 2) arena_run_t conceptually uses this linkage for in-use
* non-full runs, rather than directly embedding linkage.
*/
rb_node(arena_chunk_map_t) rb_link;
/*
* List of runs currently in purgatory. arena_chunk_purge()
* temporarily allocates runs that contain dirty pages while
* purging, so that other threads cannot use the runs while the
* purging thread is operating without the arena lock held.
*/
ql_elm(arena_chunk_map_t) ql_link;
} u;
/* Profile counters, used for large object runs. */
prof_ctx_t *prof_ctx;
#ifndef JEMALLOC_PROF
}; /* union { ... }; */
#endif
/*
* Run address (or size) and various flags are stored together. The bit
* layout looks like (assuming 32-bit system):
*
* ???????? ???????? ????nnnn nnnndula
*
* ? : Unallocated: Run address for first/last pages, unset for internal
* pages.
* Small: Run page offset.
* Large: Run size for first page, unset for trailing pages.
* n : binind for small size class, BININD_INVALID for large size class.
* d : dirty?
* u : unzeroed?
* l : large?
* a : allocated?
*
* Following are example bit patterns for the three types of runs.
*
* p : run page offset
* s : run size
* n : binind for size class; large objects set these to BININD_INVALID
* x : don't care
* - : 0
* + : 1
* [DULA] : bit set
* [dula] : bit unset
*
* Unallocated (clean):
* ssssssss ssssssss ssss++++ ++++du-a
* xxxxxxxx xxxxxxxx xxxxxxxx xxxx-Uxx
* ssssssss ssssssss ssss++++ ++++dU-a
*
* Unallocated (dirty):
* ssssssss ssssssss ssss++++ ++++D--a
* xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
* ssssssss ssssssss ssss++++ ++++D--a
*
* Small:
* pppppppp pppppppp ppppnnnn nnnnd--A
* pppppppp pppppppp ppppnnnn nnnn---A
* pppppppp pppppppp ppppnnnn nnnnd--A
*
* Large:
* ssssssss ssssssss ssss++++ ++++D-LA
* xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
* -------- -------- ----++++ ++++D-LA
*
* Large (sampled, size <= PAGE):
* ssssssss ssssssss ssssnnnn nnnnD-LA
*
* Large (not sampled, size == PAGE):
* ssssssss ssssssss ssss++++ ++++D-LA
*/
size_t bits;
#define CHUNK_MAP_BININD_SHIFT 4
#define BININD_INVALID ((size_t)0xffU)
/* CHUNK_MAP_BININD_MASK == (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) */
#define CHUNK_MAP_BININD_MASK ((size_t)0xff0U)
#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK
#define CHUNK_MAP_FLAGS_MASK ((size_t)0xcU)
#define CHUNK_MAP_DIRTY ((size_t)0x8U)
#define CHUNK_MAP_UNZEROED ((size_t)0x4U)
#define CHUNK_MAP_LARGE ((size_t)0x2U)
#define CHUNK_MAP_ALLOCATED ((size_t)0x1U)
#define CHUNK_MAP_KEY CHUNK_MAP_ALLOCATED
};
typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
typedef ql_head(arena_chunk_map_t) arena_chunk_mapelms_t;
/* Arena chunk header. Placed at the start of every chunk owned by an arena. */
struct arena_chunk_s {
	/* Arena that owns the chunk. */
	arena_t			*arena;
	/* Linkage for tree of arena chunks that contain dirty runs. */
	rb_node(arena_chunk_t)	dirty_link;
	/* Number of dirty pages. */
	size_t			ndirty;
	/* Number of available runs. */
	size_t			nruns_avail;
	/*
	 * Number of available run adjacencies that purging could coalesce.
	 * Clean and dirty available runs are not coalesced, which causes
	 * virtual memory fragmentation.  The ratio of
	 * (nruns_avail-nruns_adjac):nruns_adjac is used for tracking this
	 * fragmentation.
	 */
	size_t			nruns_adjac;
	/*
	 * Map of pages within chunk that keeps track of free/large/small.  The
	 * first map_bias entries are omitted, since the chunk header does not
	 * need to be tracked in the map.  This omission saves a header page
	 * for common chunk sizes (e.g. 4 MiB).
	 *
	 * NOTE: [1] is the pre-C99 trailing-array idiom; the real length is
	 * chunk_npages-map_bias, allocated as part of the chunk itself.
	 */
	arena_chunk_map_t	map[1]; /* Dynamically sized. */
};
typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
/* Header of a run servicing small allocations; resides inside the run. */
struct arena_run_s {
	/* Bin this run is associated with. */
	arena_bin_t	*bin;
	/* Index of next region that has never been allocated, or nregs. */
	uint32_t	nextind;
	/* Number of free regions in run. */
	unsigned	nfree;
};
/*
* Read-only information associated with each element of arena_t's bins array
* is stored separately, partly to reduce memory usage (only one copy, rather
* than one per arena), but mainly to avoid false cacheline sharing.
*
* Each run has the following layout:
*
* /--------------------\
* | arena_run_t header |
* | ... |
* bitmap_offset | bitmap |
* | ... |
* |--------------------|
* | redzone |
* reg0_offset | region 0 |
* | redzone |
* |--------------------| \
* | redzone | |
* | region 1 | > reg_interval
* | redzone | /
* |--------------------|
* | ... |
* | ... |
* | ... |
* |--------------------|
* | redzone |
* | region nregs-1 |
* | redzone |
* |--------------------|
* | alignment pad? |
* \--------------------/
*
* reg_interval has at least the same minimum alignment as reg_size; this
* preserves the alignment constraint that sa2u() depends on. Alignment pad is
* either 0 or redzone_size; it is present only if needed to align reg0_offset.
*/
/*
 * Immutable per-size-class metadata; shared by all arenas (see the run
 * layout diagram above for how the offsets relate).
 */
struct arena_bin_info_s {
	/* Size of regions in a run for this bin's size class. */
	size_t		reg_size;
	/* Redzone size. */
	size_t		redzone_size;
	/* Interval between regions (reg_size + (redzone_size << 1)). */
	size_t		reg_interval;
	/* Total size of a run for this bin's size class. */
	size_t		run_size;
	/* Total number of regions in a run for this bin's size class. */
	uint32_t	nregs;
	/*
	 * Offset of first bitmap_t element in a run header for this bin's size
	 * class.
	 */
	uint32_t	bitmap_offset;
	/*
	 * Metadata used to manipulate bitmaps for runs associated with this
	 * bin.
	 */
	bitmap_info_t	bitmap_info;
	/* Offset of first region in a run for this bin's size class. */
	uint32_t	reg0_offset;
};
/* Per-arena state for one small size class. */
struct arena_bin_s {
	/*
	 * All operations on runcur, runs, and stats require that lock be
	 * locked.  Run allocation/deallocation are protected by the arena lock,
	 * which may be acquired while holding one or more bin locks, but not
	 * vice versa.
	 */
	malloc_mutex_t	lock;
	/*
	 * Current run being used to service allocations of this bin's size
	 * class.
	 */
	arena_run_t	*runcur;
	/*
	 * Tree of non-full runs.  This tree is used when looking for an
	 * existing run when runcur is no longer usable.  We choose the
	 * non-full run that is lowest in memory; this policy tends to keep
	 * objects packed well, and it can also help reduce the number of
	 * almost-empty chunks.
	 */
	arena_run_tree_t runs;
	/* Bin statistics. */
	malloc_bin_stats_t stats;
};
/* Top-level arena state: one allocation domain within a pool. */
struct arena_s {
	/* This arena's index within the arenas array. */
	unsigned		ind;
	/* This arena's pool. */
	pool_t			*pool;
	/*
	 * Number of threads currently assigned to this arena.  This field is
	 * protected by arenas_lock.
	 */
	unsigned		nthreads;
	/*
	 * There are three classes of arena operations from a locking
	 * perspective:
	 * 1) Thread assignment (modifies nthreads) is protected by
	 *    arenas_lock.
	 * 2) Bin-related operations are protected by bin locks.
	 * 3) Chunk- and run-related operations are protected by this mutex.
	 */
	malloc_mutex_t		lock;
	arena_stats_t		stats;
	/*
	 * List of tcaches for extant threads associated with this arena.
	 * Stats from these are merged incrementally, and at exit.
	 */
	ql_head(tcache_t)	tcache_ql;
	uint64_t		prof_accumbytes;
	dss_prec_t		dss_prec;
	/* Tree of dirty-page-containing chunks this arena manages. */
	arena_chunk_tree_t	chunks_dirty;
	/*
	 * In order to avoid rapid chunk allocation/deallocation when an arena
	 * oscillates right on the cusp of needing a new chunk, cache the most
	 * recently freed chunk.  The spare is left in the arena's chunk trees
	 * until it is deleted.
	 *
	 * There is one spare chunk per arena, rather than one spare total, in
	 * order to avoid interactions between multiple threads that could make
	 * a single spare inadequate.
	 */
	arena_chunk_t		*spare;
	/* Number of pages in active runs and huge regions. */
	size_t			nactive;
	/*
	 * Current count of pages within unused runs that are potentially
	 * dirty, and for which madvise(... MADV_DONTNEED) has not been called.
	 * By tracking this, we can institute a limit on how much dirty unused
	 * memory is mapped for each arena.
	 */
	size_t			ndirty;
	/*
	 * Approximate number of pages being purged.  It is possible for
	 * multiple threads to purge dirty pages concurrently, and they use
	 * npurgatory to indicate the total number of pages all threads are
	 * attempting to purge.
	 */
	size_t			npurgatory;
	/*
	 * Size/address-ordered trees of this arena's available runs.  The trees
	 * are used for first-best-fit run allocation.
	 */
	arena_avail_tree_t	runs_avail;
	/*
	 * User-configurable chunk allocation and deallocation functions.
	 */
	chunk_alloc_t		*chunk_alloc;
	chunk_dalloc_t		*chunk_dalloc;
	/* bins is used to store trees of free regions. */
	arena_bin_t		bins[NBINS];
};
arena_chunk_map_t *
arena_runs_avail_tree_iter(arena_t *arena, arena_chunk_map_t *(*cb)
(arena_avail_tree_t *, arena_chunk_map_t *, void *), void *arg);
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern ssize_t opt_lg_dirty_mult;
/*
* small_size2bin_tab is a compact lookup table that rounds request sizes up to
* size classes. In order to reduce cache footprint, the table is compressed,
* and all accesses are via small_size2bin().
*/
extern uint8_t const small_size2bin_tab[];
/*
* small_bin2size_tab duplicates information in arena_bin_info, but in a const
* array, for which it is easier for the compiler to optimize repeated
* dereferences.
*/
extern uint32_t const small_bin2size_tab[NBINS];
extern arena_bin_info_t arena_bin_info[NBINS];
/* Number of large size classes. */
#define nlclasses (chunk_npages - map_bias)
void *arena_chunk_alloc_huge(arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero);
void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t size);
void arena_purge_all(arena_t *arena);
void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
size_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
bool zero);
#ifdef JEMALLOC_JET
typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t,
uint8_t);
extern arena_redzone_corruption_t *arena_redzone_corruption;
typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
#else
void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
#endif
void arena_quarantine_junk_small(void *ptr, size_t usize);
void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
void arena_prof_promoted(const void *ptr, size_t size);
void arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_chunk_map_t *mapelm);
void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind, arena_chunk_map_t *mapelm);
void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind);
#ifdef JEMALLOC_JET
typedef void (arena_dalloc_junk_large_t)(void *, size_t);
extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
#endif
void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
void *ptr);
void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
#ifdef JEMALLOC_JET
typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
#endif
bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
void *arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
bool try_tcache_dalloc);
dss_prec_t arena_dss_prec_get(arena_t *arena);
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
void arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
malloc_large_stats_t *lstats);
bool arena_new(pool_t *pool, arena_t *arena, unsigned ind);
bool arena_boot(arena_t *arena);
void arena_params_boot(void);
void arena_prefork(arena_t *arena);
void arena_postfork_parent(arena_t *arena);
void arena_postfork_child(arena_t *arena);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
size_t small_size2bin_compute(size_t size);
size_t small_size2bin_lookup(size_t size);
size_t small_size2bin(size_t size);
size_t small_bin2size_compute(size_t binind);
size_t small_bin2size_lookup(size_t binind);
size_t small_bin2size(size_t binind);
size_t small_s2u_compute(size_t size);
size_t small_s2u_lookup(size_t size);
size_t small_s2u(size_t size);
size_t arena_mapelm_to_pageind(arena_chunk_map_t *mapelm);
arena_chunk_map_t *arena_mapp_get(arena_chunk_t *chunk, size_t pageind);
size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbitsp_read(size_t *mapbitsp);
size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
size_t pageind);
size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
size_t size, size_t flags);
void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
size_t size);
void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
size_t size, size_t flags);
void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
size_t binind);
void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
size_t runind, size_t binind, size_t flags);
void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
size_t unzeroed);
bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum(arena_t *arena, uint64_t accumbytes);
size_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
size_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
const void *ptr);
prof_ctx_t *arena_prof_ctx_get(const void *ptr);
void arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
void *arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache);
size_t arena_salloc(const void *ptr, bool demote);
void arena_dalloc(arena_chunk_t *chunk, void *ptr, bool try_tcache);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
# ifdef JEMALLOC_ARENA_INLINE_A
/*
 * Analytically compute the bin index for a small request size (no lookup
 * table).  Used to validate the table and for sizes beyond its range.
 */
JEMALLOC_INLINE size_t
small_size2bin_compute(size_t size)
{
#if (NTBINS != 0)
	/* Tiny size classes: one bin per power of two. */
	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
		size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
		size_t lg_ceil = lg_floor(pow2_ceil(size));
		return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
	} else
#endif
	{
		/* x = lg(ceil to next power of two), i.e. lg_floor(2*size-1). */
		size_t x = lg_floor((size<<1)-1);
		size_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
		    x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
		/* First bin of the group that contains size. */
		size_t grp = shift << LG_SIZE_CLASS_GROUP;
		size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
		    ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
		/* Index within the group. */
		size_t mod = ((size - 1) >> lg_delta) &
		    ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
		size_t bin = NTBINS + grp + mod;
		return (bin);
	}
}
/*
 * Table-driven size->bin mapping; only valid for size <= LOOKUP_MAXCLASS.
 * The debug build cross-checks the table against the analytical result.
 */
JEMALLOC_ALWAYS_INLINE size_t
small_size2bin_lookup(size_t size)
{
	size_t binind;

	assert(size <= LOOKUP_MAXCLASS);
	binind = (size_t)small_size2bin_tab[(size-1) >> LG_TINY_MIN];
	assert(binind == small_size2bin_compute(size));
	return (binind);
}
/* Map a small request size to its bin index, preferring the lookup table. */
JEMALLOC_ALWAYS_INLINE size_t
small_size2bin(size_t size)
{
	assert(size > 0);
	return ((size <= LOOKUP_MAXCLASS) ? small_size2bin_lookup(size) :
	    small_size2bin_compute(size));
}
/*
 * Analytically compute the usable size for a small bin index; the inverse
 * of small_size2bin_compute().
 */
JEMALLOC_INLINE size_t
small_bin2size_compute(size_t binind)
{
#if (NTBINS > 0)
	/* Tiny bins are pure powers of two. */
	if (binind < NTBINS)
		return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + binind));
	else
#endif
	{
		size_t reduced_binind = binind - NTBINS;
		/* Group of size classes sharing the same lg_delta. */
		size_t grp = reduced_binind >> LG_SIZE_CLASS_GROUP;
		/* Position within the group. */
		size_t mod = reduced_binind & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
		    1);
		/* All-zero mask when grp == 0, all-one mask otherwise. */
		size_t grp_size_mask = ~((!!grp)-1);
		size_t grp_size = ((ZU(1) << (LG_QUANTUM +
		    (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
		size_t shift = (grp == 0) ? 1 : grp;
		size_t lg_delta = shift + (LG_QUANTUM-1);
		size_t mod_size = (mod+1) << lg_delta;
		size_t usize = grp_size + mod_size;
		return (usize);
	}
}
/*
 * Table-driven bin->size mapping; the debug build cross-checks the table
 * against the analytical computation.
 */
JEMALLOC_ALWAYS_INLINE size_t
small_bin2size_lookup(size_t binind)
{
	size_t usize;

	assert(binind < NBINS);
	usize = (size_t)small_bin2size_tab[binind];
	assert(usize == small_bin2size_compute(binind));
	return (usize);
}
/* Map a small bin index to its usable size (always via the lookup table). */
JEMALLOC_ALWAYS_INLINE size_t
small_bin2size(size_t binind)
{
	return (small_bin2size_lookup(binind));
}
/*
 * Analytically round a small request size up to its size class (s2u =
 * "size to usable size"), without consulting the lookup tables.
 */
JEMALLOC_ALWAYS_INLINE size_t
small_s2u_compute(size_t size)
{
#if (NTBINS > 0)
	/* Tiny classes round up to the next power of two. */
	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
		size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
		size_t lg_ceil = lg_floor(pow2_ceil(size));
		return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
		    (ZU(1) << lg_ceil));
	} else
#endif
	{
		size_t x = lg_floor((size<<1)-1);
		/* Spacing between size classes in size's group. */
		size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
		    ?  LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
		size_t delta = ZU(1) << lg_delta;
		size_t delta_mask = delta - 1;
		/* Round up to a multiple of delta. */
		size_t usize = (size + delta_mask) & ~delta_mask;
		return (usize);
	}
}
/* Table-driven s2u: compose the two lookup tables; debug-checked. */
JEMALLOC_ALWAYS_INLINE size_t
small_s2u_lookup(size_t size)
{
	size_t usize = small_bin2size(small_size2bin(size));

	assert(usize == small_s2u_compute(size));
	return (usize);
}
/* Round a small request size up to its size class, via table when possible. */
JEMALLOC_ALWAYS_INLINE size_t
small_s2u(size_t size)
{
	assert(size > 0);
	return ((size <= LOOKUP_MAXCLASS) ? small_s2u_lookup(size) :
	    small_s2u_compute(size));
}
# endif /* JEMALLOC_ARENA_INLINE_A */
# ifdef JEMALLOC_ARENA_INLINE_B
/*
 * Recover the page index from a pointer to a chunk map element, using its
 * byte offset within the enclosing chunk's map array.
 */
JEMALLOC_ALWAYS_INLINE size_t
arena_mapelm_to_pageind(arena_chunk_map_t *mapelm)
{
	uintptr_t map_offset =
	    CHUNK_ADDR2OFFSET(mapelm) - offsetof(arena_chunk_t, map);

	/* map[0] corresponds to page map_bias (header pages are untracked). */
	return ((map_offset / sizeof(arena_chunk_map_t)) + map_bias);
}
/* Return the chunk map element that tracks page pageind of chunk. */
JEMALLOC_ALWAYS_INLINE arena_chunk_map_t *
arena_mapp_get(arena_chunk_t *chunk, size_t pageind)
{
	assert(pageind >= map_bias);
	assert(pageind < chunk_npages);
	/* map[] omits the first map_bias (header) pages. */
	return (&chunk->map[pageind-map_bias]);
}
/* Return a pointer to the packed mapbits word for page pageind. */
JEMALLOC_ALWAYS_INLINE size_t *
arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
{
	return (&arena_mapp_get(chunk, pageind)->bits);
}
/* Read a mapbits word; single point through which all reads funnel. */
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbitsp_read(size_t *mapbitsp)
{
	return (*mapbitsp);
}
/* Fetch the packed mapbits word for page pageind of chunk. */
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
{
	return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
}
/* Extract the run size recorded for an unallocated run's first/last page. */
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t bits = arena_mapbits_get(chunk, pageind);

	/* Only meaningful while the run is unallocated. */
	assert((bits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
	/* The size occupies the page-aligned high bits. */
	return (bits & ~PAGE_MASK);
}
/* Extract the allocation size recorded on a large run's first page. */
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t bits = arena_mapbits_get(chunk, pageind);

	/* The page must belong to an allocated large run. */
	assert((bits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
	    (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
	/* The size occupies the page-aligned high bits. */
	return (bits & ~PAGE_MASK);
}
/* Extract the page offset within its run for a page of a small run. */
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t bits = arena_mapbits_get(chunk, pageind);

	/* The page must belong to an allocated small (non-large) run. */
	assert((bits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
	    CHUNK_MAP_ALLOCATED);
	return (bits >> LG_PAGE);
}
/* Extract the bin index field; BININD_INVALID marks large allocations. */
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
{
	size_t bits = arena_mapbits_get(chunk, pageind);
	size_t binind = (bits & CHUNK_MAP_BININD_MASK) >>
	    CHUNK_MAP_BININD_SHIFT;

	assert(binind < NBINS || binind == BININD_INVALID);
	return (binind);
}
/* Nonzero iff the page's dirty flag is set. */
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
{
	return (arena_mapbits_get(chunk, pageind) & CHUNK_MAP_DIRTY);
}
/* Nonzero iff the page's unzeroed flag is set. */
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
{
	return (arena_mapbits_get(chunk, pageind) & CHUNK_MAP_UNZEROED);
}
/* Nonzero iff the page belongs to a large allocation. */
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
{
	return (arena_mapbits_get(chunk, pageind) & CHUNK_MAP_LARGE);
}
/* Nonzero iff the page is part of an allocated run. */
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
{
	return (arena_mapbits_get(chunk, pageind) & CHUNK_MAP_ALLOCATED);
}
/* Store a mapbits word; single point through which all writes funnel. */
JEMALLOC_ALWAYS_INLINE void
arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
{
	*mapbitsp = mapbits;
}
/*
 * Mark a page as belonging to an unallocated run of the given size; flags
 * may contain only the dirty and/or unzeroed bits.
 */
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
    size_t flags)
{
	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);

	assert((size & PAGE_MASK) == 0);
	assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
	assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags);
	/* binind is forced to invalid; large/allocated bits stay clear. */
	arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags);
}
/* Update only the size field of an unallocated run's mapbits. */
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
    size_t size)
{
	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);

	assert((size & PAGE_MASK) == 0);
	assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
	/* Preserve the low (flag) bits, replace the size bits. */
	arena_mapbitsp_write(mapbitsp, size | (mapbits & PAGE_MASK));
}
/*
 * Mark a page as the start (or interior) of an allocated large run of the
 * given size; flags may contain only the dirty bit.
 */
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
    size_t flags)
{
	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);
	size_t unzeroed;

	assert((size & PAGE_MASK) == 0);
	assert((flags & CHUNK_MAP_DIRTY) == flags);
	unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
	arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags
	    | unzeroed | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED);
}
/*
 * Overwrite only the binind field of a one-page large run's mapbits; used
 * when prof demotes/promotes a sampled small-sized object.
 */
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
    size_t binind)
{
	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);

	assert(binind <= BININD_INVALID);
	/* Only valid for PAGE-sized large runs. */
	assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE);
	arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
	    (binind << CHUNK_MAP_BININD_SHIFT));
}
/*
 * Mark a page as part of an allocated small run: record its page offset
 * within the run (runind) and its bin index; flags may contain only the
 * dirty bit.
 */
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
    size_t binind, size_t flags)
{
	size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
	size_t mapbits = arena_mapbitsp_read(mapbitsp);
	size_t unzeroed;

	assert(binind < BININD_INVALID);
	/* The run's first page cannot precede the map's first tracked page. */
	assert(pageind - runind >= map_bias);
	assert((flags & CHUNK_MAP_DIRTY) == flags);
	unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
	arena_mapbitsp_write(mapbitsp, (runind << LG_PAGE) | (binind <<
	    CHUNK_MAP_BININD_SHIFT) | flags | unzeroed | CHUNK_MAP_ALLOCATED);
}
/* Replace only the unzeroed flag of a page's mapbits. */
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
    size_t unzeroed)
{
	size_t *bitsp = arena_mapbitsp_get(chunk, pageind);
	size_t bits = arena_mapbitsp_read(bitsp);

	/* All bits other than CHUNK_MAP_UNZEROED are preserved. */
	arena_mapbitsp_write(bitsp, (bits & ~CHUNK_MAP_UNZEROED) | unzeroed);
}
/*
 * Accumulate allocated bytes toward the profiling interval.  Returns true
 * when the interval threshold is crossed (caller should trigger a dump).
 * Caller must hold the arena lock (see arena_prof_accum()).
 */
JEMALLOC_INLINE bool
arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
{

	cassert(config_prof);
	assert(prof_interval != 0);

	arena->prof_accumbytes += accumbytes;
	if (arena->prof_accumbytes >= prof_interval) {
		/* Keep the remainder so intervals stay accurate. */
		arena->prof_accumbytes -= prof_interval;
		return (true);
	}
	return (false);
}
/*
 * Interval-accumulate variant for callers that already hold the arena
 * lock; a zero prof_interval disables interval-triggered dumps.
 */
JEMALLOC_INLINE bool
arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
{

	cassert(config_prof);

	return ((prof_interval == 0) ? false :
	    arena_prof_accum_impl(arena, accumbytes));
}
/*
 * Interval-accumulate variant that acquires the arena lock itself.
 * Returns true when the profiling interval threshold is crossed.
 */
JEMALLOC_INLINE bool
arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{

	cassert(config_prof);

	/* Fast path: interval-triggered dumping disabled. */
	if (prof_interval == 0)
		return (false);

	{
		bool ret;

		malloc_mutex_lock(&arena->lock);
		ret = arena_prof_accum_impl(arena, accumbytes);
		malloc_mutex_unlock(&arena->lock);
		return (ret);
	}
}
/*
 * Extract the bin index for a small allocation from its mapbits.  In debug
 * builds, independently re-derive the binind from the run header and verify
 * both agree, along with the run/region layout invariants.
 */
JEMALLOC_ALWAYS_INLINE size_t
arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
{
	size_t binind;

	binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;

	if (config_debug) {
		arena_chunk_t *chunk;
		arena_t *arena;
		size_t pageind;
		size_t actual_mapbits;
		arena_run_t *run;
		arena_bin_t *bin;
		size_t actual_binind;
		arena_bin_info_t *bin_info;

		assert(binind != BININD_INVALID);
		assert(binind < NBINS);
		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		arena = chunk->arena;
		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
		actual_mapbits = arena_mapbits_get(chunk, pageind);
		assert(mapbits == actual_mapbits);
		assert(arena_mapbits_large_get(chunk, pageind) == 0);
		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
		/* Walk back to the run header via the stored page offset. */
		run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
		    (actual_mapbits >> LG_PAGE)) << LG_PAGE));
		bin = run->bin;
		actual_binind = bin - arena->bins;
		assert(binind == actual_binind);
		bin_info = &arena_bin_info[actual_binind];
		/* ptr must sit exactly on a region boundary. */
		assert(((uintptr_t)ptr - ((uintptr_t)run +
		    (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
		    == 0);
	}

	return (binind);
}
# endif /* JEMALLOC_ARENA_INLINE_B */
# ifdef JEMALLOC_ARENA_INLINE_C
/* Compute a bin's index from its position in the arena's bins array. */
JEMALLOC_INLINE size_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
	size_t binind;

	binind = bin - arena->bins;
	assert(binind < NBINS);
	return (binind);
}
/*
 * Given a pointer into a small run, compute its region index: the byte
 * offset from region zero divided by reg_interval.  The division is done
 * via a precomputed multiplicative inverse to avoid a hardware divide.
 */
JEMALLOC_INLINE unsigned
arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
{
	unsigned shift, diff, regind;
	size_t interval;

	/*
	 * Freeing a pointer lower than region zero can cause assertion
	 * failure.
	 */
	assert((uintptr_t)ptr >= (uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset);

	/*
	 * Avoid doing division with a variable divisor if possible.  Using
	 * actual division here can reduce allocator throughput by over 20%!
	 */
	diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run -
	    bin_info->reg0_offset);

	/* Rescale (factor powers of 2 out of the numerator and denominator). */
	interval = bin_info->reg_interval;
	shift = jemalloc_ffs((int)interval) - 1;
	diff >>= shift;
	interval >>= shift;

	if (interval == 1) {
		/* The divisor was a power of 2. */
		regind = diff;
	} else {
		/*
		 * To divide by a number D that is not a power of two we
		 * multiply by (2^21 / D) and then right shift by 21 positions.
		 *
		 *   X / D
		 *
		 * becomes
		 *
		 *   (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT
		 *
		 * We can omit the first three elements, because we never
		 * divide by 0, and 1 and 2 are both powers of two, which are
		 * handled above.
		 */
#define	SIZE_INV_SHIFT	((sizeof(unsigned) << 3) - LG_RUN_MAXREGS)
#define	SIZE_INV(s)	(((1U << SIZE_INV_SHIFT) / (s)) + 1)
		static const unsigned interval_invs[] = {
		    SIZE_INV(3),
		    SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7),
		    SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11),
		    SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15),
		    SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19),
		    SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23),
		    SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27),
		    SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
		};

		/* Fall back to real division for intervals beyond the table. */
		if (interval <= ((sizeof(interval_invs) / sizeof(unsigned)) +
		    2)) {
			regind = (diff * interval_invs[interval - 3]) >>
			    SIZE_INV_SHIFT;
		} else
			regind = diff / (unsigned)interval;
#undef SIZE_INV
#undef SIZE_INV_SHIFT
	}
	assert(diff == regind * interval);
	assert(regind < bin_info->nregs);

	return (regind);
}
/*
 * Return the profiling context associated with ptr.  Small allocations
 * share the sentinel context (uintptr_t)1U; large allocations store a
 * per-object context in their chunk map element.
 */
JEMALLOC_INLINE prof_ctx_t *
arena_prof_ctx_get(const void *ptr)
{
	prof_ctx_t *ret;
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	cassert(config_prof);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	mapbits = arena_mapbits_get(chunk, pageind);
	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
	if ((mapbits & CHUNK_MAP_LARGE) == 0)
		ret = (prof_ctx_t *)(uintptr_t)1U;
	else
		ret = arena_mapp_get(chunk, pageind)->prof_ctx;

	return (ret);
}
/*
 * Associate a profiling context with ptr.  Only large allocations store a
 * per-object context; for small allocations this is a no-op.
 */
JEMALLOC_INLINE void
arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
	arena_chunk_t *chunk;
	size_t pageind;

	cassert(config_prof);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
	if (arena_mapbits_large_get(chunk, pageind) == 0)
		return;
	arena_mapp_get(chunk, pageind)->prof_ctx = ctx;
}
/*
 * Allocate size bytes from arena (or a tcache serving it).  Dispatches on
 * the small/large size-class boundary and, when try_tcache is set, prefers
 * the thread cache for sizes it can serve.
 */
JEMALLOC_ALWAYS_INLINE void *
arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache)
{
	tcache_t *tcache;
	pool_t *pool = arena->pool;

	assert(size != 0);
	assert(size <= arena_maxclass);

	if (size <= SMALL_MAXCLASS) {
		if (try_tcache && (tcache = tcache_get(pool, true)) != NULL)
			return (tcache_alloc_small(tcache, size, zero));
		else {
			return (arena_malloc_small(choose_arena(arena), size,
			    zero));
		}
	} else {
		/*
		 * Initialize tcache after checking size in order to avoid
		 * infinite recursion during tcache initialization.
		 */
		if (try_tcache && size <= tcache_maxclass && (tcache =
		    tcache_get(pool, true)) != NULL)
			return (tcache_alloc_large(tcache, size, zero));
		else {
			return (arena_malloc_large(choose_arena(arena), size,
			    zero));
		}
	}
}
/*
 * Return the size of the allocation pointed to by ptr.  When demote is
 * false and profiling is enabled, sampled small objects promoted to large
 * runs report their (large) run size instead of the small class size.
 */
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;
	size_t pageind, binind;

	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
	binind = arena_mapbits_binind_get(chunk, pageind);
	if (binind == BININD_INVALID || (config_prof && demote == false &&
	    arena_mapbits_large_get(chunk, pageind) != 0)) {
		/*
		 * Large allocation.  In the common case (demote == true), and
		 * as this is an inline function, most callers will only end up
		 * looking at binind to determine that ptr is a small
		 * allocation.
		 */
		assert(((uintptr_t)ptr & PAGE_MASK) == 0);
		ret = arena_mapbits_large_size_get(chunk, pageind);
		assert(ret != 0);
		/* Validate the run's trailing-page invariants (debug only). */
		assert(pageind + (ret>>LG_PAGE) <= chunk_npages);
		assert(ret == PAGE || arena_mapbits_large_size_get(chunk,
		    pageind+(ret>>LG_PAGE)-1) == 0);
		assert(binind == arena_mapbits_binind_get(chunk,
		    pageind+(ret>>LG_PAGE)-1));
		assert(arena_mapbits_dirty_get(chunk, pageind) ==
		    arena_mapbits_dirty_get(chunk, pageind+(ret>>LG_PAGE)-1));
	} else {
		/* Small allocation (possibly promoted to a large object). */
		assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
		    arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
		    pageind)) == binind);
		ret = small_bin2size(binind);
	}

	return (ret);
}
/*
 * Deallocate ptr, which must reside in chunk.  Routes to the thread cache
 * when try_tcache is set and the size class is cacheable; otherwise frees
 * directly into the owning arena.
 */
JEMALLOC_ALWAYS_INLINE void
arena_dalloc(arena_chunk_t *chunk, void *ptr, bool try_tcache)
{
	size_t pageind, mapbits;
	tcache_t *tcache;

	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);

	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	mapbits = arena_mapbits_get(chunk, pageind);
	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
	if ((mapbits & CHUNK_MAP_LARGE) == 0) {
		/* Small allocation. */
		if (try_tcache && (tcache = tcache_get(chunk->arena->pool, false)) != NULL) {
			size_t binind;

			binind = arena_ptr_small_binind_get(ptr, mapbits);
			tcache_dalloc_small(tcache, ptr, binind);
		} else
			arena_dalloc_small(chunk->arena, chunk, ptr, pageind);
	} else {
		size_t size = arena_mapbits_large_size_get(chunk, pageind);

		assert(((uintptr_t)ptr & PAGE_MASK) == 0);

		if (try_tcache && size <= tcache_maxclass && (tcache =
		    tcache_get(chunk->arena->pool, false)) != NULL) {
			tcache_dalloc_large(tcache, ptr, size);
		} else
			arena_dalloc_large(chunk->arena, chunk, ptr);
	}
}
# endif /* JEMALLOC_ARENA_INLINE_C */
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 36,460 | 29.562448 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/ql.h | /*
* List definitions.
*/
/*
 * ql: a doubly linked list layered on the qr ring primitives.  The head
 * stores only a pointer to the first element; the elements themselves form
 * a circular ring via their embedded qr linkage, so both ends of the list
 * are reachable in O(1).
 */
#define ql_head(a_type)							\
struct {								\
	a_type *qlh_first;						\
}

/* Static initializer for an empty list head. */
#define ql_head_initializer(a_head) {NULL}

/* Per-element linkage; embeds a qr ring node. */
#define ql_elm(a_type)	qr(a_type)

/* List functions. */

/* Run-time initialization of a list head to empty. */
#define ql_new(a_head) do {						\
	(a_head)->qlh_first = NULL;					\
} while (0)

/* Initialize an element's linkage (self-linked ring of one). */
#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)

#define ql_first(a_head) ((a_head)->qlh_first)

/* Last element is the ring predecessor of the first; NULL if empty. */
#define ql_last(a_head, a_field)					\
	((ql_first(a_head) != NULL)					\
	    ? qr_prev(ql_first(a_head), a_field) : NULL)

/* Successor within the list; NULL past the last element. */
#define ql_next(a_head, a_elm, a_field)					\
	((ql_last(a_head, a_field) != (a_elm))				\
	    ? qr_next((a_elm), a_field)	: NULL)

/* Predecessor within the list; NULL before the first element. */
#define ql_prev(a_head, a_elm, a_field)					\
	((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field)	\
				       : NULL)

/* Insert a_elm immediately before a_qlelm, updating the head if needed. */
#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do {		\
	qr_before_insert((a_qlelm), (a_elm), a_field);			\
	if (ql_first(a_head) == (a_qlelm)) {				\
		ql_first(a_head) = (a_elm);				\
	}								\
} while (0)

/* Insert a_elm immediately after a_qlelm (head cannot change). */
#define ql_after_insert(a_qlelm, a_elm, a_field)			\
	qr_after_insert((a_qlelm), (a_elm), a_field)

/* Push a_elm at the front of the list. */
#define ql_head_insert(a_head, a_elm, a_field) do {			\
	if (ql_first(a_head) != NULL) {					\
		qr_before_insert(ql_first(a_head), (a_elm), a_field);	\
	}								\
	ql_first(a_head) = (a_elm);					\
} while (0)

/* Append a_elm at the back: link before first, then rotate head forward. */
#define ql_tail_insert(a_head, a_elm, a_field) do {			\
	if (ql_first(a_head) != NULL) {					\
		qr_before_insert(ql_first(a_head), (a_elm), a_field);	\
	}								\
	ql_first(a_head) = qr_next((a_elm), a_field);			\
} while (0)

/* Unlink a_elm; the list becomes empty if a_elm was the only element. */
#define ql_remove(a_head, a_elm, a_field) do {				\
	if (ql_first(a_head) == (a_elm)) {				\
		ql_first(a_head) = qr_next(ql_first(a_head), a_field);	\
	}								\
	if (ql_first(a_head) != (a_elm)) {				\
		qr_remove((a_elm), a_field);				\
	} else {							\
		ql_first(a_head) = NULL;				\
	}								\
} while (0)

/* Pop the first element (list must be non-empty). */
#define ql_head_remove(a_head, a_type, a_field) do {			\
	a_type *t = ql_first(a_head);					\
	ql_remove((a_head), t, a_field);				\
} while (0)

/* Pop the last element (list must be non-empty). */
#define ql_tail_remove(a_head, a_type, a_field) do {			\
	a_type *t = ql_last(a_head, a_field);				\
	ql_remove((a_head), t, a_field);				\
} while (0)

/* Forward iteration; a_var must not be removed while iterating. */
#define ql_foreach(a_var, a_head, a_field)				\
	qr_foreach((a_var), ql_first(a_head), a_field)

/* Reverse iteration. */
#define ql_reverse_foreach(a_var, a_head, a_field)			\
	qr_reverse_foreach((a_var), ql_first(a_head), a_field)
| 2,373 | 27.261905 | 65 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/qr.h | /* Ring definitions. */
/*
 * qr: intrusive circular doubly linked list ("ring").  Every initialized
 * element is always a member of some ring; a detached element is a ring of
 * one.
 */
#define qr(a_type) \
struct { \
a_type *qre_next; \
a_type *qre_prev; \
}
/* Ring functions. */
/* Initialize a_qr as a one-element ring (both links point at itself). */
#define qr_new(a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
/* Splice a_qr into the ring immediately before a_qrelm. */
#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qrelm); \
(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
(a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)
/*
 * Splice a_qr into the ring immediately after a_qrelm.  Arguments may be
 * evaluated more than once, so pass side-effect-free lvalues.
 */
#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_prev = (a_qrelm); \
(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
(a_qrelm)->a_field.qre_next->a_field.qre_prev = (a_qr); \
(a_qrelm)->a_field.qre_next = (a_qr); \
} while (0)
/*
 * Meld two rings into one by exchanging neighbors at a_qr_a and a_qr_b.
 * The temporary is a void * so the macro works for any element type.
 */
#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
void *t; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
t = (a_qr_a)->a_field.qre_prev; \
(a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
(a_qr_b)->a_field.qre_prev = t; \
} while (0)
/* qr_meld() and qr_split() are functionally equivalent, so there's no need to
 * have two copies of the code. */
#define qr_split(a_qr_a, a_qr_b, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_field)
/* Unlink a_qr from its ring, leaving it as a one-element ring. */
#define qr_remove(a_qr, a_field) do { \
(a_qr)->a_field.qre_prev->a_field.qre_next \
= (a_qr)->a_field.qre_next; \
(a_qr)->a_field.qre_next->a_field.qre_prev \
= (a_qr)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
/* Visit every element of the ring containing a_qr, starting at a_qr. */
#define qr_foreach(var, a_qr, a_field) \
for ((var) = (a_qr); \
(var) != NULL; \
(var) = (((var)->a_field.qre_next != (a_qr)) \
? (var)->a_field.qre_next : NULL))
/* Visit every element in reverse, starting at a_qr's predecessor and */
/* ending at a_qr itself. */
#define qr_reverse_foreach(var, a_qr, a_field) \
for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
(var) != NULL; \
(var) = (((var) != (a_qr)) \
? (var)->a_field.qre_prev : NULL))
| 2,255 | 32.176471 | 78 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/public_namespace.sh | #!/bin/sh
# Emit "#define je_<name> JEMALLOC_N(<name>)" for every symbol listed in the
# file named by $1.  Input lines may carry a ":<suffix>"; only the leading
# name (first ':'-separated field) is kept.
# $1 is quoted so paths containing spaces work; the command substitution is
# intentionally unquoted so each whitespace-separated symbol is iterated.
for nm in $(cat "$1") ; do
  n=$(echo "${nm}" | tr ':' ' ' | awk '{print $1}')
  echo "#define je_${n} JEMALLOC_N(${n})"
done
| 129 | 17.571429 | 46 | sh |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/chunk_mmap.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
/* Release physical pages backing [addr, addr+length); whether the range is */
/* guaranteed zeroed afterwards depends on the implementation — see the .c. */
bool pages_purge(void *addr, size_t length, bool file_mapped);
/* Allocate a chunk of the given size/alignment; presumably mmap()-backed */
/* per the file name — see the corresponding .c for the exact mechanism. */
void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
/* Return a chunk previously obtained from chunk_alloc_mmap(). */
bool chunk_dalloc_mmap(void *chunk, size_t size);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 819 | 34.652174 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/private_unnamespace.sh | #!/bin/sh
# Emit "#undef <symbol>" for every whitespace-separated symbol listed in the
# file named by $1.  $1 is quoted for safety; the command substitution is
# intentionally unquoted so each symbol is iterated separately.
for symbol in $(cat "$1") ; do
  echo "#undef ${symbol}"
done
| 70 | 10.833333 | 27 | sh |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/chunk.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
 * Size and alignment of memory chunks that are allocated by the OS's virtual
 * memory system.
 */
#define LG_CHUNK_DEFAULT 22
/* Return the chunk address for allocation address a. */
#define CHUNK_ADDR2BASE(a) \
((void *)((uintptr_t)(a) & ~chunksize_mask))
/* Return the chunk offset of address a. */
#define CHUNK_ADDR2OFFSET(a) \
((size_t)((uintptr_t)(a) & chunksize_mask))
/* Return the smallest chunk multiple that is >= s. */
#define CHUNK_CEILING(s) \
(((s) + chunksize_mask) & ~chunksize_mask)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern size_t opt_lg_chunk;
extern const char *opt_dss;
extern size_t chunksize;
extern size_t chunksize_mask; /* (chunksize - 1). */
extern size_t chunk_npages;
extern size_t map_bias; /* Number of arena chunk header pages. */
extern size_t arena_maxclass; /* Max size class for arenas. */
void *chunk_alloc_base(pool_t *pool, size_t size);
void *chunk_alloc_arena(chunk_alloc_t *chunk_alloc,
chunk_dalloc_t *chunk_dalloc, arena_t *arena, void *new_addr,
size_t size, size_t alignment, bool *zero);
void *chunk_alloc_default(void *new_addr, size_t size, size_t alignment,
bool *zero, unsigned arena_ind, pool_t *pool);
void chunk_unmap(pool_t *pool, void *chunk, size_t size);
bool chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind, pool_t *pool);
void chunk_record(pool_t *pool, extent_tree_t *chunks_szad,
extent_tree_t *chunks_ad, void *chunk, size_t size, bool zeroed);
/* Explicit (void): empty parens declare an unprototyped function in C. */
bool chunk_global_boot(void);
bool chunk_boot(pool_t *pool);
bool chunk_init(pool_t *pool);
/* fork(2) handlers; the 0/1 suffixes suggest two lock/unlock phases — */
/* NOTE(review): confirm ordering against the callers in the allocator core. */
void chunk_prefork0(pool_t *pool);
void chunk_prefork1(pool_t *pool);
void chunk_postfork_parent0(pool_t *pool);
void chunk_postfork_parent1(pool_t *pool);
void chunk_postfork_child0(pool_t *pool);
void chunk_postfork_child1(pool_t *pool);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#include "jemalloc/internal/chunk_dss.h"
#include "jemalloc/internal/chunk_mmap.h"
| 2,490 | 35.632353 | 86 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/ckh.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct ckh_s ckh_t;
typedef struct ckhc_s ckhc_t;
/* Typedefs to allow easy function pointer passing. */
typedef void ckh_hash_t (const void *, size_t[2]);
typedef bool ckh_keycomp_t (const void *, const void *);
/* Maintain counters used to get an idea of performance. */
/* #define CKH_COUNT */
/* Print counter values in ckh_delete() (requires CKH_COUNT). */
/* #define CKH_VERBOSE */
/*
 * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit
 * one bucket per L1 cache line.
 */
#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/* Hash table cell. */
struct ckhc_s {
const void *key;
const void *data;
};
/* Hash table state. */
struct ckh_s {
#ifdef CKH_COUNT
/* Counters used to get an idea of performance. */
uint64_t ngrows;
uint64_t nshrinks;
uint64_t nshrinkfails;
uint64_t ninserts;
uint64_t nrelocs;
#endif
/* Used for pseudo-random number generation. */
#define CKH_A 1103515241
#define CKH_C 12347
uint32_t prng_state;
/* Total number of items. */
size_t count;
/*
 * Minimum and current number of hash table buckets. There are
 * 2^LG_CKH_BUCKET_CELLS cells per bucket.
 */
unsigned lg_minbuckets;
unsigned lg_curbuckets;
/* Hash and comparison functions. */
ckh_hash_t *hash;
ckh_keycomp_t *keycomp;
/* Hash table with 2^lg_curbuckets buckets. */
ckhc_t *tab;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
bool ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh_keycomp_t *keycomp);
void ckh_delete(ckh_t *ckh);
size_t ckh_count(ckh_t *ckh);
bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
bool ckh_insert(ckh_t *ckh, const void *key, const void *data);
bool ckh_remove(ckh_t *ckh, const void *searchkey, void **key,
void **data);
/* Parameter name fixed: was the typo "seachkey". */
bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
void ckh_string_hash(const void *key, size_t r_hash[2]);
bool ckh_string_keycomp(const void *k1, const void *k2);
void ckh_pointer_hash(const void *key, size_t r_hash[2]);
bool ckh_pointer_keycomp(const void *k1, const void *k2);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 2,646 | 28.741573 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/rb.h | /*-
*******************************************************************************
*
* cpp macro implementation of left-leaning 2-3 red-black trees. Parent
* pointers are not used, and color bits are stored in the least significant
* bit of right-child pointers (if RB_COMPACT is defined), thus making node
* linkage as compact as is possible for red-black trees.
*
* Usage:
*
* #include <stdint.h>
* #include <stdbool.h>
* #define NDEBUG // (Optional, see assert(3).)
* #include <assert.h>
* #define RB_COMPACT // (Optional, embed color bits in right-child pointers.)
* #include <rb.h>
* ...
*
*******************************************************************************
*/
#ifndef RB_H_
#define RB_H_
/* XXX Avoid super-slow compile with older versions of clang */
/* NOSANITIZE defaults to nothing; for clang 3.<9 with the attribute it is */
/* redefined to suppress UBSan instrumentation on the functions it marks. */
#define NOSANITIZE
#if (__clang_major__ == 3 && __clang_minor__ < 9)
#if __has_attribute(__no_sanitize__)
#undef NOSANITIZE
#define NOSANITIZE __attribute__((no_sanitize("undefined")))
#endif
#endif
#ifdef RB_COMPACT
/* Node structure. */
/* Compact layout: node color lives in bit 0 of the right-child pointer, */
/* so node objects must be at least 2-byte aligned. */
#define rb_node(a_type) \
struct { \
a_type *rbn_left; \
a_type *rbn_right_red; \
}
#else
#define rb_node(a_type) \
struct { \
a_type *rbn_left; \
a_type *rbn_right; \
bool rbn_red; \
}
#endif
/* Root structure. */
/* rbt_nil is an embedded sentinel used in place of NULL child pointers. */
#define rb_tree(a_type) \
struct { \
a_type *rbt_root; \
a_type rbt_nil; \
}
/* Left accessors. */
#define rbtn_left_get(a_type, a_field, a_node) \
((a_node)->a_field.rbn_left)
#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \
(a_node)->a_field.rbn_left = a_left; \
} while (0)
#ifdef RB_COMPACT
/* Right accessors. */
/* Mask off the low (color) bit to recover the right-child pointer. */
#define rbtn_right_get(a_type, a_field, a_node) \
((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \
& ((ssize_t)-2)))
#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
(a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \
| (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \
} while (0)
/* Color accessors. */
/* Red == low bit set on rbn_right_red. */
#define rbtn_red_get(a_type, a_field, a_node) \
((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \
& ((size_t)1)))
#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
(a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \
(a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \
| ((ssize_t)a_red)); \
} while (0)
#define rbtn_red_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \
(a_node)->a_field.rbn_right_red) | ((size_t)1)); \
} while (0)
#define rbtn_black_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \
(a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \
} while (0)
#else
/* Right accessors. */
#define rbtn_right_get(a_type, a_field, a_node) \
((a_node)->a_field.rbn_right)
#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \
(a_node)->a_field.rbn_right = a_right; \
} while (0)
/* Color accessors. */
#define rbtn_red_get(a_type, a_field, a_node) \
((a_node)->a_field.rbn_red)
#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \
(a_node)->a_field.rbn_red = (a_red); \
} while (0)
#define rbtn_red_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_red = true; \
} while (0)
#define rbtn_black_set(a_type, a_field, a_node) do { \
(a_node)->a_field.rbn_red = false; \
} while (0)
#endif
/* Node initializer. */
/* New nodes start red with both children pointing at the tree's sentinel. */
#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \
rbtn_left_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \
rbtn_right_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \
rbtn_red_set(a_type, a_field, (a_node)); \
} while (0)
/* Tree initializer. */
/* The sentinel is initialized like a node and then forced black. */
#define rb_new(a_type, a_field, a_rbt) do { \
(a_rbt)->rbt_root = &(a_rbt)->rbt_nil; \
rbt_node_new(a_type, a_field, a_rbt, &(a_rbt)->rbt_nil); \
rbtn_black_set(a_type, a_field, &(a_rbt)->rbt_nil); \
} while (0)
/* Internal utility macros. */
/* Walk left from a_root to the minimum node (sentinel if subtree empty). */
#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \
(r_node) = (a_root); \
if ((r_node) != &(a_rbt)->rbt_nil) { \
for (; \
rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil;\
(r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \
} \
} \
} while (0)
/* Walk right from a_root to the maximum node (sentinel if subtree empty). */
#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \
(r_node) = (a_root); \
if ((r_node) != &(a_rbt)->rbt_nil) { \
for (; rbtn_right_get(a_type, a_field, (r_node)) != \
&(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field, \
(r_node))) { \
} \
} \
} while (0)
/* Left rotation; r_node receives the new subtree root. */
#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \
(r_node) = rbtn_right_get(a_type, a_field, (a_node)); \
rbtn_right_set(a_type, a_field, (a_node), \
rbtn_left_get(a_type, a_field, (r_node))); \
rbtn_left_set(a_type, a_field, (r_node), (a_node)); \
} while (0)
/* Right rotation; r_node receives the new subtree root. */
#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \
(r_node) = rbtn_left_get(a_type, a_field, (a_node)); \
rbtn_left_set(a_type, a_field, (a_node), \
rbtn_right_get(a_type, a_field, (r_node))); \
rbtn_right_set(a_type, a_field, (r_node), (a_node)); \
} while (0)
/*
 * The rb_proto() macro generates function prototypes that correspond to the
 * functions generated by an equivalently parameterized call to rb_gen().
 */
/* Keep this list in sync with the definitions emitted by rb_gen(). */
#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree); \
a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree); \
a_attr a_type * \
a_prefix##last(a_rbt_type *rbtree); \
a_attr a_type * \
a_prefix##next(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##search(a_rbt_type *rbtree, a_type *key); \
a_attr a_type * \
a_prefix##nsearch(a_rbt_type *rbtree, a_type *key); \
a_attr a_type * \
a_prefix##psearch(a_rbt_type *rbtree, a_type *key); \
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node); \
a_attr void \
a_prefix##remove(a_rbt_type *rbtree, a_type *node); \
a_attr a_type * \
a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
a_rbt_type *, a_type *, void *), void *arg); \
a_attr a_type * \
a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg);
/*
* The rb_gen() macro generates a type-specific red-black tree implementation,
* based on the above cpp macros.
*
* Arguments:
*
* a_attr : Function attribute for generated functions (ex: static).
* a_prefix : Prefix for generated functions (ex: ex_).
* a_rb_type : Type for red-black tree data structure (ex: ex_t).
* a_type : Type for red-black tree node data structure (ex: ex_node_t).
* a_field : Name of red-black tree node linkage (ex: ex_link).
* a_cmp : Node comparison function name, with the following prototype:
* int (a_cmp *)(a_type *a_node, a_type *a_other);
* ^^^^^^
* or a_key
* Interpretation of comparison function return values:
* -1 : a_node < a_other
* 0 : a_node == a_other
* 1 : a_node > a_other
* In all cases, the a_node or a_key macro argument is the first
* argument to the comparison function, which makes it possible
* to write comparison functions that treat the first argument
* specially.
*
* Assuming the following setup:
*
* typedef struct ex_node_s ex_node_t;
* struct ex_node_s {
* rb_node(ex_node_t) ex_link;
* };
* typedef rb_tree(ex_node_t) ex_t;
* rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)
*
* The following API is generated:
*
* static void
* ex_new(ex_t *tree);
* Description: Initialize a red-black tree structure.
* Args:
* tree: Pointer to an uninitialized red-black tree object.
*
* static ex_node_t *
* ex_first(ex_t *tree);
* static ex_node_t *
* ex_last(ex_t *tree);
* Description: Get the first/last node in tree.
* Args:
* tree: Pointer to an initialized red-black tree object.
* Ret: First/last node in tree, or NULL if tree is empty.
*
* static ex_node_t *
* ex_next(ex_t *tree, ex_node_t *node);
* static ex_node_t *
* ex_prev(ex_t *tree, ex_node_t *node);
* Description: Get node's successor/predecessor.
* Args:
* tree: Pointer to an initialized red-black tree object.
* node: A node in tree.
* Ret: node's successor/predecessor in tree, or NULL if node is
* last/first.
*
* static ex_node_t *
* ex_search(ex_t *tree, ex_node_t *key);
* Description: Search for node that matches key.
* Args:
* tree: Pointer to an initialized red-black tree object.
* key : Search key.
* Ret: Node in tree that matches key, or NULL if no match.
*
* static ex_node_t *
* ex_nsearch(ex_t *tree, ex_node_t *key);
* static ex_node_t *
* ex_psearch(ex_t *tree, ex_node_t *key);
* Description: Search for node that matches key. If no match is found,
* return what would be key's successor/predecessor, were
* key in tree.
* Args:
* tree: Pointer to an initialized red-black tree object.
* key : Search key.
* Ret: Node in tree that matches key, or if no match, hypothetical node's
* successor/predecessor (NULL if no successor/predecessor).
*
* static void
* ex_insert(ex_t *tree, ex_node_t *node);
* Description: Insert node into tree.
* Args:
* tree: Pointer to an initialized red-black tree object.
* node: Node to be inserted into tree.
*
* static void
* ex_remove(ex_t *tree, ex_node_t *node);
* Description: Remove node from tree.
* Args:
* tree: Pointer to an initialized red-black tree object.
* node: Node in tree to be removed.
*
* static ex_node_t *
* ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *,
* ex_node_t *, void *), void *arg);
* static ex_node_t *
* ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node *(*cb)(ex_t *,
* ex_node_t *, void *), void *arg);
* Description: Iterate forward/backward over tree, starting at node. If
* tree is modified, iteration must be immediately
* terminated by the callback function that causes the
* modification.
* Args:
* tree : Pointer to an initialized red-black tree object.
* start: Node at which to start iteration, or NULL to start at
* first/last node.
* cb : Callback function, which is called for each node during
* iteration. Under normal circumstances the callback function
* should return NULL, which causes iteration to continue. If a
* callback function returns non-NULL, iteration is immediately
* terminated and the non-NULL return value is returned by the
* iterator. This is useful for re-starting iteration after
* modifying tree.
* arg : Opaque pointer passed to cb().
* Ret: NULL if iteration completed, or the non-NULL callback return value
* that caused termination of the iteration.
*/
#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree) { \
rb_new(a_type, a_field, rbtree); \
} \
a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree) { \
a_type *ret; \
rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
if (ret == &rbtree->rbt_nil) { \
ret = NULL; \
} \
return (ret); \
} \
a_attr a_type * \
a_prefix##last(a_rbt_type *rbtree) { \
a_type *ret; \
rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
if (ret == &rbtree->rbt_nil) { \
ret = NULL; \
} \
return (ret); \
} \
a_attr a_type * \
a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
a_type *ret; \
if (rbtn_right_get(a_type, a_field, node) != &rbtree->rbt_nil) { \
rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \
a_field, node), ret); \
} else { \
a_type *tnode = rbtree->rbt_root; \
assert(tnode != &rbtree->rbt_nil); \
ret = &rbtree->rbt_nil; \
while (true) { \
int cmp = (a_cmp)(node, tnode); \
if (cmp < 0) { \
ret = tnode; \
tnode = rbtn_left_get(a_type, a_field, tnode); \
} else if (cmp > 0) { \
tnode = rbtn_right_get(a_type, a_field, tnode); \
} else { \
break; \
} \
assert(tnode != &rbtree->rbt_nil); \
} \
} \
if (ret == &rbtree->rbt_nil) { \
ret = (NULL); \
} \
return (ret); \
} \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
a_type *ret; \
if (rbtn_left_get(a_type, a_field, node) != &rbtree->rbt_nil) { \
rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \
a_field, node), ret); \
} else { \
a_type *tnode = rbtree->rbt_root; \
assert(tnode != &rbtree->rbt_nil); \
ret = &rbtree->rbt_nil; \
while (true) { \
int cmp = (a_cmp)(node, tnode); \
if (cmp < 0) { \
tnode = rbtn_left_get(a_type, a_field, tnode); \
} else if (cmp > 0) { \
ret = tnode; \
tnode = rbtn_right_get(a_type, a_field, tnode); \
} else { \
break; \
} \
assert(tnode != &rbtree->rbt_nil); \
} \
} \
if (ret == &rbtree->rbt_nil) { \
ret = (NULL); \
} \
return (ret); \
} \
a_attr a_type * \
a_prefix##search(a_rbt_type *rbtree, a_type *key) { \
a_type *ret; \
int cmp; \
ret = rbtree->rbt_root; \
while (ret != &rbtree->rbt_nil \
&& (cmp = (a_cmp)(key, ret)) != 0) { \
if (cmp < 0) { \
ret = rbtn_left_get(a_type, a_field, ret); \
} else { \
ret = rbtn_right_get(a_type, a_field, ret); \
} \
} \
if (ret == &rbtree->rbt_nil) { \
ret = (NULL); \
} \
return (ret); \
} \
a_attr a_type * \
a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \
a_type *ret; \
a_type *tnode = rbtree->rbt_root; \
ret = &rbtree->rbt_nil; \
while (tnode != &rbtree->rbt_nil) { \
int cmp = (a_cmp)(key, tnode); \
if (cmp < 0) { \
ret = tnode; \
tnode = rbtn_left_get(a_type, a_field, tnode); \
} else if (cmp > 0) { \
tnode = rbtn_right_get(a_type, a_field, tnode); \
} else { \
ret = tnode; \
break; \
} \
} \
if (ret == &rbtree->rbt_nil) { \
ret = (NULL); \
} \
return (ret); \
} \
a_attr a_type * \
a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \
a_type *ret; \
a_type *tnode = rbtree->rbt_root; \
ret = &rbtree->rbt_nil; \
while (tnode != &rbtree->rbt_nil) { \
int cmp = (a_cmp)(key, tnode); \
if (cmp < 0) { \
tnode = rbtn_left_get(a_type, a_field, tnode); \
} else if (cmp > 0) { \
ret = tnode; \
tnode = rbtn_right_get(a_type, a_field, tnode); \
} else { \
ret = tnode; \
break; \
} \
} \
if (ret == &rbtree->rbt_nil) { \
ret = (NULL); \
} \
return (ret); \
} \
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
struct { \
a_type *node; \
int cmp; \
} path[sizeof(void *) << 4], *pathp; \
rbt_node_new(a_type, a_field, rbtree, node); \
/* Wind. */ \
path->node = rbtree->rbt_root; \
for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
assert(cmp != 0); \
if (cmp < 0) { \
pathp[1].node = rbtn_left_get(a_type, a_field, \
pathp->node); \
} else { \
pathp[1].node = rbtn_right_get(a_type, a_field, \
pathp->node); \
} \
} \
pathp->node = node; \
/* Unwind. */ \
for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
a_type *cnode = pathp->node; \
if (pathp->cmp < 0) { \
a_type *left = pathp[1].node; \
rbtn_left_set(a_type, a_field, cnode, left); \
if (rbtn_red_get(a_type, a_field, left)) { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
if (rbtn_red_get(a_type, a_field, leftleft)) { \
/* Fix up 4-node. */ \
a_type *tnode; \
rbtn_black_set(a_type, a_field, leftleft); \
rbtn_rotate_right(a_type, a_field, cnode, tnode); \
cnode = tnode; \
} \
} else { \
return; \
} \
} else { \
a_type *right = pathp[1].node; \
rbtn_right_set(a_type, a_field, cnode, right); \
if (rbtn_red_get(a_type, a_field, right)) { \
a_type *left = rbtn_left_get(a_type, a_field, cnode); \
if (rbtn_red_get(a_type, a_field, left)) { \
/* Split 4-node. */ \
rbtn_black_set(a_type, a_field, left); \
rbtn_black_set(a_type, a_field, right); \
rbtn_red_set(a_type, a_field, cnode); \
} else { \
/* Lean left. */ \
a_type *tnode; \
bool tred = rbtn_red_get(a_type, a_field, cnode); \
rbtn_rotate_left(a_type, a_field, cnode, tnode); \
rbtn_color_set(a_type, a_field, tnode, tred); \
rbtn_red_set(a_type, a_field, cnode); \
cnode = tnode; \
} \
} else { \
return; \
} \
} \
pathp->node = cnode; \
} \
/* Set root, and make it black. */ \
rbtree->rbt_root = path->node; \
rbtn_black_set(a_type, a_field, rbtree->rbt_root); \
} \
a_attr void NOSANITIZE \
a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
struct { \
a_type *node; \
int cmp; \
} *pathp, *nodep, path[sizeof(void *) << 4]; \
/* Wind. */ \
nodep = NULL; /* Silence compiler warning. */ \
path->node = rbtree->rbt_root; \
for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
if (cmp < 0) { \
pathp[1].node = rbtn_left_get(a_type, a_field, \
pathp->node); \
} else { \
pathp[1].node = rbtn_right_get(a_type, a_field, \
pathp->node); \
if (cmp == 0) { \
/* Find node's successor, in preparation for swap. */ \
pathp->cmp = 1; \
nodep = pathp; \
for (pathp++; pathp->node != &rbtree->rbt_nil; \
pathp++) { \
pathp->cmp = -1; \
pathp[1].node = rbtn_left_get(a_type, a_field, \
pathp->node); \
} \
break; \
} \
} \
} \
assert(nodep->node == node); \
pathp--; \
if (pathp->node != node) { \
/* Swap node with its successor. */ \
bool tred = rbtn_red_get(a_type, a_field, pathp->node); \
rbtn_color_set(a_type, a_field, pathp->node, \
rbtn_red_get(a_type, a_field, node)); \
rbtn_left_set(a_type, a_field, pathp->node, \
rbtn_left_get(a_type, a_field, node)); \
/* If node's successor is its right child, the following code */\
/* will do the wrong thing for the right child pointer. */\
/* However, it doesn't matter, because the pointer will be */\
/* properly set when the successor is pruned. */\
rbtn_right_set(a_type, a_field, pathp->node, \
rbtn_right_get(a_type, a_field, node)); \
rbtn_color_set(a_type, a_field, node, tred); \
/* The pruned leaf node's child pointers are never accessed */\
/* again, so don't bother setting them to nil. */\
nodep->node = pathp->node; \
pathp->node = node; \
if (nodep == path) { \
rbtree->rbt_root = nodep->node; \
} else { \
if (nodep[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, nodep[-1].node, \
nodep->node); \
} else { \
rbtn_right_set(a_type, a_field, nodep[-1].node, \
nodep->node); \
} \
} \
} else { \
a_type *left = rbtn_left_get(a_type, a_field, node); \
if (left != &rbtree->rbt_nil) { \
/* node has no successor, but it has a left child. */\
/* Splice node out, without losing the left child. */\
assert(rbtn_red_get(a_type, a_field, node) == false); \
assert(rbtn_red_get(a_type, a_field, left)); \
rbtn_black_set(a_type, a_field, left); \
if (pathp == path) { \
rbtree->rbt_root = left; \
} else { \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp[-1].node, \
left); \
} else { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
left); \
} \
} \
return; \
} else if (pathp == path) { \
/* The tree only contained one node. */ \
rbtree->rbt_root = &rbtree->rbt_nil; \
return; \
} \
} \
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
/* Prune red node, which requires no fixup. */ \
assert(pathp[-1].cmp < 0); \
rbtn_left_set(a_type, a_field, pathp[-1].node, \
&rbtree->rbt_nil); \
return; \
} \
/* The node to be pruned is black, so unwind until balance is */\
/* restored. */\
pathp->node = &rbtree->rbt_nil; \
for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
assert(pathp->cmp != 0); \
if (pathp->cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp->node, \
pathp[1].node); \
assert(rbtn_red_get(a_type, a_field, pathp[1].node) \
== false); \
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
a_type *right = rbtn_right_get(a_type, a_field, \
pathp->node); \
a_type *rightleft = rbtn_left_get(a_type, a_field, \
right); \
a_type *tnode; \
if (rbtn_red_get(a_type, a_field, rightleft)) { \
/* In the following diagrams, ||, //, and \\ */\
/* indicate the path to the removed node. */\
/* */\
/* || */\
/* pathp(r) */\
/* // \ */\
/* (b) (b) */\
/* / */\
/* (r) */\
/* */\
rbtn_black_set(a_type, a_field, pathp->node); \
rbtn_rotate_right(a_type, a_field, right, tnode); \
rbtn_right_set(a_type, a_field, pathp->node, tnode);\
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
} else { \
/* || */\
/* pathp(r) */\
/* // \ */\
/* (b) (b) */\
/* / */\
/* (b) */\
/* */\
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
} \
/* Balance restored, but rotation modified subtree */\
/* root. */\
assert((uintptr_t)pathp > (uintptr_t)path); \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp[-1].node, \
tnode); \
} else { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
tnode); \
} \
return; \
} else { \
a_type *right = rbtn_right_get(a_type, a_field, \
pathp->node); \
a_type *rightleft = rbtn_left_get(a_type, a_field, \
right); \
if (rbtn_red_get(a_type, a_field, rightleft)) { \
/* || */\
/* pathp(b) */\
/* // \ */\
/* (b) (b) */\
/* / */\
/* (r) */\
a_type *tnode; \
rbtn_black_set(a_type, a_field, rightleft); \
rbtn_rotate_right(a_type, a_field, right, tnode); \
rbtn_right_set(a_type, a_field, pathp->node, tnode);\
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
/* Balance restored, but rotation modified */\
/* subree root, which may actually be the tree */\
/* root. */\
if (pathp == path) { \
/* Set root. */ \
rbtree->rbt_root = tnode; \
} else { \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, \
pathp[-1].node, tnode); \
} else { \
rbtn_right_set(a_type, a_field, \
pathp[-1].node, tnode); \
} \
} \
return; \
} else { \
/* || */\
/* pathp(b) */\
/* // \ */\
/* (b) (b) */\
/* / */\
/* (b) */\
a_type *tnode; \
rbtn_red_set(a_type, a_field, pathp->node); \
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
pathp->node = tnode; \
} \
} \
} else { \
a_type *left; \
rbtn_right_set(a_type, a_field, pathp->node, \
pathp[1].node); \
left = rbtn_left_get(a_type, a_field, pathp->node); \
if (rbtn_red_get(a_type, a_field, left)) { \
a_type *tnode; \
a_type *leftright = rbtn_right_get(a_type, a_field, \
left); \
a_type *leftrightleft = rbtn_left_get(a_type, a_field, \
leftright); \
if (rbtn_red_get(a_type, a_field, leftrightleft)) { \
/* || */\
/* pathp(b) */\
/* / \\ */\
/* (r) (b) */\
/* \ */\
/* (b) */\
/* / */\
/* (r) */\
a_type *unode; \
rbtn_black_set(a_type, a_field, leftrightleft); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
unode); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
rbtn_right_set(a_type, a_field, unode, tnode); \
rbtn_rotate_left(a_type, a_field, unode, tnode); \
} else { \
/* || */\
/* pathp(b) */\
/* / \\ */\
/* (r) (b) */\
/* \ */\
/* (b) */\
/* / */\
/* (b) */\
assert(leftright != &rbtree->rbt_nil); \
rbtn_red_set(a_type, a_field, leftright); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
rbtn_black_set(a_type, a_field, tnode); \
} \
/* Balance restored, but rotation modified subtree */\
/* root, which may actually be the tree root. */\
if (pathp == path) { \
/* Set root. */ \
rbtree->rbt_root = tnode; \
} else { \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp[-1].node, \
tnode); \
} else { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
tnode); \
} \
} \
return; \
} else if (rbtn_red_get(a_type, a_field, pathp->node)) { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
if (rbtn_red_get(a_type, a_field, leftleft)) { \
/* || */\
/* pathp(r) */\
/* / \\ */\
/* (b) (b) */\
/* / */\
/* (r) */\
a_type *tnode; \
rbtn_black_set(a_type, a_field, pathp->node); \
rbtn_red_set(a_type, a_field, left); \
rbtn_black_set(a_type, a_field, leftleft); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
/* Balance restored, but rotation modified */\
/* subtree root. */\
assert((uintptr_t)pathp > (uintptr_t)path); \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp[-1].node, \
tnode); \
} else { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
tnode); \
} \
return; \
} else { \
/* || */\
/* pathp(r) */\
/* / \\ */\
/* (b) (b) */\
/* / */\
/* (b) */\
rbtn_red_set(a_type, a_field, left); \
rbtn_black_set(a_type, a_field, pathp->node); \
/* Balance restored. */ \
return; \
} \
} else { \
a_type *leftleft = rbtn_left_get(a_type, a_field, left);\
if (rbtn_red_get(a_type, a_field, leftleft)) { \
/* || */\
/* pathp(b) */\
/* / \\ */\
/* (b) (b) */\
/* / */\
/* (r) */\
a_type *tnode; \
rbtn_black_set(a_type, a_field, leftleft); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
/* Balance restored, but rotation modified */\
/* subtree root, which may actually be the tree */\
/* root. */\
if (pathp == path) { \
/* Set root. */ \
rbtree->rbt_root = tnode; \
} else { \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, \
pathp[-1].node, tnode); \
} else { \
rbtn_right_set(a_type, a_field, \
pathp[-1].node, tnode); \
} \
} \
return; \
} else { \
/* || */\
/* pathp(b) */\
/* / \\ */\
/* (b) (b) */\
/* / */\
/* (b) */\
rbtn_red_set(a_type, a_field, left); \
} \
} \
} \
} \
/* Set root. */ \
rbtree->rbt_root = path->node; \
assert(rbtn_red_get(a_type, a_field, rbtree->rbt_root) == false); \
} \
a_attr a_type * \
a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
if (node == &rbtree->rbt_nil) { \
return (&rbtree->rbt_nil); \
} else { \
a_type *ret; \
if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \
a_field, node), cb, arg)) != &rbtree->rbt_nil \
|| (ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
} \
return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
a_field, node), cb, arg)); \
} \
} \
a_attr a_type * \
a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
int cmp = a_cmp(start, node); \
if (cmp < 0) { \
a_type *ret; \
if ((ret = a_prefix##iter_start(rbtree, start, \
rbtn_left_get(a_type, a_field, node), cb, arg)) != \
&rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
} \
return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
a_field, node), cb, arg)); \
} else if (cmp > 0) { \
return (a_prefix##iter_start(rbtree, start, \
rbtn_right_get(a_type, a_field, node), cb, arg)); \
} else { \
a_type *ret; \
if ((ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
} \
return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
a_field, node), cb, arg)); \
} \
} \
a_attr a_type * \
a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
a_rbt_type *, a_type *, void *), void *arg) { \
a_type *ret; \
if (start != NULL) { \
ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \
cb, arg); \
} else { \
ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
} \
if (ret == &rbtree->rbt_nil) { \
ret = NULL; \
} \
return (ret); \
} \
a_attr a_type * \
a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
if (node == &rbtree->rbt_nil) { \
return (&rbtree->rbt_nil); \
} else { \
a_type *ret; \
if ((ret = a_prefix##reverse_iter_recurse(rbtree, \
rbtn_right_get(a_type, a_field, node), cb, arg)) != \
&rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
} \
return (a_prefix##reverse_iter_recurse(rbtree, \
rbtn_left_get(a_type, a_field, node), cb, arg)); \
} \
} \
a_attr a_type * \
a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \
a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \
void *arg) { \
int cmp = a_cmp(start, node); \
if (cmp > 0) { \
a_type *ret; \
if ((ret = a_prefix##reverse_iter_start(rbtree, start, \
rbtn_right_get(a_type, a_field, node), cb, arg)) != \
&rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
} \
return (a_prefix##reverse_iter_recurse(rbtree, \
rbtn_left_get(a_type, a_field, node), cb, arg)); \
} else if (cmp < 0) { \
return (a_prefix##reverse_iter_start(rbtree, start, \
rbtn_left_get(a_type, a_field, node), cb, arg)); \
} else { \
a_type *ret; \
if ((ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
} \
return (a_prefix##reverse_iter_recurse(rbtree, \
rbtn_left_get(a_type, a_field, node), cb, arg)); \
} \
} \
a_attr a_type * \
a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
a_type *ret; \
if (start != NULL) { \
ret = a_prefix##reverse_iter_start(rbtree, start, \
rbtree->rbt_root, cb, arg); \
} else { \
ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \
cb, arg); \
} \
if (ret == &rbtree->rbt_nil) { \
ret = NULL; \
} \
return (ret); \
}
#endif /* RB_H_ */
| 37,224 | 37.023493 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/rtree.h | /*
* This radix tree implementation is tailored to the singular purpose of
* tracking which chunks are currently owned by jemalloc. This functionality
* is mandatory for OS X, where jemalloc must be able to respond to object
* ownership queries.
*
*******************************************************************************
*/
#ifdef JEMALLOC_H_TYPES
typedef struct rtree_s rtree_t;
/*
* Size of each radix tree node (must be a power of 2). This impacts tree
* depth.
*/
#define RTREE_NODESIZE (1U << 16)
typedef void *(rtree_alloc_t)(pool_t *, size_t);
typedef void (rtree_dalloc_t)(pool_t *, void *);
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/*
 * Radix tree state: maps the high-order bits of a key (a chunk address) to a
 * uint8_t value, consuming level2bits[i] bits of the key per tree level.
 */
struct rtree_s {
	rtree_alloc_t	*alloc;	/* allocates interior/leaf nodes from pool */
	rtree_dalloc_t	*dalloc;	/* frees nodes */
	pool_t		*pool;	/* pool passed to alloc/dalloc */
	malloc_mutex_t	mutex;	/* serializes rtree_set() (and debug gets) */
	void		**root;	/* top-level node */
	unsigned	height;	/* number of tree levels */
	unsigned	level2bits[1];	/* Dynamically sized. */
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
rtree_t *rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc,
pool_t *pool);
void rtree_delete(rtree_t *rtree);
void rtree_prefork(rtree_t *rtree);
void rtree_postfork_parent(rtree_t *rtree);
void rtree_postfork_child(rtree_t *rtree);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
#ifdef JEMALLOC_DEBUG
uint8_t rtree_get_locked(rtree_t *rtree, uintptr_t key);
#endif
uint8_t rtree_get(rtree_t *rtree, uintptr_t key);
bool rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
/*
 * RTREE_GET_GENERATE(f) expands to the definition of a lookup function named
 * f that walks the radix tree for a key and returns the stored uint8_t value
 * (0 if any interior node on the path is missing). Locking and validation
 * behavior are injected via the RTREE_LOCK/RTREE_UNLOCK/RTREE_GET_VALIDATE
 * macros, which are redefined before each expansion (locked debug variant vs.
 * unlocked fast variant below).
 */
#define RTREE_GET_GENERATE(f) \
/* The least significant bits of the key are ignored. */ \
JEMALLOC_INLINE uint8_t \
f(rtree_t *rtree, uintptr_t key) \
{ \
	uint8_t ret; \
	uintptr_t subkey; \
	unsigned i, lshift, height, bits; \
	void **node, **child; \
 \
	RTREE_LOCK(&rtree->mutex); \
	for (i = lshift = 0, height = rtree->height, node = rtree->root;\
	    i < height - 1; \
	    i++, lshift += bits, node = child) { \
		bits = rtree->level2bits[i]; \
		subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
		    3)) - bits); \
		child = (void**)node[subkey]; \
		if (child == NULL) { \
			RTREE_UNLOCK(&rtree->mutex); \
			return (0); \
		} \
	} \
 \
	/* \
	 * node is a leaf, so it contains values rather than node \
	 * pointers. \
	 */ \
	bits = rtree->level2bits[i]; \
	subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \
	    bits); \
	{ \
		uint8_t *leaf = (uint8_t *)node; \
		ret = leaf[subkey]; \
	} \
	RTREE_UNLOCK(&rtree->mutex); \
 \
	RTREE_GET_VALIDATE \
	return (ret); \
}
#ifdef JEMALLOC_DEBUG
# define RTREE_LOCK(l) malloc_mutex_lock(l)
# define RTREE_UNLOCK(l) malloc_mutex_unlock(l)
# define RTREE_GET_VALIDATE
RTREE_GET_GENERATE(rtree_get_locked)
# undef RTREE_LOCK
# undef RTREE_UNLOCK
# undef RTREE_GET_VALIDATE
#endif
#define RTREE_LOCK(l)
#define RTREE_UNLOCK(l)
#ifdef JEMALLOC_DEBUG
/*
* Suppose that it were possible for a jemalloc-allocated chunk to be
* munmap()ped, followed by a different allocator in another thread re-using
* overlapping virtual memory, all without invalidating the cached rtree
* value. The result would be a false positive (the rtree would claim that
* jemalloc owns memory that it had actually discarded). This scenario
* seems impossible, but the following assertion is a prudent sanity check.
*/
# define RTREE_GET_VALIDATE \
assert(rtree_get_locked(rtree, key) == ret);
#else
# define RTREE_GET_VALIDATE
#endif
RTREE_GET_GENERATE(rtree_get)
#undef RTREE_LOCK
#undef RTREE_UNLOCK
#undef RTREE_GET_VALIDATE
/*
 * Store val for key's high-order bits, creating (zero-filled) interior and
 * leaf nodes lazily along the path. Returns true on allocation failure,
 * false on success. The whole operation runs under rtree->mutex.
 */
JEMALLOC_INLINE bool
rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val)
{
	uintptr_t idx;
	unsigned level, shift, depth, levelbits;
	void **cur, **next;

	malloc_mutex_lock(&rtree->mutex);
	depth = rtree->height;
	cur = rtree->root;
	shift = 0;
	for (level = 0; level < depth - 1; level++) {
		levelbits = rtree->level2bits[level];
		/* Extract this level's radix index from the key. */
		idx = (key << shift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
		    levelbits);
		next = (void**)cur[idx];
		if (next == NULL) {
			/*
			 * Child missing: allocate it. The last level holds
			 * uint8_t values; all others hold node pointers.
			 */
			size_t size = ((level + 1 < depth - 1) ?
			    sizeof(void *) : (sizeof(uint8_t))) <<
			    rtree->level2bits[level+1];
			next = (void**)rtree->alloc(rtree->pool, size);
			if (next == NULL) {
				malloc_mutex_unlock(&rtree->mutex);
				return (true);
			}
			memset(next, 0, size);
			cur[idx] = next;
		}
		shift += levelbits;
		cur = next;
	}
	/* cur is a leaf: an array of values rather than node pointers. */
	levelbits = rtree->level2bits[level];
	idx = (key << shift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - levelbits);
	((uint8_t *)cur)[idx] = val;
	malloc_mutex_unlock(&rtree->mutex);
	return (false);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 5,206 | 28.754286 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/size_classes.sh | #!/bin/sh
# The following limits are chosen such that they cover all supported platforms.
# Pointer sizes.
lg_zarr="2 3"
# Quanta.
lg_qarr="3 4"
# The range of tiny size classes is [2^lg_tmin..2^(lg_q-1)].
lg_tmin=3
# Maximum lookup size.
lg_kmax=12
# Page sizes.
lg_parr="12 13 16"
# Size class group size (number of size classes for each size doubling).
lg_g=2
# pow2 <e>: compute 2^<e> by repeated doubling; the result is returned via
# the global ${pow2_result}. (Doubling presumably avoids the non-POSIX **
# arithmetic operator.)
pow2() {
  e=$1
  pow2_result=1
  while [ ${e} -gt 0 ] ; do
    pow2_result=$((${pow2_result} + ${pow2_result}))
    e=$((${e} - 1))
  done
}
# lg <x>: compute floor(log2(<x>)) by repeated halving; the result is
# returned via the global ${lg_result}. lg 0 and lg 1 both yield 0.
lg() {
  x=$1
  lg_result=0
  while [ ${x} -gt 1 ] ; do
    lg_result=$((${lg_result} + 1))
    x=$((${x} / 2))
  done
}
# size_class <index> <lg_grp> <lg_delta> <ndelta> <lg_p> <lg_kmax>
# Emit one "SC(...)" row of the SIZE_CLASSES table for the size class
# (1<<lg_grp) + ndelta*(1<<lg_delta), classifying it along the way:
#   rem             - "yes" if the size is not an exact power of two
#   bin             - "yes" if it is a small-bin size (size < page)
#   lg_delta_lookup - lg_delta if the class is covered by the lookup table
#                     (size <= 2^lg_kmax), otherwise "no"
size_class() {
  index=$1
  lg_grp=$2
  lg_delta=$3
  ndelta=$4
  lg_p=$5
  lg_kmax=$6
  # rem is "yes" unless ndelta is itself a power of two (2^lg(ndelta) == ndelta).
  lg ${ndelta}; lg_ndelta=${lg_result}; pow2 ${lg_ndelta}
  if [ ${pow2_result} -lt ${ndelta} ] ; then
    rem="yes"
  else
    rem="no"
  fi
  # lg_size: lg of the class size when it is a power of two, else lg_grp.
  lg_size=${lg_grp}
  if [ $((${lg_delta} + ${lg_ndelta})) -eq ${lg_grp} ] ; then
    lg_size=$((${lg_grp} + 1))
  else
    lg_size=${lg_grp}
    rem="yes"
  fi
  if [ ${lg_size} -lt ${lg_p} ] ; then
    bin="yes"
  else
    bin="no"
  fi
  if [ ${lg_size} -lt ${lg_kmax} \
      -o ${lg_size} -eq ${lg_kmax} -a ${rem} = "no" ] ; then
    lg_delta_lookup=${lg_delta}
  else
    lg_delta_lookup="no"
  fi
  printf ' SC(%3d, %6d, %8d, %6d, %3s, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${bin} ${lg_delta_lookup}
  # Defined upon return:
  # - lg_delta_lookup (${lg_delta} or "no")
  # - bin ("yes" or "no")
}
# Emit a " \" continuation line that visually separates size-class groups
# in the generated SIZE_CLASSES macro.
sep_line() {
  printf ' \\\n'
}
# size_classes <lg_z> <lg_q> <lg_t> <lg_p> <lg_g>
# Emit the full SIZE_CLASSES table for one configuration (lg of pointer
# size, quantum, tiny min, page size, and group size), and compute the
# following as globals for the caller: ntbins, nlbins, nbins,
# lg_tiny_maxclass, lookup_maxclass, small_maxclass.
size_classes() {
  lg_z=$1
  lg_q=$2
  lg_t=$3
  lg_p=$4
  lg_g=$5
  # ptr_bits = 8 * 2^lg_z (bits per pointer); g = classes per size doubling.
  pow2 $((${lg_z} + 3)); ptr_bits=${pow2_result}
  pow2 ${lg_g}; g=${pow2_result}
  echo "#define SIZE_CLASSES \\"
  echo " /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \\"
  ntbins=0
  nlbins=0
  lg_tiny_maxclass='"NA"'
  nbins=0
  # Tiny size classes.
  ndelta=0
  index=0
  lg_grp=${lg_t}
  lg_delta=${lg_grp}
  while [ ${lg_grp} -lt ${lg_q} ] ; do
    size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
    if [ ${lg_delta_lookup} != "no" ] ; then
      nlbins=$((${index} + 1))
    fi
    if [ ${bin} != "no" ] ; then
      nbins=$((${index} + 1))
    fi
    ntbins=$((${ntbins} + 1))
    lg_tiny_maxclass=${lg_grp} # Final written value is correct.
    index=$((${index} + 1))
    lg_delta=${lg_grp}
    lg_grp=$((${lg_grp} + 1))
  done
  # First non-tiny group.
  if [ ${ntbins} -gt 0 ] ; then
    sep_line
    # The first size class has an unusual encoding, because the size has to be
    # split between grp and delta*ndelta.
    lg_grp=$((${lg_grp} - 1))
    ndelta=1
    size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
    index=$((${index} + 1))
    lg_grp=$((${lg_grp} + 1))
    lg_delta=$((${lg_delta} + 1))
  fi
  while [ ${ndelta} -lt ${g} ] ; do
    size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
    index=$((${index} + 1))
    ndelta=$((${ndelta} + 1))
  done
  # All remaining groups.
  lg_grp=$((${lg_grp} + ${lg_g}))
  while [ ${lg_grp} -lt ${ptr_bits} ] ; do
    sep_line
    ndelta=1
    # The last group is one class short so the largest size fits in ptr_bits.
    if [ ${lg_grp} -eq $((${ptr_bits} - 1)) ] ; then
      ndelta_limit=$((${g} - 1))
    else
      ndelta_limit=${g}
    fi
    while [ ${ndelta} -le ${ndelta_limit} ] ; do
      size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
      if [ ${lg_delta_lookup} != "no" ] ; then
        nlbins=$((${index} + 1))
        # Final written value is correct:
        lookup_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
      fi
      if [ ${bin} != "no" ] ; then
        nbins=$((${index} + 1))
        # Final written value is correct:
        small_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
      fi
      index=$((${index} + 1))
      ndelta=$((${ndelta} + 1))
    done
    lg_grp=$((${lg_grp} + 1))
    lg_delta=$((${lg_delta} + 1))
  done
  echo
  # Defined upon completion:
  # - ntbins
  # - nlbins
  # - nbins
  # - lg_tiny_maxclass
  # - lookup_maxclass
  # - small_maxclass
}
cat <<EOF
/* This file was automatically generated by size_classes.sh. */
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/*
* This header requires LG_SIZEOF_PTR, LG_TINY_MIN, LG_QUANTUM, and LG_PAGE to
* be defined prior to inclusion, and it in turn defines:
*
* LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling.
* SIZE_CLASSES: Complete table of
* SC(index, lg_delta, size, bin, lg_delta_lookup) tuples.
* index: Size class index.
* lg_grp: Lg group base size (no deltas added).
* lg_delta: Lg delta to previous size class.
* ndelta: Delta multiplier. size == 1<<lg_grp + ndelta<<lg_delta
* bin: 'yes' if a small bin size class, 'no' otherwise.
* lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no'
* otherwise.
* NTBINS: Number of tiny bins.
* NLBINS: Number of bins supported by the lookup table.
* NBINS: Number of small size class bins.
* LG_TINY_MAXCLASS: Lg of maximum tiny size class.
* LOOKUP_MAXCLASS: Maximum size class included in lookup table.
* SMALL_MAXCLASS: Maximum small size class.
*/
#define LG_SIZE_CLASS_GROUP ${lg_g}
EOF
# Generate one SIZE_CLASSES table (plus its derived constants) for every
# supported combination of pointer size, tiny minimum, quantum, and page
# size; each table is guarded by an #if on the matching LG_* macros so the
# compiler selects exactly one at build time.
for lg_z in ${lg_zarr} ; do
  for lg_q in ${lg_qarr} ; do
    lg_t=${lg_tmin}
    while [ ${lg_t} -le ${lg_q} ] ; do
      # Iterate through page sizes and compute how many bins there are.
      for lg_p in ${lg_parr} ; do
        echo "#if (LG_SIZEOF_PTR == ${lg_z} && LG_TINY_MIN == ${lg_t} && LG_QUANTUM == ${lg_q} && LG_PAGE == ${lg_p})"
        size_classes ${lg_z} ${lg_q} ${lg_t} ${lg_p} ${lg_g}
        echo "#define SIZE_CLASSES_DEFINED"
        echo "#define NTBINS ${ntbins}"
        echo "#define NLBINS ${nlbins}"
        echo "#define NBINS ${nbins}"
        echo "#define LG_TINY_MAXCLASS ${lg_tiny_maxclass}"
        echo "#define LOOKUP_MAXCLASS ${lookup_maxclass}"
        echo "#define SMALL_MAXCLASS ${small_maxclass}"
        echo "#endif"
        echo
      done
      lg_t=$((${lg_t} + 1))
    done
  done
done
cat <<EOF
#ifndef SIZE_CLASSES_DEFINED
# error "No size class definitions match configuration"
#endif
#undef SIZE_CLASSES_DEFINED
/*
* The small_size2bin lookup table uses uint8_t to encode each bin index, so we
* cannot support more than 256 small size classes. Further constrain NBINS to
* 255 since all small size classes, plus a "not small" size class must be
* stored in 8 bits of arena_chunk_map_t's bits field.
*/
#if (NBINS > 255)
# error "Too many small size classes"
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
EOF
| 7,216 | 26.029963 | 119 | sh |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/private_namespace.sh | #!/bin/sh
# Read symbol names (one per line, whitespace-separated) from the file named
# by $1 and emit a "#define <sym> JEMALLOC_N(<sym>)" line for each, mangling
# internal symbols into jemalloc's private namespace.
# Fix: quote "$1" so a symbol-list path containing spaces is passed to cat
# as a single argument; $(...) replaces the legacy backtick form.
for symbol in $(cat "$1") ; do
  echo "#define ${symbol} JEMALLOC_N(${symbol})"
done
| 93 | 14.666667 | 48 | sh |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/stats.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
typedef struct malloc_large_stats_s malloc_large_stats_t;
typedef struct arena_stats_s arena_stats_t;
typedef struct chunk_stats_s chunk_stats_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct tcache_bin_stats_s {
/*
* Number of allocation requests that corresponded to the size of this
* bin.
*/
uint64_t nrequests;
};
struct malloc_bin_stats_s {
/*
* Current number of bytes allocated, including objects currently
* cached by tcache.
*/
size_t allocated;
/*
* Total number of allocation/deallocation requests served directly by
* the bin. Note that tcache may allocate an object, then recycle it
* many times, resulting many increments to nrequests, but only one
* each to nmalloc and ndalloc.
*/
uint64_t nmalloc;
uint64_t ndalloc;
/*
* Number of allocation requests that correspond to the size of this
* bin. This includes requests served by tcache, though tcache only
* periodically merges into this counter.
*/
uint64_t nrequests;
/* Number of tcache fills from this bin. */
uint64_t nfills;
/* Number of tcache flushes to this bin. */
uint64_t nflushes;
/* Total number of runs created for this bin's size class. */
uint64_t nruns;
/*
* Total number of runs reused by extracting them from the runs tree for
* this bin's size class.
*/
uint64_t reruns;
/* Current number of runs in this bin. */
size_t curruns;
};
struct malloc_large_stats_s {
/*
* Total number of allocation/deallocation requests served directly by
* the arena. Note that tcache may allocate an object, then recycle it
* many times, resulting many increments to nrequests, but only one
* each to nmalloc and ndalloc.
*/
uint64_t nmalloc;
uint64_t ndalloc;
/*
* Number of allocation requests that correspond to this size class.
* This includes requests served by tcache, though tcache only
* periodically merges into this counter.
*/
uint64_t nrequests;
/* Current number of runs of this size class. */
size_t curruns;
};
struct arena_stats_s {
/* Number of bytes currently mapped. */
size_t mapped;
/*
* Total number of purge sweeps, total number of madvise calls made,
* and total pages purged in order to keep dirty unused memory under
* control.
*/
uint64_t npurge;
uint64_t nmadvise;
uint64_t purged;
/* Per-size-category statistics. */
size_t allocated_large;
uint64_t nmalloc_large;
uint64_t ndalloc_large;
uint64_t nrequests_large;
size_t allocated_huge;
uint64_t nmalloc_huge;
uint64_t ndalloc_huge;
uint64_t nrequests_huge;
/*
* One element for each possible size class, including sizes that
* overlap with bin size classes. This is necessary because ipalloc()
* sometimes has to use such large objects in order to assure proper
* alignment.
*/
malloc_large_stats_t *lstats;
};
struct chunk_stats_s {
/* Number of chunks that were allocated. */
uint64_t nchunks;
/* High-water mark for number of chunks allocated. */
size_t highchunks;
/*
* Current number of chunks allocated. This value isn't maintained for
* any other purpose, so keep track of it in order to be able to set
* highchunks.
*/
size_t curchunks;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern bool opt_stats_print;
void stats_print(pool_t *pool, void (*write)(void *, const char *), void *cbopaque,
const char *opts);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
size_t stats_cactive_get(pool_t *pool);
void stats_cactive_add(pool_t *pool, size_t size);
void stats_cactive_sub(pool_t *pool, size_t size);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
/* Atomically read the pool's active-bytes counter (stats_cactive). */
JEMALLOC_INLINE size_t
stats_cactive_get(pool_t *pool)
{
	return (atomic_read_z(&(pool->stats_cactive)));
}
/* Atomically add size bytes to the pool's active-bytes counter. */
JEMALLOC_INLINE void
stats_cactive_add(pool_t *pool, size_t size)
{
	atomic_add_z(&(pool->stats_cactive), size);
}
/* Atomically subtract size bytes from the pool's active-bytes counter. */
JEMALLOC_INLINE void
stats_cactive_sub(pool_t *pool, size_t size)
{
	atomic_sub_z(&(pool->stats_cactive), size);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 4,604 | 25.016949 | 83 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/util.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/* Size of stack-allocated buffer passed to buferror(). */
#define BUFERROR_BUF 64
/*
* Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be
* large enough for all possible uses within jemalloc.
*/
#define MALLOC_PRINTF_BUFSIZE 4096
/*
* Wrap a cpp argument that contains commas such that it isn't broken up into
* multiple arguments.
*/
#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
/*
* Silence compiler warnings due to uninitialized values. This is used
* wherever the compiler fails to recognize that the variable is never used
* uninitialized.
*/
#ifdef JEMALLOC_CC_SILENCE
# define JEMALLOC_CC_SILENCE_INIT(v) = v
#else
# define JEMALLOC_CC_SILENCE_INIT(v)
#endif
#ifndef likely
#ifdef __GNUC__
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
#define likely(x) !!(x)
#define unlikely(x) !!(x)
#endif
#endif
/*
* Define a custom assert() in order to reduce the chances of deadlock during
* assertion failure.
*/
#ifndef assert
#define assert(e) do { \
if (config_debug && !(e)) { \
malloc_printf( \
"<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \
__FILE__, __LINE__, #e); \
abort(); \
} \
} while (0)
#endif
#ifndef not_reached
#define not_reached() do { \
if (config_debug) { \
malloc_printf( \
"<jemalloc>: %s:%d: Unreachable code reached\n", \
__FILE__, __LINE__); \
abort(); \
} \
} while (0)
#endif
#ifndef not_implemented
#define not_implemented() do { \
if (config_debug) { \
malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \
__FILE__, __LINE__); \
abort(); \
} \
} while (0)
#endif
#ifndef assert_not_implemented
#define assert_not_implemented(e) do { \
if (config_debug && !(e)) \
not_implemented(); \
} while (0)
#endif
/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define cassert(c) do { \
if ((c) == false) \
not_reached(); \
} while (0)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
int buferror(int err, char *buf, size_t buflen);
uintmax_t malloc_strtoumax(const char *restrict nptr,
char **restrict endptr, int base);
void malloc_write(const char *s);
/*
* malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
* point math.
*/
int malloc_vsnprintf(char *str, size_t size, const char *format,
va_list ap);
int malloc_snprintf(char *str, size_t size, const char *format, ...)
JEMALLOC_ATTR(format(printf, 3, 4));
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, va_list ap);
void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4));
void malloc_printf(const char *format, ...)
JEMALLOC_ATTR(format(printf, 1, 2));
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
int jemalloc_ffsl(long bitmap);
int jemalloc_ffs(int bitmap);
size_t pow2_ceil(size_t x);
size_t lg_floor(size_t x);
void set_errno(int errnum);
int get_errno(void);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_))
/* Sanity check: */
#if !defined(JEMALLOC_INTERNAL_FFSL) || !defined(JEMALLOC_INTERNAL_FFS)
# error Both JEMALLOC_INTERNAL_FFSL && JEMALLOC_INTERNAL_FFS should have been defined by configure
#endif
/*
 * Thin wrappers over the configure-detected find-first-set intrinsics
 * (JEMALLOC_INTERNAL_FFSL/FFS); presumably ffs semantics: 1-based index of
 * the least significant set bit, 0 if no bit is set — confirm against the
 * configure-selected implementation.
 */
JEMALLOC_ALWAYS_INLINE int
jemalloc_ffsl(long bitmap)
{
	return (JEMALLOC_INTERNAL_FFSL(bitmap));
}
JEMALLOC_ALWAYS_INLINE int
jemalloc_ffs(int bitmap)
{
	return (JEMALLOC_INTERNAL_FFS(bitmap));
}
/*
 * Compute the smallest power of 2 that is >= x, by decrementing x and then
 * smearing its highest set bit into every lower position before
 * incrementing. Edge cases that follow from the arithmetic: x == 0 yields 0
 * (0-1 smears to all-ones, then wraps), values that are already powers of
 * two are returned unchanged, and inputs above SIZE_MAX/2 + 1 wrap to 0.
 */
JEMALLOC_INLINE size_t
pow2_ceil(size_t x)
{
	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
#if (LG_SIZEOF_PTR == 3)
	/* 64-bit size_t needs one more smear step. */
	x |= x >> 32;
#endif
	x++;
	return (x);
}
/*
 * lg_floor(x): floor(log2(x)). Four implementations selected at compile
 * time; the first three map to a bit-scan instruction/intrinsic, for which
 * x == 0 gives an unspecified result (bsr/clz on zero), so callers must
 * pass x >= 1.
 */
#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
/* x86/x86-64: BSR returns the index of the highest set bit directly. */
JEMALLOC_INLINE size_t
lg_floor(size_t x)
{
	size_t ret;
	asm ("bsr %1, %0"
	    : "=r"(ret) // Outputs.
	    : "r"(x) // Inputs.
	);
	return (ret);
}
#elif (defined(_MSC_VER))
/* MSVC: _BitScanReverse{,64} picked by pointer width. */
JEMALLOC_INLINE size_t
lg_floor(size_t x)
{
	unsigned long ret;
#if (LG_SIZEOF_PTR == 3)
	_BitScanReverse64(&ret, x);
#elif (LG_SIZEOF_PTR == 2)
	_BitScanReverse(&ret, x);
#else
# error "Unsupported type size for lg_floor()"
#endif
	return ((unsigned)ret);
}
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
/* GCC/Clang: lg_floor = (bits-1) - count of leading zeros. */
JEMALLOC_INLINE size_t
lg_floor(size_t x)
{
#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
	return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x));
#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
	return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x));
#else
# error "Unsupported type sizes for lg_floor()"
#endif
}
#else
/*
 * Portable fallback: smear the high bit downward so x+1 is a power of two,
 * then use ffs to locate it (all-ones is special-cased to avoid the wrap).
 */
JEMALLOC_INLINE size_t
lg_floor(size_t x)
{
	x |= (x >> 1);
	x |= (x >> 2);
	x |= (x >> 4);
	x |= (x >> 8);
	x |= (x >> 16);
#if (LG_SIZEOF_PTR == 3 && LG_SIZEOF_PTR == LG_SIZEOF_LONG)
	x |= (x >> 32);
	if (x == KZU(0xffffffffffffffff))
		return (63);
	x++;
	return (jemalloc_ffsl(x) - 2);
#elif (LG_SIZEOF_PTR == 2)
	if (x == KZU(0xffffffff))
		return (31);
	x++;
	return (jemalloc_ffs(x) - 2);
#else
# error "Unsupported type sizes for lg_floor()"
#endif
}
#endif
/*
 * Set the thread's error code. On Windows both errno and the Win32
 * last-error value are updated so either reporting channel sees the code.
 */
JEMALLOC_INLINE void
set_errno(int errnum)
{
#ifdef _WIN32
	int err = errnum;
	errno = err;
	SetLastError(errnum);
#else
	errno = errnum;
#endif
}
/* Get the last error code (Win32 last-error on Windows, errno elsewhere). */
JEMALLOC_INLINE int
get_errno(void)
{
#ifdef _WIN32
	return (GetLastError());
#else
	return (errno);
#endif
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 6,088 | 21.977358 | 99 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/tcache.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct tcache_bin_info_s tcache_bin_info_t;
typedef struct tcache_bin_s tcache_bin_t;
typedef struct tcache_s tcache_t;
typedef struct tsd_tcache_s tsd_tcache_t;
/*
* tcache pointers close to NULL are used to encode state information that is
* used for two purposes: preventing thread caching on a per thread basis and
* cleaning up during thread shutdown.
*/
#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1)
#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2)
#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
/*
* Absolute maximum number of cache slots for each small bin in the thread
* cache. This is an additional constraint beyond that imposed as: twice the
* number of regions per run for this size class.
*
* This constant must be an even number.
*/
#define TCACHE_NSLOTS_SMALL_MAX 200
/* Number of cache slots for large size classes. */
#define TCACHE_NSLOTS_LARGE 20
/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define LG_TCACHE_MAXCLASS_DEFAULT 15
/*
* TCACHE_GC_SWEEP is the approximate number of allocation events between
* full GC sweeps. Integer rounding may cause the actual number to be
* slightly higher, since GC is performed incrementally.
*/
#define TCACHE_GC_SWEEP 8192
/* Number of tcache allocation/deallocation events between incremental GCs. */
#define TCACHE_GC_INCR \
((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
#define TSD_TCACHE_INITIALIZER JEMALLOC_ARG_CONCAT({.npools = 0, .seqno = NULL, .tcaches = NULL})
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
typedef enum {
tcache_enabled_false = 0, /* Enable cast to/from bool. */
tcache_enabled_true = 1,
tcache_enabled_default = 2
} tcache_enabled_t;
/*
* Read-only information associated with each element of tcache_t's tbins array
* is stored separately, mainly to reduce memory usage.
*/
struct tcache_bin_info_s {
unsigned ncached_max; /* Upper limit on ncached. */
};
struct tcache_bin_s {
tcache_bin_stats_t tstats;
int low_water; /* Min # cached since last GC. */
unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */
unsigned ncached; /* # of cached objects. */
void **avail; /* Stack of available objects. */
};
struct tcache_s {
ql_elm(tcache_t) link; /* Used for aggregating stats. */
uint64_t prof_accumbytes;/* Cleared after arena_prof_accum() */
arena_t *arena; /* This thread's arena. */
unsigned ev_cnt; /* Event count since incremental GC. */
unsigned next_gc_bin; /* Next bin to GC. */
tcache_bin_t tbins[1]; /* Dynamically sized. */
/*
* The pointer stacks associated with tbins follow as a contiguous
* array. During tcache initialization, the avail pointer in each
* element of tbins is initialized to point to the proper offset within
* this array.
*/
};
struct tsd_tcache_s {
size_t npools;
unsigned *seqno; /* Sequence number of pool */
tcache_t **tcaches;
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
extern bool opt_tcache;
extern ssize_t opt_lg_tcache_max;
extern tcache_bin_info_t *tcache_bin_info;
/*
* Number of tcache bins. There are NBINS small-object bins, plus 0 or more
* large-object bins.
*/
extern size_t nhbins;
/* Maximum cached size class. */
extern size_t tcache_maxclass;
size_t tcache_salloc(const void *ptr);
void tcache_event_hard(tcache_t *tcache);
void *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
size_t binind);
void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
tcache_t *tcache);
void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
tcache_t *tcache);
void tcache_arena_associate(tcache_t *tcache, arena_t *arena);
void tcache_arena_dissociate(tcache_t *tcache);
tcache_t *tcache_get_hard(tcache_t *tcache, pool_t *pool, bool create);
tcache_t *tcache_create(arena_t *arena);
void tcache_destroy(tcache_t *tcache);
bool tcache_tsd_extend(tsd_tcache_t *tsd, unsigned len);
void tcache_thread_cleanup(void *arg);
void tcache_stats_merge(tcache_t *tcache, arena_t *arena);
bool tcache_boot0(void);
bool tcache_boot1(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache, tsd_tcache_t)
malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache_enabled, tcache_enabled_t)
void tcache_event(tcache_t *tcache);
void tcache_flush(pool_t *pool);
bool tcache_enabled_get(void);
tcache_t *tcache_get(pool_t *pool, bool create);
void tcache_enabled_set(bool enabled);
void *tcache_alloc_easy(tcache_bin_t *tbin);
void *tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
void *tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
void tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind);
void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
/* Map of thread-specific caches. */
malloc_tsd_externs(tcache, tsd_tcache_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache, tsd_tcache_t, { 0 },
tcache_thread_cleanup)
/* Per thread flag that allows thread caches to be disabled. */
malloc_tsd_externs(tcache_enabled, tcache_enabled_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache_enabled, tcache_enabled_t,
tcache_enabled_default, malloc_tsd_no_cleanup)
/*
 * Destroy and forget this thread's cache for the given pool. The seqno
 * check guards against a stale slot: if the pool id was reused by a newer
 * pool, the cached tcache belongs to the dead pool and must not be
 * destroyed through it — the slot is simply cleared.
 * NOTE(review): indexes tsd->tcaches/tsd->seqno by pool->pool_id without a
 * bounds check against tsd->npools — confirm callers guarantee the arrays
 * were extended (cf. tcache_get()).
 */
JEMALLOC_INLINE void
tcache_flush(pool_t *pool)
{
	tsd_tcache_t *tsd = tcache_tsd_get();
	tcache_t *tcache = tsd->tcaches[pool->pool_id];
	if (tsd->seqno[pool->pool_id] == pool->seqno) {
		cassert(config_tcache);
		/* Values <= TCACHE_STATE_MAX are sentinels, not caches. */
		if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
			return;
		tcache_destroy(tcache);
	}
	tsd->tcaches[pool->pool_id] = NULL;
}
/*
 * Return whether thread caching is enabled for the calling thread. On the
 * first query (state still tcache_enabled_default) the global opt_tcache
 * setting is latched into thread-specific data.
 */
JEMALLOC_INLINE bool
tcache_enabled_get(void)
{
	tcache_enabled_t state;

	cassert(config_tcache);

	state = *tcache_enabled_tsd_get();
	if (state == tcache_enabled_default) {
		/* First query on this thread: adopt the global default. */
		state = (tcache_enabled_t)opt_tcache;
		tcache_enabled_tsd_set(&state);
	}
	return ((bool)state);
}
/*
 * Enable or disable thread caching for the calling thread across all pools.
 * Enabling clears DISABLED sentinels so caches are lazily recreated;
 * disabling destroys any live caches (only when the slot's seqno still
 * matches the pool, i.e. the pool id was not reused) and marks every slot
 * DISABLED. Runs under pools_lock to keep the pools[] array stable.
 */
JEMALLOC_INLINE void
tcache_enabled_set(bool enabled)
{
	tcache_enabled_t tcache_enabled;
	tsd_tcache_t *tsd;
	tcache_t *tcache;
	/* size_t, matching tsd->npools (was int: signed/unsigned mismatch). */
	size_t i;

	cassert(config_tcache);

	tcache_enabled = (tcache_enabled_t)enabled;
	tcache_enabled_tsd_set(&tcache_enabled);
	tsd = tcache_tsd_get();

	malloc_mutex_lock(&pools_lock);
	for (i = 0; i < tsd->npools; i++) {
		tcache = tsd->tcaches[i];
		if (tcache != NULL) {
			if (enabled) {
				if (tcache == TCACHE_STATE_DISABLED) {
					tsd->tcaches[i] = NULL;
				}
			} else /* disabled */ {
				if (tcache > TCACHE_STATE_MAX) {
					/* Live cache: destroy unless stale. */
					if (pools[i] != NULL &&
					    tsd->seqno[i] == pools[i]->seqno)
						tcache_destroy(tcache);
					tcache = NULL;
				}
				if (tcache == NULL) {
					tsd->tcaches[i] = TCACHE_STATE_DISABLED;
				}
			}
		}
	}
	malloc_mutex_unlock(&pools_lock);
}
/*
 * Fetch (and optionally create) the calling thread's cache for a pool.
 * Returns NULL when tcaches are compiled out, when lazy locking says the
 * process is still single-threaded, when the per-thread array cannot be
 * grown, or when the slot holds the DISABLED sentinel.
 */
JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(pool_t *pool, bool create)
{
	tsd_tcache_t *tsd_state;
	tcache_t *cache;

	if (config_tcache == false)
		return (NULL);
	if (config_lazy_lock && isthreaded == false)
		return (NULL);

	tsd_state = tcache_tsd_get();

	/* Grow the per-thread tcaches array when a new pool id appears. */
	if ((tsd_state->npools <= pool->pool_id) &&
	    tcache_tsd_extend(tsd_state, pool->pool_id))
		return (NULL);

	/*
	 * A stale seqno means this slot belonged to an earlier pool with the
	 * same id; it must be reset before calling tcache_get_hard().
	 */
	if (tsd_state->seqno[pool->pool_id] != pool->seqno)
		tsd_state->tcaches[pool->pool_id] = NULL;

	cache = tsd_state->tcaches[pool->pool_id];
	if ((uintptr_t)cache <= (uintptr_t)TCACHE_STATE_MAX) {
		if (cache == TCACHE_STATE_DISABLED)
			return (NULL);
		cache = tcache_get_hard(cache, pool, create);
	}
	return (cache);
}
/*
 * Count one allocation/deallocation event against this cache, and run the
 * incremental GC pass once TCACHE_GC_INCR events have accumulated.  A zero
 * TCACHE_GC_INCR disables event accounting entirely.
 */
JEMALLOC_ALWAYS_INLINE void
tcache_event(tcache_t *tcache)
{
	if (TCACHE_GC_INCR != 0) {
		tcache->ev_cnt++;
		assert(tcache->ev_cnt <= TCACHE_GC_INCR);
		if (tcache->ev_cnt == TCACHE_GC_INCR)
			tcache_event_hard(tcache);
	}
}
/*
 * Pop one cached object from a tcache bin, or return NULL when the bin is
 * empty.  Bookkeeping: an empty hit pins low_water to -1 so the GC knows the
 * bin ran dry; otherwise low_water tracks the minimum fill level seen.
 */
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin)
{
	if (tbin->ncached == 0) {
		tbin->low_water = -1;
		return (NULL);
	}
	tbin->ncached--;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
	return (tbin->avail[tbin->ncached]);
}
/*
 * Allocate a small object of at least `size` bytes from the thread cache,
 * refilling the bin from the arena on a miss.  `size` is canonicalized to
 * its bin's size class.  When `zero` is set the result is zeroed (after
 * optional junk-fill for debugging); otherwise junk/zero fill follows the
 * config_fill options.  Returns NULL only when the arena refill fails.
 */
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
{
	size_t binind = small_size2bin(size);
	tcache_bin_t *tbin;
	void *ptr;

	assert(binind < NBINS);
	tbin = &tcache->tbins[binind];
	size = small_bin2size(binind);

	ptr = tcache_alloc_easy(tbin);
	if (ptr == NULL) {
		ptr = tcache_alloc_small_hard(tcache, tbin, binind);
		if (ptr == NULL)
			return (NULL);
	}
	assert(tcache_salloc(ptr) == size);

	if (zero) {
		/* Junk first (validates redzones), then hand back zeroed memory. */
		if (config_fill && opt_junk) {
			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
			    true);
		}
		memset(ptr, 0, size);
	} else if (config_fill) {
		if (opt_junk) {
			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
			    false);
		} else if (opt_zero)
			memset(ptr, 0, size);
	}

	if (config_stats)
		tbin->tstats.nrequests++;
	if (config_prof)
		tcache->prof_accumbytes += size;
	tcache_event(tcache);
	return (ptr);
}
/*
 * Allocate a large object (page-multiple size, at most tcache_maxclass) from
 * the thread cache.  On a cache miss the request goes straight to the arena;
 * unlike the small path, the bin is not refilled.  When `zero` is set the
 * memory is zeroed; otherwise junk/zero fill follows the config_fill options.
 */
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
{
	void *ret;
	size_t binind;
	tcache_bin_t *tbin;

	size = PAGE_CEILING(size);
	assert(size <= tcache_maxclass);
	/* Large bins follow the NBINS small bins, one bin per page count. */
	binind = NBINS + (size >> LG_PAGE) - 1;
	assert(binind < nhbins);
	tbin = &tcache->tbins[binind];
	ret = tcache_alloc_easy(tbin);
	if (ret == NULL) {
		/*
		 * Only allocate one large object at a time, because it's quite
		 * expensive to create one and not use it.
		 */
		ret = arena_malloc_large(tcache->arena, size, zero);
		if (ret == NULL)
			return (NULL);
	} else {
		if (config_prof && size == PAGE) {
			/* Cached run keeps stale binind; reset it for profiling. */
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
			size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
			    LG_PAGE);
			arena_mapbits_large_binind_set(chunk, pageind,
			    BININD_INVALID);
		}
		if (zero == false) {
			if (config_fill) {
				if (opt_junk)
					memset(ret, 0xa5, size);
				else if (opt_zero)
					memset(ret, 0, size);
			}
		} else
			memset(ret, 0, size);
		/* Stats/prof updated only on cache hits; the arena path does its own. */
		if (config_stats)
			tbin->tstats.nrequests++;
		if (config_prof)
			tcache->prof_accumbytes += size;
	}

	tcache_event(tcache);
	return (ret);
}
/*
 * Return a small object to its thread-cache bin.  When the bin is already at
 * capacity, half of it is flushed back to the arena first, so the push below
 * always has room.  Junk-fills the freed memory when config_fill/opt_junk.
 */
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
{
	tcache_bin_t *tbin = &tcache->tbins[binind];
	tcache_bin_info_t *info = &tcache_bin_info[binind];

	assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);

	if (config_fill && opt_junk)
		arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);

	if (tbin->ncached == info->ncached_max) {
		tcache_bin_flush_small(tbin, binind,
		    (info->ncached_max >> 1), tcache);
	}
	assert(tbin->ncached < info->ncached_max);
	tbin->avail[tbin->ncached] = ptr;
	tbin->ncached++;

	tcache_event(tcache);
}
/*
 * Return a large object (page-multiple `size`) to its thread-cache bin,
 * flushing half the bin back to the arena first when it is full.  The freed
 * memory is junk-filled with 0x5a when config_fill/opt_junk.
 */
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
{
	size_t binind = NBINS + (size >> LG_PAGE) - 1;
	tcache_bin_t *tbin;
	tcache_bin_info_t *info;

	assert((size & PAGE_MASK) == 0);
	assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
	assert(tcache_salloc(ptr) <= tcache_maxclass);

	if (config_fill && opt_junk)
		memset(ptr, 0x5a, size);

	tbin = &tcache->tbins[binind];
	info = &tcache_bin_info[binind];
	if (tbin->ncached == info->ncached_max) {
		tcache_bin_flush_large(tbin, binind,
		    (info->ncached_max >> 1), tcache);
	}
	assert(tbin->ncached < info->ncached_max);
	tbin->avail[tbin->ncached] = ptr;
	tbin->ncached++;

	tcache_event(tcache);
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 12,206 | 26.187082 | 97 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/base.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
/* Bump allocator backing a pool's internal metadata (never freed piecemeal). */
void *base_alloc(pool_t *pool, size_t size);
void *base_calloc(pool_t *pool, size_t number, size_t size);
/* extent_node_t cache: alloc/dalloc recycle nodes; prealloc reserves `number`. */
extent_node_t *base_node_alloc(pool_t *pool);
void base_node_dalloc(pool_t *pool, extent_node_t *node);
size_t base_node_prealloc(pool_t *pool, size_t number);
/* Bootstrap / (re)initialization of a pool's base allocator state. */
bool base_boot(pool_t *pool);
bool base_init(pool_t *pool);
/* fork(2) integration: acquire/release the base mutex around fork. */
void base_prefork(pool_t *pool);
void base_postfork_parent(pool_t *pool);
void base_postfork_child(pool_t *pool);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 1,078 | 36.206897 | 80 | h |
null | NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/jemalloc/include/jemalloc/internal/bitmap.h | /******************************************************************************/
#ifdef JEMALLOC_H_TYPES
/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS
typedef struct bitmap_level_s bitmap_level_t;
typedef struct bitmap_info_s bitmap_info_t;
typedef unsigned long bitmap_t;
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
/* Number of bits per group. */
#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3)
#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS)
#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1)
/* Maximum number of levels possible. */
#define BITMAP_MAX_LEVELS \
(LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \
+ !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct bitmap_level_s {
/* Offset of this level's groups within the array of groups. */
size_t group_offset;
};
struct bitmap_info_s {
/* Logical number of bits in bitmap (stored at bottom level). */
size_t nbits;
/* Number of levels necessary for nbits. */
unsigned nlevels;
/*
* Only the first (nlevels+1) elements are used, and levels are ordered
* bottom to top (e.g. the bottom level is stored in levels[0]).
*/
bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
size_t bitmap_info_ngroups(const bitmap_info_t *binfo);
size_t bitmap_size(size_t nbits);
void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo);
bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo);
void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_))
/*
 * Return true iff every bit in the bitmap is set (i.e. no free slots).
 * Group words store free bits as 1, so the bitmap is full exactly when the
 * root summary group — the last word before the top-level offset — is zero.
 */
JEMALLOC_INLINE bool
bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
	size_t root = binfo->levels[binfo->nlevels].group_offset - 1;

	return (bitmap[root] == 0);
}
/*
 * Return true iff `bit` is set.  Groups store bits inverted (1 == free), so
 * the logical bit is set exactly when its physical bit in the group is 0.
 */
JEMALLOC_INLINE bool
bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
	size_t goff;

	assert(bit < binfo->nbits);
	goff = bit >> LG_BITMAP_GROUP_NBITS;
	return ((bitmap[goff] & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))
	    == 0);
}
/*
 * Set (allocate) `bit`, which must currently be unset.  Groups store bits
 * inverted (1 == free), so setting clears the physical bit.  When a group
 * transitions to completely full (word becomes 0), the transition is
 * propagated up the summary levels until a level remains non-full.
 *
 * Fix: the previous version wrapped the per-level `gp` computation in an
 * `if (bitmap != NULL)` guard.  `bitmap` is dereferenced unconditionally
 * before the loop, so the guard was dead code — and had it ever been false,
 * `gp` would have kept a stale pointer from the previous iteration and the
 * `*gp = g` store would corrupt the wrong group.  The guard is removed,
 * matching the straight-line loop in bitmap_unset().
 */
JEMALLOC_INLINE void
bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
	size_t goff;
	bitmap_t *gp;
	bitmap_t g;

	assert(bit < binfo->nbits);
	assert(bitmap_get(bitmap, binfo, bit) == false);
	goff = bit >> LG_BITMAP_GROUP_NBITS;
	gp = &bitmap[goff];
	g = *gp;
	assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
	g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
	*gp = g;
	assert(bitmap_get(bitmap, binfo, bit));
	/* Propagate group state transitions up the tree. */
	if (g == 0) {
		unsigned i;
		for (i = 1; i < binfo->nlevels; i++) {
			bit = goff;
			goff = bit >> LG_BITMAP_GROUP_NBITS;
			gp = &bitmap[binfo->levels[i].group_offset + goff];
			g = *gp;
			assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)));
			g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
			*gp = g;
			if (g != 0)
				break;
		}
	}
}
/* sfu: set first unset. */
/*
 * Find the lowest unset bit, set it, and return its index.  The bitmap must
 * not be full.  The search walks from the root summary level down to level 0:
 * groups store free bits as 1, so at each level the first 1 bit (via
 * jemalloc_ffsl, 1-based) selects which child group to descend into.
 */
JEMALLOC_INLINE size_t
bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
{
	size_t bit;
	bitmap_t g;
	unsigned i;

	assert(bitmap_full(bitmap, binfo) == false);

	i = binfo->nlevels - 1;
	g = bitmap[binfo->levels[i].group_offset];
	bit = jemalloc_ffsl(g) - 1;
	while (i > 0) {
		i--;
		g = bitmap[binfo->levels[i].group_offset + bit];
		/* Refine: shift in this level's group index, add bit-in-group. */
		bit = (bit << LG_BITMAP_GROUP_NBITS) + (jemalloc_ffsl(g) - 1);
	}
	bitmap_set(bitmap, binfo, bit);
	return (bit);
}
/*
 * Clear (deallocate) `bit`, which must currently be set.  Groups store bits
 * inverted (1 == free), so clearing restores the physical bit to 1.  When a
 * completely full group (word == 0) regains a free bit, the transition is
 * propagated up the summary levels so future searches can find it.
 */
JEMALLOC_INLINE void
bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
{
	size_t goff;
	bitmap_t *gp;
	bitmap_t g;
	bool propagate;

	assert(bit < binfo->nbits);
	assert(bitmap_get(bitmap, binfo, bit));
	goff = bit >> LG_BITMAP_GROUP_NBITS;
	gp = &bitmap[goff];
	g = *gp;
	/* The group was full before this unset iff its word is 0. */
	propagate = (g == 0);
	assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
	g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
	*gp = g;
	assert(bitmap_get(bitmap, binfo, bit) == false);
	/* Propagate group state transitions up the tree. */
	if (propagate) {
		unsigned i;
		for (i = 1; i < binfo->nlevels; i++) {
			bit = goff;
			goff = bit >> LG_BITMAP_GROUP_NBITS;
			gp = &bitmap[binfo->levels[i].group_offset + goff];
			g = *gp;
			propagate = (g == 0);
			assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))
			    == 0);
			g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
			*gp = g;
			/* Stop once a level was already non-full. */
			if (propagate == false)
				break;
		}
	}
}
#endif
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
| 5,240 | 27.177419 | 80 | h |
Subsets and Splits