/* file: NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/benchmarks/blk.cpp */
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* blk.cpp -- pmemblk benchmarks definitions
*/
#include "benchmark.hpp"
#include "file.h"
#include "libpmem.h"
#include "libpmemblk.h"
#include "libpmempool.h"
#include "os.h"
#include "poolset_util.hpp"
#include "rand.h"
#include <cassert>
#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fcntl.h>
#include <unistd.h>
struct blk_bench;
struct blk_worker;
/*
* op_type -- type of operation
*/
enum op_type {
OP_TYPE_UNKNOWN,
OP_TYPE_BLK,
OP_TYPE_FILE,
OP_TYPE_MEMCPY,
};
/*
* op_mode -- mode of the copy process
*/
enum op_mode {
OP_MODE_UNKNOWN,
OP_MODE_STAT, /* read/write always the same chunk */
OP_MODE_SEQ, /* read/write chunk by chunk */
OP_MODE_RAND /* read/write to chunks selected randomly */
};
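/*
 * Illustrative block-number sequences (example values only) for worker 0
 * with blocks_per_thread = 4, as generated by blk_init_worker() below:
 *   stat -> 0, 0, 0, 0, ...
 *   seq  -> 0, 1, 2, 3, 0, 1, ...
 *   rand -> e.g. 2, 0, 3, 1, ... (random within the worker's partition)
 */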
/*
* typedef for the worker function
*/
typedef int (*worker_fn)(struct blk_bench *, struct benchmark_args *,
struct blk_worker *, os_off_t);
/*
* blk_args -- benchmark specific arguments
*/
struct blk_args {
size_t fsize; /* requested file size */
bool no_warmup; /* don't do warmup */
unsigned seed; /* seed for randomization */
char *type_str; /* type: blk, file, memcpy */
char *mode_str; /* mode: stat, seq, rand */
};
/*
* blk_bench -- pmemblk benchmark context
*/
struct blk_bench {
PMEMblkpool *pbp; /* pmemblk handle */
char *addr; /* address of user data (memcpy) */
int fd; /* file descr. for file io */
size_t nblocks; /* actual number of blocks */
size_t blocks_per_thread; /* number of blocks per thread */
worker_fn worker; /* worker function */
enum op_type type;
enum op_mode mode;
};
/*
* struct blk_worker -- pmemblk worker context
*/
struct blk_worker {
os_off_t *blocks; /* array with block numbers */
char *buff; /* buffer for read/write */
rng_t rng; /* worker RNG state */
};
/*
* parse_op_type -- parse command line "--operation" argument
*
* Returns proper operation type.
*/
static enum op_type
parse_op_type(const char *arg)
{
if (strcmp(arg, "blk") == 0)
return OP_TYPE_BLK;
else if (strcmp(arg, "file") == 0)
return OP_TYPE_FILE;
else if (strcmp(arg, "memcpy") == 0)
return OP_TYPE_MEMCPY;
else
return OP_TYPE_UNKNOWN;
}
/*
* parse_op_mode -- parse command line "--mode" argument
*
* Returns proper operation mode.
*/
static enum op_mode
parse_op_mode(const char *arg)
{
if (strcmp(arg, "stat") == 0)
return OP_MODE_STAT;
else if (strcmp(arg, "seq") == 0)
return OP_MODE_SEQ;
else if (strcmp(arg, "rand") == 0)
return OP_MODE_RAND;
else
return OP_MODE_UNKNOWN;
}
/*
* blk_do_warmup -- perform warm-up by writing to each block
*/
static int
blk_do_warmup(struct blk_bench *bb, struct benchmark_args *args)
{
size_t lba;
int ret = 0;
auto *buff = (char *)calloc(1, args->dsize);
if (!buff) {
perror("calloc");
return -1;
}
for (lba = 0; lba < bb->nblocks; ++lba) {
switch (bb->type) {
case OP_TYPE_FILE: {
size_t off = lba * args->dsize;
if (pwrite(bb->fd, buff, args->dsize, off) !=
(ssize_t)args->dsize) {
perror("pwrite");
ret = -1;
goto out;
}
} break;
case OP_TYPE_BLK:
if (pmemblk_write(bb->pbp, buff, lba) < 0) {
perror("pmemblk_write");
ret = -1;
goto out;
}
break;
case OP_TYPE_MEMCPY: {
size_t off = lba * args->dsize;
pmem_memcpy_persist((char *)bb->addr + off,
buff, args->dsize);
} break;
default:
perror("unknown type");
ret = -1;
goto out;
}
}
out:
free(buff);
return ret;
}
/*
* blk_read -- read function for pmemblk
*/
static int
blk_read(struct blk_bench *bb, struct benchmark_args *ba,
struct blk_worker *bworker, os_off_t off)
{
if (pmemblk_read(bb->pbp, bworker->buff, off) < 0) {
perror("pmemblk_read");
return -1;
}
return 0;
}
/*
* fileio_read -- read function for file io
*/
static int
fileio_read(struct blk_bench *bb, struct benchmark_args *ba,
struct blk_worker *bworker, os_off_t off)
{
os_off_t file_off = off * ba->dsize;
if (pread(bb->fd, bworker->buff, ba->dsize, file_off) !=
(ssize_t)ba->dsize) {
perror("pread");
return -1;
}
return 0;
}
/*
* memcpy_read -- read function for memcpy
*/
static int
memcpy_read(struct blk_bench *bb, struct benchmark_args *ba,
struct blk_worker *bworker, os_off_t off)
{
os_off_t file_off = off * ba->dsize;
memcpy(bworker->buff, (char *)bb->addr + file_off, ba->dsize);
return 0;
}
/*
* blk_write -- write function for pmemblk
*/
static int
blk_write(struct blk_bench *bb, struct benchmark_args *ba,
struct blk_worker *bworker, os_off_t off)
{
if (pmemblk_write(bb->pbp, bworker->buff, off) < 0) {
perror("pmemblk_write");
return -1;
}
return 0;
}
/*
* memcpy_write -- write function for memcpy
*/
static int
memcpy_write(struct blk_bench *bb, struct benchmark_args *ba,
struct blk_worker *bworker, os_off_t off)
{
os_off_t file_off = off * ba->dsize;
pmem_memcpy_persist((char *)bb->addr + file_off, bworker->buff,
ba->dsize);
return 0;
}
/*
* fileio_write -- write function for file io
*/
static int
fileio_write(struct blk_bench *bb, struct benchmark_args *ba,
struct blk_worker *bworker, os_off_t off)
{
os_off_t file_off = off * ba->dsize;
if (pwrite(bb->fd, bworker->buff, ba->dsize, file_off) !=
(ssize_t)ba->dsize) {
perror("pwrite");
return -1;
}
return 0;
}
/*
* blk_operation -- main operations for blk_read and blk_write benchmark
*/
static int
blk_operation(struct benchmark *bench, struct operation_info *info)
{
auto *bb = (struct blk_bench *)pmembench_get_priv(bench);
auto *bworker = (struct blk_worker *)info->worker->priv;
os_off_t off = bworker->blocks[info->index];
return bb->worker(bb, info->args, bworker, off);
}
/*
* blk_init_worker -- initialize worker
*/
static int
blk_init_worker(struct benchmark *bench, struct benchmark_args *args,
struct worker_info *worker)
{
struct blk_worker *bworker =
(struct blk_worker *)malloc(sizeof(*bworker));
if (!bworker) {
perror("malloc");
return -1;
}
auto *bb = (struct blk_bench *)pmembench_get_priv(bench);
auto *bargs = (struct blk_args *)args->opts;
randomize_r(&bworker->rng, bargs->seed);
bworker->buff = (char *)malloc(args->dsize);
if (!bworker->buff) {
perror("malloc");
goto err_buff;
}
/* fill buffer with some random data */
memset(bworker->buff, (char)rnd64_r(&bworker->rng), args->dsize);
assert(args->n_ops_per_thread != 0);
bworker->blocks = (os_off_t *)malloc(sizeof(*bworker->blocks) *
args->n_ops_per_thread);
if (!bworker->blocks) {
perror("malloc");
goto err_blocks;
}
switch (bb->mode) {
case OP_MODE_RAND:
for (size_t i = 0; i < args->n_ops_per_thread; i++) {
bworker->blocks[i] =
worker->index * bb->blocks_per_thread +
rnd64_r(&bworker->rng) %
bb->blocks_per_thread;
}
break;
case OP_MODE_SEQ:
for (size_t i = 0; i < args->n_ops_per_thread; i++)
bworker->blocks[i] = i % bb->blocks_per_thread;
break;
case OP_MODE_STAT:
for (size_t i = 0; i < args->n_ops_per_thread; i++)
bworker->blocks[i] = 0;
break;
default:
perror("unknown mode");
goto err_mode;
}
worker->priv = bworker;
return 0;
err_mode:
free(bworker->blocks);
err_blocks:
free(bworker->buff);
err_buff:
free(bworker);
return -1;
}
/*
* blk_free_worker -- cleanup worker
*/
static void
blk_free_worker(struct benchmark *bench, struct benchmark_args *args,
struct worker_info *worker)
{
auto *bworker = (struct blk_worker *)worker->priv;
free(bworker->blocks);
free(bworker->buff);
free(bworker);
}
/*
 * blk_init -- initialization function for the benchmark
*/
static int
blk_init(struct blk_bench *bb, struct benchmark_args *args)
{
auto *ba = (struct blk_args *)args->opts;
assert(ba != nullptr);
char path[PATH_MAX];
if (util_safe_strcpy(path, args->fname, sizeof(path)) != 0)
return -1;
bb->type = parse_op_type(ba->type_str);
if (bb->type == OP_TYPE_UNKNOWN) {
fprintf(stderr, "Invalid operation argument '%s'",
ba->type_str);
return -1;
}
enum file_type type = util_file_get_type(args->fname);
if (type == OTHER_ERROR) {
fprintf(stderr, "could not check type of file %s\n",
args->fname);
return -1;
}
if (bb->type == OP_TYPE_FILE && type == TYPE_DEVDAX) {
fprintf(stderr, "fileio not supported on device dax\n");
return -1;
}
bb->mode = parse_op_mode(ba->mode_str);
if (bb->mode == OP_MODE_UNKNOWN) {
fprintf(stderr, "Invalid mode argument '%s'", ba->mode_str);
return -1;
}
if (ba->fsize == 0)
ba->fsize = PMEMBLK_MIN_POOL;
size_t req_fsize = ba->fsize;
if (ba->fsize / args->dsize < args->n_threads ||
ba->fsize < PMEMBLK_MIN_POOL) {
fprintf(stderr, "too small file size\n");
return -1;
}
if (args->dsize >= ba->fsize) {
fprintf(stderr, "block size bigger than file size\n");
return -1;
}
if (args->is_poolset || type == TYPE_DEVDAX) {
if (args->fsize < ba->fsize) {
fprintf(stderr, "file size too large\n");
return -1;
}
ba->fsize = 0;
} else if (args->is_dynamic_poolset) {
int ret = dynamic_poolset_create(args->fname, ba->fsize);
if (ret == -1)
return -1;
if (util_safe_strcpy(path, POOLSET_PATH, sizeof(path)) != 0)
return -1;
ba->fsize = 0;
}
bb->fd = -1;
/*
* Create pmemblk in order to get the number of blocks
* even for file-io mode.
*/
bb->pbp = pmemblk_create(path, args->dsize, ba->fsize, args->fmode);
if (bb->pbp == nullptr) {
perror("pmemblk_create");
return -1;
}
bb->nblocks = pmemblk_nblock(bb->pbp);
/* limit the number of used blocks */
if (bb->nblocks > req_fsize / args->dsize)
bb->nblocks = req_fsize / args->dsize;
if (bb->nblocks < args->n_threads) {
fprintf(stderr, "too small file size");
goto out_close;
}
if (bb->type == OP_TYPE_FILE) {
pmemblk_close(bb->pbp);
bb->pbp = nullptr;
int flags = O_RDWR | O_CREAT | O_SYNC;
#ifdef _WIN32
flags |= O_BINARY;
#endif
bb->fd = os_open(args->fname, flags, args->fmode);
if (bb->fd < 0) {
perror("open");
return -1;
}
} else if (bb->type == OP_TYPE_MEMCPY) {
/* skip pool header, so addr points to the first block */
bb->addr = (char *)bb->pbp + 8192;
}
bb->blocks_per_thread = bb->nblocks / args->n_threads;
if (!ba->no_warmup) {
if (blk_do_warmup(bb, args) != 0)
goto out_close;
}
return 0;
out_close:
if (bb->type == OP_TYPE_FILE)
os_close(bb->fd);
else
pmemblk_close(bb->pbp);
return -1;
}
/*
 * blk_read_init -- initialization function for the blk_read benchmark
*/
static int
blk_read_init(struct benchmark *bench, struct benchmark_args *args)
{
assert(bench != nullptr);
assert(args != nullptr);
int ret;
auto *bb = (struct blk_bench *)malloc(sizeof(struct blk_bench));
if (bb == nullptr) {
perror("malloc");
return -1;
}
pmembench_set_priv(bench, bb);
ret = blk_init(bb, args);
if (ret != 0) {
free(bb);
return ret;
}
switch (bb->type) {
case OP_TYPE_FILE:
bb->worker = fileio_read;
break;
case OP_TYPE_BLK:
bb->worker = blk_read;
break;
case OP_TYPE_MEMCPY:
bb->worker = memcpy_read;
break;
default:
perror("unknown operation type");
return -1;
}
return ret;
}
/*
 * blk_write_init -- initialization function for the blk_write benchmark
*/
static int
blk_write_init(struct benchmark *bench, struct benchmark_args *args)
{
assert(bench != nullptr);
assert(args != nullptr);
int ret;
auto *bb = (struct blk_bench *)malloc(sizeof(struct blk_bench));
if (bb == nullptr) {
perror("malloc");
return -1;
}
pmembench_set_priv(bench, bb);
ret = blk_init(bb, args);
if (ret != 0) {
free(bb);
return ret;
}
switch (bb->type) {
case OP_TYPE_FILE:
bb->worker = fileio_write;
break;
case OP_TYPE_BLK:
bb->worker = blk_write;
break;
case OP_TYPE_MEMCPY:
bb->worker = memcpy_write;
break;
default:
perror("unknown operation type");
return -1;
}
return ret;
}
/*
 * blk_exit -- de-initialization function for the benchmark
*/
static int
blk_exit(struct benchmark *bench, struct benchmark_args *args)
{
auto *bb = (struct blk_bench *)pmembench_get_priv(bench);
char path[PATH_MAX];
if (util_safe_strcpy(path, args->fname, sizeof(path)) != 0)
return -1;
if (args->is_dynamic_poolset) {
if (util_safe_strcpy(path, POOLSET_PATH, sizeof(path)) != 0)
return -1;
}
int result;
switch (bb->type) {
case OP_TYPE_FILE:
os_close(bb->fd);
break;
case OP_TYPE_BLK:
pmemblk_close(bb->pbp);
result = pmemblk_check(path, args->dsize);
if (result < 0) {
perror("pmemblk_check error");
return -1;
} else if (result == 0) {
perror("pmemblk_check: not consistent");
return -1;
}
break;
case OP_TYPE_MEMCPY:
pmemblk_close(bb->pbp);
break;
default:
perror("unknown operation type");
return -1;
}
free(bb);
return 0;
}
static struct benchmark_clo blk_clo[5];
static struct benchmark_info blk_read_info;
static struct benchmark_info blk_write_info;
CONSTRUCTOR(blk_constructor)
void
blk_constructor(void)
{
blk_clo[0].opt_short = 'o';
blk_clo[0].opt_long = "operation";
blk_clo[0].descr = "Operation type - blk, file, memcpy";
blk_clo[0].type = CLO_TYPE_STR;
blk_clo[0].off = clo_field_offset(struct blk_args, type_str);
blk_clo[0].def = "blk";
blk_clo[1].opt_short = 'w';
blk_clo[1].opt_long = "no-warmup";
blk_clo[1].descr = "Don't do warmup";
blk_clo[1].type = CLO_TYPE_FLAG;
blk_clo[1].off = clo_field_offset(struct blk_args, no_warmup);
blk_clo[2].opt_short = 'm';
blk_clo[2].opt_long = "mode";
blk_clo[2].descr = "Reading/writing mode - stat, seq, rand";
blk_clo[2].type = CLO_TYPE_STR;
blk_clo[2].off = clo_field_offset(struct blk_args, mode_str);
blk_clo[2].def = "seq";
blk_clo[3].opt_short = 'S';
blk_clo[3].opt_long = "seed";
blk_clo[3].descr = "Random seed";
blk_clo[3].off = clo_field_offset(struct blk_args, seed);
blk_clo[3].def = "1";
blk_clo[3].type = CLO_TYPE_UINT;
blk_clo[3].type_uint.size = clo_field_size(struct blk_args, seed);
blk_clo[3].type_uint.base = CLO_INT_BASE_DEC;
blk_clo[3].type_uint.min = 1;
blk_clo[3].type_uint.max = UINT_MAX;
blk_clo[4].opt_short = 's';
blk_clo[4].opt_long = "file-size";
blk_clo[4].descr = "Requested file size in bytes - 0 means minimum";
blk_clo[4].type = CLO_TYPE_UINT;
blk_clo[4].off = clo_field_offset(struct blk_args, fsize);
blk_clo[4].def = "0";
blk_clo[4].type_uint.size = clo_field_size(struct blk_args, fsize);
blk_clo[4].type_uint.base = CLO_INT_BASE_DEC;
blk_clo[4].type_uint.min = 0;
blk_clo[4].type_uint.max = ~0;
blk_read_info.name = "blk_read";
blk_read_info.brief = "Benchmark for blk_read() operation";
blk_read_info.init = blk_read_init;
blk_read_info.exit = blk_exit;
blk_read_info.multithread = true;
blk_read_info.multiops = true;
blk_read_info.init_worker = blk_init_worker;
blk_read_info.free_worker = blk_free_worker;
blk_read_info.operation = blk_operation;
blk_read_info.clos = blk_clo;
blk_read_info.nclos = ARRAY_SIZE(blk_clo);
blk_read_info.opts_size = sizeof(struct blk_args);
blk_read_info.rm_file = true;
blk_read_info.allow_poolset = true;
REGISTER_BENCHMARK(blk_read_info);
blk_write_info.name = "blk_write";
blk_write_info.brief = "Benchmark for blk_write() operation";
blk_write_info.init = blk_write_init;
blk_write_info.exit = blk_exit;
blk_write_info.multithread = true;
blk_write_info.multiops = true;
blk_write_info.init_worker = blk_init_worker;
blk_write_info.free_worker = blk_free_worker;
blk_write_info.operation = blk_operation;
blk_write_info.clos = blk_clo;
blk_write_info.nclos = ARRAY_SIZE(blk_clo);
blk_write_info.opts_size = sizeof(struct blk_args);
blk_write_info.rm_file = true;
blk_write_info.allow_poolset = true;
REGISTER_BENCHMARK(blk_write_info);
}

/* file: NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/benchmarks/benchmark_worker.cpp */
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* benchmark_worker.cpp -- benchmark_worker module definitions
*/
#include <cassert>
#include <err.h>
#include "benchmark_worker.hpp"
#include "sys_util.h"
/*
* worker_state_wait_for_transition -- wait for transition from and to
* specified states
*/
static void
worker_state_wait_for_transition(struct benchmark_worker *worker,
enum benchmark_worker_state state,
enum benchmark_worker_state new_state)
{
while (worker->state == state)
os_cond_wait(&worker->cond, &worker->lock);
assert(worker->state == new_state);
}
/*
* worker_state_transition -- change worker state from and to specified states
*/
static void
worker_state_transition(struct benchmark_worker *worker,
enum benchmark_worker_state old_state,
enum benchmark_worker_state new_state)
{
assert(worker->state == old_state);
worker->state = new_state;
os_cond_signal(&worker->cond);
}
/*
* thread_func -- (internal) callback for os_thread
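 *
 * The worker thread advances through a linear state machine, synchronized
 * with the control thread via worker->lock and worker->cond:
 *
 *   IDLE -> INIT -> INITIALIZED -> RUN -> END -> EXIT -> DONE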
*/
static void *
thread_func(void *arg)
{
assert(arg != nullptr);
auto *worker = (struct benchmark_worker *)arg;
util_mutex_lock(&worker->lock);
worker_state_wait_for_transition(worker, WORKER_STATE_IDLE,
WORKER_STATE_INIT);
if (worker->init)
worker->ret_init = worker->init(worker->bench, worker->args,
&worker->info);
worker_state_transition(worker, WORKER_STATE_INIT,
WORKER_STATE_INITIALIZED);
if (worker->ret_init) {
util_mutex_unlock(&worker->lock);
return nullptr;
}
worker_state_wait_for_transition(worker, WORKER_STATE_INITIALIZED,
WORKER_STATE_RUN);
worker->ret = worker->func(worker->bench, &worker->info);
worker_state_transition(worker, WORKER_STATE_RUN, WORKER_STATE_END);
worker_state_wait_for_transition(worker, WORKER_STATE_END,
WORKER_STATE_EXIT);
if (worker->exit)
worker->exit(worker->bench, worker->args, &worker->info);
worker_state_transition(worker, WORKER_STATE_EXIT, WORKER_STATE_DONE);
util_mutex_unlock(&worker->lock);
return nullptr;
}
/*
* benchmark_worker_alloc -- allocate benchmark worker
*/
struct benchmark_worker *
benchmark_worker_alloc(void)
{
struct benchmark_worker *w =
(struct benchmark_worker *)calloc(1, sizeof(*w));
if (!w)
return nullptr;
util_mutex_init(&w->lock);
if (os_cond_init(&w->cond))
goto err_destroy_mutex;
if (os_thread_create(&w->thread, nullptr, thread_func, w))
goto err_destroy_cond;
return w;
err_destroy_cond:
os_cond_destroy(&w->cond);
err_destroy_mutex:
util_mutex_destroy(&w->lock);
free(w);
return nullptr;
}
/*
* benchmark_worker_free -- release benchmark worker
*/
void
benchmark_worker_free(struct benchmark_worker *w)
{
os_thread_join(&w->thread, nullptr);
os_cond_destroy(&w->cond);
util_mutex_destroy(&w->lock);
free(w);
}
/*
* benchmark_worker_init -- call init function for worker
*/
int
benchmark_worker_init(struct benchmark_worker *worker)
{
util_mutex_lock(&worker->lock);
worker_state_transition(worker, WORKER_STATE_IDLE, WORKER_STATE_INIT);
worker_state_wait_for_transition(worker, WORKER_STATE_INIT,
WORKER_STATE_INITIALIZED);
int ret = worker->ret_init;
util_mutex_unlock(&worker->lock);
return ret;
}
/*
* benchmark_worker_exit -- call exit function for worker
*/
void
benchmark_worker_exit(struct benchmark_worker *worker)
{
util_mutex_lock(&worker->lock);
worker_state_transition(worker, WORKER_STATE_END, WORKER_STATE_EXIT);
worker_state_wait_for_transition(worker, WORKER_STATE_EXIT,
WORKER_STATE_DONE);
util_mutex_unlock(&worker->lock);
}
/*
* benchmark_worker_run -- run benchmark worker
*/
int
benchmark_worker_run(struct benchmark_worker *worker)
{
int ret = 0;
util_mutex_lock(&worker->lock);
worker_state_transition(worker, WORKER_STATE_INITIALIZED,
WORKER_STATE_RUN);
util_mutex_unlock(&worker->lock);
return ret;
}
/*
* benchmark_worker_join -- join benchmark worker
*/
int
benchmark_worker_join(struct benchmark_worker *worker)
{
util_mutex_lock(&worker->lock);
worker_state_wait_for_transition(worker, WORKER_STATE_RUN,
WORKER_STATE_END);
util_mutex_unlock(&worker->lock);
return 0;
}

/* file: NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/benchmarks/benchmark_time.cpp */
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* benchmark_time.cpp -- benchmark_time module definitions
*/
#include "benchmark_time.hpp"
#include "os.h"
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#define NSECPSEC 1000000000
/*
* benchmark_time_get -- get timestamp from clock source
*/
void
benchmark_time_get(benchmark_time_t *time)
{
os_clock_gettime(CLOCK_MONOTONIC, time);
}
/*
* benchmark_time_diff -- get time interval
*/
void
benchmark_time_diff(benchmark_time_t *d, benchmark_time_t *t1,
benchmark_time_t *t2)
{
long long nsecs = (t2->tv_sec - t1->tv_sec) * NSECPSEC + t2->tv_nsec -
t1->tv_nsec;
assert(nsecs >= 0);
d->tv_sec = nsecs / NSECPSEC;
d->tv_nsec = nsecs % NSECPSEC;
}
/*
* benchmark_time_get_secs -- get total number of seconds
*/
double
benchmark_time_get_secs(benchmark_time_t *t)
{
return (double)t->tv_sec + (double)t->tv_nsec / NSECPSEC;
}
/*
* benchmark_time_get_nsecs -- get total number of nanoseconds
*/
unsigned long long
benchmark_time_get_nsecs(benchmark_time_t *t)
{
unsigned long long ret = t->tv_nsec;
ret += t->tv_sec * NSECPSEC;
return ret;
}
/*
* benchmark_time_compare -- compare two moments in time
*/
int
benchmark_time_compare(const benchmark_time_t *t1, const benchmark_time_t *t2)
{
if (t1->tv_sec == t2->tv_sec)
return (int)((long long)t1->tv_nsec - (long long)t2->tv_nsec);
else
return (int)((long long)t1->tv_sec - (long long)t2->tv_sec);
}
/*
* benchmark_time_set -- set time using number of nanoseconds
*/
void
benchmark_time_set(benchmark_time_t *time, unsigned long long nsecs)
{
time->tv_sec = nsecs / NSECPSEC;
time->tv_nsec = nsecs % NSECPSEC;
}
/*
* number of samples used to calculate average time required to get a current
* time from the system
*/
#define N_PROBES_GET_TIME 10000000UL
/*
* benchmark_get_avg_get_time -- calculates average time required to get the
* current time from the system in nanoseconds
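 *
 * Worked example (illustrative numbers only): if the loop of
 * N_PROBES_GET_TIME = 10^7 probes takes 0.25 s in total, the reported
 * average is 0.25 s / 10^7 = 25 ns per benchmark_time_get() call.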
*/
unsigned long long
benchmark_get_avg_get_time(void)
{
benchmark_time_t time;
benchmark_time_t start;
benchmark_time_t stop;
benchmark_time_get(&start);
for (size_t i = 0; i < N_PROBES_GET_TIME; i++) {
benchmark_time_get(&time);
}
benchmark_time_get(&stop);
benchmark_time_diff(&time, &start, &stop);
unsigned long long avg =
benchmark_time_get_nsecs(&time) / N_PROBES_GET_TIME;
return avg;
}

/* file: NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/benchmarks/benchmark_empty.cpp */
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* benchmark_empty.cpp -- empty template for benchmarks
*/
#include <cassert>
#include <cerrno>
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <unistd.h>
#include "benchmark.hpp"
#include "file.h"
#include "libpmemobj.h"
/*
* prog_args -- benchmark specific command line options
*/
struct prog_args {
int my_value;
};
/*
* obj_bench -- benchmark context
*/
struct obj_bench {
struct prog_args *pa; /* prog_args structure */
};
/*
* benchmark_empty_op -- actual benchmark operation
*/
static int
benchmark_empty_op(struct benchmark *bench, struct operation_info *info)
{
return 0;
}
/*
* benchmark_empty_init -- initialization function
*/
static int
benchmark_empty_init(struct benchmark *bench, struct benchmark_args *args)
{
assert(bench != nullptr);
assert(args != nullptr);
assert(args->opts != nullptr);
return 0;
}
/*
* benchmark_empty_exit -- benchmark cleanup function
*/
static int
benchmark_empty_exit(struct benchmark *bench, struct benchmark_args *args)
{
return 0;
}
static struct benchmark_clo benchmark_empty_clo[0];
/* Stores information about benchmark. */
static struct benchmark_info benchmark_empty_info;
CONSTRUCTOR(benchmark_empty_constructor)
void
benchmark_empty_constructor(void)
{
benchmark_empty_info.name = "benchmark_empty";
benchmark_empty_info.brief = "Benchmark for benchmark_empty() "
"operation";
benchmark_empty_info.init = benchmark_empty_init;
benchmark_empty_info.exit = benchmark_empty_exit;
benchmark_empty_info.multithread = true;
benchmark_empty_info.multiops = true;
benchmark_empty_info.operation = benchmark_empty_op;
benchmark_empty_info.measure_time = true;
benchmark_empty_info.clos = benchmark_empty_clo;
benchmark_empty_info.nclos = ARRAY_SIZE(benchmark_empty_clo);
benchmark_empty_info.opts_size = sizeof(struct prog_args);
benchmark_empty_info.rm_file = true;
benchmark_empty_info.allow_poolset = true;
REGISTER_BENCHMARK(benchmark_empty_info);
};

/* file: NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/benchmarks/obj_lanes.cpp */
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* obj_lanes.cpp -- lane benchmark definition
*/
#include <cassert>
#include <cerrno>
#include <unistd.h>
#include "benchmark.hpp"
#include "file.h"
#include "libpmemobj.h"
/* an internal libpmemobj code */
#include "lane.h"
/*
 * The number of times to repeat the operation, used to get more accurate
 * results, because a single operation's time is negligible compared to the
 * framework overhead.
*/
#define OPERATION_REPEAT_COUNT 10000
/*
* obj_bench - variables used in benchmark, passed within functions
*/
struct obj_bench {
PMEMobjpool *pop; /* persistent pool handle */
struct prog_args *pa; /* prog_args structure */
};
/*
* lanes_init -- benchmark initialization
*/
static int
lanes_init(struct benchmark *bench, struct benchmark_args *args)
{
assert(bench != nullptr);
assert(args != nullptr);
assert(args->opts != nullptr);
enum file_type type = util_file_get_type(args->fname);
if (type == OTHER_ERROR) {
fprintf(stderr, "could not check type of file %s\n",
args->fname);
return -1;
}
auto *ob = (struct obj_bench *)malloc(sizeof(struct obj_bench));
if (ob == nullptr) {
perror("malloc");
return -1;
}
pmembench_set_priv(bench, ob);
ob->pa = (struct prog_args *)args->opts;
size_t psize;
if (args->is_poolset || type == TYPE_DEVDAX)
psize = 0;
else
psize = PMEMOBJ_MIN_POOL;
/* create pmemobj pool */
ob->pop = pmemobj_create(args->fname, "obj_lanes", psize, args->fmode);
if (ob->pop == nullptr) {
fprintf(stderr, "%s\n", pmemobj_errormsg());
goto err;
}
return 0;
err:
free(ob);
return -1;
}
/*
* lanes_exit -- benchmark clean up
*/
static int
lanes_exit(struct benchmark *bench, struct benchmark_args *args)
{
auto *ob = (struct obj_bench *)pmembench_get_priv(bench);
pmemobj_close(ob->pop);
free(ob);
return 0;
}
/*
* lanes_op -- performs the lane hold and release operations
*/
static int
lanes_op(struct benchmark *bench, struct operation_info *info)
{
auto *ob = (struct obj_bench *)pmembench_get_priv(bench);
struct lane *lane;
for (int i = 0; i < OPERATION_REPEAT_COUNT; i++) {
lane_hold(ob->pop, &lane);
lane_release(ob->pop);
}
return 0;
}
static struct benchmark_info lanes_info;
CONSTRUCTOR(obj_lanes_constructor)
void
obj_lanes_constructor(void)
{
lanes_info.name = "obj_lanes";
lanes_info.brief = "Benchmark for internal lanes "
"operation";
lanes_info.init = lanes_init;
lanes_info.exit = lanes_exit;
lanes_info.multithread = true;
lanes_info.multiops = true;
lanes_info.operation = lanes_op;
lanes_info.measure_time = true;
lanes_info.clos = NULL;
lanes_info.nclos = 0;
lanes_info.opts_size = 0;
lanes_info.rm_file = true;
lanes_info.allow_poolset = true;
REGISTER_BENCHMARK(lanes_info);
}

/* file: NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/benchmarks/pmemobj_tx_add_range.cpp */
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* pmemobj_tx_add_range.cpp -- pmemobj_tx_add_range benchmarks definition
*/
#include <cassert>
#include <cerrno>
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fcntl.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <unistd.h>
#include "benchmark.hpp"
#include "file.h"
#include "libpmemobj.h"
#define LAYOUT_NAME "tx_add_range_benchmark"
/*
* POOL_SIZE_COEFFICIENT -- pool has to hold every allocated object with
* its snapshot (1 + 1), plus 0.5 because of fragmentation
*/
#define POOL_SIZE_COEFFICIENT (1 + 1 + 0.5)
/*
* MAX_ALLOC_SIZE -- maximum size of one allocation (128 MiB)
*/
#define MAX_ALLOC_SIZE (1024 * 1024 * 128)
/*
* ranged_obj -- ranged object
*/
struct ranged_obj {
void *ptr; /* address of allocated object */
size_t size; /* size of allocated object */
};
/*
* obj_bench_args -- benchmark specific command line options
*/
struct obj_bench_args {
uint64_t nranges; /* number of allocated objects */
bool shuffle_objs; /* shuffles the array of allocated objects */
};
/*
* obj_bench -- benchmark context
*/
struct obj_bench {
PMEMobjpool *pop; /* persistent pool handle */
struct ranged_obj *ranges; /* array of ranges */
size_t obj_size; /* size of a single range */
uint64_t nranges; /* number of ranges */
uint64_t nallocs; /* number of allocations */
bool shuffle_objs; /* shuffles array of ranges */
rng_t rng; /* PRNG */
};
/*
* shuffle_ranges -- randomly shuffles elements in an array
 * to avoid a sequential access pattern in the transaction loop
*/
static void
shuffle_ranges(struct ranged_obj *ranged, uint64_t nranges, rng_t *rng)
{
struct ranged_obj tmp;
uint64_t dest;
for (uint64_t n = 0; n < nranges; ++n) {
dest = RRAND_R(rng, nranges - 1, 0);
tmp = ranged[n];
ranged[n] = ranged[dest];
ranged[dest] = tmp;
}
}
/*
* init_ranges -- allocate persistent objects and carve ranges from them
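 *
 * Each MAX_ALLOC_SIZE (128 MiB) allocation is carved into
 * MAX_ALLOC_SIZE / obj_size consecutive ranges; e.g. (illustrative)
 * with obj_size = 1 MiB a single allocation yields 128 ranges.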
*/
static int
init_ranges(struct obj_bench *ob)
{
assert(ob->nranges != 0);
ob->ranges = (struct ranged_obj *)malloc((ob->nranges) *
sizeof(struct ranged_obj));
if (!ob->ranges) {
perror("malloc");
return -1;
}
size_t nranges_per_object = MAX_ALLOC_SIZE / ob->obj_size;
for (size_t i = 0, n = 0; n < ob->nranges && i < ob->nallocs; i++) {
PMEMoid oid;
if (pmemobj_alloc(ob->pop, &oid, MAX_ALLOC_SIZE, 0, nullptr,
nullptr)) {
perror("pmemobj_alloc");
goto err;
}
for (size_t j = 0; j < nranges_per_object; j++) {
void *ptr = (char *)pmemobj_direct(oid) +
(j * ob->obj_size);
struct ranged_obj range = {ptr, ob->obj_size};
ob->ranges[n++] = range;
if (n == ob->nranges)
break;
}
}
if (ob->shuffle_objs == true)
shuffle_ranges(ob->ranges, ob->nranges, &ob->rng);
return 0;
err:
free(ob->ranges);
return -1;
}
/*
* tx_add_range_init -- initialization function
*/
static int
tx_add_range_init(struct benchmark *bench, struct benchmark_args *args)
{
assert(bench != nullptr);
assert(args != nullptr);
assert(args->opts != nullptr);
struct obj_bench_args *bargs = (struct obj_bench_args *)args->opts;
enum file_type type = util_file_get_type(args->fname);
if (type == OTHER_ERROR) {
fprintf(stderr, "could not check type of file %s\n",
args->fname);
return -1;
}
auto *ob = (struct obj_bench *)malloc(sizeof(struct obj_bench));
if (ob == nullptr) {
perror("malloc");
return -1;
}
/* calculate the number of MAX_ALLOC_SIZE allocations needed */
ob->nallocs = (args->dsize * bargs->nranges / MAX_ALLOC_SIZE) + 1;
size_t pool_size;
if (args->is_poolset || type == TYPE_DEVDAX)
pool_size = 0;
else {
pool_size =
ob->nallocs * MAX_ALLOC_SIZE * POOL_SIZE_COEFFICIENT;
}
/* create pmemobj pool */
ob->pop = pmemobj_create(args->fname, LAYOUT_NAME, pool_size,
args->fmode);
if (ob->pop == nullptr) {
fprintf(stderr, "%s\n", pmemobj_errormsg());
goto err;
}
ob->nranges = bargs->nranges;
ob->obj_size = args->dsize;
ob->shuffle_objs = bargs->shuffle_objs;
randomize_r(&ob->rng, args->seed);
if (init_ranges(ob))
goto err_pop_close;
pmembench_set_priv(bench, ob);
return 0;
err_pop_close:
pmemobj_close(ob->pop);
err:
free(ob);
return -1;
}
/*
* tx_add_range_op -- actual benchmark operation
*/
static int
tx_add_range_op(struct benchmark *bench, struct operation_info *info)
{
auto *ob = (struct obj_bench *)pmembench_get_priv(bench);
int ret = 0;
TX_BEGIN(ob->pop)
{
for (size_t i = 0; i < ob->nranges; i++) {
struct ranged_obj *r = &ob->ranges[i];
pmemobj_tx_add_range_direct(r->ptr, r->size);
}
}
TX_ONABORT
{
fprintf(stderr, "transaction failed\n");
ret = -1;
}
TX_END
return ret;
}
/*
* tx_add_range_exit -- benchmark cleanup function
*/
static int
tx_add_range_exit(struct benchmark *bench, struct benchmark_args *args)
{
auto *ob = (struct obj_bench *)pmembench_get_priv(bench);
pmemobj_close(ob->pop);
free(ob->ranges);
free(ob);
return 0;
}
static struct benchmark_clo tx_add_range_clo[2];
/* Stores information about benchmark. */
static struct benchmark_info tx_add_range_info;
CONSTRUCTOR(tx_add_range_constructor)
void
tx_add_range_constructor(void)
{
tx_add_range_clo[0].opt_short = 0;
tx_add_range_clo[0].opt_long = "num-of-ranges";
tx_add_range_clo[0].descr = "Number of ranges";
tx_add_range_clo[0].def = "1000";
tx_add_range_clo[0].off =
clo_field_offset(struct obj_bench_args, nranges);
tx_add_range_clo[0].type = CLO_TYPE_UINT;
tx_add_range_clo[0].type_uint.size =
clo_field_size(struct obj_bench_args, nranges);
tx_add_range_clo[0].type_uint.base = CLO_INT_BASE_DEC;
tx_add_range_clo[0].type_uint.min = 1;
tx_add_range_clo[0].type_uint.max = ULONG_MAX;
tx_add_range_clo[1].opt_short = 's';
tx_add_range_clo[1].opt_long = "shuffle";
tx_add_range_clo[1].descr =
"Use shuffle objects - "
"randomly shuffles array of allocated objects";
tx_add_range_clo[1].def = "false";
tx_add_range_clo[1].off =
clo_field_offset(struct obj_bench_args, shuffle_objs);
tx_add_range_clo[1].type = CLO_TYPE_FLAG;
tx_add_range_info.name = "pmemobj_tx_add_range";
tx_add_range_info.brief = "Benchmark for pmemobj_tx_add_range() "
"operation";
tx_add_range_info.init = tx_add_range_init;
tx_add_range_info.exit = tx_add_range_exit;
tx_add_range_info.multithread = true;
tx_add_range_info.multiops = true;
tx_add_range_info.operation = tx_add_range_op;
tx_add_range_info.measure_time = true;
tx_add_range_info.clos = tx_add_range_clo;
tx_add_range_info.nclos = ARRAY_SIZE(tx_add_range_clo);
tx_add_range_info.opts_size = sizeof(struct obj_bench_args);
tx_add_range_info.rm_file = true;
tx_add_range_info.allow_poolset = true;
REGISTER_BENCHMARK(tx_add_range_info);
};

/* file: NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/benchmarks/benchmark.hpp */
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* benchmark.hpp -- This file contains interface for creating benchmarks to the
* pmembench framework. The _most_ important data structure is
* struct benchmark_info which should be properly filled and registered by the
* benchmark. Some fields should be filled by meta-data and information about
* the benchmark like: name, brief description, supported operation modes etc.
* The other group of fields are function callbacks which may be implemented by
* the benchmark. Some callbacks are required, others are optional. This is
* indicated in the structure description.
*
* To register a benchmark you can use the special macro
* REGISTER_BENCHMARK() which takes static benchmark_info data structure as an
* argument. You can also use the pmembench_register() function. Please note
* that registering a benchmark should be done at initialization time. You can
* achieve this by specifying pmembench_init macro in function attributes:
*
* static void pmembench_init my_benchmark_init()
* {
* pmembench_register(&my_benchmark);
* }
*
* However using the REGISTER_BENCHMARK() macro is recommended.
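 *
 * A minimal sketch using the macro (hypothetical names, following the
 * pattern of the constructors in this directory, e.g. blk_constructor()):
 *
 * static struct benchmark_info my_benchmark_info;
 *
 * CONSTRUCTOR(my_benchmark_constructor)
 * void my_benchmark_constructor(void)
 * {
 *         my_benchmark_info.name = "my_benchmark";
 *         REGISTER_BENCHMARK(my_benchmark_info);
 * }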
*/
#ifndef _BENCHMARK_H
#define _BENCHMARK_H
#include <climits>
#include <cstdbool>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include "benchmark_time.hpp"
#include "os.h"
#include "rand.h"
#include "util.h"
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
#endif
#define RRAND(max, min) (rand() % ((max) - (min)) + (min))
#define RRAND_R(rng, max, min) (rnd64_r(rng) % ((max) - (min)) + (min))
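/*
 * Both macros yield a value in the half-open range [min, max); RRAND_R
 * draws from the given per-worker RNG state instead of rand().
 */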
struct benchmark;
/*
* benchmark_args - Arguments for benchmark.
*
* It contains set of common arguments and pointer to benchmark's specific
* arguments which are automatically processed by framework according to
* clos, nclos and opt_size in benchmark_info structure.
*/
struct benchmark_args {
const char *fname; /* path to test file */
size_t fsize; /* size of test file */
bool is_poolset; /* test file is a poolset */
	bool is_dynamic_poolset; /* test file is a directory in which
				    the benchmark creates reusable files */
mode_t fmode; /* test file's permissions */
unsigned n_threads; /* number of working threads */
size_t n_ops_per_thread; /* number of operations per thread */
bool thread_affinity; /* set worker threads CPU affinity mask */
ssize_t main_affinity; /* main thread affinity */
char *affinity_list; /* set CPU affinity order */
size_t dsize; /* data size */
unsigned seed; /* PRNG seed */
unsigned repeats; /* number of repeats of one scenario */
unsigned min_exe_time; /* minimal execution time */
bool help; /* print help for benchmark */
void *opts; /* benchmark specific arguments */
};
/*
* benchmark_results - Benchmark's execution results.
*/
struct benchmark_results {
uint64_t nbytes; /* number of bytes processed */
uint64_t nops; /* number of operations executed */
benchmark_time_t time; /* total execution time */
};
/*
* struct results -- statistics for total measurements
*/
struct results {
double min;
double max;
double avg;
double std_dev;
double med;
};
/*
* struct latency -- statistics for latency measurements
*/
struct latency {
uint64_t max;
uint64_t min;
uint64_t avg;
double std_dev;
uint64_t pctl50_0p;
uint64_t pctl99_0p;
uint64_t pctl99_9p;
};
/*
* struct thread_results -- results of a single thread
*/
struct thread_results {
benchmark_time_t beg;
benchmark_time_t end;
benchmark_time_t end_op[];
};
/*
* struct bench_results -- results of the whole benchmark
*/
struct bench_results {
struct thread_results **thres;
};
/*
* struct total_results -- results and statistics of the whole benchmark
*/
struct total_results {
size_t nrepeats;
size_t nthreads;
size_t nops;
double nopsps;
struct results total;
struct latency latency;
struct bench_results *res;
};
/*
* Command Line Option integer value base.
*/
#define CLO_INT_BASE_NONE 0x0
#define CLO_INT_BASE_DEC 0x1
#define CLO_INT_BASE_HEX 0x2
#define CLO_INT_BASE_OCT 0x4
/*
* Command Line Option type.
*/
enum clo_type {
CLO_TYPE_FLAG,
CLO_TYPE_STR,
CLO_TYPE_INT,
CLO_TYPE_UINT,
CLO_TYPE_MAX,
};
/*
* Description of command line option.
*
* This structure is used to declare command line options by the benchmark
* which will be automatically parsed by the framework.
*
* opt_short : Short option char. If there is no short option write 0.
* opt_long : Long option string.
* descr : Description of command line option.
* off : Offset in data structure in which the value should be stored.
* type : Type of command line option.
* def : Default value. If set to NULL, this options is required.
* ignore_in_res: Do not print in results.
* check : Optional callback for checking the command line option value.
* type_int : Parameters for signed integer.
* type_uint : Parameters for unsigned integer.
* type_str : Parameters for string.
*
* size : Size of integer value. Valid values: 1, 2, 4, 8.
* base : Integer base system from which the parsing should be
* performed. This field may be used as bit mask by logically
* adding different base types.
* limit_min : Indicates whether value should be limited by the minimum
* value.
* limit_max : Indicates whether value should be limited by the maximum
* value.
* min : Minimum value when limit_min is set.
 * max : Maximum value when limit_max is set.
*
* alloc : If set to true the framework should allocate memory for the
* value. The memory will be freed by the framework at the end of
* execution. Otherwise benchmark must provide valid pointer in
* opt_var and max_size parameter must be set properly.
* max_size : Maximum size of string.
*/
struct benchmark_clo {
int opt_short;
const char *opt_long;
enum clo_type type;
const char *descr;
size_t off;
const char *def;
bool ignore_in_res;
struct {
size_t size;
int base;
int64_t min;
int64_t max;
} type_int;
struct {
size_t size;
int base;
uint64_t min;
uint64_t max;
} type_uint;
int used;
};
#define clo_field_offset(s, f) ((size_t) & ((s *)0)->f)
#define clo_field_size(s, f) (sizeof(((s *)0)->f))
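/*
 * Declaration sketch (hypothetical option and struct names; cf.
 * blk_constructor() in blk.cpp for a complete, real declaration):
 *
 *	static struct benchmark_clo my_clo[1];
 *	my_clo[0].opt_short = 'm';
 *	my_clo[0].opt_long = "mode";
 *	my_clo[0].descr = "Operating mode";
 *	my_clo[0].type = CLO_TYPE_STR;
 *	my_clo[0].off = clo_field_offset(struct my_args, mode_str);
 *	my_clo[0].def = "seq";
 */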
/*
* worker_info - Worker thread's information structure.
*/
struct worker_info {
size_t index; /* index of worker thread */
struct operation_info *opinfo; /* operation info structure */
size_t nops; /* number of operations */
void *priv; /* worker's private data */
benchmark_time_t beg; /* start time */
benchmark_time_t end; /* end time */
};
/*
* operation_info - Information about operation.
*/
struct operation_info {
struct worker_info *worker; /* worker's info */
struct benchmark_args *args; /* benchmark arguments */
size_t index; /* operation's index */
benchmark_time_t end; /* operation's end time */
};
/*
* struct benchmark_info -- benchmark descriptor
* name : Name of benchmark.
* brief : Brief description of benchmark.
* clos : Command line options which will be automatically parsed by
* framework.
* nclos : Number of command line options.
* opts_size : Size of data structure where the parsed values should be
* stored in.
* print_help : Callback for printing help message.
* pre_init : Function for initialization of the benchmark before parsing
* command line arguments.
* init : Function for initialization of the benchmark after parsing
* command line arguments.
* exit : Function for de-initialization of the benchmark.
* multithread : Indicates whether the benchmark operation function may be
* run in many threads.
* multiops : Indicates whether the benchmark operation function may be
* run many time in a loop.
* measure_time : Indicates whether the benchmark framework should measure the
* execution time of operation function. If set to false, the
* benchmark must report the execution time by itself.
* init_worker : Callback for initialization thread specific data. Invoked in
* the worker thread but globally serialized.
* operation : Callback function which does the main job of benchmark.
* rm_file : Indicates whether the test file should be removed by
* framework before the init function will be called.
* allow_poolset: Indicates whether benchmark may use poolset files.
* If set to false and fname points to a poolset, an error
* will be returned.
 * According to the multithread and multiops flags, the operation callback
 * may be invoked in different ways:
* +-------------+----------+-------------------------------------+
* | multithread | multiops | description |
* +-------------+----------+-------------------------------------+
* | false | false | invoked once, in one thread |
* +-------------+----------+-------------------------------------+
* | false | true | invoked many times, in one thread |
* +-------------+----------+-------------------------------------+
* | true | false | invoked once, in many threads |
* +-------------+----------+-------------------------------------+
* | true | true | invoked many times, in many threads |
* +-------------+----------+-------------------------------------+
*
*/
struct benchmark_info {
const char *name;
const char *brief;
struct benchmark_clo *clos;
size_t nclos;
size_t opts_size;
void (*print_help)(struct benchmark *bench);
int (*pre_init)(struct benchmark *bench);
int (*init)(struct benchmark *bench, struct benchmark_args *args);
int (*exit)(struct benchmark *bench, struct benchmark_args *args);
int (*init_worker)(struct benchmark *bench, struct benchmark_args *args,
struct worker_info *worker);
void (*free_worker)(struct benchmark *bench,
struct benchmark_args *args,
struct worker_info *worker);
int (*operation)(struct benchmark *bench, struct operation_info *info);
void (*print_extra_headers)();
void (*print_extra_values)(struct benchmark *bench,
struct benchmark_args *args,
struct total_results *res);
bool multithread;
bool multiops;
bool measure_time;
bool rm_file;
bool allow_poolset;
bool print_bandwidth;
};
void *pmembench_get_priv(struct benchmark *bench);
void pmembench_set_priv(struct benchmark *bench, void *priv);
struct benchmark_info *pmembench_get_info(struct benchmark *bench);
int pmembench_register(struct benchmark_info *bench_info);
#define REGISTER_BENCHMARK(bench) \
if (pmembench_register(&(bench))) { \
fprintf(stderr, "Unable to register benchmark '%s'\n", \
(bench).name); \
}
#endif /* _BENCHMARK_H */

/* file: NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/benchmarks/obj_locks.cpp */
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_locks.cpp -- main source file for PMEM locks benchmark
*/
#include <cassert>
#include <cerrno>
#include "benchmark.hpp"
#include "libpmemobj.h"
#include "file.h"
#include "lane.h"
#include "list.h"
#include "memops.h"
#include "obj.h"
#include "os_thread.h"
#include "out.h"
#include "pmalloc.h"
#include "sync.h"
struct prog_args {
bool use_system_threads; /* use system locks instead of PMEM locks */
unsigned n_locks; /* number of mutex/rwlock objects */
bool run_id_increment; /* increment run_id after each lock/unlock */
uint64_t runid_initial_value; /* initial value of run_id */
char *lock_mode; /* "1by1" or "all-lock" */
char *lock_type; /* "mutex", "rwlock" or "ram-mutex" */
	bool use_rdlock; /* use read lock instead of write lock */
};
/*
* mutex similar to PMEMmutex, but with os_mutex_t in RAM
*/
typedef union padded_volatile_pmemmutex {
char padding[_POBJ_CL_SIZE];
struct {
uint64_t runid;
os_mutex_t *mutexp; /* pointer to os_thread mutex in RAM */
} volatile_pmemmutex;
} PMEM_volatile_mutex;
typedef union lock_union {
PMEMmutex pm_mutex;
PMEMrwlock pm_rwlock;
PMEM_volatile_mutex pm_vmutex;
os_mutex_t pt_mutex;
os_rwlock_t pt_rwlock;
} lock_t;
POBJ_LAYOUT_BEGIN(pmembench_lock_layout);
POBJ_LAYOUT_ROOT(pmembench_lock_layout, struct my_root);
POBJ_LAYOUT_TOID(pmembench_lock_layout, lock_t);
POBJ_LAYOUT_END(pmembench_lock_layout);
/*
* my_root -- root object structure
*/
struct my_root {
TOID(lock_t) locks; /* an array of locks */
};
/*
* lock usage
*/
enum operation_mode {
OP_MODE_1BY1, /* lock and unlock one lock at a time */
OP_MODE_ALL_LOCK, /* grab all locks, then unlock them all */
OP_MODE_MAX,
};
/*
* lock type
*/
enum benchmark_mode {
BENCH_MODE_MUTEX, /* PMEMmutex vs. os_mutex_t */
BENCH_MODE_RWLOCK, /* PMEMrwlock vs. os_rwlock_t */
BENCH_MODE_VOLATILE_MUTEX, /* PMEMmutex with os_thread mutex in RAM */
BENCH_MODE_MAX
};
struct mutex_bench;
struct bench_ops {
int (*bench_init)(struct mutex_bench *);
int (*bench_exit)(struct mutex_bench *);
int (*bench_op)(struct mutex_bench *);
};
/*
* mutex_bench -- stores variables used in benchmark, passed within functions
*/
struct mutex_bench {
PMEMobjpool *pop; /* pointer to the persistent pool */
TOID(struct my_root) root; /* OID of the root object */
struct prog_args *pa; /* prog_args structure */
enum operation_mode lock_mode; /* lock usage mode */
enum benchmark_mode lock_type; /* lock type */
lock_t *locks; /* pointer to the array of locks */
struct bench_ops *ops;
};
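/*
 * GET_VOLATILE_MUTEX -- resolve (and lazily initialize) the RAM-resident
 * os_mutex_t behind a PMEM_volatile_mutex, via get_lock() below
 */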
#define GET_VOLATILE_MUTEX(pop, mutexp) \
(os_mutex_t *)get_lock( \
(pop)->run_id, &(mutexp)->volatile_pmemmutex.runid, \
(mutexp)->volatile_pmemmutex.mutexp, \
(int (*)(void **lock, void *arg))volatile_mutex_init)
typedef int (*lock_fun_wrapper)(PMEMobjpool *pop, void *lock);
/*
* bench_operation_1by1 -- acquire lock and unlock release locks
*/
static void
bench_operation_1by1(lock_fun_wrapper flock, lock_fun_wrapper funlock,
struct mutex_bench *mb, PMEMobjpool *pop)
{
for (unsigned i = 0; i < (mb)->pa->n_locks; (i)++) {
auto *o = (void *)(&(mb)->locks[i]);
flock(pop, o);
funlock(pop, o);
}
}
/*
* bench_operation_all_lock -- acquire all locks and release all locks
*/
static void
bench_operation_all_lock(lock_fun_wrapper flock, lock_fun_wrapper funlock,
struct mutex_bench *mb, PMEMobjpool *pop)
{
for (unsigned i = 0; i < (mb)->pa->n_locks; (i)++) {
auto *o = (void *)(&(mb)->locks[i]);
flock(pop, o);
}
for (unsigned i = 0; i < (mb)->pa->n_locks; i++) {
auto *o = (void *)(&(mb)->locks[i]);
funlock(pop, o);
}
}
/*
* get_lock -- atomically initialize and return a lock
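 *
 * The runid field encodes the lock's state relative to the pool's run_id:
 * runid == pop_runid means the lock is already initialized for this run;
 * runid == pop_runid - 1 marks initialization in progress (the slot is
 * claimed with a compare-and-swap before init_lock() is called); any other
 * value means the lock still needs (re)initialization.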
*/
static void *
get_lock(uint64_t pop_runid, volatile uint64_t *runid, void *lock,
int (*init_lock)(void **lock, void *arg))
{
uint64_t tmp_runid;
while ((tmp_runid = *runid) != pop_runid) {
if ((tmp_runid != (pop_runid - 1))) {
if (util_bool_compare_and_swap64(runid, tmp_runid,
(pop_runid - 1))) {
if (init_lock(&lock, nullptr)) {
util_fetch_and_and64(runid, 0);
return nullptr;
}
if (util_bool_compare_and_swap64(
runid, (pop_runid - 1),
pop_runid) == 0) {
return nullptr;
}
}
}
}
return lock;
}
/*
* volatile_mutex_init -- initialize the volatile mutex object
*
* Allocate memory for the os_thread mutex and initialize it.
* Set the runid to the same value as in the memory pool.
*/
static int
volatile_mutex_init(os_mutex_t **mutexp, void *attr)
{
if (*mutexp == nullptr) {
*mutexp = (os_mutex_t *)malloc(sizeof(os_mutex_t));
if (*mutexp == nullptr) {
perror("volatile_mutex_init alloc");
return ENOMEM;
}
}
return os_mutex_init(*mutexp);
}
/*
* volatile_mutex_lock -- initialize the mutex object if needed and lock it
*/
static int
volatile_mutex_lock(PMEMobjpool *pop, PMEM_volatile_mutex *mutexp)
{
auto *mutex = GET_VOLATILE_MUTEX(pop, mutexp);
if (mutex == nullptr)
return EINVAL;
return os_mutex_lock(mutex);
}
/*
* volatile_mutex_unlock -- unlock the mutex
*/
static int
volatile_mutex_unlock(PMEMobjpool *pop, PMEM_volatile_mutex *mutexp)
{
auto *mutex = (os_mutex_t *)GET_VOLATILE_MUTEX(pop, mutexp);
if (mutex == nullptr)
return EINVAL;
return os_mutex_unlock(mutex);
}
/*
* volatile_mutex_destroy -- destroy os_thread mutex and release memory
*/
static int
volatile_mutex_destroy(PMEMobjpool *pop, PMEM_volatile_mutex *mutexp)
{
auto *mutex = (os_mutex_t *)GET_VOLATILE_MUTEX(pop, mutexp);
if (mutex == nullptr)
return EINVAL;
int ret = os_mutex_destroy(mutex);
if (ret != 0)
return ret;
free(mutex);
return 0;
}
/*
* os_mutex_lock_wrapper -- wrapper for os_mutex_lock
*/
static int
os_mutex_lock_wrapper(PMEMobjpool *pop, void *lock)
{
return os_mutex_lock((os_mutex_t *)lock);
}
/*
* os_mutex_unlock_wrapper -- wrapper for os_mutex_unlock
*/
static int
os_mutex_unlock_wrapper(PMEMobjpool *pop, void *lock)
{
return os_mutex_unlock((os_mutex_t *)lock);
}
/*
* pmemobj_mutex_lock_wrapper -- wrapper for pmemobj_mutex_lock
*/
static int
pmemobj_mutex_lock_wrapper(PMEMobjpool *pop, void *lock)
{
return pmemobj_mutex_lock(pop, (PMEMmutex *)lock);
}
/*
* pmemobj_mutex_unlock_wrapper -- wrapper for pmemobj_mutex_unlock
*/
static int
pmemobj_mutex_unlock_wrapper(PMEMobjpool *pop, void *lock)
{
return pmemobj_mutex_unlock(pop, (PMEMmutex *)lock);
}
/*
* os_rwlock_wrlock_wrapper -- wrapper for os_rwlock_wrlock
*/
static int
os_rwlock_wrlock_wrapper(PMEMobjpool *pop, void *lock)
{
return os_rwlock_wrlock((os_rwlock_t *)lock);
}
/*
* os_rwlock_rdlock_wrapper -- wrapper for os_rwlock_rdlock
*/
static int
os_rwlock_rdlock_wrapper(PMEMobjpool *pop, void *lock)
{
return os_rwlock_rdlock((os_rwlock_t *)lock);
}
/*
* os_rwlock_unlock_wrapper -- wrapper for os_rwlock_unlock
*/
static int
os_rwlock_unlock_wrapper(PMEMobjpool *pop, void *lock)
{
return os_rwlock_unlock((os_rwlock_t *)lock);
}
/*
* pmemobj_rwlock_wrlock_wrapper -- wrapper for pmemobj_rwlock_wrlock
*/
static int
pmemobj_rwlock_wrlock_wrapper(PMEMobjpool *pop, void *lock)
{
return pmemobj_rwlock_wrlock(pop, (PMEMrwlock *)lock);
}
/*
* pmemobj_rwlock_rdlock_wrapper -- wrapper for pmemobj_rwlock_rdlock
*/
static int
pmemobj_rwlock_rdlock_wrapper(PMEMobjpool *pop, void *lock)
{
return pmemobj_rwlock_rdlock(pop, (PMEMrwlock *)lock);
}
/*
* pmemobj_rwlock_unlock_wrapper -- wrapper for pmemobj_rwlock_unlock
*/
static int
pmemobj_rwlock_unlock_wrapper(PMEMobjpool *pop, void *lock)
{
return pmemobj_rwlock_unlock(pop, (PMEMrwlock *)lock);
}
/*
* volatile_mutex_lock_wrapper -- wrapper for volatile_mutex_lock
*/
static int
volatile_mutex_lock_wrapper(PMEMobjpool *pop, void *lock)
{
return volatile_mutex_lock(pop, (PMEM_volatile_mutex *)lock);
}
/*
* volatile_mutex_unlock_wrapper -- wrapper for volatile_mutex_unlock
*/
static int
volatile_mutex_unlock_wrapper(PMEMobjpool *pop, void *lock)
{
return volatile_mutex_unlock(pop, (PMEM_volatile_mutex *)lock);
}
/*
* init_bench_mutex -- allocate and initialize mutex objects
*/
static int
init_bench_mutex(struct mutex_bench *mb)
{
POBJ_ZALLOC(mb->pop, &D_RW(mb->root)->locks, lock_t,
mb->pa->n_locks * sizeof(lock_t));
if (TOID_IS_NULL(D_RO(mb->root)->locks)) {
perror("POBJ_ZALLOC");
return -1;
}
struct my_root *root = D_RW(mb->root);
assert(root != nullptr);
mb->locks = D_RW(root->locks);
assert(mb->locks != nullptr);
if (!mb->pa->use_system_threads) {
/* initialize PMEM mutexes */
for (unsigned i = 0; i < mb->pa->n_locks; i++) {
auto *p = (PMEMmutex_internal *)&mb->locks[i];
p->pmemmutex.runid = mb->pa->runid_initial_value;
os_mutex_init(&p->PMEMmutex_lock);
}
} else {
/* initialize os_thread mutexes */
for (unsigned i = 0; i < mb->pa->n_locks; i++) {
auto *p = (os_mutex_t *)&mb->locks[i];
os_mutex_init(p);
}
}
return 0;
}
/*
* exit_bench_mutex -- destroy the mutex objects and release memory
*/
static int
exit_bench_mutex(struct mutex_bench *mb)
{
if (mb->pa->use_system_threads) {
/* deinitialize os_thread mutex objects */
for (unsigned i = 0; i < mb->pa->n_locks; i++) {
auto *p = (os_mutex_t *)&mb->locks[i];
os_mutex_destroy(p);
}
}
POBJ_FREE(&D_RW(mb->root)->locks);
return 0;
}
/*
* op_bench_mutex -- lock and unlock the mutex object
*
* If requested, increment the run_id of the memory pool. In case of PMEMmutex
* this will force the rwlock object(s) reinitialization at the lock operation.
*/
static int
op_bench_mutex(struct mutex_bench *mb)
{
if (!mb->pa->use_system_threads) {
if (mb->lock_mode == OP_MODE_1BY1) {
bench_operation_1by1(pmemobj_mutex_lock_wrapper,
pmemobj_mutex_unlock_wrapper, mb,
mb->pop);
} else {
bench_operation_all_lock(pmemobj_mutex_lock_wrapper,
pmemobj_mutex_unlock_wrapper,
mb, mb->pop);
}
if (mb->pa->run_id_increment)
mb->pop->run_id += 2; /* must be a multiple of 2 */
} else {
if (mb->lock_mode == OP_MODE_1BY1) {
bench_operation_1by1(os_mutex_lock_wrapper,
os_mutex_unlock_wrapper, mb,
nullptr);
} else {
bench_operation_all_lock(os_mutex_lock_wrapper,
os_mutex_unlock_wrapper, mb,
nullptr);
}
}
return 0;
}
/*
* init_bench_rwlock -- allocate and initialize rwlock objects
*/
static int
init_bench_rwlock(struct mutex_bench *mb)
{
struct my_root *root = D_RW(mb->root);
assert(root != nullptr);
POBJ_ZALLOC(mb->pop, &root->locks, lock_t,
mb->pa->n_locks * sizeof(lock_t));
if (TOID_IS_NULL(root->locks)) {
perror("POBJ_ZALLOC");
return -1;
}
mb->locks = D_RW(root->locks);
assert(mb->locks != nullptr);
if (!mb->pa->use_system_threads) {
/* initialize PMEM rwlocks */
for (unsigned i = 0; i < mb->pa->n_locks; i++) {
auto *p = (PMEMrwlock_internal *)&mb->locks[i];
p->pmemrwlock.runid = mb->pa->runid_initial_value;
os_rwlock_init(&p->PMEMrwlock_lock);
}
} else {
/* initialize os_thread rwlocks */
for (unsigned i = 0; i < mb->pa->n_locks; i++) {
auto *p = (os_rwlock_t *)&mb->locks[i];
os_rwlock_init(p);
}
}
return 0;
}
/*
* exit_bench_rwlock -- destroy the rwlocks and release memory
*/
static int
exit_bench_rwlock(struct mutex_bench *mb)
{
if (mb->pa->use_system_threads) {
/* deinitialize os_thread mutex objects */
for (unsigned i = 0; i < mb->pa->n_locks; i++) {
auto *p = (os_rwlock_t *)&mb->locks[i];
os_rwlock_destroy(p);
}
}
POBJ_FREE(&D_RW(mb->root)->locks);
return 0;
}
/*
* op_bench_rwlock -- lock and unlock the rwlock object
*
* If requested, increment the run_id of the memory pool. In case of PMEMrwlock
* this will force the rwlock object(s) reinitialization at the lock operation.
*/
static int
op_bench_rwlock(struct mutex_bench *mb)
{
if (!mb->pa->use_system_threads) {
if (mb->lock_mode == OP_MODE_1BY1) {
bench_operation_1by1(
!mb->pa->use_rdlock
? pmemobj_rwlock_wrlock_wrapper
: pmemobj_rwlock_rdlock_wrapper,
pmemobj_rwlock_unlock_wrapper, mb, mb->pop);
} else {
bench_operation_all_lock(
!mb->pa->use_rdlock
? pmemobj_rwlock_wrlock_wrapper
: pmemobj_rwlock_rdlock_wrapper,
pmemobj_rwlock_unlock_wrapper, mb, mb->pop);
}
if (mb->pa->run_id_increment)
mb->pop->run_id += 2; /* must be a multiple of 2 */
} else {
if (mb->lock_mode == OP_MODE_1BY1) {
bench_operation_1by1(
!mb->pa->use_rdlock ? os_rwlock_wrlock_wrapper
: os_rwlock_rdlock_wrapper,
os_rwlock_unlock_wrapper, mb, nullptr);
} else {
bench_operation_all_lock(
!mb->pa->use_rdlock ? os_rwlock_wrlock_wrapper
: os_rwlock_rdlock_wrapper,
os_rwlock_unlock_wrapper, mb, nullptr);
}
}
return 0;
}
/*
* init_bench_vmutex -- allocate and initialize mutexes
*/
static int
init_bench_vmutex(struct mutex_bench *mb)
{
struct my_root *root = D_RW(mb->root);
assert(root != nullptr);
POBJ_ZALLOC(mb->pop, &root->locks, lock_t,
mb->pa->n_locks * sizeof(lock_t));
if (TOID_IS_NULL(root->locks)) {
perror("POBJ_ZALLOC");
return -1;
}
mb->locks = D_RW(root->locks);
assert(mb->locks != nullptr);
/* initialize PMEM volatile mutexes */
for (unsigned i = 0; i < mb->pa->n_locks; i++) {
auto *p = (PMEM_volatile_mutex *)&mb->locks[i];
p->volatile_pmemmutex.runid = mb->pa->runid_initial_value;
volatile_mutex_init(&p->volatile_pmemmutex.mutexp, nullptr);
}
return 0;
}
/*
* exit_bench_vmutex -- destroy the mutex objects and release their
* memory
*/
static int
exit_bench_vmutex(struct mutex_bench *mb)
{
for (unsigned i = 0; i < mb->pa->n_locks; i++) {
auto *p = (PMEM_volatile_mutex *)&mb->locks[i];
volatile_mutex_destroy(mb->pop, p);
}
POBJ_FREE(&D_RW(mb->root)->locks);
return 0;
}
/*
 * op_bench_vmutex -- lock and unlock the volatile mutex object
*/
static int
op_bench_vmutex(struct mutex_bench *mb)
{
if (mb->lock_mode == OP_MODE_1BY1) {
bench_operation_1by1(volatile_mutex_lock_wrapper,
volatile_mutex_unlock_wrapper, mb,
mb->pop);
} else {
bench_operation_all_lock(volatile_mutex_lock_wrapper,
volatile_mutex_unlock_wrapper, mb,
mb->pop);
}
if (mb->pa->run_id_increment)
mb->pop->run_id += 2; /* must be a multiple of 2 */
return 0;
}
struct bench_ops benchmark_ops[BENCH_MODE_MAX] = {
{init_bench_mutex, exit_bench_mutex, op_bench_mutex},
{init_bench_rwlock, exit_bench_rwlock, op_bench_rwlock},
{init_bench_vmutex, exit_bench_vmutex, op_bench_vmutex}};
/*
 * parse_op_mode -- parses command line "--mode" and returns
* proper operation mode
*/
static enum operation_mode
parse_op_mode(const char *arg)
{
if (strcmp(arg, "1by1") == 0)
return OP_MODE_1BY1;
else if (strcmp(arg, "all-lock") == 0)
return OP_MODE_ALL_LOCK;
else
return OP_MODE_MAX;
}
/*
 * parse_benchmark_mode -- parses command line "--bench_type" and returns
* proper benchmark ops
*/
static struct bench_ops *
parse_benchmark_mode(const char *arg)
{
if (strcmp(arg, "mutex") == 0)
return &benchmark_ops[BENCH_MODE_MUTEX];
else if (strcmp(arg, "rwlock") == 0)
return &benchmark_ops[BENCH_MODE_RWLOCK];
else if (strcmp(arg, "volatile-mutex") == 0)
return &benchmark_ops[BENCH_MODE_VOLATILE_MUTEX];
else
return nullptr;
}
/*
* locks_init -- allocates persistent memory, maps it, creates the appropriate
* objects in the allocated memory and initializes them
*/
static int
locks_init(struct benchmark *bench, struct benchmark_args *args)
{
assert(bench != nullptr);
assert(args != nullptr);
enum file_type type = util_file_get_type(args->fname);
if (type == OTHER_ERROR) {
fprintf(stderr, "could not check type of file %s\n",
args->fname);
return -1;
}
int ret = 0;
size_t poolsize;
struct mutex_bench *mb = (struct mutex_bench *)malloc(sizeof(*mb));
if (mb == nullptr) {
perror("malloc");
return -1;
}
mb->pa = (struct prog_args *)args->opts;
mb->lock_mode = parse_op_mode(mb->pa->lock_mode);
if (mb->lock_mode >= OP_MODE_MAX) {
fprintf(stderr, "Invalid mutex mode: %s\n", mb->pa->lock_mode);
		errno = EINVAL;
		ret = -1;
		goto err_free_mb;
}
mb->ops = parse_benchmark_mode(mb->pa->lock_type);
if (mb->ops == nullptr) {
fprintf(stderr, "Invalid benchmark type: %s\n",
mb->pa->lock_type);
		errno = EINVAL;
		ret = -1;
		goto err_free_mb;
}
/* reserve some space for metadata */
poolsize = mb->pa->n_locks * sizeof(lock_t) + PMEMOBJ_MIN_POOL;
if (args->is_poolset || type == TYPE_DEVDAX) {
if (args->fsize < poolsize) {
fprintf(stderr, "file size too large\n");
goto err_free_mb;
}
poolsize = 0;
}
mb->pop = pmemobj_create(args->fname,
POBJ_LAYOUT_NAME(pmembench_lock_layout),
poolsize, args->fmode);
if (mb->pop == nullptr) {
ret = -1;
perror("pmemobj_create");
goto err_free_mb;
}
mb->root = POBJ_ROOT(mb->pop, struct my_root);
assert(!TOID_IS_NULL(mb->root));
ret = mb->ops->bench_init(mb);
if (ret != 0)
goto err_free_pop;
pmembench_set_priv(bench, mb);
return 0;
err_free_pop:
pmemobj_close(mb->pop);
err_free_mb:
free(mb);
return ret;
}
/*
 * locks_exit -- destroys allocated objects and releases memory
*/
static int
locks_exit(struct benchmark *bench, struct benchmark_args *args)
{
assert(bench != nullptr);
assert(args != nullptr);
auto *mb = (struct mutex_bench *)pmembench_get_priv(bench);
assert(mb != nullptr);
mb->ops->bench_exit(mb);
pmemobj_close(mb->pop);
free(mb);
return 0;
}
/*
* locks_op -- actual benchmark operation
*
 * Performs lock and unlock operations as specified by the program arguments.
*/
static int
locks_op(struct benchmark *bench, struct operation_info *info)
{
auto *mb = (struct mutex_bench *)pmembench_get_priv(bench);
assert(mb != nullptr);
assert(mb->pop != nullptr);
assert(!TOID_IS_NULL(mb->root));
assert(mb->locks != nullptr);
assert(mb->lock_mode < OP_MODE_MAX);
mb->ops->bench_op(mb);
return 0;
}
/* structure to define command line arguments */
static struct benchmark_clo locks_clo[7];
static struct benchmark_info locks_info;
CONSTRUCTOR(pmem_locks_constructor)
void
pmem_locks_constructor(void)
{
locks_clo[0].opt_short = 'p';
locks_clo[0].opt_long = "use_system_threads";
locks_clo[0].descr = "Use os_thread locks instead of PMEM, "
"does not matter for volatile mutex";
locks_clo[0].def = "false";
locks_clo[0].off =
clo_field_offset(struct prog_args, use_system_threads);
locks_clo[0].type = CLO_TYPE_FLAG;
locks_clo[1].opt_short = 'm';
locks_clo[1].opt_long = "numlocks";
	locks_clo[1].descr = "The number of lock objects used "
			     "by the benchmark";
locks_clo[1].def = "1";
locks_clo[1].off = clo_field_offset(struct prog_args, n_locks);
locks_clo[1].type = CLO_TYPE_UINT;
locks_clo[1].type_uint.size = clo_field_size(struct prog_args, n_locks);
locks_clo[1].type_uint.base = CLO_INT_BASE_DEC;
locks_clo[1].type_uint.min = 1;
locks_clo[1].type_uint.max = UINT_MAX;
locks_clo[2].opt_short = 0;
locks_clo[2].opt_long = "mode";
locks_clo[2].descr = "Locking mode";
locks_clo[2].type = CLO_TYPE_STR;
locks_clo[2].off = clo_field_offset(struct prog_args, lock_mode);
locks_clo[2].def = "1by1";
locks_clo[3].opt_short = 'r';
locks_clo[3].opt_long = "run_id";
	locks_clo[3].descr = "Increment the run_id of the PMEM "
			     "object pool after each operation";
locks_clo[3].def = "false";
locks_clo[3].off = clo_field_offset(struct prog_args, run_id_increment);
locks_clo[3].type = CLO_TYPE_FLAG;
locks_clo[4].opt_short = 'i';
locks_clo[4].opt_long = "run_id_init_val";
	locks_clo[4].descr = "Use this value for initializing the "
			     "run_id of each PMEM lock object";
locks_clo[4].def = "2";
locks_clo[4].off =
clo_field_offset(struct prog_args, runid_initial_value);
locks_clo[4].type = CLO_TYPE_UINT;
locks_clo[4].type_uint.size =
clo_field_size(struct prog_args, runid_initial_value);
locks_clo[4].type_uint.base = CLO_INT_BASE_DEC;
locks_clo[4].type_uint.min = 0;
locks_clo[4].type_uint.max = UINT64_MAX;
locks_clo[5].opt_short = 'b';
locks_clo[5].opt_long = "bench_type";
	locks_clo[5].descr = "The benchmark type: mutex, "
			     "rwlock or volatile-mutex";
locks_clo[5].type = CLO_TYPE_STR;
locks_clo[5].off = clo_field_offset(struct prog_args, lock_type);
locks_clo[5].def = "mutex";
locks_clo[6].opt_short = 'R';
locks_clo[6].opt_long = "rdlock";
	locks_clo[6].descr = "Select read over write lock, only "
			     "valid when bench_type is \"rwlock\"";
locks_clo[6].type = CLO_TYPE_FLAG;
locks_clo[6].off = clo_field_offset(struct prog_args, use_rdlock);
locks_info.name = "obj_locks";
locks_info.brief = "Benchmark for pmem locks operations";
locks_info.init = locks_init;
locks_info.exit = locks_exit;
locks_info.multithread = false;
locks_info.multiops = true;
locks_info.operation = locks_op;
locks_info.measure_time = true;
locks_info.clos = locks_clo;
locks_info.nclos = ARRAY_SIZE(locks_clo);
locks_info.opts_size = sizeof(struct prog_args);
locks_info.rm_file = true;
locks_info.allow_poolset = true;
REGISTER_BENCHMARK(locks_info);
};
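/*
 * Example invocation (illustrative sketch only; it shows just the
 * benchmark-specific options defined above, the common pmembench options
 * such as the pool file are passed the usual way):
 *
 *	pmembench obj_locks -b rwlock -R -m 32 --mode all-lock
 */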
| 21,213 | 23.957647 | 80 | cpp |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/benchmarks/pmem_memset.cpp | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* pmem_memset.cpp -- benchmark for pmem_memset function
*/
#include <cassert>
#include <cerrno>
#include <cstring>
#include <fcntl.h>
#include <libpmem.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>
#include "benchmark.hpp"
#include "file.h"
#include "os.h"
#define MAX_OFFSET 63
#define CONST_B 0xFF
struct memset_bench;
typedef int (*operation_fn)(void *dest, int c, size_t len);
/*
* memset_args -- benchmark specific command line options
*/
struct memset_args {
char *mode; /* operation mode: stat, seq, rand */
bool memset; /* use libc memset function */
bool persist; /* perform persist operation */
bool msync; /* perform msync operation */
bool no_warmup; /* do not do warmup */
size_t chunk_size; /* elementary chunk size */
size_t dest_off; /* destination address offset */
unsigned seed; /* seed for random numbers */
};
/*
* memset_bench -- benchmark context
*/
struct memset_bench {
struct memset_args *pargs; /* benchmark specific arguments */
uint64_t *offsets; /* random/sequential address offsets */
size_t n_offsets; /* number of random elements */
int const_b; /* memset() value */
size_t fsize; /* file size */
void *pmem_addr; /* mapped file address */
operation_fn func_op; /* operation function */
};
/*
* operation_mode -- mode of operation of memset()
*/
enum operation_mode {
OP_MODE_UNKNOWN,
OP_MODE_STAT, /* always use the same chunk */
OP_MODE_SEQ, /* use consecutive chunks */
OP_MODE_RAND /* use random chunks */
};
/*
* parse_op_mode -- parse operation mode from string
*/
static enum operation_mode
parse_op_mode(const char *arg)
{
if (strcmp(arg, "stat") == 0)
return OP_MODE_STAT;
else if (strcmp(arg, "seq") == 0)
return OP_MODE_SEQ;
else if (strcmp(arg, "rand") == 0)
return OP_MODE_RAND;
else
return OP_MODE_UNKNOWN;
}
/*
* init_offsets -- initialize offsets[] array depending on the selected mode
*/
static int
init_offsets(struct benchmark_args *args, struct memset_bench *mb,
enum operation_mode op_mode)
{
unsigned n_threads = args->n_threads;
size_t n_ops = args->n_ops_per_thread;
mb->n_offsets = n_ops * n_threads;
assert(mb->n_offsets != 0);
mb->offsets = (uint64_t *)malloc(mb->n_offsets * sizeof(*mb->offsets));
if (!mb->offsets) {
perror("malloc");
return -1;
}
rng_t rng;
randomize_r(&rng, mb->pargs->seed);
for (unsigned i = 0; i < n_threads; i++) {
for (size_t j = 0; j < n_ops; j++) {
size_t o;
switch (op_mode) {
case OP_MODE_STAT:
o = i;
break;
case OP_MODE_SEQ:
o = i * n_ops + j;
break;
case OP_MODE_RAND:
o = i * n_ops + rnd64_r(&rng) % n_ops;
break;
default:
assert(0);
return -1;
}
mb->offsets[i * n_ops + j] = o * mb->pargs->chunk_size;
}
}
return 0;
}
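/*
 * Worked example (illustrative): with 2 threads, 3 ops per thread and
 * chunk_size C, the chunk indexes generated above are:
 *
 *	OP_MODE_STAT:	thread 0 -> {0, 0, 0},  thread 1 -> {1, 1, 1}
 *	OP_MODE_SEQ:	thread 0 -> {0, 1, 2},  thread 1 -> {3, 4, 5}
 *	OP_MODE_RAND:	thread i -> three random indexes from [3i, 3i + 2]
 *
 * and each index is multiplied by C to obtain the byte offset.
 */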
/*
* libpmem_memset_persist -- perform operation using libpmem
* pmem_memset_persist().
*/
static int
libpmem_memset_persist(void *dest, int c, size_t len)
{
pmem_memset_persist(dest, c, len);
return 0;
}
/*
* libpmem_memset_nodrain -- perform operation using libpmem
* pmem_memset_nodrain().
*/
static int
libpmem_memset_nodrain(void *dest, int c, size_t len)
{
pmem_memset_nodrain(dest, c, len);
return 0;
}
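/*
 * Usage sketch (illustrative, not used by this benchmark): the nodrain
 * variant leaves the final drain to the caller, which allows batching
 * many memsets under a single pmem_drain():
 *
 *	for (size_t i = 0; i < n; i++)
 *		pmem_memset_nodrain((char *)dst + i * len, c, len);
 *	pmem_drain();
 */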
/*
* libc_memset_persist -- perform operation using libc memset() function
* followed by pmem_persist().
*/
static int
libc_memset_persist(void *dest, int c, size_t len)
{
memset(dest, c, len);
pmem_persist(dest, len);
return 0;
}
/*
* libc_memset_msync -- perform operation using libc memset() function
* followed by pmem_msync().
*/
static int
libc_memset_msync(void *dest, int c, size_t len)
{
memset(dest, c, len);
return pmem_msync(dest, len);
}
/*
* libc_memset -- perform operation using libc memset() function
* followed by pmem_flush().
*/
static int
libc_memset(void *dest, int c, size_t len)
{
memset(dest, c, len);
pmem_flush(dest, len);
return 0;
}
/*
* warmup_persist -- does the warmup by writing the whole pool area
*/
static int
warmup_persist(struct memset_bench *mb)
{
void *dest = mb->pmem_addr;
int c = mb->const_b;
size_t len = mb->fsize;
pmem_memset_persist(dest, c, len);
return 0;
}
/*
* warmup_msync -- does the warmup by writing the whole pool area
*/
static int
warmup_msync(struct memset_bench *mb)
{
void *dest = mb->pmem_addr;
int c = mb->const_b;
size_t len = mb->fsize;
return libc_memset_msync(dest, c, len);
}
/*
* memset_op -- actual benchmark operation. It can have one of the four
* functions assigned:
* libc_memset,
* libc_memset_persist,
* libpmem_memset_nodrain,
* libpmem_memset_persist.
*/
static int
memset_op(struct benchmark *bench, struct operation_info *info)
{
auto *mb = (struct memset_bench *)pmembench_get_priv(bench);
assert(info->index < mb->n_offsets);
size_t idx = info->worker->index * info->args->n_ops_per_thread +
info->index;
void *dest =
(char *)mb->pmem_addr + mb->offsets[idx] + mb->pargs->dest_off;
int c = mb->const_b;
size_t len = mb->pargs->chunk_size;
mb->func_op(dest, c, len);
return 0;
}
/*
* memset_init -- initialization function
*/
static int
memset_init(struct benchmark *bench, struct benchmark_args *args)
{
assert(bench != nullptr);
assert(args != nullptr);
assert(args->opts != nullptr);
int ret = 0;
size_t size;
size_t large;
size_t little;
size_t file_size = 0;
int flags = 0;
enum file_type type = util_file_get_type(args->fname);
if (type == OTHER_ERROR) {
fprintf(stderr, "could not check type of file %s\n",
args->fname);
return -1;
}
int (*warmup_func)(struct memset_bench *) = warmup_persist;
auto *mb = (struct memset_bench *)malloc(sizeof(struct memset_bench));
if (!mb) {
perror("malloc");
return -1;
}
mb->pargs = (struct memset_args *)args->opts;
mb->pargs->chunk_size = args->dsize;
enum operation_mode op_mode = parse_op_mode(mb->pargs->mode);
if (op_mode == OP_MODE_UNKNOWN) {
fprintf(stderr, "Invalid operation mode argument '%s'\n",
mb->pargs->mode);
ret = -1;
goto err_free_mb;
}
size = MAX_OFFSET + mb->pargs->chunk_size;
large = size * args->n_ops_per_thread * args->n_threads;
little = size * args->n_threads;
mb->fsize = (op_mode == OP_MODE_STAT) ? little : large;
/* initialize offsets[] array depending on benchmark args */
if (init_offsets(args, mb, op_mode) < 0) {
ret = -1;
goto err_free_mb;
}
/* initialize memset() value */
mb->const_b = CONST_B;
if (type != TYPE_DEVDAX) {
file_size = mb->fsize;
flags = PMEM_FILE_CREATE | PMEM_FILE_EXCL;
}
/* create a pmem file and memory map it */
if ((mb->pmem_addr = pmem_map_file(args->fname, file_size, flags,
args->fmode, nullptr, nullptr)) ==
nullptr) {
perror(args->fname);
ret = -1;
goto err_free_offsets;
}
if (mb->pargs->memset) {
if (mb->pargs->persist && mb->pargs->msync) {
fprintf(stderr,
"Invalid benchmark parameters: persist and msync cannot be specified together\n");
ret = -1;
goto err_free_offsets;
}
if (mb->pargs->persist) {
mb->func_op = libc_memset_persist;
} else if (mb->pargs->msync) {
mb->func_op = libc_memset_msync;
warmup_func = warmup_msync;
} else {
mb->func_op = libc_memset;
}
} else {
mb->func_op = (mb->pargs->persist) ? libpmem_memset_persist
: libpmem_memset_nodrain;
}
if (!mb->pargs->no_warmup && type != TYPE_DEVDAX) {
ret = warmup_func(mb);
if (ret) {
perror("Pool warmup failed");
goto err_free_offsets;
}
}
pmembench_set_priv(bench, mb);
return ret;
err_free_offsets:
free(mb->offsets);
err_free_mb:
free(mb);
return ret;
}
/*
* memset_exit -- benchmark cleanup function
*/
static int
memset_exit(struct benchmark *bench, struct benchmark_args *args)
{
auto *mb = (struct memset_bench *)pmembench_get_priv(bench);
pmem_unmap(mb->pmem_addr, mb->fsize);
free(mb->offsets);
free(mb);
return 0;
}
static struct benchmark_clo memset_clo[7];
/* Stores information about benchmark. */
static struct benchmark_info memset_info;
CONSTRUCTOR(pmem_memset_constructor)
void
pmem_memset_constructor(void)
{
memset_clo[0].opt_short = 'M';
memset_clo[0].opt_long = "mem-mode";
memset_clo[0].descr = "Memory writing mode - "
"stat, seq, rand";
memset_clo[0].def = "seq";
memset_clo[0].off = clo_field_offset(struct memset_args, mode);
memset_clo[0].type = CLO_TYPE_STR;
memset_clo[1].opt_short = 'm';
memset_clo[1].opt_long = "memset";
memset_clo[1].descr = "Use libc memset()";
memset_clo[1].def = "false";
memset_clo[1].off = clo_field_offset(struct memset_args, memset);
memset_clo[1].type = CLO_TYPE_FLAG;
memset_clo[2].opt_short = 'p';
memset_clo[2].opt_long = "persist";
memset_clo[2].descr = "Use pmem_persist()";
memset_clo[2].def = "true";
memset_clo[2].off = clo_field_offset(struct memset_args, persist);
memset_clo[2].type = CLO_TYPE_FLAG;
memset_clo[3].opt_short = 'D';
memset_clo[3].opt_long = "dest-offset";
memset_clo[3].descr = "Destination cache line alignment "
"offset";
memset_clo[3].def = "0";
memset_clo[3].off = clo_field_offset(struct memset_args, dest_off);
memset_clo[3].type = CLO_TYPE_UINT;
memset_clo[3].type_uint.size =
clo_field_size(struct memset_args, dest_off);
memset_clo[3].type_uint.base = CLO_INT_BASE_DEC;
memset_clo[3].type_uint.min = 0;
memset_clo[3].type_uint.max = MAX_OFFSET;
memset_clo[4].opt_short = 'w';
memset_clo[4].opt_long = "no-warmup";
memset_clo[4].descr = "Don't do warmup";
memset_clo[4].def = "false";
memset_clo[4].type = CLO_TYPE_FLAG;
memset_clo[4].off = clo_field_offset(struct memset_args, no_warmup);
memset_clo[5].opt_short = 'S';
memset_clo[5].opt_long = "seed";
	memset_clo[5].descr = "Seed for random numbers";
memset_clo[5].def = "1";
memset_clo[5].off = clo_field_offset(struct memset_args, seed);
memset_clo[5].type = CLO_TYPE_UINT;
memset_clo[5].type_uint.size = clo_field_size(struct memset_args, seed);
memset_clo[5].type_uint.base = CLO_INT_BASE_DEC;
memset_clo[5].type_uint.min = 1;
memset_clo[5].type_uint.max = UINT_MAX;
memset_clo[6].opt_short = 's';
memset_clo[6].opt_long = "msync";
memset_clo[6].descr = "Use pmem_msync()";
memset_clo[6].def = "false";
memset_clo[6].off = clo_field_offset(struct memset_args, msync);
memset_clo[6].type = CLO_TYPE_FLAG;
memset_info.name = "pmem_memset";
memset_info.brief = "Benchmark for pmem_memset_persist() "
"and pmem_memset_nodrain() operations";
memset_info.init = memset_init;
memset_info.exit = memset_exit;
memset_info.multithread = true;
memset_info.multiops = true;
memset_info.operation = memset_op;
memset_info.measure_time = true;
memset_info.clos = memset_clo;
memset_info.nclos = ARRAY_SIZE(memset_clo);
memset_info.opts_size = sizeof(struct memset_args);
memset_info.rm_file = true;
memset_info.allow_poolset = false;
memset_info.print_bandwidth = true;
REGISTER_BENCHMARK(memset_info);
};
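/*
 * Example invocation (illustrative sketch; only the options defined above
 * are shown, common pmembench options are omitted):
 *
 *	pmembench pmem_memset -M rand -S 1234 -m -p
 */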
| 11,041 | 23.375276 | 86 | cpp |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/benchmarks/scenario.hpp | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* scenario.hpp -- scenario module declaration
*/
#include "queue.h"
#include <cstdbool>
struct kv {
PMDK_TAILQ_ENTRY(kv) next;
char *key;
char *value;
};
struct scenario {
PMDK_TAILQ_ENTRY(scenario) next;
PMDK_TAILQ_HEAD(scenariohead, kv) head;
char *name;
char *benchmark;
char *group;
};
struct scenarios {
PMDK_TAILQ_HEAD(scenarioshead, scenario) head;
};
#define FOREACH_SCENARIO(s, ss) PMDK_TAILQ_FOREACH((s), &(ss)->head, next)
#define FOREACH_KV(kv, s) PMDK_TAILQ_FOREACH((kv), &(s)->head, next)
struct kv *kv_alloc(const char *key, const char *value);
void kv_free(struct kv *kv);
struct scenario *scenario_alloc(const char *name, const char *bench);
void scenario_free(struct scenario *s);
void scenario_set_group(struct scenario *s, const char *group);
struct scenarios *scenarios_alloc(void);
void scenarios_free(struct scenarios *scenarios);
struct scenario *scenarios_get_scenario(struct scenarios *ss, const char *name);
bool contains_scenarios(int argc, char **argv, struct scenarios *ss);
struct scenario *clone_scenario(struct scenario *src_scenario);
struct kv *find_kv_in_scenario(const char *key,
const struct scenario *scenario);
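/*
 * Illustrative sketch of iterating scenarios and their key-value pairs
 * ('ss' stands for a populated struct scenarios pointer; names are
 * examples only):
 *
 *	struct scenario *s;
 *	struct kv *kv;
 *	FOREACH_SCENARIO(s, ss)
 *		FOREACH_KV(kv, s)
 *			printf("%s: %s=%s\n", s->name, kv->key, kv->value);
 */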
| 1,271 | 26.06383 | 80 | hpp |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/benchmarks/log.cpp | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* log.cpp -- pmemlog benchmarks definitions
*/
#include <cassert>
#include <cerrno>
#include <cstring>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <unistd.h>
#include "benchmark.hpp"
#include "file.h"
#include "libpmemlog.h"
#include "os.h"
#include "poolset_util.hpp"
#include "rand.h"
/*
* Size of pool header, pool descriptor
* and additional page alignment overhead
*/
#define POOL_HDR_SIZE (3 * 4096)
#define MIN_VEC_SIZE 1
/*
* prog_args - benchmark's specific command line arguments
*/
struct prog_args {
unsigned seed; /* seed for pseudo-random generator */
bool rand; /* use random numbers */
int vec_size; /* vector size */
size_t el_size; /* size of single append */
size_t min_size; /* minimum size for random mode */
bool no_warmup; /* don't do warmup */
bool fileio; /* use file io instead of pmemlog */
};
/*
* thread_info - thread specific data
*/
struct log_worker_info {
rng_t rng;
struct iovec *iov; /* io vector */
char *buf; /* buffer for write/read operations */
size_t buf_size; /* buffer size */
size_t buf_ptr; /* pointer for read operations */
size_t *rand_sizes;
size_t *vec_sizes; /* sum of sizes in vector */
};
/*
* log_bench - main context of benchmark
*/
struct log_bench {
size_t psize; /* size of pool */
PMEMlogpool *plp; /* pmemlog handle */
struct prog_args *args; /* benchmark specific arguments */
int fd; /* file descriptor for file io mode */
rng_t rng;
/*
* Pointer to the main benchmark operation. The appropriate function
* will be assigned depending on the benchmark specific arguments.
*/
int (*func_op)(struct benchmark *, struct operation_info *);
};
/*
* do_warmup -- do warmup by writing the whole pool area
*/
static int
do_warmup(struct log_bench *lb, size_t nops)
{
int ret = 0;
size_t bsize = lb->args->vec_size * lb->args->el_size;
auto *buf = (char *)calloc(1, bsize);
if (!buf) {
perror("calloc");
return -1;
}
if (!lb->args->fileio) {
for (size_t i = 0; i < nops; i++) {
if (pmemlog_append(lb->plp, buf, lb->args->el_size) <
0) {
ret = -1;
perror("pmemlog_append");
goto out;
}
}
pmemlog_rewind(lb->plp);
} else {
for (size_t i = 0; i < nops; i++) {
if (write(lb->fd, buf, (unsigned)lb->args->el_size) !=
(ssize_t)lb->args->el_size) {
ret = -1;
perror("write");
os_close(lb->fd);
goto out;
}
}
if (os_lseek(lb->fd, 0, SEEK_SET) < 0) {
ret = -1;
perror("lseek");
os_close(lb->fd);
}
}
out:
free(buf);
return ret;
}
/*
* log_append -- performs pmemlog_append operation
*/
static int
log_append(struct benchmark *bench, struct operation_info *info)
{
auto *lb = (struct log_bench *)pmembench_get_priv(bench);
assert(lb);
auto *worker_info = (struct log_worker_info *)info->worker->priv;
assert(worker_info);
size_t size = lb->args->rand ? worker_info->rand_sizes[info->index]
: lb->args->el_size;
if (pmemlog_append(lb->plp, worker_info->buf, size) < 0) {
perror("pmemlog_append");
return -1;
}
return 0;
}
/*
* log_appendv -- performs pmemlog_appendv operation
*/
static int
log_appendv(struct benchmark *bench, struct operation_info *info)
{
auto *lb = (struct log_bench *)pmembench_get_priv(bench);
assert(lb);
auto *worker_info = (struct log_worker_info *)info->worker->priv;
assert(worker_info);
struct iovec *iov = &worker_info->iov[info->index * lb->args->vec_size];
if (pmemlog_appendv(lb->plp, iov, lb->args->vec_size) < 0) {
perror("pmemlog_appendv");
return -1;
}
return 0;
}
/*
* fileio_append -- performs fileio append operation
*/
static int
fileio_append(struct benchmark *bench, struct operation_info *info)
{
auto *lb = (struct log_bench *)pmembench_get_priv(bench);
assert(lb);
auto *worker_info = (struct log_worker_info *)info->worker->priv;
assert(worker_info);
size_t size = lb->args->rand ? worker_info->rand_sizes[info->index]
: lb->args->el_size;
if (write(lb->fd, worker_info->buf, (unsigned)size) != (ssize_t)size) {
perror("write");
return -1;
}
return 0;
}
/*
* fileio_appendv -- performs fileio appendv operation
*/
static int
fileio_appendv(struct benchmark *bench, struct operation_info *info)
{
auto *lb = (struct log_bench *)pmembench_get_priv(bench);
assert(lb != nullptr);
auto *worker_info = (struct log_worker_info *)info->worker->priv;
assert(worker_info);
struct iovec *iov = &worker_info->iov[info->index * lb->args->vec_size];
size_t vec_size = worker_info->vec_sizes[info->index];
if (os_writev(lb->fd, iov, lb->args->vec_size) != (ssize_t)vec_size) {
perror("writev");
return -1;
}
return 0;
}
/*
 * log_process_data -- callback function for pmemlog_walk; it returns 1
 * so that the walk continues through the whole log
*/
static int
log_process_data(const void *buf, size_t len, void *arg)
{
auto *worker_info = (struct log_worker_info *)arg;
size_t left = worker_info->buf_size - worker_info->buf_ptr;
if (len > left) {
worker_info->buf_ptr = 0;
left = worker_info->buf_size;
}
len = len < left ? len : left;
assert(len <= left);
void *buff = &worker_info->buf[worker_info->buf_ptr];
memcpy(buff, buf, len);
worker_info->buf_ptr += len;
return 1;
}
/*
* fileio_read -- perform single fileio read
*/
static int
fileio_read(int fd, ssize_t len, struct log_worker_info *worker_info)
{
ssize_t left = worker_info->buf_size - worker_info->buf_ptr;
	if (len > left) {
		/* buffer exhausted; reset it and end this read pass */
		worker_info->buf_ptr = 0;
		return 0;
	}
len = len < left ? len : left;
assert(len <= left);
size_t off = worker_info->buf_ptr;
void *buff = &worker_info->buf[off];
	if ((len = pread(fd, buff, len, off)) < 0)
		return -1;
	if (len == 0)
		return 0;
worker_info->buf_ptr += len;
return 1;
}
/*
* log_read_op -- perform read operation
*/
static int
log_read_op(struct benchmark *bench, struct operation_info *info)
{
auto *lb = (struct log_bench *)pmembench_get_priv(bench);
assert(lb);
auto *worker_info = (struct log_worker_info *)info->worker->priv;
assert(worker_info);
worker_info->buf_ptr = 0;
size_t chunk_size = lb->args->rand
? worker_info->rand_sizes[info->index]
: lb->args->el_size;
if (!lb->args->fileio) {
pmemlog_walk(lb->plp, chunk_size, log_process_data,
worker_info);
return 0;
}
int ret;
while ((ret = fileio_read(lb->fd, chunk_size, worker_info)) == 1)
;
return ret;
}
/*
* log_init_worker -- init benchmark worker
*/
static int
log_init_worker(struct benchmark *bench, struct benchmark_args *args,
struct worker_info *worker)
{
int ret = 0;
auto *lb = (struct log_bench *)pmembench_get_priv(bench);
size_t i_size, n_vectors;
assert(lb);
auto *worker_info = (struct log_worker_info *)malloc(
sizeof(struct log_worker_info));
if (!worker_info) {
perror("malloc");
return -1;
}
/* allocate buffer for append / read */
worker_info->buf_size = lb->args->el_size * lb->args->vec_size;
worker_info->buf = (char *)malloc(worker_info->buf_size);
if (!worker_info->buf) {
perror("malloc");
ret = -1;
goto err_free_worker_info;
}
/*
	 * Each operation gets its own vector of iovecs. In random mode
	 * every vector element has a random size, otherwise all elements
	 * have the same fixed size.
*/
n_vectors = args->n_ops_per_thread;
worker_info->iov = (struct iovec *)malloc(
n_vectors * lb->args->vec_size * sizeof(struct iovec));
if (!worker_info->iov) {
perror("malloc");
ret = -1;
goto err_free_buf;
}
if (lb->args->rand) {
/* each thread has random seed */
randomize_r(&worker_info->rng, rnd64_r(&lb->rng));
/* each vector element has its own random size */
size_t n_sizes = args->n_ops_per_thread * lb->args->vec_size;
worker_info->rand_sizes = (size_t *)malloc(
n_sizes * sizeof(*worker_info->rand_sizes));
if (!worker_info->rand_sizes) {
perror("malloc");
ret = -1;
goto err_free_iov;
}
/* generate append sizes */
for (size_t i = 0; i < n_sizes; i++) {
size_t width = lb->args->el_size - lb->args->min_size;
worker_info->rand_sizes[i] =
rnd64_r(&worker_info->rng) % width +
lb->args->min_size;
}
} else {
worker_info->rand_sizes = nullptr;
}
worker_info->vec_sizes = (size_t *)calloc(
args->n_ops_per_thread, sizeof(*worker_info->vec_sizes));
if (!worker_info->vec_sizes) {
perror("malloc\n");
ret = -1;
goto err_free_rand_sizes;
}
/* fill up the io vectors */
i_size = 0;
for (size_t n = 0; n < args->n_ops_per_thread; n++) {
size_t buf_ptr = 0;
size_t vec_off = n * lb->args->vec_size;
for (int i = 0; i < lb->args->vec_size; ++i) {
size_t el_size = lb->args->rand
? worker_info->rand_sizes[i_size++]
: lb->args->el_size;
worker_info->iov[vec_off + i].iov_base =
&worker_info->buf[buf_ptr];
worker_info->iov[vec_off + i].iov_len = el_size;
worker_info->vec_sizes[n] += el_size;
buf_ptr += el_size;
}
}
worker->priv = worker_info;
return 0;
err_free_rand_sizes:
free(worker_info->rand_sizes);
err_free_iov:
free(worker_info->iov);
err_free_buf:
free(worker_info->buf);
err_free_worker_info:
free(worker_info);
return ret;
}
/*
* log_free_worker -- cleanup benchmark worker
*/
static void
log_free_worker(struct benchmark *bench, struct benchmark_args *args,
struct worker_info *worker)
{
auto *worker_info = (struct log_worker_info *)worker->priv;
assert(worker_info);
free(worker_info->buf);
free(worker_info->iov);
free(worker_info->rand_sizes);
free(worker_info->vec_sizes);
free(worker_info);
}
/*
* log_init -- benchmark initialization function
*/
static int
log_init(struct benchmark *bench, struct benchmark_args *args)
{
int ret = 0;
assert(bench);
assert(args != nullptr);
assert(args->opts != nullptr);
struct benchmark_info *bench_info;
char path[PATH_MAX];
if (util_safe_strcpy(path, args->fname, sizeof(path)) != 0)
return -1;
enum file_type type = util_file_get_type(args->fname);
if (type == OTHER_ERROR) {
fprintf(stderr, "could not check type of file %s\n",
args->fname);
return -1;
}
auto *lb = (struct log_bench *)malloc(sizeof(struct log_bench));
if (!lb) {
perror("malloc");
return -1;
}
lb->args = (struct prog_args *)args->opts;
lb->args->el_size = args->dsize;
if (lb->args->vec_size == 0)
lb->args->vec_size = 1;
if (lb->args->rand && lb->args->min_size > lb->args->el_size) {
errno = EINVAL;
ret = -1;
goto err_free_lb;
}
if (lb->args->rand && lb->args->min_size == lb->args->el_size)
lb->args->rand = false;
randomize_r(&lb->rng, lb->args->seed);
/* align pool size to ensure that we have enough usable space */
lb->psize =
ALIGN_UP(POOL_HDR_SIZE +
args->n_ops_per_thread * args->n_threads *
lb->args->vec_size * lb->args->el_size,
Mmap_align);
	/* enforce the minimum required pool size */
if (lb->psize < PMEMLOG_MIN_POOL)
lb->psize = PMEMLOG_MIN_POOL;
if (args->is_poolset || type == TYPE_DEVDAX) {
if (lb->args->fileio) {
fprintf(stderr,
"fileio not supported on device dax nor poolset\n");
ret = -1;
goto err_free_lb;
}
if (args->fsize < lb->psize) {
fprintf(stderr, "file size too large\n");
ret = -1;
goto err_free_lb;
}
lb->psize = 0;
} else if (args->is_dynamic_poolset) {
if (lb->args->fileio) {
fprintf(stderr,
"fileio not supported with dynamic poolset\n");
ret = -1;
goto err_free_lb;
}
ret = dynamic_poolset_create(args->fname, lb->psize);
if (ret == -1)
goto err_free_lb;
		if (util_safe_strcpy(path, POOLSET_PATH, sizeof(path)) != 0) {
			ret = -1;
			goto err_free_lb;
		}
lb->psize = 0;
}
bench_info = pmembench_get_info(bench);
if (!lb->args->fileio) {
if ((lb->plp = pmemlog_create(path, lb->psize, args->fmode)) ==
nullptr) {
perror("pmemlog_create");
ret = -1;
goto err_free_lb;
}
bench_info->operation =
(lb->args->vec_size > 1) ? log_appendv : log_append;
} else {
int flags = O_CREAT | O_RDWR | O_SYNC;
/* Create a file if it does not exist. */
if ((lb->fd = os_open(args->fname, flags, args->fmode)) < 0) {
perror(args->fname);
ret = -1;
goto err_free_lb;
}
		/* preallocate the file space */
if ((errno = os_posix_fallocate(lb->fd, 0, lb->psize)) != 0) {
perror("posix_fallocate");
ret = -1;
goto err_close;
}
bench_info->operation = (lb->args->vec_size > 1)
? fileio_appendv
: fileio_append;
}
if (!lb->args->no_warmup && type != TYPE_DEVDAX) {
size_t warmup_nops = args->n_threads * args->n_ops_per_thread;
if (do_warmup(lb, warmup_nops)) {
fprintf(stderr, "warmup failed\n");
ret = -1;
goto err_close;
}
}
pmembench_set_priv(bench, lb);
return 0;
err_close:
if (lb->args->fileio)
os_close(lb->fd);
else
pmemlog_close(lb->plp);
err_free_lb:
free(lb);
return ret;
}
/*
* log_exit -- cleanup benchmark
*/
static int
log_exit(struct benchmark *bench, struct benchmark_args *args)
{
auto *lb = (struct log_bench *)pmembench_get_priv(bench);
if (!lb->args->fileio)
pmemlog_close(lb->plp);
else
os_close(lb->fd);
free(lb);
return 0;
}
/* command line options definition */
static struct benchmark_clo log_clo[6];
/* log_append benchmark info */
static struct benchmark_info log_append_info;
/* log_read benchmark info */
static struct benchmark_info log_read_info;
CONSTRUCTOR(log_constructor)
void
log_constructor(void)
{
log_clo[0].opt_short = 'r';
log_clo[0].opt_long = "random";
log_clo[0].descr = "Use random sizes for append/read";
log_clo[0].off = clo_field_offset(struct prog_args, rand);
log_clo[0].type = CLO_TYPE_FLAG;
log_clo[1].opt_short = 'S';
log_clo[1].opt_long = "seed";
	log_clo[1].descr = "Seed for random mode";
log_clo[1].off = clo_field_offset(struct prog_args, seed);
log_clo[1].def = "1";
log_clo[1].type = CLO_TYPE_UINT;
log_clo[1].type_uint.size = clo_field_size(struct prog_args, seed);
log_clo[1].type_uint.base = CLO_INT_BASE_DEC;
log_clo[1].type_uint.min = 1;
log_clo[1].type_uint.max = UINT_MAX;
log_clo[2].opt_short = 'i';
log_clo[2].opt_long = "file-io";
log_clo[2].descr = "File I/O mode";
log_clo[2].off = clo_field_offset(struct prog_args, fileio);
log_clo[2].type = CLO_TYPE_FLAG;
log_clo[3].opt_short = 'w';
log_clo[3].opt_long = "no-warmup";
	log_clo[3].descr = "Don't do warmup";
	log_clo[3].type = CLO_TYPE_FLAG;
log_clo[3].off = clo_field_offset(struct prog_args, no_warmup);
log_clo[4].opt_short = 'm';
log_clo[4].opt_long = "min-size";
log_clo[4].descr = "Minimum size of append/read for "
"random mode";
log_clo[4].type = CLO_TYPE_UINT;
log_clo[4].off = clo_field_offset(struct prog_args, min_size);
log_clo[4].def = "1";
log_clo[4].type_uint.size = clo_field_size(struct prog_args, min_size);
log_clo[4].type_uint.base = CLO_INT_BASE_DEC;
log_clo[4].type_uint.min = 1;
log_clo[4].type_uint.max = UINT64_MAX;
/* this one is only for log_append */
log_clo[5].opt_short = 'v';
log_clo[5].opt_long = "vector";
log_clo[5].descr = "Vector size";
log_clo[5].off = clo_field_offset(struct prog_args, vec_size);
log_clo[5].def = "1";
log_clo[5].type = CLO_TYPE_INT;
log_clo[5].type_int.size = clo_field_size(struct prog_args, vec_size);
log_clo[5].type_int.base = CLO_INT_BASE_DEC;
log_clo[5].type_int.min = MIN_VEC_SIZE;
log_clo[5].type_int.max = INT_MAX;
log_append_info.name = "log_append";
log_append_info.brief = "Benchmark for pmemlog_append() "
"operation";
log_append_info.init = log_init;
log_append_info.exit = log_exit;
log_append_info.multithread = true;
log_append_info.multiops = true;
log_append_info.init_worker = log_init_worker;
log_append_info.free_worker = log_free_worker;
/* this will be assigned in log_init */
log_append_info.operation = nullptr;
log_append_info.measure_time = true;
log_append_info.clos = log_clo;
log_append_info.nclos = ARRAY_SIZE(log_clo);
log_append_info.opts_size = sizeof(struct prog_args);
log_append_info.rm_file = true;
log_append_info.allow_poolset = true;
REGISTER_BENCHMARK(log_append_info);
log_read_info.name = "log_read";
log_read_info.brief = "Benchmark for pmemlog_walk() "
"operation";
log_read_info.init = log_init;
log_read_info.exit = log_exit;
log_read_info.multithread = true;
log_read_info.multiops = true;
log_read_info.init_worker = log_init_worker;
log_read_info.free_worker = log_free_worker;
log_read_info.operation = log_read_op;
log_read_info.measure_time = true;
log_read_info.clos = log_clo;
/* without vector */
log_read_info.nclos = ARRAY_SIZE(log_clo) - 1;
log_read_info.opts_size = sizeof(struct prog_args);
log_read_info.rm_file = true;
log_read_info.allow_poolset = true;
REGISTER_BENCHMARK(log_read_info);
};
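/*
 * Example invocation (illustrative sketch; common pmembench options are
 * omitted):
 *
 *	pmembench log_append -v 4 -r -S 7 -m 64
 */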
| 16,617 | 22.979798 | 73 | cpp |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/benchmarks/clo.hpp | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* clo.hpp -- command line options module declarations
*/
int benchmark_clo_parse(int argc, char *argv[], struct benchmark_clo *clos,
ssize_t nclo, struct clo_vec *clovec);
int benchmark_clo_parse_scenario(struct scenario *scenario,
struct benchmark_clo *clos, size_t nclo,
struct clo_vec *clovec);
const char *benchmark_clo_str(struct benchmark_clo *clo, void *args,
size_t size);
int clo_get_scenarios(int argc, char *argv[],
struct scenarios *available_scenarios,
struct scenarios *found_scenarios);
int benchmark_override_clos_in_scenario(struct scenario *scenario, int argc,
char *argv[],
struct benchmark_clo *clos, int nclos);
| 772 | 39.684211 | 76 | hpp |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/common/page_size.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, IBM Corporation */
#ifndef PMDK_PAGE_SIZE_H
#define PMDK_PAGE_SIZE_H
#if defined(__x86_64) || defined(_M_X64) || defined(__aarch64__)
#define PMEM_PAGESIZE 4096
#elif defined(__PPC64__)
#define PMEM_PAGESIZE 65536
#else
#error unable to recognize ISA at compile time
#endif
#endif /* PMDK_PAGE_SIZE_H */
| 374 | 16.045455 | 64 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/common/ctl.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* ctl.h -- internal declaration of statistics and control related structures
*/
#ifndef PMDK_CTL_H
#define PMDK_CTL_H 1
#include "queue.h"
#include "errno.h"
#include "out.h"
#ifdef __cplusplus
extern "C" {
#endif
struct ctl;
struct ctl_index {
const char *name;
long value;
PMDK_SLIST_ENTRY(ctl_index) entry;
};
PMDK_SLIST_HEAD(ctl_indexes, ctl_index);
enum ctl_query_source {
CTL_UNKNOWN_QUERY_SOURCE,
/* query executed directly from the program */
CTL_QUERY_PROGRAMMATIC,
/* query executed from the config file */
CTL_QUERY_CONFIG_INPUT,
MAX_CTL_QUERY_SOURCE
};
enum ctl_query_type {
CTL_QUERY_READ,
CTL_QUERY_WRITE,
CTL_QUERY_RUNNABLE,
MAX_CTL_QUERY_TYPE
};
typedef int (*node_callback)(void *ctx, enum ctl_query_source type,
void *arg, struct ctl_indexes *indexes);
enum ctl_node_type {
CTL_NODE_UNKNOWN,
CTL_NODE_NAMED,
CTL_NODE_LEAF,
CTL_NODE_INDEXED,
MAX_CTL_NODE
};
typedef int (*ctl_arg_parser)(const void *arg, void *dest, size_t dest_size);
struct ctl_argument_parser {
size_t dest_offset; /* offset of the field inside of the argument */
size_t dest_size; /* size of the field inside of the argument */
ctl_arg_parser parser;
};
struct ctl_argument {
size_t dest_size; /* sizeof the entire argument */
struct ctl_argument_parser parsers[]; /* array of 'fields' in arg */
};
#define sizeof_member(t, m) sizeof(((t *)0)->m)
#define CTL_ARG_PARSER(t, p)\
{0, sizeof(t), p}
#define CTL_ARG_PARSER_STRUCT(t, m, p)\
{offsetof(t, m), sizeof_member(t, m), p}
#define CTL_ARG_PARSER_END {0, 0, NULL}
/*
* CTL Tree node structure, do not use directly. All the necessary functionality
* is provided by the included macros.
*/
struct ctl_node {
const char *name;
enum ctl_node_type type;
node_callback cb[MAX_CTL_QUERY_TYPE];
const struct ctl_argument *arg;
const struct ctl_node *children;
};
struct ctl *ctl_new(void);
void ctl_delete(struct ctl *stats);
int ctl_load_config_from_string(struct ctl *ctl, void *ctx,
const char *cfg_string);
int ctl_load_config_from_file(struct ctl *ctl, void *ctx,
const char *cfg_file);
/* Use through CTL_REGISTER_MODULE, never directly */
void ctl_register_module_node(struct ctl *c,
const char *name, struct ctl_node *n);
int ctl_arg_boolean(const void *arg, void *dest, size_t dest_size);
#define CTL_ARG_BOOLEAN {sizeof(int),\
{{0, sizeof(int), ctl_arg_boolean},\
CTL_ARG_PARSER_END}};
int ctl_arg_integer(const void *arg, void *dest, size_t dest_size);
#define CTL_ARG_INT {sizeof(int),\
{{0, sizeof(int), ctl_arg_integer},\
CTL_ARG_PARSER_END}};
#define CTL_ARG_LONG_LONG {sizeof(long long),\
{{0, sizeof(long long), ctl_arg_integer},\
CTL_ARG_PARSER_END}};
int ctl_arg_string(const void *arg, void *dest, size_t dest_size);
#define CTL_ARG_STRING(len) {len,\
{{0, len, ctl_arg_string},\
CTL_ARG_PARSER_END}};
#define CTL_STR(name) #name
#define CTL_NODE_END {NULL, CTL_NODE_UNKNOWN, {NULL, NULL, NULL}, NULL, NULL}
#define CTL_NODE(name, ...)\
ctl_node_##__VA_ARGS__##_##name
int ctl_query(struct ctl *ctl, void *ctx, enum ctl_query_source source,
const char *name, enum ctl_query_type type, void *arg);
/* Declaration of a new child node */
#define CTL_CHILD(name, ...)\
{CTL_STR(name), CTL_NODE_NAMED, {NULL, NULL, NULL}, NULL,\
(struct ctl_node *)CTL_NODE(name, __VA_ARGS__)}
/* Declaration of a new indexed node */
#define CTL_INDEXED(name, ...)\
{CTL_STR(name), CTL_NODE_INDEXED, {NULL, NULL, NULL}, NULL,\
(struct ctl_node *)CTL_NODE(name, __VA_ARGS__)}
#define CTL_READ_HANDLER(name, ...)\
ctl_##__VA_ARGS__##_##name##_read
#define CTL_WRITE_HANDLER(name, ...)\
ctl_##__VA_ARGS__##_##name##_write
#define CTL_RUNNABLE_HANDLER(name, ...)\
ctl_##__VA_ARGS__##_##name##_runnable
#define CTL_ARG(name)\
ctl_arg_##name
/*
 * Declaration of a new read-only leaf. If used, the corresponding read
 * function must be declared with the CTL_READ_HANDLER macro.
*/
#define CTL_LEAF_RO(name, ...)\
{CTL_STR(name), CTL_NODE_LEAF, \
{CTL_READ_HANDLER(name, __VA_ARGS__), NULL, NULL}, NULL, NULL}
/*
 * Declaration of a new write-only leaf. If used, the corresponding write
 * function must be declared with the CTL_WRITE_HANDLER macro.
*/
#define CTL_LEAF_WO(name, ...)\
{CTL_STR(name), CTL_NODE_LEAF, \
{NULL, CTL_WRITE_HANDLER(name, __VA_ARGS__), NULL},\
&CTL_ARG(name), NULL}
/*
 * Declaration of a new runnable leaf. If used, the corresponding run
 * function must be declared with the CTL_RUNNABLE_HANDLER macro.
*/
#define CTL_LEAF_RUNNABLE(name, ...)\
{CTL_STR(name), CTL_NODE_LEAF, \
{NULL, NULL, CTL_RUNNABLE_HANDLER(name, __VA_ARGS__)},\
NULL, NULL}
/*
 * Declaration of a new read-write leaf. If used, both the read and write
 * functions must be declared with the CTL_READ_HANDLER and
 * CTL_WRITE_HANDLER macros.
*/
#define CTL_LEAF_RW(name)\
{CTL_STR(name), CTL_NODE_LEAF,\
{CTL_READ_HANDLER(name), CTL_WRITE_HANDLER(name), NULL},\
&CTL_ARG(name), NULL}
#define CTL_REGISTER_MODULE(_ctl, name)\
ctl_register_module_node((_ctl), CTL_STR(name),\
(struct ctl_node *)CTL_NODE(name))
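/*
 * Illustrative sketch of a module namespace built with the macros above
 * (all identifiers are examples, not part of this header):
 *
 *	static int
 *	CTL_READ_HANDLER(count)(void *ctx, enum ctl_query_source source,
 *		void *arg, struct ctl_indexes *indexes)
 *	{
 *		*(int *)arg = 42;
 *		return 0;
 *	}
 *
 *	static const struct ctl_node CTL_NODE(mymodule)[] = {
 *		CTL_LEAF_RO(count),
 *		CTL_NODE_END
 *	};
 *
 *	CTL_REGISTER_MODULE(ctl, mymodule);
 */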
#ifdef __cplusplus
}
#endif
#endif
| 5,127 | 24.261084 | 80 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/common/set_badblocks.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* set_badblocks.h - poolset part of bad blocks API
*/
#ifndef PMDK_SET_BADBLOCKS_H
#define PMDK_SET_BADBLOCKS_H 1
#include "set.h"
#ifdef __cplusplus
extern "C" {
#endif
int badblocks_check_poolset(struct pool_set *set, int create);
int badblocks_clear_poolset(struct pool_set *set, int create);
char *badblocks_recovery_file_alloc(const char *file,
unsigned rep, unsigned part);
int badblocks_recovery_file_exists(struct pool_set *set);
#ifdef __cplusplus
}
#endif
#endif /* PMDK_SET_BADBLOCKS_H */
| 604 | 19.862069 | 62 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/common/os_deep.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2018, Intel Corporation */
/*
* os_deep.h -- abstraction layer for common usage of deep_* functions
*/
#ifndef PMDK_OS_DEEP_PERSIST_H
#define PMDK_OS_DEEP_PERSIST_H 1
#include <stdint.h>
#include <stddef.h>
#include "set.h"
#ifdef __cplusplus
extern "C" {
#endif
int os_range_deep_common(uintptr_t addr, size_t len);
int os_part_deep_common(struct pool_replica *rep, unsigned partidx, void *addr,
size_t len, int flush);
#ifdef __cplusplus
}
#endif
#endif
| 527 | 17.857143 | 79 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/common/ctl_global.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* ctl_global.h -- definitions for the global CTL namespace
*/
#ifndef PMDK_CTL_GLOBAL_H
#define PMDK_CTL_GLOBAL_H 1
#ifdef __cplusplus
extern "C" {
#endif
extern void ctl_prefault_register(void);
extern void ctl_sds_register(void);
extern void ctl_fallocate_register(void);
extern void ctl_cow_register(void);
static inline void
ctl_global_register(void)
{
ctl_prefault_register();
ctl_sds_register();
ctl_fallocate_register();
ctl_cow_register();
}
#ifdef __cplusplus
}
#endif
#endif
| 587 | 16.294118 | 59 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/common/file.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* file.h -- internal definitions for file module
*/
#ifndef PMDK_FILE_H
#define PMDK_FILE_H 1
#include <stddef.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <dirent.h>
#include <limits.h>
#include "os.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifdef _WIN32
#define NAME_MAX _MAX_FNAME
#endif
struct file_info {
char filename[NAME_MAX + 1];
int is_dir;
};
struct dir_handle {
const char *path;
#ifdef _WIN32
HANDLE handle;
char *_file;
#else
DIR *dirp;
#endif
};
enum file_type {
OTHER_ERROR = -2,
NOT_EXISTS = -1,
TYPE_NORMAL = 1,
TYPE_DEVDAX = 2
};
int util_file_dir_open(struct dir_handle *a, const char *path);
int util_file_dir_next(struct dir_handle *a, struct file_info *info);
int util_file_dir_close(struct dir_handle *a);
int util_file_dir_remove(const char *path);
int util_file_exists(const char *path);
enum file_type util_stat_get_type(const os_stat_t *st);
enum file_type util_fd_get_type(int fd);
enum file_type util_file_get_type(const char *path);
int util_ddax_region_find(const char *path, unsigned *region_id);
ssize_t util_file_get_size(const char *path);
ssize_t util_fd_get_size(int fd);
size_t util_file_device_dax_alignment(const char *path);
void *util_file_map_whole(const char *path);
int util_file_zero(const char *path, os_off_t off, size_t len);
ssize_t util_file_pread(const char *path, void *buffer, size_t size,
os_off_t offset);
ssize_t util_file_pwrite(const char *path, const void *buffer, size_t size,
os_off_t offset);
int util_tmpfile(const char *dir, const char *templ, int flags);
int util_is_absolute_path(const char *path);
int util_file_create(const char *path, size_t size, size_t minsize);
int util_file_open(const char *path, size_t *size, size_t minsize, int flags);
int util_unlink(const char *path);
int util_unlink_flock(const char *path);
int util_file_mkdir(const char *path, mode_t mode);
int util_write_all(int fd, const char *buf, size_t count);
#ifndef _WIN32
#define util_read read
#define util_write write
#else
static inline ssize_t
util_read(int fd, void *buf, size_t count)
{
/*
* Simulate short read, because Windows' _read uses "unsigned" as
* a type of the last argument and "int" as a return type.
* We have to limit "count" to what _read can return as a success,
* not what it can accept.
*/
if (count > INT_MAX)
count = INT_MAX;
return _read(fd, buf, (unsigned)count);
}
static inline ssize_t
util_write(int fd, const void *buf, size_t count)
{
/*
* Simulate short write, because Windows' _write uses "unsigned" as
* a type of the last argument and "int" as a return type.
* We have to limit "count" to what _write can return as a success,
* not what it can accept.
*/
if (count > INT_MAX)
count = INT_MAX;
return _write(fd, buf, (unsigned)count);
}
#define S_ISCHR(m) (((m) & S_IFMT) == S_IFCHR)
#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
#endif
#ifdef __cplusplus
}
#endif
#endif
| 3,013 | 24.982759 | 78 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/common/badblocks.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* badblocks.h -- bad blocks API based on the libpmem2 library
*/
#ifndef PMDK_BADBLOCKS_H
#define PMDK_BADBLOCKS_H 1
#include <string.h>
#include <stdint.h>
#include <sys/types.h>
#ifdef __cplusplus
extern "C" {
#endif
#define B2SEC(n) ((n) >> 9) /* convert bytes to sectors */
#define SEC2B(n) ((n) << 9) /* convert sectors to bytes */
#define NO_HEALTHY_REPLICA ((int)(-1))
#define BB_NOT_SUPP \
"checking bad blocks is not supported on this OS, please switch off the CHECK_BAD_BLOCKS compat feature using 'pmempool-feature'"
/*
* 'struct badblock' is already defined in ndctl/libndctl.h,
* so we cannot use this name.
*
* libndctl returns offset relative to the beginning of the region,
* but in this structure we save offset relative to the beginning of:
* - namespace (before badblocks_get())
* and
* - file (before sync_recalc_badblocks())
* and
* - pool (after sync_recalc_badblocks())
*/
struct bad_block {
/*
* offset in bytes relative to the beginning of
* - namespace (before badblocks_get())
* and
* - file (before sync_recalc_badblocks())
* and
* - pool (after sync_recalc_badblocks())
*/
size_t offset;
/* length in bytes */
size_t length;
/* number of healthy replica to fix this bad block */
int nhealthy;
};
struct badblocks {
unsigned bb_cnt; /* number of bad blocks */
struct bad_block *bbv; /* array of bad blocks */
};
struct badblocks *badblocks_new(void);
void badblocks_delete(struct badblocks *bbs);
long badblocks_count(const char *path);
int badblocks_get(const char *file, struct badblocks *bbs);
int badblocks_clear(const char *path, struct badblocks *bbs);
int badblocks_clear_all(const char *file);
int badblocks_check_file(const char *path);
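/*
 * Illustrative usage sketch (the device path and handle_bb() are examples
 * only, error handling is abbreviated):
 *
 *	struct badblocks *bbs = badblocks_new();
 *	if (bbs == NULL)
 *		return;
 *	if (badblocks_get("/dev/dax0.0", bbs) == 0)
 *		for (unsigned i = 0; i < bbs->bb_cnt; i++)
 *			handle_bb(bbs->bbv[i].offset, bbs->bbv[i].length);
 *	badblocks_delete(bbs);
 */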
#ifdef __cplusplus
}
#endif
#endif /* PMDK_BADBLOCKS_H */
| 1,878 | 23.089744 | 130 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/common/util_pmem.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2018, Intel Corporation */
/*
* util_pmem.h -- internal definitions for pmem utils
*/
#ifndef PMDK_UTIL_PMEM_H
#define PMDK_UTIL_PMEM_H 1
#include "libpmem.h"
#include "out.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* util_persist -- flush to persistence
*/
static inline void
util_persist(int is_pmem, const void *addr, size_t len)
{
LOG(3, "is_pmem %d, addr %p, len %zu", is_pmem, addr, len);
if (is_pmem)
pmem_persist(addr, len);
else if (pmem_msync(addr, len))
FATAL("!pmem_msync");
}
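/*
 * Illustrative sketch: callers typically detect pmem once and reuse the
 * result, so the msync fallback is taken only for non-pmem mappings:
 *
 *	int is_pmem = pmem_is_pmem(addr, len);
 *	...
 *	util_persist(is_pmem, addr, len);
 */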
/*
* util_persist_auto -- flush to persistence
*/
static inline void
util_persist_auto(int is_pmem, const void *addr, size_t len)
{
LOG(3, "is_pmem %d, addr %p, len %zu", is_pmem, addr, len);
util_persist(is_pmem || pmem_is_pmem(addr, len), addr, len);
}
#ifdef __cplusplus
}
#endif
#endif /* util_pmem.h */
| 883 | 17.416667 | 61 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/common/pmemcommon.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* pmemcommon.h -- definitions for "common" module
*/
#ifndef PMEMCOMMON_H
#define PMEMCOMMON_H 1
#include "mmap.h"
#include "pmemcore.h"
#ifdef __cplusplus
extern "C" {
#endif
static inline void
common_init(const char *log_prefix, const char *log_level_var,
const char *log_file_var, int major_version,
int minor_version)
{
core_init(log_prefix, log_level_var, log_file_var, major_version,
minor_version);
util_mmap_init();
}
static inline void
common_fini(void)
{
util_mmap_fini();
core_fini();
}
#ifdef __cplusplus
}
#endif
#endif
| 642 | 15.075 | 66 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/common/shutdown_state.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
 * shutdown_state.h -- unsafe shutdown detection
*/
#ifndef PMDK_SHUTDOWN_STATE_H
#define PMDK_SHUTDOWN_STATE_H 1
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
struct pool_replica;
struct shutdown_state {
uint64_t usc;
uint64_t uuid; /* UID checksum */
uint8_t dirty;
uint8_t reserved[39];
uint64_t checksum;
};
int shutdown_state_init(struct shutdown_state *sds, struct pool_replica *rep);
int shutdown_state_add_part(struct shutdown_state *sds, int fd,
struct pool_replica *rep);
void shutdown_state_set_dirty(struct shutdown_state *sds,
struct pool_replica *rep);
void shutdown_state_clear_dirty(struct shutdown_state *sds,
struct pool_replica *rep);
int shutdown_state_check(struct shutdown_state *curr_sds,
struct shutdown_state *pool_sds, struct pool_replica *rep);
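/*
 * Illustrative sketch of the intended call sequence:
 *
 *	create:	shutdown_state_init(), then shutdown_state_add_part()
 *		for every part of the replica
 *	open:	shutdown_state_check() against the state stored in the
 *		pool, then shutdown_state_set_dirty()
 *	close:	shutdown_state_clear_dirty()
 *
 * A dirty flag that is still set at the next open, combined with a
 * changed unsafe shutdown count (usc), signals a potential unsafe
 * shutdown.
 */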
#ifdef __cplusplus
}
#endif
#endif /* shutdown_state.h */
| 950 | 21.642857 | 78 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/common/uuid.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* uuid.h -- internal definitions for uuid module
*/
#ifndef PMDK_UUID_H
#define PMDK_UUID_H 1
#include <stdint.h>
#include <string.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Structure for binary version of uuid. From RFC4122,
* https://tools.ietf.org/html/rfc4122
*/
struct uuid {
uint32_t time_low;
uint16_t time_mid;
uint16_t time_hi_and_ver;
uint8_t clock_seq_hi;
uint8_t clock_seq_low;
uint8_t node[6];
};
#define POOL_HDR_UUID_LEN 16 /* uuid byte length */
#define POOL_HDR_UUID_STR_LEN 37 /* uuid string length */
#define POOL_HDR_UUID_GEN_FILE "/proc/sys/kernel/random/uuid"
typedef unsigned char uuid_t[POOL_HDR_UUID_LEN]; /* 16 byte binary uuid value */
int util_uuid_generate(uuid_t uuid);
int util_uuid_to_string(const uuid_t u, char *buf);
int util_uuid_from_string(const char uuid[POOL_HDR_UUID_STR_LEN],
struct uuid *ud);
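/*
 * Illustrative sketch (assumes <stdio.h>; error handling abbreviated):
 *
 *	uuid_t u;
 *	char str[POOL_HDR_UUID_STR_LEN];
 *	if (util_uuid_generate(u) == 0 && util_uuid_to_string(u, str) == 0)
 *		printf("%s\n", str);
 */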
/*
* uuidcmp -- compare two uuids
*/
static inline int
uuidcmp(const uuid_t uuid1, const uuid_t uuid2)
{
return memcmp(uuid1, uuid2, POOL_HDR_UUID_LEN);
}
#ifdef __cplusplus
}
#endif
#endif
| 1,145 | 19.464286 | 80 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/common/queue.h | /*
* Source: glibc 2.24 (git://sourceware.org/glibc.git /misc/sys/queue.h)
*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)queue.h 8.5 (Berkeley) 8/20/94
*/
#ifndef _PMDK_QUEUE_H_
#define _PMDK_QUEUE_H_
/*
* This file defines five types of data structures: singly-linked lists,
* lists, simple queues, tail queues, and circular queues.
*
* A singly-linked list is headed by a single forward pointer. The
* elements are singly linked for minimum space and pointer manipulation
* overhead at the expense of O(n) removal for arbitrary elements. New
* elements can be added to the list after an existing element or at the
* head of the list. Elements being removed from the head of the list
* should use the explicit macro for this purpose for optimum
* efficiency. A singly-linked list may only be traversed in the forward
* direction. Singly-linked lists are ideal for applications with large
* datasets and few or no removals or for implementing a LIFO queue.
*
* A list is headed by a single forward pointer (or an array of forward
* pointers for a hash table header). The elements are doubly linked
* so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before
* or after an existing element or at the head of the list. A list
* may only be traversed in the forward direction.
*
* A simple queue is headed by a pair of pointers, one the head of the
* list and the other to the tail of the list. The elements are singly
* linked to save space, so elements can only be removed from the
* head of the list. New elements can be added to the list after
* an existing element, at the head of the list, or at the end of the
* list. A simple queue may only be traversed in the forward direction.
*
* A tail queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are doubly
* linked so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before or
* after an existing element, at the head of the list, or at the end of
* the list. A tail queue may be traversed in either direction.
*
* A circle queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are doubly
* linked so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before or after
* an existing element, at the head of the list, or at the end of the list.
* A circle queue may be traversed in either direction, but has a more
* complex end of list detection.
*
* For details on the use of these macros, see the queue(3) manual page.
*/
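/*
 * Illustrative sketch of the list macros defined below (the type and
 * field names are examples only, error checking omitted):
 *
 *	struct entry {
 *		int value;
 *		PMDK_LIST_ENTRY(entry) next;
 *	};
 *	PMDK_LIST_HEAD(entry_head, entry) head =
 *		PMDK_LIST_HEAD_INITIALIZER(head);
 *
 *	struct entry *e = malloc(sizeof(*e));
 *	e->value = 42;
 *	PMDK_LIST_INSERT_HEAD(&head, e, next);
 *	PMDK_LIST_FOREACH(e, &head, next)
 *		printf("%d\n", e->value);
 */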
/*
* XXX This is a workaround for a bug in the llvm's static analyzer. For more
* info see https://github.com/pmem/issues/issues/309.
*/
#ifdef __clang_analyzer__
static void custom_assert(void)
{
abort();
}
#define ANALYZER_ASSERT(x) (__builtin_expect(!(x), 0) ? custom_assert() : (void)0)
#else
#define ANALYZER_ASSERT(x) do {} while (0)
#endif
/*
* List definitions.
*/
#define PMDK_LIST_HEAD(name, type) \
struct name { \
struct type *lh_first; /* first element */ \
}
#define PMDK_LIST_HEAD_INITIALIZER(head) \
{ NULL }
#ifdef __cplusplus
#define PMDK__CAST_AND_ASSIGN(x, y) x = (__typeof__(x))y;
#else
#define PMDK__CAST_AND_ASSIGN(x, y) x = (void *)(y);
#endif
#define PMDK_LIST_ENTRY(type) \
struct { \
struct type *le_next; /* next element */ \
struct type **le_prev; /* address of previous next element */ \
}
/*
* List functions.
*/
#define PMDK_LIST_INIT(head) do { \
(head)->lh_first = NULL; \
} while (/*CONSTCOND*/0)
#define PMDK_LIST_INSERT_AFTER(listelm, elm, field) do { \
if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
(listelm)->field.le_next->field.le_prev = \
&(elm)->field.le_next; \
(listelm)->field.le_next = (elm); \
(elm)->field.le_prev = &(listelm)->field.le_next; \
} while (/*CONSTCOND*/0)
#define PMDK_LIST_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.le_prev = (listelm)->field.le_prev; \
(elm)->field.le_next = (listelm); \
*(listelm)->field.le_prev = (elm); \
(listelm)->field.le_prev = &(elm)->field.le_next; \
} while (/*CONSTCOND*/0)
#define PMDK_LIST_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.le_next = (head)->lh_first) != NULL) \
(head)->lh_first->field.le_prev = &(elm)->field.le_next;\
(head)->lh_first = (elm); \
(elm)->field.le_prev = &(head)->lh_first; \
} while (/*CONSTCOND*/0)
#define PMDK_LIST_REMOVE(elm, field) do { \
ANALYZER_ASSERT((elm) != NULL); \
if ((elm)->field.le_next != NULL) \
(elm)->field.le_next->field.le_prev = \
(elm)->field.le_prev; \
*(elm)->field.le_prev = (elm)->field.le_next; \
} while (/*CONSTCOND*/0)
#define PMDK_LIST_FOREACH(var, head, field) \
for ((var) = ((head)->lh_first); \
(var); \
(var) = ((var)->field.le_next))
/*
* List access methods.
*/
#define PMDK_LIST_EMPTY(head) ((head)->lh_first == NULL)
#define PMDK_LIST_FIRST(head) ((head)->lh_first)
#define PMDK_LIST_NEXT(elm, field) ((elm)->field.le_next)
/*
* Singly-linked List definitions.
*/
#define PMDK_SLIST_HEAD(name, type) \
struct name { \
struct type *slh_first; /* first element */ \
}
#define PMDK_SLIST_HEAD_INITIALIZER(head) \
{ NULL }
#define PMDK_SLIST_ENTRY(type) \
struct { \
struct type *sle_next; /* next element */ \
}
/*
* Singly-linked List functions.
*/
#define PMDK_SLIST_INIT(head) do { \
(head)->slh_first = NULL; \
} while (/*CONSTCOND*/0)
#define PMDK_SLIST_INSERT_AFTER(slistelm, elm, field) do { \
(elm)->field.sle_next = (slistelm)->field.sle_next; \
(slistelm)->field.sle_next = (elm); \
} while (/*CONSTCOND*/0)
#define PMDK_SLIST_INSERT_HEAD(head, elm, field) do { \
(elm)->field.sle_next = (head)->slh_first; \
(head)->slh_first = (elm); \
} while (/*CONSTCOND*/0)
#define PMDK_SLIST_REMOVE_HEAD(head, field) do { \
(head)->slh_first = (head)->slh_first->field.sle_next; \
} while (/*CONSTCOND*/0)
#define PMDK_SLIST_REMOVE(head, elm, type, field) do { \
if ((head)->slh_first == (elm)) { \
PMDK_SLIST_REMOVE_HEAD((head), field); \
} \
else { \
struct type *curelm = (head)->slh_first; \
while(curelm->field.sle_next != (elm)) \
curelm = curelm->field.sle_next; \
curelm->field.sle_next = \
curelm->field.sle_next->field.sle_next; \
} \
} while (/*CONSTCOND*/0)
#define PMDK_SLIST_FOREACH(var, head, field) \
for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next)
/*
* Singly-linked List access methods.
*/
#define PMDK_SLIST_EMPTY(head) ((head)->slh_first == NULL)
#define PMDK_SLIST_FIRST(head) ((head)->slh_first)
#define PMDK_SLIST_NEXT(elm, field) ((elm)->field.sle_next)
/*
* Singly-linked Tail queue declarations.
*/
#define PMDK_STAILQ_HEAD(name, type) \
struct name { \
struct type *stqh_first; /* first element */ \
struct type **stqh_last; /* addr of last next element */ \
}
#define PMDK_STAILQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).stqh_first }
#define PMDK_STAILQ_ENTRY(type) \
struct { \
struct type *stqe_next; /* next element */ \
}
/*
* Singly-linked Tail queue functions.
*/
#define PMDK_STAILQ_INIT(head) do { \
(head)->stqh_first = NULL; \
(head)->stqh_last = &(head)->stqh_first; \
} while (/*CONSTCOND*/0)
#define PMDK_STAILQ_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \
(head)->stqh_last = &(elm)->field.stqe_next; \
(head)->stqh_first = (elm); \
} while (/*CONSTCOND*/0)
#define PMDK_STAILQ_INSERT_TAIL(head, elm, field) do { \
(elm)->field.stqe_next = NULL; \
*(head)->stqh_last = (elm); \
(head)->stqh_last = &(elm)->field.stqe_next; \
} while (/*CONSTCOND*/0)
#define PMDK_STAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
(head)->stqh_last = &(elm)->field.stqe_next; \
(listelm)->field.stqe_next = (elm); \
} while (/*CONSTCOND*/0)
#define PMDK_STAILQ_REMOVE_HEAD(head, field) do { \
if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
(head)->stqh_last = &(head)->stqh_first; \
} while (/*CONSTCOND*/0)
#define PMDK_STAILQ_REMOVE(head, elm, type, field) do { \
if ((head)->stqh_first == (elm)) { \
PMDK_STAILQ_REMOVE_HEAD((head), field); \
} else { \
struct type *curelm = (head)->stqh_first; \
while (curelm->field.stqe_next != (elm)) \
curelm = curelm->field.stqe_next; \
if ((curelm->field.stqe_next = \
curelm->field.stqe_next->field.stqe_next) == NULL) \
(head)->stqh_last = &(curelm)->field.stqe_next; \
} \
} while (/*CONSTCOND*/0)
#define PMDK_STAILQ_FOREACH(var, head, field) \
for ((var) = ((head)->stqh_first); \
(var); \
(var) = ((var)->field.stqe_next))
#define PMDK_STAILQ_CONCAT(head1, head2) do { \
if (!PMDK_STAILQ_EMPTY((head2))) { \
*(head1)->stqh_last = (head2)->stqh_first; \
(head1)->stqh_last = (head2)->stqh_last; \
PMDK_STAILQ_INIT((head2)); \
} \
} while (/*CONSTCOND*/0)
/*
* Singly-linked Tail queue access methods.
*/
#define PMDK_STAILQ_EMPTY(head) ((head)->stqh_first == NULL)
#define PMDK_STAILQ_FIRST(head) ((head)->stqh_first)
#define PMDK_STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
/*
* Simple queue definitions.
*/
#define PMDK_SIMPLEQ_HEAD(name, type) \
struct name { \
struct type *sqh_first; /* first element */ \
struct type **sqh_last; /* addr of last next element */ \
}
#define PMDK_SIMPLEQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).sqh_first }
#define PMDK_SIMPLEQ_ENTRY(type) \
struct { \
struct type *sqe_next; /* next element */ \
}
/*
* Simple queue functions.
*/
#define PMDK_SIMPLEQ_INIT(head) do { \
(head)->sqh_first = NULL; \
(head)->sqh_last = &(head)->sqh_first; \
} while (/*CONSTCOND*/0)
#define PMDK_SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
(head)->sqh_last = &(elm)->field.sqe_next; \
(head)->sqh_first = (elm); \
} while (/*CONSTCOND*/0)
#define PMDK_SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
(elm)->field.sqe_next = NULL; \
*(head)->sqh_last = (elm); \
(head)->sqh_last = &(elm)->field.sqe_next; \
} while (/*CONSTCOND*/0)
#define PMDK_SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
(head)->sqh_last = &(elm)->field.sqe_next; \
(listelm)->field.sqe_next = (elm); \
} while (/*CONSTCOND*/0)
#define PMDK_SIMPLEQ_REMOVE_HEAD(head, field) do { \
if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
(head)->sqh_last = &(head)->sqh_first; \
} while (/*CONSTCOND*/0)
#define PMDK_SIMPLEQ_REMOVE(head, elm, type, field) do { \
if ((head)->sqh_first == (elm)) { \
PMDK_SIMPLEQ_REMOVE_HEAD((head), field); \
} else { \
struct type *curelm = (head)->sqh_first; \
while (curelm->field.sqe_next != (elm)) \
curelm = curelm->field.sqe_next; \
if ((curelm->field.sqe_next = \
curelm->field.sqe_next->field.sqe_next) == NULL) \
(head)->sqh_last = &(curelm)->field.sqe_next; \
} \
} while (/*CONSTCOND*/0)
#define PMDK_SIMPLEQ_FOREACH(var, head, field) \
for ((var) = ((head)->sqh_first); \
(var); \
(var) = ((var)->field.sqe_next))
/*
* Simple queue access methods.
*/
#define PMDK_SIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL)
#define PMDK_SIMPLEQ_FIRST(head) ((head)->sqh_first)
#define PMDK_SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
/*
* Tail queue definitions.
*/
#define PMDK__TAILQ_HEAD(name, type, qual) \
struct name { \
qual type *tqh_first; /* first element */ \
qual type *qual *tqh_last; /* addr of last next element */ \
}
#define PMDK_TAILQ_HEAD(name, type) PMDK__TAILQ_HEAD(name, struct type,)
#define PMDK_TAILQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).tqh_first }
#define PMDK__TAILQ_ENTRY(type, qual) \
struct { \
qual type *tqe_next; /* next element */ \
qual type *qual *tqe_prev; /* address of previous next element */\
}
#define PMDK_TAILQ_ENTRY(type) PMDK__TAILQ_ENTRY(struct type,)
/*
* Tail queue functions.
*/
#define PMDK_TAILQ_INIT(head) do { \
(head)->tqh_first = NULL; \
(head)->tqh_last = &(head)->tqh_first; \
} while (/*CONSTCOND*/0)
#define PMDK_TAILQ_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
(head)->tqh_first->field.tqe_prev = \
&(elm)->field.tqe_next; \
else \
(head)->tqh_last = &(elm)->field.tqe_next; \
(head)->tqh_first = (elm); \
(elm)->field.tqe_prev = &(head)->tqh_first; \
} while (/*CONSTCOND*/0)
#define PMDK_TAILQ_INSERT_TAIL(head, elm, field) do { \
(elm)->field.tqe_next = NULL; \
(elm)->field.tqe_prev = (head)->tqh_last; \
*(head)->tqh_last = (elm); \
(head)->tqh_last = &(elm)->field.tqe_next; \
} while (/*CONSTCOND*/0)
#define PMDK_TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
(elm)->field.tqe_next->field.tqe_prev = \
&(elm)->field.tqe_next; \
else \
(head)->tqh_last = &(elm)->field.tqe_next; \
(listelm)->field.tqe_next = (elm); \
(elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
} while (/*CONSTCOND*/0)
#define PMDK_TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
(elm)->field.tqe_next = (listelm); \
*(listelm)->field.tqe_prev = (elm); \
(listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
} while (/*CONSTCOND*/0)
#define PMDK_TAILQ_REMOVE(head, elm, field) do { \
ANALYZER_ASSERT((elm) != NULL); \
if (((elm)->field.tqe_next) != NULL) \
(elm)->field.tqe_next->field.tqe_prev = \
(elm)->field.tqe_prev; \
else \
(head)->tqh_last = (elm)->field.tqe_prev; \
*(elm)->field.tqe_prev = (elm)->field.tqe_next; \
} while (/*CONSTCOND*/0)
#define PMDK_TAILQ_FOREACH(var, head, field) \
for ((var) = ((head)->tqh_first); \
(var); \
(var) = ((var)->field.tqe_next))
#define PMDK_TAILQ_FOREACH_REVERSE(var, head, headname, field) \
for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \
(var); \
(var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
#define PMDK_TAILQ_CONCAT(head1, head2, field) do { \
if (!PMDK_TAILQ_EMPTY(head2)) { \
*(head1)->tqh_last = (head2)->tqh_first; \
(head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
(head1)->tqh_last = (head2)->tqh_last; \
PMDK_TAILQ_INIT((head2)); \
} \
} while (/*CONSTCOND*/0)
/*
* Tail queue access methods.
*/
#define PMDK_TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
#define PMDK_TAILQ_FIRST(head) ((head)->tqh_first)
#define PMDK_TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
#define PMDK_TAILQ_LAST(head, headname) \
(*(((struct headname *)((head)->tqh_last))->tqh_last))
#define PMDK_TAILQ_PREV(elm, headname, field) \
(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
/*
* Circular queue definitions.
*/
#define PMDK_CIRCLEQ_HEAD(name, type) \
struct name { \
struct type *cqh_first; /* first element */ \
struct type *cqh_last; /* last element */ \
}
#define PMDK_CIRCLEQ_HEAD_INITIALIZER(head) \
{ (void *)&(head), (void *)&(head) }
#define PMDK_CIRCLEQ_ENTRY(type) \
struct { \
struct type *cqe_next; /* next element */ \
struct type *cqe_prev; /* previous element */ \
}
/*
* Circular queue functions.
*/
#define PMDK_CIRCLEQ_INIT(head) do { \
PMDK__CAST_AND_ASSIGN((head)->cqh_first, (head)); \
PMDK__CAST_AND_ASSIGN((head)->cqh_last, (head)); \
} while (/*CONSTCOND*/0)
#define PMDK_CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
(elm)->field.cqe_next = (listelm)->field.cqe_next; \
(elm)->field.cqe_prev = (listelm); \
if ((listelm)->field.cqe_next == (void *)(head)) \
(head)->cqh_last = (elm); \
else \
(listelm)->field.cqe_next->field.cqe_prev = (elm); \
(listelm)->field.cqe_next = (elm); \
} while (/*CONSTCOND*/0)
#define PMDK_CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
(elm)->field.cqe_next = (listelm); \
(elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
if ((listelm)->field.cqe_prev == (void *)(head)) \
(head)->cqh_first = (elm); \
else \
(listelm)->field.cqe_prev->field.cqe_next = (elm); \
(listelm)->field.cqe_prev = (elm); \
} while (/*CONSTCOND*/0)
#define PMDK_CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
(elm)->field.cqe_next = (head)->cqh_first; \
(elm)->field.cqe_prev = (void *)(head); \
if ((head)->cqh_last == (void *)(head)) \
(head)->cqh_last = (elm); \
else \
(head)->cqh_first->field.cqe_prev = (elm); \
(head)->cqh_first = (elm); \
} while (/*CONSTCOND*/0)
#define PMDK_CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
PMDK__CAST_AND_ASSIGN((elm)->field.cqe_next, (head)); \
(elm)->field.cqe_prev = (head)->cqh_last; \
if ((head)->cqh_first == (void *)(head)) \
(head)->cqh_first = (elm); \
else \
(head)->cqh_last->field.cqe_next = (elm); \
(head)->cqh_last = (elm); \
} while (/*CONSTCOND*/0)
#define PMDK_CIRCLEQ_REMOVE(head, elm, field) do { \
if ((elm)->field.cqe_next == (void *)(head)) \
(head)->cqh_last = (elm)->field.cqe_prev; \
else \
(elm)->field.cqe_next->field.cqe_prev = \
(elm)->field.cqe_prev; \
if ((elm)->field.cqe_prev == (void *)(head)) \
(head)->cqh_first = (elm)->field.cqe_next; \
else \
(elm)->field.cqe_prev->field.cqe_next = \
(elm)->field.cqe_next; \
} while (/*CONSTCOND*/0)
#define PMDK_CIRCLEQ_FOREACH(var, head, field) \
for ((var) = ((head)->cqh_first); \
(var) != (const void *)(head); \
(var) = ((var)->field.cqe_next))
#define PMDK_CIRCLEQ_FOREACH_REVERSE(var, head, field) \
for ((var) = ((head)->cqh_last); \
(var) != (const void *)(head); \
(var) = ((var)->field.cqe_prev))
/*
* Circular queue access methods.
*/
#define PMDK_CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head))
#define PMDK_CIRCLEQ_FIRST(head) ((head)->cqh_first)
#define PMDK_CIRCLEQ_LAST(head) ((head)->cqh_last)
#define PMDK_CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
#define PMDK_CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
#define PMDK_CIRCLEQ_LOOP_NEXT(head, elm, field) \
(((elm)->field.cqe_next == (void *)(head)) \
? ((head)->cqh_first) \
: ((elm)->field.cqe_next))
#define PMDK_CIRCLEQ_LOOP_PREV(head, elm, field) \
(((elm)->field.cqe_prev == (void *)(head)) \
? ((head)->cqh_last) \
: ((elm)->field.cqe_prev))
/*
* Sorted queue functions.
*/
#define PMDK_SORTEDQ_HEAD(name, type) PMDK_CIRCLEQ_HEAD(name, type)
#define PMDK_SORTEDQ_HEAD_INITIALIZER(head) PMDK_CIRCLEQ_HEAD_INITIALIZER(head)
#define PMDK_SORTEDQ_ENTRY(type) PMDK_CIRCLEQ_ENTRY(type)
#define PMDK_SORTEDQ_INIT(head) PMDK_CIRCLEQ_INIT(head)
#define PMDK_SORTEDQ_INSERT(head, elm, field, type, comparer) { \
type *_elm_it; \
for (_elm_it = (head)->cqh_first; \
((_elm_it != (void *)(head)) && \
(comparer(_elm_it, (elm)) < 0)); \
_elm_it = _elm_it->field.cqe_next) \
/*NOTHING*/; \
if (_elm_it == (void *)(head)) \
PMDK_CIRCLEQ_INSERT_TAIL(head, elm, field); \
else \
PMDK_CIRCLEQ_INSERT_BEFORE(head, _elm_it, elm, field); \
}
#define PMDK_SORTEDQ_REMOVE(head, elm, field) PMDK_CIRCLEQ_REMOVE(head, elm, field)
#define PMDK_SORTEDQ_FOREACH(var, head, field) PMDK_CIRCLEQ_FOREACH(var, head, field)
#define PMDK_SORTEDQ_FOREACH_REVERSE(var, head, field) \
PMDK_CIRCLEQ_FOREACH_REVERSE(var, head, field)
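/*
 * Editor's usage sketch for the sorted queue (names are hypothetical;
 * allocation of new_item is elided). The comparer receives two element
 * pointers and returns a value less than, equal to, or greater than
 * zero, strcmp-style; elements are kept in ascending comparer order:
 *
 *	struct item {
 *		int key;
 *		PMDK_SORTEDQ_ENTRY(item) entry;
 *	};
 *	PMDK_SORTEDQ_HEAD(item_head, item);
 *
 *	static int
 *	item_cmp(struct item *lhs, struct item *rhs)
 *	{
 *		return (lhs->key > rhs->key) - (lhs->key < rhs->key);
 *	}
 *
 *	struct item_head head;
 *	PMDK_SORTEDQ_INIT(&head);
 *	...
 *	PMDK_SORTEDQ_INSERT(&head, new_item, entry, struct item, item_cmp);
 */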
/*
* Sorted queue access methods.
*/
#define PMDK_SORTEDQ_EMPTY(head) PMDK_CIRCLEQ_EMPTY(head)
#define PMDK_SORTEDQ_FIRST(head) PMDK_CIRCLEQ_FIRST(head)
#define PMDK_SORTEDQ_LAST(head) PMDK_CIRCLEQ_LAST(head)
#define PMDK_SORTEDQ_NEXT(elm, field) PMDK_CIRCLEQ_NEXT(elm, field)
#define PMDK_SORTEDQ_PREV(elm, field) PMDK_CIRCLEQ_PREV(elm, field)
#endif /* _PMDK_QUEUE_H_ */
| 22,165 | 33.907087 | 85 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/common/set.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* set.h -- internal definitions for set module
*/
#ifndef PMDK_SET_H
#define PMDK_SET_H 1
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include "out.h"
#include "vec.h"
#include "pool_hdr.h"
#include "librpmem.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* pool sets & replicas
*/
#define POOLSET_HDR_SIG "PMEMPOOLSET"
#define POOLSET_HDR_SIG_LEN 11 /* does NOT include '\0' */
#define POOLSET_REPLICA_SIG "REPLICA"
#define POOLSET_REPLICA_SIG_LEN 7 /* does NOT include '\0' */
#define POOLSET_OPTION_SIG "OPTION"
#define POOLSET_OPTION_SIG_LEN 6 /* does NOT include '\0' */
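/*
 * Editor's note: these signatures come from the poolset descriptor file
 * format. A minimal sketch (sizes and paths are hypothetical; see the
 * poolset(5) man page for the authoritative syntax):
 *
 *	PMEMPOOLSET
 *	OPTION SINGLEHDR
 *	100G /mountpoint0/myfile.part0
 *	200G /mountpoint1/myfile.part1
 *	REPLICA
 *	400G /mountpoint2/myfile.part2
 */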
/* pool set option flags */
enum pool_set_option_flag {
OPTION_UNKNOWN = 0x0,
OPTION_SINGLEHDR = 0x1, /* pool headers only in the first part */
OPTION_NOHDRS = 0x2, /* no pool headers, remote replicas only */
};
struct pool_set_option {
const char *name;
enum pool_set_option_flag flag;
};
#define POOL_LOCAL 0
#define POOL_REMOTE 1
#define REPLICAS_DISABLED 0
#define REPLICAS_ENABLED 1
/* util_pool_open flags */
#define POOL_OPEN_COW 1 /* copy-on-write mode */
#define POOL_OPEN_IGNORE_SDS 2 /* ignore shutdown state */
#define POOL_OPEN_IGNORE_BAD_BLOCKS 4 /* ignore bad blocks */
#define POOL_OPEN_CHECK_BAD_BLOCKS 8 /* check bad blocks */
enum del_parts_mode {
DO_NOT_DELETE_PARTS, /* do not delete part files */
DELETE_CREATED_PARTS, /* delete only newly created parts files */
DELETE_ALL_PARTS /* force delete all parts files */
};
struct pool_set_part {
/* populated by a pool set file parser */
const char *path;
size_t filesize; /* aligned to page size */
int fd;
int flags; /* stores flags used when opening the file */
/* valid only if fd >= 0 */
int is_dev_dax; /* indicates if the part is on device dax */
size_t alignment; /* internal alignment (Device DAX only) */
int created; /* indicates newly created (zeroed) file */
/* util_poolset_open/create */
void *remote_hdr; /* allocated header for remote replica */
void *hdr; /* base address of header */
size_t hdrsize; /* size of the header mapping */
int hdr_map_sync; /* header mapped with MAP_SYNC */
void *addr; /* base address of the mapping */
size_t size; /* size of the mapping - page aligned */
int map_sync; /* part has been mapped with MAP_SYNC flag */
int rdonly; /* is set based on compat features, affects */
/* the whole poolset */
uuid_t uuid;
int has_bad_blocks; /* part file contains bad blocks */
int sds_dirty_modified; /* sds dirty flag was set */
};
struct pool_set_directory {
const char *path;
size_t resvsize; /* size of the address space reservation */
};
struct remote_replica {
void *rpp; /* RPMEMpool opaque handle */
char *node_addr; /* address of a remote node */
/* poolset descriptor is a pool set file name on a remote node */
char *pool_desc; /* descriptor of a poolset */
};
struct pool_replica {
unsigned nparts;
unsigned nallocated;
unsigned nhdrs; /* should be 0, 1 or nparts */
size_t repsize; /* total size of all the parts (mappings) */
size_t resvsize; /* min size of the address space reservation */
int is_pmem; /* true if all the parts are in PMEM */
struct remote_replica *remote; /* not NULL if the replica */
/* is a remote one */
VEC(, struct pool_set_directory) directory;
struct pool_set_part part[];
};
struct pool_set {
char *path; /* path of the poolset file */
unsigned nreplicas;
uuid_t uuid;
int rdonly;
int zeroed; /* true if all the parts are new files */
size_t poolsize; /* the smallest replica size */
int has_bad_blocks; /* pool set contains bad blocks */
int remote; /* true if contains a remote replica */
unsigned options; /* enabled pool set options */
int directory_based;
size_t resvsize;
unsigned next_id;
unsigned next_directory_id;
int ignore_sds; /* don't use shutdown state */
struct pool_replica *replica[];
};
struct part_file {
int is_remote;
/*
* Pointer to the part file structure -
* - not-NULL only for a local part file
*/
struct pool_set_part *part;
/*
* Pointer to the replica structure -
* - not-NULL only for a remote replica
*/
struct remote_replica *remote;
};
struct pool_attr {
char signature[POOL_HDR_SIG_LEN]; /* pool signature */
uint32_t major; /* format major version number */
features_t features; /* features flags */
unsigned char poolset_uuid[POOL_HDR_UUID_LEN]; /* pool uuid */
unsigned char first_part_uuid[POOL_HDR_UUID_LEN]; /* first part uuid */
unsigned char prev_repl_uuid[POOL_HDR_UUID_LEN]; /* prev replica uuid */
unsigned char next_repl_uuid[POOL_HDR_UUID_LEN]; /* next replica uuid */
unsigned char arch_flags[POOL_HDR_ARCH_LEN]; /* arch flags */
};
/* get index of the (r)th replica */
static inline unsigned
REPidx(const struct pool_set *set, unsigned r)
{
ASSERTne(set->nreplicas, 0);
return r % set->nreplicas;
}
/* get index of the (r + 1)th replica */
static inline unsigned
REPNidx(const struct pool_set *set, unsigned r)
{
ASSERTne(set->nreplicas, 0);
return (r + 1) % set->nreplicas;
}
/* get index of the (r - 1)th replica */
static inline unsigned
REPPidx(const struct pool_set *set, unsigned r)
{
ASSERTne(set->nreplicas, 0);
return (set->nreplicas + r - 1) % set->nreplicas;
}
/* get index of the (p)th part */
static inline unsigned
PARTidx(const struct pool_replica *rep, unsigned p)
{
ASSERTne(rep->nparts, 0);
return p % rep->nparts;
}
/* get index of the (p + 1)th part */
static inline unsigned
PARTNidx(const struct pool_replica *rep, unsigned p)
{
ASSERTne(rep->nparts, 0);
return (p + 1) % rep->nparts;
}
/* get index of the (p - 1)th part */
static inline unsigned
PARTPidx(const struct pool_replica *rep, unsigned p)
{
ASSERTne(rep->nparts, 0);
return (rep->nparts + p - 1) % rep->nparts;
}
/* get index of the (p)th header */
static inline unsigned
HDRidx(const struct pool_replica *rep, unsigned p)
{
ASSERTne(rep->nhdrs, 0);
return p % rep->nhdrs;
}
/* get index of the (p + 1)th header */
static inline unsigned
HDRNidx(const struct pool_replica *rep, unsigned p)
{
ASSERTne(rep->nhdrs, 0);
return (p + 1) % rep->nhdrs;
}
/* get index of the (p - 1)th header */
static inline unsigned
HDRPidx(const struct pool_replica *rep, unsigned p)
{
ASSERTne(rep->nhdrs, 0);
return (rep->nhdrs + p - 1) % rep->nhdrs;
}
/* get (r)th replica */
static inline struct pool_replica *
REP(const struct pool_set *set, unsigned r)
{
return set->replica[REPidx(set, r)];
}
/* get (r + 1)th replica */
static inline struct pool_replica *
REPN(const struct pool_set *set, unsigned r)
{
return set->replica[REPNidx(set, r)];
}
/* get (r - 1)th replica */
static inline struct pool_replica *
REPP(const struct pool_set *set, unsigned r)
{
return set->replica[REPPidx(set, r)];
}
/* get (p)th part */
static inline struct pool_set_part *
PART(struct pool_replica *rep, unsigned p)
{
return &rep->part[PARTidx(rep, p)];
}
/* get (p + 1)th part */
static inline struct pool_set_part *
PARTN(struct pool_replica *rep, unsigned p)
{
return &rep->part[PARTNidx(rep, p)];
}
/* get (p - 1)th part */
static inline struct pool_set_part *
PARTP(struct pool_replica *rep, unsigned p)
{
return &rep->part[PARTPidx(rep, p)];
}
/* get (p)th header */
static inline struct pool_hdr *
HDR(struct pool_replica *rep, unsigned p)
{
return (struct pool_hdr *)(rep->part[HDRidx(rep, p)].hdr);
}
/* get (p + 1)th header */
static inline struct pool_hdr *
HDRN(struct pool_replica *rep, unsigned p)
{
return (struct pool_hdr *)(rep->part[HDRNidx(rep, p)].hdr);
}
/* get (p - 1)th header */
static inline struct pool_hdr *
HDRP(struct pool_replica *rep, unsigned p)
{
return (struct pool_hdr *)(rep->part[HDRPidx(rep, p)].hdr);
}
extern int Prefault_at_open;
extern int Prefault_at_create;
extern int SDS_at_create;
extern int Fallocate_at_create;
extern int COW_at_open;
int util_poolset_parse(struct pool_set **setp, const char *path, int fd);
int util_poolset_read(struct pool_set **setp, const char *path);
int util_poolset_create_set(struct pool_set **setp, const char *path,
size_t poolsize, size_t minsize, int ignore_sds);
int util_poolset_open(struct pool_set *set);
void util_poolset_close(struct pool_set *set, enum del_parts_mode del);
void util_poolset_free(struct pool_set *set);
int util_poolset_chmod(struct pool_set *set, mode_t mode);
void util_poolset_fdclose(struct pool_set *set);
void util_poolset_fdclose_always(struct pool_set *set);
int util_is_poolset_file(const char *path);
int util_poolset_foreach_part_struct(struct pool_set *set,
int (*cb)(struct part_file *pf, void *arg), void *arg);
int util_poolset_foreach_part(const char *path,
int (*cb)(struct part_file *pf, void *arg), void *arg);
size_t util_poolset_size(const char *path);
int util_replica_deep_common(const void *addr, size_t len,
struct pool_set *set, unsigned replica_id, int flush);
int util_replica_deep_persist(const void *addr, size_t len,
struct pool_set *set, unsigned replica_id);
int util_replica_deep_drain(const void *addr, size_t len,
struct pool_set *set, unsigned replica_id);
int util_pool_create(struct pool_set **setp, const char *path, size_t poolsize,
size_t minsize, size_t minpartsize, const struct pool_attr *attr,
unsigned *nlanes, int can_have_rep);
int util_pool_create_uuids(struct pool_set **setp, const char *path,
size_t poolsize, size_t minsize, size_t minpartsize,
const struct pool_attr *attr, unsigned *nlanes, int can_have_rep,
int remote);
int util_part_open(struct pool_set_part *part, size_t minsize, int create_part);
void util_part_fdclose(struct pool_set_part *part);
int util_replica_open(struct pool_set *set, unsigned repidx, int flags);
int util_replica_set_attr(struct pool_replica *rep,
const struct rpmem_pool_attr *rattr);
void util_pool_hdr2attr(struct pool_attr *attr, struct pool_hdr *hdr);
void util_pool_attr2hdr(struct pool_hdr *hdr,
const struct pool_attr *attr);
int util_replica_close(struct pool_set *set, unsigned repidx);
int util_map_part(struct pool_set_part *part, void *addr, size_t size,
size_t offset, int flags, int rdonly);
int util_unmap_part(struct pool_set_part *part);
int util_unmap_parts(struct pool_replica *rep, unsigned start_index,
unsigned end_index);
int util_header_create(struct pool_set *set, unsigned repidx, unsigned partidx,
const struct pool_attr *attr, int overwrite);
int util_map_hdr(struct pool_set_part *part, int flags, int rdonly);
void util_unmap_hdr(struct pool_set_part *part);
int util_pool_has_device_dax(struct pool_set *set);
int util_pool_open_nocheck(struct pool_set *set, unsigned flags);
int util_pool_open(struct pool_set **setp, const char *path, size_t minpartsize,
const struct pool_attr *attr, unsigned *nlanes, void *addr,
unsigned flags);
int util_pool_open_remote(struct pool_set **setp, const char *path, int cow,
size_t minpartsize, struct rpmem_pool_attr *rattr);
void *util_pool_extend(struct pool_set *set, size_t *size, size_t minpartsize);
void util_remote_init(void);
void util_remote_fini(void);
int util_update_remote_header(struct pool_set *set, unsigned repn);
void util_remote_init_lock(void);
void util_remote_destroy_lock(void);
int util_pool_close_remote(RPMEMpool *rpp);
void util_remote_unload(void);
void util_replica_fdclose(struct pool_replica *rep);
int util_poolset_remote_open(struct pool_replica *rep, unsigned repidx,
size_t minsize, int create, void *pool_addr,
size_t pool_size, unsigned *nlanes);
int util_remote_load(void);
int util_replica_open_remote(struct pool_set *set, unsigned repidx, int flags);
int util_poolset_remote_replica_open(struct pool_set *set, unsigned repidx,
size_t minsize, int create, unsigned *nlanes);
int util_replica_close_local(struct pool_replica *rep, unsigned repn,
enum del_parts_mode del);
int util_replica_close_remote(struct pool_replica *rep, unsigned repn,
enum del_parts_mode del);
extern int (*Rpmem_persist)(RPMEMpool *rpp, size_t offset, size_t length,
unsigned lane, unsigned flags);
extern int (*Rpmem_deep_persist)(RPMEMpool *rpp, size_t offset, size_t length,
unsigned lane);
extern int (*Rpmem_read)(RPMEMpool *rpp, void *buff, size_t offset,
size_t length, unsigned lane);
extern int (*Rpmem_close)(RPMEMpool *rpp);
extern int (*Rpmem_remove)(const char *target,
const char *pool_set_name, int flags);
extern int (*Rpmem_set_attr)(RPMEMpool *rpp,
const struct rpmem_pool_attr *rattr);
#ifdef __cplusplus
}
#endif
#endif
| 14,145 | 31.077098 | 80 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/common/dlsym.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* dlsym.h -- dynamic linking utilities with library-specific implementation
*/
#ifndef PMDK_DLSYM_H
#define PMDK_DLSYM_H 1
#include "out.h"
#if defined(USE_LIBDL) && !defined(_WIN32)
#include <dlfcn.h>
/*
* util_dlopen -- calls real dlopen()
*/
static inline void *
util_dlopen(const char *filename)
{
LOG(3, "filename %s", filename);
return dlopen(filename, RTLD_NOW);
}
/*
* util_dlerror -- calls real dlerror()
*/
static inline char *
util_dlerror(void)
{
return dlerror();
}
/*
* util_dlsym -- calls real dlsym()
*/
static inline void *
util_dlsym(void *handle, const char *symbol)
{
LOG(3, "handle %p symbol %s", handle, symbol);
return dlsym(handle, symbol);
}
/*
* util_dlclose -- calls real dlclose()
*/
static inline int
util_dlclose(void *handle)
{
LOG(3, "handle %p", handle);
return dlclose(handle);
}
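/*
 * Editor's usage sketch (the library and symbol names are hypothetical):
 *
 *	void *handle = util_dlopen("libfoo.so.1");
 *	if (handle == NULL) {
 *		LOG(1, "dlopen: %s", util_dlerror());
 *	} else {
 *		int (*fn)(void) =
 *			(int (*)(void))util_dlsym(handle, "foo_init");
 *		if (fn != NULL)
 *			fn();
 *		util_dlclose(handle);
 *	}
 */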
#else /* empty functions */
/*
* util_dlopen -- empty function
*/
static inline void *
util_dlopen(const char *filename)
{
errno = ENOSYS;
return NULL;
}
/*
* util_dlerror -- empty function
*/
static inline char *
util_dlerror(void)
{
errno = ENOSYS;
return NULL;
}
/*
* util_dlsym -- empty function
*/
static inline void *
util_dlsym(void *handle, const char *symbol)
{
errno = ENOSYS;
return NULL;
}
/*
* util_dlclose -- empty function
*/
static inline int
util_dlclose(void *handle)
{
errno = ENOSYS;
return 0;
}
#endif
#endif
| 1,485 | 13.288462 | 76 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/common/sys_util.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* sys_util.h -- internal utility wrappers around system functions
*/
#ifndef PMDK_SYS_UTIL_H
#define PMDK_SYS_UTIL_H 1
#include <errno.h>
#include "os_thread.h"
#include "out.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* util_mutex_init -- os_mutex_init variant that never fails from
* caller perspective. If os_mutex_init failed, this function aborts
* the program.
*/
static inline void
util_mutex_init(os_mutex_t *m)
{
int tmp = os_mutex_init(m);
if (tmp) {
errno = tmp;
FATAL("!os_mutex_init");
}
}
/*
* util_mutex_destroy -- os_mutex_destroy variant that never fails from
* caller perspective. If os_mutex_destroy failed, this function aborts
* the program.
*/
static inline void
util_mutex_destroy(os_mutex_t *m)
{
int tmp = os_mutex_destroy(m);
if (tmp) {
errno = tmp;
FATAL("!os_mutex_destroy");
}
}
/*
* util_mutex_lock -- os_mutex_lock variant that never fails from
* caller perspective. If os_mutex_lock failed, this function aborts
* the program.
*/
static inline void
util_mutex_lock(os_mutex_t *m)
{
int tmp = os_mutex_lock(m);
if (tmp) {
errno = tmp;
FATAL("!os_mutex_lock");
}
}
/*
* util_mutex_trylock -- os_mutex_trylock variant that never fails from
* caller perspective (other than EBUSY). If util_mutex_trylock failed, this
* function aborts the program.
* Returns 0 if locked successfully, otherwise returns EBUSY.
*/
static inline int
util_mutex_trylock(os_mutex_t *m)
{
int tmp = os_mutex_trylock(m);
if (tmp && tmp != EBUSY) {
errno = tmp;
FATAL("!os_mutex_trylock");
}
return tmp;
}
/*
* util_mutex_unlock -- os_mutex_unlock variant that never fails from
* caller perspective. If os_mutex_unlock failed, this function aborts
* the program.
*/
static inline void
util_mutex_unlock(os_mutex_t *m)
{
int tmp = os_mutex_unlock(m);
if (tmp) {
errno = tmp;
FATAL("!os_mutex_unlock");
}
}
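/*
 * Editor's sketch of the intended usage: because every wrapper above is
 * fatal on failure, callers can skip per-call error handling entirely:
 *
 *	static os_mutex_t lock;
 *
 *	util_mutex_init(&lock);
 *	...
 *	util_mutex_lock(&lock);
 *	... touch shared state ...
 *	util_mutex_unlock(&lock);
 *	...
 *	util_mutex_destroy(&lock);
 */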
/*
* util_rwlock_init -- os_rwlock_init variant that never fails from
* caller perspective. If os_rwlock_init failed, this function aborts
* the program.
*/
static inline void
util_rwlock_init(os_rwlock_t *m)
{
int tmp = os_rwlock_init(m);
if (tmp) {
errno = tmp;
FATAL("!os_rwlock_init");
}
}
/*
* util_rwlock_rdlock -- os_rwlock_rdlock variant that never fails from
* caller perspective. If os_rwlock_rdlock failed, this function aborts
* the program.
*/
static inline void
util_rwlock_rdlock(os_rwlock_t *m)
{
int tmp = os_rwlock_rdlock(m);
if (tmp) {
errno = tmp;
FATAL("!os_rwlock_rdlock");
}
}
/*
* util_rwlock_wrlock -- os_rwlock_wrlock variant that never fails from
* caller perspective. If os_rwlock_wrlock failed, this function aborts
* the program.
*/
static inline void
util_rwlock_wrlock(os_rwlock_t *m)
{
int tmp = os_rwlock_wrlock(m);
if (tmp) {
errno = tmp;
FATAL("!os_rwlock_wrlock");
}
}
/*
* util_rwlock_unlock -- os_rwlock_unlock variant that never fails from
* caller perspective. If os_rwlock_unlock failed, this function aborts
* the program.
*/
static inline void
util_rwlock_unlock(os_rwlock_t *m)
{
int tmp = os_rwlock_unlock(m);
if (tmp) {
errno = tmp;
FATAL("!os_rwlock_unlock");
}
}
/*
* util_rwlock_destroy -- os_rwlock_destroy variant that never fails from
* caller perspective. If os_rwlock_destroy failed, this function aborts
* the program.
*/
static inline void
util_rwlock_destroy(os_rwlock_t *m)
{
int tmp = os_rwlock_destroy(m);
if (tmp) {
errno = tmp;
FATAL("!os_rwlock_destroy");
}
}
/*
* util_spin_init -- os_spin_init variant that logs on fail and sets errno.
*/
static inline int
util_spin_init(os_spinlock_t *lock, int pshared)
{
int tmp = os_spin_init(lock, pshared);
if (tmp) {
errno = tmp;
ERR("!os_spin_init");
}
return tmp;
}
/*
* util_spin_destroy -- os_spin_destroy variant that never fails from
* caller perspective. If os_spin_destroy failed, this function aborts
* the program.
*/
static inline void
util_spin_destroy(os_spinlock_t *lock)
{
int tmp = os_spin_destroy(lock);
if (tmp) {
errno = tmp;
FATAL("!os_spin_destroy");
}
}
/*
* util_spin_lock -- os_spin_lock variant that never fails from caller
* perspective. If os_spin_lock failed, this function aborts the program.
*/
static inline void
util_spin_lock(os_spinlock_t *lock)
{
int tmp = os_spin_lock(lock);
if (tmp) {
errno = tmp;
FATAL("!os_spin_lock");
}
}
/*
* util_spin_unlock -- os_spin_unlock variant that never fails
* from caller perspective. If os_spin_unlock failed,
* this function aborts the program.
*/
static inline void
util_spin_unlock(os_spinlock_t *lock)
{
int tmp = os_spin_unlock(lock);
if (tmp) {
errno = tmp;
FATAL("!os_spin_unlock");
}
}
/*
* util_semaphore_init -- os_semaphore_init variant that never fails
* from caller perspective. If os_semaphore_init failed,
* this function aborts the program.
*/
static inline void
util_semaphore_init(os_semaphore_t *sem, unsigned value)
{
if (os_semaphore_init(sem, value))
FATAL("!os_semaphore_init");
}
/*
* util_semaphore_destroy -- deletes a semaphore instance
*/
static inline void
util_semaphore_destroy(os_semaphore_t *sem)
{
if (os_semaphore_destroy(sem) != 0)
FATAL("!os_semaphore_destroy");
}
/*
* util_semaphore_wait -- decreases the value of the semaphore
*/
static inline void
util_semaphore_wait(os_semaphore_t *sem)
{
errno = 0;
int ret;
do {
ret = os_semaphore_wait(sem);
} while (errno == EINTR); /* signal interrupt */
if (ret != 0)
FATAL("!os_semaphore_wait");
}
/*
* util_semaphore_trywait -- tries to decrease the value of the semaphore
*/
static inline int
util_semaphore_trywait(os_semaphore_t *sem)
{
errno = 0;
int ret;
do {
ret = os_semaphore_trywait(sem);
} while (errno == EINTR); /* signal interrupt */
if (ret != 0 && errno != EAGAIN)
FATAL("!os_semaphore_trywait");
return ret;
}
/*
* util_semaphore_post -- increases the value of the semaphore
*/
static inline void
util_semaphore_post(os_semaphore_t *sem)
{
if (os_semaphore_post(sem) != 0)
FATAL("!os_semaphore_post");
}
static inline void
util_cond_init(os_cond_t *__restrict cond)
{
if (os_cond_init(cond))
FATAL("!os_cond_init");
}
static inline void
util_cond_destroy(os_cond_t *__restrict cond)
{
if (os_cond_destroy(cond))
FATAL("!os_cond_destroy");
}
#ifdef __cplusplus
}
#endif
#endif
| 6,387 | 19.21519 | 76 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/common/mmap.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* mmap.h -- internal definitions for mmap module
*/
#ifndef PMDK_MMAP_H
#define PMDK_MMAP_H 1
#include <stddef.h>
#include <stdint.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <errno.h>
#include "out.h"
#include "queue.h"
#include "os.h"
#ifdef __cplusplus
extern "C" {
#endif
extern int Mmap_no_random;
extern void *Mmap_hint;
extern char *Mmap_mapfile;
void *util_map_sync(void *addr, size_t len, int proto, int flags, int fd,
os_off_t offset, int *map_sync);
void *util_map(int fd, os_off_t off, size_t len, int flags, int rdonly,
size_t req_align, int *map_sync);
int util_unmap(void *addr, size_t len);
#ifdef __FreeBSD__
#define MAP_NORESERVE 0
#define OS_MAPFILE "/proc/curproc/map"
#else
#define OS_MAPFILE "/proc/self/maps"
#endif
#ifndef MAP_SYNC
#define MAP_SYNC 0x80000
#endif
#ifndef MAP_SHARED_VALIDATE
#define MAP_SHARED_VALIDATE 0x03
#endif
/*
* macros for micromanaging range protections for the debug version
*/
#ifdef DEBUG
#define RANGE(addr, len, is_dev_dax, type) do {\
if (!is_dev_dax) ASSERT(util_range_##type(addr, len) >= 0);\
} while (0)
#else
#define RANGE(addr, len, is_dev_dax, type) do {} while (0)
#endif
#define RANGE_RO(addr, len, is_dev_dax) RANGE(addr, len, is_dev_dax, ro)
#define RANGE_RW(addr, len, is_dev_dax) RANGE(addr, len, is_dev_dax, rw)
#define RANGE_NONE(addr, len, is_dev_dax) RANGE(addr, len, is_dev_dax, none)
/* pmem mapping type */
enum pmem_map_type {
PMEM_DEV_DAX, /* device dax */
PMEM_MAP_SYNC, /* mapping with MAP_SYNC flag on dax fs */
MAX_PMEM_TYPE
};
/*
* this structure tracks the file mappings outstanding per file handle
*/
struct map_tracker {
PMDK_SORTEDQ_ENTRY(map_tracker) entry;
uintptr_t base_addr;
uintptr_t end_addr;
unsigned region_id;
enum pmem_map_type type;
#ifdef _WIN32
/* Windows-specific data */
HANDLE FileHandle;
HANDLE FileMappingHandle;
DWORD Access;
os_off_t Offset;
size_t FileLen;
#endif
};
void util_mmap_init(void);
void util_mmap_fini(void);
int util_range_ro(void *addr, size_t len);
int util_range_rw(void *addr, size_t len);
int util_range_none(void *addr, size_t len);
char *util_map_hint_unused(void *minaddr, size_t len, size_t align);
char *util_map_hint(size_t len, size_t req_align);
#define KILOBYTE ((uintptr_t)1 << 10)
#define MEGABYTE ((uintptr_t)1 << 20)
#define GIGABYTE ((uintptr_t)1 << 30)
/*
* util_map_hint_align -- choose the desired mapping alignment
*
* The smallest supported alignment is 2 megabytes because of the object
* alignment requirements. Changing this value to 4 kilobytes constitutes a
* layout change.
*
* Use 1GB page alignment only if the mapping length is at least
* twice as big as the page size.
*/
static inline size_t
util_map_hint_align(size_t len, size_t req_align)
{
size_t align = 2 * MEGABYTE;
if (req_align)
align = req_align;
else if (len >= 2 * GIGABYTE)
align = GIGABYTE;
return align;
}
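/*
 * Editor's worked examples of the resulting alignment:
 *	len = 16 MiB, req_align = 0     -> 2 MiB (default)
 *	len = 4 GiB,  req_align = 0     -> 1 GiB (len >= 2 * GIGABYTE)
 *	len = 4 GiB,  req_align = 4 KiB -> 4 KiB (explicit request wins)
 */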
int util_range_register(const void *addr, size_t len, const char *path,
enum pmem_map_type type);
int util_range_unregister(const void *addr, size_t len);
struct map_tracker *util_range_find(uintptr_t addr, size_t len);
int util_range_is_pmem(const void *addr, size_t len);
#ifdef __cplusplus
}
#endif
#endif
| 3,328 | 22.27972 | 76 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/common/vecq.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
/*
* vecq.h -- vector queue (FIFO) interface
*/
#ifndef PMDK_VECQ_H
#define PMDK_VECQ_H 1
#include <stddef.h>
#include "util.h"
#include "out.h"
#include "alloc.h"
#ifdef __cplusplus
extern "C" {
#endif
#define VECQ_INIT_SIZE (64)
#define VECQ(name, type)\
struct name {\
type *buffer;\
size_t capacity;\
size_t front;\
size_t back;\
}
#define VECQ_INIT(vec) do {\
(vec)->buffer = NULL;\
(vec)->capacity = 0;\
(vec)->front = 0;\
(vec)->back = 0;\
} while (0)
#define VECQ_REINIT(vec) do {\
VALGRIND_ANNOTATE_NEW_MEMORY((vec), sizeof(*vec));\
VALGRIND_ANNOTATE_NEW_MEMORY((vec)->buffer,\
(sizeof(*(vec)->buffer) * ((vec)->capacity)));\
(vec)->front = 0;\
(vec)->back = 0;\
} while (0)
#define VECQ_FRONT_POS(vec)\
((vec)->front & ((vec)->capacity - 1))
#define VECQ_BACK_POS(vec)\
((vec)->back & ((vec)->capacity - 1))
#define VECQ_FRONT(vec)\
(vec)->buffer[VECQ_FRONT_POS(vec)]
#define VECQ_BACK(vec)\
(vec)->buffer[VECQ_BACK_POS(vec)]
#define VECQ_DEQUEUE(vec)\
((vec)->buffer[(((vec)->front++) & ((vec)->capacity - 1))])
#define VECQ_SIZE(vec)\
((vec)->back - (vec)->front)
static inline int
realloc_set(void **buf, size_t s)
{
void *tbuf = Realloc(*buf, s);
if (tbuf == NULL) {
ERR("!Realloc");
return -1;
}
*buf = tbuf;
return 0;
}
#define VECQ_NCAPACITY(vec)\
((vec)->capacity == 0 ? VECQ_INIT_SIZE : (vec)->capacity * 2)
#define VECQ_GROW(vec)\
(realloc_set((void **)&(vec)->buffer,\
VECQ_NCAPACITY(vec) * sizeof(*(vec)->buffer)) ? -1 :\
(memcpy((vec)->buffer + (vec)->capacity, (vec)->buffer,\
VECQ_FRONT_POS(vec) * sizeof(*(vec)->buffer)),\
(vec)->front = VECQ_FRONT_POS(vec),\
(vec)->back = (vec)->front + (vec)->capacity,\
(vec)->capacity = VECQ_NCAPACITY(vec),\
0\
))
#define VECQ_INSERT(vec, element)\
(VECQ_BACK(vec) = element, (vec)->back += 1, 0)
#define VECQ_ENQUEUE(vec, element)\
((vec)->capacity == VECQ_SIZE(vec) ?\
(VECQ_GROW(vec) == 0 ? VECQ_INSERT(vec, element) : -1) :\
VECQ_INSERT(vec, element))
#define VECQ_CAPACITY(vec)\
((vec)->capacity)
#define VECQ_FOREACH(el, vec)\
for (size_t _vec_i = 0;\
_vec_i < VECQ_SIZE(vec) &&\
(((el) = (vec)->buffer[_vec_i & ((vec)->capacity - 1)]), 1);\
++_vec_i)
#define VECQ_FOREACH_REVERSE(el, vec)\
for (size_t _vec_i = VECQ_SIZE(vec);\
_vec_i > 0 &&\
(((el) = (vec)->buffer[(_vec_i - 1) & ((vec)->capacity - 1)]), 1);\
--_vec_i)
#define VECQ_CLEAR(vec) do {\
(vec)->front = 0;\
(vec)->back = 0;\
} while (0)
#define VECQ_DELETE(vec) do {\
Free((vec)->buffer);\
(vec)->buffer = NULL;\
(vec)->capacity = 0;\
(vec)->front = 0;\
(vec)->back = 0;\
} while (0)
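/*
 * Editor's usage sketch (consume() and the abort() error policy are
 * hypothetical). The capacity stays a power of two, so the monotonically
 * growing front/back counters can be reduced with a simple mask:
 *
 *	VECQ(intq, int) q;
 *	VECQ_INIT(&q);
 *	if (VECQ_ENQUEUE(&q, 5) != 0)
 *		abort(); // allocation failure
 *	while (VECQ_SIZE(&q) != 0) {
 *		int v = VECQ_DEQUEUE(&q);
 *		consume(v);
 *	}
 *	VECQ_DELETE(&q);
 */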
#ifdef __cplusplus
}
#endif
#endif /* PMDK_VECQ_H */
| 2,731 | 20.178295 | 68 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/common/vec.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2019, Intel Corporation */
/*
* vec.h -- vector interface
*/
#ifndef PMDK_VEC_H
#define PMDK_VEC_H 1
#include <stddef.h>
#include "valgrind_internal.h"
#include "util.h"
#include "out.h"
#include "alloc.h"
#ifdef __cplusplus
extern "C" {
#endif
#define VEC_INIT_SIZE (64)
#define VEC(name, type)\
struct name {\
type *buffer;\
size_t size;\
size_t capacity;\
}
#define VEC_INITIALIZER {NULL, 0, 0}
#define VEC_INIT(vec) do {\
(vec)->buffer = NULL;\
(vec)->size = 0;\
(vec)->capacity = 0;\
} while (0)
#define VEC_MOVE(vecl, vecr) do {\
Free((vecl)->buffer);\
(vecl)->buffer = (vecr)->buffer;\
(vecl)->size = (vecr)->size;\
(vecl)->capacity = (vecr)->capacity;\
(vecr)->buffer = NULL;\
(vecr)->size = 0;\
(vecr)->capacity = 0;\
} while (0)
#define VEC_REINIT(vec) do {\
VALGRIND_ANNOTATE_NEW_MEMORY((vec), sizeof(*vec));\
VALGRIND_ANNOTATE_NEW_MEMORY((vec)->buffer,\
(sizeof(*(vec)->buffer) * ((vec)->capacity)));\
(vec)->size = 0;\
} while (0)
static inline int
vec_reserve(void *vec, size_t ncapacity, size_t s)
{
size_t ncap = ncapacity == 0 ? VEC_INIT_SIZE : ncapacity;
VEC(vvec, void) *vecp = (struct vvec *)vec;
void *tbuf = Realloc(vecp->buffer, s * ncap);
if (tbuf == NULL) {
ERR("!Realloc");
return -1;
}
vecp->buffer = tbuf;
vecp->capacity = ncap;
return 0;
}
#define VEC_RESERVE(vec, ncapacity)\
(((vec)->size == 0 || (ncapacity) > (vec)->size) ?\
vec_reserve((void *)vec, ncapacity, sizeof(*(vec)->buffer)) :\
0)
#define VEC_POP_BACK(vec) do {\
(vec)->size -= 1;\
} while (0)
#define VEC_FRONT(vec)\
(vec)->buffer[0]
#define VEC_BACK(vec)\
(vec)->buffer[(vec)->size - 1]
#define VEC_ERASE_BY_POS(vec, pos) do {\
if ((pos) != ((vec)->size - 1))\
(vec)->buffer[(pos)] = VEC_BACK(vec);\
VEC_POP_BACK(vec);\
} while (0)
#define VEC_ERASE_BY_PTR(vec, element) do {\
if ((element) != &VEC_BACK(vec))\
*(element) = VEC_BACK(vec);\
VEC_POP_BACK(vec);\
} while (0)
#define VEC_INSERT(vec, element)\
((vec)->buffer[(vec)->size - 1] = (element), 0)
#define VEC_INC_SIZE(vec)\
(((vec)->size++), 0)
#define VEC_INC_BACK(vec)\
((vec)->capacity == (vec)->size ?\
(VEC_RESERVE((vec), ((vec)->capacity * 2)) == 0 ?\
VEC_INC_SIZE(vec) : -1) :\
VEC_INC_SIZE(vec))
#define VEC_PUSH_BACK(vec, element)\
(VEC_INC_BACK(vec) == 0? VEC_INSERT(vec, element) : -1)
#define VEC_FOREACH(el, vec)\
for (size_t _vec_i = 0;\
_vec_i < (vec)->size && (((el) = (vec)->buffer[_vec_i]), 1);\
++_vec_i)
#define VEC_FOREACH_REVERSE(el, vec)\
for (size_t _vec_i = ((vec)->size);\
_vec_i != 0 && (((el) = (vec)->buffer[_vec_i - 1]), 1);\
--_vec_i)
#define VEC_FOREACH_BY_POS(elpos, vec)\
for ((elpos) = 0; (elpos) < (vec)->size; ++(elpos))
#define VEC_FOREACH_BY_PTR(el, vec)\
for (size_t _vec_i = 0;\
_vec_i < (vec)->size && (((el) = &(vec)->buffer[_vec_i]), 1);\
++_vec_i)
#define VEC_SIZE(vec)\
((vec)->size)
#define VEC_CAPACITY(vec)\
((vec)->capacity)
#define VEC_ARR(vec)\
((vec)->buffer)
#define VEC_GET(vec, id)\
(&(vec)->buffer[id])
#define VEC_CLEAR(vec) do {\
(vec)->size = 0;\
} while (0)
#define VEC_DELETE(vec) do {\
Free((vec)->buffer);\
(vec)->buffer = NULL;\
(vec)->size = 0;\
(vec)->capacity = 0;\
} while (0)
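/*
 * Editor's usage sketch (consume() and the abort() error policy are
 * hypothetical):
 *
 *	VEC(intvec, int) v = VEC_INITIALIZER;
 *	if (VEC_PUSH_BACK(&v, 5) != 0)
 *		abort(); // allocation failure
 *	int el;
 *	VEC_FOREACH(el, &v)
 *		consume(el);
 *	VEC_DELETE(&v);
 */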
#ifdef __cplusplus
}
#endif
#endif /* PMDK_VEC_H */
| 3,300 | 19.892405 | 63 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/common/rand.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* rand.h -- random utils
*/
#ifndef RAND_H
#define RAND_H 1
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef uint64_t rng_t[4];
uint64_t hash64(uint64_t x);
uint64_t rnd64_r(rng_t *rng);
void randomize_r(rng_t *rng, uint64_t seed);
uint64_t rnd64(void);
void randomize(uint64_t seed);
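/*
 * Editor's usage sketch: either seed the global generator and call
 * rnd64(), or keep a private rng_t for reentrant, per-thread use
 * (12345 is an arbitrary example seed):
 *
 *	rng_t rng;
 *	randomize_r(&rng, 12345);
 *	uint64_t r = rnd64_r(&rng);
 */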
#ifdef __cplusplus
}
#endif
#endif
| 432 | 13.433333 | 44 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/common/ravl.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
/*
* ravl.h -- internal definitions for ravl tree
*/
#ifndef LIBPMEMOBJ_RAVL_H
#define LIBPMEMOBJ_RAVL_H 1
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
struct ravl;
struct ravl_node;
enum ravl_predicate {
RAVL_PREDICATE_EQUAL = 1 << 0,
RAVL_PREDICATE_GREATER = 1 << 1,
RAVL_PREDICATE_LESS = 1 << 2,
RAVL_PREDICATE_LESS_EQUAL =
RAVL_PREDICATE_EQUAL | RAVL_PREDICATE_LESS,
RAVL_PREDICATE_GREATER_EQUAL =
RAVL_PREDICATE_EQUAL | RAVL_PREDICATE_GREATER,
};
typedef int ravl_compare(const void *lhs, const void *rhs);
typedef void ravl_cb(void *data, void *arg);
typedef void ravl_constr(void *data, size_t data_size, const void *arg);
struct ravl *ravl_new(ravl_compare *compare);
struct ravl *ravl_new_sized(ravl_compare *compare, size_t data_size);
void ravl_delete(struct ravl *ravl);
void ravl_delete_cb(struct ravl *ravl, ravl_cb cb, void *arg);
void ravl_foreach(struct ravl *ravl, ravl_cb cb, void *arg);
int ravl_empty(struct ravl *ravl);
void ravl_clear(struct ravl *ravl);
int ravl_insert(struct ravl *ravl, const void *data);
int ravl_emplace(struct ravl *ravl, ravl_constr constr, const void *arg);
int ravl_emplace_copy(struct ravl *ravl, const void *data);
struct ravl_node *ravl_find(struct ravl *ravl, const void *data,
enum ravl_predicate predicate_flags);
void *ravl_data(struct ravl_node *node);
void ravl_remove(struct ravl *ravl, struct ravl_node *node);
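/*
 * Editor's usage sketch (names are hypothetical). The compare callback
 * is strcmp-style; the predicate flags select an exact or nearest match:
 *
 *	static int
 *	cmp(const void *lhs, const void *rhs)
 *	{
 *		int l = *(const int *)lhs;
 *		int r = *(const int *)rhs;
 *		return (l > r) - (l < r);
 *	}
 *
 *	struct ravl *tree = ravl_new_sized(cmp, sizeof(int));
 *	int key = 7;
 *	ravl_emplace_copy(tree, &key);
 *	struct ravl_node *n =
 *		ravl_find(tree, &key, RAVL_PREDICATE_GREATER_EQUAL);
 *	if (n != NULL)
 *		key = *(int *)ravl_data(n);
 *	ravl_delete(tree);
 */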
#ifdef __cplusplus
}
#endif
#endif /* LIBPMEMOBJ_RAVL_H */
| 1,556 | 27.309091 | 73 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/common/pool_hdr.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* pool_hdr.h -- internal definitions for pool header module
*/
#ifndef PMDK_POOL_HDR_H
#define PMDK_POOL_HDR_H 1
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include "uuid.h"
#include "shutdown_state.h"
#include "util.h"
#include "page_size.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* Number of bits per type in alignment descriptor
*/
#define ALIGNMENT_DESC_BITS 4
/*
* architecture identification flags
*
* These flags allow to unambiguously determine the architecture
* on which the pool was created.
*
* The alignment_desc field contains information about alignment
* of the following basic types:
* - char
* - short
* - int
* - long
* - long long
* - size_t
* - os_off_t
* - float
* - double
* - long double
* - void *
*
* The alignment of each type is computed as an offset of field
* of specific type in the following structure:
* struct {
* char byte;
* type field;
* };
*
* The value is decremented by 1 and masked by 4 bits.
* Multiple alignments are stored on consecutive 4 bits of each
* type in the order specified above.
*
* The values used in the machine, and machine_class fields are in
* principle independent of operating systems, and object formats.
* In practice they happen to match constants used in ELF object headers.
*/
struct arch_flags {
uint64_t alignment_desc; /* alignment descriptor */
uint8_t machine_class; /* address size -- 64 bit or 32 bit */
uint8_t data; /* data encoding -- LE or BE */
uint8_t reserved[4];
uint16_t machine; /* required architecture */
};
#define POOL_HDR_ARCH_LEN sizeof(struct arch_flags)
/* possible values of the machine class field in the above struct */
#define PMDK_MACHINE_CLASS_64 2 /* 64 bit pointers, 64 bit size_t */
/* possible values of the machine field in the above struct */
#define PMDK_MACHINE_X86_64 62
#define PMDK_MACHINE_AARCH64 183
#define PMDK_MACHINE_PPC64 21
/* possible values of the data field in the above struct */
#define PMDK_DATA_LE 1 /* 2's complement, little endian */
#define PMDK_DATA_BE 2 /* 2's complement, big endian */
/*
* features flags
*/
typedef struct {
uint32_t compat; /* mask: compatible "may" features */
uint32_t incompat; /* mask: "must support" features */
uint32_t ro_compat; /* mask: force RO if unsupported */
} features_t;
/*
* header used at the beginning of all types of memory pools
*
* for pools build on persistent memory, the integer types
* below are stored in little-endian byte order.
*/
#define POOL_HDR_SIG_LEN 8
#define POOL_HDR_UNUSED_SIZE 1904
#define POOL_HDR_UNUSED2_SIZE 1976
#define POOL_HDR_ALIGN_PAD (PMEM_PAGESIZE - 4096)
struct pool_hdr {
char signature[POOL_HDR_SIG_LEN];
uint32_t major; /* format major version number */
features_t features; /* features flags */
uuid_t poolset_uuid; /* pool set UUID */
uuid_t uuid; /* UUID of this file */
uuid_t prev_part_uuid; /* prev part */
uuid_t next_part_uuid; /* next part */
uuid_t prev_repl_uuid; /* prev replica */
uuid_t next_repl_uuid; /* next replica */
uint64_t crtime; /* when created (seconds since epoch) */
struct arch_flags arch_flags; /* architecture identification flags */
unsigned char unused[POOL_HDR_UNUSED_SIZE]; /* must be zero */
/* not checksummed */
unsigned char unused2[POOL_HDR_UNUSED2_SIZE]; /* must be zero */
struct shutdown_state sds; /* shutdown status */
uint64_t checksum; /* checksum of above fields */
#if PMEM_PAGESIZE > 4096 /* prevent zero size array */
unsigned char align_pad[POOL_HDR_ALIGN_PAD]; /* alignment pad */
#endif
};
#define POOL_HDR_SIZE (sizeof(struct pool_hdr))
#define POOL_DESC_SIZE PMEM_PAGESIZE
void util_convert2le_hdr(struct pool_hdr *hdrp);
void util_convert2h_hdr_nocheck(struct pool_hdr *hdrp);
void util_get_arch_flags(struct arch_flags *arch_flags);
int util_check_arch_flags(const struct arch_flags *arch_flags);
features_t util_get_unknown_features(features_t features, features_t known);
int util_feature_check(struct pool_hdr *hdrp, features_t features);
int util_feature_cmp(features_t features, features_t ref);
int util_feature_is_zero(features_t features);
int util_feature_is_set(features_t features, features_t flag);
void util_feature_enable(features_t *features, features_t new_feature);
void util_feature_disable(features_t *features, features_t new_feature);
const char *util_feature2str(features_t feature, features_t *found);
features_t util_str2feature(const char *str);
uint32_t util_str2pmempool_feature(const char *str);
uint32_t util_feature2pmempool_feature(features_t feat);
/*
* set of macros for determining the alignment descriptor
*/
#define DESC_MASK ((1 << ALIGNMENT_DESC_BITS) - 1)
#define alignment_of(t) offsetof(struct { char c; t x; }, x)
#define alignment_desc_of(t) (((uint64_t)alignment_of(t) - 1) & DESC_MASK)
#define alignment_desc()\
(alignment_desc_of(char) << 0 * ALIGNMENT_DESC_BITS) |\
(alignment_desc_of(short) << 1 * ALIGNMENT_DESC_BITS) |\
(alignment_desc_of(int) << 2 * ALIGNMENT_DESC_BITS) |\
(alignment_desc_of(long) << 3 * ALIGNMENT_DESC_BITS) |\
(alignment_desc_of(long long) << 4 * ALIGNMENT_DESC_BITS) |\
(alignment_desc_of(size_t) << 5 * ALIGNMENT_DESC_BITS) |\
(alignment_desc_of(off_t) << 6 * ALIGNMENT_DESC_BITS) |\
(alignment_desc_of(float) << 7 * ALIGNMENT_DESC_BITS) |\
(alignment_desc_of(double) << 8 * ALIGNMENT_DESC_BITS) |\
(alignment_desc_of(long double) << 9 * ALIGNMENT_DESC_BITS) |\
(alignment_desc_of(void *) << 10 * ALIGNMENT_DESC_BITS)
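/*
 * Editor's worked example: on a typical LP64 target alignment_of(int)
 * is 4, so alignment_desc_of(int) == (4 - 1) & DESC_MASK == 3, and the
 * value lands in bits 8..11 of the descriptor (slot 2, four bits per
 * type in the order listed above).
 */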
#define POOL_FEAT_ZERO 0x0000U
static const features_t features_zero =
{POOL_FEAT_ZERO, POOL_FEAT_ZERO, POOL_FEAT_ZERO};
/*
* compat features
*/
#define POOL_FEAT_CHECK_BAD_BLOCKS 0x0001U /* check bad blocks in a pool */
#define POOL_FEAT_COMPAT_ALL \
(POOL_FEAT_CHECK_BAD_BLOCKS)
#define FEAT_COMPAT(X) \
{POOL_FEAT_##X, POOL_FEAT_ZERO, POOL_FEAT_ZERO}
/*
* incompat features
*/
#define POOL_FEAT_SINGLEHDR 0x0001U /* pool header only in the first part */
#define POOL_FEAT_CKSUM_2K 0x0002U /* only first 2K of hdr checksummed */
#define POOL_FEAT_SDS 0x0004U /* check shutdown state */
#define POOL_FEAT_INCOMPAT_ALL \
(POOL_FEAT_SINGLEHDR | POOL_FEAT_CKSUM_2K | POOL_FEAT_SDS)
/*
* incompat features effective values (if applicable)
*/
#ifdef SDS_ENABLED
#define POOL_E_FEAT_SDS POOL_FEAT_SDS
#else
#define POOL_E_FEAT_SDS 0x0000U /* empty */
#endif
#define POOL_FEAT_COMPAT_VALID \
(POOL_FEAT_CHECK_BAD_BLOCKS)
#define POOL_FEAT_INCOMPAT_VALID \
(POOL_FEAT_SINGLEHDR | POOL_FEAT_CKSUM_2K | POOL_E_FEAT_SDS)
#if defined(_WIN32) || NDCTL_ENABLED
#define POOL_FEAT_INCOMPAT_DEFAULT \
(POOL_FEAT_CKSUM_2K | POOL_E_FEAT_SDS)
#else
/*
* shutdown state support on Linux requires root access on kernel < 4.20 with
* ndctl < 63 so it is disabled by default
*/
#define POOL_FEAT_INCOMPAT_DEFAULT \
(POOL_FEAT_CKSUM_2K)
#endif
#if NDCTL_ENABLED
#define POOL_FEAT_COMPAT_DEFAULT \
(POOL_FEAT_CHECK_BAD_BLOCKS)
#else
#define POOL_FEAT_COMPAT_DEFAULT \
(POOL_FEAT_ZERO)
#endif
#define FEAT_INCOMPAT(X) \
{POOL_FEAT_ZERO, POOL_FEAT_##X, POOL_FEAT_ZERO}
#define POOL_FEAT_VALID \
{POOL_FEAT_COMPAT_VALID, POOL_FEAT_INCOMPAT_VALID, POOL_FEAT_ZERO}
/*
* defines the first not checksummed field - all fields after this will be
* ignored during checksum calculations.
*/
#define POOL_HDR_CSUM_2K_END_OFF offsetof(struct pool_hdr, unused2)
#define POOL_HDR_CSUM_4K_END_OFF offsetof(struct pool_hdr, checksum)
/*
* pick the first not checksummed field. 2K variant is used if
* POOL_FEAT_CKSUM_2K incompat feature is set.
*/
#define POOL_HDR_CSUM_END_OFF(hdrp) \
((hdrp)->features.incompat & POOL_FEAT_CKSUM_2K) \
? POOL_HDR_CSUM_2K_END_OFF : POOL_HDR_CSUM_4K_END_OFF
/* ignore shutdown state if incompat feature is disabled */
#define IGNORE_SDS(hdrp) \
(((hdrp) != NULL) && (((hdrp)->features.incompat & POOL_FEAT_SDS) == 0))
#ifdef __cplusplus
}
#endif
#endif
| 7,980 | 29.696154 | 77 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/ex_common.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* ex_common.h -- examples utilities
*/
#ifndef EX_COMMON_H
#define EX_COMMON_H
#include <stdint.h>
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _WIN32
#include <unistd.h>
#define CREATE_MODE_RW (S_IWUSR | S_IRUSR)
/*
* file_exists -- checks if file exists
*/
static inline int
file_exists(char const *file)
{
return access(file, F_OK);
}
/*
 * find_last_set_64 -- returns the last set bit position or -1 if no bit is set
*/
static inline int
find_last_set_64(uint64_t val)
{
	if (val == 0) /* __builtin_clzll(0) is undefined */
		return -1;
	return 64 - __builtin_clzll(val) - 1;
}
#else
#include <windows.h>
#include <corecrt_io.h>
#include <process.h>
#define CREATE_MODE_RW (S_IWRITE | S_IREAD)
/*
* file_exists -- checks if file exists
*/
static inline int
file_exists(char const *file)
{
return _access(file, 0);
}
/*
 * find_last_set_64 -- returns the last set bit position or -1 if no bit is set
*/
static inline int
find_last_set_64(uint64_t val)
{
DWORD lz = 0;
if (BitScanReverse64(&lz, val))
return (int)lz;
else
return -1;
}
#endif
#ifdef __cplusplus
}
#endif
#endif /* ex_common.h */
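/*
 * Usage sketch (added for illustration; the path and value below are
 * hypothetical, not part of the original header):
 */
#ifdef EX_COMMON_USAGE_SKETCH
static inline void
ex_common_usage(void)
{
	if (file_exists("/mnt/pmem/testfile") != 0) {
		/* file is absent -- a pool could be created here */
	}
	int msb = find_last_set_64(0x10ULL); /* highest set bit is bit 4 */
	(void) msb;
}
#endif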
| 1,199 | 14.584416 | 79 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemlog/logfile/logentry.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* info prepended to each log entry...
*/
struct logentry {
size_t len; /* length of the rest of the log entry */
time_t timestamp;
#ifndef _WIN32
pid_t pid;
#else
int pid;
#endif
};
| 280 | 15.529412 | 55 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemblk/assetdb/asset.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
#define ASSET_NAME_MAX 256
#define ASSET_USER_NAME_MAX 64
#define ASSET_CHECKED_OUT 2
#define ASSET_FREE 1
struct asset {
char name[ASSET_NAME_MAX];
char user[ASSET_USER_NAME_MAX];
time_t time;
int state;
};
| 300 | 19.066667 | 44 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/slab_allocator/slab_allocator.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* slab_allocator.h -- slab-like mechanism for libpmemobj
*/
#ifndef SLAB_ALLOCATOR_H
#define SLAB_ALLOCATOR_H
#include <libpmemobj.h>
struct slab_allocator;
struct slab_allocator *slab_new(PMEMobjpool *pop, size_t size);
void slab_delete(struct slab_allocator *slab);
int slab_alloc(struct slab_allocator *slab, PMEMoid *oid,
pmemobj_constr constructor, void *arg);
PMEMoid slab_tx_alloc(struct slab_allocator *slab);
#endif /* SLAB_ALLOCATOR_H */
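/*
 * Usage sketch (added for illustration; the pool handle and object size
 * are hypothetical, not part of the original header):
 */
#ifdef SLAB_ALLOCATOR_USAGE_SKETCH
static void
slab_usage(PMEMobjpool *pop)
{
	struct slab_allocator *slab = slab_new(pop, 64); /* 64-byte objects */
	if (slab == NULL)
		return;
	PMEMoid oid;
	if (slab_alloc(slab, &oid, NULL, NULL) == 0) {
		/* oid now references a 64-byte slab-backed allocation */
	}
	slab_delete(slab);
}
#endif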
| 542 | 22.608696 | 63 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/string_store_tx_type/layout.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* layout.h -- example from introduction part 3
*/
#define MAX_BUF_LEN 10
POBJ_LAYOUT_BEGIN(string_store);
POBJ_LAYOUT_ROOT(string_store, struct my_root);
POBJ_LAYOUT_END(string_store);
struct my_root {
char buf[MAX_BUF_LEN];
};
| 324 | 18.117647 | 47 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/list_map/skiplist_map.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* skiplist_map.h -- sorted list collection implementation
*/
#ifndef SKIPLIST_MAP_H
#define SKIPLIST_MAP_H
#include <libpmemobj.h>
#ifndef SKIPLIST_MAP_TYPE_OFFSET
#define SKIPLIST_MAP_TYPE_OFFSET 2020
#endif
struct skiplist_map_node;
TOID_DECLARE(struct skiplist_map_node, SKIPLIST_MAP_TYPE_OFFSET + 0);
int skiplist_map_check(PMEMobjpool *pop, TOID(struct skiplist_map_node) map);
int skiplist_map_create(PMEMobjpool *pop, TOID(struct skiplist_map_node) *map,
void *arg);
int skiplist_map_destroy(PMEMobjpool *pop, TOID(struct skiplist_map_node) *map);
int skiplist_map_insert(PMEMobjpool *pop, TOID(struct skiplist_map_node) map,
uint64_t key, PMEMoid value);
int skiplist_map_insert_new(PMEMobjpool *pop,
TOID(struct skiplist_map_node) map, uint64_t key, size_t size,
unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid skiplist_map_remove(PMEMobjpool *pop,
TOID(struct skiplist_map_node) map, uint64_t key);
int skiplist_map_remove_free(PMEMobjpool *pop,
TOID(struct skiplist_map_node) map, uint64_t key);
int skiplist_map_clear(PMEMobjpool *pop, TOID(struct skiplist_map_node) map);
PMEMoid skiplist_map_get(PMEMobjpool *pop, TOID(struct skiplist_map_node) map,
uint64_t key);
int skiplist_map_lookup(PMEMobjpool *pop, TOID(struct skiplist_map_node) map,
uint64_t key);
int skiplist_map_foreach(PMEMobjpool *pop, TOID(struct skiplist_map_node) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg);
int skiplist_map_is_empty(PMEMobjpool *pop, TOID(struct skiplist_map_node) map);
#endif /* SKIPLIST_MAP_H */
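/*
 * Usage sketch (added for illustration; assumes an open pmemobj pool and a
 * hypothetical key, not part of the original header):
 */
#ifdef SKIPLIST_MAP_USAGE_SKETCH
static void
skiplist_usage(PMEMobjpool *pop)
{
	TOID(struct skiplist_map_node) map;
	if (skiplist_map_create(pop, &map, NULL) != 0)
		return;
	skiplist_map_insert(pop, map, 42, OID_NULL);
	if (skiplist_map_lookup(pop, map, 42))
		skiplist_map_remove_free(pop, map, 42);
	skiplist_map_destroy(pop, &map);
}
#endif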
| 1,688 | 36.533333 | 80 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/hashmap/hashmap.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
#ifndef HASHMAP_H
#define HASHMAP_H
/* common API provided by both implementations */
#include <stddef.h>
#include <stdint.h>
struct hashmap_args {
uint32_t seed;
};
enum hashmap_cmd {
HASHMAP_CMD_REBUILD,
HASHMAP_CMD_DEBUG,
};
#endif /* HASHMAP_H */
| 345 | 15.47619 | 49 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/hashmap/hashmap_tx.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
#ifndef HASHMAP_TX_H
#define HASHMAP_TX_H
#include <stddef.h>
#include <stdint.h>
#include <hashmap.h>
#include <libpmemobj.h>
#ifndef HASHMAP_TX_TYPE_OFFSET
#define HASHMAP_TX_TYPE_OFFSET 1004
#endif
struct hashmap_tx;
TOID_DECLARE(struct hashmap_tx, HASHMAP_TX_TYPE_OFFSET + 0);
int hm_tx_check(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap);
int hm_tx_create(PMEMobjpool *pop, TOID(struct hashmap_tx) *map, void *arg);
int hm_tx_init(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap);
int hm_tx_insert(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap,
uint64_t key, PMEMoid value);
PMEMoid hm_tx_remove(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap,
uint64_t key);
PMEMoid hm_tx_get(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap,
uint64_t key);
int hm_tx_lookup(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap,
uint64_t key);
int hm_tx_foreach(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg);
size_t hm_tx_count(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap);
int hm_tx_cmd(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap,
unsigned cmd, uint64_t arg);
#endif /* HASHMAP_TX_H */
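/*
 * Usage sketch (added for illustration; the seed and key values are
 * hypothetical, not part of the original header):
 */
#ifdef HASHMAP_TX_USAGE_SKETCH
static void
hm_tx_usage(PMEMobjpool *pop)
{
	TOID(struct hashmap_tx) map;
	struct hashmap_args args = { 1 }; /* hash seed, see hashmap.h */
	if (hm_tx_create(pop, &map, &args) != 0)
		return;
	hm_tx_insert(pop, map, 42, OID_NULL);
	PMEMoid val = hm_tx_get(pop, map, 42); /* OID_NULL if absent */
	(void) val;
}
#endif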
| 1,270 | 34.305556 | 76 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/hashmap/hashmap_rp.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
#ifndef HASHMAP_RP_H
#define HASHMAP_RP_H
#include <stddef.h>
#include <stdint.h>
#include <hashmap.h>
#include <libpmemobj.h>
#ifndef HASHMAP_RP_TYPE_OFFSET
#define HASHMAP_RP_TYPE_OFFSET 1008
#endif
/* Flags to indicate if insertion is being made during rebuild process */
#define HASHMAP_RP_REBUILD 1
#define HASHMAP_RP_NO_REBUILD 0
/* Initial number of entries for hashmap_rp */
#define INIT_ENTRIES_NUM_RP 16
/* Load factor to indicate resize threshold */
#define HASHMAP_RP_LOAD_FACTOR 0.5f
/* Maximum number of swaps allowed during single insertion */
#define HASHMAP_RP_MAX_SWAPS 150
/* Size of an action array used during single insertion */
#define HASHMAP_RP_MAX_ACTIONS (4 * HASHMAP_RP_MAX_SWAPS + 5)
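/*
 * Worked example (added for illustration): with INIT_ENTRIES_NUM_RP == 16
 * and HASHMAP_RP_LOAD_FACTOR == 0.5f, the initial table is resized once it
 * holds 16 * 0.5 = 8 entries, and a single insertion may queue at most
 * 4 * 150 + 5 == 605 actions in its action array.
 */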
struct hashmap_rp;
TOID_DECLARE(struct hashmap_rp, HASHMAP_RP_TYPE_OFFSET + 0);
int hm_rp_check(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap);
int hm_rp_create(PMEMobjpool *pop, TOID(struct hashmap_rp) *map, void *arg);
int hm_rp_init(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap);
int hm_rp_insert(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
uint64_t key, PMEMoid value);
PMEMoid hm_rp_remove(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
uint64_t key);
PMEMoid hm_rp_get(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
uint64_t key);
int hm_rp_lookup(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
uint64_t key);
int hm_rp_foreach(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg);
size_t hm_rp_count(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap);
int hm_rp_cmd(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
unsigned cmd, uint64_t arg);
#endif /* HASHMAP_RP_H */
| 1,780 | 36.104167 | 76 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/hashmap/hashmap_internal.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
#ifndef HASHSET_INTERNAL_H
#define HASHSET_INTERNAL_H
/* large prime number used as a hashing function coefficient */
#define HASH_FUNC_COEFF_P 32212254719ULL
/* initial number of buckets */
#define INIT_BUCKETS_NUM 10
/* number of values in a bucket which triggers a hashtable rebuild check */
#define MIN_HASHSET_THRESHOLD 5
/* number of values in a bucket which forces a hashtable rebuild */
#define MAX_HASHSET_THRESHOLD 10
#endif
| 521 | 25.1 | 72 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/hashmap/hashmap_atomic.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
#ifndef HASHMAP_ATOMIC_H
#define HASHMAP_ATOMIC_H
#include <stddef.h>
#include <stdint.h>
#include <hashmap.h>
#include <libpmemobj.h>
#ifndef HASHMAP_ATOMIC_TYPE_OFFSET
#define HASHMAP_ATOMIC_TYPE_OFFSET 1000
#endif
struct hashmap_atomic;
TOID_DECLARE(struct hashmap_atomic, HASHMAP_ATOMIC_TYPE_OFFSET + 0);
int hm_atomic_check(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap);
int hm_atomic_create(PMEMobjpool *pop, TOID(struct hashmap_atomic) *map,
void *arg);
int hm_atomic_init(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap);
int hm_atomic_insert(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
uint64_t key, PMEMoid value);
PMEMoid hm_atomic_remove(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
uint64_t key);
PMEMoid hm_atomic_get(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
uint64_t key);
int hm_atomic_lookup(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
uint64_t key);
int hm_atomic_foreach(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg);
size_t hm_atomic_count(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap);
int hm_atomic_cmd(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
unsigned cmd, uint64_t arg);
#endif /* HASHMAP_ATOMIC_H */
| 1,384 | 36.432432 | 79 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/libart/arttree_structures.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ===========================================================================
*
* Filename: arttree_structures.h
*
* Description: known structures of the ART tree
*
* Author: Andreas Bluemle, Dieter Kasper
* [email protected]
* [email protected]
*
* Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* ===========================================================================
*/
#ifndef _ARTTREE_STRUCTURES_H
#define _ARTTREE_STRUCTURES_H
#define MAX_PREFIX_LEN 10
/*
* pmem_context -- structure for pmempool file
*/
struct pmem_context {
char *filename;
size_t psize;
int fd;
char *addr;
uint64_t art_tree_root_offset;
};
struct _art_node_u; typedef struct _art_node_u art_node_u;
struct _art_node; typedef struct _art_node art_node;
struct _art_node4; typedef struct _art_node4 art_node4;
struct _art_node16; typedef struct _art_node16 art_node16;
struct _art_node48; typedef struct _art_node48 art_node48;
struct _art_node256; typedef struct _art_node256 art_node256;
struct _var_string; typedef struct _var_string var_string;
struct _art_leaf; typedef struct _art_leaf art_leaf;
struct _art_tree_root; typedef struct _art_tree_root art_tree_root;
typedef uint8_t art_tree_root_toid_type_num[65535];
typedef uint8_t _toid_art_node_u_toid_type_num[2];
typedef uint8_t _toid_art_node_toid_type_num[3];
typedef uint8_t _toid_art_node4_toid_type_num[4];
typedef uint8_t _toid_art_node16_toid_type_num[5];
typedef uint8_t _toid_art_node48_toid_type_num[6];
typedef uint8_t _toid_art_node256_toid_type_num[7];
typedef uint8_t _toid_art_leaf_toid_type_num[8];
typedef uint8_t _toid_var_string_toid_type_num[9];
typedef struct pmemoid {
uint64_t pool_uuid_lo;
uint64_t off;
} PMEMoid;
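/*
 * Sketch (added for illustration): with the pool file mapped at ctx->addr,
 * offline inspection code can recover an object's address from its PMEMoid
 * roughly as
 *
 *	void *ptr = (void *)(ctx->addr + oid.off);
 *
 * assuming a single-file pool mapped at a known base; this is how the TOID
 * unions below can be chased without libpmemobj.
 */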
union _toid_art_node_u_toid {
PMEMoid oid;
art_node_u *_type;
_toid_art_node_u_toid_type_num *_type_num;
};
union art_tree_root_toid {
PMEMoid oid;
struct art_tree_root *_type;
art_tree_root_toid_type_num *_type_num;
};
union _toid_art_node_toid {
PMEMoid oid;
art_node *_type;
_toid_art_node_toid_type_num *_type_num;
};
union _toid_art_node4_toid {
PMEMoid oid;
art_node4 *_type;
_toid_art_node4_toid_type_num *_type_num;
};
union _toid_art_node16_toid {
PMEMoid oid;
art_node16 *_type;
_toid_art_node16_toid_type_num *_type_num;
};
union _toid_art_node48_toid {
PMEMoid oid;
art_node48 *_type;
_toid_art_node48_toid_type_num *_type_num;
};
union _toid_art_node256_toid {
PMEMoid oid;
art_node256 *_type;
_toid_art_node256_toid_type_num *_type_num;
};
union _toid_var_string_toid {
PMEMoid oid;
var_string *_type;
_toid_var_string_toid_type_num *_type_num;
};
union _toid_art_leaf_toid {
PMEMoid oid;
art_leaf *_type;
_toid_art_leaf_toid_type_num *_type_num;
};
struct _art_tree_root {
int size;
union _toid_art_node_u_toid root;
};
struct _art_node {
uint8_t num_children;
uint32_t partial_len;
unsigned char partial[MAX_PREFIX_LEN];
};
struct _art_node4 {
art_node n;
unsigned char keys[4];
union _toid_art_node_u_toid children[4];
};
struct _art_node16 {
art_node n;
unsigned char keys[16];
union _toid_art_node_u_toid children[16];
};
struct _art_node48 {
art_node n;
unsigned char keys[256];
union _toid_art_node_u_toid children[48];
};
struct _art_node256 {
art_node n;
union _toid_art_node_u_toid children[256];
};
struct _var_string {
size_t len;
unsigned char s[];
};
struct _art_leaf {
union _toid_var_string_toid value;
union _toid_var_string_toid key;
};
struct _art_node_u {
uint8_t art_node_type;
uint8_t art_node_tag;
union {
union _toid_art_node4_toid an4;
union _toid_art_node16_toid an16;
union _toid_art_node48_toid an48;
union _toid_art_node256_toid an256;
union _toid_art_leaf_toid al;
} u;
};
typedef enum {
ART_NODE4 = 0,
ART_NODE16 = 1,
ART_NODE48 = 2,
ART_NODE256 = 3,
ART_LEAF = 4,
ART_NODE_U = 5,
ART_NODE = 6,
ART_TREE_ROOT = 7,
VAR_STRING = 8,
art_node_types = 9 /* number of different art_nodes */
} art_node_type;
#define VALID_NODE_TYPE(n) (((n) >= 0) && ((n) < art_node_types))
extern size_t art_node_sizes[];
extern char *art_node_names[];
#endif /* _ARTTREE_STRUCTURES_H */
| 5,923 | 25.927273 | 78 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/libart/art.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH
* Copyright 2012, Armon Dadgar. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ===========================================================================
*
* Filename: art.h
*
* Description: header file for art tree on pmem implementation
*
* Author: Andreas Bluemle, Dieter Kasper
* [email protected]
* [email protected]
*
* Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* ===========================================================================
*/
/*
* based on https://github.com/armon/libart/src/art.h
*/
#ifndef _ART_H
#define _ART_H
#ifdef __cplusplus
extern "C" {
#endif
#define MAX_PREFIX_LEN 10
typedef enum {
NODE4 = 0,
NODE16 = 1,
NODE48 = 2,
NODE256 = 3,
art_leaf_t = 4,
art_node_types = 5 /* number of different art_nodes */
} art_node_type;
char *art_node_names[] = {
"art_node4",
"art_node16",
"art_node48",
"art_node256",
"art_leaf"
};
/*
 * forward declarations; these are required so that the typedef names can
 * be used instead of the struct tags
*/
struct _art_node_u; typedef struct _art_node_u art_node_u;
struct _art_node; typedef struct _art_node art_node;
struct _art_node4; typedef struct _art_node4 art_node4;
struct _art_node16; typedef struct _art_node16 art_node16;
struct _art_node48; typedef struct _art_node48 art_node48;
struct _art_node256; typedef struct _art_node256 art_node256;
struct _art_leaf; typedef struct _art_leaf art_leaf;
struct _var_string; typedef struct _var_string var_string;
POBJ_LAYOUT_BEGIN(arttree_tx);
POBJ_LAYOUT_ROOT(arttree_tx, struct art_tree_root);
POBJ_LAYOUT_TOID(arttree_tx, art_node_u);
POBJ_LAYOUT_TOID(arttree_tx, art_node4);
POBJ_LAYOUT_TOID(arttree_tx, art_node16);
POBJ_LAYOUT_TOID(arttree_tx, art_node48);
POBJ_LAYOUT_TOID(arttree_tx, art_node256);
POBJ_LAYOUT_TOID(arttree_tx, art_leaf);
POBJ_LAYOUT_TOID(arttree_tx, var_string);
POBJ_LAYOUT_END(arttree_tx);
struct _var_string {
size_t len;
unsigned char s[];
};
/*
* This struct is included as part of all the various node sizes
*/
struct _art_node {
uint8_t num_children;
uint32_t partial_len;
unsigned char partial[MAX_PREFIX_LEN];
};
/*
* Small node with only 4 children
*/
struct _art_node4 {
art_node n;
unsigned char keys[4];
TOID(art_node_u) children[4];
};
/*
* Node with 16 children
*/
struct _art_node16 {
art_node n;
unsigned char keys[16];
TOID(art_node_u) children[16];
};
/*
* Node with 48 children, but a full 256 byte field.
*/
struct _art_node48 {
art_node n;
unsigned char keys[256];
TOID(art_node_u) children[48];
};
/*
* Full node with 256 children
*/
struct _art_node256 {
art_node n;
TOID(art_node_u) children[256];
};
/*
* Represents a leaf. These are of arbitrary size, as they include the key.
*/
struct _art_leaf {
TOID(var_string) value;
TOID(var_string) key;
};
struct _art_node_u {
uint8_t art_node_type;
uint8_t art_node_tag;
union {
TOID(art_node4) an4; /* starts with art_node */
TOID(art_node16) an16; /* starts with art_node */
TOID(art_node48) an48; /* starts with art_node */
TOID(art_node256) an256; /* starts with art_node */
TOID(art_leaf) al;
} u;
};
struct art_tree_root {
int size;
TOID(art_node_u) root;
};
typedef struct _cb_data {
TOID(art_node_u) node;
int child_idx;
} cb_data;
/*
* Macros to manipulate art_node tags
*/
#define IS_LEAF(x) (((x)->art_node_type == art_leaf_t))
#define SET_LEAF(x) (((x)->art_node_tag = art_leaf_t))
#define COPY_BLOB(_obj, _blob, _len) \
D_RW(_obj)->len = _len; \
TX_MEMCPY(D_RW(_obj)->s, _blob, _len); \
D_RW(_obj)->s[(_len) - 1] = '\0';
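/*
 * Usage sketch (added for illustration; assumes an open transaction and a
 * NUL-terminated source buffer whose length includes the terminator):
 *
 *	TOID(var_string) vs = TX_ALLOC(var_string, sizeof(var_string) + len);
 *	COPY_BLOB(vs, buf, len);
 */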
typedef int(*art_callback)(void *data,
const unsigned char *key, uint32_t key_len,
const unsigned char *value, uint32_t val_len);
extern int art_tree_init(PMEMobjpool *pop, int *newpool);
extern uint64_t art_size(PMEMobjpool *pop);
extern int art_iter(PMEMobjpool *pop, art_callback cb, void *data);
extern TOID(var_string) art_insert(PMEMobjpool *pop,
const unsigned char *key, int key_len,
void *value, int val_len);
extern TOID(var_string) art_search(PMEMobjpool *pop,
const unsigned char *key, int key_len);
extern TOID(var_string) art_delete(PMEMobjpool *pop,
const unsigned char *key, int key_len);
#ifdef __cplusplus
}
#endif
#endif /* _ART_H */
| 5,998 | 26.773148 | 78 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/libart/arttree.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ===========================================================================
*
* Filename: arttree.h
*
* Description: header file for art tree on pmem implementation
*
* Author: Andreas Bluemle, Dieter Kasper
* [email protected]
* [email protected]
*
* Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* ===========================================================================
*/
#ifndef _ARTTREE_H
#define _ARTTREE_H
#ifdef __cplusplus
extern "C" {
#endif
#include "art.h"
#ifdef __cplusplus
}
#endif
#endif /* _ARTTREE_H */
| 2,337 | 34.969231 | 78 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/queue/multirun.sh | #!/bin/bash
sudo rm -rf /mnt/mem/queue.pool
sudo pmempool create --layout="queue" obj myobjpool.set
sudo ./queue /mnt/mem/queue.pool new 10000
#for (( c=1; c<=10000; c++ ))
#do
#echo "$c"
sudo ./queue /mnt/mem/queue.pool enqueue hello
#done
| 246 | 23.7 | 55 | sh |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/queue/run.sh | sudo rm -rf /mnt/mem/queue.pool
sudo pmempool create --layout="queue" obj myobjpool.set
#sudo ../../../tools/pmempool/pmempool create obj /mnt/mem/queue.pool --layout queue
sudo ./queue /mnt/mem/queue.pool new 10000
sudo ./queue /mnt/mem/queue.pool enqueue hello>enqueue
sudo ./queue /mnt/mem/queue.pool show
grep tx enqueue | awk '{print $3}'>file
grep tx dequeue | awk '{print $3}'>>file # append so the enqueue numbers are not overwritten
make -j12 EXTRA_CFLAGS+=-DGET_NDP_PERFORMENCE EXTRA_CFLAGS+=-DRUN_COUNT=10000 EXTRA_CFLAGS="-Wno-error"
| 497 | 40.5 | 105 | sh |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/queue/runall.sh | make -j12 EXTRA_CFLAGS+=-DGET_NDP_PERFORMENCE EXTRA_CFLAGS+=-DRUN_COUNT=10000 EXTRA_CFLAGS="-Wno-error"
sudo rm -rf /mnt/mem/queue.pool
sudo pmempool create --layout="queue" obj myobjpool.set
#sudo ../../../tools/pmempool/pmempool create obj /mnt/mem/queue.pool --layout queue
sudo ./queue /mnt/mem/queue.pool new 10000
| 324 | 39.625 | 105 | sh |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/linkedlist/run.sh | sudo rm -rf /mnt/mem/fifo.pool
sudo pmempool create --layout="list" obj myobjpool.set
sudo ../../../tools/pmempool/pmempool create obj /mnt/mem/fifo.pool --layout list
sudo ./fifo /mnt/mem/fifo.pool insert a
sudo ./fifo /mnt/mem/fifo.pool remove a
| 249 | 34.714286 | 81 | sh |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/linkedlist/pmemobj_list.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* pmemobj_list.h -- macro definitions for persistent
* singly linked list and tail queue
*/
#ifndef PMEMOBJ_LISTS_H
#define PMEMOBJ_LISTS_H
#include <libpmemobj.h>
/*
* This file defines two types of persistent data structures:
 * singly-linked lists and tail queues.
*
 * All macros defined in this file must be used within the libpmemobj
 * transactional API. The following snippet presents an example of usage:
*
* TX_BEGIN(pop) {
* POBJ_TAILQ_INIT(head);
* } TX_ONABORT {
* abort();
* } TX_END
*
* SLIST TAILQ
* _HEAD + +
* _ENTRY + +
* _INIT + +
* _EMPTY + +
* _FIRST + +
* _NEXT + +
* _PREV - +
* _LAST - +
* _FOREACH + +
* _FOREACH_REVERSE - +
* _INSERT_HEAD + +
* _INSERT_BEFORE - +
* _INSERT_AFTER + +
* _INSERT_TAIL - +
* _MOVE_ELEMENT_HEAD - +
* _MOVE_ELEMENT_TAIL - +
* _REMOVE_HEAD + -
* _REMOVE + +
* _REMOVE_FREE + +
* _SWAP_HEAD_TAIL - +
*/
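/*
 * Fuller usage sketch (added for illustration; hypothetical type, type
 * number and field names, not part of the original header):
 *
 *	TOID_DECLARE(struct entry, 1000);
 *	struct entry {
 *		int value;
 *		POBJ_TAILQ_ENTRY(struct entry) links;
 *	};
 *	POBJ_TAILQ_HEAD(entryhead, struct entry);
 *
 *	TX_BEGIN(pop) {
 *		TOID(struct entry) e = TX_ZNEW(struct entry);
 *		POBJ_TAILQ_INSERT_TAIL(head, e, links);
 *	} TX_END
 */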
/*
* Singly-linked List definitions.
*/
#define POBJ_SLIST_HEAD(name, type)\
struct name {\
TOID(type) pe_first;\
}
#define POBJ_SLIST_ENTRY(type)\
struct {\
TOID(type) pe_next;\
}
/*
* Singly-linked List access methods.
*/
#define POBJ_SLIST_EMPTY(head) (TOID_IS_NULL((head)->pe_first))
#define POBJ_SLIST_FIRST(head) ((head)->pe_first)
#define POBJ_SLIST_NEXT(elm, field) (D_RO(elm)->field.pe_next)
/*
* Singly-linked List functions.
*/
#define POBJ_SLIST_INIT(head) do {\
TX_ADD_DIRECT(&(head)->pe_first);\
TOID_ASSIGN((head)->pe_first, OID_NULL);\
} while (0)
#define POBJ_SLIST_INSERT_HEAD(head, elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
TX_ADD_DIRECT(&elm_ptr->field.pe_next);\
elm_ptr->field.pe_next = (head)->pe_first;\
TX_SET_DIRECT(head, pe_first, elm);\
} while (0)
#define POBJ_SLIST_INSERT_AFTER(slistelm, elm, field) do {\
TOID_TYPEOF(slistelm) *slistelm_ptr = D_RW(slistelm);\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
TX_ADD_DIRECT(&elm_ptr->field.pe_next);\
elm_ptr->field.pe_next = slistelm_ptr->field.pe_next;\
TX_ADD_DIRECT(&slistelm_ptr->field.pe_next);\
slistelm_ptr->field.pe_next = elm;\
} while (0)
#define POBJ_SLIST_REMOVE_HEAD(head, field) do {\
TX_ADD_DIRECT(&(head)->pe_first);\
(head)->pe_first = D_RO((head)->pe_first)->field.pe_next;\
} while (0)
#define POBJ_SLIST_REMOVE(head, elm, field) do {\
if (TOID_EQUALS((head)->pe_first, elm)) {\
POBJ_SLIST_REMOVE_HEAD(head, field);\
} else {\
TOID_TYPEOF(elm) *curelm_ptr = D_RW((head)->pe_first);\
while (!TOID_EQUALS(curelm_ptr->field.pe_next, elm))\
curelm_ptr = D_RW(curelm_ptr->field.pe_next);\
TX_ADD_DIRECT(&curelm_ptr->field.pe_next);\
curelm_ptr->field.pe_next = D_RO(elm)->field.pe_next;\
}\
} while (0)
#define POBJ_SLIST_REMOVE_FREE(head, elm, field) do {\
POBJ_SLIST_REMOVE(head, elm, field);\
TX_FREE(elm);\
} while (0)
#define POBJ_SLIST_FOREACH(var, head, field)\
for ((var) = POBJ_SLIST_FIRST(head);\
!TOID_IS_NULL(var);\
var = POBJ_SLIST_NEXT(var, field))
/*
* Tail-queue definitions.
*/
#define POBJ_TAILQ_ENTRY(type)\
struct {\
TOID(type) pe_next;\
TOID(type) pe_prev;\
}
#define POBJ_TAILQ_HEAD(name, type)\
struct name {\
TOID(type) pe_first;\
TOID(type) pe_last;\
}
/*
* Tail-queue access methods.
*/
#define POBJ_TAILQ_FIRST(head) ((head)->pe_first)
#define POBJ_TAILQ_LAST(head) ((head)->pe_last)
#define POBJ_TAILQ_EMPTY(head) (TOID_IS_NULL((head)->pe_first))
#define POBJ_TAILQ_NEXT(elm, field) (D_RO(elm)->field.pe_next)
#define POBJ_TAILQ_PREV(elm, field) (D_RO(elm)->field.pe_prev)
/*
 * Tail-queue internal methods.
*/
#define _POBJ_SWAP_PTR(elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
TX_ADD_DIRECT(&elm_ptr->field);\
__typeof__(elm) temp = elm_ptr->field.pe_prev;\
elm_ptr->field.pe_prev = elm_ptr->field.pe_next;\
elm_ptr->field.pe_next = temp;\
} while (0)
/*
* Tail-queue functions.
*/
#define POBJ_TAILQ_SWAP_HEAD_TAIL(head, field) do {\
__typeof__((head)->pe_first) temp = (head)->pe_first;\
TX_ADD_DIRECT(head);\
(head)->pe_first = (head)->pe_last;\
(head)->pe_last = temp;\
} while (0)
#define POBJ_TAILQ_FOREACH(var, head, field)\
for ((var) = POBJ_TAILQ_FIRST(head);\
!TOID_IS_NULL(var);\
var = POBJ_TAILQ_NEXT(var, field))
#define POBJ_TAILQ_FOREACH_REVERSE(var, head, field)\
for ((var) = POBJ_TAILQ_LAST(head);\
!TOID_IS_NULL(var);\
var = POBJ_TAILQ_PREV(var, field))
#define POBJ_TAILQ_INIT(head) do {\
TX_ADD_FIELD_DIRECT(head, pe_first);\
TOID_ASSIGN((head)->pe_first, OID_NULL);\
TX_ADD_FIELD_DIRECT(head, pe_last);\
TOID_ASSIGN((head)->pe_last, OID_NULL);\
} while (0)
#define POBJ_TAILQ_INSERT_HEAD(head, elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
if (TOID_IS_NULL((head)->pe_first)) {\
TX_ADD_DIRECT(&elm_ptr->field);\
elm_ptr->field.pe_prev = (head)->pe_first;\
elm_ptr->field.pe_next = (head)->pe_first;\
TX_ADD_DIRECT(head);\
(head)->pe_first = elm;\
(head)->pe_last = elm;\
} else {\
TOID_TYPEOF(elm) *first = D_RW((head)->pe_first);\
TX_ADD_DIRECT(&elm_ptr->field);\
elm_ptr->field.pe_next = (head)->pe_first;\
elm_ptr->field.pe_prev = first->field.pe_prev;\
TX_ADD_DIRECT(&first->field.pe_prev);\
first->field.pe_prev = elm;\
TX_SET_DIRECT(head, pe_first, elm);\
}\
} while (0)
#define POBJ_TAILQ_INSERT_TAIL(head, elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
if (TOID_IS_NULL((head)->pe_last)) {\
TX_ADD_DIRECT(&elm_ptr->field);\
elm_ptr->field.pe_prev = (head)->pe_last;\
elm_ptr->field.pe_next = (head)->pe_last;\
TX_ADD_DIRECT(head);\
(head)->pe_first = elm;\
(head)->pe_last = elm;\
} else {\
TOID_TYPEOF(elm) *last = D_RW((head)->pe_last);\
TX_ADD_DIRECT(&elm_ptr->field);\
elm_ptr->field.pe_prev = (head)->pe_last;\
elm_ptr->field.pe_next = last->field.pe_next;\
TX_ADD_DIRECT(&last->field.pe_next);\
last->field.pe_next = elm;\
TX_SET_DIRECT(head, pe_last, elm);\
}\
} while (0)
#define POBJ_TAILQ_INSERT_AFTER(listelm, elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
TOID_TYPEOF(listelm) *listelm_ptr = D_RW(listelm);\
TX_ADD_DIRECT(&elm_ptr->field);\
elm_ptr->field.pe_prev = listelm;\
elm_ptr->field.pe_next = listelm_ptr->field.pe_next;\
if (TOID_IS_NULL(listelm_ptr->field.pe_next)) {\
TX_SET_DIRECT(head, pe_last, elm);\
} else {\
TOID_TYPEOF(elm) *next = D_RW(listelm_ptr->field.pe_next);\
TX_ADD_DIRECT(&next->field.pe_prev);\
next->field.pe_prev = elm;\
}\
TX_ADD_DIRECT(&listelm_ptr->field.pe_next);\
listelm_ptr->field.pe_next = elm;\
} while (0)
#define POBJ_TAILQ_INSERT_BEFORE(listelm, elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
TOID_TYPEOF(listelm) *listelm_ptr = D_RW(listelm);\
TX_ADD_DIRECT(&elm_ptr->field);\
elm_ptr->field.pe_next = listelm;\
elm_ptr->field.pe_prev = listelm_ptr->field.pe_prev;\
if (TOID_IS_NULL(listelm_ptr->field.pe_prev)) {\
TX_SET_DIRECT(head, pe_first, elm);\
} else {\
TOID_TYPEOF(elm) *prev = D_RW(listelm_ptr->field.pe_prev);\
TX_ADD_DIRECT(&prev->field.pe_next);\
prev->field.pe_next = elm; \
}\
TX_ADD_DIRECT(&listelm_ptr->field.pe_prev);\
listelm_ptr->field.pe_prev = elm;\
} while (0)
#define POBJ_TAILQ_REMOVE(head, elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
if (TOID_IS_NULL(elm_ptr->field.pe_prev) &&\
TOID_IS_NULL(elm_ptr->field.pe_next)) {\
TX_ADD_DIRECT(head);\
(head)->pe_first = elm_ptr->field.pe_prev;\
(head)->pe_last = elm_ptr->field.pe_next;\
} else {\
if (TOID_IS_NULL(elm_ptr->field.pe_prev)) {\
TX_SET_DIRECT(head, pe_first, elm_ptr->field.pe_next);\
TOID_TYPEOF(elm) *next = D_RW(elm_ptr->field.pe_next);\
TX_ADD_DIRECT(&next->field.pe_prev);\
next->field.pe_prev = elm_ptr->field.pe_prev;\
} else {\
TOID_TYPEOF(elm) *prev = D_RW(elm_ptr->field.pe_prev);\
TX_ADD_DIRECT(&prev->field.pe_next);\
prev->field.pe_next = elm_ptr->field.pe_next;\
}\
if (TOID_IS_NULL(elm_ptr->field.pe_next)) {\
TX_SET_DIRECT(head, pe_last, elm_ptr->field.pe_prev);\
TOID_TYPEOF(elm) *prev = D_RW(elm_ptr->field.pe_prev);\
TX_ADD_DIRECT(&prev->field.pe_next);\
prev->field.pe_next = elm_ptr->field.pe_next;\
} else {\
TOID_TYPEOF(elm) *next = D_RW(elm_ptr->field.pe_next);\
TX_ADD_DIRECT(&next->field.pe_prev);\
next->field.pe_prev = elm_ptr->field.pe_prev;\
}\
}\
} while (0)
#define POBJ_TAILQ_REMOVE_FREE(head, elm, field) do {\
POBJ_TAILQ_REMOVE(head, elm, field);\
TX_FREE(elm);\
} while (0)
/*
 * two cases: either the list holds exactly two elements, or the general
 * case applies (including when elm is the last element)
*/
#define POBJ_TAILQ_MOVE_ELEMENT_HEAD(head, elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
if (TOID_EQUALS((head)->pe_last, elm) &&\
TOID_EQUALS(D_RO((head)->pe_first)->field.pe_next, elm)) {\
_POBJ_SWAP_PTR(elm, field);\
_POBJ_SWAP_PTR((head)->pe_first, field);\
POBJ_TAILQ_SWAP_HEAD_TAIL(head, field);\
} else {\
TOID_TYPEOF(elm) *prev = D_RW(elm_ptr->field.pe_prev);\
TX_ADD_DIRECT(&prev->field.pe_next);\
prev->field.pe_next = elm_ptr->field.pe_next;\
if (TOID_EQUALS((head)->pe_last, elm)) {\
TX_SET_DIRECT(head, pe_last, elm_ptr->field.pe_prev);\
} else {\
TOID_TYPEOF(elm) *next = D_RW(elm_ptr->field.pe_next);\
TX_ADD_DIRECT(&next->field.pe_prev);\
next->field.pe_prev = elm_ptr->field.pe_prev;\
}\
TX_ADD_DIRECT(&elm_ptr->field);\
elm_ptr->field.pe_prev = D_RO((head)->pe_first)->field.pe_prev;\
elm_ptr->field.pe_next = (head)->pe_first;\
TOID_TYPEOF(elm) *first = D_RW((head)->pe_first);\
TX_ADD_DIRECT(&first->field.pe_prev);\
first->field.pe_prev = elm;\
TX_SET_DIRECT(head, pe_first, elm);\
}\
} while (0)
#define POBJ_TAILQ_MOVE_ELEMENT_TAIL(head, elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
if (TOID_EQUALS((head)->pe_first, elm) &&\
TOID_EQUALS(D_RO((head)->pe_last)->field.pe_prev, elm)) {\
_POBJ_SWAP_PTR(elm, field);\
_POBJ_SWAP_PTR((head)->pe_last, field);\
POBJ_TAILQ_SWAP_HEAD_TAIL(head, field);\
} else {\
TOID_TYPEOF(elm) *next = D_RW(elm_ptr->field.pe_next);\
TX_ADD_DIRECT(&next->field.pe_prev);\
next->field.pe_prev = elm_ptr->field.pe_prev;\
if (TOID_EQUALS((head)->pe_first, elm)) {\
TX_SET_DIRECT(head, pe_first, elm_ptr->field.pe_next);\
} else { \
TOID_TYPEOF(elm) *prev = D_RW(elm_ptr->field.pe_prev);\
TX_ADD_DIRECT(&prev->field.pe_next);\
prev->field.pe_next = elm_ptr->field.pe_next;\
}\
TX_ADD_DIRECT(&elm_ptr->field);\
elm_ptr->field.pe_prev = (head)->pe_last;\
elm_ptr->field.pe_next = D_RO((head)->pe_last)->field.pe_next;\
__typeof__(elm_ptr) last = D_RW((head)->pe_last);\
TX_ADD_DIRECT(&last->field.pe_next);\
last->field.pe_next = elm;\
TX_SET_DIRECT(head, pe_last, elm);\
} \
} while (0)
#endif /* PMEMOBJ_LISTS_H */
| 11,243 | 30.762712 | 66 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/map/map_hashmap_atomic.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* map_hashmap_atomic.h -- common interface for maps
*/
#ifndef MAP_HASHMAP_ATOMIC_H
#define MAP_HASHMAP_ATOMIC_H
#include "map.h"
#ifdef __cplusplus
extern "C" {
#endif
extern struct map_ops hashmap_atomic_ops;
#define MAP_HASHMAP_ATOMIC (&hashmap_atomic_ops)
#ifdef __cplusplus
}
#endif
#endif /* MAP_HASHMAP_ATOMIC_H */
| 421 | 15.230769 | 52 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/map/kv_server_test.sh | #!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2015-2016, Intel Corporation
set -euo pipefail
MAP=ctree
PORT=9100
POOL=$1
# start a new server instance
./kv_server $MAP $POOL $PORT &
# wait for the server to properly start
sleep 1
# insert a new key value pair and disconnect
RESP=`echo -e "INSERT foo bar\nGET foo\nBYE" | nc 127.0.0.1 $PORT`
echo $RESP
# remove previously inserted key value pair and shutdown the server
RESP=`echo -e "GET foo\nREMOVE foo\nGET foo\nKILL" | nc 127.0.0.1 $PORT`
echo $RESP
| 537 | 21.416667 | 72 | sh |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/map/map_btree.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* map_ctree.h -- common interface for maps
*/
#ifndef MAP_BTREE_H
#define MAP_BTREE_H
#include "map.h"
#ifdef __cplusplus
extern "C" {
#endif
extern struct map_ops btree_map_ops;
#define MAP_BTREE (&btree_map_ops)
#ifdef __cplusplus
}
#endif
#endif /* MAP_BTREE_H */
| 366 | 13.115385 | 44 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/map/map_rtree.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* map_rtree.h -- common interface for maps
*/
#ifndef MAP_RTREE_H
#define MAP_RTREE_H
#include "map.h"
#ifdef __cplusplus
extern "C" {
#endif
extern struct map_ops rtree_map_ops;
#define MAP_RTREE (&rtree_map_ops)
#ifdef __cplusplus
}
#endif
#endif /* MAP_RTREE_H */
| 366 | 13.115385 | 44 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/map/map_skiplist.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* map_skiplist.h -- common interface for maps
*/
#ifndef MAP_SKIPLIST_H
#define MAP_SKIPLIST_H
#include "map.h"
extern struct map_ops skiplist_map_ops;
#define MAP_SKIPLIST (&skiplist_map_ops)
#endif /* MAP_SKIPLIST_H */
| 313 | 16.444444 | 46 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/map/map.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* map.h -- common interface for maps
*/
#ifndef MAP_H
#define MAP_H
#include <libpmemobj.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifndef MAP_TYPE_OFFSET
#define MAP_TYPE_OFFSET 1000
#endif
TOID_DECLARE(struct map, MAP_TYPE_OFFSET + 0);
struct map;
struct map_ctx;
struct map_ops {
int(*check)(PMEMobjpool *pop, TOID(struct map) map);
int(*create)(PMEMobjpool *pop, TOID(struct map) *map, void *arg);
int(*destroy)(PMEMobjpool *pop, TOID(struct map) *map);
int(*init)(PMEMobjpool *pop, TOID(struct map) map);
int(*insert)(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, PMEMoid value);
int(*insert_new)(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, size_t size,
unsigned type_num,
void(*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid(*remove)(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key);
int(*remove_free)(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key);
int(*clear)(PMEMobjpool *pop, TOID(struct map) map);
PMEMoid(*get)(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key);
int(*lookup)(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key);
int(*foreach)(PMEMobjpool *pop, TOID(struct map) map,
int(*cb)(uint64_t key, PMEMoid value, void *arg),
void *arg);
int(*is_empty)(PMEMobjpool *pop, TOID(struct map) map);
size_t(*count)(PMEMobjpool *pop, TOID(struct map) map);
int(*cmd)(PMEMobjpool *pop, TOID(struct map) map,
unsigned cmd, uint64_t arg);
};
struct map_ctx {
PMEMobjpool *pop;
const struct map_ops *ops;
};
struct map_ctx *map_ctx_init(const struct map_ops *ops, PMEMobjpool *pop);
void map_ctx_free(struct map_ctx *mapc);
int map_check(struct map_ctx *mapc, TOID(struct map) map);
int map_create(struct map_ctx *mapc, TOID(struct map) *map, void *arg);
int map_destroy(struct map_ctx *mapc, TOID(struct map) *map);
int map_init(struct map_ctx *mapc, TOID(struct map) map);
int map_insert(struct map_ctx *mapc, TOID(struct map) map,
uint64_t key, PMEMoid value);
int map_insert_new(struct map_ctx *mapc, TOID(struct map) map,
uint64_t key, size_t size,
unsigned type_num,
void(*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid map_remove(struct map_ctx *mapc, TOID(struct map) map, uint64_t key);
int map_remove_free(struct map_ctx *mapc, TOID(struct map) map, uint64_t key);
int map_clear(struct map_ctx *mapc, TOID(struct map) map);
PMEMoid map_get(struct map_ctx *mapc, TOID(struct map) map, uint64_t key);
int map_lookup(struct map_ctx *mapc, TOID(struct map) map, uint64_t key);
int map_foreach(struct map_ctx *mapc, TOID(struct map) map,
int(*cb)(uint64_t key, PMEMoid value, void *arg),
void *arg);
int map_is_empty(struct map_ctx *mapc, TOID(struct map) map);
size_t map_count(struct map_ctx *mapc, TOID(struct map) map);
int map_cmd(struct map_ctx *mapc, TOID(struct map) map,
unsigned cmd, uint64_t arg);
#ifdef __cplusplus
}
#endif
#endif /* MAP_H */
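/*
 * Usage sketch (added for illustration; assumes a backend header such as
 * map_ctree.h is also included so that MAP_CTREE is visible):
 */
#ifdef MAP_USAGE_SKETCH
static void
map_usage(PMEMobjpool *pop)
{
	struct map_ctx *mapc = map_ctx_init(MAP_CTREE, pop);
	if (mapc == NULL)
		return;
	TOID(struct map) map;
	if (map_create(mapc, &map, NULL) == 0) {
		map_insert(mapc, map, 42, OID_NULL);
		map_destroy(mapc, &map);
	}
	map_ctx_free(mapc);
}
#endif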
| 3,010 | 31.728261 | 78 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/map/map_hashmap_rp.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* map_hashmap_rp.h -- common interface for maps
*/
#ifndef MAP_HASHMAP_RP_H
#define MAP_HASHMAP_RP_H
#include "map.h"
#ifdef __cplusplus
extern "C" {
#endif
extern struct map_ops hashmap_rp_ops;
#define MAP_HASHMAP_RP (&hashmap_rp_ops)
#ifdef __cplusplus
}
#endif
#endif /* MAP_HASHMAP_RP_H */
| 388 | 13.961538 | 48 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/map/run.sh | rm -rf /mnt/mem/*
sudo pmempool create --layout="data_store" obj myobjpool.set
sudo ./data_store /mnt/mem/data_store.pool
awk '{sum+= $3;} END{print sum;}' time
grep "time" out > time
grep "timecp" out > time
| 209 | 29 | 60 | sh |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/map/map_hashmap_tx.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* map_hashmap_tx.h -- common interface for maps
*/
#ifndef MAP_HASHMAP_TX_H
#define MAP_HASHMAP_TX_H
#include "map.h"
#ifdef __cplusplus
extern "C" {
#endif
extern struct map_ops hashmap_tx_ops;
#define MAP_HASHMAP_TX (&hashmap_tx_ops)
#ifdef __cplusplus
}
#endif
#endif /* MAP_HASHMAP_TX_H */
| 393 | 14.153846 | 48 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/map/map_rbtree.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* map_rbtree.h -- common interface for maps
*/
#ifndef MAP_RBTREE_H
#define MAP_RBTREE_H
#include "map.h"
#ifdef __cplusplus
extern "C" {
#endif
extern struct map_ops rbtree_map_ops;
#define MAP_RBTREE (&rbtree_map_ops)
#ifdef __cplusplus
}
#endif
#endif /* MAP_RBTREE_H */
| 373 | 13.384615 | 44 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/map/kv_protocol.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2016, Intel Corporation */
/*
* kv_protocol.h -- kv store text protocol
*/
#ifndef KV_PROTOCOL_H
#define KV_PROTOCOL_H
#include <stdint.h>
#define MAX_KEY_LEN 255
/*
* All client messages must start with a valid message token and be terminated
* by a newline character ('\n'). The message parser is case-sensitive.
*
 * Server responds with newline-terminated string literals.
 * If an invalid message token is received, RESP_MSG_UNKNOWN is sent.
*/
enum kv_cmsg {
/*
* INSERT client message
* Syntax: INSERT [key] [value]\n
*
	 * The key is limited to 255 characters; the size of a value is limited
* by the pmemobj maximum allocation size (~16 gigabytes).
*
* Operation adds a new key value pair to the map.
* Returns RESP_MSG_SUCCESS if successful or RESP_MSG_FAIL otherwise.
*/
CMSG_INSERT,
/*
* REMOVE client message
* Syntax: REMOVE [key]\n
*
* Operation removes a key value pair from the map.
* Returns RESP_MSG_SUCCESS if successful or RESP_MSG_FAIL otherwise.
*/
CMSG_REMOVE,
/*
* GET client message
* Syntax: GET [key]\n
*
* Operation retrieves a key value pair from the map.
* Returns the value if found or RESP_MSG_NULL otherwise.
*/
CMSG_GET,
/*
* BYE client message
* Syntax: BYE\n
*
* Operation terminates the client connection.
* No return value.
*/
CMSG_BYE,
/*
* KILL client message
* Syntax: KILL\n
*
	 * Operation terminates the client connection and gracefully shuts down
* the server.
* No return value.
*/
CMSG_KILL,
MAX_CMSG
};
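/*
 * Example session (added for illustration; '>' marks client lines and '<'
 * marks server responses):
 *
 *	> INSERT foo bar
 *	< SUCCESS
 *	> GET foo
 *	< bar
 *	> REMOVE foo
 *	< SUCCESS
 *	> BYE
 */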
enum resp_messages {
RESP_MSG_SUCCESS,
RESP_MSG_FAIL,
RESP_MSG_NULL,
RESP_MSG_UNKNOWN,
MAX_RESP_MSG
};
static const char *resp_msg[MAX_RESP_MSG] = {
[RESP_MSG_SUCCESS] = "SUCCESS\n",
[RESP_MSG_FAIL] = "FAIL\n",
[RESP_MSG_NULL] = "NULL\n",
[RESP_MSG_UNKNOWN] = "UNKNOWN\n"
};
static const char *kv_cmsg_token[MAX_CMSG] = {
[CMSG_INSERT] = "INSERT",
[CMSG_REMOVE] = "REMOVE",
[CMSG_GET] = "GET",
[CMSG_BYE] = "BYE",
[CMSG_KILL] = "KILL"
};
#endif /* KV_PROTOCOL_H */
| 2,082 | 19.623762 | 78 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/map/map_ctree.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* map_ctree.h -- common interface for maps
*/
#ifndef MAP_CTREE_H
#define MAP_CTREE_H
#include "map.h"
#ifdef __cplusplus
extern "C" {
#endif
extern struct map_ops ctree_map_ops;
#define MAP_CTREE (&ctree_map_ops)
#ifdef __cplusplus
}
#endif
#endif /* MAP_CTREE_H */
| 366 | 13.115385 | 44 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/string_store_tx/layout.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* layout.h -- example from introduction part 2
*/
#define LAYOUT_NAME "intro_2"
#define MAX_BUF_LEN 10
struct my_root {
char buf[MAX_BUF_LEN];
};
| 241 | 16.285714 | 47 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/string_store/layout.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* layout.h -- example from introduction part 1
*/
#define LAYOUT_NAME "intro_1"
#define MAX_BUF_LEN 10
struct my_root {
size_t len;
char buf[MAX_BUF_LEN];
};
| 254 | 16 | 47 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/tree_map/ctree_map.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* ctree_map.h -- TreeMap sorted collection implementation
*/
#ifndef CTREE_MAP_H
#define CTREE_MAP_H
#include <libpmemobj.h>
#ifndef CTREE_MAP_TYPE_OFFSET
#define CTREE_MAP_TYPE_OFFSET 1008
#endif
struct ctree_map;
TOID_DECLARE(struct ctree_map, CTREE_MAP_TYPE_OFFSET + 0);
int ctree_map_check(PMEMobjpool *pop, TOID(struct ctree_map) map);
int ctree_map_create(PMEMobjpool *pop, TOID(struct ctree_map) *map, void *arg);
int ctree_map_destroy(PMEMobjpool *pop, TOID(struct ctree_map) *map);
int ctree_map_insert(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key, PMEMoid value);
int ctree_map_insert_new(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid ctree_map_remove(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key);
int ctree_map_remove_free(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key);
int ctree_map_clear(PMEMobjpool *pop, TOID(struct ctree_map) map);
PMEMoid ctree_map_get(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key);
int ctree_map_lookup(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key);
int ctree_map_foreach(PMEMobjpool *pop, TOID(struct ctree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg);
int ctree_map_is_empty(PMEMobjpool *pop, TOID(struct ctree_map) map);
#endif /* CTREE_MAP_H */
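/*
 * Usage sketch (added for illustration; assumes an open pmemobj pool and a
 * hypothetical key, not part of the original header):
 */
#ifdef CTREE_MAP_USAGE_SKETCH
static void
ctree_usage(PMEMobjpool *pop)
{
	TOID(struct ctree_map) map;
	if (ctree_map_create(pop, &map, NULL) != 0)
		return;
	ctree_map_insert(pop, map, 42, OID_NULL);
	if (!ctree_map_is_empty(pop, map))
		ctree_map_remove_free(pop, map, 42);
	ctree_map_destroy(pop, &map);
}
#endif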
| 1,523 | 34.44186 | 79 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/tree_map/rtree_map.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* rtree_map.h -- Radix TreeMap collection implementation
*/
#ifndef RTREE_MAP_H
#define RTREE_MAP_H
#include <libpmemobj.h>
#ifndef RTREE_MAP_TYPE_OFFSET
#define RTREE_MAP_TYPE_OFFSET 1020
#endif
struct rtree_map;
TOID_DECLARE(struct rtree_map, RTREE_MAP_TYPE_OFFSET + 0);
int rtree_map_check(PMEMobjpool *pop, TOID(struct rtree_map) map);
int rtree_map_create(PMEMobjpool *pop, TOID(struct rtree_map) *map, void *arg);
int rtree_map_destroy(PMEMobjpool *pop, TOID(struct rtree_map) *map);
int rtree_map_insert(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size, PMEMoid value);
int rtree_map_insert_new(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size,
size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid rtree_map_remove(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size);
int rtree_map_remove_free(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size);
int rtree_map_clear(PMEMobjpool *pop, TOID(struct rtree_map) map);
PMEMoid rtree_map_get(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size);
int rtree_map_lookup(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size);
int rtree_map_foreach(PMEMobjpool *pop, TOID(struct rtree_map) map,
int (*cb)(const unsigned char *key, uint64_t key_size,
PMEMoid value, void *arg),
void *arg);
int rtree_map_is_empty(PMEMobjpool *pop, TOID(struct rtree_map) map);
#endif /* RTREE_MAP_H */
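/*
 * Usage sketch (added for illustration): unlike the uint64_t-keyed tree
 * maps in this directory, rtree_map is keyed by arbitrary byte strings:
 *
 *	const unsigned char key[] = "example";
 *	rtree_map_insert(pop, map, key, sizeof(key), value);
 *	PMEMoid v = rtree_map_get(pop, map, key, sizeof(key));
 */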
| 1,739 | 36.826087 | 79 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/tree_map/rbtree_map.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* rbtree_map.h -- TreeMap sorted collection implementation
*/
#ifndef RBTREE_MAP_H
#define RBTREE_MAP_H
#include <libpmemobj.h>
#ifndef RBTREE_MAP_TYPE_OFFSET
#define RBTREE_MAP_TYPE_OFFSET 1016
#endif
struct rbtree_map;
TOID_DECLARE(struct rbtree_map, RBTREE_MAP_TYPE_OFFSET + 0);
int rbtree_map_check(PMEMobjpool *pop, TOID(struct rbtree_map) map);
int rbtree_map_create(PMEMobjpool *pop, TOID(struct rbtree_map) *map,
void *arg);
int rbtree_map_destroy(PMEMobjpool *pop, TOID(struct rbtree_map) *map);
int rbtree_map_insert(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key, PMEMoid value);
int rbtree_map_insert_new(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid rbtree_map_remove(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key);
int rbtree_map_remove_free(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key);
int rbtree_map_clear(PMEMobjpool *pop, TOID(struct rbtree_map) map);
PMEMoid rbtree_map_get(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key);
int rbtree_map_lookup(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key);
int rbtree_map_foreach(PMEMobjpool *pop, TOID(struct rbtree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg);
int rbtree_map_is_empty(PMEMobjpool *pop, TOID(struct rbtree_map) map);
#endif /* RBTREE_MAP_H */
| 1,557 | 34.409091 | 73 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/examples/libpmemobj/tree_map/btree_map.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* btree_map.h -- TreeMap sorted collection implementation
*/
#ifndef BTREE_MAP_H
#define BTREE_MAP_H
#include <libpmemobj.h>
#ifndef BTREE_MAP_TYPE_OFFSET
#define BTREE_MAP_TYPE_OFFSET 1012
#endif
struct btree_map;
TOID_DECLARE(struct btree_map, BTREE_MAP_TYPE_OFFSET + 0);
int btree_map_check(PMEMobjpool *pop, TOID(struct btree_map) map);
int btree_map_create(PMEMobjpool *pop, TOID(struct btree_map) *map, void *arg);
int btree_map_destroy(PMEMobjpool *pop, TOID(struct btree_map) *map);
int btree_map_insert(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key, PMEMoid value);
int btree_map_insert_new(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid btree_map_remove(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key);
int btree_map_remove_free(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key);
int btree_map_clear(PMEMobjpool *pop, TOID(struct btree_map) map);
PMEMoid btree_map_get(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key);
int btree_map_lookup(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key);
int btree_map_foreach(PMEMobjpool *pop, TOID(struct btree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg);
int btree_map_is_empty(PMEMobjpool *pop, TOID(struct btree_map) map);
#endif /* BTREE_MAP_H */
| 1,523 | 34.44186 | 79 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/librpmem/rpmem_ssh.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmem_ssh.h -- rpmem ssh transport layer header file
*/
#ifndef RPMEM_SSH_H
#define RPMEM_SSH_H 1
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
struct rpmem_ssh;
struct rpmem_ssh *rpmem_ssh_open(const struct rpmem_target_info *info);
struct rpmem_ssh *rpmem_ssh_exec(const struct rpmem_target_info *info, ...);
struct rpmem_ssh *rpmem_ssh_execv(const struct rpmem_target_info *info,
const char **argv);
int rpmem_ssh_close(struct rpmem_ssh *rps);
int rpmem_ssh_send(struct rpmem_ssh *rps, const void *buff, size_t len);
int rpmem_ssh_recv(struct rpmem_ssh *rps, void *buff, size_t len);
int rpmem_ssh_monitor(struct rpmem_ssh *rps, int nonblock);
const char *rpmem_ssh_strerror(struct rpmem_ssh *rps, int oerrno);
#ifdef __cplusplus
}
#endif
#endif
| 866 | 23.771429 | 76 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/librpmem/rpmem_fip.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_fip.h -- rpmem libfabric provider module header file
*/
#ifndef RPMEM_FIP_H
#define RPMEM_FIP_H
#include <stdint.h>
#include <netinet/in.h>
#include <sys/types.h>
#include <sys/socket.h>
#ifdef __cplusplus
extern "C" {
#endif
struct rpmem_fip;
struct rpmem_fip_attr {
enum rpmem_provider provider;
size_t max_wq_size;
enum rpmem_persist_method persist_method;
void *laddr;
size_t size;
size_t buff_size;
unsigned nlanes;
void *raddr;
uint64_t rkey;
};
struct rpmem_fip *rpmem_fip_init(const char *node, const char *service,
struct rpmem_fip_attr *attr, unsigned *nlanes);
void rpmem_fip_fini(struct rpmem_fip *fip);
int rpmem_fip_connect(struct rpmem_fip *fip);
int rpmem_fip_close(struct rpmem_fip *fip);
int rpmem_fip_process_start(struct rpmem_fip *fip);
int rpmem_fip_process_stop(struct rpmem_fip *fip);
int rpmem_fip_flush(struct rpmem_fip *fip, size_t offset, size_t len,
unsigned lane, unsigned flags);
int rpmem_fip_drain(struct rpmem_fip *fip, unsigned lane);
int rpmem_fip_persist(struct rpmem_fip *fip, size_t offset, size_t len,
unsigned lane, unsigned flags);
int rpmem_fip_read(struct rpmem_fip *fip, void *buff,
size_t len, size_t off, unsigned lane);
void rpmem_fip_probe_fork_safety(void);
size_t rpmem_fip_get_wq_size(struct rpmem_fip *fip);
#ifdef __cplusplus
}
#endif
#endif
| 1,427 | 22.032258 | 71 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/librpmem/rpmem.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem.h -- internal definitions for librpmem
*/
#include "alloc.h"
#include "fault_injection.h"
#define RPMEM_LOG_PREFIX "librpmem"
#define RPMEM_LOG_LEVEL_VAR "RPMEM_LOG_LEVEL"
#define RPMEM_LOG_FILE_VAR "RPMEM_LOG_FILE"
#if FAULT_INJECTION
void
rpmem_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at);
int
rpmem_fault_injection_enabled(void);
#else
static inline void
rpmem_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
abort();
}
static inline int
rpmem_fault_injection_enabled(void)
{
return 0;
}
#endif
| 672 | 18.228571 | 62 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/librpmem/rpmem_util.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_util.h -- util functions for librpmem header file
*/
#ifndef RPMEM_UTIL_H
#define RPMEM_UTIL_H 1
#ifdef __cplusplus
extern "C" {
#endif
enum {
LERR = 1,
LWARN = 2,
LNOTICE = 3,
LINFO = 4,
_LDBG = 10,
};
#define RPMEM_LOG(level, fmt, args...) LOG(L##level, fmt, ## args)
#define RPMEM_DBG(fmt, args...) LOG(_LDBG, fmt, ## args)
#define RPMEM_FATAL(fmt, args...) FATAL(fmt, ## args)
#define RPMEM_ASSERT(cond) ASSERT(cond)
#define RPMEM_PERSIST_FLAGS_ALL RPMEM_PERSIST_RELAXED
#define RPMEM_PERSIST_FLAGS_MASK ((unsigned)(~RPMEM_PERSIST_FLAGS_ALL))
#define RPMEM_FLUSH_FLAGS_ALL RPMEM_FLUSH_RELAXED
#define RPMEM_FLUSH_FLAGS_MASK ((unsigned)(~RPMEM_FLUSH_FLAGS_ALL))
const char *rpmem_util_proto_errstr(enum rpmem_err err);
int rpmem_util_proto_errno(enum rpmem_err err);
void rpmem_util_cmds_init(void);
void rpmem_util_cmds_fini(void);
const char *rpmem_util_cmd_get(void);
void rpmem_util_get_env_max_nlanes(unsigned *max_nlanes);
void rpmem_util_get_env_wq_size(unsigned *wq_size);
#ifdef __cplusplus
}
#endif
#endif
| 1,137 | 22.708333 | 71 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/librpmem/rpmem_obc.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmem_obc.h -- rpmem out-of-band connection client header file
*/
#ifndef RPMEM_OBC_H
#define RPMEM_OBC_H 1
#include <sys/types.h>
#include <sys/socket.h>
#include "librpmem.h"
#ifdef __cplusplus
extern "C" {
#endif
struct rpmem_obc;
struct rpmem_obc *rpmem_obc_init(void);
void rpmem_obc_fini(struct rpmem_obc *rpc);
int rpmem_obc_connect(struct rpmem_obc *rpc,
const struct rpmem_target_info *info);
int rpmem_obc_disconnect(struct rpmem_obc *rpc);
int rpmem_obc_monitor(struct rpmem_obc *rpc, int nonblock);
int rpmem_obc_create(struct rpmem_obc *rpc,
const struct rpmem_req_attr *req,
struct rpmem_resp_attr *res,
const struct rpmem_pool_attr *pool_attr);
int rpmem_obc_open(struct rpmem_obc *rpc,
const struct rpmem_req_attr *req,
struct rpmem_resp_attr *res,
struct rpmem_pool_attr *pool_attr);
int rpmem_obc_set_attr(struct rpmem_obc *rpc,
const struct rpmem_pool_attr *pool_attr);
int rpmem_obc_close(struct rpmem_obc *rpc, int flags);
#ifdef __cplusplus
}
#endif
#endif
| 1,100 | 21.9375 | 65 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/librpmem/rpmem_cmd.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_cmd.h -- helper module for invoking separate process
*/
#ifndef RPMEM_CMD_H
#define RPMEM_CMD_H 1
#include <sys/types.h>
#ifdef __cplusplus
extern "C" {
#endif
struct rpmem_cmd {
int fd_in; /* stdin */
int fd_out; /* stdout */
int fd_err; /* stderr */
struct {
char **argv;
int argc;
} args; /* command arguments */
pid_t pid; /* pid of process */
};
struct rpmem_cmd *rpmem_cmd_init(void);
int rpmem_cmd_push(struct rpmem_cmd *cmd, const char *arg);
int rpmem_cmd_run(struct rpmem_cmd *cmd);
void rpmem_cmd_term(struct rpmem_cmd *cmd);
int rpmem_cmd_wait(struct rpmem_cmd *cmd, int *status);
void rpmem_cmd_fini(struct rpmem_cmd *cmd);
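/*
 * Typical lifecycle (illustrative sketch, error handling elided):
 *
 *	struct rpmem_cmd *cmd = rpmem_cmd_init();
 *	rpmem_cmd_push(cmd, "ssh");
 *	rpmem_cmd_run(cmd);
 *	int status;
 *	rpmem_cmd_wait(cmd, &status);
 *	rpmem_cmd_fini(cmd);
 */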
#ifdef __cplusplus
}
#endif
#endif
| 790 | 18.775 | 61 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemblk/blk.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* blk.h -- internal definitions for libpmem blk module
*/
#ifndef BLK_H
#define BLK_H 1
#include <stddef.h>
#include "ctl.h"
#include "os_thread.h"
#include "pool_hdr.h"
#include "page_size.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "alloc.h"
#include "fault_injection.h"
#define PMEMBLK_LOG_PREFIX "libpmemblk"
#define PMEMBLK_LOG_LEVEL_VAR "PMEMBLK_LOG_LEVEL"
#define PMEMBLK_LOG_FILE_VAR "PMEMBLK_LOG_FILE"
/* attributes of the blk memory pool format for the pool header */
#define BLK_HDR_SIG "PMEMBLK" /* must be 8 bytes including '\0' */
#define BLK_FORMAT_MAJOR 1
#define BLK_FORMAT_FEAT_DEFAULT \
{POOL_FEAT_COMPAT_DEFAULT, POOL_FEAT_INCOMPAT_DEFAULT, 0x0000}
#define BLK_FORMAT_FEAT_CHECK \
{POOL_FEAT_COMPAT_VALID, POOL_FEAT_INCOMPAT_VALID, 0x0000}
static const features_t blk_format_feat_default = BLK_FORMAT_FEAT_DEFAULT;
struct pmemblk {
struct pool_hdr hdr; /* memory pool header */
/* root info for on-media format... */
uint32_t bsize; /* block size */
/* flag indicating if the pool was zero-initialized */
int is_zeroed;
/* some run-time state, allocated out of memory pool... */
void *addr; /* mapped region */
size_t size; /* size of mapped region */
int is_pmem; /* true if pool is PMEM */
int rdonly; /* true if pool is opened read-only */
void *data; /* post-header data area */
size_t datasize; /* size of data area */
size_t nlba; /* number of LBAs in pool */
struct btt *bttp; /* btt handle */
unsigned nlane; /* number of lanes */
unsigned next_lane; /* used to rotate through lanes */
os_mutex_t *locks; /* one per lane */
int is_dev_dax; /* true if mapped on device dax */
struct ctl *ctl; /* top level node of the ctl tree structure */
struct pool_set *set; /* pool set info */
#ifdef DEBUG
/* held during read/write mprotected sections */
os_mutex_t write_lock;
#endif
};
/* data area starts at this alignment after the struct pmemblk above */
#define BLK_FORMAT_DATA_ALIGN ((uintptr_t)PMEM_PAGESIZE)
#if FAULT_INJECTION
void
pmemblk_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at);
int
pmemblk_fault_injection_enabled(void);
#else
static inline void
pmemblk_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
abort();
}
static inline int
pmemblk_fault_injection_enabled(void)
{
return 0;
}
#endif
#ifdef __cplusplus
}
#endif
#endif
| 2,483 | 23.116505 | 74 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemblk/btt.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* btt.h -- btt module definitions
*/
#ifndef BTT_H
#define BTT_H 1
#ifdef __cplusplus
extern "C" {
#endif
/* callback functions passed to btt_init() */
struct ns_callback {
int (*nsread)(void *ns, unsigned lane,
void *buf, size_t count, uint64_t off);
int (*nswrite)(void *ns, unsigned lane,
const void *buf, size_t count, uint64_t off);
int (*nszero)(void *ns, unsigned lane, size_t count, uint64_t off);
ssize_t (*nsmap)(void *ns, unsigned lane, void **addrp,
size_t len, uint64_t off);
void (*nssync)(void *ns, unsigned lane, void *addr, size_t len);
int ns_is_zeroed;
};
struct btt_info;
struct btt *btt_init(uint64_t rawsize, uint32_t lbasize, uint8_t parent_uuid[],
unsigned maxlane, void *ns, const struct ns_callback *ns_cbp);
unsigned btt_nlane(struct btt *bttp);
size_t btt_nlba(struct btt *bttp);
int btt_read(struct btt *bttp, unsigned lane, uint64_t lba, void *buf);
int btt_write(struct btt *bttp, unsigned lane, uint64_t lba, const void *buf);
int btt_set_zero(struct btt *bttp, unsigned lane, uint64_t lba);
int btt_set_error(struct btt *bttp, unsigned lane, uint64_t lba);
int btt_check(struct btt *bttp);
void btt_fini(struct btt *bttp);
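/*
 * Example (sketch): writing then reading back one block, assuming the
 * caller owns 'lane' (lane < btt_nlane(bttp)) and 'buf' holds one LBA:
 *
 *	btt_write(bttp, lane, lba, buf);
 *	btt_read(bttp, lane, lba, buf);
 */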
uint64_t btt_flog_size(uint32_t nfree);
uint64_t btt_map_size(uint32_t external_nlba);
uint64_t btt_arena_datasize(uint64_t arena_size, uint32_t nfree);
int btt_info_set(struct btt_info *info, uint32_t external_lbasize,
uint32_t nfree, uint64_t arena_size, uint64_t space_left);
struct btt_flog *btt_flog_get_valid(struct btt_flog *flog_pair, int *next);
int map_entry_is_initial(uint32_t map_entry);
void btt_info_convert2h(struct btt_info *infop);
void btt_info_convert2le(struct btt_info *infop);
void btt_flog_convert2h(struct btt_flog *flogp);
void btt_flog_convert2le(struct btt_flog *flogp);
#ifdef __cplusplus
}
#endif
#endif
| 1,908 | 30.816667 | 79 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemblk/btt_layout.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* btt_layout.h -- block translation table on-media layout definitions
*/
/*
* Layout of BTT info block. All integers are stored little-endian.
*/
#ifndef BTT_LAYOUT_H
#define BTT_LAYOUT_H 1
#ifdef __cplusplus
extern "C" {
#endif
#define BTT_ALIGNMENT ((uintptr_t)4096) /* alignment of all BTT structures */
#define BTTINFO_SIG_LEN 16
#define BTTINFO_UUID_LEN 16
#define BTTINFO_UNUSED_LEN 3968
#define BTTINFO_SIG "BTT_ARENA_INFO\0"
struct btt_info {
char sig[BTTINFO_SIG_LEN]; /* must be "BTT_ARENA_INFO\0\0" */
uint8_t uuid[BTTINFO_UUID_LEN]; /* BTT UUID */
uint8_t parent_uuid[BTTINFO_UUID_LEN]; /* UUID of container */
uint32_t flags; /* see flag bits below */
uint16_t major; /* major version */
uint16_t minor; /* minor version */
uint32_t external_lbasize; /* advertised LBA size (bytes) */
uint32_t external_nlba; /* advertised LBAs in this arena */
uint32_t internal_lbasize; /* size of data area blocks (bytes) */
uint32_t internal_nlba; /* number of blocks in data area */
uint32_t nfree; /* number of free blocks */
uint32_t infosize; /* size of this info block */
/*
* The following offsets are relative to the beginning of
* the btt_info block.
*/
uint64_t nextoff; /* offset to next arena (or zero) */
uint64_t dataoff; /* offset to arena data area */
uint64_t mapoff; /* offset to area map */
uint64_t flogoff; /* offset to area flog */
uint64_t infooff; /* offset to backup info block */
char unused[BTTINFO_UNUSED_LEN]; /* must be zero */
uint64_t checksum; /* Fletcher64 of all fields */
};
/*
* Definitions for flags mask for btt_info structure above.
*/
#define BTTINFO_FLAG_ERROR 0x00000001 /* error state (read-only) */
#define BTTINFO_FLAG_ERROR_MASK 0x00000001 /* all error bits */
/*
* Current on-media format versions.
*/
#define BTTINFO_MAJOR_VERSION 1
#define BTTINFO_MINOR_VERSION 1
/*
* Layout of a BTT "flog" entry. All integers are stored little-endian.
*
* The "nfree" field in the BTT info block determines how many of these
* flog entries there are, and each entry consists of two of the following
* structs (entry updates alternate between the two structs), padded up
* to a cache line boundary to isolate adjacent updates.
*/
#define BTT_FLOG_PAIR_ALIGN ((uintptr_t)64)
struct btt_flog {
uint32_t lba; /* last pre-map LBA using this entry */
uint32_t old_map; /* old post-map LBA (the freed block) */
uint32_t new_map; /* new post-map LBA */
uint32_t seq; /* sequence number (01, 10, 11) */
};
/*
* Layout of a BTT "map" entry. 4-byte internal LBA offset, little-endian.
*/
#define BTT_MAP_ENTRY_SIZE 4
#define BTT_MAP_ENTRY_ERROR 0x40000000U
#define BTT_MAP_ENTRY_ZERO 0x80000000U
#define BTT_MAP_ENTRY_NORMAL 0xC0000000U
#define BTT_MAP_ENTRY_LBA_MASK 0x3fffffffU
#define BTT_MAP_LOCK_ALIGN ((uintptr_t)64)
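/*
 * Decoding sketch (illustrative): the top two bits of a map entry select
 * its state, the low 30 bits hold the post-map LBA:
 *
 *	uint32_t state = entry & ~BTT_MAP_ENTRY_LBA_MASK;
 *	uint32_t postmap_lba = entry & BTT_MAP_ENTRY_LBA_MASK;
 *	if (state == BTT_MAP_ENTRY_ZERO)
 *		;	// block reads back as zeros
 */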
/*
* BTT layout properties...
*/
#define BTT_MIN_SIZE ((1u << 20) * 16)
#define BTT_MAX_ARENA (1ull << 39) /* 512GB per arena */
#define BTT_MIN_LBA_SIZE (size_t)512
#define BTT_INTERNAL_LBA_ALIGNMENT 256U
#define BTT_DEFAULT_NFREE 256
#ifdef __cplusplus
}
#endif
#endif
| 3,197 | 28.611111 | 77 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/heap_layout.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* heap_layout.h -- internal definitions for heap layout
*/
#ifndef LIBPMEMOBJ_HEAP_LAYOUT_H
#define LIBPMEMOBJ_HEAP_LAYOUT_H 1
#include <stddef.h>
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
#define HEAP_MAJOR 1
#define HEAP_MINOR 0
#define MAX_CHUNK (UINT16_MAX - 7) /* has to be multiple of 8 */
#define CHUNK_BASE_ALIGNMENT 1024
#define CHUNKSIZE ((size_t)1024 * 256) /* 256 kilobytes */
#define MAX_MEMORY_BLOCK_SIZE (MAX_CHUNK * CHUNKSIZE)
#define HEAP_SIGNATURE_LEN 16
#define HEAP_SIGNATURE "MEMORY_HEAP_HDR\0"
#define ZONE_HEADER_MAGIC 0xC3F0A2D2
#define ZONE_MIN_SIZE (sizeof(struct zone) + sizeof(struct chunk))
#define ZONE_MAX_SIZE (sizeof(struct zone) + sizeof(struct chunk) * MAX_CHUNK)
#define HEAP_MIN_SIZE (sizeof(struct heap_layout) + ZONE_MIN_SIZE)
/* Base bitmap values, relevant for both normal and flexible bitmaps */
#define RUN_BITS_PER_VALUE 64U
#define RUN_BASE_METADATA_VALUES\
((unsigned)(sizeof(struct chunk_run_header) / sizeof(uint64_t)))
#define RUN_BASE_METADATA_SIZE (sizeof(struct chunk_run_header))
#define RUN_CONTENT_SIZE (CHUNKSIZE - RUN_BASE_METADATA_SIZE)
/*
* Calculates the size in bytes of a single run instance, including bitmap
*/
#define RUN_CONTENT_SIZE_BYTES(size_idx)\
(RUN_CONTENT_SIZE + (((size_idx) - 1) * CHUNKSIZE))
/* Default bitmap values, specific for old, non-flexible, bitmaps */
#define RUN_DEFAULT_METADATA_VALUES 40 /* in 8 byte words, 320 bytes total */
#define RUN_DEFAULT_BITMAP_VALUES \
(RUN_DEFAULT_METADATA_VALUES - RUN_BASE_METADATA_VALUES)
#define RUN_DEFAULT_BITMAP_SIZE (sizeof(uint64_t) * RUN_DEFAULT_BITMAP_VALUES)
#define RUN_DEFAULT_BITMAP_NBITS\
(RUN_BITS_PER_VALUE * RUN_DEFAULT_BITMAP_VALUES)
#define RUN_DEFAULT_SIZE \
(CHUNKSIZE - RUN_BASE_METADATA_SIZE - RUN_DEFAULT_BITMAP_SIZE)
/*
* Calculates the size in bytes of a single run instance, without bitmap,
* but only for the default fixed-bitmap algorithm
*/
#define RUN_DEFAULT_SIZE_BYTES(size_idx)\
(RUN_DEFAULT_SIZE + (((size_idx) - 1) * CHUNKSIZE))
#define CHUNK_MASK ((CHUNKSIZE) - 1)
#define CHUNK_ALIGN_UP(value) ((((value) + CHUNK_MASK) & ~CHUNK_MASK))
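/*
 * Example: with CHUNKSIZE == 256 kilobytes, CHUNK_ALIGN_UP(1) == CHUNKSIZE,
 * and already-aligned values pass through unchanged:
 * CHUNK_ALIGN_UP(CHUNKSIZE) == CHUNKSIZE.
 */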
enum chunk_flags {
CHUNK_FLAG_COMPACT_HEADER = 0x0001,
CHUNK_FLAG_HEADER_NONE = 0x0002,
CHUNK_FLAG_ALIGNED = 0x0004,
CHUNK_FLAG_FLEX_BITMAP = 0x0008,
};
#define CHUNK_FLAGS_ALL_VALID (\
CHUNK_FLAG_COMPACT_HEADER |\
CHUNK_FLAG_HEADER_NONE |\
CHUNK_FLAG_ALIGNED |\
CHUNK_FLAG_FLEX_BITMAP\
)
enum chunk_type {
CHUNK_TYPE_UNKNOWN,
CHUNK_TYPE_FOOTER, /* not actual chunk type */
CHUNK_TYPE_FREE,
CHUNK_TYPE_USED,
CHUNK_TYPE_RUN,
CHUNK_TYPE_RUN_DATA,
MAX_CHUNK_TYPE
};
struct chunk {
uint8_t data[CHUNKSIZE];
};
struct chunk_run_header {
uint64_t block_size;
uint64_t alignment; /* valid only /w CHUNK_FLAG_ALIGNED */
};
struct chunk_run {
struct chunk_run_header hdr;
uint8_t content[RUN_CONTENT_SIZE]; /* bitmap + data */
};
struct chunk_header {
uint16_t type;
uint16_t flags;
uint32_t size_idx;
};
struct zone_header {
uint32_t magic;
uint32_t size_idx;
uint8_t reserved[56];
};
struct zone {
struct zone_header header;
struct chunk_header chunk_headers[MAX_CHUNK];
struct chunk chunks[];
};
struct heap_header {
char signature[HEAP_SIGNATURE_LEN];
uint64_t major;
uint64_t minor;
uint64_t unused; /* might be garbage */
uint64_t chunksize;
uint64_t chunks_per_zone;
uint8_t reserved[960];
uint64_t checksum;
};
struct heap_layout {
struct heap_header header;
struct zone zone0; /* first element of zones array */
};
#define ALLOC_HDR_SIZE_SHIFT (48ULL)
#define ALLOC_HDR_FLAGS_MASK (((1ULL) << ALLOC_HDR_SIZE_SHIFT) - 1)
struct allocation_header_legacy {
uint8_t unused[8];
uint64_t size;
uint8_t unused2[32];
uint64_t root_size;
uint64_t type_num;
};
#define ALLOC_HDR_COMPACT_SIZE sizeof(struct allocation_header_compact)
struct allocation_header_compact {
uint64_t size;
uint64_t extra;
};
enum header_type {
HEADER_LEGACY,
HEADER_COMPACT,
HEADER_NONE,
MAX_HEADER_TYPES
};
static const size_t header_type_to_size[MAX_HEADER_TYPES] = {
sizeof(struct allocation_header_legacy),
sizeof(struct allocation_header_compact),
0
};
static const enum chunk_flags header_type_to_flag[MAX_HEADER_TYPES] = {
(enum chunk_flags)0,
CHUNK_FLAG_COMPACT_HEADER,
CHUNK_FLAG_HEADER_NONE
};
static inline struct zone *
ZID_TO_ZONE(struct heap_layout *layout, size_t zone_id)
{
return (struct zone *)
((uintptr_t)&layout->zone0 + ZONE_MAX_SIZE * zone_id);
}
static inline struct chunk_header *
GET_CHUNK_HDR(struct heap_layout *layout, size_t zone_id, unsigned chunk_id)
{
return &ZID_TO_ZONE(layout, zone_id)->chunk_headers[chunk_id];
}
static inline struct chunk *
GET_CHUNK(struct heap_layout *layout, size_t zone_id, unsigned chunk_id)
{
return &ZID_TO_ZONE(layout, zone_id)->chunks[chunk_id];
}
static inline struct chunk_run *
GET_CHUNK_RUN(struct heap_layout *layout, size_t zone_id, unsigned chunk_id)
{
return (struct chunk_run *)GET_CHUNK(layout, zone_id, chunk_id);
}
#ifdef __cplusplus
}
#endif
#endif
| 5,105 | 23.666667 | 78 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/alloc_class.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* alloc_class.h -- internal definitions for allocation classes
*/
#ifndef LIBPMEMOBJ_ALLOC_CLASS_H
#define LIBPMEMOBJ_ALLOC_CLASS_H 1
#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>
#include "heap_layout.h"
#include "memblock.h"
#ifdef __cplusplus
extern "C" {
#endif
#define MAX_ALLOCATION_CLASSES (UINT8_MAX)
#define DEFAULT_ALLOC_CLASS_ID (0)
#define RUN_UNIT_MAX RUN_BITS_PER_VALUE
struct alloc_class_collection;
enum alloc_class_type {
CLASS_UNKNOWN,
CLASS_HUGE,
CLASS_RUN,
MAX_ALLOC_CLASS_TYPES
};
struct alloc_class {
uint8_t id;
uint16_t flags;
size_t unit_size;
enum header_type header_type;
enum alloc_class_type type;
/* run-specific data */
struct run_descriptor rdsc;
};
struct alloc_class_collection *alloc_class_collection_new(void);
void alloc_class_collection_delete(struct alloc_class_collection *ac);
struct alloc_class *alloc_class_by_run(
struct alloc_class_collection *ac,
size_t unit_size, uint16_t flags, uint32_t size_idx);
struct alloc_class *alloc_class_by_alloc_size(
struct alloc_class_collection *ac, size_t size);
struct alloc_class *alloc_class_by_id(
struct alloc_class_collection *ac, uint8_t id);
int alloc_class_reserve(struct alloc_class_collection *ac, uint8_t id);
int alloc_class_find_first_free_slot(struct alloc_class_collection *ac,
uint8_t *slot);
ssize_t
alloc_class_calc_size_idx(struct alloc_class *c, size_t size);
struct alloc_class *
alloc_class_new(int id, struct alloc_class_collection *ac,
enum alloc_class_type type, enum header_type htype,
size_t unit_size, size_t alignment,
uint32_t size_idx);
void alloc_class_delete(struct alloc_class_collection *ac,
struct alloc_class *c);
#ifdef __cplusplus
}
#endif
#endif
| 1,815 | 21.7 | 71 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/container_seglists.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* container_seglists.h -- internal definitions for
* segregated lists block container
*/
#ifndef LIBPMEMOBJ_CONTAINER_SEGLISTS_H
#define LIBPMEMOBJ_CONTAINER_SEGLISTS_H 1
#include "container.h"
#ifdef __cplusplus
extern "C" {
#endif
struct block_container *container_new_seglists(struct palloc_heap *heap);
#ifdef __cplusplus
}
#endif
#endif /* LIBPMEMOBJ_CONTAINER_SEGLISTS_H */
| 479 | 18.2 | 73 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/obj.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* obj.h -- internal definitions for obj module
*/
#ifndef LIBPMEMOBJ_OBJ_H
#define LIBPMEMOBJ_OBJ_H 1
#include <stddef.h>
#include <stdint.h>
#include "lane.h"
#include "pool_hdr.h"
#include "pmalloc.h"
#include "ctl.h"
#include "sync.h"
#include "stats.h"
#include "ctl_debug.h"
#include "page_size.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "alloc.h"
#include "fault_injection.h"
#define PMEMOBJ_LOG_PREFIX "libpmemobj"
#define PMEMOBJ_LOG_LEVEL_VAR "PMEMOBJ_LOG_LEVEL"
#define PMEMOBJ_LOG_FILE_VAR "PMEMOBJ_LOG_FILE"
/* attributes of the obj memory pool format for the pool header */
#define OBJ_HDR_SIG "PMEMOBJ" /* must be 8 bytes including '\0' */
#define OBJ_FORMAT_MAJOR 6
#define OBJ_FORMAT_FEAT_DEFAULT \
{POOL_FEAT_COMPAT_DEFAULT, POOL_FEAT_INCOMPAT_DEFAULT, 0x0000}
#define OBJ_FORMAT_FEAT_CHECK \
{POOL_FEAT_COMPAT_VALID, POOL_FEAT_INCOMPAT_VALID, 0x0000}
static const features_t obj_format_feat_default = OBJ_FORMAT_FEAT_DEFAULT;
/* size of the persistent part of PMEMOBJ pool descriptor */
#define OBJ_DSC_P_SIZE 2048
/* size of unused part of the persistent part of PMEMOBJ pool descriptor */
#define OBJ_DSC_P_UNUSED (OBJ_DSC_P_SIZE - PMEMOBJ_MAX_LAYOUT - 40)
#define OBJ_LANES_OFFSET (sizeof(struct pmemobjpool)) /* lanes offset */
#define OBJ_NLANES 1024 /* number of lanes */
#define OBJ_OFF_TO_PTR(pop, off) ((void *)((uintptr_t)(pop) + (off)))
#define OBJ_PTR_TO_OFF(pop, ptr) ((uintptr_t)(ptr) - (uintptr_t)(pop))
#define OBJ_OID_IS_NULL(oid) ((oid).off == 0)
#define OBJ_LIST_EMPTY(head) OBJ_OID_IS_NULL((head)->pe_first)
#define OBJ_OFF_FROM_HEAP(pop, off)\
((off) >= (pop)->heap_offset &&\
(off) < (pop)->heap_offset + (pop)->heap_size)
#define OBJ_OFF_FROM_LANES(pop, off)\
((off) >= (pop)->lanes_offset &&\
(off) < (pop)->lanes_offset +\
(pop)->nlanes * sizeof(struct lane_layout))
#define OBJ_PTR_FROM_POOL(pop, ptr)\
((uintptr_t)(ptr) >= (uintptr_t)(pop) &&\
(uintptr_t)(ptr) < (uintptr_t)(pop) +\
(pop)->heap_offset + (pop)->heap_size)
#define OBJ_OFF_IS_VALID(pop, off)\
(OBJ_OFF_FROM_HEAP(pop, off) ||\
(OBJ_PTR_TO_OFF(pop, &(pop)->root_offset) == (off)) ||\
(OBJ_PTR_TO_OFF(pop, &(pop)->root_size) == (off)) ||\
(OBJ_OFF_FROM_LANES(pop, off)))
#define OBJ_PTR_IS_VALID(pop, ptr)\
OBJ_OFF_IS_VALID(pop, OBJ_PTR_TO_OFF(pop, ptr))
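/*
 * Translation sketch (illustrative), assuming 'oid' belongs to this pool:
 *
 *	void *ptr = OBJ_OFF_TO_PTR(pop, oid.off);
 *	ASSERT(OBJ_PTR_IS_VALID(pop, ptr));
 *	ASSERT(OBJ_PTR_TO_OFF(pop, ptr) == oid.off);
 */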
typedef void (*persist_local_fn)(const void *, size_t);
typedef void (*flush_local_fn)(const void *, size_t);
typedef void (*drain_local_fn)(void);
typedef void *(*memcpy_local_fn)(void *dest, const void *src, size_t len,
unsigned flags);
typedef void *(*memmove_local_fn)(void *dest, const void *src, size_t len,
unsigned flags);
typedef void *(*memset_local_fn)(void *dest, int c, size_t len, unsigned flags);
typedef int (*persist_remote_fn)(PMEMobjpool *pop, const void *addr,
size_t len, unsigned lane, unsigned flags);
typedef uint64_t type_num_t;
#define CONVERSION_FLAG_OLD_SET_CACHE ((1ULL) << 0)
/* PMEM_OBJ_POOL_HEAD_SIZE: size without the unused and unused2 arrays */
#define PMEM_OBJ_POOL_HEAD_SIZE 2196
#define PMEM_OBJ_POOL_UNUSED2_SIZE (PMEM_PAGESIZE \
- OBJ_DSC_P_UNUSED\
- PMEM_OBJ_POOL_HEAD_SIZE)
/*
//NEW
//#define _GNU_SOURCE
//#include <sys/types.h>
//#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>
//int __real_open(const char *__path, int __oflag);
//int __wrap_open(const char *__path, int __oflag);
void* open_device(void);
//END NEW
*/
struct pmemobjpool {
struct pool_hdr hdr; /* memory pool header */
/* persistent part of PMEMOBJ pool descriptor (2kB) */
char layout[PMEMOBJ_MAX_LAYOUT];
uint64_t lanes_offset;
uint64_t nlanes;
uint64_t heap_offset;
uint64_t unused3;
unsigned char unused[OBJ_DSC_P_UNUSED]; /* must be zero */
uint64_t checksum; /* checksum of above fields */
uint64_t root_offset;
/* unique runID for this program run - persistent but not checksummed */
uint64_t run_id;
uint64_t root_size;
/*
* These flags can be set from a conversion tool and are set only for
* the first recovery of the pool.
*/
uint64_t conversion_flags;
uint64_t heap_size;
struct stats_persistent stats_persistent;
char pmem_reserved[496]; /* must be zeroed */
/* some run-time state, allocated out of memory pool... */
void *addr; /* mapped region */
int is_pmem; /* true if pool is PMEM */
int rdonly; /* true if pool is opened read-only */
struct palloc_heap heap;
struct lane_descriptor lanes_desc;
uint64_t uuid_lo;
int is_dev_dax; /* true if mapped on device dax */
struct ctl *ctl; /* top level node of the ctl tree structure */
struct stats *stats;
struct pool_set *set; /* pool set info */
struct pmemobjpool *replica; /* next replica */
/* per-replica functions: pmem or non-pmem */
persist_local_fn persist_local; /* persist function */
flush_local_fn flush_local; /* flush function */
drain_local_fn drain_local; /* drain function */
memcpy_local_fn memcpy_local; /* persistent memcpy function */
memmove_local_fn memmove_local; /* persistent memmove function */
memset_local_fn memset_local; /* persistent memset function */
/* for 'master' replica: with or without data replication */
struct pmem_ops p_ops;
PMEMmutex rootlock; /* root object lock */
int is_master_replica;
int has_remote_replicas;
/* remote replica section */
void *rpp; /* RPMEMpool opaque handle if it is a remote replica */
uintptr_t remote_base; /* beginning of the remote pool */
char *node_addr; /* address of a remote node */
char *pool_desc; /* descriptor of a poolset */
persist_remote_fn persist_remote; /* remote persist function */
int vg_boot;
int tx_debug_skip_expensive_checks;
struct tx_parameters *tx_params;
/*
* Locks are dynamically allocated on FreeBSD. Keep track so
* we can free them on pmemobj_close.
*/
PMEMmutex_internal *mutex_head;
PMEMrwlock_internal *rwlock_head;
PMEMcond_internal *cond_head;
struct {
struct ravl *map;
os_mutex_t lock;
int verify;
} ulog_user_buffers;
void *user_data;
//New
//void *device;
/* padding to align size of this structure to page boundary */
/* sizeof(unused2) == 8192 - offsetof(struct pmemobjpool, unused2) */
	char unused2[PMEM_OBJ_POOL_UNUSED2_SIZE - 28];
};
/*
* Stored in the 'size' field of oobh header, determines whether the object
* is internal or not. Internal objects are skipped in pmemobj iteration
* functions.
*/
#define OBJ_INTERNAL_OBJECT_MASK ((1ULL) << 15)
#define CLASS_ID_FROM_FLAG(flag)\
((uint16_t)((flag) >> 48))
#define ARENA_ID_FROM_FLAG(flag)\
((uint16_t)((flag) >> 32))
/*
* pmemobj_get_uuid_lo -- (internal) evaluates XOR sum of least significant
* 8 bytes with most significant 8 bytes.
*/
static inline uint64_t
pmemobj_get_uuid_lo(PMEMobjpool *pop)
{
uint64_t uuid_lo = 0;
for (int i = 0; i < 8; i++) {
uuid_lo = (uuid_lo << 8) |
(pop->hdr.poolset_uuid[i] ^
pop->hdr.poolset_uuid[8 + i]);
}
return uuid_lo;
}
/*
* OBJ_OID_IS_VALID -- (internal) checks if 'oid' is valid
*/
static inline int
OBJ_OID_IS_VALID(PMEMobjpool *pop, PMEMoid oid)
{
return OBJ_OID_IS_NULL(oid) ||
(oid.pool_uuid_lo == pop->uuid_lo &&
oid.off >= pop->heap_offset &&
oid.off < pop->heap_offset + pop->heap_size);
}
static inline int
OBJ_OFF_IS_VALID_FROM_CTX(void *ctx, uint64_t offset)
{
PMEMobjpool *pop = (PMEMobjpool *)ctx;
return OBJ_OFF_IS_VALID(pop, offset);
}
void obj_init(void);
void obj_fini(void);
int obj_read_remote(void *ctx, uintptr_t base, void *dest, void *addr,
size_t length);
/*
* (debug helper macro) logs notice message if used inside a transaction
*/
#ifdef DEBUG
#define _POBJ_DEBUG_NOTICE_IN_TX()\
_pobj_debug_notice(__func__, NULL, 0)
#else
#define _POBJ_DEBUG_NOTICE_IN_TX() do {} while (0)
#endif
#if FAULT_INJECTION
void
pmemobj_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at);
int
pmemobj_fault_injection_enabled(void);
#else
static inline void
pmemobj_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
abort();
}
static inline int
pmemobj_fault_injection_enabled(void)
{
return 0;
}
#endif
#ifdef __cplusplus
}
#endif
#endif
| 8,196 | 25.441935 | 80 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/list.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* list.h -- internal definitions for persistent atomic lists module
*/
#ifndef LIBPMEMOBJ_LIST_H
#define LIBPMEMOBJ_LIST_H 1
#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>
#include "libpmemobj.h"
#include "lane.h"
#include "pmalloc.h"
#include "ulog.h"
#ifdef __cplusplus
extern "C" {
#endif
struct list_entry {
PMEMoid pe_next;
PMEMoid pe_prev;
};
struct list_head {
PMEMoid pe_first;
PMEMmutex lock;
};
int list_insert_new_user(PMEMobjpool *pop,
size_t pe_offset, struct list_head *user_head, PMEMoid dest, int before,
size_t size, uint64_t type_num, palloc_constr constructor, void *arg,
PMEMoid *oidp);
int list_insert(PMEMobjpool *pop,
ssize_t pe_offset, struct list_head *head, PMEMoid dest, int before,
PMEMoid oid);
int list_remove_free_user(PMEMobjpool *pop,
size_t pe_offset, struct list_head *user_head,
PMEMoid *oidp);
int list_remove(PMEMobjpool *pop,
ssize_t pe_offset, struct list_head *head,
PMEMoid oid);
int list_move(PMEMobjpool *pop,
size_t pe_offset_old, struct list_head *head_old,
size_t pe_offset_new, struct list_head *head_new,
PMEMoid dest, int before, PMEMoid oid);
void list_move_oob(PMEMobjpool *pop,
struct list_head *head_old, struct list_head *head_new,
PMEMoid oid);
#ifdef __cplusplus
}
#endif
#endif
| 1,376 | 20.184615 | 73 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/ctl_debug.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* ctl_debug.h -- definitions for the debug CTL namespace
*/
#ifndef LIBPMEMOBJ_CTL_DEBUG_H
#define LIBPMEMOBJ_CTL_DEBUG_H 1
#include "libpmemobj.h"
#ifdef __cplusplus
extern "C" {
#endif
void debug_ctl_register(PMEMobjpool *pop);
#ifdef __cplusplus
}
#endif
#endif /* LIBPMEMOBJ_CTL_DEBUG_H */
| 386 | 15.826087 | 57 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/heap.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* heap.h -- internal definitions for heap
*/
#ifndef LIBPMEMOBJ_HEAP_H
#define LIBPMEMOBJ_HEAP_H 1
#include <stddef.h>
#include <stdint.h>
#include "bucket.h"
#include "memblock.h"
#include "memops.h"
#include "palloc.h"
#include "os_thread.h"
#ifdef __cplusplus
extern "C" {
#endif
#define HEAP_OFF_TO_PTR(heap, off) ((void *)((char *)((heap)->base) + (off)))
#define HEAP_PTR_TO_OFF(heap, ptr)\
((uintptr_t)(ptr) - (uintptr_t)((heap)->base))
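/*
 * Note: the two macros above are inverses, e.g.
 * HEAP_PTR_TO_OFF(heap, HEAP_OFF_TO_PTR(heap, off)) == off.
 */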
#define BIT_IS_CLR(a, i) (!((a) & (1ULL << (i))))
#define HEAP_ARENA_PER_THREAD (0)
int heap_boot(struct palloc_heap *heap, void *heap_start, uint64_t heap_size,
uint64_t *sizep,
void *base, struct pmem_ops *p_ops,
struct stats *stats, struct pool_set *set);
int heap_init(void *heap_start, uint64_t heap_size, uint64_t *sizep,
struct pmem_ops *p_ops);
void heap_cleanup(struct palloc_heap *heap);
int heap_check(void *heap_start, uint64_t heap_size);
int heap_check_remote(void *heap_start, uint64_t heap_size,
struct remote_ops *ops);
int heap_buckets_init(struct palloc_heap *heap);
int heap_create_alloc_class_buckets(struct palloc_heap *heap,
struct alloc_class *c);
int heap_extend(struct palloc_heap *heap, struct bucket *defb, size_t size);
struct alloc_class *
heap_get_best_class(struct palloc_heap *heap, size_t size);
struct bucket *
heap_bucket_acquire(struct palloc_heap *heap, uint8_t class_id,
uint16_t arena_id);
void
heap_bucket_release(struct palloc_heap *heap, struct bucket *b);
int heap_get_bestfit_block(struct palloc_heap *heap, struct bucket *b,
struct memory_block *m);
struct memory_block
heap_coalesce_huge(struct palloc_heap *heap, struct bucket *b,
const struct memory_block *m);
os_mutex_t *heap_get_run_lock(struct palloc_heap *heap,
uint32_t chunk_id);
void
heap_force_recycle(struct palloc_heap *heap);
void
heap_discard_run(struct palloc_heap *heap, struct memory_block *m);
void
heap_memblock_on_free(struct palloc_heap *heap, const struct memory_block *m);
int
heap_free_chunk_reuse(struct palloc_heap *heap,
struct bucket *bucket, struct memory_block *m);
void heap_foreach_object(struct palloc_heap *heap, object_callback cb,
void *arg, struct memory_block start);
struct alloc_class_collection *heap_alloc_classes(struct palloc_heap *heap);
void *heap_end(struct palloc_heap *heap);
unsigned heap_get_narenas_total(struct palloc_heap *heap);
unsigned heap_get_narenas_max(struct palloc_heap *heap);
int heap_set_narenas_max(struct palloc_heap *heap, unsigned size);
unsigned heap_get_narenas_auto(struct palloc_heap *heap);
unsigned heap_get_thread_arena_id(struct palloc_heap *heap);
int heap_arena_create(struct palloc_heap *heap);
struct bucket **
heap_get_arena_buckets(struct palloc_heap *heap, unsigned arena_id);
int heap_get_arena_auto(struct palloc_heap *heap, unsigned arena_id);
int heap_set_arena_auto(struct palloc_heap *heap, unsigned arena_id,
int automatic);
void heap_set_arena_thread(struct palloc_heap *heap, unsigned arena_id);
void heap_vg_open(struct palloc_heap *heap, object_callback cb,
void *arg, int objects);
static inline struct chunk_header *
heap_get_chunk_hdr(struct palloc_heap *heap, const struct memory_block *m)
{
return GET_CHUNK_HDR(heap->layout, m->zone_id, m->chunk_id);
}
static inline struct chunk *
heap_get_chunk(struct palloc_heap *heap, const struct memory_block *m)
{
return GET_CHUNK(heap->layout, m->zone_id, m->chunk_id);
}
static inline struct chunk_run *
heap_get_chunk_run(struct palloc_heap *heap, const struct memory_block *m)
{
return GET_CHUNK_RUN(heap->layout, m->zone_id, m->chunk_id);
}
#ifdef __cplusplus
}
#endif
#endif
| 3,719 | 26.969925 | 78 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/memops.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* memops.h -- aggregated memory operations helper definitions
*/
#ifndef LIBPMEMOBJ_MEMOPS_H
#define LIBPMEMOBJ_MEMOPS_H 1
#include <stddef.h>
#include <stdint.h>
#include "vec.h"
#include "pmemops.h"
#include "ulog.h"
#include "lane.h"
#ifdef __cplusplus
extern "C" {
#endif
enum operation_log_type {
LOG_PERSISTENT, /* log of persistent modifications */
LOG_TRANSIENT, /* log of transient memory modifications */
MAX_OPERATION_LOG_TYPE
};
enum log_type {
LOG_TYPE_UNDO,
LOG_TYPE_REDO,
MAX_LOG_TYPE,
};
struct user_buffer_def {
void *addr;
size_t size;
};
#ifdef GET_NDP_BREAKDOWN
extern uint64_t ulogCycles;
#endif
#ifdef USE_NDP_REDO
extern int use_ndp_redo;
#endif
struct operation_context;
struct operation_context *
operation_new(struct ulog *redo, size_t ulog_base_nbytes,
ulog_extend_fn extend, ulog_free_fn ulog_free,
const struct pmem_ops *p_ops, enum log_type type);
void operation_init(struct operation_context *ctx);
void operation_start(struct operation_context *ctx);
void operation_resume(struct operation_context *ctx);
void operation_delete(struct operation_context *ctx);
void operation_free_logs(struct operation_context *ctx, uint64_t flags);
int operation_add_buffer(struct operation_context *ctx,
void *dest, void *src, size_t size, ulog_operation_type type);
int operation_add_entry(struct operation_context *ctx,
void *ptr, uint64_t value, ulog_operation_type type);
int operation_add_typed_entry(struct operation_context *ctx,
void *ptr, uint64_t value,
ulog_operation_type type, enum operation_log_type log_type);
int operation_user_buffer_verify_align(struct operation_context *ctx,
struct user_buffer_def *userbuf);
void operation_add_user_buffer(struct operation_context *ctx,
struct user_buffer_def *userbuf);
void operation_set_auto_reserve(struct operation_context *ctx,
int auto_reserve);
void operation_set_any_user_buffer(struct operation_context *ctx,
int any_user_buffer);
int operation_get_any_user_buffer(struct operation_context *ctx);
int operation_user_buffer_range_cmp(const void *lhs, const void *rhs);
int operation_reserve(struct operation_context *ctx, size_t new_capacity);
void operation_process(struct operation_context *ctx);
void operation_finish(struct operation_context *ctx, unsigned flags);
void operation_cancel(struct operation_context *ctx);
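/*
 * Typical redo-log lifecycle (illustrative sketch; 'ptr' and 'value' are
 * placeholders, ULOG_OPERATION_SET comes from ulog.h):
 *
 *	operation_start(ctx);
 *	operation_add_entry(ctx, ptr, value, ULOG_OPERATION_SET);
 *	operation_process(ctx);
 *	operation_finish(ctx, 0);
 */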
#ifdef __cplusplus
}
#endif
#endif
| 2,467 | 26.422222 | 74 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/pmalloc.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* pmalloc.h -- internal definitions for persistent malloc
*/
#ifndef LIBPMEMOBJ_PMALLOC_H
#define LIBPMEMOBJ_PMALLOC_H 1
#include <stddef.h>
#include <stdint.h>
#include "libpmemobj.h"
#include "memops.h"
#include "palloc.h"
#ifdef __cplusplus
extern "C" {
#endif
/* single operations done in the internal context of the lane */
int pmalloc(PMEMobjpool *pop, uint64_t *off, size_t size,
uint64_t extra_field, uint16_t object_flags);
int pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size,
palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t object_flags, uint16_t class_id);
int prealloc(PMEMobjpool *pop, uint64_t *off, size_t size,
uint64_t extra_field, uint16_t object_flags);
void pfree(PMEMobjpool *pop, uint64_t *off);
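/*
 * Example (sketch): allocating and freeing a 64-byte block, where 'offp'
 * is a hypothetical pointer to a persistent uint64_t inside the pool:
 *
 *	if (pmalloc(pop, offp, 64, 0, 0) == 0)
 *		pfree(pop, offp);
 */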
/* external operation to be used together with context-aware palloc funcs */
struct operation_context *pmalloc_operation_hold(PMEMobjpool *pop);
struct operation_context *pmalloc_operation_hold_no_start(PMEMobjpool *pop);
void pmalloc_operation_release(PMEMobjpool *pop);
void pmalloc_ctl_register(PMEMobjpool *pop);
int pmalloc_cleanup(PMEMobjpool *pop);
int pmalloc_boot(PMEMobjpool *pop);
#ifdef __cplusplus
}
#endif
#endif
| 1,291 | 24.333333 | 76 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/recycler.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* recycler.h -- internal definitions of run recycler
*
* This is a container that stores runs that are currently not used by any of
* the buckets.
*/
#ifndef LIBPMEMOBJ_RECYCLER_H
#define LIBPMEMOBJ_RECYCLER_H 1
#include "memblock.h"
#include "vec.h"
#ifdef __cplusplus
extern "C" {
#endif
struct recycler;
VEC(empty_runs, struct memory_block);
struct recycler_element {
uint32_t max_free_block;
uint32_t free_space;
uint32_t chunk_id;
uint32_t zone_id;
};
struct recycler *recycler_new(struct palloc_heap *layout,
size_t nallocs, size_t *peak_arenas);
void recycler_delete(struct recycler *r);
struct recycler_element recycler_element_new(struct palloc_heap *heap,
const struct memory_block *m);
int recycler_put(struct recycler *r, const struct memory_block *m,
struct recycler_element element);
int recycler_get(struct recycler *r, struct memory_block *m);
struct empty_runs recycler_recalc(struct recycler *r, int force);
void recycler_inc_unaccounted(struct recycler *r,
const struct memory_block *m);
#ifdef __cplusplus
}
#endif
#endif
| 1,158 | 20.867925 | 77 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/palloc.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* palloc.h -- internal definitions for persistent allocator
*/
#ifndef LIBPMEMOBJ_PALLOC_H
#define LIBPMEMOBJ_PALLOC_H 1
#include <stddef.h>
#include <stdint.h>
#include "libpmemobj.h"
#include "memops.h"
#include "ulog.h"
#include "valgrind_internal.h"
#include "stats.h"
#ifdef __cplusplus
extern "C" {
#endif
#define PALLOC_CTL_DEBUG_NO_PATTERN (-1)
struct palloc_heap {
struct pmem_ops p_ops;
struct heap_layout *layout;
struct heap_rt *rt;
uint64_t *sizep;
uint64_t growsize;
struct stats *stats;
struct pool_set *set;
void *base;
int alloc_pattern;
};
struct memory_block;
typedef int (*palloc_constr)(void *base, void *ptr,
size_t usable_size, void *arg);
int palloc_operation(struct palloc_heap *heap, uint64_t off, uint64_t *dest_off,
size_t size, palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t object_flags,
uint16_t class_id, uint16_t arena_id,
struct operation_context *ctx);
int
palloc_reserve(struct palloc_heap *heap, size_t size,
palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t object_flags,
uint16_t class_id, uint16_t arena_id,
struct pobj_action *act);
void
palloc_defer_free(struct palloc_heap *heap, uint64_t off,
struct pobj_action *act);
void
palloc_cancel(struct palloc_heap *heap,
struct pobj_action *actv, size_t actvcnt);
void
palloc_publish(struct palloc_heap *heap,
struct pobj_action *actv, size_t actvcnt,
struct operation_context *ctx);
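/*
 * Reservation flow (sketch): a reservation stays volatile until published
 * under an operation context, or rolled back with palloc_cancel():
 *
 *	struct pobj_action act;
 *	if (palloc_reserve(heap, size, NULL, NULL, 0, 0, 0, 0, &act) == 0)
 *		palloc_publish(heap, &act, 1, ctx);
 */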
void
palloc_set_value(struct palloc_heap *heap, struct pobj_action *act,
uint64_t *ptr, uint64_t value);
uint64_t palloc_first(struct palloc_heap *heap);
uint64_t palloc_next(struct palloc_heap *heap, uint64_t off);
size_t palloc_usable_size(struct palloc_heap *heap, uint64_t off);
uint64_t palloc_extra(struct palloc_heap *heap, uint64_t off);
uint16_t palloc_flags(struct palloc_heap *heap, uint64_t off);
int palloc_boot(struct palloc_heap *heap, void *heap_start,
uint64_t heap_size, uint64_t *sizep,
void *base, struct pmem_ops *p_ops,
struct stats *stats, struct pool_set *set);
int palloc_buckets_init(struct palloc_heap *heap);
int palloc_init(void *heap_start, uint64_t heap_size, uint64_t *sizep,
struct pmem_ops *p_ops);
void *palloc_heap_end(struct palloc_heap *h);
int palloc_heap_check(void *heap_start, uint64_t heap_size);
int palloc_heap_check_remote(void *heap_start, uint64_t heap_size,
struct remote_ops *ops);
void palloc_heap_cleanup(struct palloc_heap *heap);
size_t palloc_heap(void *heap_start);
int palloc_defrag(struct palloc_heap *heap, uint64_t **objv, size_t objcnt,
struct operation_context *ctx, struct pobj_defrag_result *result);
/* foreach callback, terminates iteration if return value is non-zero */
typedef int (*object_callback)(const struct memory_block *m, void *arg);
#if VG_MEMCHECK_ENABLED
void palloc_heap_vg_open(struct palloc_heap *heap, int objects);
#endif
#ifdef __cplusplus
}
#endif
#endif
| 3,006 | 25.377193 | 80 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/container.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* container.h -- internal definitions for block containers
*/
#ifndef LIBPMEMOBJ_CONTAINER_H
#define LIBPMEMOBJ_CONTAINER_H 1
#include "memblock.h"
#ifdef __cplusplus
extern "C" {
#endif
struct block_container {
const struct block_container_ops *c_ops;
struct palloc_heap *heap;
};
struct block_container_ops {
/* inserts a new memory block into the container */
int (*insert)(struct block_container *c, const struct memory_block *m);
/* removes exact match memory block */
int (*get_rm_exact)(struct block_container *c,
const struct memory_block *m);
/* removes and returns the best-fit memory block for size */
int (*get_rm_bestfit)(struct block_container *c,
struct memory_block *m);
/* checks whether the container is empty */
int (*is_empty)(struct block_container *c);
/* removes all elements from the container */
void (*rm_all)(struct block_container *c);
/* deletes the container */
void (*destroy)(struct block_container *c);
};
#ifdef __cplusplus
}
#endif
#endif /* LIBPMEMOBJ_CONTAINER_H */
| 1,125 | 21.979592 | 72 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/stats.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2019, Intel Corporation */
/*
* stats.h -- definitions of statistics
*/
#ifndef LIBPMEMOBJ_STATS_H
#define LIBPMEMOBJ_STATS_H 1
#include "ctl.h"
#include "libpmemobj/ctl.h"
#ifdef __cplusplus
extern "C" {
#endif
struct stats_transient {
uint64_t heap_run_allocated;
uint64_t heap_run_active;
};
struct stats_persistent {
uint64_t heap_curr_allocated;
};
struct stats {
enum pobj_stats_enabled enabled;
struct stats_transient *transient;
struct stats_persistent *persistent;
};
#define STATS_INC(stats, type, name, value) do {\
STATS_INC_##type(stats, name, value);\
} while (0)
#define STATS_INC_transient(stats, name, value) do {\
if ((stats)->enabled == POBJ_STATS_ENABLED_TRANSIENT ||\
(stats)->enabled == POBJ_STATS_ENABLED_BOTH)\
util_fetch_and_add64((&(stats)->transient->name), (value));\
} while (0)
#define STATS_INC_persistent(stats, name, value) do {\
if ((stats)->enabled == POBJ_STATS_ENABLED_PERSISTENT ||\
(stats)->enabled == POBJ_STATS_ENABLED_BOTH)\
util_fetch_and_add64((&(stats)->persistent->name), (value));\
} while (0)
#define STATS_SUB(stats, type, name, value) do {\
STATS_SUB_##type(stats, name, value);\
} while (0)
#define STATS_SUB_transient(stats, name, value) do {\
if ((stats)->enabled == POBJ_STATS_ENABLED_TRANSIENT ||\
(stats)->enabled == POBJ_STATS_ENABLED_BOTH)\
util_fetch_and_sub64((&(stats)->transient->name), (value));\
} while (0)
#define STATS_SUB_persistent(stats, name, value) do {\
if ((stats)->enabled == POBJ_STATS_ENABLED_PERSISTENT ||\
(stats)->enabled == POBJ_STATS_ENABLED_BOTH)\
util_fetch_and_sub64((&(stats)->persistent->name), (value));\
} while (0)
#define STATS_SET(stats, type, name, value) do {\
STATS_SET_##type(stats, name, value);\
} while (0)
#define STATS_SET_transient(stats, name, value) do {\
if ((stats)->enabled == POBJ_STATS_ENABLED_TRANSIENT ||\
(stats)->enabled == POBJ_STATS_ENABLED_BOTH)\
util_atomic_store_explicit64((&(stats)->transient->name),\
(value), memory_order_release);\
} while (0)
#define STATS_SET_persistent(stats, name, value) do {\
if ((stats)->enabled == POBJ_STATS_ENABLED_PERSISTENT ||\
(stats)->enabled == POBJ_STATS_ENABLED_BOTH)\
util_atomic_store_explicit64((&(stats)->persistent->name),\
(value), memory_order_release);\
} while (0)
#define STATS_CTL_LEAF(type, name)\
{CTL_STR(name), CTL_NODE_LEAF,\
{CTL_READ_HANDLER(type##_##name), NULL, NULL},\
NULL, NULL}
#define STATS_CTL_HANDLER(type, name, varname)\
static int CTL_READ_HANDLER(type##_##name)(void *ctx,\
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)\
{\
PMEMobjpool *pop = ctx;\
uint64_t *argv = arg;\
util_atomic_load_explicit64(&pop->stats->type->varname,\
argv, memory_order_acquire);\
return 0;\
}
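/*
 * Usage sketch (illustrative): the macros above expand to atomic updates
 * that are skipped entirely unless the matching stats mode is enabled:
 *
 *	STATS_INC(heap->stats, transient, heap_run_active, 1);
 *	STATS_SUB(heap->stats, transient, heap_run_active, 1);
 */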
void stats_ctl_register(PMEMobjpool *pop);
struct stats *stats_new(PMEMobjpool *pop);
void stats_delete(PMEMobjpool *pop, struct stats *stats);
#ifdef __cplusplus
}
#endif
#endif
| 2,990 | 26.440367 | 71 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/container_ravl.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* container_ravl.h -- internal definitions for ravl-based block container
*/
#ifndef LIBPMEMOBJ_CONTAINER_RAVL_H
#define LIBPMEMOBJ_CONTAINER_RAVL_H 1
#include "container.h"
#ifdef __cplusplus
extern "C" {
#endif
struct block_container *container_new_ravl(struct palloc_heap *heap);
#ifdef __cplusplus
}
#endif
#endif /* LIBPMEMOBJ_CONTAINER_RAVL_H */
| 445 | 17.583333 | 74 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/tx.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* tx.h -- internal definitions for transactions
*/
#ifndef LIBPMEMOBJ_INTERNAL_TX_H
#define LIBPMEMOBJ_INTERNAL_TX_H 1
#include <stdint.h>
#include "obj.h"
#include "ulog.h"
#ifdef __cplusplus
extern "C" {
#endif
#define TX_DEFAULT_RANGE_CACHE_SIZE (1 << 15)
#define TX_DEFAULT_RANGE_CACHE_THRESHOLD (1 << 12)
#define TX_RANGE_MASK (8ULL - 1)
#define TX_RANGE_MASK_LEGACY (32ULL - 1)
#define TX_ALIGN_SIZE(s, amask) (((s) + (amask)) & ~(amask))
#define TX_SNAPSHOT_LOG_ENTRY_ALIGNMENT CACHELINE_SIZE
#define TX_SNAPSHOT_LOG_BUFFER_OVERHEAD sizeof(struct ulog)
#define TX_SNAPSHOT_LOG_ENTRY_OVERHEAD sizeof(struct ulog_entry_buf)
#define TX_INTENT_LOG_BUFFER_ALIGNMENT CACHELINE_SIZE
#define TX_INTENT_LOG_BUFFER_OVERHEAD sizeof(struct ulog)
#define TX_INTENT_LOG_ENTRY_OVERHEAD sizeof(struct ulog_entry_val)
struct tx_parameters {
size_t cache_size;
};
/*
* Returns the current transaction's pool handle, NULL if not within
* a transaction.
*/
PMEMobjpool *tx_get_pop(void);
void tx_ctl_register(PMEMobjpool *pop);
struct tx_parameters *tx_params_new(void);
void tx_params_delete(struct tx_parameters *tx_params);
#ifdef __cplusplus
}
#endif
#endif
| 1,258 | 22.314815 | 68 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/memblock.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* memblock.h -- internal definitions for memory block
*/
#ifndef LIBPMEMOBJ_MEMBLOCK_H
#define LIBPMEMOBJ_MEMBLOCK_H 1
#include <stddef.h>
#include <stdint.h>
#include "os_thread.h"
#include "heap_layout.h"
#include "memops.h"
#include "palloc.h"
#ifdef __cplusplus
extern "C" {
#endif
#define MEMORY_BLOCK_NONE \
(struct memory_block)\
{0, 0, 0, 0, NULL, NULL, MAX_HEADER_TYPES, MAX_MEMORY_BLOCK, NULL}
#define MEMORY_BLOCK_IS_NONE(_m)\
((_m).heap == NULL)
#define MEMORY_BLOCK_EQUALS(lhs, rhs)\
((lhs).zone_id == (rhs).zone_id && (lhs).chunk_id == (rhs).chunk_id &&\
(lhs).block_off == (rhs).block_off && (lhs).heap == (rhs).heap)
enum memory_block_type {
/*
* Huge memory blocks are directly backed by memory chunks. A single
* huge block can consist of several chunks.
* The persistent representation of huge memory blocks can be thought
* of as a doubly linked list with variable length elements.
* That list is stored in the chunk headers array where one element
* directly corresponds to one chunk.
*
* U - used, F - free, R - footer, . - empty
* |U| represents a used chunk with a size index of 1, with type
* information (CHUNK_TYPE_USED) stored in the corresponding header
* array element - chunk_headers[chunk_id].
*
* |F...R| represents a free chunk with size index of 5. The empty
* chunk headers have undefined values and shouldn't be used. All
	 * chunks with size larger than 1 must have a footer in the last
	 * corresponding header array - chunk_headers[chunk_id + size_idx - 1].
*
* The above representation of chunks will be used to describe the
* way fail-safety is achieved during heap operations.
*
* Allocation of huge memory block with size index 5:
* Initial heap state: |U| <> |F..R| <> |U| <> |F......R|
*
	 * The only block that matches that size is at the very end of the
	 * chunks list: |F......R|
	 *
	 * As the request was for a memory block of size 5, and this one's size
	 * is 7, there's a need to first split the chunk in two.
	 * 1) The last chunk header of the new allocation is marked as footer
	 *	and the block after that one is marked as free: |F...RF.R|
	 *	This is allowed and has no impact on the heap because this
	 *	modification touches a chunk header that is otherwise unused; in
	 *	other words, the linked list didn't change.
*
* 2) The size index of the first header is changed from previous value
* of 7 to 5: |F...R||F.R|
* This is a single fail-safe atomic operation and this is the
* first change that is noticeable by the heap operations.
* A single linked list element is split into two new ones.
*
* 3) The allocation process either uses redo log or changes directly
* the chunk header type from free to used: |U...R| <> |F.R|
*
* In a similar fashion the reverse operation, free, is performed:
* Initial heap state: |U| <> |F..R| <> |F| <> |U...R| <> |F.R|
*
* This is the heap after the previous example with the single chunk
* in between changed from used to free.
*
* 1) Determine the neighbors of the memory block which is being
* freed.
*
	 * 2) Update the footer (if needed) information of the last chunk which
	 *	is the memory block being freed or its neighbor to the right.
* |F| <> |U...R| <> |F.R << this one|
*
* 3) Update the size index and type of the left-most chunk header.
* And so this: |F << this one| <> |U...R| <> |F.R|
* becomes this: |F.......R|
	 *	The entire chunk header can be updated in a single fail-safe
	 *	atomic operation because its size is only 64 bytes.
*/
MEMORY_BLOCK_HUGE,
/*
* Run memory blocks are chunks with CHUNK_TYPE_RUN and size index of 1.
	 * The entire chunk is subdivided into smaller blocks and has
	 * additional metadata attached in the form of a bitmap - each bit
	 * corresponds to a single block.
	 * In this case there's no need to perform any coalescing or splitting
	 * on the persistent metadata.
	 * The bitmap is stored in a variable number of 64-bit values and,
	 * because of the requirement of allocation fail-safe atomicity, the
	 * maximum size index of a memory block from a run is 64 - since that's
	 * the limit of the atomic write guarantee.
*
	 * The allocation/deallocation process is a single 8-byte write that
	 * sets/clears the corresponding bits. Depending on the user's choice,
	 * it can be made either atomically or using a redo log when grouped
	 * with other operations.
	 * It's also important to note that in the case of realloc it might so
	 * happen that a single 8-byte bitmap value has its bits both set and
	 * cleared - that's why the run memory block metadata changes operate
	 * on AND'ing or OR'ing a bitmask instead of directly setting the value.
*/
MEMORY_BLOCK_RUN,
MAX_MEMORY_BLOCK
};
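/*
 * Bitmap-mask sketch for the run case described above (assuming the
 * affected bits fall within a single 64-bit value, i.e. size_idx < 64):
 *
 *	uint64_t bmask = ((1ULL << size_idx) - 1) << (block_off % 64);
 *	value |= bmask;		// allocation: OR the bits in
 *	value &= ~bmask;	// free: AND the bits out
 */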
enum memblock_state {
MEMBLOCK_STATE_UNKNOWN,
MEMBLOCK_ALLOCATED,
MEMBLOCK_FREE,
MAX_MEMBLOCK_STATE,
};
/* runtime bitmap information for a run */
struct run_bitmap {
unsigned nvalues; /* number of 8 byte values - size of values array */
unsigned nbits; /* number of valid bits */
size_t size; /* total size of the bitmap in bytes */
uint64_t *values; /* pointer to the bitmap's values array */
};
/* runtime information necessary to create a run */
struct run_descriptor {
uint16_t flags; /* chunk flags for the run */
size_t unit_size; /* the size of a single unit in a run */
uint32_t size_idx; /* size index of a single run instance */
size_t alignment; /* required alignment of objects */
unsigned nallocs; /* number of allocs per run */
struct run_bitmap bitmap;
};
struct memory_block_ops {
/* returns memory block size */
size_t (*block_size)(const struct memory_block *m);
/* prepares header modification operation */
void (*prep_hdr)(const struct memory_block *m,
enum memblock_state dest_state, struct operation_context *ctx);
/* returns lock associated with memory block */
os_mutex_t *(*get_lock)(const struct memory_block *m);
/* returns whether a block is allocated or not */
enum memblock_state (*get_state)(const struct memory_block *m);
/* returns pointer to the data of a block */
void *(*get_user_data)(const struct memory_block *m);
/*
* Returns the size of a memory block without overhead.
* This is the size of a data block that can be used.
*/
size_t (*get_user_size)(const struct memory_block *m);
/* returns pointer to the beginning of data of a run block */
void *(*get_real_data)(const struct memory_block *m);
/* returns the size of a memory block, including headers */
size_t (*get_real_size)(const struct memory_block *m);
/* writes a header of an allocation */
void (*write_header)(const struct memory_block *m,
uint64_t extra_field, uint16_t flags);
void (*invalidate)(const struct memory_block *m);
/*
* Checks the header type of a chunk matches the expected type and
* modifies it if necessary. This is fail-safe atomic.
*/
void (*ensure_header_type)(const struct memory_block *m,
enum header_type t);
/*
* Reinitializes a block after a heap restart.
* This is called for EVERY allocation, but *only* under Valgrind.
*/
void (*reinit_header)(const struct memory_block *m);
/* returns the extra field of an allocation */
uint64_t (*get_extra)(const struct memory_block *m);
/* returns the flags of an allocation */
uint16_t (*get_flags)(const struct memory_block *m);
/* initializes memblock in valgrind */
void (*vg_init)(const struct memory_block *m, int objects,
object_callback cb, void *arg);
/* iterates over every free block */
int (*iterate_free)(const struct memory_block *m,
object_callback cb, void *arg);
/* iterates over every used block */
int (*iterate_used)(const struct memory_block *m,
object_callback cb, void *arg);
/* calculates number of free units, valid only for runs */
void (*calc_free)(const struct memory_block *m,
uint32_t *free_space, uint32_t *max_free_block);
/* this is called exactly once for every existing chunk */
void (*reinit_chunk)(const struct memory_block *m);
/*
* Initializes bitmap data for a run.
* Do *not* use this function unless absolutely necessary, it breaks
* the abstraction layer by exposing implementation details.
*/
void (*get_bitmap)(const struct memory_block *m, struct run_bitmap *b);
/* calculates the ratio between occupied and unoccupied space */
unsigned (*fill_pct)(const struct memory_block *m);
};
struct memory_block {
uint32_t chunk_id; /* index of the memory block in its zone */
uint32_t zone_id; /* index of this block zone in the heap */
/*
	 * Size index of the memory block, expressed either as a multiple of
	 * CHUNKSIZE in the case of a huge chunk, or as a multiple of the run's
	 * block size.
*/
uint32_t size_idx;
/*
* Used only for run chunks, must be zeroed for huge.
* Number of preceding blocks in the chunk. In other words, the
* position of this memory block in run bitmap.
*/
uint32_t block_off;
/*
* The variables below are associated with the memory block and are
* stored here for convenience. Those fields are filled by either the
* memblock_from_offset or memblock_rebuild_state, and they should not
* be modified manually.
*/
const struct memory_block_ops *m_ops;
struct palloc_heap *heap;
enum header_type header_type;
enum memory_block_type type;
struct run_bitmap *cached_bitmap;
};
/*
* This is a representation of a run memory block that is active in a bucket or
* is on a pending list in the recycler.
* This structure should never be passed around by value because the address of
 * the nresv variable can be stored in reservations made through
 * palloc_reserve(). Only when the number of reservations equals 0 can the
 * structure be moved/freed.
*/
struct memory_block_reserved {
struct memory_block m;
struct bucket *bucket;
/*
 * Number of reservations made from this run; a pointer to this value
 * is stored in the user-facing pobj_action structure. It is
 * decremented once the reservation is published or canceled.
 */
int nresv;
};
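/*
 * Illustrative sketch: because a pobj_action may point at nresv, a
 * memory_block_reserved may only be moved or freed once no outstanding
 * reservations reference it. The helper below is hypothetical and only
 * demonstrates that invariant.
 */
static inline int
memblock_reserved_is_releasable_example(const struct memory_block_reserved *mr)
{
	/* safe only after every reservation was published or canceled */
	return mr->nresv == 0;
}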
struct memory_block memblock_from_offset(struct palloc_heap *heap,
uint64_t off);
struct memory_block memblock_from_offset_opt(struct palloc_heap *heap,
uint64_t off, int size);
void memblock_rebuild_state(struct palloc_heap *heap, struct memory_block *m);
struct memory_block memblock_huge_init(struct palloc_heap *heap,
uint32_t chunk_id, uint32_t zone_id, uint32_t size_idx);
struct memory_block memblock_run_init(struct palloc_heap *heap,
uint32_t chunk_id, uint32_t zone_id, struct run_descriptor *rdsc);
void memblock_run_bitmap(uint32_t *size_idx, uint16_t flags,
uint64_t unit_size, uint64_t alignment, void *content,
struct run_bitmap *b);
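/*
 * Illustrative sketch (assumes 'heap' is valid and 'off' is the offset of a
 * live allocation): memblock_from_offset() rebuilds the complete runtime
 * state of a block, after which its m_ops table can be used directly.
 */
static inline size_t
memblock_user_size_from_offset_example(struct palloc_heap *heap, uint64_t off)
{
	struct memory_block m = memblock_from_offset(heap, off);
	return m.m_ops->get_user_size(&m);
}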
#ifdef __cplusplus
}
#endif
#endif
| 10,750 | 34.019544 | 80 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/critnib.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* critnib.h -- internal definitions for critnib tree
*/
#ifndef LIBPMEMOBJ_CRITNIB_H
#define LIBPMEMOBJ_CRITNIB_H 1
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
struct critnib;
struct critnib *critnib_new(void);
void critnib_delete(struct critnib *c);
int critnib_insert(struct critnib *c, uint64_t key, void *value);
void *critnib_remove(struct critnib *c, uint64_t key);
void *critnib_get(struct critnib *c, uint64_t key);
void *critnib_find_le(struct critnib *c, uint64_t key);
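/*
 * Illustrative usage sketch with hypothetical keys/values: the tree maps
 * 64-bit keys to pointers, and critnib_find_le() returns the value stored
 * under the greatest key that is less than or equal to its argument.
 */
static inline void *
critnib_example(struct critnib *c)
{
	if (critnib_insert(c, 100, (void *)0x1) != 0)
		return NULL; /* insertion can fail, e.g. on allocation error */
	return critnib_find_le(c, 150); /* returns the value under key 100 */
}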
#ifdef __cplusplus
}
#endif
#endif
| 625 | 18.5625 | 65 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/pmemops.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
#ifndef LIBPMEMOBJ_PMEMOPS_H
#define LIBPMEMOBJ_PMEMOPS_H 1
#include <stddef.h>
#include <stdint.h>
#include "util.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef int (*persist_fn)(void *base, const void *, size_t, unsigned);
typedef int (*flush_fn)(void *base, const void *, size_t, unsigned);
typedef void (*drain_fn)(void *base);
typedef void *(*memcpy_fn)(void *base, void *dest, const void *src, size_t len,
unsigned flags);
typedef void *(*memmove_fn)(void *base, void *dest, const void *src, size_t len,
unsigned flags);
typedef void *(*memset_fn)(void *base, void *dest, int c, size_t len,
unsigned flags);
typedef int (*remote_read_fn)(void *ctx, uintptr_t base, void *dest, void *addr,
size_t length);
struct pmem_ops {
/* for 'master' replica: with or without data replication */
persist_fn persist; /* persist function */
flush_fn flush; /* flush function */
drain_fn drain; /* drain function */
memcpy_fn memcpy; /* persistent memcpy function */
memmove_fn memmove; /* persistent memmove function */
memset_fn memset; /* persistent memset function */
void *base;
struct remote_ops {
remote_read_fn read;
void *ctx;
uintptr_t base;
} remote;
void *device;
uint16_t objid;
};
static force_inline int
pmemops_xpersist(const struct pmem_ops *p_ops, const void *d, size_t s,
unsigned flags)
{
return p_ops->persist(p_ops->base, d, s, flags);
}
static force_inline void
pmemops_persist(const struct pmem_ops *p_ops, const void *d, size_t s)
{
(void) pmemops_xpersist(p_ops, d, s, 0);
}
static force_inline int
pmemops_xflush(const struct pmem_ops *p_ops, const void *d, size_t s,
unsigned flags)
{
return p_ops->flush(p_ops->base, d, s, flags);
}
static force_inline void
pmemops_flush(const struct pmem_ops *p_ops, const void *d, size_t s)
{
(void) pmemops_xflush(p_ops, d, s, 0);
}
static force_inline void
pmemops_drain(const struct pmem_ops *p_ops)
{
p_ops->drain(p_ops->base);
}
static force_inline void *
pmemops_memcpy(const struct pmem_ops *p_ops, void *dest,
const void *src, size_t len, unsigned flags)
{
return p_ops->memcpy(p_ops->base, dest, src, len, flags);
}
static force_inline void *
pmemops_memmove(const struct pmem_ops *p_ops, void *dest,
const void *src, size_t len, unsigned flags)
{
return p_ops->memmove(p_ops->base, dest, src, len, flags);
}
static force_inline void *
pmemops_memset(const struct pmem_ops *p_ops, void *dest, int c,
size_t len, unsigned flags)
{
return p_ops->memset(p_ops->base, dest, c, len, flags);
}
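/*
 * Illustrative sketch (the destination ranges are hypothetical): persist is
 * equivalent to flush followed by drain, so several ranges can be flushed
 * back to back and then ordered with a single drain.
 */
static inline void
pmemops_flush_two_then_drain_example(const struct pmem_ops *p_ops,
	const void *a, size_t a_len, const void *b, size_t b_len)
{
	pmemops_flush(p_ops, a, a_len);
	pmemops_flush(p_ops, b, b_len);
	pmemops_drain(p_ops); /* one drain orders both flushes */
}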
#ifdef __cplusplus
}
#endif
#endif
| 2,672 | 22.866071 | 80 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/sync.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* sync.h -- internal to obj synchronization API
*/
#ifndef LIBPMEMOBJ_SYNC_H
#define LIBPMEMOBJ_SYNC_H 1
#include <errno.h>
#include <stdint.h>
#include "libpmemobj.h"
#include "out.h"
#include "os_thread.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* internal definitions of PMEM-locks
*/
typedef union padded_pmemmutex {
char padding[_POBJ_CL_SIZE];
struct {
uint64_t runid;
union {
os_mutex_t mutex;
struct {
void *bsd_mutex_p;
union padded_pmemmutex *next;
} bsd_u;
} mutex_u;
} pmemmutex;
} PMEMmutex_internal;
#define PMEMmutex_lock pmemmutex.mutex_u.mutex
#define PMEMmutex_bsd_mutex_p pmemmutex.mutex_u.bsd_u.bsd_mutex_p
#define PMEMmutex_next pmemmutex.mutex_u.bsd_u.next
typedef union padded_pmemrwlock {
char padding[_POBJ_CL_SIZE];
struct {
uint64_t runid;
union {
os_rwlock_t rwlock;
struct {
void *bsd_rwlock_p;
union padded_pmemrwlock *next;
} bsd_u;
} rwlock_u;
} pmemrwlock;
} PMEMrwlock_internal;
#define PMEMrwlock_lock pmemrwlock.rwlock_u.rwlock
#define PMEMrwlock_bsd_rwlock_p pmemrwlock.rwlock_u.bsd_u.bsd_rwlock_p
#define PMEMrwlock_next pmemrwlock.rwlock_u.bsd_u.next
typedef union padded_pmemcond {
char padding[_POBJ_CL_SIZE];
struct {
uint64_t runid;
union {
os_cond_t cond;
struct {
void *bsd_cond_p;
union padded_pmemcond *next;
} bsd_u;
} cond_u;
} pmemcond;
} PMEMcond_internal;
#define PMEMcond_cond pmemcond.cond_u.cond
#define PMEMcond_bsd_cond_p pmemcond.cond_u.bsd_u.bsd_cond_p
#define PMEMcond_next pmemcond.cond_u.bsd_u.next
/*
 * pmemobj_mutex_lock_nofail -- pmemobj_mutex_lock variant that never
 * fails from the caller's perspective. If pmemobj_mutex_lock fails, this
 * function aborts the program.
 */
static inline void
pmemobj_mutex_lock_nofail(PMEMobjpool *pop, PMEMmutex *mutexp)
{
int ret = pmemobj_mutex_lock(pop, mutexp);
if (ret) {
errno = ret;
FATAL("!pmemobj_mutex_lock");
}
}
/*
 * pmemobj_mutex_unlock_nofail -- pmemobj_mutex_unlock variant that never
 * fails from the caller's perspective. If pmemobj_mutex_unlock fails,
 * this function aborts the program.
 */
static inline void
pmemobj_mutex_unlock_nofail(PMEMobjpool *pop, PMEMmutex *mutexp)
{
int ret = pmemobj_mutex_unlock(pop, mutexp);
if (ret) {
errno = ret;
FATAL("!pmemobj_mutex_unlock");
}
}
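/*
 * Illustrative sketch (the critical section is hypothetical): the nofail
 * variants are intended for internal code paths where a locking error is
 * not recoverable, so failures abort instead of propagating.
 */
static inline void
pmemobj_locked_increment_example(PMEMobjpool *pop, PMEMmutex *mutexp,
	uint64_t *counter)
{
	pmemobj_mutex_lock_nofail(pop, mutexp);
	(*counter)++; /* any work that requires mutual exclusion */
	pmemobj_mutex_unlock_nofail(pop, mutexp);
}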
int pmemobj_mutex_assert_locked(PMEMobjpool *pop, PMEMmutex *mutexp);
#ifdef __cplusplus
}
#endif
#endif
| 2,504 | 21.168142 | 79 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/lane.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* lane.h -- internal definitions for lanes
*/
#ifndef LIBPMEMOBJ_LANE_H
#define LIBPMEMOBJ_LANE_H 1
#include <stdint.h>
#include "ulog.h"
#include "libpmemobj.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Distance between lanes used by threads, required to prevent threads from
 * false sharing parts of the lanes array. Used if properly spread-out lanes
 * are available; otherwise, less spread-out lanes are used.
 */
#define LANE_JUMP (64 / sizeof(uint64_t))
/*
* Number of times the algorithm will try to reacquire the primary lane for the
* thread. If this threshold is exceeded, a new primary lane is selected for the
* thread.
*/
#define LANE_PRIMARY_ATTEMPTS 128
#define RLANE_DEFAULT 0
#define LANE_TOTAL_SIZE 3072 /* 3 * 1024 (sum of 3 old lane sections) */
/*
 * We have 3 kilobytes to distribute.
 * The smallest capacity is needed for the internal redo log, for which we can
 * accurately calculate the maximum amount of occupied space: 48 bytes, i.e.
 * 3 times sizeof(struct ulog_entry_val) -- one entry for the bitmap OR, a
 * second for the bitmap AND, and a third for the modification of the
 * destination pointer. For future needs, this has been bumped up to 12 ulog
 * entries.
 *
 * The remaining part has to be split between the transactional redo and undo
 * logs, and since by far the most space-consuming operations are
 * transactional snapshots, most of the space, 2 kilobytes, is assigned to the
 * undo log.
 * After that, the remainder, 640 bytes, or 40 ulog entries, is left for the
 * transactional redo logs.
 * Thanks to this distribution, all small and medium transactions should be
 * performed entirely without allocating any additional metadata.
 *
 * These values must be cacheline-size aligned to be used for ulogs. They are
 * therefore parametrized, because the size of struct ulog changes between
 * platforms.
 */
#define LANE_UNDO_SIZE (LANE_TOTAL_SIZE \
- LANE_REDO_EXTERNAL_SIZE \
- LANE_REDO_INTERNAL_SIZE \
- 3 * sizeof(struct ulog)) /* 2048 for 64B ulog */
#define LANE_REDO_EXTERNAL_SIZE ALIGN_UP(704 - sizeof(struct ulog), \
CACHELINE_SIZE) /* 640 for 64B ulog */
#define LANE_REDO_INTERNAL_SIZE ALIGN_UP(256 - sizeof(struct ulog), \
CACHELINE_SIZE) /* 192 for 64B ulog */
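/*
 * Worked example for a 64-byte struct ulog, matching the comments above:
 * LANE_REDO_EXTERNAL_SIZE = ALIGN_UP(704 - 64, 64) = 640 bytes (40 entries),
 * LANE_REDO_INTERNAL_SIZE = ALIGN_UP(256 - 64, 64) = 192 bytes (12 entries),
 * LANE_UNDO_SIZE = 3072 - 640 - 192 - 3 * 64 = 2048 bytes.
 */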
struct lane_layout {
/*
* Redo log for self-contained and 'one-shot' allocator operations.
* Cannot be extended.
*/
struct ULOG(LANE_REDO_INTERNAL_SIZE) internal;
/*
* Redo log for large operations/transactions.
* Can be extended by the use of internal ulog.
*/
struct ULOG(LANE_REDO_EXTERNAL_SIZE) external;
/*
* Undo log for snapshots done in a transaction.
* Can be extended/shrunk by the use of internal ulog.
*/
struct ULOG(LANE_UNDO_SIZE) undo;
};
struct lane {
struct lane_layout *layout; /* pointer to persistent layout */
struct operation_context *internal; /* context for internal ulog */
struct operation_context *external; /* context for external ulog */
struct operation_context *undo; /* context for undo ulog */
};
struct lane_descriptor {
/*
 * The number of lanes available at runtime must be <= the total number of
 * lanes available in the pool. The number of lanes can be limited by a
 * shortage of other resources, e.g. the available submission queue sizes
 * of RNICs.
 */
unsigned runtime_nlanes;
unsigned next_lane_idx;
uint64_t *lane_locks;
struct lane *lane;
};
typedef int (*section_layout_op)(PMEMobjpool *pop, void *data, unsigned length);
typedef void *(*section_constr)(PMEMobjpool *pop, void *data);
typedef void (*section_destr)(PMEMobjpool *pop, void *rt);
typedef int (*section_global_op)(PMEMobjpool *pop);
struct section_operations {
section_constr construct_rt;
section_destr destroy_rt;
section_layout_op check;
section_layout_op recover;
section_global_op boot;
section_global_op cleanup;
};
struct lane_info {
uint64_t pop_uuid_lo;
uint64_t lane_idx;
unsigned long nest_count;
/*
 * The index of the primary lane for the thread. A thread will always
 * try to acquire the primary lane first; only if that fails will it
 * look for a different available lane.
 */
uint64_t primary;
int primary_attempts;
struct lane_info *prev, *next;
};
void lane_info_boot(void);
void lane_info_destroy(void);
void lane_init_data(PMEMobjpool *pop);
int lane_boot(PMEMobjpool *pop);
void lane_cleanup(PMEMobjpool *pop);
int lane_recover_and_section_boot(PMEMobjpool *pop);
int lane_section_cleanup(PMEMobjpool *pop);
int lane_check(PMEMobjpool *pop);
unsigned lane_hold(PMEMobjpool *pop, struct lane **lane);
void lane_release(PMEMobjpool *pop);
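/*
 * Illustrative sketch: lane_hold() pins a lane to the calling thread and
 * returns its index; every successful hold must be paired with
 * lane_release(). The work done while holding the lane is hypothetical.
 */
static inline void
lane_scoped_use_example(PMEMobjpool *pop)
{
	struct lane *lane;
	(void) lane_hold(pop, &lane);
	/* ... use lane->internal / lane->external / lane->undo contexts ... */
	lane_release(pop);
}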
#ifdef __cplusplus
}
#endif
#endif
| 4,652 | 30.02 | 80 | h |
null | NearPMSW-main/nearpm/shadow/pmdkArrSwap-sd/src/libpmemobj/bucket.h | // SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* bucket.h -- internal definitions for bucket
*/
#ifndef LIBPMEMOBJ_BUCKET_H
#define LIBPMEMOBJ_BUCKET_H 1
#include <stddef.h>
#include <stdint.h>
#include "container.h"
#include "memblock.h"
#include "os_thread.h"
#ifdef __cplusplus
extern "C" {
#endif
#define CALC_SIZE_IDX(_unit_size, _size)\
((_size) == 0 ? 0 : (uint32_t)((((_size) - 1) / (_unit_size)) + 1))
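/*
 * Example (illustrative): for an allocation class with 128-byte units, a
 * 300-byte request needs CALC_SIZE_IDX(128, 300) = ((300 - 1) / 128) + 1 = 3
 * units, while a zero-byte request maps to a size index of 0.
 */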
struct bucket {
os_mutex_t lock;
struct alloc_class *aclass;
struct block_container *container;
const struct block_container_ops *c_ops;
struct memory_block_reserved *active_memory_block;
int is_active;
};
struct bucket *bucket_new(struct block_container *c,
struct alloc_class *aclass);
int *bucket_current_resvp(struct bucket *b);
int bucket_insert_block(struct bucket *b, const struct memory_block *m);
void bucket_delete(struct bucket *b);
#ifdef __cplusplus
}
#endif
#endif
| 957 | 17.784314 | 72 | h |