repo
stringlengths 1
152
⌀ | file
stringlengths 15
205
| code
stringlengths 0
41.6M
| file_length
int64 0
41.6M
| avg_line_length
float64 0
1.81M
| max_line_length
int64 0
12.7M
| extension_type
stringclasses 90
values |
---|---|---|---|---|---|---|
null | ceph-main/src/crypto/openssl/openssl_crypto_plugin.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Intel Corporation
*
* Author: Qiaowei Ren <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
// BUG FIX: the include guard was ISAL_CRYPTO_PLUGIN_H, copy-pasted from the
// ISA-L plugin header; including both headers in one TU would silently drop
// this one. Renamed to match this file.
#ifndef OPENSSL_CRYPTO_PLUGIN_H
#define OPENSSL_CRYPTO_PLUGIN_H

#include "crypto/crypto_plugin.h"
#include "crypto/openssl/openssl_crypto_accel.h"

// Crypto plugin that hands out a lazily-created, shared OpenSSL-backed
// CryptoAccel instance.
class OpenSSLCryptoPlugin : public CryptoPlugin {
  // NOTE(review): this may shadow a like-named member of CryptoPlugin —
  // harmless here since the accelerator is returned through *cs, but worth
  // confirming against crypto_plugin.h.
  CryptoAccelRef cryptoaccel;
public:
  explicit OpenSSLCryptoPlugin(CephContext* cct) : CryptoPlugin(cct)
  {}
  // Return the (singleton) OpenSSL accelerator through *cs, creating it on
  // first use. chunk_size/max_requests are unused by this backend.
  // Always returns 0.
  int factory(CryptoAccelRef *cs,
              std::ostream *ss,
              const size_t chunk_size,
              const size_t max_requests) override {
    if (cryptoaccel == nullptr)
      cryptoaccel = CryptoAccelRef(new OpenSSLCryptoAccel);

    *cs = cryptoaccel;
    return 0;
  }
};
#endif // OPENSSL_CRYPTO_PLUGIN_H
| 1,023 | 24.6 | 70 | h |
null | ceph-main/src/crypto/qat/qat_crypto_accel.cc | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Intel Corporation
*
* Author: Qiaowei Ren <[email protected]>
* Author: Ganesh Mahalingam <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include "crypto/qat/qat_crypto_accel.h"
// Batched AES-256-CBC encryption: forwards to QccCrypto::perform_op_batch.
// Rejects payloads that are not a whole number of AES blocks.
bool QccCryptoAccel::cbc_encrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
    const unsigned char iv[][AES_256_IVSIZE],
    const unsigned char (&key)[AES_256_KEYSIZE],
    optional_yield y) {
  if (unlikely(size % AES_256_IVSIZE != 0)) {
    return false;
  }

  // The QAT layer wants mutable pointers; the buffers are not modified.
  unsigned char* iv_flat = const_cast<unsigned char*>(&iv[0][0]);
  unsigned char* key_flat = const_cast<unsigned char*>(&key[0]);
  return qcccrypto.perform_op_batch(out, in, size, iv_flat, key_flat,
                                    CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT, y);
}
// Batched AES-256-CBC decryption: mirror image of cbc_encrypt_batch.
bool QccCryptoAccel::cbc_decrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
    const unsigned char iv[][AES_256_IVSIZE],
    const unsigned char (&key)[AES_256_KEYSIZE],
    optional_yield y) {
  if (unlikely(size % AES_256_IVSIZE != 0)) {
    return false;
  }

  unsigned char* iv_flat = const_cast<unsigned char*>(&iv[0][0]);
  unsigned char* key_flat = const_cast<unsigned char*>(&key[0]);
  return qcccrypto.perform_op_batch(out, in, size, iv_flat, key_flat,
                                    CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT, y);
}
| 1,498 | 30.229167 | 96 | cc |
null | ceph-main/src/crypto/qat/qat_crypto_accel.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Intel Corporation
*
* Author: Qiaowei Ren <[email protected]>
* Author: Ganesh Mahalingam <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef QAT_CRYPTO_ACCEL_H
#define QAT_CRYPTO_ACCEL_H
#include "crypto/crypto_accel.h"
#include "crypto/qat/qcccrypto.h"
#include "common/async/yield_context.h"
// CryptoAccel implementation backed by Intel QAT (via QccCrypto).
// Only the batched entry points do real work; the single-buffer
// cbc_encrypt/cbc_decrypt overrides return false (unsupported here).
class QccCryptoAccel : public CryptoAccel {
public:
  QccCrypto qcccrypto;
  // Initializes the QAT engine eagerly at construction time.
  QccCryptoAccel(const size_t chunk_size, const size_t max_requests):qcccrypto() { qcccrypto.init(chunk_size, max_requests); };
  ~QccCryptoAccel() { qcccrypto.destroy(); };

  // Single-buffer ops: always false (callers must use the batch API).
  bool cbc_encrypt(unsigned char* out, const unsigned char* in, size_t size,
      const unsigned char (&iv)[AES_256_IVSIZE],
      const unsigned char (&key)[AES_256_KEYSIZE],
      optional_yield y) override { return false; }
  bool cbc_decrypt(unsigned char* out, const unsigned char* in, size_t size,
      const unsigned char (&iv)[AES_256_IVSIZE],
      const unsigned char (&key)[AES_256_KEYSIZE],
      optional_yield y) override { return false; }
  // Batched AES-256-CBC over `size` bytes with one IV per data chunk;
  // implemented in qat_crypto_accel.cc.
  bool cbc_encrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
      const unsigned char iv[][AES_256_IVSIZE],
      const unsigned char (&key)[AES_256_KEYSIZE],
      optional_yield y) override;
  bool cbc_decrypt_batch(unsigned char* out, const unsigned char* in, size_t size,
      const unsigned char iv[][AES_256_IVSIZE],
      const unsigned char (&key)[AES_256_KEYSIZE],
      optional_yield y) override;
};
#endif
| 1,825 | 37.851064 | 129 | h |
null | ceph-main/src/crypto/qat/qat_crypto_plugin.cc | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Intel Corporation
*
* Author: Qiaowei Ren <[email protected]>
* Author: Ganesh Mahalingam <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include "crypto/qat/qat_crypto_plugin.h"
#include "ceph_ver.h"
// Out-of-line definition of the static mutex serializing QAT accelerator
// creation (see QccCryptoPlugin::factory).
std::mutex QccCryptoPlugin::qat_init;
// Plugin ABI hook: reports the Ceph version string this plugin was built from.
const char *__ceph_plugin_version()
{
  return CEPH_GIT_NICE_VER;
}
// Plugin ABI hook: registers a freshly constructed QAT crypto plugin
// under (type, name) in the context's plugin registry.
int __ceph_plugin_init(CephContext *cct,
                       const std::string& type,
                       const std::string& name)
{
  return cct->get_plugin_registry()->add(type, name, new QccCryptoPlugin(cct));
}
| 872 | 23.25 | 70 | cc |
null | ceph-main/src/crypto/qat/qat_crypto_plugin.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Intel Corporation
*
* Author: Qiaowei Ren <[email protected]>
* Author: Ganesh Mahalingam <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef QAT_CRYPTO_PLUGIN_H
#define QAT_CRYPTO_PLUGIN_H
#include "crypto/crypto_plugin.h"
#include "crypto/qat/qat_crypto_accel.h"
class QccCryptoPlugin : public CryptoPlugin {
static std::mutex qat_init;
public:
explicit QccCryptoPlugin(CephContext* cct) : CryptoPlugin(cct)
{}
~QccCryptoPlugin()
{}
virtual int factory(CryptoAccelRef *cs, std::ostream *ss, const size_t chunk_size, const size_t max_requests)
{
std::lock_guard<std::mutex> l(qat_init);
if (cryptoaccel == nullptr)
cryptoaccel = CryptoAccelRef(new QccCryptoAccel(chunk_size, max_requests));
*cs = cryptoaccel;
return 0;
}
};
#endif
| 1,117 | 25 | 111 | h |
null | ceph-main/src/crypto/qat/qcccrypto.cc | #include "qcccrypto.h"
#include <iostream>
#include "string.h"
#include <pthread.h>
#include <condition_variable>
#include "common/debug.h"
#include "include/scope_guard.h"
#include "common/dout.h"
#include "common/errno.h"
#include <atomic>
#include <utility>
#include <future>
#include <chrono>
#include "boost/container/static_vector.hpp"
// -----------------------------------------------------------------------------
// Debug-output plumbing: route dout/derr through the rgw subsystem with a
// "QccCrypto: " prefix on every line.
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rgw
#undef dout_prefix
#define dout_prefix _prefix(_dout)

static std::ostream& _prefix(std::ostream* _dout)
{
  return *_dout << "QccCrypto: ";
}
// -----------------------------------------------------------------------------
/*
* Callback function
*/
/*
 * QAT data-plane completion callback: each finished op forwards to the
 * QatCrypto helper smuggled through pCallbackTag.
 */
static void symDpCallback(CpaCySymDpOpData *pOpData,
                          CpaStatus status,
                          CpaBoolean verifyResult)
{
  void *tag = pOpData->pCallbackTag;
  if (tag != nullptr) {
    static_cast<QatCrypto*>(tag)->complete();
  }
}
// Process-wide state shared by all QccCrypto objects.
static std::mutex qcc_alloc_mutex;  // NOTE(review): not referenced in this file's visible code — confirm use
static std::mutex qcc_eng_mutex;    // serializes engine init (see QccCrypto::init)
static std::atomic<bool> init_called = { false };  // true once the QAT init sequence has run
static std::mutex poll_inst_mutex;           // protects the poll thread's backoff wait
static std::condition_variable poll_inst_cv; // wakes the poll thread when work is enqueued
#define NON_INSTANCE -1    // sentinel: no QAT instance could be acquired
#define RETRY_MAX_NUM 100  // empty poll sweeps tolerated before the poll thread backs off
/*
 * Asynchronously acquire a free QAT instance index. If none is free, the
 * request is parked in instance_completions (bounded); when that queue is
 * also full the token completes with NON_INSTANCE. All bookkeeping runs on
 * my_pool's single thread, which serializes access without extra locking.
 */
template <typename CompletionToken>
auto QccCrypto::async_get_instance(CompletionToken&& token) {
  using boost::asio::async_completion;
  using Signature = void(int);
  async_completion<CompletionToken, Signature> init(token);

  auto ex = boost::asio::get_associated_executor(init.completion_handler);
  boost::asio::post(my_pool, [this, ex, handler = std::move(init.completion_handler)]()mutable{
    auto handler1 = std::move(handler);
    if (!open_instances.empty()) {
      // Fast path: hand out a free instance immediately.
      int avail_inst = open_instances.front();
      open_instances.pop_front();
      boost::asio::post(ex, std::bind(handler1, avail_inst));
    } else if (!instance_completions.full()) {
      // keep a few objects to wait QAT instance to make sure qat full utilization as much as possible,
      // that is, QAT don't need to wait for new objects to ensure
      // that QAT will not be in a free state as much as possible
      instance_completions.push_back([this, ex, handler2 = std::move(handler1)](int inst)mutable{
        boost::asio::post(ex, std::bind(handler2, inst));
      });
    } else {
      // Wait queue is full: tell the caller nothing was available.
      boost::asio::post(ex, std::bind(handler1, NON_INSTANCE));
    }
  });
  return init.result.get();
}
/*
 * Return a QAT instance to the pool. Runs on my_pool's thread: a parked
 * waiter (if any) gets the instance directly, otherwise it is marked free.
 */
void QccCrypto::QccFreeInstance(int entry) {
  boost::asio::post(my_pool, [this, entry]()mutable{
    if (instance_completions.empty()) {
      open_instances.push_back(entry);
    } else {
      instance_completions.front()(entry);
      instance_completions.pop_front();
    }
  });
}
/*
 * Roll back a failed init(): stop the user-space QAT service, tear down the
 * usdm memory driver and reset the init flags. Called only from init()
 * failure paths, hence the unconditional error log.
 */
void QccCrypto::cleanup() {
  icp_sal_userStop();
  qaeMemDestroy();
  is_init = false;
  init_called = false;
  derr << "Failure during QAT init sequence. Quitting" << dendl;
}
/*
 * Poll-thread body: sweep every polled QAT instance, draining completed
 * data-plane ops (which fire symDpCallback). After RETRY_MAX_NUM
 * consecutive all-empty sweeps the thread naps up to 1ms on poll_inst_cv —
 * async_perform_op notifies it when new work is enqueued.
 */
void QccCrypto::poll_instances(void) {
  CpaStatus stat = CPA_STATUS_SUCCESS;
  poll_retry_num = RETRY_MAX_NUM;
  while (!thread_stop) {
    int free_instance_num = 0;
    for (int iter = 0; iter < qcc_inst->num_instances; iter++) {
      if (qcc_inst->is_polled[iter] == CPA_TRUE) {
        stat = icp_sal_CyPollDpInstance(qcc_inst->cy_inst_handles[iter], 0);
        if (stat != CPA_STATUS_SUCCESS) {
          // Non-success presumably means this ring had nothing to retrieve;
          // count it as an idle instance for the backoff heuristic.
          free_instance_num++;
        }
      }
    }
    if (free_instance_num == qcc_inst->num_instances) {
      poll_retry_num--;
    } else {
      poll_retry_num = RETRY_MAX_NUM;
    }

    if (0 == poll_retry_num) {
      // Everything has been idle for a while: back off instead of spinning.
      std::unique_lock lock{poll_inst_mutex};
      poll_inst_cv.wait_for(lock, std::chrono::milliseconds(1), [this](){return poll_retry_num > 0;});
      poll_retry_num = RETRY_MAX_NUM;
    }
  }
}
/*
* We initialize QAT instance and everything that is common for all ops
*/
/*
 * Initialize the QAT engine and everything common to all ops: the usdm
 * memory driver, the user-space service, instance discovery/start, the
 * per-instance bookkeeping structs and the poll thread.
 *
 * @param chunk_size   size of each staged request buffer
 * @param max_requests request-concurrency cap (excess waiters are queued)
 * @return true on success (or if already initialized), false otherwise.
 *
 * Fixes vs. previous revision:
 *  - the qcc_op_mem allocation was checked against qcc_sess (copy-paste
 *    bug), so a failed opmem allocation went undetected;
 *  - the is_polled allocation result was never checked.
 */
bool QccCrypto::init(const size_t chunk_size, const size_t max_requests) {
  std::lock_guard<std::mutex> l(qcc_eng_mutex);
  CpaStatus stat = CPA_STATUS_SUCCESS;
  this->chunk_size = chunk_size;
  this->max_requests = max_requests;

  if (init_called) {
    dout(10) << "Init sequence already called. Skipping duplicate call" << dendl;
    return true;
  }

  // First call to init
  dout(15) << "First init for QAT" << dendl;
  init_called = true;

  // Find if the usermode memory driver is available. We need this to
  // create contiguous memory needed by QAT.
  stat = qaeMemInit();
  if (stat != CPA_STATUS_SUCCESS) {
    derr << "Unable to load memory driver" << dendl;
    this->cleanup();
    return false;
  }

  stat = icp_sal_userStart("CEPH");
  if (stat != CPA_STATUS_SUCCESS) {
    derr << "Unable to start qat device" << dendl;
    this->cleanup();
    return false;
  }

  qcc_os_mem_alloc((void **)&qcc_inst, sizeof(QCCINST));
  if (qcc_inst == NULL) {
    derr << "Unable to alloc mem for instance struct" << dendl;
    this->cleanup();
    return false;
  }

  // Initialize contents of qcc_inst
  qcc_inst->num_instances = 0;
  qcc_inst->cy_inst_handles = NULL;

  stat = cpaCyGetNumInstances(&(qcc_inst->num_instances));
  if ((stat != CPA_STATUS_SUCCESS) || (qcc_inst->num_instances <= 0)) {
    derr << "Unable to find available instances" << dendl;
    this->cleanup();
    return false;
  }

  qcc_os_mem_alloc((void **)&qcc_inst->cy_inst_handles,
      ((int)qcc_inst->num_instances * sizeof(CpaInstanceHandle)));
  if (qcc_inst->cy_inst_handles == NULL) {
    derr << "Unable to allocate instances array memory" << dendl;
    this->cleanup();
    return false;
  }

  stat = cpaCyGetInstances(qcc_inst->num_instances, qcc_inst->cy_inst_handles);
  if (stat != CPA_STATUS_SUCCESS) {
    derr << "Unable to get instances" << dendl;
    this->cleanup();
    return false;
  }
  dout(1) << "Get instances num: " << qcc_inst->num_instances << dendl;

  // Requests beyond the instance count wait in instance_completions so the
  // hardware stays busy instead of callers being rejected outright.
  if (max_requests > qcc_inst->num_instances) {
    instance_completions.set_capacity(max_requests - qcc_inst->num_instances);
  }
  open_instances.set_capacity(qcc_inst->num_instances);

  int iter = 0;
  //Start Instances
  for (iter = 0; iter < qcc_inst->num_instances; iter++) {
    stat = cpaCyStartInstance(qcc_inst->cy_inst_handles[iter]);
    if (stat != CPA_STATUS_SUCCESS) {
      derr << "Unable to start instance" << dendl;
      this->cleanup();
      return false;
    }
  }

  qcc_os_mem_alloc((void **)&qcc_inst->is_polled,
      ((int)qcc_inst->num_instances * sizeof(CpaBoolean)));
  if (qcc_inst->is_polled == NULL) {
    derr << "Unable to allocate polling-mode array memory" << dendl;
    this->cleanup();
    return false;
  }

  // Record which instances must be polled (vs. interrupt driven).
  CpaInstanceInfo2 info;
  for (iter = 0; iter < qcc_inst->num_instances; iter++) {
    qcc_inst->is_polled[iter] = cpaCyInstanceGetInfo2(qcc_inst->cy_inst_handles[iter],
        &info) == CPA_STATUS_SUCCESS ? info.isPolled : CPA_FALSE;
  }

  // Allocate memory structures for all instances
  qcc_os_mem_alloc((void **)&qcc_sess,
      ((int)qcc_inst->num_instances * sizeof(QCCSESS)));
  if (qcc_sess == NULL) {
    derr << "Unable to allocate memory for session struct" << dendl;
    this->cleanup();
    return false;
  }

  qcc_os_mem_alloc((void **)&qcc_op_mem,
      ((int)qcc_inst->num_instances * sizeof(QCCOPMEM)));
  // BUG FIX: this used to (re-)check qcc_sess, leaving a failed qcc_op_mem
  // allocation undetected.
  if (qcc_op_mem == NULL) {
    derr << "Unable to allocate memory for opmem struct" << dendl;
    this->cleanup();
    return false;
  }

  //At this point we are only doing an user-space version.
  for (iter = 0; iter < qcc_inst->num_instances; iter++) {
    stat = cpaCySetAddressTranslation(qcc_inst->cy_inst_handles[iter],
                                      qaeVirtToPhysNUMA);
    if (stat == CPA_STATUS_SUCCESS) {
      open_instances.push_back(iter);
      qcc_op_mem[iter].is_mem_alloc = false;

      stat = cpaCySymDpRegCbFunc(qcc_inst->cy_inst_handles[iter], symDpCallback);
      if (stat != CPA_STATUS_SUCCESS) {
        dout(1) << "Unable to register callback function for instance " << iter
                << " with status = " << stat << dendl;
        return false;
      }
    } else {
      dout(1) << "Unable to find address translations of instance " << iter << dendl;
      this->cleanup();
      return false;
    }
  }

  qat_poll_thread = make_named_thread("qat_poll", &QccCrypto::poll_instances, this);

  is_init = true;
  dout(10) << "Init complete" << dendl;
  return true;
}
/*
 * Tear down everything init() built, in roughly reverse order: stop the
 * poll thread and pool, free per-instance op buffers and sessions, stop the
 * instances, free the bookkeeping structs, then unwind the QAT service and
 * memory driver. Returns false if there is nothing to destroy.
 */
bool QccCrypto::destroy() {
  if((!is_init) || (!init_called)) {
    dout(15) << "QAT not initialized here. Nothing to do" << dendl;
    return false;
  }

  // Stop background workers before touching shared state.
  thread_stop = true;
  if (qat_poll_thread.joinable()) {
    qat_poll_thread.join();
  }
  my_pool.join();
  dout(10) << "Destroying QAT crypto & related memory" << dendl;
  int iter = 0;

  // Free up op related memory
  for (iter =0; iter < qcc_inst->num_instances; iter++) {
    for (size_t i = 0; i < MAX_NUM_SYM_REQ_BATCH; i++) {
      qcc_contig_mem_free((void **)&(qcc_op_mem[iter].src_buff[i]));
      qcc_contig_mem_free((void **)&(qcc_op_mem[iter].iv_buff[i]));
      qcc_contig_mem_free((void **)&(qcc_op_mem[iter].sym_op_data[i]));
    }
  }

  // Free up Session memory
  for (iter = 0; iter < qcc_inst->num_instances; iter++) {
    cpaCySymDpRemoveSession(qcc_inst->cy_inst_handles[iter], qcc_sess[iter].sess_ctx);
    qcc_contig_mem_free((void **)&(qcc_sess[iter].sess_ctx));
  }

  // Stop QAT Instances
  for (iter = 0; iter < qcc_inst->num_instances; iter++) {
    cpaCyStopInstance(qcc_inst->cy_inst_handles[iter]);
  }

  // Free up the base structures we use
  qcc_os_mem_free((void **)&qcc_op_mem);
  qcc_os_mem_free((void **)&qcc_sess);
  qcc_os_mem_free((void **)&(qcc_inst->cy_inst_handles));
  qcc_os_mem_free((void **)&(qcc_inst->is_polled));
  qcc_os_mem_free((void **)&qcc_inst);

  //Un-init memory driver and QAT HW
  icp_sal_userStop();
  qaeMemDestroy();
  init_called = false;
  is_init = false;
  return true;
}
/*
 * Run one batched AES-256-CBC operation (`op_type` selects encrypt/decrypt)
 * over `size` bytes. Acquires a QAT instance (yielding if `y` is set,
 * blocking on a future otherwise), lazily allocates that instance's staging
 * buffers, (re-)keys its session, then delegates to symPerformOp.
 * `iv` must provide one AES_256_IV_LEN block per chunk_size chunk of input.
 * Returns true on success.
 */
bool QccCrypto::perform_op_batch(unsigned char* out, const unsigned char* in, size_t size,
                                 Cpa8U *iv,
                                 Cpa8U *key,
                                 CpaCySymCipherDirection op_type,
                                 optional_yield y)
{
  if (!init_called) {
    dout(10) << "QAT not intialized yet. Initializing now..." << dendl;
    if (!QccCrypto::init(chunk_size, max_requests)) {
      derr << "QAT init failed" << dendl;
      return false;
    }
  }

  if (!is_init)
  {
    dout(10) << "QAT not initialized in this instance or init failed" << dendl;
    return is_init;
  }

  CpaStatus status = CPA_STATUS_SUCCESS;
  int avail_inst = NON_INSTANCE;

  // Acquire an instance index via async_get_instance.
  if (y) {
    yield_context yield = y.get_yield_context();
    avail_inst = async_get_instance(yield);
  } else {
    auto result = async_get_instance(boost::asio::use_future);
    avail_inst = result.get();
  }

  if (avail_inst == NON_INSTANCE) {
    return false;
  }

  dout(15) << "Using dp_batch inst " << avail_inst << dendl;
  auto sg = make_scope_guard([this, avail_inst] {
    //free up the instance irrespective of the op status
    dout(15) << "Completed task under " << avail_inst << dendl;
    qcc_op_mem[avail_inst].op_complete = false;
    QccCrypto::QccFreeInstance(avail_inst);
  });

  /*
   * Allocate buffers for this version of the instance if not already done.
   * Hold onto to most of them until destructor is called.
   */
  if (qcc_op_mem[avail_inst].is_mem_alloc == false) {
    for (size_t i = 0; i < MAX_NUM_SYM_REQ_BATCH; i++) {
      // Allocate IV memory
      status = qcc_contig_mem_alloc((void **)&(qcc_op_mem[avail_inst].iv_buff[i]), AES_256_IV_LEN, 8);
      if (status != CPA_STATUS_SUCCESS) {
        derr << "Unable to allocate iv_buff memory" << dendl;
        return false;
      }

      // Allocate src memory
      status = qcc_contig_mem_alloc((void **)&(qcc_op_mem[avail_inst].src_buff[i]), chunk_size, 8);
      if (status != CPA_STATUS_SUCCESS) {
        derr << "Unable to allocate src_buff memory" << dendl;
        return false;
      }

      //Setup OpData
      status = qcc_contig_mem_alloc((void **)&(qcc_op_mem[avail_inst].sym_op_data[i]),
                                    sizeof(CpaCySymDpOpData), 8);
      if (status != CPA_STATUS_SUCCESS) {
        derr << "Unable to allocate opdata memory" << dendl;
        return false;
      }
    }

    // Set memalloc flag so that we don't go through this exercise again.
    qcc_op_mem[avail_inst].is_mem_alloc = true;
    qcc_sess[avail_inst].sess_ctx = nullptr;
    status = initSession(qcc_inst->cy_inst_handles[avail_inst],
                         &(qcc_sess[avail_inst].sess_ctx),
                         (Cpa8U *)key,
                         op_type);
  } else {
    // Re-key the existing session: remove and re-init until the device
    // stops returning CPA_STATUS_RETRY.
    do {
      cpaCySymDpRemoveSession(qcc_inst->cy_inst_handles[avail_inst], qcc_sess[avail_inst].sess_ctx);
      status = initSession(qcc_inst->cy_inst_handles[avail_inst],
                           &(qcc_sess[avail_inst].sess_ctx),
                           (Cpa8U *)key,
                           op_type);
      if (unlikely(status == CPA_STATUS_RETRY)) {
        dout(1) << "cpaCySymDpRemoveSession and initSession retry" << dendl;
      }
    } while (status == CPA_STATUS_RETRY);
  }

  if (unlikely(status != CPA_STATUS_SUCCESS)) {
    derr << "Unable to init session with status =" << status << dendl;
    return false;
  }

  return symPerformOp(avail_inst,
                      qcc_sess[avail_inst].sess_ctx,
                      in,
                      out,
                      size,
                      reinterpret_cast<Cpa8U*>(iv),
                      AES_256_IV_LEN, y);
}
/*
* Perform session update
*/
/*
 * Re-key an existing session in place: swap in a new cipher key and
 * direction via cpaCySymUpdateSession. Returns the CPA status; failures
 * are logged at level 10.
 */
CpaStatus QccCrypto::updateSession(CpaCySymSessionCtx sessionCtx,
                                   Cpa8U *pCipherKey,
                                   CpaCySymCipherDirection cipherDirection) {
  CpaCySymSessionUpdateData updateData = {0};
  updateData.flags = CPA_CY_SYM_SESUPD_CIPHER_KEY | CPA_CY_SYM_SESUPD_CIPHER_DIR;
  updateData.pCipherKey = pCipherKey;
  updateData.cipherDirection = cipherDirection;

  CpaStatus status = cpaCySymUpdateSession(sessionCtx, &updateData);
  if (unlikely(status != CPA_STATUS_SUCCESS)) {
    dout(10) << "cpaCySymUpdateSession failed with status = " << status << dendl;
  }
  return status;
}
/*
 * Create (or re-initialize) an AES-256-CBC data-plane cipher session.
 * A new session context buffer is allocated only when *sessionCtx is null;
 * otherwise the existing buffer is re-initialized in place.
 * Returns the CPA status of the first failing step.
 */
CpaStatus QccCrypto::initSession(CpaInstanceHandle cyInstHandle,
                                 CpaCySymSessionCtx *sessionCtx,
                                 Cpa8U *pCipherKey,
                                 CpaCySymCipherDirection cipherDirection) {
  CpaStatus status = CPA_STATUS_SUCCESS;
  Cpa32U sessionCtxSize = 0;
  CpaCySymSessionSetupData sessionSetupData;

  memset(&sessionSetupData, 0, sizeof(sessionSetupData));
  sessionSetupData.sessionPriority = CPA_CY_PRIORITY_NORMAL;
  sessionSetupData.symOperation = CPA_CY_SYM_OP_CIPHER;
  sessionSetupData.cipherSetupData.cipherAlgorithm = CPA_CY_SYM_CIPHER_AES_CBC;
  sessionSetupData.cipherSetupData.cipherKeyLenInBytes = AES_256_KEY_SIZE;
  sessionSetupData.cipherSetupData.pCipherKey = pCipherKey;
  sessionSetupData.cipherSetupData.cipherDirection = cipherDirection;

  if (nullptr == *sessionCtx) {
    // First use: size and allocate the contiguous session context.
    status = cpaCySymDpSessionCtxGetSize(cyInstHandle, &sessionSetupData, &sessionCtxSize);
    if (likely(CPA_STATUS_SUCCESS == status)) {
      status = qcc_contig_mem_alloc((void **)(sessionCtx), sessionCtxSize);
    } else {
      dout(1) << "cpaCySymDpSessionCtxGetSize failed with status = " << status << dendl;
    }
  }
  if (likely(CPA_STATUS_SUCCESS == status)) {
    status = cpaCySymDpInitSession(cyInstHandle,
                                   &sessionSetupData,
                                   *sessionCtx);
    if (unlikely(status != CPA_STATUS_SUCCESS)) {
      dout(1) << "cpaCySymDpInitSession failed with status = " << status << dendl;
    }
  } else {
    // Reached both when the context allocation failed and (after the log
    // above) when cpaCySymDpSessionCtxGetSize failed.
    dout(1) << "Session alloc failed with status = " << status << dendl;
  }
  return status;
}
/*
 * Enqueue a batch of data-plane ops (submitted immediately: performOpNow is
 * CPA_TRUE) and complete the caller's token once symDpCallback has drained
 * all `count` ops. If the enqueue itself fails, the handler fires inline
 * with that status.
 */
template <typename CompletionToken>
auto QatCrypto::async_perform_op(int avail_inst, std::span<CpaCySymDpOpData*> pOpDataVec, CompletionToken&& token) {
  CpaStatus status = CPA_STATUS_SUCCESS;

  using boost::asio::async_completion;
  using Signature = void(CpaStatus);
  async_completion<CompletionToken, Signature> init(token);
  auto ex = boost::asio::get_associated_executor(init.completion_handler);
  completion_handler = [this, ex, handler = init.completion_handler](CpaStatus stat) {
    boost::asio::post(ex, std::bind(handler, stat));
  };

  count = pOpDataVec.size();
  // Wake the poll thread in case it has backed off (see poll_instances).
  poll_inst_cv.notify_one();
  status = cpaCySymDpEnqueueOpBatch(pOpDataVec.size(), pOpDataVec.data(), CPA_TRUE);
  if (status != CPA_STATUS_SUCCESS) {
    completion_handler(status);
  }
  return init.result.get();
}
/*
 * Submit the payload in batches of up to MAX_NUM_SYM_REQ_BATCH chunk_size
 * requests. Each chunk is staged through the instance's contiguous src/iv
 * buffers, ciphered in place (dstBuffer == srcBuffer) and copied back to
 * pDst on success. Consumes one IV of ivLen bytes from pIv per chunk.
 * Used staging buffers are zeroed before returning.
 */
bool QccCrypto::symPerformOp(int avail_inst,
                             CpaCySymSessionCtx sessionCtx,
                             const Cpa8U *pSrc,
                             Cpa8U *pDst,
                             Cpa32U size,
                             Cpa8U *pIv,
                             Cpa32U ivLen,
                             optional_yield y) {
  CpaStatus status = CPA_STATUS_SUCCESS;
  Cpa32U one_batch_size = chunk_size * MAX_NUM_SYM_REQ_BATCH;
  Cpa32U iv_index = 0;
  size_t perform_retry_num = 0;
  for (Cpa32U off = 0; off < size; off += one_batch_size) {
    // One QatCrypto tracks completion of this batch.
    QatCrypto helper;
    boost::container::static_vector<CpaCySymDpOpData*, MAX_NUM_SYM_REQ_BATCH> pOpDataVec;
    for (Cpa32U offset = off, i = 0; offset < size && i < MAX_NUM_SYM_REQ_BATCH; offset += chunk_size, i++) {
      CpaCySymDpOpData *pOpData = qcc_op_mem[avail_inst].sym_op_data[i];
      Cpa8U *pSrcBuffer = qcc_op_mem[avail_inst].src_buff[i];
      Cpa8U *pIvBuffer = qcc_op_mem[avail_inst].iv_buff[i];
      // The final chunk may be shorter than chunk_size.
      Cpa32U process_size = offset + chunk_size <= size ? chunk_size : size - offset;
      // copy source into buffer
      memcpy(pSrcBuffer, pSrc + offset, process_size);
      // copy IV into buffer
      memcpy(pIvBuffer, &pIv[iv_index * ivLen], ivLen);
      iv_index++;

      //pOpData assignment
      pOpData->thisPhys = qaeVirtToPhysNUMA(pOpData);
      pOpData->instanceHandle = qcc_inst->cy_inst_handles[avail_inst];
      pOpData->sessionCtx = sessionCtx;
      pOpData->pCallbackTag = &helper;
      pOpData->cryptoStartSrcOffsetInBytes = 0;
      pOpData->messageLenToCipherInBytes = process_size;
      pOpData->iv = qaeVirtToPhysNUMA(pIvBuffer);
      pOpData->pIv = pIvBuffer;
      pOpData->ivLenInBytes = ivLen;
      pOpData->srcBuffer = qaeVirtToPhysNUMA(pSrcBuffer);
      pOpData->srcBufferLen = process_size;
      // In-place operation: destination is the source buffer.
      pOpData->dstBuffer = qaeVirtToPhysNUMA(pSrcBuffer);
      pOpData->dstBufferLen = process_size;

      pOpDataVec.push_back(pOpData);
    }

    do {
      poll_retry_num = RETRY_MAX_NUM;
      if (y) {
        yield_context yield = y.get_yield_context();
        status = helper.async_perform_op(avail_inst, std::span<CpaCySymDpOpData*>(pOpDataVec), yield);
      } else {
        auto result = helper.async_perform_op(avail_inst, std::span<CpaCySymDpOpData*>(pOpDataVec), boost::asio::use_future);
        status = result.get();
      }
      if (status == CPA_STATUS_RETRY) {
        // After a few retries, force submission of whatever is queued and
        // give up on this payload.
        if (++perform_retry_num > 3) {
          cpaCySymDpPerformOpNow(qcc_inst->cy_inst_handles[avail_inst]);
          return false;
        }
      }
    } while (status == CPA_STATUS_RETRY);

    if (likely(CPA_STATUS_SUCCESS == status)) {
      // Copy the in-place result back out to the caller's buffer.
      for (Cpa32U offset = off, i = 0; offset < size && i < MAX_NUM_SYM_REQ_BATCH; offset += chunk_size, i++) {
        Cpa8U *pSrcBuffer = qcc_op_mem[avail_inst].src_buff[i];
        Cpa32U process_size = offset + chunk_size <= size ? chunk_size : size - offset;
        memcpy(pDst + offset, pSrcBuffer, process_size);
      }
    } else {
      dout(1) << "async_perform_op failed with status = " << status << dendl;
      break;
    }
  }

  // Scrub every staging buffer that held key-derived data.
  Cpa32U max_used_buffer_num = iv_index > MAX_NUM_SYM_REQ_BATCH ? MAX_NUM_SYM_REQ_BATCH : iv_index;
  for (Cpa32U i = 0; i < max_used_buffer_num; i++) {
    memset(qcc_op_mem[avail_inst].src_buff[i], 0, chunk_size);
    memset(qcc_op_mem[avail_inst].iv_buff[i], 0, ivLen);
  }
  return (CPA_STATUS_SUCCESS == status);
}
| 19,837 | 33.440972 | 125 | cc |
null | ceph-main/src/crypto/qat/qcccrypto.h | #ifndef QCCCRYPTO_H
#define QCCCRYPTO_H
#include <atomic>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <pthread.h>
#include <thread>
#include <mutex>
#include <queue>
#include <memory>
#include "common/async/yield_context.h"
#include <memory>
#include "common/ceph_mutex.h"
#include <vector>
#include <functional>
#include <span>
#include "boost/circular_buffer.hpp"
#include "boost/asio/thread_pool.hpp"
extern "C" {
#include "cpa.h"
#include "cpa_cy_sym_dp.h"
#include "cpa_cy_im.h"
#include "lac/cpa_cy_sym.h"
#include "lac/cpa_cy_im.h"
#include "qae_mem.h"
#include "icp_sal_user.h"
#include "icp_sal_poll.h"
#include "qae_mem_utils.h"
}
// Driver for Intel QAT symmetric crypto (currently AES-256-CBC only).
// Owns instance discovery, per-instance staging memory, session management,
// a single-threaded asio pool serializing instance hand-out, and a poll
// thread draining completions.
class QccCrypto {
  friend class QatCrypto;
  size_t chunk_size{0};     // staging-buffer size per request, fixed at init()
  size_t max_requests{0};   // request-concurrency cap, fixed at init()
  // Single-threaded pool: serializes open_instances/instance_completions.
  boost::asio::thread_pool my_pool{1};
  // Parked callbacks from callers waiting for a free instance.
  boost::circular_buffer<std::function<void(int)>> instance_completions;
  template <typename CompletionToken>
  auto async_get_instance(CompletionToken&& token);

public:
  CpaCySymCipherDirection qcc_op_type;

  QccCrypto() {};
  ~QccCrypto() { destroy(); };

  bool init(const size_t chunk_size, const size_t max_requests);
  bool destroy();
  // Batched cipher entry point; `iv` supplies one IV per chunk of input.
  bool perform_op_batch(unsigned char* out, const unsigned char* in, size_t size,
                        Cpa8U *iv,
                        Cpa8U *key,
                        CpaCySymCipherDirection op_type,
                        optional_yield y);

private:
  // Currently only supporting AES_256_CBC.
  // To-Do: Needs to be expanded
  static const size_t AES_256_IV_LEN = 16;
  static const size_t AES_256_KEY_SIZE = 32;
  static const size_t MAX_NUM_SYM_REQ_BATCH = 32;

  /*
   * Struct to hold an instance of QAT to handle the crypto operations. These
   * will be identified at the start and held until the destructor is called
   * To-Do:
   * The struct was creating assuming that we will use all the instances.
   * Expand current implementation to allow multiple instances to operate
   * independently.
   */
  struct QCCINST {
    CpaInstanceHandle *cy_inst_handles;
    CpaBoolean *is_polled;
    Cpa16U num_instances;
  } *qcc_inst;

  /*
   * QAT Crypto Session
   * Crypto Session Context and setupdata holds
   * priority, type of crypto operation (cipher/chained),
   * cipher algorithm (AES, DES, etc),
   * single crypto or multi-buffer crypto.
   */
  struct QCCSESS {
    Cpa32U sess_ctx_sz;
    CpaCySymSessionCtx sess_ctx;
  } *qcc_sess;

  /*
   * Cipher Memory Allocations
   * Holds bufferlist, flatbuffer, cipher opration data and buffermeta needed
   * by QAT to perform the operation. Also buffers for IV, SRC, DEST.
   */
  struct QCCOPMEM {
    // Op common items
    bool is_mem_alloc;
    bool op_complete;
    CpaCySymDpOpData *sym_op_data[MAX_NUM_SYM_REQ_BATCH];
    Cpa8U *src_buff[MAX_NUM_SYM_REQ_BATCH];
    Cpa8U *iv_buff[MAX_NUM_SYM_REQ_BATCH];
  } *qcc_op_mem;

  /*
   * Handle queue with free instances to handle op
   */
  boost::circular_buffer<int> open_instances;
  void QccFreeInstance(int entry);
  std::thread qat_poll_thread;  // runs poll_instances() until thread_stop
  bool thread_stop{false};

  /*
   * Contiguous Memory Allocator and de-allocator. We are using the usdm
   * driver that comes along with QAT to get us direct memory access using
   * hugepages.
   * To-Do: A kernel based one.
   */
  static inline void qcc_contig_mem_free(void **ptr) {
    if (*ptr) {
      qaeMemFreeNUMA(ptr);
      *ptr = NULL;
    }
  }

  static inline CpaStatus qcc_contig_mem_alloc(void **ptr, Cpa32U size, Cpa32U alignment = 1) {
    *ptr = qaeMemAllocNUMA(size, 0, alignment);
    if (NULL == *ptr)
    {
      return CPA_STATUS_RESOURCE;
    }
    return CPA_STATUS_SUCCESS;
  }

  /*
   * Malloc & free calls masked to maintain consistency and future kernel
   * alloc support.
   */
  static inline void qcc_os_mem_free(void **ptr) {
    if (*ptr) {
      free(*ptr);
      *ptr = NULL;
    }
  }

  static inline CpaStatus qcc_os_mem_alloc(void **ptr, Cpa32U size) {
    *ptr = malloc(size);
    if (*ptr == NULL)
    {
      return CPA_STATUS_RESOURCE;
    }
    return CPA_STATUS_SUCCESS;
  }

  std::atomic<bool> is_init = { false };

  /*
   * Function to cleanup memory if constructor fails
   */
  void cleanup();

  /*
   * Crypto Polling Function & helpers
   * This helps to retrieve data from the QAT rings and dispatching the
   * associated callbacks. For synchronous operation (like this one), QAT
   * library creates an internal callback for the operation.
   */
  void poll_instances(void);
  std::atomic<size_t> poll_retry_num{0};  // shared backoff counter with the poll thread

  bool symPerformOp(int avail_inst,
                    CpaCySymSessionCtx sessionCtx,
                    const Cpa8U *pSrc,
                    Cpa8U *pDst,
                    Cpa32U size,
                    Cpa8U *pIv,
                    Cpa32U ivLen,
                    optional_yield y);

  CpaStatus initSession(CpaInstanceHandle cyInstHandle,
                        CpaCySymSessionCtx *sessionCtx,
                        Cpa8U *pCipherKey,
                        CpaCySymCipherDirection cipherDirection);

  CpaStatus updateSession(CpaCySymSessionCtx sessionCtx,
                          Cpa8U *pCipherKey,
                          CpaCySymCipherDirection cipherDirection);
};
// Tracks one in-flight batch of QAT data-plane requests. symDpCallback()
// calls complete() once per finished op; the op that drops the counter to
// zero fires the stored completion handler with CPA_STATUS_SUCCESS.
class QatCrypto {
 private:
  std::function<void(CpaStatus stat)> completion_handler;
  std::atomic<std::size_t> count;
 public:
  // Invoked from the poll thread for every completed op in the batch.
  void complete() {
    // fetch_sub returns the previous value: whoever takes the counter from
    // 1 to 0 owns delivery of the completion.
    if (count.fetch_sub(1) == 1) {
      completion_handler(CPA_STATUS_SUCCESS);
    }
  }

  QatCrypto () : count(0) {}

  // Non-copyable, non-movable: op data holds a raw pointer to this object.
  QatCrypto (const QatCrypto &qat) = delete;
  QatCrypto (QatCrypto &&qat) = delete;
  void operator=(const QatCrypto &qat) = delete;
  void operator=(QatCrypto &&qat) = delete;

  // Enqueue pOpDataVec and complete `token` when all ops have called back.
  template <typename CompletionToken>
  auto async_perform_op(int avail_inst, std::span<CpaCySymDpOpData*> pOpDataVec, CompletionToken&& token);
};
#endif //QCCCRYPTO_H
| 6,100 | 27.376744 | 106 | h |
null | ceph-main/src/dokan/ceph_dokan.cc | /*
* ceph-dokan - Win32 CephFS client based on Dokan
*
* Copyright (C) 2021 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#define UNICODE
#define _UNICODE
#include "include/compat.h"
#include "include/cephfs/libcephfs.h"
#include "ceph_dokan.h"
#include <algorithm>
#include <stdlib.h>
#include <fileinfo.h>
#include <dirent.h>
#include <fcntl.h>
#include <signal.h>
#include <sddl.h>
#include <accctrl.h>
#include <aclapi.h>
#include <ntstatus.h>
#include "common/ceph_argparse.h"
#include "common/config.h"
#include "common/debug.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/version.h"
#include "common/win32/wstring.h"
#include "global/global_init.h"
#include "include/uuid.h"
#include "dbg.h"
#include "utils.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_client
#undef dout_prefix
#define dout_prefix *_dout << "ceph-dokan: "

using namespace std;

// Predicates over the Win32 access mode: does the caller ask for any kind
// of read (resp. write) access?
// NOTE(review): FILE_SHARE_READ appears twice in the read macro (and
// FILE_SHARE_WRITE twice in the write macro); one occurrence may have been
// intended as a different access right — confirm.
#define READ_ACCESS_REQUESTED(access_mode) \
    (access_mode & GENERIC_READ || \
     access_mode & FILE_SHARE_READ || \
     access_mode & STANDARD_RIGHTS_READ || \
     access_mode & FILE_SHARE_READ)
#define WRITE_ACCESS_REQUESTED(access_mode) \
    (access_mode & GENERIC_WRITE || \
     access_mode & FILE_SHARE_WRITE || \
     access_mode & STANDARD_RIGHTS_WRITE || \
     access_mode & FILE_SHARE_WRITE)

// TODO: check if those dokan limits still stand.
#define CEPH_DOKAN_MAX_FILE_SZ (1LL << 40) // 1TB
#define CEPH_DOKAN_MAX_IO_SZ (128 * 1024 * 1024) // 128MB

// Global CephFS mount handle and configuration; initialized outside this
// chunk of the file.
struct ceph_mount_info *cmount;
Config *g_cfg;

// Used as part of DOKAN_FILE_INFO.Context, must fit within 8B.
typedef struct {
  int fd;           // CephFS fd returned by ceph_open()
  short read_only;  // non-zero when the handle was opened read-only
} fd_context, *pfd_context;
static_assert(sizeof(fd_context) <= 8,
    "fd_context exceeds DOKAN_FILE_INFO.Context size.");
// Convert a Dokan wide path to a narrow CephFS path, flipping Windows
// separators ('\') to POSIX ones ('/').
string get_path(LPCWSTR path_w) {
  string result = to_string(path_w);
  for (char& ch : result) {
    if (ch == '\\') {
      ch = '/';
    }
  }
  return result;
}
// Open `path` on the Ceph mount with the given flags/mode, storing the fd
// in *fdc on success. Returns 0 or the NTSTATUS mapped from the CephFS
// error code.
static NTSTATUS do_open_file(
  string path,
  int flags,
  mode_t mode,
  fd_context* fdc)
{
  dout(20) << __func__ << " " << path << dendl;
  int fd = ceph_open(cmount, path.c_str(), flags, mode);
  if (fd >= 0) {
    fdc->fd = fd;
    dout(20) << __func__ << " " << path << " - fd: " << fd << dendl;
    return 0;
  }

  dout(2) << __func__ << " " << path
          << ": ceph_open failed. Error: " << fd << dendl;
  return cephfs_errno_to_ntstatus_map(fd);
}
// Dokan callback: create a directory on the Ceph mount. The root is
// treated as already existing. Returns 0 or a mapped NTSTATUS.
static NTSTATUS WinCephCreateDirectory(
  LPCWSTR FileName,
  PDOKAN_FILE_INFO DokanFileInfo)
{
  string path = get_path(FileName);
  dout(20) << __func__ << " " << path << dendl;
  if (path == "/") {
    return 0;
  }

  int ret = ceph_mkdir(cmount, path.c_str(), g_cfg->dir_mode);
  if (ret >= 0) {
    return 0;
  }

  dout(2) << __func__ << " " << path
          << ": ceph_mkdir failed. Error: " << ret << dendl;
  return cephfs_errno_to_ntstatus_map(ret);
}
// Dokan "ZwCreateFile" callback, servicing both file and directory
// opens/creates. The kernel-style arguments are first converted to Win32
// CreateFile semantics, then the create disposition is mapped onto
// ceph_open()/ceph_mkdir() calls. The resulting CephFS fd is stored in
// DokanFileInfo->Context (see fd_context).
static NTSTATUS WinCephCreateFile(
  LPCWSTR FileName,
  PDOKAN_IO_SECURITY_CONTEXT SecurityContext,
  ACCESS_MASK DesiredAccess,
  ULONG FileAttributes,
  ULONG ShareMode,
  ULONG CreateDisposition,
  ULONG CreateOptions,
  PDOKAN_FILE_INFO DokanFileInfo)
{
  // TODO: use ZwCreateFile args by default and avoid conversions.
  ACCESS_MASK AccessMode;
  DWORD FlagsAndAttributes, CreationDisposition;
  DokanMapKernelToUserCreateFileFlags(
    DesiredAccess, FileAttributes, CreateOptions, CreateDisposition,
    &AccessMode, &FlagsAndAttributes, &CreationDisposition);

  string path = get_path(FileName);
  dout(20) << __func__ << " " << path
           << ". CreationDisposition: " << CreationDisposition << dendl;

  if (g_cfg->debug) {
    print_open_params(
      path.c_str(), AccessMode, FlagsAndAttributes, ShareMode,
      CreationDisposition, CreateOptions, DokanFileInfo);
  }

  // The context lives inside DOKAN_FILE_INFO itself (8 bytes), so no
  // allocation is needed; just zero it for this new handle.
  pfd_context fdc = (pfd_context) &(DokanFileInfo->Context);
  *fdc = { 0 };
  NTSTATUS st = 0;

  // Probe the path first to decide between the "exists" and "new"
  // disposition tables below.
  struct ceph_statx stbuf;
  unsigned int requested_attrs = CEPH_STATX_BASIC_STATS;
  int ret = ceph_statx(cmount, path.c_str(), &stbuf, requested_attrs, 0);
  if (!ret) { /* File Exists */
    if (S_ISREG(stbuf.stx_mode)) {
      dout(20) << __func__ << " " << path << ". File exists." << dendl;
      if (CreateOptions & FILE_DIRECTORY_FILE) {
        dout(2) << __func__ << " " << path << ". Not a directory." << dendl;
        return STATUS_NOT_A_DIRECTORY;
      }
      switch (CreationDisposition) {
        case CREATE_NEW:
          return STATUS_OBJECT_NAME_COLLISION;
        case TRUNCATE_EXISTING:
          // open O_TRUNC & return 0
          return do_open_file(path, O_CREAT | O_TRUNC | O_RDWR,
                              g_cfg->file_mode, fdc);
        case OPEN_ALWAYS:
          // open & return STATUS_OBJECT_NAME_COLLISION
          if (!WRITE_ACCESS_REQUESTED(AccessMode))
            fdc->read_only = 1;
          if ((st = do_open_file(path, fdc->read_only ? O_RDONLY : O_RDWR,
                                 g_cfg->file_mode, fdc)))
            return st;
          // Collision status tells the caller an existing file was opened.
          return STATUS_OBJECT_NAME_COLLISION;
        case OPEN_EXISTING:
          // open & return 0
          if (!WRITE_ACCESS_REQUESTED(AccessMode))
            fdc->read_only = 1;
          if ((st = do_open_file(path, fdc->read_only ? O_RDONLY : O_RDWR,
                                 g_cfg->file_mode, fdc)))
            return st;
          return 0;
        case CREATE_ALWAYS:
          // open O_TRUNC & return STATUS_OBJECT_NAME_COLLISION
          if ((st = do_open_file(path, O_CREAT | O_TRUNC | O_RDWR,
                                 g_cfg->file_mode, fdc)))
            return st;
          return STATUS_OBJECT_NAME_COLLISION;
      }
    } else if (S_ISDIR(stbuf.stx_mode)) {
      dout(20) << __func__ << " " << path << ". Directory exists." << dendl;
      DokanFileInfo->IsDirectory = TRUE;
      if (CreateOptions & FILE_NON_DIRECTORY_FILE) {
        dout(2) << __func__ << " " << path << ". File is a directory." << dendl;
        return STATUS_FILE_IS_A_DIRECTORY;
      }
      switch (CreationDisposition) {
        case CREATE_NEW:
          return STATUS_OBJECT_NAME_COLLISION;
        case TRUNCATE_EXISTING:
          return 0;
        case OPEN_ALWAYS:
        case OPEN_EXISTING:
          // Directories are opened read-only; the fd is kept so that later
          // callbacks have a valid handle.
          return do_open_file(path, O_RDONLY, g_cfg->file_mode, fdc);
        case CREATE_ALWAYS:
          return STATUS_OBJECT_NAME_COLLISION;
      }
    } else {
      // Neither a regular file nor a directory (e.g. special file).
      derr << __func__ << " " << path
           << ": Unsupported st_mode: " << stbuf.stx_mode << dendl;
      return STATUS_BAD_FILE_TYPE;
    }
  } else { // The file doesn't exist.
    if (DokanFileInfo->IsDirectory) {
      // TODO: check create disposition.
      dout(20) << __func__ << " " << path << ". New directory." << dendl;
      if ((st = WinCephCreateDirectory(FileName, DokanFileInfo)))
        return st;
      // Dokan expects a file handle even when creating new directories.
      return do_open_file(path, O_RDONLY, g_cfg->file_mode, fdc);
    }
    dout(20) << __func__ << " " << path << ". New file." << dendl;
    switch (CreationDisposition) {
      case CREATE_NEW:
        // create & return 0
        return do_open_file(path, O_CREAT | O_RDWR | O_EXCL,
                            g_cfg->file_mode, fdc);
      case CREATE_ALWAYS:
        // create & return 0
        return do_open_file(path, O_CREAT | O_TRUNC | O_RDWR,
                            g_cfg->file_mode, fdc);
      case OPEN_ALWAYS:
        return do_open_file(path, O_CREAT | O_RDWR,
                            g_cfg->file_mode, fdc);
      case OPEN_EXISTING:
      case TRUNCATE_EXISTING:
        dout(2) << __func__ << " " << path << ": Not found." << dendl;
        return STATUS_OBJECT_NAME_NOT_FOUND;
      default:
        derr << __func__ << " " << path
             << ": Unsupported create disposition: "
             << CreationDisposition << dendl;
        return STATUS_INVALID_PARAMETER;
    }
  }

  // We shouldn't get here.
  derr << __func__ << ": unknown error while opening: " << path << dendl;
  return STATUS_INTERNAL_ERROR;
}
// Dokan "CloseFile" callback: releases the CephFS fd associated with the
// handle and clears the context.
static void WinCephCloseFile(
  LPCWSTR FileName,
  PDOKAN_FILE_INFO DokanFileInfo)
{
  string path = get_path(FileName);
  pfd_context fdc = (pfd_context) &(DokanFileInfo->Context);
  // NOTE(review): fdc points at DokanFileInfo->Context itself, so it can
  // never actually be null; this check looks like it was meant to test
  // DokanFileInfo->Context instead — confirm.
  if (!fdc) {
    derr << __func__ << ": missing context: " << path << dendl;
    return;
  }

  dout(20) << __func__ << " " << path << " fd: " << fdc->fd << dendl;
  int ret = ceph_close(cmount, fdc->fd);
  if (ret) {
    dout(2) << __func__ << " " << path
            << " failed. fd: " << fdc->fd
            << ". Error: " << ret << dendl;
  }

  DokanFileInfo->Context = 0;
}
// Dokan "Cleanup" callback. Performs the deferred deletion for handles
// opened with DeleteOnClose: the permission check happened earlier in
// WinCephDeleteFile/WinCephDeleteDirectory, while the actual
// unlink/rmdir is done here.
static void WinCephCleanup(
  LPCWSTR FileName,
  PDOKAN_FILE_INFO DokanFileInfo)
{
  string path = get_path(FileName);
  if (!DokanFileInfo->Context) {
    dout(10) << __func__ << ": missing context: " << path << dendl;
    return;
  }

  if (DokanFileInfo->DeleteOnClose) {
    dout(20) << __func__ << " DeleteOnClose: " << path << dendl;
    if (DokanFileInfo->IsDirectory) {
      int ret = ceph_rmdir(cmount, path.c_str());
      if (ret)
        derr << __func__ << " " << path
             << ": ceph_rmdir failed. Error: " << ret << dendl;
    } else {
      int ret = ceph_unlink(cmount, path.c_str());
      if (ret != 0) {
        derr << __func__ << " " << path
             << ": ceph_unlink failed. Error: " << ret << dendl;
      }
    }
  }
}
// Dokan "ReadFile" callback: reads BufferLength bytes at Offset.
// Requests exceeding CEPH_DOKAN_MAX_FILE_SZ / CEPH_DOKAN_MAX_IO_SZ are
// rejected. If no fd is present in the context, a temporary read-only
// handle is used (presumably related to memory mapped files, for which
// reads can occur after Cleanup — see the note in WinCephWriteFile).
static NTSTATUS WinCephReadFile(
  LPCWSTR FileName,
  LPVOID Buffer,
  DWORD BufferLength,
  LPDWORD ReadLength,
  LONGLONG Offset,
  PDOKAN_FILE_INFO DokanFileInfo)
{
  // Zero-length reads succeed trivially.
  if (!BufferLength) {
    *ReadLength = 0;
    return 0;
  }
  if (Offset < 0) {
    dout(2) << __func__ << " " << get_path(FileName)
            << ": Invalid offset: " << Offset << dendl;
    return STATUS_INVALID_PARAMETER;
  }
  if (Offset > CEPH_DOKAN_MAX_FILE_SZ ||
      BufferLength > CEPH_DOKAN_MAX_IO_SZ) {
    dout(2) << "File read too large: " << get_path(FileName)
            << ". Offset: " << Offset
            << ". Buffer length: " << BufferLength << dendl;
    return STATUS_FILE_TOO_LARGE;
  }

  pfd_context fdc = (pfd_context) &(DokanFileInfo->Context);
  // NOTE(review): an fd of 0 is treated as "missing handle" — assumes
  // CephFS never hands out fd 0; confirm.
  if (!fdc->fd) {
    dout(15) << __func__ << " " << get_path(FileName)
             << ". Missing context, using temporary handle." << dendl;

    string path = get_path(FileName);
    int fd_new = ceph_open(cmount, path.c_str(), O_RDONLY, 0);
    if (fd_new < 0) {
      dout(2) << __func__ << " " << path
              << ": ceph_open failed. Error: " << fd_new << dendl;
      return cephfs_errno_to_ntstatus_map(fd_new);
    }

    int ret = ceph_read(cmount, fd_new, (char*) Buffer, BufferLength, Offset);
    if (ret < 0) {
      dout(2) << __func__ << " " << path
              << ": ceph_read failed. Error: " << ret
              << ". Offset: " << Offset
              << "Buffer length: " << BufferLength << dendl;
      ceph_close(cmount, fd_new);
      return cephfs_errno_to_ntstatus_map(ret);
    }

    // ceph_read returns the number of bytes actually read.
    *ReadLength = ret;
    ceph_close(cmount, fd_new);
    return 0;
  } else {
    int ret = ceph_read(cmount, fdc->fd, (char*) Buffer, BufferLength, Offset);
    if (ret < 0) {
      dout(2) << __func__ << " " << get_path(FileName)
              << ": ceph_read failed. Error: " << ret
              << ". Offset: " << Offset
              << "Buffer length: " << BufferLength << dendl;
      return cephfs_errno_to_ntstatus_map(ret);
    }
    *ReadLength = ret;
    return 0;
  }
}
// Dokan "WriteFile" callback: writes NumberOfBytesToWrite bytes at Offset.
// A negative offset combined with WriteToEndOfFile means "append": the
// current file size is looked up and used as the write offset.
// Writes through read-only handles are rejected.
static NTSTATUS WinCephWriteFile(
  LPCWSTR FileName,
  LPCVOID Buffer,
  DWORD NumberOfBytesToWrite,
  LPDWORD NumberOfBytesWritten,
  LONGLONG Offset,
  PDOKAN_FILE_INFO DokanFileInfo)
{
  // Zero-length writes succeed trivially.
  if (!NumberOfBytesToWrite) {
    *NumberOfBytesWritten = 0;
    return 0;
  }
  if (Offset < 0) {
    if (DokanFileInfo->WriteToEndOfFile) {
      // Append semantics: resolve the current end of file.
      string path = get_path(FileName);

      struct ceph_statx stbuf;
      unsigned int requested_attrs = CEPH_STATX_BASIC_STATS;
      int ret = ceph_statx(cmount, path.c_str(), &stbuf, requested_attrs, 0);
      if (ret) {
        dout(2) << __func__ << " " << path
                << ": ceph_statx failed. Error: " << ret << dendl;
        return cephfs_errno_to_ntstatus_map(ret);
      }

      Offset = stbuf.stx_size;
    } else {
      dout(2) << __func__ << " " << get_path(FileName)
              << ": Invalid offset: " << Offset << dendl;
      return STATUS_INVALID_PARAMETER;
    }
  }

  if (Offset > CEPH_DOKAN_MAX_FILE_SZ ||
      NumberOfBytesToWrite > CEPH_DOKAN_MAX_IO_SZ) {
    dout(2) << "File write too large: " << get_path(FileName)
            << ". Offset: " << Offset
            << ". Buffer length: " << NumberOfBytesToWrite
            << ". WriteToEndOfFile: " << (bool) DokanFileInfo->WriteToEndOfFile
            << dendl;
    return STATUS_FILE_TOO_LARGE;
  }

  pfd_context fdc = (pfd_context) &(DokanFileInfo->Context);
  if (fdc->read_only)
    return STATUS_ACCESS_DENIED;

  // TODO: check if we still have to support missing handles.
  // According to Dokan docs, it might be related to memory mapped files, in
  // which case reads/writes can be performed between the Close/Cleanup calls.
  if (!fdc->fd) {
    string path = get_path(FileName);
    dout(15) << __func__ << " " << path
             << ". Missing context, using temporary handle." << dendl;

    int fd_new = ceph_open(cmount, path.c_str(), O_RDWR, 0);
    if (fd_new < 0) {
      dout(2) << __func__ << " " << path
              << ": ceph_open failed. Error: " << fd_new << dendl;
      return cephfs_errno_to_ntstatus_map(fd_new);
    }

    int ret = ceph_write(cmount, fd_new, (char*) Buffer,
                         NumberOfBytesToWrite, Offset);
    if (ret < 0) {
      dout(2) << __func__ << " " << path
              << ": ceph_write failed. Error: " << ret
              << ". Offset: " << Offset
              << "Buffer length: " << NumberOfBytesToWrite << dendl;
      ceph_close(cmount, fd_new);
      return cephfs_errno_to_ntstatus_map(ret);
    }

    // ceph_write returns the number of bytes actually written.
    *NumberOfBytesWritten = ret;
    ceph_close(cmount, fd_new);
    return 0;
  } else {
    int ret = ceph_write(cmount, fdc->fd, (char*) Buffer,
                         NumberOfBytesToWrite, Offset);
    if (ret < 0) {
      dout(2) << __func__ << " " << get_path(FileName)
              << ": ceph_write failed. Error: " << ret
              << ". Offset: " << Offset
              << "Buffer length: " << NumberOfBytesToWrite << dendl;
      return cephfs_errno_to_ntstatus_map(ret);
    }
    *NumberOfBytesWritten = ret;
    return 0;
  }
}
// Dokan "FlushFileBuffers" callback: flush pending data for the handle.
// Fix: the error message used to say "ceph_sync" although the call being
// reported is ceph_fsync.
static NTSTATUS WinCephFlushFileBuffers(
  LPCWSTR FileName,
  PDOKAN_FILE_INFO DokanFileInfo)
{
  pfd_context fdc = (pfd_context) &(DokanFileInfo->Context);
  if (!fdc->fd) {
    derr << __func__ << ": missing context: " << get_path(FileName) << dendl;
    return STATUS_INVALID_HANDLE;
  }

  // syncdataonly = 0: flush metadata as well as data.
  int ret = ceph_fsync(cmount, fdc->fd, 0);
  if (ret) {
    dout(2) << __func__ << " " << get_path(FileName)
            << ": ceph_fsync failed. Error: " << ret << dendl;
    return cephfs_errno_to_ntstatus_map(ret);
  }
  return 0;
}
// Dokan "GetFileInformation" callback: fill a BY_HANDLE_FILE_INFORMATION
// from CephFS stat data. Uses ceph_fstatx when an fd is available and
// falls back to a path based ceph_statx otherwise.
static NTSTATUS WinCephGetFileInformation(
  LPCWSTR FileName,
  LPBY_HANDLE_FILE_INFORMATION HandleFileInformation,
  PDOKAN_FILE_INFO DokanFileInfo)
{
  string path = get_path(FileName);
  dout(20) << __func__ << " " << path << dendl;

  memset(HandleFileInformation, 0, sizeof(BY_HANDLE_FILE_INFORMATION));

  struct ceph_statx stbuf;
  unsigned int requested_attrs = CEPH_STATX_BASIC_STATS;
  pfd_context fdc = (pfd_context) &(DokanFileInfo->Context);
  if (!fdc->fd) {
    int ret = ceph_statx(cmount, path.c_str(), &stbuf, requested_attrs, 0);
    if (ret) {
      dout(2) << __func__ << " " << path
              << ": ceph_statx failed. Error: " << ret << dendl;
      return cephfs_errno_to_ntstatus_map(ret);
    }
  } else {
    int ret = ceph_fstatx(cmount, fdc->fd, &stbuf, requested_attrs, 0);
    if (ret) {
      dout(2) << __func__ << " " << path
              << ": ceph_fstatx failed. Error: " << ret << dendl;
      return cephfs_errno_to_ntstatus_map(ret);
    }
  }

  // Windows splits 64 bit sizes and inode numbers into 32 bit halves.
  HandleFileInformation->nFileSizeLow = (stbuf.stx_size << 32) >> 32;
  HandleFileInformation->nFileSizeHigh = stbuf.stx_size >> 32;

  to_filetime(stbuf.stx_ctime.tv_sec, &HandleFileInformation->ftCreationTime);
  to_filetime(stbuf.stx_atime.tv_sec, &HandleFileInformation->ftLastAccessTime);
  to_filetime(stbuf.stx_mtime.tv_sec, &HandleFileInformation->ftLastWriteTime);

  if (S_ISDIR(stbuf.stx_mode)) {
    HandleFileInformation->dwFileAttributes |= FILE_ATTRIBUTE_DIRECTORY;
  } else if (S_ISREG(stbuf.stx_mode)) {
    HandleFileInformation->dwFileAttributes |= FILE_ATTRIBUTE_NORMAL;
  }

  HandleFileInformation->nFileIndexLow = (stbuf.stx_ino << 32) >> 32;
  HandleFileInformation->nFileIndexHigh = stbuf.stx_ino >> 32;

  HandleFileInformation->nNumberOfLinks = stbuf.stx_nlink;
  return 0;
}
// Dokan "FindFiles" callback: enumerate a directory, handing every entry
// to the provided FillFindData callback.
// Fixes: the opendir error message used to say "ceph_mkdir failed", and
// the directory handle was leaked when ceph_readdirplus_r failed.
static NTSTATUS WinCephFindFiles(
  LPCWSTR FileName,
  PFillFindData FillFindData, // function pointer
  PDOKAN_FILE_INFO DokanFileInfo)
{
  string path = get_path(FileName);
  dout(20) << __func__ << " " << path << dendl;

  struct ceph_dir_result *dirp;
  int ret = ceph_opendir(cmount, path.c_str(), &dirp);
  if (ret != 0) {
    dout(2) << __func__ << " " << path
            << ": ceph_opendir failed. Error: " << ret << dendl;
    return cephfs_errno_to_ntstatus_map(ret);
  }

  WIN32_FIND_DATAW findData;
  int count = 0;
  while (1) {
    memset(&findData, 0, sizeof(findData));

    struct dirent result;
    struct ceph_statx stbuf;

    unsigned int requested_attrs = CEPH_STATX_BASIC_STATS;
    ret = ceph_readdirplus_r(cmount, dirp, &result, &stbuf,
                             requested_attrs,
                             0,     // no special flags used when filling attrs
                             NULL); // we're not using inodes.
    if (!ret)
      // End of directory.
      break;
    if (ret < 0) {
      dout(2) << __func__ << " " << path
              << ": ceph_readdirplus_r failed. Error: " << ret << dendl;
      // Don't leak the directory handle on error.
      ceph_closedir(cmount, dirp);
      return cephfs_errno_to_ntstatus_map(ret);
    }

    // NOTE(review): names of exactly MAX_PATH characters won't be null
    // terminated by copy(); shorter names are covered by the memset above.
    to_wstring(result.d_name).copy(findData.cFileName, MAX_PATH);

    // Windows splits 64 bit sizes into two 32 bit halves.
    findData.nFileSizeLow = (stbuf.stx_size << 32) >> 32;
    findData.nFileSizeHigh = stbuf.stx_size >> 32;

    to_filetime(stbuf.stx_ctime.tv_sec, &findData.ftCreationTime);
    to_filetime(stbuf.stx_atime.tv_sec, &findData.ftLastAccessTime);
    to_filetime(stbuf.stx_mtime.tv_sec, &findData.ftLastWriteTime);

    if (S_ISDIR(stbuf.stx_mode)) {
      findData.dwFileAttributes |= FILE_ATTRIBUTE_DIRECTORY;
    } else if (S_ISREG(stbuf.stx_mode)) {
      findData.dwFileAttributes |= FILE_ATTRIBUTE_NORMAL;
    }

    FillFindData(&findData, DokanFileInfo);
    count++;
  }

  ceph_closedir(cmount, dirp);

  dout(20) << __func__ << " " << path
           << " found " << count << " entries." << dendl;
  return 0;
}
/**
 * This callback is only supposed to check if deleting a file is
 * allowed. The actual file deletion will be performed by WinCephCleanup
 */
static NTSTATUS WinCephDeleteFile(
  LPCWSTR FileName,
  PDOKAN_FILE_INFO DokanFileInfo)
{
  string path = get_path(FileName);
  dout(20) << __func__ << " " << path << dendl;

  // Probe only; no filesystem change happens here.
  return ceph_may_delete(cmount, path.c_str()) < 0
    ? STATUS_ACCESS_DENIED : 0;
}
// Dokan "DeleteDirectory" callback: only checks whether the directory may
// be removed (permissions and emptiness); the rmdir itself happens in
// WinCephCleanup.
// Fix: dropped the unused WIN32_FIND_DATAW buffer that was memset on
// every loop iteration without ever being read.
static NTSTATUS WinCephDeleteDirectory(
  LPCWSTR FileName,
  PDOKAN_FILE_INFO DokanFileInfo)
{
  string path = get_path(FileName);
  dout(20) << __func__ << " " << path << dendl;

  if (ceph_may_delete(cmount, path.c_str()) < 0) {
    return STATUS_ACCESS_DENIED;
  }

  struct ceph_dir_result *dirp;
  int ret = ceph_opendir(cmount, path.c_str(), &dirp);
  if (ret != 0) {
    dout(2) << __func__ << " " << path
            << ": ceph_opendir failed. Error: " << ret << dendl;
    return cephfs_errno_to_ntstatus_map(ret);
  }

  // The directory may only be deleted if it contains nothing but the
  // "." and ".." entries.
  struct dirent *entry;
  while ((entry = ceph_readdir(cmount, dirp))) {
    if (!strcmp(entry->d_name, ".") || !strcmp(entry->d_name, ".."))
      continue;

    ceph_closedir(cmount, dirp);
    dout(2) << __func__ << " " << path
            << ": directory is not empty. " << dendl;
    return STATUS_DIRECTORY_NOT_EMPTY;
  }

  ceph_closedir(cmount, dirp);
  return 0;
}
// Dokan "MoveFile" callback: rename/move a file or directory.
static NTSTATUS WinCephMoveFile(
  LPCWSTR FileName, // existing file name
  LPCWSTR NewFileName,
  BOOL ReplaceIfExisting,
  PDOKAN_FILE_INFO DokanFileInfo)
{
  string src_path = get_path(FileName);
  string dst_path = get_path(NewFileName);
  dout(20) << __func__ << " " << src_path << " -> " << dst_path << dendl;

  int rename_ret = ceph_rename(cmount, src_path.c_str(), dst_path.c_str());
  if (rename_ret) {
    dout(2) << __func__ << " " << src_path << " -> " << dst_path
            << ": ceph_rename failed. Error: " << rename_ret << dendl;
  }

  return cephfs_errno_to_ntstatus_map(rename_ret);
}
// Dokan "SetEndOfFile" callback: truncate (or extend) the file to
// ByteOffset using the fd stored in the handle context.
static NTSTATUS WinCephSetEndOfFile(
  LPCWSTR FileName,
  LONGLONG ByteOffset,
  PDOKAN_FILE_INFO DokanFileInfo)
{
  pfd_context fdc = (pfd_context) &(DokanFileInfo->Context);
  if (!fdc->fd) {
    derr << __func__ << ": missing context: " << get_path(FileName) << dendl;
    return STATUS_INVALID_HANDLE;
  }

  int ret = ceph_ftruncate(cmount, fdc->fd, ByteOffset);
  if (ret) {
    dout(2) << __func__ << " " << get_path(FileName)
            << ": ceph_ftruncate failed. Error: " << ret
            << " Offset: " << ByteOffset << dendl;
    return cephfs_errno_to_ntstatus_map(ret);
  }

  return 0;
}
// Dokan "SetAllocationSize" callback. Shrinks the file when the requested
// allocation is smaller than the current size; growing the allocation is
// treated as a no-op.
static NTSTATUS WinCephSetAllocationSize(
  LPCWSTR FileName,
  LONGLONG AllocSize,
  PDOKAN_FILE_INFO DokanFileInfo)
{
  pfd_context fdc = (pfd_context) &(DokanFileInfo->Context);
  if (!fdc->fd) {
    derr << __func__ << ": missing context: " << get_path(FileName) << dendl;
    return STATUS_INVALID_HANDLE;
  }

  // Fetch the current size so that we only ever truncate downwards.
  struct ceph_statx stbuf;
  unsigned int requested_attrs = CEPH_STATX_BASIC_STATS;
  int ret = ceph_fstatx(cmount, fdc->fd, &stbuf, requested_attrs, 0);
  if (ret) {
    dout(2) << __func__ << " " << get_path(FileName)
            << ": ceph_fstatx failed. Error: " << ret << dendl;
    return cephfs_errno_to_ntstatus_map(ret);
  }

  if ((unsigned long long) AllocSize >= stbuf.stx_size)
    return 0;

  ret = ceph_ftruncate(cmount, fdc->fd, AllocSize);
  if (ret) {
    dout(2) << __func__ << " " << get_path(FileName)
            << ": ceph_ftruncate failed. Error: " << ret << dendl;
    return cephfs_errno_to_ntstatus_map(ret);
  }
  return 0;
}
// Dokan "SetFileAttributes" callback. Currently a stub: the requested
// Windows attributes are ignored and success is reported so that callers
// (e.g. copy tools) don't fail.
static NTSTATUS WinCephSetFileAttributes(
  LPCWSTR FileName,
  DWORD FileAttributes,
  PDOKAN_FILE_INFO DokanFileInfo)
{
  string path = get_path(FileName);
  dout(20) << __func__ << " (stubbed) " << path << dendl;
  return 0;
}
// Dokan "SetFileTime" callback: apply creation/access/write timestamps
// through ceph_setattrx, converting each provided FILETIME to Unix time.
// NULL timestamp pointers are skipped (not part of the setattr mask).
static NTSTATUS WinCephSetFileTime(
  LPCWSTR FileName,
  CONST FILETIME* CreationTime,
  CONST FILETIME* LastAccessTime,
  CONST FILETIME* LastWriteTime,
  PDOKAN_FILE_INFO DokanFileInfo)
{
  // TODO: as per a previous inline comment, this might cause problems
  // with some apps such as MS Office (different error code than expected
  // or ctime issues probably). We might allow disabling it.
  string path = get_path(FileName);
  dout(20) << __func__ << " " << path << dendl;

  struct ceph_statx stbuf = { 0 };
  int mask = 0;
  if (CreationTime) {
    mask |= CEPH_SETATTR_CTIME;
    // On Windows, st_ctime is the creation time while on Linux it's the time
    // of the last metadata change. We'll try to stick with the Windows
    // semantics, although this might be overridden by Linux hosts.
    to_unix_time(*CreationTime, &stbuf.stx_ctime.tv_sec);
  }
  if (LastAccessTime) {
    mask |= CEPH_SETATTR_ATIME;
    to_unix_time(*LastAccessTime, &stbuf.stx_atime.tv_sec);
  }
  if (LastWriteTime) {
    mask |= CEPH_SETATTR_MTIME;
    to_unix_time(*LastWriteTime, &stbuf.stx_mtime.tv_sec);
  }

  int ret = ceph_setattrx(cmount, path.c_str(), &stbuf, mask, 0);
  if (ret) {
    dout(2) << __func__ << " " << path
            << ": ceph_setattrx failed. Error: " << ret << dendl;
    return cephfs_errno_to_ntstatus_map(ret);
  }
  return 0;
}
// Dokan "SetFileSecurity" callback. Currently a stub: Windows security
// descriptors are not mapped to CephFS permissions.
static NTSTATUS WinCephSetFileSecurity(
  LPCWSTR FileName,
  PSECURITY_INFORMATION SecurityInformation,
  PSECURITY_DESCRIPTOR SecurityDescriptor,
  ULONG SecurityDescriptorLength,
  PDOKAN_FILE_INFO DokanFileInfo)
{
  string path = get_path(FileName);
  dout(20) << __func__ << " (stubbed) " << path << dendl;
  // TODO: Windows ACLs are ignored. At the moment, we're reporting this
  // operation as successful to avoid breaking applications. We might consider
  // making this behavior configurable.
  return 0;
}
// Dokan "GetVolumeInformation" callback: report the configured volume
// label, serial number, maximum component length and filesystem flags.
static NTSTATUS WinCephGetVolumeInformation(
  LPWSTR VolumeNameBuffer,
  DWORD VolumeNameSize,
  LPDWORD VolumeSerialNumber,
  LPDWORD MaximumComponentLength,
  LPDWORD FileSystemFlags,
  LPWSTR FileSystemNameBuffer,
  DWORD FileSystemNameSize,
  PDOKAN_FILE_INFO DokanFileInfo)
{
  // NOTE(review): wstring::copy does not append a null terminator;
  // presumably Dokan zero-initializes the buffer — confirm.
  g_cfg->win_vol_name.copy(VolumeNameBuffer, VolumeNameSize);
  *VolumeSerialNumber = g_cfg->win_vol_serial;
  *MaximumComponentLength = g_cfg->max_path_len;

  *FileSystemFlags =
    FILE_CASE_SENSITIVE_SEARCH |
    FILE_CASE_PRESERVED_NAMES |
    FILE_SUPPORTS_REMOTE_STORAGE |
    FILE_UNICODE_ON_DISK |
    FILE_PERSISTENT_ACLS;

  wcscpy(FileSystemNameBuffer, L"Ceph");
  return 0;
}
// Dokan "GetDiskFreeSpace" callback: report filesystem usage derived
// from ceph_statfs on the mount root.
// Fix: removed a stray double semicolon after the error return.
static NTSTATUS WinCephGetDiskFreeSpace(
  PULONGLONG FreeBytesAvailable,
  PULONGLONG TotalNumberOfBytes,
  PULONGLONG TotalNumberOfFreeBytes,
  PDOKAN_FILE_INFO DokanFileInfo)
{
  struct statvfs vfsbuf;
  int ret = ceph_statfs(cmount, "/", &vfsbuf);
  if (ret) {
    derr << "ceph_statfs failed. Error: " << ret << dendl;
    return cephfs_errno_to_ntstatus_map(ret);
  }

  // Sizes are reported in bytes: block size * block counts.
  *FreeBytesAvailable = vfsbuf.f_bsize * vfsbuf.f_bfree;
  *TotalNumberOfBytes = vfsbuf.f_bsize * vfsbuf.f_blocks;
  *TotalNumberOfFreeBytes = vfsbuf.f_bsize * vfsbuf.f_bfree;

  return 0;
}
// Remove an existing Dokan mount point. Returns 0 on success or -EINVAL
// if Dokan couldn't remove the mount.
int do_unmap(wstring& mountpoint) {
  BOOL removed = DokanRemoveMountPoint(mountpoint.c_str());
  if (removed)
    return 0;

  wcerr << "Couldn't remove the specified CephFS mount: "
        << mountpoint << std::endl;
  return -EINVAL;
}
int cleanup_mount() {
int ret = ceph_unmount(cmount);
if (ret)
derr << "Couldn't perform clean unmount. Error: " << ret << dendl;
else
dout(0) << "Unmounted." << dendl;
return ret;
}
// Dokan "Unmounted" callback: cleanly unmount the CephFS filesystem when
// the Dokan volume goes away.
static NTSTATUS WinCephUnmount(
  PDOKAN_FILE_INFO DokanFileInfo)
{
  cleanup_mount();
  // TODO: consider propagating unmount errors to Dokan.
  return 0;
}
// Console control handler: exits on ctrl-c (triggering the registered
// atexit cleanup), logs other console events.
BOOL WINAPI ConsoleHandler(DWORD dwType)
{
  if (dwType == CTRL_C_EVENT) {
    dout(0) << "Received ctrl-c." << dendl;
    exit(0);
  } else if (dwType == CTRL_BREAK_EVENT) {
    dout(0) << "Received break event." << dendl;
  } else {
    dout(0) << "Received console event: " << dwType << dendl;
  }
  return TRUE;
}
// atexit hook: make sure the CephFS mount is released on process exit
// (registered in do_map after a successful mount).
static void unmount_atexit(void)
{
  cleanup_mount();
}
// Derive a Windows volume serial number from the cluster fsid (top 16
// bits) and the filesystem id (bottom 16 bits).
// Fixes: "Coudln't" typo in the error message; reserve a byte for the
// null terminator when fetching the fsid xattr so that uuid parsing
// never reads an unterminated buffer.
NTSTATUS get_volume_serial(PDWORD serial) {
  int64_t fs_cid = ceph_get_fs_cid(cmount);

  char fsid_str[64] = { 0 };
  // Leave room for the terminating NUL in case the xattr fills the buffer.
  int ret = ceph_getxattr(cmount, "/", "ceph.cluster_fsid",
                          fsid_str, sizeof(fsid_str) - 1);
  if (ret < 0) {
    dout(2) << "Couldn't retrieve the cluster fsid. Error: " << ret << dendl;
    return cephfs_errno_to_ntstatus_map(ret);
  }

  uuid_d fsid;
  if (!fsid.parse(fsid_str)) {
    dout(2) << "Couldn't parse cluster fsid" << dendl;
    return STATUS_INTERNAL_ERROR;
  }

  // We're generating a volume serial number by concatenating the last 16 bits
  // of the filesystem id and the cluster fsid.
  *serial = ((*(uint16_t*) fsid.bytes() & 0xffff) << 16) | (fs_cid & 0xffff);
  return 0;
}
// Mount the CephFS filesystem and run the Dokan dispatch loop until the
// volume is unmounted. Returns 0 on success, a negative errno or a
// mapped NTSTATUS on failure.
// Fix: the DOKAN_OPERATIONS/DOKAN_OPTIONS allocations were leaked on
// every early-return error path (including partial malloc failure).
int do_map() {
  PDOKAN_OPERATIONS dokan_operations =
    (PDOKAN_OPERATIONS) malloc(sizeof(DOKAN_OPERATIONS));
  PDOKAN_OPTIONS dokan_options =
    (PDOKAN_OPTIONS) malloc(sizeof(DOKAN_OPTIONS));
  if (!dokan_operations || !dokan_options) {
    derr << "Not enough memory" << dendl;
    // free(NULL) is a no-op, so this also covers partial allocation failure.
    free(dokan_operations);
    free(dokan_options);
    return -ENOMEM;
  }

  int r = set_dokan_options(g_cfg, dokan_options);
  if (r) {
    free(dokan_operations);
    free(dokan_options);
    return r;
  }

  ZeroMemory(dokan_operations, sizeof(DOKAN_OPERATIONS));
  dokan_operations->ZwCreateFile = WinCephCreateFile;
  dokan_operations->Cleanup = WinCephCleanup;
  dokan_operations->CloseFile = WinCephCloseFile;
  dokan_operations->ReadFile = WinCephReadFile;
  dokan_operations->WriteFile = WinCephWriteFile;
  dokan_operations->FlushFileBuffers = WinCephFlushFileBuffers;
  dokan_operations->GetFileInformation = WinCephGetFileInformation;
  dokan_operations->FindFiles = WinCephFindFiles;
  dokan_operations->SetFileAttributes = WinCephSetFileAttributes;
  dokan_operations->SetFileTime = WinCephSetFileTime;
  dokan_operations->DeleteFile = WinCephDeleteFile;
  dokan_operations->DeleteDirectory = WinCephDeleteDirectory;
  dokan_operations->MoveFile = WinCephMoveFile;
  dokan_operations->SetEndOfFile = WinCephSetEndOfFile;
  dokan_operations->SetAllocationSize = WinCephSetAllocationSize;
  dokan_operations->SetFileSecurity = WinCephSetFileSecurity;
  dokan_operations->GetDiskFreeSpace = WinCephGetDiskFreeSpace;
  dokan_operations->GetVolumeInformation = WinCephGetVolumeInformation;
  dokan_operations->Unmounted = WinCephUnmount;

  ceph_create_with_context(&cmount, g_ceph_context);

  r = ceph_mount(cmount, g_cfg->root_path.c_str());
  if (r) {
    derr << "ceph_mount failed. Error: " << r << dendl;
    free(dokan_operations);
    free(dokan_options);
    return cephfs_errno_to_ntstatus_map(r);
  }

  // Default the volume label to "Ceph[ - <fs name>]" when not configured.
  if (g_cfg->win_vol_name.empty()) {
    string ceph_fs_name = g_conf().get_val<string>("client_fs");
    g_cfg->win_vol_name = L"Ceph";
    if (!ceph_fs_name.empty()) {
      g_cfg->win_vol_name += L" - " + to_wstring(ceph_fs_name);
    }
  }

  if (!g_cfg->win_vol_serial) {
    if (get_volume_serial(&g_cfg->win_vol_serial)) {
      free(dokan_operations);
      free(dokan_options);
      return -EINVAL;
    }
  }

  if (g_cfg->max_path_len > 260) {
    dout(0) << "maximum path length set to " << g_cfg->max_path_len
            << ". Some Windows utilities may not be able to handle "
            << "paths that exceed MAX_PATH (260) characters. "
            << "CreateDirectoryW, used by Powershell, has also been "
            << "observed to fail when paths exceed 16384 characters."
            << dendl;
  }

  // Make sure we unmount cleanly even when exiting through exit()
  // (e.g. from the console ctrl-c handler).
  atexit(unmount_atexit);
  dout(0) << "Mounted cephfs directory: " << g_cfg->root_path.c_str()
          <<". Mountpoint: " << to_string(g_cfg->mountpoint) << dendl;

  DokanInit();
  // DokanMain blocks until the volume is unmounted.
  DWORD status = DokanMain(dokan_options, dokan_operations);
  switch (static_cast<int>(status)) {
  case DOKAN_SUCCESS:
    dout(2) << "Dokan has returned successfully" << dendl;
    break;
  case DOKAN_ERROR:
    derr << "Received generic dokan error." << dendl;
    break;
  case DOKAN_DRIVE_LETTER_ERROR:
    derr << "Invalid drive letter or mountpoint." << dendl;
    break;
  case DOKAN_DRIVER_INSTALL_ERROR:
    derr << "Can't initialize Dokan driver." << dendl;
    break;
  case DOKAN_START_ERROR:
    derr << "Dokan failed to start" << dendl;
    break;
  case DOKAN_MOUNT_ERROR:
    derr << "Dokan mount error." << dendl;
    break;
  case DOKAN_MOUNT_POINT_ERROR:
    derr << "Invalid mountpoint." << dendl;
    break;
  default:
    derr << "Unknown Dokan error: " << status << dendl;
    break;
  }

  DokanShutdown();
  free(dokan_options);
  free(dokan_operations);
  return 0;
}
// Perform the Ceph global initialization, selecting the code environment
// (daemon vs utility) based on the requested command. Returns the
// initialized CephContext; g_ceph_context is set as a side effect.
boost::intrusive_ptr<CephContext> do_global_init(
  int argc, const char **argv, Command cmd)
{
  auto args = argv_to_vec(argc, argv);

  code_environment_t code_env;
  int flags;

  switch (cmd) {
    case Command::Map:
      code_env = CODE_ENVIRONMENT_DAEMON;
      flags = CINIT_FLAG_UNPRIVILEGED_DAEMON_DEFAULTS;
      break;
    default:
      code_env = CODE_ENVIRONMENT_UTILITY;
      flags = CINIT_FLAG_NO_MON_CONFIG;
      break;
  }

  global_pre_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, code_env, flags);
  // Avoid cluttering the console when spawning a mapping that will run
  // in the background.
  if (g_conf()->daemonize) {
    flags |= CINIT_FLAG_NO_DAEMON_ACTIONS;
  }
  auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
                         code_env, flags, FALSE);

  // There's no fork on Windows, we should be safe calling this anytime.
  common_init_finish(g_ceph_context);
  global_init_chdir(g_ceph_context);

  return cct;
}
// Entry point: parse the subcommand and options, handle the trivial
// commands (version/help) before Ceph initialization, then dispatch to
// do_map()/do_unmap().
int main(int argc, const char** argv)
{
  // Install the ctrl-c handler early so interrupts trigger a clean exit.
  if (!SetConsoleCtrlHandler((PHANDLER_ROUTINE)ConsoleHandler, TRUE)) {
    cerr << "Couldn't initialize console event handler." << std::endl;
    return -EINVAL;
  }

  g_cfg = new Config;

  Command cmd = Command::None;
  auto args = argv_to_vec(argc, argv);
  std::ostringstream err_msg;
  int r = parse_args(args, &err_msg, &cmd, g_cfg);
  if (r) {
    std::cerr << err_msg.str() << std::endl;
    return r;
  }

  // Version/help don't require a Ceph context.
  switch (cmd) {
    case Command::Version:
      std::cout << pretty_version_to_str() << std::endl;
      return 0;
    case Command::Help:
      print_usage();
      return 0;
    default:
      break;
  }

  auto cct = do_global_init(argc, argv, cmd);

  switch (cmd) {
    case Command::Map:
      return do_map();
    case Command::Unmap:
      return do_unmap(g_cfg->mountpoint);
    default:
      print_usage();
      break;
  }

  return 0;
}
| 32,956 | 29.319227 | 80 | cc |
null | ceph-main/src/dokan/ceph_dokan.h | /*
* Copyright (C) 2021 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#define CEPH_DOKAN_IO_DEFAULT_TIMEOUT 60 * 5 // Seconds
// Avoid conflicting COM types, exposed when using C++.
#define _OLE2_H_
#include <bcrypt.h> // for typedef of NTSTATUS
#include <dokan.h>
// Runtime configuration for a ceph-dokan mapping, populated by
// parse_args() and consumed by do_map()/the Dokan callbacks.
struct Config {
  bool removable = false;             // expose the volume as a removable drive
  bool readonly = false;              // read-only mount
  bool use_win_mount_mgr = false;     // register with the Windows mount manager
  bool current_session_only = false;  // expose the mount only to this session
  bool debug = false;                 // enable verbose debug output
  bool dokan_stderr = false;          // send Dokan's own logging to stderr

  int operation_timeout = CEPH_DOKAN_IO_DEFAULT_TIMEOUT;  // seconds

  std::wstring mountpoint = L"";      // drive letter or directory path
  std::string root_path = "/";        // CephFS subtree to expose
  std::wstring win_vol_name = L"";    // reported volume label
  unsigned long win_vol_serial = 0;   // reported volume serial (0 = derive it)
  unsigned long max_path_len = 256;   // reported MaximumComponentLength
  mode_t file_mode = 0755;            // mode for newly created files
  mode_t dir_mode = 0755;             // mode for newly created directories
};
extern Config *g_cfg;
// TODO: list and service commands.
enum class Command {
  None,     // no (or unrecognized) subcommand
  Version,  // print the version string and exit
  Help,     // print usage and exit
  Map,      // mount a CephFS filesystem
  Unmap,    // remove an existing mapping
};
void print_usage();
int parse_args(
std::vector<const char*>& args,
std::ostream *err_msg,
Command *command, Config *cfg);
int set_dokan_options(Config *cfg, PDOKAN_OPTIONS dokan_options);
| 1,274 | 20.982759 | 65 | h |
null | ceph-main/src/dokan/dbg.cc | /*
* Copyright (C) 2021 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "ceph_dokan.h"
#include "utils.h"
#include "dbg.h"
#include "common/debug.h"
#include "common/dout.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_client
#undef dout_prefix
#define dout_prefix *_dout << "ceph-dokan: "
#define check_flag(stream, val, flag) if (val & flag) { stream << "[" #flag "]"; }
#define check_flag_eq(stream, val, flag) if (val == flag) { stream << "[" #flag "]"; }
using namespace std;
// Append the account and domain name of the user issuing the current
// Dokan request to Stream, resolved from the requestor's access token.
// Failures are logged and leave Stream unchanged.
void print_credentials(ostringstream& Stream, PDOKAN_FILE_INFO DokanFileInfo)
{
  UCHAR buffer[1024];
  DWORD returnLength;
  CHAR accountName[256];
  CHAR domainName[256];
  DWORD accountLength = sizeof(accountName) / sizeof(WCHAR);
  DWORD domainLength = sizeof(domainName) / sizeof(WCHAR);
  SID_NAME_USE snu;
  int err = 0;

  // Impersonation token of the user that triggered this request.
  HANDLE handle = DokanOpenRequestorToken(DokanFileInfo);
  if (handle == INVALID_HANDLE_VALUE) {
    err = GetLastError();
    derr << "DokanOpenRequestorToken failed. Error: " << err << dendl;
    return;
  }

  if (!GetTokenInformation(handle, TokenUser, buffer,
                           sizeof(buffer), &returnLength)) {
    err = GetLastError();
    derr << "GetTokenInformation failed. Error: " << err << dendl;
    CloseHandle(handle);
    return;
  }

  CloseHandle(handle);

  // Resolve the SID from the token into a human readable account name.
  PTOKEN_USER tokenUser = (PTOKEN_USER)buffer;
  if (!LookupAccountSidA(NULL, tokenUser->User.Sid, accountName,
                         &accountLength, domainName, &domainLength, &snu)) {
    err = GetLastError();
    derr << "LookupAccountSid failed. Error: " << err << dendl;
    return;
  }

  Stream << "\n\tAccountName: " << accountName << ", DomainName: " << domainName;
}
// Debug helper: log a full human readable dump of a CreateFile request
// (requestor credentials plus every recognized flag in each bitmask).
// Only invoked when the --debug flag is set, hence the unconditional
// log level below.
void print_open_params(
  LPCSTR FilePath,
  ACCESS_MASK AccessMode,
  DWORD FlagsAndAttributes,
  ULONG ShareMode,
  DWORD CreationDisposition,
  ULONG CreateOptions,
  PDOKAN_FILE_INFO DokanFileInfo)
{
  ostringstream o;
  o << "CreateFile: " << FilePath << ". ";
  print_credentials(o, DokanFileInfo);

  // CreationDisposition is an exact value, not a bitmask.
  o << "\n\tCreateDisposition: " << hex << CreationDisposition << " ";
  check_flag_eq(o, CreationDisposition, CREATE_NEW);
  check_flag_eq(o, CreationDisposition, OPEN_ALWAYS);
  check_flag_eq(o, CreationDisposition, CREATE_ALWAYS);
  check_flag_eq(o, CreationDisposition, OPEN_EXISTING);
  check_flag_eq(o, CreationDisposition, TRUNCATE_EXISTING);

  o << "\n\tShareMode: " << hex << ShareMode << " ";
  check_flag(o, ShareMode, FILE_SHARE_READ);
  check_flag(o, ShareMode, FILE_SHARE_WRITE);
  check_flag(o, ShareMode, FILE_SHARE_DELETE);

  o << "\n\tAccessMode: " << hex << AccessMode << " ";
  check_flag(o, AccessMode, GENERIC_READ);
  check_flag(o, AccessMode, GENERIC_WRITE);
  check_flag(o, AccessMode, GENERIC_EXECUTE);

  check_flag(o, AccessMode, WIN32_DELETE);
  check_flag(o, AccessMode, FILE_READ_DATA);
  check_flag(o, AccessMode, FILE_READ_ATTRIBUTES);
  check_flag(o, AccessMode, FILE_READ_EA);
  check_flag(o, AccessMode, READ_CONTROL);
  check_flag(o, AccessMode, FILE_WRITE_DATA);
  check_flag(o, AccessMode, FILE_WRITE_ATTRIBUTES);
  check_flag(o, AccessMode, FILE_WRITE_EA);
  check_flag(o, AccessMode, FILE_APPEND_DATA);
  check_flag(o, AccessMode, WRITE_DAC);
  check_flag(o, AccessMode, WRITE_OWNER);
  check_flag(o, AccessMode, SYNCHRONIZE);
  check_flag(o, AccessMode, FILE_EXECUTE);
  check_flag(o, AccessMode, STANDARD_RIGHTS_READ);
  check_flag(o, AccessMode, STANDARD_RIGHTS_WRITE);
  check_flag(o, AccessMode, STANDARD_RIGHTS_EXECUTE);

  o << "\n\tFlagsAndAttributes: " << hex << FlagsAndAttributes << " ";
  check_flag(o, FlagsAndAttributes, FILE_ATTRIBUTE_ARCHIVE);
  check_flag(o, FlagsAndAttributes, FILE_ATTRIBUTE_ENCRYPTED);
  check_flag(o, FlagsAndAttributes, FILE_ATTRIBUTE_HIDDEN);
  check_flag(o, FlagsAndAttributes, FILE_ATTRIBUTE_NORMAL);
  check_flag(o, FlagsAndAttributes, FILE_ATTRIBUTE_NOT_CONTENT_INDEXED);
  check_flag(o, FlagsAndAttributes, FILE_ATTRIBUTE_OFFLINE);
  check_flag(o, FlagsAndAttributes, FILE_ATTRIBUTE_READONLY);
  check_flag(o, FlagsAndAttributes, FILE_ATTRIBUTE_SYSTEM);
  check_flag(o, FlagsAndAttributes, FILE_ATTRIBUTE_TEMPORARY);
  check_flag(o, FlagsAndAttributes, FILE_FLAG_WRITE_THROUGH);
  check_flag(o, FlagsAndAttributes, FILE_FLAG_OVERLAPPED);
  check_flag(o, FlagsAndAttributes, FILE_FLAG_NO_BUFFERING);
  check_flag(o, FlagsAndAttributes, FILE_FLAG_RANDOM_ACCESS);
  check_flag(o, FlagsAndAttributes, FILE_FLAG_SEQUENTIAL_SCAN);
  check_flag(o, FlagsAndAttributes, FILE_FLAG_DELETE_ON_CLOSE);
  check_flag(o, FlagsAndAttributes, FILE_FLAG_BACKUP_SEMANTICS);
  check_flag(o, FlagsAndAttributes, FILE_FLAG_POSIX_SEMANTICS);
  check_flag(o, FlagsAndAttributes, FILE_FLAG_OPEN_REPARSE_POINT);
  check_flag(o, FlagsAndAttributes, FILE_FLAG_OPEN_NO_RECALL);
  check_flag(o, FlagsAndAttributes, SECURITY_ANONYMOUS);
  check_flag(o, FlagsAndAttributes, SECURITY_IDENTIFICATION);
  check_flag(o, FlagsAndAttributes, SECURITY_IMPERSONATION);
  check_flag(o, FlagsAndAttributes, SECURITY_DELEGATION);
  check_flag(o, FlagsAndAttributes, SECURITY_CONTEXT_TRACKING);
  check_flag(o, FlagsAndAttributes, SECURITY_EFFECTIVE_ONLY);
  check_flag(o, FlagsAndAttributes, SECURITY_SQOS_PRESENT);

  o << "\n\tIsDirectory: " << static_cast<bool>(DokanFileInfo->IsDirectory);

  o << "\n\tCreateOptions: " << hex << CreateOptions << " ";
  check_flag(o, CreateOptions, FILE_DIRECTORY_FILE);
  check_flag(o, CreateOptions, FILE_WRITE_THROUGH);
  check_flag(o, CreateOptions, FILE_SEQUENTIAL_ONLY);
  check_flag(o, CreateOptions, FILE_NO_INTERMEDIATE_BUFFERING);
  check_flag(o, CreateOptions, FILE_SYNCHRONOUS_IO_ALERT);
  check_flag(o, CreateOptions, FILE_SYNCHRONOUS_IO_NONALERT);
  check_flag(o, CreateOptions, FILE_NON_DIRECTORY_FILE);
  check_flag(o, CreateOptions, FILE_CREATE_TREE_CONNECTION);
  check_flag(o, CreateOptions, FILE_COMPLETE_IF_OPLOCKED);
  check_flag(o, CreateOptions, FILE_NO_EA_KNOWLEDGE);
  check_flag(o, CreateOptions, FILE_OPEN_REMOTE_INSTANCE);
  check_flag(o, CreateOptions, FILE_RANDOM_ACCESS);
  check_flag(o, CreateOptions, FILE_DELETE_ON_CLOSE);
  check_flag(o, CreateOptions, FILE_OPEN_BY_FILE_ID);
  check_flag(o, CreateOptions, FILE_OPEN_FOR_BACKUP_INTENT);
  check_flag(o, CreateOptions, FILE_NO_COMPRESSION);
  check_flag(o, CreateOptions, FILE_OPEN_REQUIRING_OPLOCK);
  check_flag(o, CreateOptions, FILE_DISALLOW_EXCLUSIVE);
  check_flag(o, CreateOptions, FILE_RESERVE_OPFILTER);
  check_flag(o, CreateOptions, FILE_OPEN_REPARSE_POINT);
  check_flag(o, CreateOptions, FILE_OPEN_NO_RECALL);
  check_flag(o, CreateOptions, FILE_OPEN_FOR_FREE_SPACE_QUERY);

  // We're using a high log level since this will only be enabled with the
  // explicit debug flag.
  dout(0) << o.str() << dendl;
}
| 6,880 | 39.005814 | 86 | cc |
null | ceph-main/src/dokan/dbg.h | // Various helpers used for debugging purposes, such as functions
// logging certain flags. Since those can be rather verbose, it's
// better if we keep them separate.
#ifndef CEPH_DOKAN_DBG_H
#define CEPH_DOKAN_DBG_H
#include "include/compat.h"
#include <sstream>
#include "ceph_dokan.h"
// Debug helper: appends a description of the caller's credentials for the
// given Dokan request to Stream.
// NOTE(review): declaration only -- see dbg.cc for the exact fields logged.
void print_credentials(
  std::ostringstream& Stream,
  PDOKAN_FILE_INFO DokanFileInfo);

// Debug helper: logs the parameters of a CreateFile-style open request
// (access mask, share mode, creation disposition, flags/attributes and
// create options) for the given path.
void print_open_params(
  LPCSTR FilePath,
  ACCESS_MASK AccessMode,
  DWORD FlagsAndAttributes,
  ULONG ShareMode,
  DWORD CreationDisposition,
  ULONG CreateOptions,
  PDOKAN_FILE_INFO DokanFileInfo);
#endif // CEPH_DOKAN_DBG_H
| 614 | 21.777778 | 65 | h |
null | ceph-main/src/dokan/options.cc | /*
* Copyright (C) 2021 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <regex>
#include "include/compat.h"
#include "include/cephfs/libcephfs.h"
#include "ceph_dokan.h"
#include "utils.h"
#include "common/ceph_argparse.h"
#include "common/config.h"
#include "common/win32/wstring.h"
#include "global/global_init.h"
// Print the ceph-dokan command line usage text (the map/unmap subcommands
// and their options), followed by the generic Ceph client options.
void print_usage() {
  const char* usage_str = R"(
Usage: ceph-dokan.exe -l <mountpoint>
map -l <mountpoint>    Map a CephFS filesystem
unmap -l <mountpoint>  Unmap a CephFS filesystem

Map options:
  -l [ --mountpoint ] arg    mountpoint (path or drive letter) (e.g -l x)
  -x [ --root-path ] arg     mount a Ceph filesystem subdirectory

  --operation-timeout arg    Dokan operation timeout. Default: 120s.

  --debug                    enable debug output
  --dokan-stderr             enable stderr Dokan logging

  --read-only                read-only mount
  -o [ --win-mount-mgr]      use the Windows mount manager
  --current-session-only     expose the mount only to the current user session
  --removable                use a removable drive
  --win-vol-name arg         The Windows volume name. Default: Ceph - <fs_name>.
  --win-vol-serial arg       The Windows volume serial number. Default: <fs_id>.
  --max-path-len             The value of the maximum path length. Default: 256.
  --file-mode                The access mode to be used when creating files.
  --dir-mode                 The access mode to be used when creating directories.

Unmap options:
  -l [ --mountpoint ] arg    mountpoint (path or drive letter) (e.g -l x).
                             It has to be the exact same mountpoint that was
                             used when the mapping was created.

Common Options:
)";

  std::cout << usage_str;
  // Append the options shared by all Ceph client tools.
  generic_client_usage();
}
// Parse the ceph-dokan command line.
//
// The generic Ceph bootstrap options are consumed first (cluster/conf
// selection plus config file, environment and argv parsing), then the
// remaining arguments are scanned for dokan-specific switches, and the
// leftover positional word (if any) selects the subcommand: help, version,
// map or unmap ("map" is the default when no subcommand is given).
//
// On success *command and *cfg are filled in and 0 is returned; any invalid
// argument produces an explanation in *err_msg and a -EINVAL return.
int parse_args(
  std::vector<const char*>& args,
  std::ostream *err_msg,
  Command *command, Config *cfg)
{
  if (args.empty()) {
    std::cout << "ceph-dokan: -h or --help for usage" << std::endl;
    return -EINVAL;
  }

  std::string conf_file_list;
  std::string cluster;
  // Strip and apply the generic Ceph options (--name, --cluster, --conf, ...)
  // before looking at our own flags.
  CephInitParameters iparams = ceph_argparse_early_args(
    args, CEPH_ENTITY_TYPE_CLIENT, &cluster, &conf_file_list);

  ConfigProxy config{false};
  config->name = iparams.name;
  config->cluster = cluster;
  if (!conf_file_list.empty()) {
    config.parse_config_files(conf_file_list.c_str(), nullptr, 0);
  } else {
    config.parse_config_files(nullptr, nullptr, 0);
  }
  config.parse_env(CEPH_ENTITY_TYPE_CLIENT);
  config.parse_argv(args);

  std::vector<const char*>::iterator i;
  std::ostringstream err;
  std::string mountpoint;
  std::string win_vol_name;
  std::string win_vol_serial;
  std::string max_path_len;
  std::string file_mode;
  std::string dir_mode;
  int thread_count;

  // Scan the remaining arguments; matched options are removed from args by
  // the ceph_argparse_* helpers, anything unmatched is skipped here and
  // reported after the subcommand has been extracted.
  for (i = args.begin(); i != args.end(); ) {
    if (ceph_argparse_flag(args, i, "-h", "--help", (char*)NULL)) {
      *command = Command::Help;
      return 0;
    } else if (ceph_argparse_flag(args, i, "-v", "--version", (char*)NULL)) {
      *command = Command::Version;
    } else if (ceph_argparse_witharg(args, i, &mountpoint,
                                     "--mountpoint", "-l", (char *)NULL)) {
      cfg->mountpoint = to_wstring(mountpoint);
    } else if (ceph_argparse_witharg(args, i, &cfg->root_path,
                                     "--root-path", "-x", (char *)NULL)) {
    } else if (ceph_argparse_flag(args, i, "--debug", (char *)NULL)) {
      cfg->debug = true;
    } else if (ceph_argparse_flag(args, i, "--dokan-stderr", (char *)NULL)) {
      cfg->dokan_stderr = true;
    } else if (ceph_argparse_flag(args, i, "--read-only", (char *)NULL)) {
      cfg->readonly = true;
    } else if (ceph_argparse_flag(args, i, "--removable", (char *)NULL)) {
      cfg->removable = true;
    } else if (ceph_argparse_flag(args, i, "--win-mount-mgr", "-o", (char *)NULL)) {
      cfg->use_win_mount_mgr = true;
    } else if (ceph_argparse_witharg(args, i, &win_vol_name,
                                     "--win-vol-name", (char *)NULL)) {
      cfg->win_vol_name = to_wstring(win_vol_name);
    } else if (ceph_argparse_witharg(args, i, &win_vol_serial,
                                     "--win-vol-serial", (char *)NULL)) {
      try {
        cfg->win_vol_serial = std::stoul(win_vol_serial);
      } catch (std::logic_error&) {
        // stoul throws invalid_argument/out_of_range, both logic_errors.
        *err_msg << "ceph-dokan: invalid volume serial number: " << win_vol_serial;
        return -EINVAL;
      }
    } else if (ceph_argparse_witharg(args, i, &max_path_len,
                                     "--max-path-len", (char*)NULL)) {
      unsigned long max_path_length = 0;
      try {
        max_path_length = std::stoul(max_path_len);
      } catch (std::logic_error&) {
        *err_msg << "ceph-dokan: invalid maximum path length: " << max_path_len;
        return -EINVAL;
      }

      // Accepted range: [256, 32767] (the latter is the Windows extended
      // path limit).
      if (max_path_length > 32767) {
        *err_msg << "ceph-dokan: maximum path length should not "
                 << "exceed " << 32767;
        return -EINVAL;
      }

      if (max_path_length < 256) {
        *err_msg << "ceph-dokan: maximum path length should not "
                 << "have a value lower than 256";
        return -EINVAL;
      }

      cfg->max_path_len = max_path_length;
    } else if (ceph_argparse_witharg(args, i, &file_mode, "--file-mode", (char *)NULL)) {
      mode_t mode;
      try {
        mode = std::stol(file_mode, nullptr, 8);
      } catch (std::logic_error&) {
        *err_msg << "ceph-dokan: invalid file access mode: " << file_mode;
        return -EINVAL;
      }
      // Exactly three octal digits, non-zero (e.g. "644").
      if (!std::regex_match(file_mode, std::regex("^[0-7]{3}$"))
          || mode < 01 || mode > 0777) {
        *err_msg << "ceph-dokan: invalid file access mode: " << file_mode;
        return -EINVAL;
      }
      cfg->file_mode = mode;
    } else if (ceph_argparse_witharg(args, i, &dir_mode, "--dir-mode", (char *)NULL)) {
      mode_t mode;
      try {
        mode = std::stol(dir_mode, nullptr, 8);
      } catch (std::logic_error&) {
        *err_msg << "ceph-dokan: invalid directory access mode: " << dir_mode;
        return -EINVAL;
      }
      // Same validation as --file-mode.
      if (!std::regex_match(dir_mode, std::regex("^[0-7]{3}$"))
          || mode < 01 || mode > 0777) {
        *err_msg << "ceph-dokan: invalid directory access mode: " << dir_mode;
        return -EINVAL;
      }
      cfg->dir_mode = mode;
    } else if (ceph_argparse_flag(args, i, "--current-session-only", (char *)NULL)) {
      cfg->current_session_only = true;
    } else if (ceph_argparse_witharg(args, i, &thread_count,
                                     err, "--thread-count", "-t", (char *)NULL)) {
      // Accepted but ignored for backwards compatibility.
      std::cerr << "ceph-dokan: the thread count parameter is not supported by Dokany v2 "
                << "and has been deprecated." << std::endl;
    } else if (ceph_argparse_witharg(args, i, (int*)&cfg->operation_timeout,
                                     err, "--operation-timeout", (char *)NULL)) {
      if (!err.str().empty()) {
        *err_msg << "ceph-dokan: " << err.str();
        return -EINVAL;
      }
      if (cfg->operation_timeout < 0) {
        *err_msg << "ceph-dokan: Invalid argument for operation-timeout";
        return -EINVAL;
      }
    } else {
      ++i;
    }
  }

  // The mount manager exposes the drive to every session, which conflicts
  // with a current-session-only mount.
  if (cfg->use_win_mount_mgr && cfg->current_session_only) {
    *err_msg << "ceph-dokan: The mount manager always mounts the drive "
             << "for all user sessions.";
    return -EINVAL;
  }

  Command cmd = Command::None;
  if (args.begin() != args.end()) {
    if (strcmp(*args.begin(), "help") == 0) {
      cmd = Command::Help;
    } else if (strcmp(*args.begin(), "version") == 0) {
      cmd = Command::Version;
    } else if (strcmp(*args.begin(), "map") == 0) {
      cmd = Command::Map;
    } else if (strcmp(*args.begin(), "unmap") == 0) {
      cmd = Command::Unmap;
    } else {
      *err_msg << "ceph-dokan: unknown command: " << *args.begin();
      return -EINVAL;
    }
    args.erase(args.begin());
  }
  if (cmd == Command::None) {
    // The default command.
    cmd = Command::Map;
  }

  // map/unmap cannot work without a mountpoint.
  switch (cmd) {
    case Command::Map:
    case Command::Unmap:
      if (cfg->mountpoint.empty()) {
        *err_msg << "ceph-dokan: missing mountpoint.";
        return -EINVAL;
      }
      break;
    default:
      break;
  }

  // Anything left over at this point was not recognized.
  if (args.begin() != args.end()) {
    *err_msg << "ceph-dokan: unknown args: " << *args.begin();
    return -EINVAL;
  }

  *command = cmd;
  return 0;
}
// Translate the parsed ceph-dokan Config into a DOKAN_OPTIONS structure.
// Always returns 0.
int set_dokan_options(Config *cfg, PDOKAN_OPTIONS dokan_options) {
  ZeroMemory(dokan_options, sizeof(DOKAN_OPTIONS));
  dokan_options->Version = DOKAN_VERSION;
  dokan_options->MountPoint = cfg->mountpoint.c_str();
  // Dokan expects the operation timeout in milliseconds.
  dokan_options->Timeout = cfg->operation_timeout * 1000;

  // Map each boolean configuration switch onto its Dokan option flag.
  const struct {
    bool enabled;
    ULONG flag;
  } flag_map[] = {
    {cfg->removable,            DOKAN_OPTION_REMOVABLE},
    {cfg->use_win_mount_mgr,    DOKAN_OPTION_MOUNT_MANAGER},
    {cfg->current_session_only, DOKAN_OPTION_CURRENT_SESSION},
    {cfg->readonly,             DOKAN_OPTION_WRITE_PROTECT},
    {cfg->debug,                DOKAN_OPTION_DEBUG},
    {cfg->dokan_stderr,         DOKAN_OPTION_STDERR},
  };
  for (const auto& entry : flag_map) {
    if (entry.enabled)
      dokan_options->Options |= entry.flag;
  }

  return 0;
}
| 9,453 | 33.630037 | 90 | cc |
null | ceph-main/src/dokan/utils.cc | /*
* ceph-dokan - Win32 CephFS client based on Dokan
*
* Copyright (C) 2021 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "utils.h"
// Convert a Unix time_t (seconds since 1970) to a Windows FILETIME
// (100-nanosecond intervals since January 1, 1601 UTC).
void to_filetime(time_t t, LPFILETIME pft)
{
  // Scale seconds to 100ns ticks and shift the epoch from 1970 to 1601.
  ULARGE_INTEGER ui;
  ui.QuadPart = Int32x32To64(t, 10000000) + 116444736000000000;
  pft->dwLowDateTime = ui.LowPart;
  pft->dwHighDateTime = ui.HighPart;
}
// Convert a Windows FILETIME back to a Unix time_t: reassemble the 64-bit
// tick count, turn 100ns ticks into seconds and shift the epoch from 1601
// back to 1970.
void to_unix_time(FILETIME ft, time_t *t)
{
  const ULONGLONG ticks =
    (static_cast<ULONGLONG>(ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
  *t = (LONGLONG)(ticks / 10000000ULL - 11644473600ULL);
}
| 758 | 23.483871 | 63 | cc |
null | ceph-main/src/dokan/utils.h | /*
* ceph-dokan - Win32 CephFS client based on Dokan
*
* Copyright (C) 2021 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "include/compat.h"
// Convert a Unix time_t to a Windows FILETIME (100ns ticks since 1601 UTC).
void to_filetime(time_t t, LPFILETIME pft);
// Convert a Windows FILETIME back to a Unix time_t (seconds since 1970).
void to_unix_time(FILETIME ft, time_t *t);
| 446 | 22.526316 | 61 | h |
null | ceph-main/src/erasure-code/ErasureCode.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <algorithm>
#include <cerrno>
#include "ErasureCode.h"
#include "common/strtol.h"
#include "include/buffer.h"
#include "crush/CrushWrapper.h"
#include "osd/osd_types.h"
#define DEFAULT_RULE_ROOT "default"
#define DEFAULT_RULE_FAILURE_DOMAIN "host"
using std::make_pair;
using std::map;
using std::ostream;
using std::pair;
using std::set;
using std::string;
using std::vector;
using ceph::bufferlist;
namespace ceph {
const unsigned ErasureCode::SIMD_ALIGN = 32;
// Extract the CRUSH placement settings from the profile (falling back to
// the defaults) and remember the profile for get_profile().
// All three keys are read unconditionally so every diagnostic reaches *ss.
int ErasureCode::init(
  ErasureCodeProfile &profile,
  std::ostream *ss)
{
  const int r_root = to_string("crush-root", profile,
                               &rule_root,
                               DEFAULT_RULE_ROOT, ss);
  const int r_domain = to_string("crush-failure-domain", profile,
                                 &rule_failure_domain,
                                 DEFAULT_RULE_FAILURE_DOMAIN, ss);
  const int r_class = to_string("crush-device-class", profile,
                                &rule_device_class,
                                "", ss);
  const int err = r_root | r_domain | r_class;
  if (err)
    return err;
  _profile = profile;
  return 0;
}
/**
 * Create a simple "indep" CRUSH rule named @p name for an erasure coded
 * pool, using the root / failure domain / device class parsed by init().
 *
 * @param [in] name rule name to create
 * @param [in] crush crushmap in which the rule is created
 * @param [out] ss informative messages when an error occurs
 * @return the rule id on success or a negative errno on error.
 */
int ErasureCode::create_rule(
  const std::string &name,
  CrushWrapper &crush,
  std::ostream *ss) const
{
  // add_simple_rule() already returns either the new rule id or a
  // negative errno, so it can be forwarded directly (the previous
  // `if (ruleid < 0) return ruleid; return ruleid;` was redundant).
  return crush.add_simple_rule(
    name,
    rule_root,
    rule_failure_domain,
    rule_device_class,
    "indep",
    pg_pool_t::TYPE_ERASURE,
    ss);
}
// Validate the basic code parameters: at least two data chunks (k) and at
// least one coding chunk (m). On failure, an explanation is written to
// *ss and -EINVAL is returned.
int ErasureCode::sanity_check_k_m(int k, int m, ostream *ss)
{
  if (k >= 2 && m >= 1)
    return 0;
  if (k < 2)
    *ss << "k=" << k << " must be >= 2" << std::endl;
  else
    *ss << "m=" << m << " must be >= 1" << std::endl;
  return -EINVAL;
}
// Map a logical chunk position to its physical index; when no (or a
// shorter) mapping is configured the identity mapping applies.
int ErasureCode::chunk_index(unsigned int i) const
{
  if (i < chunk_mapping.size())
    return chunk_mapping[i];
  return i;
}
int ErasureCode::_minimum_to_decode(const set<int> &want_to_read,
const set<int> &available_chunks,
set<int> *minimum)
{
if (includes(available_chunks.begin(), available_chunks.end(),
want_to_read.begin(), want_to_read.end())) {
*minimum = want_to_read;
} else {
unsigned int k = get_data_chunk_count();
if (available_chunks.size() < (unsigned)k)
return -EIO;
set<int>::iterator i;
unsigned j;
for (i = available_chunks.begin(), j = 0; j < (unsigned)k; ++i, j++)
minimum->insert(*i);
}
return 0;
}
int ErasureCode::minimum_to_decode(const set<int> &want_to_read,
const set<int> &available_chunks,
map<int, vector<pair<int, int>>> *minimum)
{
set<int> minimum_shard_ids;
int r = _minimum_to_decode(want_to_read, available_chunks, &minimum_shard_ids);
if (r != 0) {
return r;
}
vector<pair<int, int>> default_subchunks;
default_subchunks.push_back(make_pair(0, get_sub_chunk_count()));
for (auto &&id : minimum_shard_ids) {
minimum->insert(make_pair(id, default_subchunks));
}
return 0;
}
// The base implementation ignores the per-chunk costs: it simply collects
// the available chunk ids and defers to the cost-free selection.
int ErasureCode::minimum_to_decode_with_cost(const set<int> &want_to_read,
                                             const map<int, int> &available,
                                             set<int> *minimum)
{
  set<int> available_chunks;
  for (const auto &chunk_and_cost : available)
    available_chunks.insert(chunk_and_cost.first);
  return _minimum_to_decode(want_to_read, available_chunks, minimum);
}
/**
 * Slice the raw input into the k data chunks and allocate buffers for the
 * m coding chunks. Every chunk ends up SIMD_ALIGN aligned and exactly
 * blocksize bytes long; input bytes not filling a whole chunk are zero
 * padded. Always returns 0.
 */
int ErasureCode::encode_prepare(const bufferlist &raw,
                                map<int, bufferlist> &encoded) const
{
  unsigned int k = get_data_chunk_count();
  unsigned int m = get_chunk_count() - k;
  unsigned blocksize = get_chunk_size(raw.length());
  // Number of trailing data chunks not fully covered by the input; those
  // need (partial or full) zero padding.
  unsigned padded_chunks = k - raw.length() / blocksize;
  bufferlist prepared = raw;

  // Data chunks fully backed by input bytes: aligned views of the input.
  for (unsigned int i = 0; i < k - padded_chunks; i++) {
    bufferlist &chunk = encoded[chunk_index(i)];
    chunk.substr_of(prepared, i * blocksize, blocksize);
    chunk.rebuild_aligned_size_and_memory(blocksize, SIMD_ALIGN);
    ceph_assert(chunk.is_contiguous());
  }
  if (padded_chunks) {
    // First padded chunk: copy the remaining input bytes, zero the rest.
    unsigned remainder = raw.length() - (k - padded_chunks) * blocksize;
    bufferptr buf(buffer::create_aligned(blocksize, SIMD_ALIGN));

    raw.begin((k - padded_chunks) * blocksize).copy(remainder, buf.c_str());
    buf.zero(remainder, blocksize - remainder);
    encoded[chunk_index(k-padded_chunks)].push_back(std::move(buf));

    // Any further data chunks are entirely zero.
    for (unsigned int i = k - padded_chunks + 1; i < k; i++) {
      bufferptr buf(buffer::create_aligned(blocksize, SIMD_ALIGN));
      buf.zero();
      encoded[chunk_index(i)].push_back(std::move(buf));
    }
  }
  // Coding chunks: uninitialized aligned buffers to be filled by
  // encode_chunks().
  for (unsigned int i = k; i < k + m; i++) {
    bufferlist &chunk = encoded[chunk_index(i)];
    chunk.push_back(buffer::create_aligned(blocksize, SIMD_ALIGN));
  }
  return 0;
}
int ErasureCode::encode(const set<int> &want_to_encode,
const bufferlist &in,
map<int, bufferlist> *encoded)
{
unsigned int k = get_data_chunk_count();
unsigned int m = get_chunk_count() - k;
bufferlist out;
int err = encode_prepare(in, *encoded);
if (err)
return err;
encode_chunks(want_to_encode, encoded);
for (unsigned int i = 0; i < k + m; i++) {
if (want_to_encode.count(i) == 0)
encoded->erase(i);
}
return 0;
}
/**
 * Reconstruct chunks from the available ones. When every wanted chunk is
 * already present it is returned as-is; otherwise aligned buffers are
 * prepared for all chunks and the plugin's decode_chunks() rebuilds the
 * missing ones.
 */
int ErasureCode::_decode(const set<int> &want_to_read,
                         const map<int, bufferlist> &chunks,
                         map<int, bufferlist> *decoded)
{
  vector<int> have;
  have.reserve(chunks.size());
  for (map<int, bufferlist>::const_iterator i = chunks.begin();
       i != chunks.end();
       ++i) {
    have.push_back(i->first);
  }
  // Fast path: everything the caller wants survived, no decoding needed.
  if (includes(
	have.begin(), have.end(), want_to_read.begin(), want_to_read.end())) {
    for (set<int>::iterator i = want_to_read.begin();
	 i != want_to_read.end();
	 ++i) {
      (*decoded)[*i] = chunks.find(*i)->second;
    }
    return 0;
  }
  unsigned int k = get_data_chunk_count();
  unsigned int m = get_chunk_count() - k;
  // All chunks have the same size; take it from the first available one.
  unsigned blocksize = (*chunks.begin()).second.length();
  for (unsigned int i = 0; i < k + m; i++) {
    if (chunks.find(i) == chunks.end()) {
      // Missing chunk: provide an aligned scratch buffer for the plugin
      // to reconstruct into.
      bufferlist tmp;
      bufferptr ptr(buffer::create_aligned(blocksize, SIMD_ALIGN));
      tmp.push_back(ptr);
      tmp.claim_append((*decoded)[i]);
      (*decoded)[i].swap(tmp);
    } else {
      // Available chunk: copy it in and ensure SIMD alignment.
      (*decoded)[i] = chunks.find(i)->second;
      (*decoded)[i].rebuild_aligned(SIMD_ALIGN);
    }
  }
  return decode_chunks(want_to_read, chunks, decoded);
}
// Public decode entry point. The base implementation ignores chunk_size
// and simply forwards to _decode().
int ErasureCode::decode(const set<int> &want_to_read,
                        const map<int, bufferlist> &chunks,
                        map<int, bufferlist> *decoded, int chunk_size)
{
  return _decode(want_to_read, chunks, decoded);
}
// Base-class profile parsing: only the optional chunk "mapping" entry is
// handled here; plugins extend this with their own keys.
int ErasureCode::parse(const ErasureCodeProfile &profile,
		       ostream *ss)
{
  return to_mapping(profile, ss);
}
// Return the chunk placement permutation built by to_mapping(); empty
// when the default identity mapping is in use.
const vector<int> &ErasureCode::get_chunk_mapping() const {
  return chunk_mapping;
}
int ErasureCode::to_mapping(const ErasureCodeProfile &profile,
ostream *ss)
{
if (profile.find("mapping") != profile.end()) {
std::string mapping = profile.find("mapping")->second;
int position = 0;
vector<int> coding_chunk_mapping;
for(std::string::iterator it = mapping.begin(); it != mapping.end(); ++it) {
if (*it == 'D')
chunk_mapping.push_back(position);
else
coding_chunk_mapping.push_back(position);
position++;
}
chunk_mapping.insert(chunk_mapping.end(),
coding_chunk_mapping.begin(),
coding_chunk_mapping.end());
}
return 0;
}
/**
 * Read profile[name] as a base-10 integer into *value, defaulting the
 * entry (in the profile itself) to default_value when missing or empty.
 *
 * On conversion failure an explanation goes to *ss, *value is set to the
 * parsed default, and -EINVAL is returned. Returns 0 on success.
 */
int ErasureCode::to_int(const std::string &name,
			ErasureCodeProfile &profile,
			int *value,
			const std::string &default_value,
			ostream *ss)
{
  // Single lookup instead of the previous three find() calls; re-find
  // only when the default had to be inserted.
  auto it = profile.find(name);
  if (it == profile.end() || it->second.empty()) {
    profile[name] = default_value;
    it = profile.find(name);
  }
  const std::string &p = it->second;
  std::string err;
  int r = strict_strtol(p.c_str(), 10, &err);
  if (!err.empty()) {
    *ss << "could not convert " << name << "=" << p
	<< " to int because " << err
	<< ", set to default " << default_value << std::endl;
    // Still hand back a usable value alongside the error return.
    *value = strict_strtol(default_value.c_str(), 10, &err);
    return -EINVAL;
  }
  *value = r;
  return 0;
}
// Read profile[name] as a boolean into *value, defaulting the entry to
// default_value when missing or empty. Only "yes" and "true" parse as
// true; everything else is false. Always returns 0.
int ErasureCode::to_bool(const std::string &name,
			 ErasureCodeProfile &profile,
			 bool *value,
			 const std::string &default_value,
			 ostream *ss)
{
  auto it = profile.find(name);
  if (it == profile.end() || it->second.empty()) {
    profile[name] = default_value;
    it = profile.find(name);
  }
  const std::string &p = it->second;
  *value = (p == "yes" || p == "true");
  return 0;
}
// Read profile[name] into *value, defaulting the entry (in the profile
// itself) to default_value when missing or empty. Always returns 0.
int ErasureCode::to_string(const std::string &name,
			   ErasureCodeProfile &profile,
			   std::string *value,
			   const std::string &default_value,
			   ostream *ss)
{
  auto it = profile.find(name);
  if (it == profile.end() || it->second.empty()) {
    profile[name] = default_value;
    it = profile.find(name);
  }
  *value = it->second;
  return 0;
}
// Decode every data chunk and concatenate their payloads (in data-chunk
// order, honoring the chunk mapping) into *decoded.
int ErasureCode::decode_concat(const map<int, bufferlist> &chunks,
			       bufferlist *decoded)
{
  const unsigned int data_chunks = get_data_chunk_count();
  set<int> want_to_read;
  for (unsigned int i = 0; i < data_chunks; i++)
    want_to_read.insert(chunk_index(i));
  map<int, bufferlist> decoded_map;
  const int r = _decode(want_to_read, chunks, &decoded_map);
  if (r != 0)
    return r;
  for (unsigned int i = 0; i < data_chunks; i++)
    decoded->claim_append(decoded_map[chunk_index(i)]);
  return r;
}
}
| 9,856 | 27.243553 | 81 | cc |
null | ceph-main/src/erasure-code/ErasureCode.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2014 Cloudwatt <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_H
#define CEPH_ERASURE_CODE_H
/*! @file ErasureCode.h
@brief Base class for erasure code plugins implementors
*/
#include "ErasureCodeInterface.h"
namespace ceph {
// Partial implementation of ErasureCodeInterface providing the plumbing
// shared by all erasure code plugins: profile parsing, chunk mapping,
// buffer preparation/alignment and the generic encode/decode drivers.
// Plugins implement at least encode_chunks()/decode_chunks().
class ErasureCode : public ErasureCodeInterface {
 public:
  // Required alignment (bytes) of chunk buffers, so SIMD based plugins
  // can process them directly.
  static const unsigned SIMD_ALIGN;

  // Permutation built from the profile "mapping" entry by to_mapping();
  // empty means the identity mapping.
  std::vector<int> chunk_mapping;
  // Profile this instance was initialized with (see get_profile()).
  ErasureCodeProfile _profile;

  // for CRUSH rule
  std::string rule_root;
  std::string rule_failure_domain;
  std::string rule_device_class;

  ~ErasureCode() override {}

  /// Parse the crush-* profile entries and remember the profile.
  int init(ceph::ErasureCodeProfile &profile, std::ostream *ss) override;

  const ErasureCodeProfile &get_profile() const override {
    return _profile;
  }

  /// Create a simple "indep" CRUSH rule for this code.
  int create_rule(const std::string &name,
		  CrushWrapper &crush,
		  std::ostream *ss) const override;

  /// Reject k < 2 or m < 1, writing the reason to *ss.
  int sanity_check_k_m(int k, int m, std::ostream *ss);

  unsigned int get_coding_chunk_count() const override {
    return get_chunk_count() - get_data_chunk_count();
  }

  // Scalar codes have exactly one sub-chunk per chunk.
  virtual int get_sub_chunk_count() override {
    return 1;
  }

  /// Chunk-set selection helper shared by the minimum_to_decode variants.
  virtual int _minimum_to_decode(const std::set<int> &want_to_read,
				 const std::set<int> &available_chunks,
				 std::set<int> *minimum);

  int minimum_to_decode(const std::set<int> &want_to_read,
                        const std::set<int> &available,
                        std::map<int, std::vector<std::pair<int, int>>> *minimum) override;

  int minimum_to_decode_with_cost(const std::set<int> &want_to_read,
                                  const std::map<int, int> &available,
                                  std::set<int> *minimum) override;

  /// Slice + pad the input into aligned data chunks and allocate the
  /// coding chunk buffers.
  int encode_prepare(const bufferlist &raw,
                     std::map<int, bufferlist> &encoded) const;

  int encode(const std::set<int> &want_to_encode,
             const bufferlist &in,
             std::map<int, bufferlist> *encoded) override;

  int decode(const std::set<int> &want_to_read,
             const std::map<int, bufferlist> &chunks,
             std::map<int, bufferlist> *decoded, int chunk_size) override;

  /// Decode driver: fast path when all wanted chunks are present,
  /// otherwise prepares buffers and calls decode_chunks().
  virtual int _decode(const std::set<int> &want_to_read,
                      const std::map<int, bufferlist> &chunks,
                      std::map<int, bufferlist> *decoded);

  const std::vector<int> &get_chunk_mapping() const override;

  /// Build chunk_mapping from the profile "mapping" entry.
  int to_mapping(const ErasureCodeProfile &profile,
		 std::ostream *ss);

  // Typed profile accessors; missing/empty entries are replaced by the
  // default value inside the profile itself.
  static int to_int(const std::string &name,
		    ErasureCodeProfile &profile,
		    int *value,
		    const std::string &default_value,
		    std::ostream *ss);

  static int to_bool(const std::string &name,
		     ErasureCodeProfile &profile,
		     bool *value,
		     const std::string &default_value,
		     std::ostream *ss);

  static int to_string(const std::string &name,
		       ErasureCodeProfile &profile,
		       std::string *value,
		       const std::string &default_value,
		       std::ostream *ss);

  /// Decode all data chunks and concatenate them into *decoded.
  int decode_concat(const std::map<int, bufferlist> &chunks,
		    bufferlist *decoded) override;

 protected:
  /// Base-class profile parsing (chunk mapping only).
  int parse(const ErasureCodeProfile &profile,
	    std::ostream *ss);

 private:
  /// Logical position -> physical chunk index (identity when unmapped).
  int chunk_index(unsigned int i) const;
};
#endif
| 3,679 | 28.206349 | 80 | h |
null | ceph-main/src/erasure-code/ErasureCodeInterface.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013 Cloudwatt <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_INTERFACE_H
#define CEPH_ERASURE_CODE_INTERFACE_H
/*! @file ErasureCodeInterface.h
@brief Interface provided by erasure code plugins
The erasure coded pools rely on plugins implementing
**ErasureCodeInterface** to encode and decode content. All codes
are systematic (i.e. the data is not mangled and can be
reconstructed by concatenating chunks ).
Methods returning an **int** return **0** on success and a
negative value on error. If the value returned on error is not
explained in **ErasureCodeInterface**, the sources or the
documentation of the interface implementer (i.e. the plugin ) must
be read to figure out what it means. It is recommended that each
error code matches an *errno* value that relates to the cause of
the error.
If an object is small enough, the caller can process it with
one call to the **encode** or **decode** method.
+---------------- coded object O -------------------------+
|+----------------+ +----------------+ +----------------+ |
|| chunk 0 | | chunk 1 | | chunk 2 | |
|| [0,N) | | [N,2N) | | [2N,3N) | |
|+----------------+ +----------------+ +----------------+ |
+------^--------------------------------------------------+
|
chunk B / C | offset B % C ( where C is the chunk size )
|
+-----^---- raw object O ----+------+
| B [0,X) | pad |
+----------------------------+------+
The object size is padded so that each chunk is of the same size.
In the example above, if the actual object size was X, then it
will be padded to 2N >= X assuming there are two data chunks (0
and 1) and one coding chunk (2).
For chunks of size C, byte B of the object is found in chunk number
B / C at offset B % C.
If an object is too large to be encoded in memory, the caller
should divide it in smaller units named **stripes**.
+---------------------- object O -------------------------+
|+----------------+ +----------------+ +----------------+ |
stripe || chunk 0 | | chunk 1 | | chunk 2 | |
0 || [0,N) | | [N,2N) | | [2N,3N) | |
|+----------------+ +----------------+ +----------------+ |
|+----------------+ +----------------+ +----------------+ |
stripe || chunk 0 | | chunk 1 | | chunk 2 | |
1 || [X,M) | | [X+M,X+2M) | | [X+2M,X+3M) | |
|| | | | | | |
|+----------------+ +----------------+ +----------------+ |
| ... |
+---------------------------------------------------------+
The interface does not concern itself with stripes nor does it
impose constraints on the size of each stripe. Variable names in
the interface always use **object** and never use **stripe**.
Assuming the interface implementer provides three data chunks ( K
= 3 ) and two coding chunks ( M = 2 ), a buffer could be encoded as
follows:
~~~~~~~~~~~~~~~~{.c}
set<int> want_to_encode(0, 1, 2, // data chunks
3, 4 // coding chunks
);
bufferlist in = "ABCDEF";
map<int, bufferlist> encoded
encode(want_to_encode, in, &encoded);
encoded[0] == "AB" // data chunk 0
encoded[1] == "CD" // data chunk 1
encoded[2] == "EF" // data chunk 2
encoded[3] // coding chunk 0
encoded[4] // coding chunk 1
~~~~~~~~~~~~~~~~
The **minimum_to_decode_with_cost** method can be used to minimize
the cost of fetching the chunks necessary to retrieve a given
content. For instance, if encoded[2] (contained **EF**) is missing
and accessing encoded[3] (the first coding chunk) is more
expensive than accessing encoded[4] (the second coding chunk),
**minimum_to_decode_with_cost** is expected to chose the first
coding chunk.
~~~~~~~~~~~~~~~~{.c}
set<int> want_to_read(2); // want the chunk containing "EF"
map<int,int> available(
0 => 1, // data chunk 0 : available and costs 1
1 => 1, // data chunk 1 : available and costs 1
// data chunk 2 : missing
3 => 9, // coding chunk 1 : available and costs 9
4 => 1, // coding chunk 2 : available and costs 1
);
set<int> minimum;
minimum_to_decode_with_cost(want_to_read,
available,
&minimum);
minimum == set<int>(0, 1, 4); // NOT set<int>(0, 1, 3);
~~~~~~~~~~~~~~~~
It sets **minimum** with three chunks to reconstruct the desired
data chunk and will pick the second coding chunk ( 4 ) because it
is less expensive ( 1 < 9 ) to retrieve than the first coding
chunk ( 3 ). The caller is responsible for retrieving the chunks
and call **decode** to reconstruct the second data chunk.
~~~~~~~~~~~~~~~~{.c}
map<int,bufferlist> chunks;
for i in minimum.keys():
chunks[i] = fetch_chunk(i); // get chunk from storage
map<int, bufferlist> decoded;
decode(want_to_read, chunks, &decoded);
decoded[2] == "EF"
~~~~~~~~~~~~~~~~
The semantic of the cost value is defined by the caller and must
be known to the implementer. For instance, it may be more
expensive to retrieve two chunks with cost 1 + 9 = 10 than two
chunks with cost 6 + 6 = 12.
*/
#include <map>
#include <set>
#include <vector>
#include <ostream>
#include <memory>
#include <string>
#include "include/buffer_fwd.h"
class CrushWrapper;
namespace ceph {
typedef std::map<std::string,std::string> ErasureCodeProfile;

// Render a profile as "{key1=val1,key2=val2}" (keys in map order).
inline std::ostream& operator<<(std::ostream& out, const ErasureCodeProfile& profile) {
  out << "{";
  bool first = true;
  for (const auto& entry : profile) {
    if (!first)
      out << ",";
    first = false;
    out << entry.first << "=" << entry.second;
  }
  out << "}";
  return out;
}
class ErasureCodeInterface {
public:
virtual ~ErasureCodeInterface() {}
/**
* Initialize the instance according to the content of
* **profile**. The **ss** stream is set with debug messages or
* error messages, the content of which depend on the
* implementation.
*
* Return 0 on success or a negative errno on error. When
* returning on error, the implementation is expected to
* provide a human readable explanation in **ss**.
*
* @param [in] profile a key/value map
* @param [out] ss contains informative messages when an error occurs
* @return 0 on success or a negative errno on error.
*/
virtual int init(ErasureCodeProfile &profile, std::ostream *ss) = 0;
/**
* Return the profile that was used to initialize the instance
* with the **init** method.
*
* @return the profile in use by the instance
*/
virtual const ErasureCodeProfile &get_profile() const = 0;
/**
* Create a new rule in **crush** under the name **name**,
* unless it already exists.
*
* Return the rule number that was created on success. If a
* rule **name** already exists, return -EEXIST, otherwise
* return a negative value indicating an error with a semantic
* defined by the implementation.
*
* @param [in] name of the rule to create
* @param [in] crush crushmap in which the rule is created
* @param [out] ss contains informative messages when an error occurs
* @return a rule on success or a negative errno on error.
*/
virtual int create_rule(const std::string &name,
CrushWrapper &crush,
std::ostream *ss) const = 0;
/**
* Return the number of chunks created by a call to the **encode**
* method.
*
* In the simplest case it can be K + M, i.e. the number
* of data chunks (K) plus the number of parity chunks
* (M). However, if the implementation provides local parity there
* could be an additional overhead.
*
* @return the number of chunks created by encode()
*/
virtual unsigned int get_chunk_count() const = 0;
/**
* Return the number of data chunks created by a call to the
* **encode** method. The data chunks contain the buffer provided
* to **encode**, verbatim, with padding at the end of the last
* chunk.
*
* @return the number of data chunks created by encode()
*/
virtual unsigned int get_data_chunk_count() const = 0;
/**
* Return the number of coding chunks created by a call to the
* **encode** method. The coding chunks are used to recover from
* the loss of one or more chunks. If there is one coding chunk,
* it is possible to recover from the loss of exactly one
* chunk. If there are two coding chunks, it is possible to
* recover from the loss of at most two chunks, etc.
*
* @return the number of coding chunks created by encode()
*/
virtual unsigned int get_coding_chunk_count() const = 0;
/**
* Return the number of sub chunks chunks created by a call to the
* **encode** method. Each chunk can be viewed as union of sub-chunks
* For the case of array codes, the sub-chunk count > 1, where as the
* scalar codes have sub-chunk count = 1.
*
* @return the number of sub-chunks per chunk created by encode()
*/
virtual int get_sub_chunk_count() = 0;
/**
* Return the size (in bytes) of a single chunk created by a call
* to the **decode** method. The returned size multiplied by
* **get_chunk_count()** is greater or equal to **object_size**.
*
* If the object size is properly aligned, the chunk size is
* **object_size / get_chunk_count()**. However, if
* **object_size** is not a multiple of **get_chunk_count** or if
* the implementation imposes additional alignment constraints,
* the chunk size may be larger.
*
* The byte found at offset **B** of the original object is mapped
* to chunk **B / get_chunk_size()** at offset **B % get_chunk_size()**.
*
* @param [in] object_size the number of bytes of the object to **encode()**
* @return the size (in bytes) of a single chunk created by **encode()**
*/
virtual unsigned int get_chunk_size(unsigned int object_size) const = 0;
/**
* Compute the smallest subset of **available** chunks that needs
* to be retrieved in order to successfully decode
* **want_to_read** chunks.
*
* It is strictly equivalent to calling
* **minimum_to_decode_with_cost** where each **available** chunk
* has the same cost.
*
* @see minimum_to_decode_with_cost
*
* @param [in] want_to_read chunk indexes to be decoded
* @param [in] available chunk indexes containing valid data
* @param [out] minimum chunk indexes and corresponding
* subchunk index offsets, count.
* @return **0** on success or a negative errno on error.
*/
virtual int minimum_to_decode(const std::set<int> &want_to_read,
const std::set<int> &available,
std::map<int, std::vector<std::pair<int, int>>>
*minimum) = 0;
/**
* Compute the smallest subset of **available** chunks that needs
* to be retrieved in order to successfully decode
* **want_to_read** chunks. If there are more than one possible
* subset, select the subset that minimizes the overall retrieval
* cost.
*
* The **available** parameter maps chunk indexes to their
* retrieval cost. The higher the cost value, the more costly it
* is to retrieve the chunk content.
*
* Returns -EIO if there are not enough chunk indexes in
* **available** to decode **want_to_read**.
*
* Returns 0 on success.
*
* The **minimum** argument must be a pointer to an empty set.
*
* @param [in] want_to_read chunk indexes to be decoded
* @param [in] available map chunk indexes containing valid data
* to their retrieval cost
* @param [out] minimum chunk indexes to retrieve
* @return **0** on success or a negative errno on error.
*/
virtual int minimum_to_decode_with_cost(const std::set<int> &want_to_read,
const std::map<int, int> &available,
std::set<int> *minimum) = 0;
/**
* Encode the content of **in** and store the result in
* **encoded**. All buffers pointed to by **encoded** have the
* same size. The **encoded** map contains at least all chunk
* indexes found in the **want_to_encode** set.
*
* The **encoded** map is expected to be a pointer to an empty
* map.
*
* Assuming the **in** parameter is **length** bytes long,
* the concatenation of the first **length** bytes of the
* **encoded** buffers is equal to the content of the **in**
* parameter.
*
* The **encoded** map may contain more chunks than required by
* **want_to_encode** and the caller is expected to permanently
* store all of them, not just the chunks listed in
* **want_to_encode**.
*
* The **encoded** map may contain pointers to data stored in
* the **in** parameter. If the caller modifies the content of
* **in** after calling the encode method, it may have a side
* effect on the content of **encoded**.
*
* The **encoded** map may contain pointers to buffers allocated
* by the encode method. They will be freed when **encoded** is
* freed. The allocation method is not specified.
*
* Returns 0 on success.
*
* @param [in] want_to_encode chunk indexes to be encoded
* @param [in] in data to be encoded
* @param [out] encoded map chunk indexes to chunk data
* @return **0** on success or a negative errno on error.
*/
virtual int encode(const std::set<int> &want_to_encode,
const bufferlist &in,
std::map<int, bufferlist> *encoded) = 0;
virtual int encode_chunks(const std::set<int> &want_to_encode,
std::map<int, bufferlist> *encoded) = 0;
/**
* Decode the **chunks** and store at least **want_to_read**
* chunks in **decoded**.
*
* The **decoded** map must be a pointer to an empty map.
*
* There must be enough **chunks** ( as returned by
* **minimum_to_decode** or **minimum_to_decode_with_cost** ) to
* perform a successful decoding of all chunks listed in
* **want_to_read**.
*
* All buffers pointed by **in** must have the same size.
*
* On success, the **decoded** map may contain more chunks than
* required by **want_to_read** and they can safely be used by the
* caller.
*
* If a chunk is listed in **want_to_read** and there is a
* corresponding **bufferlist** in **chunks**, it will be
* referenced in **decoded**. If not it will be reconstructed from
* the existing chunks.
*
* Because **decoded** may contain pointers to data found in
* **chunks**, modifying the content of **chunks** after calling
* decode may have a side effect on the content of **decoded**.
*
* Returns 0 on success.
*
* @param [in] want_to_read chunk indexes to be decoded
* @param [in] chunks map chunk indexes to chunk data
* @param [out] decoded map chunk indexes to chunk data
* @param [in] chunk_size chunk size
* @return **0** on success or a negative errno on error.
*/
virtual int decode(const std::set<int> &want_to_read,
const std::map<int, bufferlist> &chunks,
std::map<int, bufferlist> *decoded, int chunk_size) = 0;
virtual int decode_chunks(const std::set<int> &want_to_read,
const std::map<int, bufferlist> &chunks,
std::map<int, bufferlist> *decoded) = 0;
/**
* Return the ordered list of chunks or an empty vector
* if no remapping is necessary.
*
* By default encoding an object with K=2,M=1 will create three
* chunks, the first two are data and the last one coding. For
* a 10MB object, it would be:
*
* chunk 0 for the first 5MB
* chunk 1 for the last 5MB
* chunk 2 for the 5MB coding chunk
*
* The plugin may, however, decide to remap them in a different
* order, such as:
*
* chunk 0 for the last 5MB
* chunk 1 for the 5MB coding chunk
* chunk 2 for the first 5MB
*
* The vector<int> remaps the chunks so that the first chunks are
* data, in sequential order, and the last chunks contain parity
* in the same order as they were output by the encoding function.
*
* In the example above the mapping would be:
*
* [ 1, 2, 0 ]
*
* The returned vector<int> only contains information for chunks
* that need remapping. If no remapping is necessary, the
* vector<int> is empty.
*
* @return vector<int> list of indices of chunks to be remapped
*/
virtual const std::vector<int> &get_chunk_mapping() const = 0;
/**
* Decode the first **get_data_chunk_count()** **chunks** and
* concatenate them into **decoded**.
*
* Returns 0 on success.
*
* @param [in] chunks map chunk indexes to chunk data
 * @param [out] decoded concatenation of the data chunks
* @return **0** on success or a negative errno on error.
*/
virtual int decode_concat(const std::map<int, bufferlist> &chunks,
bufferlist *decoded) = 0;
};
typedef std::shared_ptr<ErasureCodeInterface> ErasureCodeInterfaceRef;
}
#endif
| 18,567 | 38.590618 | 89 | h |
null | ceph-main/src/erasure-code/ErasureCodePlugin.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013,2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <errno.h>
#include "ceph_ver.h"
#include "ErasureCodePlugin.h"
#include "common/errno.h"
#include "include/dlfcn_compat.h"
#include "include/str_list.h"
#include "include/ceph_assert.h"
using namespace std;
#define PLUGIN_PREFIX "libec_"
#define PLUGIN_SUFFIX SHARED_LIB_SUFFIX
#define PLUGIN_INIT_FUNCTION "__erasure_code_init"
#define PLUGIN_VERSION_FUNCTION "__erasure_code_version"
namespace ceph {
ErasureCodePluginRegistry ErasureCodePluginRegistry::singleton;
ErasureCodePluginRegistry::ErasureCodePluginRegistry() = default;
ErasureCodePluginRegistry::~ErasureCodePluginRegistry()
{
  if (disable_dlclose)
    return;
  // Destroy every registered plugin, then unload the shared library it
  // came from (the handle must be read before the plugin is deleted).
  for (auto& [name, plugin] : plugins) {
    void *handle = plugin->library;
    delete plugin;
    dlclose(handle);
  }
}
int ErasureCodePluginRegistry::remove(const std::string &name)
{
ceph_assert(ceph_mutex_is_locked(lock));
if (plugins.find(name) == plugins.end())
return -ENOENT;
std::map<std::string,ErasureCodePlugin*>::iterator plugin = plugins.find(name);
void *library = plugin->second->library;
delete plugin->second;
dlclose(library);
plugins.erase(plugin);
return 0;
}
// Register a plugin under the given name. Caller must hold the registry
// lock. Returns 0 on success, -EEXIST if the name is already taken (in
// which case ownership of *plugin stays with the caller).
int ErasureCodePluginRegistry::add(const std::string &name,
                                   ErasureCodePlugin* plugin)
{
  ceph_assert(ceph_mutex_is_locked(lock));
  // emplace() does the existence check and the insertion in one lookup.
  if (!plugins.emplace(name, plugin).second)
    return -EEXIST;
  return 0;
}
// Look up a registered plugin by name. Caller must hold the registry
// lock. Returns nullptr when the plugin is not registered.
ErasureCodePlugin *ErasureCodePluginRegistry::get(const std::string &name)
{
  ceph_assert(ceph_mutex_is_locked(lock));
  // Single find() instead of find() followed by operator[] (the latter
  // performed a second lookup).
  auto i = plugins.find(name);
  return (i != plugins.end()) ? i->second : nullptr;
}
// Instantiate an erasure code via the named plugin, loading the plugin's
// shared library on first use. The registry lock guards only the
// lookup/load step; the plugin's own factory() runs unlocked.
// Returns 0 on success or a negative errno (from load() or the plugin).
int ErasureCodePluginRegistry::factory(const std::string &plugin_name,
				       const std::string &directory,
				       ErasureCodeProfile &profile,
				       ErasureCodeInterfaceRef *erasure_code,
				       ostream *ss)
{
  ErasureCodePlugin *plugin;
  {
    std::lock_guard l{lock};
    plugin = get(plugin_name);
    if (plugin == 0) {
      // Not registered yet: dlopen() it. "loading" marks the registry as
      // busy while the plugin's init entry point runs (init calls add()).
      loading = true;
      int r = load(plugin_name, directory, &plugin, ss);
      loading = false;
      if (r != 0)
	return r;
    }
  }
  int r = plugin->factory(directory, profile, erasure_code, ss);
  if (r)
    return r;
  // Sanity check: the instance must report exactly the profile it was
  // built from; a mismatch indicates a buggy plugin.
  if (profile != (*erasure_code)->get_profile()) {
    *ss << __func__ << " profile " << profile << " != get_profile() "
	<< (*erasure_code)->get_profile() << std::endl;
    return -EINVAL;
  }
  return 0;
}
// Fallback version string reported for plugins built before the
// __erasure_code_version entry point existed.
static const char *an_older_version()
{
  return "an older version";
}
// dlopen() the shared library libec_<plugin_name>.<suffix> found in
// <directory>, verify it was built from the same Ceph revision, run its
// init entry point (which must register the plugin via add()), and stash
// the library handle for the eventual dlclose(). Caller must hold the
// registry lock. Returns 0 on success or a negative errno:
//   -EIO    dlopen failed
//   -EXDEV  version string mismatch
//   -ENOENT no init entry point in the library
//   -EBADF  init ran but did not register the plugin
int ErasureCodePluginRegistry::load(const std::string &plugin_name,
				    const std::string &directory,
				    ErasureCodePlugin **plugin,
				    ostream *ss)
{
  ceph_assert(ceph_mutex_is_locked(lock));
  std::string fname = directory + "/" PLUGIN_PREFIX
    + plugin_name + PLUGIN_SUFFIX;
  void *library = dlopen(fname.c_str(), RTLD_NOW);
  if (!library) {
    *ss << "load dlopen(" << fname << "): " << dlerror();
    return -EIO;
  }
  // Plugins predating the version symbol report "an older version" and
  // are rejected by the comparison below.
  const char * (*erasure_code_version)() =
    (const char *(*)())dlsym(library, PLUGIN_VERSION_FUNCTION);
  if (erasure_code_version == NULL)
    erasure_code_version = an_older_version;
  if (erasure_code_version() != string(CEPH_GIT_NICE_VER)) {
    *ss << "expected plugin " << fname << " version " << CEPH_GIT_NICE_VER
	<< " but it claims to be " << erasure_code_version() << " instead";
    dlclose(library);
    return -EXDEV;
  }
  int (*erasure_code_init)(const char *, const char *) =
    (int (*)(const char *, const char *))dlsym(library, PLUGIN_INIT_FUNCTION);
  if (erasure_code_init) {
    std::string name = plugin_name;
    // The init entry point is expected to call add(plugin_name, ...).
    int r = erasure_code_init(name.c_str(), directory.c_str());
    if (r != 0) {
      *ss << "erasure_code_init(" << plugin_name
	  << "," << directory
	  << "): " << cpp_strerror(r);
      dlclose(library);
      return r;
    }
  } else {
    *ss << "load dlsym(" << fname
	<< ", " << PLUGIN_INIT_FUNCTION
	<< "): " << dlerror();
    dlclose(library);
    return -ENOENT;
  }
  // Verify init actually registered itself under the expected name.
  *plugin = get(plugin_name);
  if (*plugin == 0) {
    *ss << "load " << PLUGIN_INIT_FUNCTION << "()"
	<< "did not register " << plugin_name;
    dlclose(library);
    return -EBADF;
  }
  // Keep the handle so the destructor / remove() can dlclose() it.
  (*plugin)->library = library;
  *ss << __func__ << ": " << plugin_name << " ";
  return 0;
}
// Eagerly load a comma/space separated list of plugins from <directory>.
// Stops and returns the error of the first plugin that fails to load.
int ErasureCodePluginRegistry::preload(const std::string &plugins,
				       const std::string &directory,
				       ostream *ss)
{
  std::lock_guard l{lock};
  list<string> plugins_list;
  get_str_list(plugins, plugins_list);
  for (const auto& name : plugins_list) {
    ErasureCodePlugin *plugin;
    if (int r = load(name, directory, &plugin, ss); r != 0)
      return r;
  }
  return 0;
}
}
| 5,362 | 26.085859 | 81 | cc |
null | ceph-main/src/erasure-code/ErasureCodePlugin.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013,2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_PLUGIN_H
#define CEPH_ERASURE_CODE_PLUGIN_H
#include "common/ceph_mutex.h"
#include "ErasureCodeInterface.h"
extern "C" {
const char *__erasure_code_version();
int __erasure_code_init(char *plugin_name, char *directory);
}
namespace ceph {
class ErasureCodePlugin {
public:
  // dlopen() handle of the shared library implementing this plugin.
  // Set by ErasureCodePluginRegistry::load(); the registry dlclose()s
  // it right after deleting the plugin.
  // (In-class nullptr initializer replaces the old ": library(0)" ctor.)
  void *library = nullptr;

  ErasureCodePlugin() = default;
  virtual ~ErasureCodePlugin() {}

  /**
   * Instantiate the erasure code described by **profile**.
   *
   * @param [in] directory where auxiliary plugins may be loaded from
   * @param [in] profile parameters of the code (k, m, technique, ...)
   * @param [out] erasure_code the created instance
   * @param [out] ss stream collecting human-readable error messages
   * @return **0** on success or a negative errno on error.
   */
  virtual int factory(const std::string &directory,
                      ErasureCodeProfile &profile,
                      ErasureCodeInterfaceRef *erasure_code,
                      std::ostream *ss) = 0;
};
// Process-wide singleton mapping plugin names to loaded plugin
// instances. add()/remove()/get()/load() require "lock" to be held by
// the caller; factory()/preload() take it themselves.
class ErasureCodePluginRegistry {
public:
  ceph::mutex lock = ceph::make_mutex("ErasureCodePluginRegistry::lock");
  bool loading = false; // true while a plugin's init entry point runs
  bool disable_dlclose = false; // when set, the destructor skips dlclose()
  std::map<std::string,ErasureCodePlugin*> plugins; // name -> owned plugin

  static ErasureCodePluginRegistry singleton;

  ErasureCodePluginRegistry();
  ~ErasureCodePluginRegistry();

  static ErasureCodePluginRegistry &instance() {
    return singleton;
  }

  // Create an erasure code instance, loading the plugin on first use.
  int factory(const std::string &plugin,
	      const std::string &directory,
	      ErasureCodeProfile &profile,
	      ErasureCodeInterfaceRef *erasure_code,
	      std::ostream *ss);

  // Register/unregister/look up a plugin; lock must be held.
  int add(const std::string &name, ErasureCodePlugin *plugin);
  int remove(const std::string &name);
  ErasureCodePlugin *get(const std::string &name);

  // dlopen() + init a plugin library; lock must be held.
  int load(const std::string &plugin_name,
	   const std::string &directory,
	   ErasureCodePlugin **plugin,
	   std::ostream *ss);

  // Eagerly load a list of plugins (takes the lock itself).
  int preload(const std::string &plugins,
	      const std::string &directory,
	      std::ostream *ss);
};
}
#endif
| 2,197 | 25.481928 | 75 | h |
null | ceph-main/src/erasure-code/clay/ErasureCodeClay.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Indian Institute of Science <[email protected]>
*
* Author: Myna Vajha <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <errno.h>
#include <algorithm>
#include <vector>
#include "ErasureCodeClay.h"
#include "common/debug.h"
#include "erasure-code/ErasureCodePlugin.h"
#include "include/ceph_assert.h"
#include "include/str_map.h"
#include "include/stringify.h"
#include "osd/osd_types.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix _prefix(_dout)
#define LARGEST_VECTOR_WORDSIZE 16
#define talloc(type, num) (type *) malloc(sizeof(type)*(num))
using namespace std;
using namespace ceph;
// Prefix prepended to every dout() log line emitted by this file.
static std::ostream& _prefix(std::ostream* _dout)
{
  *_dout << "ErasureCodeClay: ";
  return *_dout;
}
// Integer exponentiation a^x by repeated squaring (O(log x) multiplies).
// No overflow protection: results are expected to fit in an int.
static int pow_int(int a, int x) {
  int result = 1;
  for (; x != 0; x /= 2) {
    if (x % 2 != 0)
      result *= a;
    a *= a;
  }
  return result;
}
// Release the uncoupled-chunk scratch buffers (one per node of the
// q x t grid) allocated lazily during encode/decode/repair.
ErasureCodeClay::~ErasureCodeClay()
{
  for (int idx = 0; idx < q * t; ++idx) {
    if (U_buf[idx].length() == 0)
      continue;
    U_buf[idx].clear();
  }
}
// Parse the clay-specific profile, run the generic initialization, then
// instantiate the two scalar helper codes: the (k+nu, m) MDS base code
// ("mds") and the 2x2 pairwise-coupling transform ("pft").
int ErasureCodeClay::init(ErasureCodeProfile &profile,
			  ostream *ss)
{
  if (int r = parse(profile, ss); r != 0)
    return r;
  if (int r = ErasureCode::init(profile, ss); r != 0)
    return r;
  auto& registry = ErasureCodePluginRegistry::instance();
  if (int r = registry.factory(mds.profile["plugin"],
			       directory,
			       mds.profile,
			       &mds.erasure_code,
			       ss); r != 0)
    return r;
  return registry.factory(pft.profile["plugin"],
			  directory,
			  pft.profile,
			  &pft.erasure_code,
			  ss);
}
// A chunk must hold a whole number of sub-chunks, each of which must in
// turn satisfy the pairwise-coupling code's own alignment; round the
// object size up to that stripe alignment before dividing by k.
unsigned int ErasureCodeClay::get_chunk_size(unsigned int object_size) const
{
  const unsigned int scalar_alignment = pft.erasure_code->get_chunk_size(1);
  const unsigned int stripe_alignment = sub_chunk_no * k * scalar_alignment;
  return round_up_to(object_size, stripe_alignment) / k;
}
// Dispatch: when the read qualifies for bandwidth-efficient repair
// (single erasure with its group helpers present), return the reduced
// sub-chunk lists; otherwise fall back to conventional MDS decoding.
int ErasureCodeClay::minimum_to_decode(const set<int> &want_to_read,
				       const set<int> &available,
				       map<int, vector<pair<int, int>>> *minimum)
{
  return is_repair(want_to_read, available)
    ? minimum_to_repair(want_to_read, available, minimum)
    : ErasureCode::minimum_to_decode(want_to_read, available, minimum);
}
int ErasureCodeClay::decode(const set<int> &want_to_read,
const map<int, bufferlist> &chunks,
map<int, bufferlist> *decoded, int chunk_size)
{
set<int> avail;
for ([[maybe_unused]] auto& [node, bl] : chunks) {
avail.insert(node);
(void)bl; // silence -Wunused-variable
}
if (is_repair(want_to_read, avail) &&
((unsigned int)chunk_size > chunks.begin()->second.length())) {
return repair(want_to_read, chunks, decoded, chunk_size);
} else {
return ErasureCode::_decode(want_to_read, chunks, decoded);
}
}
// Debugging aid: callable from gdb to print a set<int>; unused by the code.
void p(const set<int> &s) { cerr << s; } // for gdb
// Compute the m parity chunks of (*encoded) from its k data chunks.
// Internally the code operates on a q*t-node grid: when k+m is not a
// multiple of q, nu zero-filled "shortening" chunks occupy internal
// indexes k..k+nu-1 and the caller's parity indexes shift up by nu.
int ErasureCodeClay::encode_chunks(const set<int> &want_to_encode,
				   map<int, bufferlist> *encoded)
{
  map<int, bufferlist> chunks;
  set<int> parity_chunks;
  int chunk_size = (*encoded)[0].length();
  // Remap caller indexes to internal ones: data keeps 0..k-1, parity
  // moves to k+nu..k+nu+m-1. Encoding is "decode the parity positions".
  for (int i = 0; i < k + m; i++) {
    if (i < k) {
      chunks[i] = (*encoded)[i];
    } else {
      chunks[i+nu] = (*encoded)[i];
      parity_chunks.insert(i+nu);
    }
  }
  // Zero-filled shortening chunks at internal indexes k..k+nu-1.
  for (int i = k; i < k + nu; i++) {
    bufferptr buf(buffer::create_aligned(chunk_size, SIMD_ALIGN));
    buf.zero();
    chunks[i].push_back(std::move(buf));
  }
  int res = decode_layered(parity_chunks, &chunks);
  for (int i = k ; i < k + nu; i++) {
    // need to clean some of the intermediate chunks here!!
    chunks[i].clear();
  }
  return res;
}
// Reconstruct the chunks missing from "chunks" into the buffers the
// caller pre-allocated in *decoded. Like encode_chunks(), works on the
// internal q*t-node layout with nu zero-filled shortening chunks.
int ErasureCodeClay::decode_chunks(const set<int> &want_to_read,
				   const map<int, bufferlist> &chunks,
				   map<int, bufferlist> *decoded)
{
  set<int> erasures;
  map<int, bufferlist> coded_chunks;
  // Any caller index absent from "chunks" is an erasure; remap all
  // indexes >= k up by nu for the internal layout.
  for (int i = 0; i < k + m; i++) {
    if (chunks.count(i) == 0) {
      erasures.insert(i < k ? i : i+nu);
    }
    ceph_assert(decoded->count(i) > 0);
    coded_chunks[i < k ? i : i+nu] = (*decoded)[i];
  }
  int chunk_size = coded_chunks[0].length();
  // Zero-filled shortening chunks at internal indexes k..k+nu-1.
  for (int i = k; i < k+nu; i++) {
    bufferptr buf(buffer::create_aligned(chunk_size, SIMD_ALIGN));
    buf.zero();
    coded_chunks[i].push_back(std::move(buf));
  }
  int res = decode_layered(erasures, &coded_chunks);
  for (int i = k; i < k+nu; i++) {
    coded_chunks[i].clear();
  }
  return res;
}
// Parse and validate the clay profile: k, m, d, the scalar MDS plugin
// ("scalar_mds") and its "technique"; then derive q = d-k+1, the
// shortening amount nu, t = (k+m+nu)/q and sub_chunk_no = q^t, and fill
// in the profiles of the two helper codes (mds and pft).
// Fixes vs. the original: the scalar_mds error message printed the
// never-assigned mds.profile["plugin"] instead of the offending value,
// three messages lacked a space before "is not currently supported",
// and the k+m+nu>254 rejection was silent.
int ErasureCodeClay::parse(ErasureCodeProfile &profile,
			   ostream *ss)
{
  int err = 0;
  err = ErasureCode::parse(profile, ss);
  err |= to_int("k", profile, &k, DEFAULT_K, ss);
  err |= to_int("m", profile, &m, DEFAULT_M, ss);
  err |= sanity_check_k_m(k, m, ss);
  err |= to_int("d", profile, &d, std::to_string(k+m-1), ss);
  // check for scalar_mds in profile input
  if (profile.find("scalar_mds") == profile.end() ||
      profile.find("scalar_mds")->second.empty()) {
    mds.profile["plugin"] = "jerasure";
    pft.profile["plugin"] = "jerasure";
  } else {
    std::string p = profile.find("scalar_mds")->second;
    if ((p == "jerasure") || (p == "isa") || (p == "shec")) {
      mds.profile["plugin"] = p;
      pft.profile["plugin"] = p;
    } else {
      *ss << "scalar_mds " << p <<
	" is not currently supported, use one of 'jerasure',"<<
	" 'isa', 'shec'" << std::endl;
      err = -EINVAL;
      return err;
    }
  }
  if (profile.find("technique") == profile.end() ||
      profile.find("technique")->second.empty()) {
    if ((mds.profile["plugin"]=="jerasure") || (mds.profile["plugin"]=="isa") ) {
      mds.profile["technique"] = "reed_sol_van";
      pft.profile["technique"] = "reed_sol_van";
    } else {
      mds.profile["technique"] = "single";
      pft.profile["technique"] = "single";
    }
  } else {
    std::string p = profile.find("technique")->second;
    if (mds.profile["plugin"] == "jerasure") {
      if ( (p == "reed_sol_van") || (p == "reed_sol_r6_op") || (p == "cauchy_orig")
	   || (p == "cauchy_good") || (p == "liber8tion")) {
	mds.profile["technique"] = p;
	pft.profile["technique"] = p;
      } else {
	*ss << "technique " << p << " is not currently supported, use one of "
	    << "reed_sol_van', 'reed_sol_r6_op','cauchy_orig',"
	    << "'cauchy_good','liber8tion'"<< std::endl;
	err = -EINVAL;
	return err;
      }
    } else if (mds.profile["plugin"] == "isa") {
      if ( (p == "reed_sol_van") || (p == "cauchy")) {
	mds.profile["technique"] = p;
	pft.profile["technique"] = p;
      } else {
	*ss << "technique " << p << " is not currently supported, use one of"
	    << "'reed_sol_van','cauchy'"<< std::endl;
	err = -EINVAL;
	return err;
      }
    } else {
      if ( (p == "single") || (p == "multiple")) {
	mds.profile["technique"] = p;
	pft.profile["technique"] = p;
      } else {
	*ss << "technique " << p << " is not currently supported, use one of"<<
	  "'single','multiple'"<< std::endl;
	err = -EINVAL;
	return err;
      }
    }
  }
  if ((d < k) || (d > k + m - 1)) {
    *ss << "value of d " << d
	<< " must be within [ " << k << "," << k+m-1 << "]" << std::endl;
    err = -EINVAL;
    return err;
  }
  // q is the per-group size; nu pads k+m up to a multiple of q
  // (shortening: the nu virtual chunks are always zero).
  q = d - k + 1;
  if ((k + m) % q) {
    nu = q - (k + m) % q;
  } else {
    nu = 0;
  }
  // The scalar codes operate over GF(2^8), which caps the node count.
  if (k+m+nu > 254) {
    *ss << "k+m+nu = " << k+m+nu
	<< " exceeds the maximum of 254 nodes supported over GF(2^8)"
	<< std::endl;
    err = -EINVAL;
    return err;
  }
  if (mds.profile["plugin"] == "shec") {
    mds.profile["c"] = '2';
    pft.profile["c"] = '2';
  }
  mds.profile["k"] = std::to_string(k+nu);
  mds.profile["m"] = std::to_string(m);
  mds.profile["w"] = '8';

  pft.profile["k"] = '2';
  pft.profile["m"] = '2';
  pft.profile["w"] = '8';

  t = (k + m + nu) / q;
  sub_chunk_no = pow_int(q, t);

  dout(10) << __func__
	   << " (q,t,nu)=(" << q << "," << t << "," << nu <<")" << dendl;

  return err;
}
// Decide whether the bandwidth-efficient repair path can serve this
// read. Returns 1 only when exactly one chunk is wanted and missing,
// every other chunk of its q-sized column group is available, and at
// least d helper chunks exist; otherwise 0 (conventional decode).
int ErasureCodeClay::is_repair(const set<int> &want_to_read,
			       const set<int> &available_chunks) {
  // Nothing to repair if everything wanted is already available.
  if (includes(available_chunks.begin(), available_chunks.end(),
               want_to_read.begin(), want_to_read.end())) return 0;
  // Repair handles exactly one erasure.
  if (want_to_read.size() > 1) return 0;

  int i = *want_to_read.begin();
  // Map the caller's chunk index to the internal q*t grid (indexes >= k
  // shift up by nu to skip the virtual shortening chunks).
  int lost_node_id = (i < k) ? i: i+nu;
  for (int x = 0; x < q; x++) {
    int node = (lost_node_id/q)*q+x;
    node = (node < k) ? node : node-nu;
    if (node != i) { // node in the same group other than erased node
      if (available_chunks.count(node) == 0) return 0;
    }
  }
  // A repair needs d helper chunks in total.
  if (available_chunks.size() < (unsigned)d) return 0;
  return 1;
}
// Build the helper set for repairing the single chunk in want_to_read:
// all surviving chunks of the lost chunk's q-sized column group first,
// then arbitrary further available chunks until d helpers are chosen.
// Every helper contributes the same (offset, count) sub-chunk runs,
// computed by get_repair_subchunks(). Caller must have checked
// is_repair(); asserts otherwise.
int ErasureCodeClay::minimum_to_repair(const set<int> &want_to_read,
				       const set<int> &available_chunks,
				       map<int, vector<pair<int, int>>> *minimum)
{
  int i = *want_to_read.begin();
  int lost_node_index = (i < k) ? i : i+nu;
  int rep_node_index = 0;
  // add all the nodes in lost node's y column.
  vector<pair<int, int>> sub_chunk_ind;
  get_repair_subchunks(lost_node_index, sub_chunk_ind);
  if ((available_chunks.size() >= (unsigned)d)) {
    for (int j = 0; j < q; j++) {
      if (j != lost_node_index%q) {
	rep_node_index = (lost_node_index/q)*q+j;
	// Skip the virtual shortening chunks k..k+nu-1: they are known
	// zeros and have no on-disk counterpart.
	if (rep_node_index < k) {
	  minimum->insert(make_pair(rep_node_index, sub_chunk_ind));
	} else if (rep_node_index >= k+nu) {
	  minimum->insert(make_pair(rep_node_index-nu, sub_chunk_ind));
	}
      }
    }
    // Top up with arbitrary available chunks until d helpers are chosen.
    for (auto chunk : available_chunks) {
      if (minimum->size() >= (unsigned)d) {
	break;
      }
      if (!minimum->count(chunk)) {
	minimum->emplace(chunk, sub_chunk_ind);
      }
    }
  } else {
    dout(0) << "minimum_to_repair: shouldn't have come here" << dendl;
    ceph_assert(0);
  }
  ceph_assert(minimum->size() == (unsigned)d);
  return 0;
}
// Append to repair_sub_chunks_ind the (start, length) runs of sub-chunk
// indexes a helper must supply to repair lost_node: q^(y_lost) runs of
// q^(t-1-y_lost) consecutive sub-chunks each, spaced q runs apart and
// offset by the lost node's position within its group.
void ErasureCodeClay::get_repair_subchunks(const int &lost_node,
					   vector<pair<int, int>> &repair_sub_chunks_ind)
{
  const int row = lost_node / q;
  const int col = lost_node % q;
  const int run_len = pow_int(q, t - 1 - row);
  const int num_runs = pow_int(q, row);
  int start = col * run_len;
  for (int run = 0; run < num_runs; ++run) {
    repair_sub_chunks_ind.emplace_back(start, run_len);
    start += q * run_len;
  }
}
// Number of sub-chunks per helper chunk that a repair of want_to_read
// must fetch: total sub-chunks minus the planes that intersect no
// erased chunk (a plane avoids row y in (q - erasures_in_row_y) ways).
// Replaces the variable-length array "int weight_vector[t]" — a
// non-standard GCC extension in C++ — with std::vector.
int ErasureCodeClay::get_repair_sub_chunk_count(const set<int> &want_to_read)
{
  // weight_vector[y] = number of erased chunks in row y of the grid.
  std::vector<int> weight_vector(t, 0);
  for (auto to_read : want_to_read) {
    weight_vector[to_read / q]++;
  }
  int repair_subchunks_count = 1;
  for (int y = 0; y < t; y++) {
    repair_subchunks_count *= (q - weight_vector[y]);
  }
  return sub_chunk_no - repair_subchunks_count;
}
// Repair the single chunk in want_to_read from d partially-fetched
// helper chunks. The output buffer placed in *repaired is also aliased
// into recovered_data, so repair_one_lost_chunk() writes straight into
// the caller's result. Helpers and the internal layout use nu virtual
// zero chunks at indexes k..k+nu-1 (shortening).
int ErasureCodeClay::repair(const set<int> &want_to_read,
			    const map<int, bufferlist> &chunks,
			    map<int, bufferlist> *repaired, int chunk_size)
{
  ceph_assert((want_to_read.size() == 1) && (chunks.size() == (unsigned)d));

  int repair_sub_chunk_no = get_repair_sub_chunk_count(want_to_read);
  vector<pair<int, int>> repair_sub_chunks_ind;

  // Helper buffers hold only the repair sub-chunks, so their length
  // determines the sub-chunk size and hence the full chunk size.
  unsigned repair_blocksize = chunks.begin()->second.length();
  assert(repair_blocksize%repair_sub_chunk_no == 0);

  unsigned sub_chunksize = repair_blocksize/repair_sub_chunk_no;
  unsigned chunksize = sub_chunk_no*sub_chunksize;

  ceph_assert(chunksize == (unsigned)chunk_size);

  map<int, bufferlist> recovered_data;
  map<int, bufferlist> helper_data;
  set<int> aloof_nodes;

  for (int i = 0; i < k + m; i++) {
    // included helper data only for d+nu nodes.
    if (auto found = chunks.find(i); found != chunks.end()) { // i is a helper
      if (i<k) {
	helper_data[i] = found->second;
      } else {
	helper_data[i+nu] = found->second;
      }
    } else {
      if (i != *want_to_read.begin()) { // aloof node case.
	int aloof_node_id = (i < k) ? i: i+nu;
	aloof_nodes.insert(aloof_node_id);
      } else {
	// The lost chunk: allocate its output buffer and alias it into
	// both the caller's result map and recovered_data.
	bufferptr ptr(buffer::create_aligned(chunksize, SIMD_ALIGN));
	ptr.zero();
	int lost_node_id = (i < k) ? i : i+nu;
	(*repaired)[i].push_back(ptr);
	recovered_data[lost_node_id] = (*repaired)[i];
	get_repair_subchunks(lost_node_id, repair_sub_chunks_ind);
      }
    }
  }

  // this is for shortened codes i.e., when nu > 0
  for (int i=k; i < k+nu; i++) {
    bufferptr ptr(buffer::create_aligned(repair_blocksize, SIMD_ALIGN));
    ptr.zero();
    helper_data[i].push_back(ptr);
  }

  ceph_assert(helper_data.size()+aloof_nodes.size()+recovered_data.size() ==
	      (unsigned) q*t);

  int r = repair_one_lost_chunk(recovered_data, aloof_nodes,
				helper_data, repair_blocksize,
				repair_sub_chunks_ind);

  // clear buffers created for the purpose of shortening
  for (int i = k; i < k+nu; i++) {
    helper_data[i].clear();
  }

  return r;
}
// Core of the repair path: reconstruct the single chunk in
// recovered_data from the partially-fetched helper_data, plane by
// plane. Planes are processed in increasing "order" (the number of
// erased/aloof nodes whose hole-dot pair lies on the plane), so every
// plane's dependencies are already solved when it is reached. For each
// plane the helpers' uncoupled values are derived via the 2x2 pairwise
// transform (pft), the plane is decoded with the scalar MDS code, and
// the lost node's coupled value is written back into recovered_data.
int ErasureCodeClay::repair_one_lost_chunk(map<int, bufferlist> &recovered_data,
					   set<int> &aloof_nodes,
					   map<int, bufferlist> &helper_data,
					   int repair_blocksize,
					   vector<pair<int,int>> &repair_sub_chunks_ind)
{
  unsigned repair_subchunks = (unsigned)sub_chunk_no / q;
  unsigned sub_chunksize = repair_blocksize / repair_subchunks;
  int z_vec[t];
  map<int, set<int> > ordered_planes;
  map<int, int> repair_plane_to_ind;
  int count_retrieved_sub_chunks = 0;
  int plane_ind = 0;
  // Scratch sub-chunk used as a don't-care slot in pft decodes.
  bufferptr buf(buffer::create_aligned(sub_chunksize, SIMD_ALIGN));
  bufferlist temp_buf;
  temp_buf.push_back(buf);
  // Classify every repair plane by its intersection order and remember
  // where its sub-chunk sits inside the (compacted) helper buffers.
  for (auto [index,count] : repair_sub_chunks_ind) {
    for (int j = index; j < index + count; j++) {
      get_plane_vector(j, z_vec);
      int order = 0;
      // check across all erasures and aloof nodes
      for ([[maybe_unused]] auto& [node, bl] : recovered_data) {
	if (node % q == z_vec[node / q]) order++;
	(void)bl;  // silence -Wunused-variable
      }
      for (auto node : aloof_nodes) {
	if (node % q == z_vec[node / q]) order++;
      }
      ceph_assert(order > 0);
      ordered_planes[order].insert(j);
      // to keep track of a sub chunk within helper buffer received
      repair_plane_to_ind[j] = plane_ind;
      plane_ind++;
    }
  }
  assert((unsigned)plane_ind == repair_subchunks);
  // Lazily allocate the full-size uncoupled scratch buffers.
  for (int i = 0; i < q*t; i++) {
    if (U_buf[i].length() == 0) {
      bufferptr buf(buffer::create_aligned(sub_chunk_no*sub_chunksize, SIMD_ALIGN));
      buf.zero();
      U_buf[i].push_back(std::move(buf));
    }
  }
  // Exactly one chunk is being recovered.
  int lost_chunk;
  int count = 0;
  for ([[maybe_unused]] auto& [node, bl] : recovered_data) {
    lost_chunk = node;
    count++;
    (void)bl;  // silence -Wunused-variable
  }
  ceph_assert(count == 1);
  // Treat the lost chunk's whole column group plus the aloof nodes as
  // erasures for the plane-level MDS decode.
  set<int> erasures;
  for (int i = 0; i < q; i++) {
    erasures.insert(lost_chunk - lost_chunk % q + i);
  }
  for (auto node : aloof_nodes) {
    erasures.insert(node);
  }
  for (int order = 1; ;order++) {
    if (ordered_planes.count(order) == 0) {
      break;
    }
    for (auto z : ordered_planes[order]) {
      get_plane_vector(z, z_vec);
      // Step 1: derive the uncoupled value of every surviving node on
      // this plane from its coupled value (and its pair's value).
      for (int y = 0; y < t; y++) {
	for (int x = 0; x < q; x++) {
	  int node_xy = y*q + x;
	  map<int, bufferlist> known_subchunks;
	  map<int, bufferlist> pftsubchunks;
	  set<int> pft_erasures;
	  if (erasures.count(node_xy) == 0) {
	    assert(helper_data.count(node_xy) > 0);
	    int z_sw = z + (x - z_vec[y])*pow_int(q,t-1-y);
	    int node_sw = y*q + z_vec[y];
	    int i0 = 0, i1 = 1, i2 = 2, i3 = 3;
	    // Canonical ordering of the coupled pair within the pft code.
	    if (z_vec[y] > x) {
	      i0 = 1;
	      i1 = 0;
	      i2 = 3;
	      i3 = 2;
	    }
	    if (aloof_nodes.count(node_sw) > 0) {
	      // Pair partner is aloof: use its already-computed
	      // uncoupled value from a lower-order plane.
	      assert(repair_plane_to_ind.count(z) > 0);
	      assert(repair_plane_to_ind.count(z_sw) > 0);
	      pft_erasures.insert(i2);
	      known_subchunks[i0].substr_of(helper_data[node_xy], repair_plane_to_ind[z]*sub_chunksize, sub_chunksize);
	      known_subchunks[i3].substr_of(U_buf[node_sw], z_sw*sub_chunksize, sub_chunksize);
	      pftsubchunks[i0] = known_subchunks[i0];
	      pftsubchunks[i1] = temp_buf;
	      pftsubchunks[i2].substr_of(U_buf[node_xy], z*sub_chunksize, sub_chunksize);
	      pftsubchunks[i3] = known_subchunks[i3];
	      for (int i=0; i<3; i++) {
		pftsubchunks[i].rebuild_aligned(SIMD_ALIGN);
	      }
	      pft.erasure_code->decode_chunks(pft_erasures, known_subchunks, &pftsubchunks);
	    } else {
	      ceph_assert(helper_data.count(node_sw) > 0);
	      ceph_assert(repair_plane_to_ind.count(z) > 0);
	      if (z_vec[y] != x){
		// Off-diagonal node: decode the uncoupled value from the
		// two coupled helper sub-chunks of the pair.
		pft_erasures.insert(i2);
		ceph_assert(repair_plane_to_ind.count(z_sw) > 0);
		known_subchunks[i0].substr_of(helper_data[node_xy], repair_plane_to_ind[z]*sub_chunksize, sub_chunksize);
		known_subchunks[i1].substr_of(helper_data[node_sw], repair_plane_to_ind[z_sw]*sub_chunksize, sub_chunksize);
		pftsubchunks[i0] = known_subchunks[i0];
		pftsubchunks[i1] = known_subchunks[i1];
		pftsubchunks[i2].substr_of(U_buf[node_xy], z*sub_chunksize, sub_chunksize);
		pftsubchunks[i3].substr_of(temp_buf, 0, sub_chunksize);
		for (int i=0; i<3; i++) {
		  pftsubchunks[i].rebuild_aligned(SIMD_ALIGN);
		}
		pft.erasure_code->decode_chunks(pft_erasures, known_subchunks, &pftsubchunks);
	      } else {
		// Dot node (x == z_vec[y]): coupled == uncoupled.
		char* uncoupled_chunk = U_buf[node_xy].c_str();
		char* coupled_chunk = helper_data[node_xy].c_str();
		memcpy(&uncoupled_chunk[z*sub_chunksize],
		       &coupled_chunk[repair_plane_to_ind[z]*sub_chunksize],
		       sub_chunksize);
	      }
	    }
	  }
	} // x
      } // y
      ceph_assert(erasures.size() <= (unsigned)m);
      // Step 2: MDS-decode the erased uncoupled values on this plane.
      decode_uncoupled(erasures, z, sub_chunksize);
      // Step 3: convert the decoded uncoupled values back to coupled
      // values where needed, writing the lost chunk's data in place.
      for (auto i : erasures) {
	int x = i % q;
	int y = i / q;
	int node_sw = y*q+z_vec[y];
	int z_sw = z + (x - z_vec[y]) * pow_int(q,t-1-y);
	set<int> pft_erasures;
	map<int, bufferlist> known_subchunks;
	map<int, bufferlist> pftsubchunks;
	int i0 = 0, i1 = 1, i2 = 2, i3 = 3;
	if (z_vec[y] > x) {
	  i0 = 1;
	  i1 = 0;
	  i2 = 3;
	  i3 = 2;
	}
	// make sure it is not an aloof node before you retrieve repaired_data
	if (aloof_nodes.count(i) == 0) {
	  if (x == z_vec[y]) { // hole-dot pair (type 0)
	    char* coupled_chunk = recovered_data[i].c_str();
	    char* uncoupled_chunk = U_buf[i].c_str();
	    memcpy(&coupled_chunk[z*sub_chunksize],
		   &uncoupled_chunk[z*sub_chunksize],
		   sub_chunksize);
	    count_retrieved_sub_chunks++;
	  } else {
	    ceph_assert(y == lost_chunk / q);
	    ceph_assert(node_sw == lost_chunk);
	    ceph_assert(helper_data.count(i) > 0);
	    pft_erasures.insert(i1);
	    known_subchunks[i0].substr_of(helper_data[i], repair_plane_to_ind[z]*sub_chunksize, sub_chunksize);
	    known_subchunks[i2].substr_of(U_buf[i], z*sub_chunksize, sub_chunksize);

	    pftsubchunks[i0] = known_subchunks[i0];
	    pftsubchunks[i1].substr_of(recovered_data[node_sw], z_sw*sub_chunksize, sub_chunksize);
	    pftsubchunks[i2] = known_subchunks[i2];
	    pftsubchunks[i3] = temp_buf;
	    for (int i=0; i<3; i++) {
	      pftsubchunks[i].rebuild_aligned(SIMD_ALIGN);
	    }
	    pft.erasure_code->decode_chunks(pft_erasures, known_subchunks, &pftsubchunks);
	  }
	}
      } // recover all erasures
    } // planes of particular order
  } // order

  return 0;
}
// Full (non-repair) layered decode on the internal q*t grid: pad the
// erasure set up to exactly m chunks, then sweep the planes in
// increasing intersection score. Each pass MDS-decodes the uncoupled
// values of a plane (decode_erasures) and then converts the erased
// nodes' uncoupled values back to coupled data in *chunks.
int ErasureCodeClay::decode_layered(set<int> &erased_chunks,
				    map<int, bufferlist> *chunks)
{
  int num_erasures = erased_chunks.size();
  int size = (*chunks)[0].length();
  ceph_assert(size%sub_chunk_no == 0);
  int sc_size = size / sub_chunk_no;
  ceph_assert(num_erasures > 0);
  // Pad the erasure set to exactly m entries, preferring parity-side
  // nodes (indexes >= k+nu), so the MDS decode is fully determined.
  for (int i = k+nu; (num_erasures < m) && (i < q*t); i++) {
    if ([[maybe_unused]] auto [it, added] = erased_chunks.emplace(i); added) {
      num_erasures++;
      (void)it;  // silence -Wunused-variable
    }
  }
  ceph_assert(num_erasures == m);
  int max_iscore = get_max_iscore(erased_chunks);
  int order[sub_chunk_no];
  int z_vec[t];
  // Lazily allocate the uncoupled scratch buffers.
  for (int i = 0; i < q*t; i++) {
    if (U_buf[i].length() == 0) {
      bufferptr buf(buffer::create_aligned(size, SIMD_ALIGN));
      buf.zero();
      U_buf[i].push_back(std::move(buf));
    }
  }
  set_planes_sequential_decoding_order(order, erased_chunks);
  for (int iscore = 0; iscore <= max_iscore; iscore++) {
   for (int z = 0; z < sub_chunk_no; z++) {
      if (order[z] == iscore) {
	decode_erasures(erased_chunks, z, chunks, sc_size);
      }
    }
    // Write the coupled data of the erased nodes for this score level.
    for (int z = 0; z < sub_chunk_no; z++) {
      if (order[z] == iscore) {
	get_plane_vector(z, z_vec);
	for (auto node_xy : erased_chunks) {
	  int x = node_xy % q;
	  int y = node_xy / q;
	  int node_sw = y*q+z_vec[y];
	  if (z_vec[y] != x) {
	    if (erased_chunks.count(node_sw) == 0) {
	      recover_type1_erasure(chunks, x, y, z, z_vec, sc_size);
	    } else if (z_vec[y] < x){
	      ceph_assert(erased_chunks.count(node_sw) > 0);
	      ceph_assert(z_vec[y] != x);
	      get_coupled_from_uncoupled(chunks, x, y, z, z_vec, sc_size);
	    }
	  } else {
	    // Dot node: coupled data equals the uncoupled value.
	    char* C = (*chunks)[node_xy].c_str();
	    char* U = U_buf[node_xy].c_str();
	    memcpy(&C[z*sc_size], &U[z*sc_size], sc_size);
	  }
	}
      }
    } // plane
  } // iscore, order
  return 0;
}
// For one plane z: compute the uncoupled value of every surviving node
// (from its coupled data, via the pairwise transform where needed),
// then MDS-decode the erased nodes' uncoupled values into U_buf.
int ErasureCodeClay::decode_erasures(const set<int>& erased_chunks, int z,
				     map<int, bufferlist>* chunks, int sc_size)
{
  int z_vec[t];
  get_plane_vector(z, z_vec);
  for (int x = 0; x < q; x++) {
    for (int y = 0; y < t; y++) {
      int node_xy = q*y+x;
      int node_sw = q*y+z_vec[y];
      if (erased_chunks.count(node_xy) == 0) {
	if (z_vec[y] < x) {
	  // Canonical member of the coupled pair: always untransform.
	  get_uncoupled_from_coupled(chunks, x, y, z, z_vec, sc_size);
	} else if (z_vec[y] == x) {
	  // Dot node: uncoupled value equals the coupled data.
	  char* uncoupled_chunk = U_buf[node_xy].c_str();
	  char* coupled_chunk = (*chunks)[node_xy].c_str();
	  memcpy(&uncoupled_chunk[z*sc_size], &coupled_chunk[z*sc_size], sc_size);
	} else {
	  // Non-canonical member: only needed when its pair is erased
	  // (otherwise the pair's pass already produced both values).
	  if (erased_chunks.count(node_sw) > 0) {
	    get_uncoupled_from_coupled(chunks, x, y, z, z_vec, sc_size);
	  }
	}
      }
    }
  }
  return decode_uncoupled(erased_chunks, z, sc_size);
}
int ErasureCodeClay::decode_uncoupled(const set<int>& erased_chunks, int z, int sc_size)
{
map<int, bufferlist> known_subchunks;
map<int, bufferlist> all_subchunks;
for (int i = 0; i < q*t; i++) {
if (erased_chunks.count(i) == 0) {
known_subchunks[i].substr_of(U_buf[i], z*sc_size, sc_size);
all_subchunks[i] = known_subchunks[i];
} else {
all_subchunks[i].substr_of(U_buf[i], z*sc_size, sc_size);
}
all_subchunks[i].rebuild_aligned_size_and_memory(sc_size, SIMD_ALIGN);
assert(all_subchunks[i].is_contiguous());
}
mds.erasure_code->decode_chunks(erased_chunks, known_subchunks, &all_subchunks);
return 0;
}
void ErasureCodeClay::set_planes_sequential_decoding_order(int* order, set<int>& erasures) {
int z_vec[t];
for (int z = 0; z < sub_chunk_no; z++) {
get_plane_vector(z,z_vec);
order[z] = 0;
for (auto i : erasures) {
if (i % q == z_vec[i / q]) {
order[z] = order[z] + 1;
}
}
}
}
// Recover the coupled (on-disk) value of erased chunk (x,y) in plane z when
// its coupled partner chunk is NOT erased ("type 1" erasure).
//
// The 4-wide pairwise coupling code (pft) relates the two coupled values and
// the two uncoupled values of a pair.  Here the partner's coupled value and
// this node's uncoupled value are known, so the missing coupled value
// (index i0) can be decoded.  The index swap accounts for the orientation of
// the pair when z_vec[y] > x.
void ErasureCodeClay::recover_type1_erasure(map<int, bufferlist>* chunks,
                                            int x, int y, int z,
                                            int* z_vec, int sc_size)
{
  set<int> erased_chunks;

  int node_xy = y*q+x;
  int node_sw = y*q+z_vec[y];
  // plane index of the pair point as seen from the partner chunk
  int z_sw = z + (x - z_vec[y]) * pow_int(q,t-1-y);

  map<int, bufferlist> known_subchunks;
  map<int, bufferlist> pftsubchunks;
  bufferptr ptr(buffer::create_aligned(sc_size, SIMD_ALIGN));
  ptr.zero();

  int i0 = 0, i1 = 1, i2 = 2, i3 = 3;
  if (z_vec[y] > x) {
    i0 = 1;
    i1 = 0;
    i2 = 3;
    i3 = 2;
  }

  erased_chunks.insert(i0);
  pftsubchunks[i0].substr_of((*chunks)[node_xy], z * sc_size, sc_size);
  known_subchunks[i1].substr_of((*chunks)[node_sw], z_sw * sc_size, sc_size);
  known_subchunks[i2].substr_of(U_buf[node_xy], z * sc_size, sc_size);
  pftsubchunks[i1] = known_subchunks[i1];
  pftsubchunks[i2] = known_subchunks[i2];
  // zero-filled placeholder occupying the fourth pair position
  pftsubchunks[i3].push_back(ptr);
  for (int i=0; i<3; i++) {
    pftsubchunks[i].rebuild_aligned_size_and_memory(sc_size, SIMD_ALIGN);
  }
  pft.erasure_code->decode_chunks(erased_chunks, known_subchunks, &pftsubchunks);
}
// Reconstruct BOTH coupled values of the pair ((x,y) in plane z and its
// partner node_sw in plane z_sw) from the two known uncoupled values, using
// the pairwise coupling code (pft) with positions {0,1} erased.
// Only invoked for the pair orientation z_vec[y] < x (asserted below); the
// partner's iteration never repeats the work.
void ErasureCodeClay::get_coupled_from_uncoupled(map<int, bufferlist>* chunks,
                                                 int x, int y, int z,
                                                 int* z_vec, int sc_size)
{
  set<int> erased_chunks = {0, 1};

  int node_xy = y*q+x;
  int node_sw = y*q+z_vec[y];
  // plane index of the pair point as seen from the partner chunk
  int z_sw = z + (x - z_vec[y]) * pow_int(q,t-1-y);

  ceph_assert(z_vec[y] < x);
  map<int, bufferlist> uncoupled_subchunks;
  uncoupled_subchunks[2].substr_of(U_buf[node_xy], z * sc_size, sc_size);
  uncoupled_subchunks[3].substr_of(U_buf[node_sw], z_sw * sc_size, sc_size);

  map<int, bufferlist> pftsubchunks;
  pftsubchunks[0].substr_of((*chunks)[node_xy], z * sc_size, sc_size);
  pftsubchunks[1].substr_of((*chunks)[node_sw], z_sw * sc_size, sc_size);
  pftsubchunks[2] = uncoupled_subchunks[2];
  pftsubchunks[3] = uncoupled_subchunks[3];
  for (int i=0; i<3; i++) {
    pftsubchunks[i].rebuild_aligned_size_and_memory(sc_size, SIMD_ALIGN);
  }
  pft.erasure_code->decode_chunks(erased_chunks, uncoupled_subchunks, &pftsubchunks);
}
// Compute BOTH uncoupled values of the pair ((x,y) in plane z and its
// partner node_sw in plane z_sw) from the two known coupled (on-disk)
// values, using the pairwise coupling code (pft) with positions {2,3}
// erased.  The index swap handles the pair orientation when z_vec[y] > x.
void ErasureCodeClay::get_uncoupled_from_coupled(map<int, bufferlist>* chunks,
                                                 int x, int y, int z,
                                                 int* z_vec, int sc_size)
{
  set<int> erased_chunks = {2, 3};

  int node_xy = y*q+x;
  int node_sw = y*q+z_vec[y];
  // plane index of the pair point as seen from the partner chunk
  int z_sw = z + (x - z_vec[y]) * pow_int(q,t-1-y);

  int i0 = 0, i1 = 1, i2 = 2, i3 = 3;
  if (z_vec[y] > x) {
    i0 = 1;
    i1 = 0;
    i2 = 3;
    i3 = 2;
  }

  map<int, bufferlist> coupled_subchunks;
  coupled_subchunks[i0].substr_of((*chunks)[node_xy], z * sc_size, sc_size);
  coupled_subchunks[i1].substr_of((*chunks)[node_sw], z_sw * sc_size, sc_size);

  map<int, bufferlist> pftsubchunks;
  pftsubchunks[0] = coupled_subchunks[0];
  pftsubchunks[1] = coupled_subchunks[1];
  pftsubchunks[i2].substr_of(U_buf[node_xy], z * sc_size, sc_size);
  pftsubchunks[i3].substr_of(U_buf[node_sw], z_sw * sc_size, sc_size);
  for (int i=0; i<3; i++) {
    pftsubchunks[i].rebuild_aligned_size_and_memory(sc_size, SIMD_ALIGN);
  }
  pft.erasure_code->decode_chunks(erased_chunks, coupled_subchunks, &pftsubchunks);
}
// Upper bound on the intersection score of any plane: the number of
// distinct rows (y-sections) that contain at least one erased chunk.
int ErasureCodeClay::get_max_iscore(set<int>& erased_chunks)
{
  bool row_has_erasure[t];
  for (int row = 0; row < t; row++) {
    row_has_erasure[row] = false;
  }

  int iscore = 0;
  for (auto chunk : erased_chunks) {
    int row = chunk / q;
    if (!row_has_erasure[row]) {
      row_has_erasure[row] = true;
      ++iscore;
    }
  }
  return iscore;
}
// Expand plane index z into its t base-q digits, most significant digit
// in z_vec[0] (row 0), least significant in z_vec[t-1].
void ErasureCodeClay::get_plane_vector(int z, int* z_vec)
{
  for (int pos = t - 1; pos >= 0; pos--) {
    z_vec[pos] = z % q;
    z /= q;
  }
}
| 26,780 | 28.989922 | 112 | cc |
null | ceph-main/src/erasure-code/clay/ErasureCodeClay.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Indian Institute of Science <[email protected]>
*
* Author: Myna Vajha <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_CLAY_H
#define CEPH_ERASURE_CODE_CLAY_H
#include "include/err.h"
#include "include/buffer_fwd.h"
#include "erasure-code/ErasureCode.h"
// CLAY (coupled-layer) erasure code: wraps a scalar MDS code (mds) and a
// pairwise coupling code (pft) to obtain a code whose chunks are split into
// sub_chunk_no sub-chunks, enabling repair that reads only a subset of each
// helper chunk.
class ErasureCodeClay final : public ceph::ErasureCode {
public:
  std::string DEFAULT_K{"4"};
  std::string DEFAULT_M{"2"};
  std::string DEFAULT_W{"8"};
  // code parameters parsed from the profile
  int k = 0, m = 0, d = 0, w = 8;
  // chunks are arranged on a q x t grid; nu = number of shortened chunks
  int q = 0, t = 0, nu = 0;
  // number of sub-chunks per chunk
  int sub_chunk_no = 0;

  // intermediate "uncoupled" buffers, one bufferlist per chunk
  std::map<int, ceph::bufferlist> U_buf;

  // a scalar erasure code plus the profile it was built from
  struct ScalarMDS {
    ceph::ErasureCodeInterfaceRef erasure_code;
    ceph::ErasureCodeProfile profile;
  };
  ScalarMDS mds;  // underlying scalar MDS code
  ScalarMDS pft;  // pairwise coupling (2+2) code
  const std::string directory;  // plugin directory for nested codecs

  explicit ErasureCodeClay(const std::string& dir)
    : directory(dir)
  {}

  ~ErasureCodeClay() override;

  unsigned int get_chunk_count() const override {
    return k+m;
  }

  unsigned int get_data_chunk_count() const override {
    return k;
  }

  int get_sub_chunk_count() override {
    return sub_chunk_no;
  }

  unsigned int get_chunk_size(unsigned int object_size) const override;

  int minimum_to_decode(const std::set<int> &want_to_read,
                        const std::set<int> &available,
                        std::map<int, std::vector<std::pair<int, int>>> *minimum) override;

  int decode(const std::set<int> &want_to_read,
             const std::map<int, ceph::bufferlist> &chunks,
             std::map<int, ceph::bufferlist> *decoded, int chunk_size) override;

  int encode_chunks(const std::set<int> &want_to_encode,
                    std::map<int, ceph::bufferlist> *encoded) override;

  int decode_chunks(const std::set<int> &want_to_read,
                    const std::map<int, ceph::bufferlist> &chunks,
                    std::map<int, ceph::bufferlist> *decoded) override;

  int init(ceph::ErasureCodeProfile &profile, std::ostream *ss) override;

  // whether the request can be served by the bandwidth-efficient repair path
  int is_repair(const std::set<int> &want_to_read,
                const std::set<int> &available_chunks);

  int get_repair_sub_chunk_count(const std::set<int> &want_to_read);

  virtual int parse(ceph::ErasureCodeProfile &profile, std::ostream *ss);

private:
  int minimum_to_repair(const std::set<int> &want_to_read,
                        const std::set<int> &available_chunks,
                        std::map<int, std::vector<std::pair<int, int>>> *minimum);

  int repair(const std::set<int> &want_to_read,
             const std::map<int, ceph::bufferlist> &chunks,
             std::map<int, ceph::bufferlist> *recovered, int chunk_size);

  // full (layered) decode of all erased chunks
  int decode_layered(std::set<int>& erased_chunks, std::map<int, ceph::bufferlist>* chunks);

  int repair_one_lost_chunk(std::map<int, ceph::bufferlist> &recovered_data, std::set<int> &aloof_nodes,
                            std::map<int, ceph::bufferlist> &helper_data, int repair_blocksize,
                            std::vector<std::pair<int,int>> &repair_sub_chunks_ind);

  void get_repair_subchunks(const int &lost_node,
                            std::vector<std::pair<int, int>> &repair_sub_chunks_ind);

  int decode_erasures(const std::set<int>& erased_chunks, int z,
                      std::map<int, ceph::bufferlist>* chunks, int sc_size);

  int decode_uncoupled(const std::set<int>& erasures, int z, int ss_size);

  void set_planes_sequential_decoding_order(int* order, std::set<int>& erasures);

  void recover_type1_erasure(std::map<int, ceph::bufferlist>* chunks, int x, int y, int z,
                             int* z_vec, int sc_size);

  void get_uncoupled_from_coupled(std::map<int, ceph::bufferlist>* chunks, int x, int y, int z,
                                  int* z_vec, int sc_size);

  void get_coupled_from_uncoupled(std::map<int, ceph::bufferlist>* chunks, int x, int y, int z,
                                  int* z_vec, int sc_size);

  // expand plane index z into its t base-q digits
  void get_plane_vector(int z, int* z_vec);

  int get_max_iscore(std::set<int>& erased_chunks);
};
#endif
| 4,307 | 32.92126 | 104 | h |
null | ceph-main/src/erasure-code/clay/ErasureCodePluginClay.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Indian Institute of Science <[email protected]>
*
* Author: Myna Vajha <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include "ceph_ver.h"
#include "common/debug.h"
#include "ErasureCodePluginClay.h"
#include "ErasureCodeClay.h"
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix _prefix(_dout)
// Build and initialize an ErasureCodeClay instance from the given profile.
// Ownership is transferred to *erasure_code only after a successful init().
int ErasureCodePluginClay::factory(const std::string &directory,
				   ceph::ErasureCodeProfile &profile,
				   ceph::ErasureCodeInterfaceRef *erasure_code,
				   std::ostream *ss) {
  auto clay = std::make_unique<ErasureCodeClay>(directory);
  int err = clay->init(profile, ss);
  if (err != 0) {
    return err;
  }
  *erasure_code = ceph::ErasureCodeInterfaceRef(clay.release());
  return 0;
}
// Exported plugin hook: the Ceph version string this plugin was built with.
const char *__erasure_code_version() { return CEPH_GIT_NICE_VER; }
// Exported plugin hook: register this plugin under 'plugin_name' with the
// global erasure-code plugin registry.
int __erasure_code_init(char *plugin_name, char *directory)
{
  return ceph::ErasureCodePluginRegistry::instance().add(
    plugin_name, new ErasureCodePluginClay());
}
| 1,414 | 30.444444 | 73 | cc |
null | ceph-main/src/erasure-code/clay/ErasureCodePluginClay.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2018 Indian Institute of Science <[email protected]>
*
* Author: Myna Vajha <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_PLUGIN_CLAY_H
#define CEPH_ERASURE_CODE_PLUGIN_CLAY_H
#include "erasure-code/ErasureCodePlugin.h"
// Plugin wrapper: builds ErasureCodeClay codec instances from a profile.
class ErasureCodePluginClay : public ceph::ErasureCodePlugin {
public:
  // Create and init a codec; returns 0 and sets *erasure_code on success.
  int factory(const std::string& directory,
	      ceph::ErasureCodeProfile &profile,
	      ceph::ErasureCodeInterfaceRef *erasure_code,
	      std::ostream *ss) override;
};
#endif
| 920 | 28.709677 | 73 | h |
null | ceph-main/src/erasure-code/isa/ErasureCodeIsa.cc | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 CERN (Switzerland)
*
* Author: Andreas-Joachim Peters <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
// -----------------------------------------------------------------------------
#include <algorithm>
#include <cerrno>
// -----------------------------------------------------------------------------
#include "common/debug.h"
#include "ErasureCodeIsa.h"
#include "xor_op.h"
#include "include/ceph_assert.h"
using namespace std;
using namespace ceph;
// -----------------------------------------------------------------------------
extern "C" {
#include "isa-l/include/erasure_code.h"
}
// -----------------------------------------------------------------------------
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix _prefix(_dout)
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
// Prepend the module tag to every debug line emitted by this file.
static std::ostream&
_prefix(std::ostream* _dout)
{
  *_dout << "ErasureCodeIsa: ";
  return *_dout;
}
// -----------------------------------------------------------------------------
const std::string ErasureCodeIsaDefault::DEFAULT_K("7");
const std::string ErasureCodeIsaDefault::DEFAULT_M("3");
// -----------------------------------------------------------------------------
// Parse the profile, precompute the encoding tables and finish base-class
// initialization.  Returns the first non-zero error code encountered.
int
ErasureCodeIsa::init(ErasureCodeProfile &profile, ostream *ss)
{
  int ret = parse(profile, ss);
  if (ret)
    return ret;
  prepare();
  return ErasureCode::init(profile, ss);
}
// -----------------------------------------------------------------------------
// Per-chunk size for an object: ceil(object_size / k), rounded up to the
// codec's address alignment.
unsigned int
ErasureCodeIsa::get_chunk_size(unsigned int object_size) const
{
  unsigned alignment = get_alignment();
  unsigned chunk_size = (object_size + k - 1) / k;
  dout(20) << "get_chunk_size: chunk_size " << chunk_size
           << " must be modulo " << alignment << dendl;
  unsigned remainder = chunk_size % alignment;
  if (remainder) {
    // pad up to the next multiple of the alignment
    dout(10) << "get_chunk_size: " << chunk_size
             << " padded to " << chunk_size + alignment - remainder << dendl;
    chunk_size += alignment - remainder;
  }
  return chunk_size;
}
// -----------------------------------------------------------------------------
int ErasureCodeIsa::encode_chunks(const set<int> &want_to_encode,
map<int, bufferlist> *encoded)
{
char *chunks[k + m];
for (int i = 0; i < k + m; i++)
chunks[i] = (*encoded)[i].c_str();
isa_encode(&chunks[0], &chunks[k], (*encoded)[0].length());
return 0;
}
// ErasureCode interface: rebuild the chunks missing from 'chunks' into the
// preallocated buffers of *decoded.  Translates the map-based view into the
// -1-terminated erasure list and raw pointer arrays expected by isa_decode().
int ErasureCodeIsa::decode_chunks(const set<int> &want_to_read,
                                  const map<int, bufferlist> &chunks,
                                  map<int, bufferlist> *decoded)
{
  unsigned blocksize = (*chunks.begin()).second.length();
  int erasures[k + m + 1];
  int erasures_count = 0;
  char *data[k];
  char *coding[m];
  for (int i = 0; i < k + m; i++) {
    // every chunk id absent from 'chunks' counts as erased
    if (chunks.find(i) == chunks.end()) {
      erasures[erasures_count] = i;
      erasures_count++;
    }
    if (i < k)
      data[i] = (*decoded)[i].c_str();
    else
      coding[i - k] = (*decoded)[i].c_str();
  }
  erasures[erasures_count] = -1;  // sentinel terminating the erasure list
  ceph_assert(erasures_count > 0);
  return isa_decode(erasures, data, coding, blocksize);
}
// -----------------------------------------------------------------------------
// Encode m coding chunks from k data chunks of 'blocksize' bytes each.
// With a single coding chunk this degenerates to a plain XOR across the
// data regions; otherwise the precomputed ISA-L tables drive the GF math.
void
ErasureCodeIsaDefault::isa_encode(char **data,
                                  char **coding,
                                  int blocksize)
{

  if (m == 1)
    // single parity stripe
    region_xor((unsigned char**) data, (unsigned char*) coding[0], k, blocksize);
  else
    ec_encode_data(blocksize, k, m, encode_tbls,
                   (unsigned char**) data, (unsigned char**) coding);
}
// -----------------------------------------------------------------------------
// True if chunk id 'i' appears in the -1-terminated erasure list.
bool
ErasureCodeIsaDefault::erasure_contains(int *erasures, int i)
{
  const int *cursor = erasures;
  while (*cursor != -1) {
    if (*cursor == i) {
      return true;
    }
    ++cursor;
  }
  return false;
}
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
// Reconstruct the chunks listed in the -1-terminated 'erasures' array,
// writing into the corresponding data/coding buffers in place.
//
// Fast paths repair a single erasure with a plain region XOR: always when
// m == 1, and for Vandermonde matrices when the lost chunk is a data chunk
// or the first coding chunk.  Otherwise a decoding table is built by
// inverting the sub-matrix of surviving rows (cached per erasure signature)
// and applied with ec_encode_data().
//
// @return 0 on success, -1 if more than m chunks are lost or the selected
//         sub-matrix is not invertible.
int
ErasureCodeIsaDefault::isa_decode(int *erasures,
                                  char **data,
                                  char **coding,
                                  int blocksize)
{
  int nerrs = 0;
  int i, r, s;

  // count the errors
  for (int l = 0; erasures[l] != -1; l++) {
    nerrs++;
  }

  // surviving buffers (sources) and erased buffers (targets), in id order
  unsigned char *recover_source[k];
  unsigned char *recover_target[m];

  memset(recover_source, 0, sizeof (recover_source));
  memset(recover_target, 0, sizeof (recover_target));

  // ---------------------------------------------
  // Assign source and target buffers
  // ---------------------------------------------
  for (i = 0, s = 0, r = 0; ((r < k) || (s < nerrs)) && (i < (k + m)); i++) {
    if (!erasure_contains(erasures, i)) {
      if (r < k) {
        if (i < k) {
          recover_source[r] = (unsigned char*) data[i];
        } else {
          recover_source[r] = (unsigned char*) coding[i - k];
        }
        r++;
      }
    } else {
      if (s < m) {
        if (i < k) {
          recover_target[s] = (unsigned char*) data[i];
        } else {
          recover_target[s] = (unsigned char*) coding[i - k];
        }
        s++;
      }
    }
  }

  if (m == 1) {
    // single parity decoding
    ceph_assert(1 == nerrs);
    dout(20) << "isa_decode: reconstruct using region xor [" <<
      erasures[0] << "]" << dendl;
    region_xor(recover_source, recover_target[0], k, blocksize);
    return 0;
  }

  if ((matrixtype == kVandermonde) &&
      (nerrs == 1) &&
      (erasures[0] < (k + 1))) {
    // use xor decoding if a data chunk is missing or the first coding chunk
    dout(20) << "isa_decode: reconstruct using region xor [" <<
      erasures[0] << "]" << dendl;
    ceph_assert(1 == s);
    ceph_assert(k == r);
    region_xor(recover_source, recover_target[0], k, blocksize);
    return 0;
  }

  unsigned char d[k * (m + k)];
  unsigned char decode_tbls[k * (m + k)*32];
  unsigned char *p_tbls = decode_tbls;

  int decode_index[k];

  if (nerrs > m)
    return -1;

  std::string erasure_signature; // describes a matrix configuration for caching

  // ---------------------------------------------
  // Construct b by removing error rows
  // ---------------------------------------------
  // decode_index collects the k surviving row ids; the signature string
  // ("+<survivor>...-<erased>...") is the cache key for this configuration
  for (i = 0, r = 0; i < k; i++, r++) {
    char id[128];
    while (erasure_contains(erasures, r))
      r++;
    decode_index[i] = r;
    snprintf(id, sizeof (id), "+%d", r);
    erasure_signature += id;
  }

  for (int p = 0; p < nerrs; p++) {
    char id[128];
    snprintf(id, sizeof (id), "-%d", erasures[p]);
    erasure_signature += id;
  }

  // ---------------------------------------------
  // Try to get an already computed matrix
  // ---------------------------------------------
  if (!tcache.getDecodingTableFromCache(erasure_signature, p_tbls, matrixtype, k, m)) {
    int j;
    unsigned char b[k * (m + k)];
    unsigned char c[k * (m + k)];

    for (i = 0; i < k; i++) {
      r = decode_index[i];
      for (j = 0; j < k; j++)
        b[k * i + j] = encode_coeff[k * r + j];
    }
    // ---------------------------------------------
    // Compute inverted matrix
    // ---------------------------------------------
    // --------------------------------------------------------
    // Remark: this may fail for certain Vandermonde matrices !
    // There is an advanced way trying to use different
    // source chunks to get an invertible matrix, however
    // there are also (k,m) combinations which cannot be
    // inverted when m chunks are lost and this optimizations
    // does not help. Therefor we keep the code simpler.
    // --------------------------------------------------------
    if (gf_invert_matrix(b, d, k) < 0) {
      dout(0) << "isa_decode: bad matrix" << dendl;
      return -1;
    }

    for (int p = 0; p < nerrs; p++) {
      if (erasures[p] < k) {
        // decoding matrix elements for data chunks
        for (j = 0; j < k; j++) {
          c[k * p + j] = d[k * erasures[p] + j];
        }
      } else {
        // decoding matrix element for coding chunks
        for (i = 0; i < k; i++) {
          int s = 0;
          for (j = 0; j < k; j++)
            s ^= gf_mul(d[j * k + i],
                        encode_coeff[k * erasures[p] + j]);

          c[k * p + i] = s;
        }
      }
    }

    // ---------------------------------------------
    // Initialize Decoding Table
    // ---------------------------------------------
    ec_init_tables(k, nerrs, c, decode_tbls);
    tcache.putDecodingTableToCache(erasure_signature, p_tbls, matrixtype, k, m);
  }
  // Recover data sources
  ec_encode_data(blocksize,
                 k, nerrs, decode_tbls, recover_source, recover_target);

  return 0;
}
// -----------------------------------------------------------------------------
// Buffer/chunk address alignment; chunk sizes are rounded up to this value
// (see ErasureCodeIsa::get_chunk_size).
unsigned
ErasureCodeIsaDefault::get_alignment() const
{
  return EC_ISA_ADDRESS_ALIGNMENT;
}
// -----------------------------------------------------------------------------
// Parse and sanity-check the (k,m) profile parameters.
//
// For Vandermonde matrices only a verified subset of (k,m) combinations is
// guaranteed to be MDS; out-of-range values are clamped to safe limits and
// -EINVAL is reported.
//
// @return 0 on success, non-zero (-EINVAL) if a parameter was reverted.
int ErasureCodeIsaDefault::parse(ErasureCodeProfile &profile,
                                 ostream *ss)
{
  int err = ErasureCode::parse(profile, ss);
  err |= to_int("k", profile, &k, DEFAULT_K, ss);
  err |= to_int("m", profile, &m, DEFAULT_M, ss);
  err |= sanity_check_k_m(k, m, ss);

  if (matrixtype == kVandermonde) {
    // these are verified safe values evaluated using the
    // benchmarktool and 10*(combinatoric for maximum loss) random
    // full erasures
    if (k > 32) {
      // bug fix: this message used to print m instead of the offending k
      *ss << "Vandermonde: k=" << k
          << " should be less/equal than 32 : revert to k=32" << std::endl;
      k = 32;
      err = -EINVAL;
    }

    if (m > 4) {
      *ss << "Vandermonde: m=" << m
          << " should be less than 5 to guarantee an MDS codec:"
          << " revert to m=4" << std::endl;
      m = 4;
      err = -EINVAL;
    }

    switch (m) {
    case 4:
      if (k > 21) {
        *ss << "Vandermonde: k=" << k
            << " should be less than 22 to guarantee an MDS"
            << " codec with m=4: revert to k=21" << std::endl;
        k = 21;
        err = -EINVAL;
      }
      break;
    default:
      ;
    }
  }
  return err;
}
// -----------------------------------------------------------------------------
// Build (or fetch from the shared table cache) the encoding coefficient
// matrix and the ISA-L encoding tables for the current (matrixtype, k, m).
// Locally allocated buffers are handed to the cache, which keeps either
// ours or a concurrently created one and frees the duplicate.
void
ErasureCodeIsaDefault::prepare()
{
  // setup shared encoding table and coefficients
  unsigned char** p_enc_table =
    tcache.getEncodingTable(matrixtype, k, m);

  unsigned char** p_enc_coeff =
    tcache.getEncodingCoefficient(matrixtype, k, m);

  if (!*p_enc_coeff) {
    dout(10) << "[ cache tables ] creating coeff for k=" <<
      k << " m=" << m << dendl;
    // build encoding coefficients which need to be computed once for each (k,m)
    encode_coeff = (unsigned char*) malloc(k * (m + k));

    if (matrixtype == kVandermonde)
      gf_gen_rs_matrix(encode_coeff, k + m, k);
    if (matrixtype == kCauchy)
      gf_gen_cauchy1_matrix(encode_coeff, k + m, k);

    // either our new created coefficients are stored or if they have been
    // created in the meanwhile the locally allocated coefficients will be
    // freed by setEncodingCoefficient
    encode_coeff = tcache.setEncodingCoefficient(matrixtype, k, m, encode_coeff);
  } else {
    encode_coeff = *p_enc_coeff;
  }

  if (!*p_enc_table) {
    dout(10) << "[ cache tables ] creating tables for k=" <<
      k << " m=" << m << dendl;
    // build encoding table which needs to be computed once for each (k,m)
    encode_tbls = (unsigned char*) malloc(k * (m + k)*32);
    // only the m parity rows of the coefficient matrix feed the tables
    ec_init_tables(k, m, &encode_coeff[k * k], encode_tbls);

    // either our new created table is stored or if it has been
    // created in the meanwhile the locally allocated table will be
    // freed by setEncodingTable
    encode_tbls = tcache.setEncodingTable(matrixtype, k, m, encode_tbls);
  } else {
    encode_tbls = *p_enc_table;
  }

  unsigned memory_lru_cache =
    k * (m + k) * 32 * tcache.decoding_tables_lru_length;

  dout(10) << "[ cache memory ] = " << memory_lru_cache << " bytes" <<
    " [ matrix ] = " <<
    ((matrixtype == kVandermonde) ? "Vandermonde" : "Cauchy") << dendl;

  ceph_assert((matrixtype == kVandermonde) || (matrixtype == kCauchy));
}
// -----------------------------------------------------------------------------
| 12,762 | 29.172577 | 87 | cc |
null | ceph-main/src/erasure-code/isa/ErasureCodeIsa.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 CERN (Switzerland)
*
* Author: Andreas-Joachim Peters <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
/**
* @file ErasureCodeIsa.cc
*
* @brief Erasure Code CODEC using the INTEL ISA-L library.
*
* The INTEL ISA-L library supports two pre-defined encoding matrices (cauchy = default, reed_sol_van = default)
* The default CODEC implementation using these two matrices is implemented in class ErasureCodeIsaDefault.
* ISA-L allows to use custom matrices which might be added later as implementations deriving from the base class ErasoreCodeIsa.
*/
#ifndef CEPH_ERASURE_CODE_ISA_L_H
#define CEPH_ERASURE_CODE_ISA_L_H
// -----------------------------------------------------------------------------
#include "erasure-code/ErasureCode.h"
#include "ErasureCodeIsaTableCache.h"
// -----------------------------------------------------------------------------
// Abstract ISA-L based CODEC: implements the generic ErasureCode chunk
// encode/decode interface in terms of the raw-buffer hooks isa_encode() /
// isa_decode() supplied by a concrete subclass.
class ErasureCodeIsa : public ceph::ErasureCode {
public:

  // supported pre-defined encoding matrices
  enum eMatrix {
    kVandermonde = 0, kCauchy = 1
  };

  int k;  // number of data chunks
  int m;  // number of coding chunks
  int w;  // word size

  ErasureCodeIsaTableCache &tcache;  // shared, process-wide table cache
  const char *technique;

  ErasureCodeIsa(const char *_technique,
                 ErasureCodeIsaTableCache &_tcache) :
  k(0),
  m(0),
  w(0),
  tcache(_tcache),
  technique(_technique)
  {
  }

  ~ErasureCodeIsa() override
  {
  }

  unsigned int
  get_chunk_count() const override
  {
    return k + m;
  }

  unsigned int
  get_data_chunk_count() const override
  {
    return k;
  }

  unsigned int get_chunk_size(unsigned int object_size) const override;

  int encode_chunks(const std::set<int> &want_to_encode,
                    std::map<int, ceph::buffer::list> *encoded) override;

  int decode_chunks(const std::set<int> &want_to_read,
                    const std::map<int, ceph::buffer::list> &chunks,
                    std::map<int, ceph::buffer::list> *decoded) override;

  int init(ceph::ErasureCodeProfile &profile, std::ostream *ss) override;

  // encode m coding buffers from k data buffers of 'blocksize' bytes
  virtual void isa_encode(char **data,
                          char **coding,
                          int blocksize) = 0;

  // rebuild the buffers listed in the -1-terminated 'erasures' array
  virtual int isa_decode(int *erasures,
                         char **data,
                         char **coding,
                         int blocksize) = 0;

  // required buffer/chunk address alignment in bytes
  virtual unsigned get_alignment() const = 0;

  // precompute/fetch encoding tables after parse()
  virtual void prepare() = 0;

private:
  virtual int parse(ceph::ErasureCodeProfile &profile,
                    std::ostream *ss) = 0;
};
// Default ISA-L CODEC using one of the two pre-defined encoding matrices
// (Vandermonde or Cauchy), with cached encoding/decoding tables.
class ErasureCodeIsaDefault : public ErasureCodeIsa {
private:
  int matrixtype;  // kVandermonde or kCauchy

public:

  static const std::string DEFAULT_K;
  static const std::string DEFAULT_M;

  unsigned char* encode_coeff; // encoding coefficient
  unsigned char* encode_tbls; // encoding table

  ErasureCodeIsaDefault(ErasureCodeIsaTableCache &_tcache,
                        int matrix = kVandermonde) :

  ErasureCodeIsa("default", _tcache),
  encode_coeff(0), encode_tbls(0)
  {
    matrixtype = matrix;
  }

  ~ErasureCodeIsaDefault() override
  {

  }

  void isa_encode(char **data,
                  char **coding,
                  int blocksize) override;

  // true if chunk id 'i' is in the -1-terminated erasure list
  virtual bool erasure_contains(int *erasures, int i);

  int isa_decode(int *erasures,
                 char **data,
                 char **coding,
                 int blocksize) override;

  unsigned get_alignment() const override;

  void prepare() override;

private:
  int parse(ceph::ErasureCodeProfile &profile,
            std::ostream *ss) override;
};
| 3,893 | 24.285714 | 129 | h |
null | ceph-main/src/erasure-code/isa/ErasureCodeIsaTableCache.cc | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 CERN (Switzerland)
*
* Author: Andreas-Joachim Peters <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
/**
* @file ErasureCodeIsaTableCache.cc
*
* @brief Erasure Code Isa CODEC Table Cache
*
* The INTEL ISA-L library supports two pre-defined encoding matrices (cauchy = default, reed_sol_van = default)
* The default CODEC implementation using these two matrices is implemented in class ErasureCodeIsaDefault.
* ISA-L allows to use custom matrices which might be added later as implementations deriving from the base class ErasoreCodeIsa.
*/
// -----------------------------------------------------------------------------
#include "ErasureCodeIsaTableCache.h"
#include "common/debug.h"
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix _tc_prefix(_dout)
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
// Prepend the table-cache module tag to every debug line of this file.
static std::ostream&
_tc_prefix(std::ostream* _dout)
{
  *_dout << "ErasureCodeIsaTableCache: ";
  return *_dout;
}
// -----------------------------------------------------------------------------
// Release every cached table and coefficient buffer.
//
// The inner (unsigned char*) buffers are allocated with malloc() by the
// codec (see ErasureCodeIsaDefault::prepare and the free() calls in
// setEncodingTable/setEncodingCoefficient), so they must be released with
// free() — the previous delete on them was a malloc/delete mismatch.  Only
// the pointer cells themselves and the LRU maps/lists come from new.
ErasureCodeIsaTableCache::~ErasureCodeIsaTableCache()
{
  std::lock_guard lock{codec_tables_guard};

  codec_technique_tables_t::const_iterator ttables_it;
  codec_tables_t::const_iterator tables_it;
  codec_table_t::const_iterator table_it;

  std::map<int, lru_map_t*>::const_iterator lru_map_it;
  std::map<int, lru_list_t*>::const_iterator lru_list_it;

  // clean-up all allocated coefficient buffers and their pointer cells
  for (ttables_it = encoding_coefficient.begin(); ttables_it != encoding_coefficient.end(); ++ttables_it) {
    for (tables_it = ttables_it->second.begin(); tables_it != ttables_it->second.end(); ++tables_it) {
      for (table_it = tables_it->second.begin(); table_it != tables_it->second.end(); ++table_it) {
        if (table_it->second) {
          if (*(table_it->second)) {
            free(*(table_it->second));  // buffer was malloc'ed by the codec
          }
          delete table_it->second;      // the pointer cell came from new
        }
      }
    }
  }

  // clean-up all allocated encoding tables and their pointer cells
  for (ttables_it = encoding_table.begin(); ttables_it != encoding_table.end(); ++ttables_it) {
    for (tables_it = ttables_it->second.begin(); tables_it != ttables_it->second.end(); ++tables_it) {
      for (table_it = tables_it->second.begin(); table_it != tables_it->second.end(); ++table_it) {
        if (table_it->second) {
          if (*(table_it->second)) {
            free(*(table_it->second));  // buffer was malloc'ed by the codec
          }
          delete table_it->second;      // the pointer cell came from new
        }
      }
    }
  }

  // the LRU maps and lists are new'ed lazily in getDecodingTables[Lru]
  for (lru_map_it = decoding_tables.begin(); lru_map_it != decoding_tables.end(); ++lru_map_it) {
    if (lru_map_it->second) {
      delete lru_map_it->second;
    }
  }

  for (lru_list_it = decoding_tables_lru.begin(); lru_list_it != decoding_tables_lru.end(); ++lru_list_it) {
    if (lru_list_it->second) {
      delete lru_list_it->second;
    }
  }
}
// -----------------------------------------------------------------------------
int
ErasureCodeIsaTableCache::getDecodingTableCacheSize(int matrixtype)
{
std::lock_guard lock{codec_tables_guard};
if (decoding_tables[matrixtype])
return decoding_tables[matrixtype]->size();
else
return -1;
}
// -----------------------------------------------------------------------------
// Lazily allocate and return the decoding-table LRU map for this matrix
// type.  The caller must hold codec_tables_guard.
ErasureCodeIsaTableCache::lru_map_t*
ErasureCodeIsaTableCache::getDecodingTables(int matrix_type)
{
  lru_map_t*& tbls = decoding_tables[matrix_type];
  if (tbls == nullptr) {
    tbls = new lru_map_t;
  }
  return tbls;
}
// -----------------------------------------------------------------------------
// Lazily allocate and return the decoding-table LRU list for this matrix
// type.  The caller must hold codec_tables_guard.
ErasureCodeIsaTableCache::lru_list_t*
ErasureCodeIsaTableCache::getDecodingTablesLru(int matrix_type)
{
  lru_list_t*& lru = decoding_tables_lru[matrix_type];
  if (lru == nullptr) {
    lru = new lru_list_t;
  }
  return lru;
}
// -----------------------------------------------------------------------------
// Thread-safe accessor for the (matrix,k,m) encoding-table cache cell.
unsigned char**
ErasureCodeIsaTableCache::getEncodingTable(int matrix, int k, int m)
{
  std::lock_guard lock{codec_tables_guard};
  return getEncodingTableNoLock(matrix,k,m);
}
// -----------------------------------------------------------------------------
// Return the cache cell for the (matrix,k,m) encoding table, creating an
// empty cell (pointing to nullptr) on first use.
// The caller must hold codec_tables_guard.
unsigned char**
ErasureCodeIsaTableCache::getEncodingTableNoLock(int matrix, int k, int m)
{
  // create a pointer to store an encoding table address
  if (!encoding_table[matrix][k][m]) {
    encoding_table[matrix][k][m] = new (unsigned char*);
    *encoding_table[matrix][k][m] = 0;
  }
  return encoding_table[matrix][k][m];
}
// -----------------------------------------------------------------------------
// Thread-safe accessor for the (matrix,k,m) encoding-coefficient cache cell.
unsigned char**
ErasureCodeIsaTableCache::getEncodingCoefficient(int matrix, int k, int m)
{
  std::lock_guard lock{codec_tables_guard};
  return getEncodingCoefficientNoLock(matrix,k,m);
}
// -----------------------------------------------------------------------------
// Return the cache cell for the (matrix,k,m) encoding coefficients,
// creating an empty cell (pointing to nullptr) on first use.
// The caller must hold codec_tables_guard.
unsigned char**
ErasureCodeIsaTableCache::getEncodingCoefficientNoLock(int matrix, int k, int m)
{
  // create a pointer to store an encoding coefficients address
  if (!encoding_coefficient[matrix][k][m]) {
    encoding_coefficient[matrix][k][m] = new (unsigned char*);
    *encoding_coefficient[matrix][k][m] = 0;
  }
  return encoding_coefficient[matrix][k][m];
}
// -----------------------------------------------------------------------------
// Deposit a freshly built (malloc'ed) encoding table into the cache.
// If another thread deposited one for the same (matrix,k,m) in the
// meantime, the input is freed and the cached table is returned instead.
unsigned char*
ErasureCodeIsaTableCache::setEncodingTable(int matrix, int k, int m, unsigned char* ec_in_table)
{
  std::lock_guard lock{codec_tables_guard};

  unsigned char** ec_out_table = getEncodingTableNoLock(matrix, k, m);

  if (*ec_out_table) {
    // somebody might have deposited this table in the meanwhile, so clean
    // the input table and return the stored one
    free (ec_in_table);
    return *ec_out_table;
  } else {
    // we store the provided input table and return this one
    *encoding_table[matrix][k][m] = ec_in_table;
    return ec_in_table;
  }
}
// -----------------------------------------------------------------------------
unsigned char*
ErasureCodeIsaTableCache::setEncodingCoefficient(int matrix, int k, int m, unsigned char* ec_in_coeff)
{
  // Deposit freshly computed encoding coefficients, or discard them if a
  // concurrent thread already stored a set; mirrors setEncodingTable().
  // NOTE(review): ec_in_coeff is released with free(), so it must be
  // malloc()-allocated — confirm against the callers.
  std::lock_guard lock{codec_tables_guard};
  unsigned char** ec_out_coeff = getEncodingCoefficientNoLock(matrix, k, m);
  if (*ec_out_coeff) {
    // somebody might have deposited these coefficients in the meanwhile, so clean
    // the input coefficients and return the stored ones
    free (ec_in_coeff);
    return *ec_out_coeff;
  } else {
    // we store the provided input coefficients and return these
    *encoding_coefficient[matrix][k][m] = ec_in_coeff;
    return ec_in_coeff;
  }
}
// -----------------------------------------------------------------------------
ceph::mutex*
ErasureCodeIsaTableCache::getLock()
{
  // Expose the guard mutex so callers can serialize multi-step cache
  // operations externally.
  return &codec_tables_guard;
}
// -----------------------------------------------------------------------------
bool
ErasureCodeIsaTableCache::getDecodingTableFromCache(std::string &signature,
                                                    unsigned char* &table,
                                                    int matrixtype,
                                                    int k,
                                                    int m)
{
  // --------------------------------------------------------------------------
  // LRU decoding matrix cache
  // --------------------------------------------------------------------------
  // Look up a previously computed decoding table by signature; on a hit, copy
  // it into the caller-supplied buffer and mark the entry most-recently-used.
  // Returns true on a cache hit, false otherwise.
  // NOTE(review): 'table' must point to at least k * (m + k) * 32 bytes —
  // confirm against the callers in the ISA-L codec.
  dout(12) << "[ get table ] = " << signature << dendl;
  // we try to fetch a decoding table from an LRU cache
  bool found = false;
  std::lock_guard lock{codec_tables_guard};
  lru_map_t* decode_tbls_map =
    getDecodingTables(matrixtype);
  lru_list_t* decode_tbls_lru =
    getDecodingTablesLru(matrixtype);
  if (decode_tbls_map->count(signature)) {
    dout(12) << "[ cached table ] = " << signature << dendl;
    // copy the table out of the cache
    memcpy(table, (*decode_tbls_map)[signature].second.c_str(), k * (m + k)*32);
    // find item in LRU queue and push back
    dout(12) << "[ cache size ] = " << decode_tbls_lru->size() << dendl;
    // move the entry to the front of the LRU list (most recently used)
    decode_tbls_lru->splice( (decode_tbls_lru->begin()), *decode_tbls_lru, (*decode_tbls_map)[signature].first);
    found = true;
  }
  return found;
}
// -----------------------------------------------------------------------------
void
ErasureCodeIsaTableCache::putDecodingTableToCache(std::string &signature,
                                                  unsigned char* &table,
                                                  int matrixtype,
                                                  int k,
                                                  int m)
{
  // --------------------------------------------------------------------------
  // LRU decoding matrix cache
  // --------------------------------------------------------------------------
  // Insert a decoding table under 'signature'. When the cache is full, the
  // least-recently-used entry is evicted and its buffer reused when the size
  // matches; otherwise a new buffer is allocated.
  dout(12) << "[ put table ] = " << signature << dendl;
  // we store a new table to the cache
  ceph::buffer::ptr cachetable;
  std::lock_guard lock{codec_tables_guard};
  lru_map_t* decode_tbls_map =
    getDecodingTables(matrixtype);
  lru_list_t* decode_tbls_lru =
    getDecodingTablesLru(matrixtype);
  // if necessary, shrink the LRU queue/map to stay within the size limit
  if ((int) decode_tbls_lru->size() >= ErasureCodeIsaTableCache::decoding_tables_lru_length) {
    dout(12) << "[ shrink lru ] = " << signature << dendl;
    // reuse old buffer
    cachetable = (*decode_tbls_map)[decode_tbls_lru->back()].second;
    if ((int) cachetable.length() != (k * (m + k)*32)) {
      // we need to replace this with a different size buffer
      cachetable = ceph::buffer::create(k * (m + k)*32);
    }
    // remove from map
    decode_tbls_map->erase(decode_tbls_lru->back());
    // remove from lru
    decode_tbls_lru->pop_back();
    // add to the head of lru
    decode_tbls_lru->push_front(signature);
    // add the new to the map
    (*decode_tbls_map)[signature] = std::make_pair(decode_tbls_lru->begin(), cachetable);
  } else {
    dout(12) << "[ store table ] = " << signature << dendl;
    // allocate a new buffer
    cachetable = ceph::buffer::create(k * (m + k)*32);
    decode_tbls_lru->push_front(signature);
    (*decode_tbls_map)[signature] = std::make_pair(decode_tbls_lru->begin(), cachetable);
    dout(12) << "[ cache size ] = " << decode_tbls_lru->size() << dendl;
  }
  // copy-in the new table
  memcpy(cachetable.c_str(), table, k * (m + k)*32);
}
| 11,175 | 33.073171 | 129 | cc |
null | ceph-main/src/erasure-code/isa/ErasureCodeIsaTableCache.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 CERN (Switzerland)
*
* Author: Andreas-Joachim Peters <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
/**
* @file ErasureCodeIsaTableCache.h
*
* @brief Erasure Code Isa CODEC Table Cache
*
* The INTEL ISA-L library supports two pre-defined encoding matrices (cauchy = default, reed_sol_van = default)
* The default CODEC implementation using these two matrices is implemented in class ErasureCodeIsaDefault.
* ISA-L allows to use custom matrices which might be added later as implementations deriving from the base class ErasoreCodeIsa.
*/
#ifndef CEPH_ERASURE_CODE_ISA_TABLE_CACHE_H
#define CEPH_ERASURE_CODE_ISA_TABLE_CACHE_H
// -----------------------------------------------------------------------------
#include "common/ceph_mutex.h"
#include "erasure-code/ErasureCodeInterface.h"
// -----------------------------------------------------------------------------
#include <list>
// -----------------------------------------------------------------------------
class ErasureCodeIsaTableCache {
  // ---------------------------------------------------------------------------
  // This class implements a table cache for encoding and decoding matrices.
  // Encoding matrices are shared for the same (k,m) combination. It supplies
  // a decoding matrix lru cache which is shared for identical
  // matrix types e.g. there is one cache (lru-list + lru-map) for Cauchy and
  // one for Vandermonde matrices!
  // ---------------------------------------------------------------------------
public:
  // the cache size is sufficient up to (12,4) decodings
  static const int decoding_tables_lru_length = 2516;
  // an LRU entry: iterator into the LRU list plus the cached table bytes
  typedef std::pair<std::list<std::string>::iterator, ceph::buffer::ptr> lru_entry_t;
  typedef std::map< int, unsigned char** > codec_table_t;
  typedef std::map< int, codec_table_t > codec_tables_t;
  typedef std::map< int, codec_tables_t > codec_technique_tables_t;
  typedef std::map< std::string, lru_entry_t > lru_map_t;
  typedef std::list< std::string > lru_list_t;
  ErasureCodeIsaTableCache() = default;
  virtual ~ErasureCodeIsaTableCache();
  // mutex used to protect modifications in encoding/decoding table maps
  ceph::mutex codec_tables_guard = ceph::make_mutex("isa-lru-cache");
  // fetch a cached decoding table into 'table'; true on hit
  bool getDecodingTableFromCache(std::string &signature,
                                 unsigned char* &table,
                                 int matrixtype,
                                 int k,
                                 int m);
  // insert a decoding table, evicting the LRU entry when the cache is full
  void putDecodingTableToCache(std::string&,
                               unsigned char*&,
                               int matrixtype,
                               int k,
                               int m);
  // encoding table/coefficient slot accessors; *NoLock variants require
  // the caller to hold codec_tables_guard
  unsigned char** getEncodingTable(int matrix, int k, int m);
  unsigned char** getEncodingCoefficient(int matrix, int k, int m);
  unsigned char** getEncodingTableNoLock(int matrix, int k, int m);
  unsigned char** getEncodingCoefficientNoLock(int matrix, int k, int m);
  // deposit a computed table/coefficients; returns the canonical pointer
  // (the input is freed if another thread already deposited one)
  unsigned char* setEncodingTable(int matrix, int k, int m, unsigned char*);
  unsigned char* setEncodingCoefficient(int matrix, int k, int m, unsigned char*);
  int getDecodingTableCacheSize(int matrixtype = 0);
private:
  codec_technique_tables_t encoding_coefficient; // encoding coefficients accessed via table[matrix][k][m]
  codec_technique_tables_t encoding_table; // encoding coefficients accessed via table[matrix][k][m]
  std::map<int, lru_map_t*> decoding_tables; // decoding table cache accessed via map[matrixtype]
  std::map<int, lru_list_t*> decoding_tables_lru; // decoding table lru list accessed via list[matrixtype]
  lru_map_t* getDecodingTables(int matrix_type);
  lru_list_t* getDecodingTablesLru(int matrix_type);
  ceph::mutex* getLock();
};
#endif
| 4,080 | 38.240385 | 129 | h |
null | ceph-main/src/erasure-code/isa/ErasureCodePluginIsa.cc | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 CERN (Switzerland)
*
* Author: Andreas-Joachim Peters <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
/**
* @file ErasureCodePluginIsa.cc
*
* @brief Erasure Code Plug-in class wrapping the INTEL ISA-L library
*
* The factory plug-in class allows to call individual encoding techniques.
* The INTEL ISA-L library provides two pre-defined encoding matrices
* (cauchy, reed_sol_van = default).
*/
// -----------------------------------------------------------------------------
#include "ceph_ver.h"
#include "include/buffer.h"
#include "ErasureCodePluginIsa.h"
#include "ErasureCodeIsa.h"
// -----------------------------------------------------------------------------
// Instantiate the ISA-L erasure codec selected by profile["technique"].
// Defaults to "reed_sol_van" when no technique is given; returns -ENOENT
// for an unknown technique, or the codec's init() error code on failure.
// On success *erasure_code owns the new codec instance.
int ErasureCodePluginIsa::factory(const std::string &directory,
                                  ceph::ErasureCodeProfile &profile,
                                  ceph::ErasureCodeInterfaceRef *erasure_code,
                                  std::ostream *ss)
{
  // fall back to the Vandermonde technique when the profile names none
  if (profile.find("technique") == profile.end())
    profile["technique"] = "reed_sol_van";
  const std::string t = profile.find("technique")->second;

  ErasureCodeIsa *interface = nullptr;
  if (t == "reed_sol_van") {
    interface = new ErasureCodeIsaDefault(tcache,
                                          ErasureCodeIsaDefault::kVandermonde);
  } else if (t == "cauchy") {
    interface = new ErasureCodeIsaDefault(tcache,
                                          ErasureCodeIsaDefault::kCauchy);
  } else {
    *ss << "technique=" << t << " is not a valid coding technique. "
        << " Choose one of the following: "
        << "reed_sol_van,"
        << "cauchy" << std::endl;
    return -ENOENT;
  }

  int r = interface->init(profile, ss);
  if (r) {
    delete interface;
    return r;
  }
  *erasure_code = ceph::ErasureCodeInterfaceRef(interface);
  return 0;
}
// -----------------------------------------------------------------------------
// Plugin ABI hook: report the Ceph version this plugin was built against,
// so the loader can reject mismatched plugins.
const char *__erasure_code_version()
{
  return CEPH_GIT_NICE_VER;
}
// -----------------------------------------------------------------------------
// Plugin ABI entry point: register this plugin with the global registry
// under 'plugin_name'. The 'directory' argument is unused here.
int __erasure_code_init(char *plugin_name, char *directory)
{
  auto& instance = ceph::ErasureCodePluginRegistry::instance();
  return instance.add(plugin_name, new ErasureCodePluginIsa());
}
| 2,657 | 31.024096 | 81 | cc |
null | ceph-main/src/erasure-code/isa/ErasureCodePluginIsa.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_PLUGIN_ISA_H
#define CEPH_ERASURE_CODE_PLUGIN_ISA_H
#include "erasure-code/ErasureCodePlugin.h"
#include "ErasureCodeIsaTableCache.h"
// Plugin wrapper for the ISA-L erasure codec. The table cache is shared by
// all codec instances produced by this plugin, so computed matrices are
// reused across pools with the same (k,m) parameters.
class ErasureCodePluginIsa : public ceph::ErasureCodePlugin {
public:
  ErasureCodeIsaTableCache tcache;
  // instantiate the codec selected by profile["technique"]
  int factory(const std::string &directory,
              ceph::ErasureCodeProfile &profile,
              ceph::ErasureCodeInterfaceRef *erasure_code,
              std::ostream *ss) override;
};
#endif
| 1,030 | 28.457143 | 71 | h |
null | ceph-main/src/erasure-code/isa/xor_op.cc | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 CERN (Switzerland)
* * Author: Andreas-Joachim Peters <[email protected]> *
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
// -----------------------------------------------------------------------------
#include "xor_op.h"
#include <stdio.h>
#include <string.h>
#include "arch/intel.h"
#include "include/ceph_assert.h"
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
// Byte-granular XOR: fold the source region [cw, ew) into dw in place.
// Used for trailing bytes that do not satisfy vector alignment/size.
// -----------------------------------------------------------------------------
void
byte_xor(unsigned char* cw, unsigned char* dw, unsigned char* ew)
{
  for (; cw < ew; ++cw, ++dw)
    *dw ^= *cw;
}
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
void
// -----------------------------------------------------------------------------
// Word-granular XOR: fold the source region [cw, ew) into dw in place using
// vector_op_t words (8 or 16 bytes depending on compiler support).
// All three pointers must be EC_ISA_VECTOR_OP_WORDSIZE-aligned.
vector_xor(vector_op_t* cw,
           vector_op_t* dw,
           vector_op_t* ew)
// -----------------------------------------------------------------------------
{
  ceph_assert(is_aligned(cw, EC_ISA_VECTOR_OP_WORDSIZE));
  ceph_assert(is_aligned(dw, EC_ISA_VECTOR_OP_WORDSIZE));
  ceph_assert(is_aligned(ew, EC_ISA_VECTOR_OP_WORDSIZE));
  while (cw < ew) {
    *dw++ ^= *cw++;
  }
}
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
void
// -----------------------------------------------------------------------------
// Compute parity = src[0] ^ src[1] ^ ... ^ src[src_size-1] over 'size' bytes.
// Picks the fastest available path: SSE2 region xor when all buffers are
// vector-aligned and the CPU supports it, otherwise a vector_op_t loop; any
// unaligned/odd-sized tail is finished with byte_xor().
region_xor(unsigned char** src,
           unsigned char* parity,
           int src_size,
           unsigned size)
{
  if (!size) {
    // nothing to do
    return;
  }
  if (!src_size) {
    // nothing to do
    return;
  }
  if (src_size == 1) {
    // just copy source to parity
    memcpy(parity, src[0], size);
    return;
  }
  unsigned size_left = size;
  // ----------------------------------------------------------
  // region or vector XOR operations require aligned addresses
  // ----------------------------------------------------------
  bool src_aligned = true;
  for (int i = 0; i < src_size; i++) {
    src_aligned &= is_aligned(src[i], EC_ISA_VECTOR_OP_WORDSIZE);
  }
  if (src_aligned &&
      is_aligned(parity, EC_ISA_VECTOR_OP_WORDSIZE)) {
#ifdef __x86_64__
    if (ceph_arch_intel_sse2) {
      // -----------------------------
      // use SSE2 region xor function
      // -----------------------------
      unsigned region_size =
        (size / EC_ISA_VECTOR_SSE2_WORDSIZE) * EC_ISA_VECTOR_SSE2_WORDSIZE;
      size_left -= region_size;
      // 64-byte region xor
      region_sse2_xor((char**) src, (char*) parity, src_size, region_size);
    } else
#endif
    {
      // --------------------------------------------
      // use region xor based on vector xor operation
      // --------------------------------------------
      unsigned vector_words = size / EC_ISA_VECTOR_OP_WORDSIZE;
      unsigned vector_size = vector_words * EC_ISA_VECTOR_OP_WORDSIZE;
      // seed the parity with the first source, then xor in the rest
      memcpy(parity, src[0], vector_size);
      size_left -= vector_size;
      vector_op_t* p_vec = (vector_op_t*) parity;
      for (int i = 1; i < src_size; i++) {
        vector_op_t* s_vec = (vector_op_t*) src[i];
        vector_op_t* e_vec = s_vec + vector_words;
        vector_xor(s_vec, p_vec, e_vec);
      }
    }
  }
  if (size_left) {
    // --------------------------------------------------
    // xor the not aligned part with byte-wise region xor
    // --------------------------------------------------
    memcpy(parity + size - size_left, src[0] + size - size_left, size_left);
    for (int i = 1; i < src_size; i++) {
      byte_xor(src[i] + size - size_left, parity + size - size_left, src[i] + size);
    }
  }
}
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
void
// -----------------------------------------------------------------------------
// SSE2 implementation of the region XOR: processes 64 bytes per iteration
// (4 x 16-byte xmm registers) with non-temporal stores to avoid polluting
// the cache, finished by an sfence to order the streaming writes.
// Preconditions: size is a multiple of 64; all buffers are 16-byte aligned
// (movdqa faults on unaligned addresses).
// NOTE(review): vbuf has room for 256 sources only — callers must guarantee
// src_size <= 256; confirm this bound is enforced upstream.
region_sse2_xor(char** src,
                char* parity,
                int src_size,
                unsigned size)
// -----------------------------------------------------------------------------
{
#ifdef __x86_64__
  ceph_assert(!(size % EC_ISA_VECTOR_SSE2_WORDSIZE));
  unsigned char* p;
  int d, l;
  unsigned i;
  unsigned char* vbuf[256];
  for (int v = 0; v < src_size; v++) {
    vbuf[v] = (unsigned char*) src[v];
  }
  l = src_size;
  p = (unsigned char*) parity;
  for (i = 0; i < size; i += EC_ISA_VECTOR_SSE2_WORDSIZE) {
    // load 64 bytes of the first source into xmm0..xmm3
    asm volatile("movdqa %0,%%xmm0" : : "m" (vbuf[0][i]));
    asm volatile("movdqa %0,%%xmm1" : : "m" (vbuf[0][i + 16]));
    asm volatile("movdqa %0,%%xmm2" : : "m" (vbuf[0][i + 32]));
    asm volatile("movdqa %0,%%xmm3" : : "m" (vbuf[0][i + 48]));
    for (d = 1; d < l; d++) {
      // xor in the matching 64 bytes of each remaining source
      asm volatile("movdqa %0,%%xmm4" : : "m" (vbuf[d][i]));
      asm volatile("movdqa %0,%%xmm5" : : "m" (vbuf[d][i + 16]));
      asm volatile("movdqa %0,%%xmm6" : : "m" (vbuf[d][i + 32]));
      asm volatile("movdqa %0,%%xmm7" : : "m" (vbuf[d][i + 48]));
      asm volatile("pxor %xmm4,%xmm0");
      asm volatile("pxor %xmm5,%xmm1");
      asm volatile("pxor %xmm6,%xmm2");
      asm volatile("pxor %xmm7,%xmm3");
    }
    // stream the accumulated parity out without caching it
    asm volatile("movntdq %%xmm0,%0" : "=m" (p[i]));
    asm volatile("movntdq %%xmm1,%0" : "=m" (p[i + 16]));
    asm volatile("movntdq %%xmm2,%0" : "=m" (p[i + 32]));
    asm volatile("movntdq %%xmm3,%0" : "=m" (p[i + 48]));
  }
  asm volatile("sfence" : : : "memory");
#endif // __x86_64__
  return;
}
| 6,105 | 32.184783 | 412 | cc |
null | ceph-main/src/erasure-code/isa/xor_op.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 CERN (Switzerland)
* \
* Author: Andreas-Joachim Peters <[email protected]> \
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef EC_ISA_XOR_OP_H
#define EC_ISA_XOR_OP_H
// -----------------------------------------------------------------------------
#include <assert.h>
#include <stdint.h>
// -----------------------------------------------------------------------------
// -------------------------------------------------------------------------
// declaration of 64/128-bit vector operations depending on availability
// -------------------------------------------------------------------------
// -------------------------------------------------------------------------
#define EC_ISA_ADDRESS_ALIGNMENT 32u
#define EC_ISA_VECTOR_SSE2_WORDSIZE 64u
#if __GNUC__ > 4 || \
  ( (__GNUC__ == 4) && (__GNUC_MINOR__ >= 4) ) ||\
  (__clang__ == 1 )
#ifdef EC_ISA_VECTOR_OP_DEBUG
#pragma message "* using 128-bit vector operations in " __FILE__
#endif
// -------------------------------------------------------------------------
// use a 128-bit vector word (GCC vector extension)
// -------------------------------------------------------------------------
typedef long vector_op_t __attribute__((vector_size(16)));
#define EC_ISA_VECTOR_OP_WORDSIZE 16
#else
// -------------------------------------------------------------------------
// fall back to a 64-bit scalar word
// -------------------------------------------------------------------------
typedef unsigned long long vector_op_t;
#define EC_ISA_VECTOR_OP_WORDSIZE 8
#endif
// -------------------------------------------------------------------------
// check if a pointer is aligned to byte_count
// -------------------------------------------------------------------------
#define is_aligned(POINTER, BYTE_COUNT) \
  (((uintptr_t)(const void *)(POINTER)) % (BYTE_COUNT) == 0)
// -------------------------------------------------------------------------
// compute byte-wise XOR of cw and dw block, ew contains the end address of cw
// -------------------------------------------------------------------------
void
byte_xor(unsigned char* cw, unsigned char* dw, unsigned char* ew);
// -------------------------------------------------------------------------
// compute word-wise XOR of cw and dw block, ew contains the end address of cw
// -------------------------------------------------------------------------
void
vector_xor(vector_op_t* cw, vector_op_t* dw, vector_op_t* ew);
// -------------------------------------------------------------------------
// compute region XOR like parity = src[0] ^ src[1] ... ^ src[src_size-1]
// -------------------------------------------------------------------------
void
region_xor(unsigned char** src, unsigned char* parity, int src_size, unsigned size);
// -------------------------------------------------------------------------
// compute region XOR like parity = src[0] ^ src[1] ... ^ src[src_size-1]
// using SSE2 64-byte operations
// -------------------------------------------------------------------------
void
region_sse2_xor(char** src /* array of 64-byte aligned source pointer to xor */,
                char* parity /* 64-byte aligned output pointer containing the parity */,
                int src_size /* size of the source pointer array */,
                unsigned size /* size of the region to xor */);
#endif // EC_ISA_XOR_OP_H
| 3,978 | 44.215909 | 206 | h |
null | ceph-main/src/erasure-code/jerasure/ErasureCodeJerasure.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013,2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include "common/debug.h"
#include "ErasureCodeJerasure.h"
extern "C" {
#include "jerasure.h"
#include "reed_sol.h"
#include "galois.h"
#include "cauchy.h"
#include "liberation.h"
}
#define LARGEST_VECTOR_WORDSIZE 16
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix _prefix(_dout)
using std::ostream;
using std::map;
using std::set;
using ceph::bufferlist;
using ceph::ErasureCodeProfile;
// Logging prefix used by dout() for every message in this translation unit.
static ostream& _prefix(std::ostream* _dout)
{
  return *_dout << "ErasureCodeJerasure: ";
}
// Validate the profile, record the concrete technique back into it, and
// build the technique-specific matrices/schedules via prepare().
// Returns 0 on success or the parse error code.
int ErasureCodeJerasure::init(ErasureCodeProfile& profile, ostream *ss)
{
  int err = 0;
  dout(10) << "technique=" << technique << dendl;
  profile["technique"] = technique;
  err |= parse(profile, ss);
  if (err)
    return err;
  prepare();
  return ErasureCode::init(profile, ss);
}
// Parse the common k/m/w parameters and validate the optional chunk
// mapping. Errors are accumulated with |= so every problem is reported.
int ErasureCodeJerasure::parse(ErasureCodeProfile &profile,
			       ostream *ss)
{
  int err = ErasureCode::parse(profile, ss);
  err |= to_int("k", profile, &k, DEFAULT_K, ss);
  err |= to_int("m", profile, &m, DEFAULT_M, ss);
  err |= to_int("w", profile, &w, DEFAULT_W, ss);
  // a custom chunk mapping must cover exactly all k+m chunks
  if (chunk_mapping.size() > 0 && (int)chunk_mapping.size() != k + m) {
    *ss << "mapping " << profile.find("mapping")->second
	<< " maps " << chunk_mapping.size() << " chunks instead of"
	<< " the expected " << k + m << " and will be ignored" << std::endl;
    chunk_mapping.clear();
    err = -EINVAL;
  }
  err |= sanity_check_k_m(k, m, ss);
  return err;
}
// Compute the per-chunk size for an object, honouring the technique's
// alignment requirement. With per_chunk_alignment each chunk is rounded up
// to 'alignment' individually; otherwise the whole object is padded so the
// padded length divides evenly into k aligned chunks.
unsigned int ErasureCodeJerasure::get_chunk_size(unsigned int object_size) const
{
  unsigned alignment = get_alignment();
  if (per_chunk_alignment) {
    unsigned chunk_size = object_size / k;
    if (object_size % k)
      chunk_size++;
    dout(20) << "get_chunk_size: chunk_size " << chunk_size
	     << " must be modulo " << alignment << dendl;
    ceph_assert(alignment <= chunk_size);
    unsigned modulo = chunk_size % alignment;
    if (modulo) {
      dout(10) << "get_chunk_size: " << chunk_size
	       << " padded to " << chunk_size + alignment - modulo << dendl;
      chunk_size += alignment - modulo;
    }
    return chunk_size;
  } else {
    // pad the object up to the next multiple of 'alignment', which is a
    // multiple of k by construction, then split evenly
    unsigned tail = object_size % alignment;
    unsigned padded_length = object_size + ( tail ? ( alignment - tail ) : 0 );
    ceph_assert(padded_length % k == 0);
    return padded_length / k;
  }
}
// Encode in place: entries 0..k-1 of 'encoded' are the data chunks and
// k..k+m-1 the coding chunks. All bufferlists must already be allocated
// and of identical length.
int ErasureCodeJerasure::encode_chunks(const set<int> &want_to_encode,
				       map<int, bufferlist> *encoded)
{
  char *chunks[k + m]; // VLA: relies on the GCC/Clang extension
  for (int i = 0; i < k + m; i++)
    chunks[i] = (*encoded)[i].c_str();
  jerasure_encode(&chunks[0], &chunks[k], (*encoded)[0].length());
  return 0;
}
// Rebuild missing chunks: any index absent from 'chunks' is recorded as an
// erasure and reconstructed into the caller-allocated buffers in 'decoded'.
// The erasures array is -1 terminated, as the jerasure API requires.
int ErasureCodeJerasure::decode_chunks(const set<int> &want_to_read,
				       const map<int, bufferlist> &chunks,
				       map<int, bufferlist> *decoded)
{
  unsigned blocksize = (*chunks.begin()).second.length();
  int erasures[k + m + 1]; // VLA (GCC/Clang extension); +1 for the -1 terminator
  int erasures_count = 0;
  char *data[k];
  char *coding[m];
  for (int i = 0; i < k + m; i++) {
    if (chunks.find(i) == chunks.end()) {
      erasures[erasures_count] = i;
      erasures_count++;
    }
    if (i < k)
      data[i] = (*decoded)[i].c_str();
    else
      coding[i - k] = (*decoded)[i].c_str();
  }
  erasures[erasures_count] = -1;
  ceph_assert(erasures_count > 0);
  return jerasure_decode(erasures, data, coding, blocksize);
}
// Return true when 'value' is prime. Only the 55 primes up to 257 are
// checked, which covers every word size the jerasure techniques accept.
bool ErasureCodeJerasure::is_prime(int value)
{
  static const int primes[55] = {
    2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,
    73,79,83,89,97,101,103,107,109,113,127,131,137,139,149,
    151,157,163,167,173,179,
    181,191,193,197,199,211,223,227,229,233,239,241,251,257
  };
  for (int p : primes) {
    if (p == value)
      return true;
  }
  return false;
}
//
// ErasureCodeJerasureReedSolomonVandermonde
//
// Encode with the Vandermonde generator matrix built by prepare().
void ErasureCodeJerasureReedSolomonVandermonde::jerasure_encode(char **data,
                                                                char **coding,
                                                                int blocksize)
{
  jerasure_matrix_encode(k, m, w, matrix, data, coding, blocksize);
}
// Decode the -1 terminated erasure list; the fourth argument (row_k_ones=1)
// tells jerasure the matrix's first row is all ones.
int ErasureCodeJerasureReedSolomonVandermonde::jerasure_decode(int *erasures,
                                                               char **data,
                                                               char **coding,
                                                               int blocksize)
{
  return jerasure_matrix_decode(k, m, w, matrix, 1,
				erasures, data, coding, blocksize);
}
// Buffer alignment requirement: per-chunk SIMD alignment when requested,
// otherwise an object-level multiple of k*w words (bumped to the SIMD word
// size when w words are not already a multiple of it).
unsigned ErasureCodeJerasureReedSolomonVandermonde::get_alignment() const
{
  if (per_chunk_alignment) {
    return w * LARGEST_VECTOR_WORDSIZE;
  } else {
    unsigned alignment = k*w*sizeof(int);
    if ( ((w*sizeof(int))%LARGEST_VECTOR_WORDSIZE) )
      alignment = k*w*LARGEST_VECTOR_WORDSIZE;
    return alignment;
  }
}
// Validate technique-specific parameters: w must be 8, 16 or 32.
// NOTE(review): despite the "revert to" wording in the message, w is not
// actually reset here — parse fails with -EINVAL and init() aborts.
int ErasureCodeJerasureReedSolomonVandermonde::parse(ErasureCodeProfile &profile,
						     ostream *ss)
{
  int err = 0;
  err |= ErasureCodeJerasure::parse(profile, ss);
  if (w != 8 && w != 16 && w != 32) {
    *ss << "ReedSolomonVandermonde: w=" << w
	<< " must be one of {8, 16, 32} : revert to " << DEFAULT_W << std::endl;
    err = -EINVAL;
  }
  err |= to_bool("jerasure-per-chunk-alignment", profile,
		 &per_chunk_alignment, "false", ss);
  return err;
}
// Build the Vandermonde generator matrix for the configured (k, m, w).
// NOTE(review): the matrix appears to be malloc'd by jerasure and is not
// freed by this class — confirm lifetime is process-long by design.
void ErasureCodeJerasureReedSolomonVandermonde::prepare()
{
  matrix = reed_sol_vandermonde_coding_matrix(k, m, w);
}
//
// ErasureCodeJerasureReedSolomonRAID6
//
// Encode with the optimized RAID6 (P+Q) routine; m is fixed at 2.
void ErasureCodeJerasureReedSolomonRAID6::jerasure_encode(char **data,
                                                          char **coding,
                                                          int blocksize)
{
  reed_sol_r6_encode(k, w, data, coding, blocksize);
}
// Decode via the generic matrix decoder using the RAID6 coding matrix.
int ErasureCodeJerasureReedSolomonRAID6::jerasure_decode(int *erasures,
							 char **data,
							 char **coding,
							 int blocksize)
{
  return jerasure_matrix_decode(k, m, w, matrix, 1, erasures, data, coding, blocksize);
}
// Same alignment policy as the Vandermonde variant: per-chunk SIMD
// alignment when requested, otherwise an object-level k*w-word multiple.
unsigned ErasureCodeJerasureReedSolomonRAID6::get_alignment() const
{
  if (per_chunk_alignment) {
    return w * LARGEST_VECTOR_WORDSIZE;
  } else {
    unsigned alignment = k*w*sizeof(int);
    if ( ((w*sizeof(int))%LARGEST_VECTOR_WORDSIZE) )
      alignment = k*w*LARGEST_VECTOR_WORDSIZE;
    return alignment;
  }
}
// Validate RAID6 constraints: m must equal the default (2) and w must be
// 8, 16 or 32. NOTE(review): as in the Vandermonde variant, the "revert"
// wording is not acted on — the function fails with -EINVAL instead.
int ErasureCodeJerasureReedSolomonRAID6::parse(ErasureCodeProfile &profile,
					       ostream *ss)
{
  int err = ErasureCodeJerasure::parse(profile, ss);
  if (m != stoi(DEFAULT_M)) {
    *ss << "ReedSolomonRAID6: m=" << m
	<< " must be 2 for RAID6: revert to 2" << std::endl;
    err = -EINVAL;
  }
  if (w != 8 && w != 16 && w != 32) {
    *ss << "ReedSolomonRAID6: w=" << w
	<< " must be one of {8, 16, 32} : revert to 8 " << std::endl;
    err = -EINVAL;
  }
  return err;
}
// Build the fixed RAID6 (P+Q) coding matrix for (k, w).
void ErasureCodeJerasureReedSolomonRAID6::prepare()
{
  matrix = reed_sol_r6_coding_matrix(k, w);
}
//
// ErasureCodeJerasureCauchy
//
// Encode using the precomputed XOR schedule; packetsize controls the unit
// of the scheduled XOR operations.
void ErasureCodeJerasureCauchy::jerasure_encode(char **data,
						char **coding,
						int blocksize)
{
  jerasure_schedule_encode(k, m, w, schedule,
			   data, coding, blocksize, packetsize);
}
// Decode lazily from the bitmatrix: the decoding schedule is derived on
// demand for the given erasure pattern (last argument smart=1).
int ErasureCodeJerasureCauchy::jerasure_decode(int *erasures,
					       char **data,
					       char **coding,
					       int blocksize)
{
  return jerasure_schedule_decode_lazy(k, m, w, bitmatrix,
				       erasures, data, coding, blocksize, packetsize, 1);
}
// Buffer alignment for bitmatrix techniques: the natural unit is
// w*packetsize bytes per chunk (rounded up to the SIMD word size when
// per-chunk alignment is requested), or k times that for whole objects.
unsigned ErasureCodeJerasureCauchy::get_alignment() const
{
  if (per_chunk_alignment) {
    unsigned alignment = w * packetsize;
    unsigned modulo = alignment % LARGEST_VECTOR_WORDSIZE;
    if (modulo)
      alignment += LARGEST_VECTOR_WORDSIZE - modulo;
    return alignment;
  } else {
    unsigned alignment = k*w*packetsize*sizeof(int);
    if ( ((w*packetsize*sizeof(int))%LARGEST_VECTOR_WORDSIZE) )
      alignment = k*w*packetsize*LARGEST_VECTOR_WORDSIZE;
    return alignment;
  }
}
// Parse the Cauchy-specific packetsize and alignment options on top of the
// common k/m/w handling.
int ErasureCodeJerasureCauchy::parse(ErasureCodeProfile &profile,
				     ostream *ss)
{
  int err = ErasureCodeJerasure::parse(profile, ss);
  err |= to_int("packetsize", profile, &packetsize, DEFAULT_PACKETSIZE, ss);
  err |= to_bool("jerasure-per-chunk-alignment", profile,
		 &per_chunk_alignment, "false", ss);
  return err;
}
// Convert the generator matrix into a bitmatrix and derive the optimized
// ("smart") XOR schedule used by encode; both are freed in the destructor.
void ErasureCodeJerasureCauchy::prepare_schedule(int *matrix)
{
  bitmatrix = jerasure_matrix_to_bitmatrix(k, m, w, matrix);
  schedule = jerasure_smart_bitmatrix_to_schedule(k, m, w, bitmatrix);
}
// Release the bitmatrix (malloc'd by jerasure) and the schedule (freed via
// the dedicated jerasure helper).
ErasureCodeJerasureCauchy::~ErasureCodeJerasureCauchy()
{
  if (bitmatrix)
    free(bitmatrix);
  if (schedule)
    jerasure_free_schedule(schedule);
}
//
// ErasureCodeJerasureCauchyOrig
//
// Build the original (unoptimized) Cauchy matrix, derive the schedule,
// then discard the intermediate matrix.
void ErasureCodeJerasureCauchyOrig::prepare()
{
  int *matrix = cauchy_original_coding_matrix(k, m, w);
  prepare_schedule(matrix);
  free(matrix);
}
//
// ErasureCodeJerasureCauchyGood
//
// Build the optimized ("good") Cauchy matrix with fewer ones, derive the
// schedule, then discard the intermediate matrix.
void ErasureCodeJerasureCauchyGood::prepare()
{
  int *matrix = cauchy_good_general_coding_matrix(k, m, w);
  prepare_schedule(matrix);
  free(matrix);
}
//
// ErasureCodeJerasureLiberation
//
// Release the bitmatrix (malloc'd by jerasure) and the schedule (freed via
// the dedicated jerasure helper).
ErasureCodeJerasureLiberation::~ErasureCodeJerasureLiberation()
{
  if (bitmatrix)
    free(bitmatrix);
  if (schedule)
    jerasure_free_schedule(schedule);
}
// Encode using the precomputed liberation XOR schedule.
void ErasureCodeJerasureLiberation::jerasure_encode(char **data,
                                                    char **coding,
                                                    int blocksize)
{
  jerasure_schedule_encode(k, m, w, schedule, data,
			   coding, blocksize, packetsize);
}
// Decode lazily from the liberation bitmatrix for the given erasure
// pattern (last argument smart=1).
int ErasureCodeJerasureLiberation::jerasure_decode(int *erasures,
                                                   char **data,
                                                   char **coding,
                                                   int blocksize)
{
  return jerasure_schedule_decode_lazy(k, m, w, bitmatrix, erasures, data,
				       coding, blocksize, packetsize, 1);
}
// Object-level alignment: a multiple of k*w*packetsize words, bumped to
// the SIMD word size when the per-chunk unit is not already a multiple.
unsigned ErasureCodeJerasureLiberation::get_alignment() const
{
  unsigned alignment = k*w*packetsize*sizeof(int);
  if ( ((w*packetsize*sizeof(int))%LARGEST_VECTOR_WORDSIZE) )
    alignment = k*w*packetsize*LARGEST_VECTOR_WORDSIZE;
  return alignment;
}
// Liberation codes require k <= w; report a parse error otherwise.
bool ErasureCodeJerasureLiberation::check_k(ostream *ss) const
{
  if (k <= w)
    return true;
  *ss << "k=" << k << " must be less than or equal to w=" << w << std::endl;
  return false;
}
// Liberation codes require w to be a prime greater than two.
bool ErasureCodeJerasureLiberation::check_w(ostream *ss) const
{
  if (w <= 2 || !is_prime(w)) {
    *ss <<  "w=" << w << " must be greater than two and be prime" << std::endl;
    return false;
  } else {
    return true;
  }
}
// packetsize is mandatory for the scheduled techniques; reject zero.
bool ErasureCodeJerasureLiberation::check_packetsize_set(ostream *ss) const
{
  if (packetsize == 0) {
    *ss << "packetsize=" << packetsize << " must be set" << std::endl;
    return false;
  } else {
    return true;
  }
}
// packetsize must be a multiple of sizeof(int), the unit the jerasure
// scheduled XOR routines operate on.
bool ErasureCodeJerasureLiberation::check_packetsize(ostream *ss) const
{
  if ((packetsize%(sizeof(int))) != 0) {
    *ss << "packetsize=" << packetsize
	<< " must be a multiple of sizeof(int) = " << sizeof(int) << std::endl;
    return false;
  } else {
    return true;
  }
}
// Reset k, w and packetsize to their defaults, both in the profile and in
// the member variables, after an invalid combination was detected.
int ErasureCodeJerasureLiberation::revert_to_default(ErasureCodeProfile &profile,
						     ostream *ss)
{
  int err = 0;
  *ss << "reverting to k=" << DEFAULT_K << ", w="
      << DEFAULT_W << ", packetsize=" << DEFAULT_PACKETSIZE << std::endl;
  profile["k"] = DEFAULT_K;
  err |= to_int("k", profile, &k, DEFAULT_K, ss);
  profile["w"] = DEFAULT_W;
  err |= to_int("w", profile, &w, DEFAULT_W, ss);
  profile["packetsize"] = DEFAULT_PACKETSIZE;
  err |= to_int("packetsize", profile, &packetsize, DEFAULT_PACKETSIZE, ss);
  return err;
}
// Validate the liberation constraints (k <= w, w prime > 2, packetsize set
// and int-aligned); on any violation fall back to the default parameters
// and still report -EINVAL so the caller knows the profile was bad.
int ErasureCodeJerasureLiberation::parse(ErasureCodeProfile &profile,
					 ostream *ss)
{
  int err = ErasureCodeJerasure::parse(profile, ss);
  err |= to_int("packetsize", profile, &packetsize, DEFAULT_PACKETSIZE, ss);
  bool error = false;
  if (!check_k(ss))
    error = true;
  if (!check_w(ss))
    error = true;
  if (!check_packetsize_set(ss) || !check_packetsize(ss))
    error = true;
  if (error) {
    revert_to_default(profile, ss);
    err = -EINVAL;
  }
  return err;
}
// Build the liberation bitmatrix and derive the optimized XOR schedule;
// both are released in the destructor.
void ErasureCodeJerasureLiberation::prepare()
{
  bitmatrix = liberation_coding_bitmatrix(k, w);
  schedule = jerasure_smart_bitmatrix_to_schedule(k, m, w, bitmatrix);
}
//
// ErasureCodeJerasureBlaumRoth
//
// Blaum-Roth requires w such that w+1 is prime (and w > 2); w == 7 is
// additionally tolerated for backward compatibility with Firefly-era pools.
bool ErasureCodeJerasureBlaumRoth::check_w(ostream *ss) const
{
  // back in Firefly, w = 7 was the default and produced usable
  // chunks. Tolerate this value for backward compatibility.
  if (w == 7)
    return true;
  if (w <= 2 || !is_prime(w+1)) {
    *ss <<  "w=" << w << " must be greater than two and "
	<< "w+1 must be prime" << std::endl;
    return false;
  } else {
    return true;
  }
}
// Build the Blaum-Roth bitmatrix and derive the optimized XOR schedule.
void ErasureCodeJerasureBlaumRoth::prepare()
{
  bitmatrix = blaum_roth_coding_bitmatrix(k, w);
  schedule = jerasure_smart_bitmatrix_to_schedule(k, m, w, bitmatrix);
}
//
// ErasureCodeJerasureLiber8tion
//
// Parse a liber8tion profile. The technique fixes m and w: any other
// value is rejected (the message says "revert to" but the member is
// left as parsed; only the error code signals the failure). k and
// packetsize come from the profile; when a check fails the parameters
// are reverted to their defaults and -EINVAL is returned.
int ErasureCodeJerasureLiber8tion::parse(ErasureCodeProfile &profile,
					 ostream *ss)
{
  int err = ErasureCodeJerasure::parse(profile, ss);
  if (m != stoi(DEFAULT_M)) {
    *ss << "liber8tion: m=" << m << " must be " << DEFAULT_M
	<< " for liber8tion: revert to " << DEFAULT_M << std::endl;
    err = -EINVAL;
  }
  if (w != stoi(DEFAULT_W)) {
    *ss << "liber8tion: w=" << w << " must be " << DEFAULT_W
	<< " for liber8tion: revert to " << DEFAULT_W << std::endl;
    err = -EINVAL;
  }
  err |= to_int("packetsize", profile, &packetsize, DEFAULT_PACKETSIZE, ss);
  bool error = false;
  if (!check_k(ss))
    error = true;
  // liber8tion only requires packetsize to be set; there is no value
  // alignment check as in the liberation base class.
  if (!check_packetsize_set(ss))
    error = true;
  if (error) {
    revert_to_default(profile, ss);
    err = -EINVAL;
  }
  return err;
}
// Precompute the liber8tion bit-matrix (parameterized by k only, since
// m and w are fixed by the technique) and the derived smart schedule.
void ErasureCodeJerasureLiber8tion::prepare()
{
  bitmatrix = liber8tion_coding_bitmatrix(k);
  schedule = jerasure_smart_bitmatrix_to_schedule(k, m, w, bitmatrix);
}
| 14,436 | 26.978682 | 87 | cc |
null | ceph-main/src/erasure-code/jerasure/ErasureCodeJerasure.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013, 2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_JERASURE_H
#define CEPH_ERASURE_CODE_JERASURE_H
#include "erasure-code/ErasureCode.h"
// Common base for all jerasure-backed erasure code techniques.
// Derived classes implement the technique-specific primitives
// (jerasure_encode/jerasure_decode), the buffer alignment requirement
// and prepare(), which precomputes matrices or schedules; this class
// supplies the generic chunk accounting and profile parsing.
class ErasureCodeJerasure : public ceph::ErasureCode {
public:
  int k;                        // number of data chunks
  std::string DEFAULT_K;        // technique-specific default for k
  int m;                        // number of coding chunks
  std::string DEFAULT_M;        // technique-specific default for m
  int w;                        // jerasure word size parameter
  std::string DEFAULT_W;        // technique-specific default for w
  const char *technique;        // technique name, e.g. "reed_sol_van"
  std::string rule_root;
  std::string rule_failure_domain;
  bool per_chunk_alignment;
  explicit ErasureCodeJerasure(const char *_technique) :
    k(0),
    DEFAULT_K("2"),
    m(0),
    DEFAULT_M("1"),
    w(0),
    DEFAULT_W("8"),
    technique(_technique),
    per_chunk_alignment(false)
  {}
  ~ErasureCodeJerasure() override {}
  // Total number of chunks (data + coding).
  unsigned int get_chunk_count() const override {
    return k + m;
  }
  unsigned int get_data_chunk_count() const override {
    return k;
  }
  unsigned int get_chunk_size(unsigned int object_size) const override;
  int encode_chunks(const std::set<int> &want_to_encode,
		    std::map<int, ceph::buffer::list> *encoded) override;
  int decode_chunks(const std::set<int> &want_to_read,
		    const std::map<int, ceph::buffer::list> &chunks,
		    std::map<int, ceph::buffer::list> *decoded) override;
  int init(ceph::ErasureCodeProfile &profile, std::ostream *ss) override;
  // Technique-specific encode primitive over raw chunk buffers.
  virtual void jerasure_encode(char **data,
                               char **coding,
                               int blocksize) = 0;
  // Technique-specific decode primitive; erasures is the -1 terminated
  // list of missing chunk indexes expected by jerasure.
  virtual int jerasure_decode(int *erasures,
                              char **data,
                              char **coding,
                              int blocksize) = 0;
  // Alignment requirement for chunk buffers, in bytes.
  virtual unsigned get_alignment() const = 0;
  // Precompute technique state (matrices, schedules) after parsing.
  virtual void prepare() = 0;
  static bool is_prime(int value);
protected:
  virtual int parse(ceph::ErasureCodeProfile &profile, std::ostream *ss);
};
// Reed-Solomon coding with a Vandermonde generator matrix
// (jerasure technique "reed_sol_van").
class ErasureCodeJerasureReedSolomonVandermonde : public ErasureCodeJerasure {
public:
  int *matrix;  // generator matrix allocated by prepare(), freed in dtor
  ErasureCodeJerasureReedSolomonVandermonde() :
    ErasureCodeJerasure("reed_sol_van"),
    matrix(0)
  {
    DEFAULT_K = "7";
    DEFAULT_M = "3";
    DEFAULT_W = "8";
  }
  ~ErasureCodeJerasureReedSolomonVandermonde() override {
    if (matrix)
      free(matrix);
  }
  void jerasure_encode(char **data,
                       char **coding,
                       int blocksize) override;
  int jerasure_decode(int *erasures,
                      char **data,
                      char **coding,
                      int blocksize) override;
  unsigned get_alignment() const override;
  void prepare() override;
private:
  int parse(ceph::ErasureCodeProfile& profile, std::ostream *ss) override;
};
// RAID6-optimized Reed-Solomon coding (jerasure technique
// "reed_sol_r6_op"); note the default m is 2, as for RAID6.
class ErasureCodeJerasureReedSolomonRAID6 : public ErasureCodeJerasure {
public:
  int *matrix;  // generator matrix allocated by prepare(), freed in dtor
  ErasureCodeJerasureReedSolomonRAID6() :
    ErasureCodeJerasure("reed_sol_r6_op"),
    matrix(0)
  {
    DEFAULT_K = "7";
    DEFAULT_M = "2";
    DEFAULT_W = "8";
  }
  ~ErasureCodeJerasureReedSolomonRAID6() override {
    if (matrix)
      free(matrix);
  }
  void jerasure_encode(char **data,
                       char **coding,
                       int blocksize) override;
  int jerasure_decode(int *erasures,
                      char **data,
                      char **coding,
                      int blocksize) override;
  unsigned get_alignment() const override;
  void prepare() override;
private:
  int parse(ceph::ErasureCodeProfile& profile, std::ostream *ss) override;
};
#define DEFAULT_PACKETSIZE "2048"
// Common base for the Cauchy matrix techniques: holds the bit-matrix
// and schedule shared by cauchy_orig and cauchy_good, which differ
// only in how prepare() builds the matrix.
class ErasureCodeJerasureCauchy : public ErasureCodeJerasure {
public:
  int *bitmatrix;   // bit-matrix built by prepare_schedule()
  int **schedule;   // derived schedule used for encoding/decoding
  int packetsize;   // jerasure packet size, parsed from the profile
  explicit ErasureCodeJerasureCauchy(const char *technique) :
    ErasureCodeJerasure(technique),
    bitmatrix(0),
    schedule(0),
    packetsize(0)
  {
    DEFAULT_K = "7";
    DEFAULT_M = "3";
    DEFAULT_W = "8";
  }
  ~ErasureCodeJerasureCauchy() override;
  void jerasure_encode(char **data,
                       char **coding,
                       int blocksize) override;
  int jerasure_decode(int *erasures,
                      char **data,
                      char **coding,
                      int blocksize) override;
  unsigned get_alignment() const override;
  // Build bitmatrix and schedule from the given Cauchy matrix.
  void prepare_schedule(int *matrix);
private:
  int parse(ceph::ErasureCodeProfile& profile, std::ostream *ss) override;
};
// Cauchy coding using the original (unoptimized) Cauchy matrix.
class ErasureCodeJerasureCauchyOrig : public ErasureCodeJerasureCauchy {
public:
  ErasureCodeJerasureCauchyOrig() :
    ErasureCodeJerasureCauchy("cauchy_orig")
  {}
  void prepare() override;
};
// Cauchy coding using jerasure's "good" (optimized) Cauchy matrix.
class ErasureCodeJerasureCauchyGood : public ErasureCodeJerasureCauchy {
public:
  ErasureCodeJerasureCauchyGood() :
    ErasureCodeJerasureCauchy("cauchy_good")
  {}
  void prepare() override;
};
// Liberation codes; also serves as the base class for the blaum_roth
// and liber8tion techniques, which reuse its parameter checking and
// revert-to-default machinery.
class ErasureCodeJerasureLiberation : public ErasureCodeJerasure {
public:
  int *bitmatrix;   // bit-matrix built by prepare()
  int **schedule;   // derived schedule used for encoding/decoding
  int packetsize;   // jerasure packet size, parsed from the profile
  explicit ErasureCodeJerasureLiberation(const char *technique = "liberation") :
    ErasureCodeJerasure(technique),
    bitmatrix(0),
    schedule(0),
    packetsize(0)
  {
    DEFAULT_K = "2";
    DEFAULT_M = "2";
    DEFAULT_W = "7";
  }
  ~ErasureCodeJerasureLiberation() override;
  void jerasure_encode(char **data,
                       char **coding,
                       int blocksize) override;
  int jerasure_decode(int *erasures,
                      char **data,
                      char **coding,
                      int blocksize) override;
  unsigned get_alignment() const override;
  // Parameter validation hooks, overridable by derived techniques;
  // each writes a diagnostic to *ss and returns false on failure.
  virtual bool check_k(std::ostream *ss) const;
  virtual bool check_w(std::ostream *ss) const;
  virtual bool check_packetsize_set(std::ostream *ss) const;
  virtual bool check_packetsize(std::ostream *ss) const;
  // Reset k/w/packetsize (and the profile) to the defaults.
  virtual int revert_to_default(ceph::ErasureCodeProfile& profile,
				std::ostream *ss);
  void prepare() override;
private:
  int parse(ceph::ErasureCodeProfile& profile, std::ostream *ss) override;
};
// Blaum-Roth codes; same machinery as liberation except for the
// w constraint (w > 2 with w+1 prime) and the coding bit-matrix.
class ErasureCodeJerasureBlaumRoth : public ErasureCodeJerasureLiberation {
public:
  ErasureCodeJerasureBlaumRoth() :
    ErasureCodeJerasureLiberation("blaum_roth")
  {
  }
  bool check_w(std::ostream *ss) const override;
  void prepare() override;
};
// Liber8tion codes; m and w are fixed by the technique (m=2, w=8) and
// enforced by parse().
class ErasureCodeJerasureLiber8tion : public ErasureCodeJerasureLiberation {
public:
  ErasureCodeJerasureLiber8tion() :
    ErasureCodeJerasureLiberation("liber8tion")
  {
    DEFAULT_K = "2";
    DEFAULT_M = "2";
    DEFAULT_W = "8";
  }
  void prepare() override;
private:
  int parse(ceph::ErasureCodeProfile& profile, std::ostream *ss) override;
};
#endif
| 7,164 | 26.988281 | 80 | h |
null | ceph-main/src/erasure-code/jerasure/ErasureCodePluginJerasure.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013,2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include "ceph_ver.h"
#include "common/debug.h"
#include "ErasureCodeJerasure.h"
#include "ErasureCodePluginJerasure.h"
#include "jerasure_init.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix _prefix(_dout)
// Debug log prefix used by the dout/derr macros in this file.
static std::ostream& _prefix(std::ostream* _dout)
{
  return *_dout << "ErasureCodePluginJerasure: ";
}
// Instantiate the jerasure implementation selected by the profile's
// "technique" entry, initialize it with the profile and hand it back
// through *erasure_code. Returns -ENOENT for an unknown technique, the
// init() error code on failure, 0 on success.
int ErasureCodePluginJerasure::factory(const std::string& directory,
				       ceph::ErasureCodeProfile &profile,
				       ceph::ErasureCodeInterfaceRef *erasure_code,
				       std::ostream *ss) {
  std::string technique;
  const auto found = profile.find("technique");
  if (found != profile.end())
    technique = found->second;
  ErasureCodeJerasure *interface = nullptr;
  if (technique == "reed_sol_van") {
    interface = new ErasureCodeJerasureReedSolomonVandermonde();
  } else if (technique == "reed_sol_r6_op") {
    interface = new ErasureCodeJerasureReedSolomonRAID6();
  } else if (technique == "cauchy_orig") {
    interface = new ErasureCodeJerasureCauchyOrig();
  } else if (technique == "cauchy_good") {
    interface = new ErasureCodeJerasureCauchyGood();
  } else if (technique == "liberation") {
    interface = new ErasureCodeJerasureLiberation();
  } else if (technique == "blaum_roth") {
    interface = new ErasureCodeJerasureBlaumRoth();
  } else if (technique == "liber8tion") {
    interface = new ErasureCodeJerasureLiber8tion();
  } else {
    *ss << "technique=" << technique << " is not a valid coding technique. "
	<< " Choose one of the following: "
	<< "reed_sol_van, reed_sol_r6_op, cauchy_orig, "
	<< "cauchy_good, liberation, blaum_roth, liber8tion";
    return -ENOENT;
  }
  dout(20) << __func__ << ": " << profile << dendl;
  const int r = interface->init(profile, ss);
  if (r) {
    // init() failed: do not leak the partially initialized instance.
    delete interface;
    return r;
  }
  *erasure_code = ceph::ErasureCodeInterfaceRef(interface);
  return 0;
}
const char *__erasure_code_version() { return CEPH_GIT_NICE_VER; }
// Plugin entry point: initialize the galois fields jerasure needs for
// the supported word sizes, then register the plugin under plugin_name.
// Returns a negative error code on galois initialization failure.
int __erasure_code_init(char *plugin_name, char *directory)
{
  auto& registry = ceph::ErasureCodePluginRegistry::instance();
  int word_sizes[] = { 4, 8, 16, 32 };
  const int ret = jerasure_init(4, word_sizes);
  if (ret != 0)
    return -ret;
  return registry.add(plugin_name, new ErasureCodePluginJerasure());
}
| 2,825 | 32.247059 | 71 | cc |
null | ceph-main/src/erasure-code/jerasure/ErasureCodePluginJerasure.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_PLUGIN_JERASURE_H
#define CEPH_ERASURE_CODE_PLUGIN_JERASURE_H
#include "erasure-code/ErasureCodePlugin.h"
// Plugin wrapper exposing the jerasure techniques to the erasure code
// plugin registry; factory() selects and instantiates a technique from
// the profile.
class ErasureCodePluginJerasure : public ceph::ErasureCodePlugin {
public:
  int factory(const std::string& directory,
	      ceph::ErasureCodeProfile &profile,
	      ceph::ErasureCodeInterfaceRef *erasure_code,
	      std::ostream *ss) override;
};
#endif
| 971 | 29.375 | 71 | h |
null | ceph-main/src/erasure-code/jerasure/jerasure_init.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013,2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include "common/debug.h"
#include "jerasure_init.h"
extern "C" {
#include "galois.h"
}
#define dout_context g_ceph_context
// Initialize a default galois field for each of the count word sizes
// in words. Returns 0 on success, or the negated galois error code of
// the first failing word size (logged via derr).
extern "C" int jerasure_init(int count, int *words)
{
  for (int idx = 0; idx < count; ++idx) {
    const int word_size = words[idx];
    const int ret = galois_init_default_field(word_size);
    if (ret != 0) {
      derr << "failed to galois_init_default_field(" << word_size << ")" << dendl;
      return -ret;
    }
  }
  return 0;
}
| 984 | 24.921053 | 81 | cc |
null | ceph-main/src/erasure-code/jerasure/jerasure_init.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2013, 2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_JERASURE_INIT_H
#define CEPH_JERASURE_INIT_H
extern "C" int jerasure_init(int count, int *words);
#endif
| 705 | 27.24 | 71 | h |
null | ceph-main/src/erasure-code/lrc/ErasureCodeLrc.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <cerrno>
#include <algorithm>
#include "include/str_map.h"
#include "common/debug.h"
#include "crush/CrushWrapper.h"
#include "osd/osd_types.h"
#include "include/stringify.h"
#include "erasure-code/ErasureCodePlugin.h"
#include "json_spirit/json_spirit_writer.h"
#include "ErasureCodeLrc.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix _prefix(_dout)
using namespace std;
using namespace ceph;
// Debug log prefix used by the dout/derr macros in this file.
static ostream& _prefix(std::ostream* _dout)
{
  return *_dout << "ErasureCodeLrc: ";
}
// Create the crush rule named `name` for this LRC code in `crush`.
// The rule starts at rule_root (narrowed to rule_device_class when
// set) and then applies the configured rule_steps. Returns the new
// rule id, or a negative errno (-EEXIST, -ENOENT, -EINVAL) with a
// message in *ss on failure.
int ErasureCodeLrc::create_rule(const string &name,
				CrushWrapper &crush,
				ostream *ss) const
{
  if (crush.rule_exists(name)) {
    *ss << "rule " << name << " exists";
    return -EEXIST;
  }
  if (!crush.name_exists(rule_root)) {
    *ss << "root item " << rule_root << " does not exist";
    return -ENOENT;
  }
  int root = crush.get_item_id(rule_root);
  if (rule_device_class.size()) {
    if (!crush.class_exists(rule_device_class)) {
      *ss << "device class " << rule_device_class << " does not exist";
      return -ENOENT;
    }
    int c = crush.get_class_id(rule_device_class);
    if (crush.class_bucket.count(root) == 0 ||
	crush.class_bucket[root].count(c) == 0) {
      *ss << "root item " << rule_root << " has no devices with class "
	  << rule_device_class;
      return -EINVAL;
    }
    // Use the per-class shadow bucket of the root.
    root = crush.class_bucket[root][c];
  }
  // Pick the first free rule id.
  int rno = 0;
  for (rno = 0; rno < crush.get_max_rules(); rno++) {
    if (!crush.rule_exists(rno))
      break;
  }
  // 4 fixed steps (2 tunables + take + emit) plus the configured ones.
  int steps = 4 + rule_steps.size();
  int ret;
  ret = crush.add_rule(rno, steps, pg_pool_t::TYPE_ERASURE);
  ceph_assert(ret == rno);
  int step = 0;
  ret = crush.set_rule_step(rno, step++, CRUSH_RULE_SET_CHOOSELEAF_TRIES, 5, 0);
  ceph_assert(ret == 0);
  ret = crush.set_rule_step(rno, step++, CRUSH_RULE_SET_CHOOSE_TRIES, 100, 0);
  ceph_assert(ret == 0);
  ret = crush.set_rule_step(rno, step++, CRUSH_RULE_TAKE, root, 0);
  ceph_assert(ret == 0);
  // [ [ "choose", "rack", 2 ],
  //   [ "chooseleaf", "host", 5 ] ]
  for (vector<Step>::const_iterator i = rule_steps.begin();
       i != rule_steps.end();
       ++i) {
    int op = i->op == "chooseleaf" ?
      CRUSH_RULE_CHOOSELEAF_INDEP : CRUSH_RULE_CHOOSE_INDEP;
    int type = crush.get_type_id(i->type);
    if (type < 0) {
      *ss << "unknown crush type " << i->type;
      return -EINVAL;
    }
    ret = crush.set_rule_step(rno, step++, op, i->n, type);
    ceph_assert(ret == 0);
  }
  ret = crush.set_rule_step(rno, step++, CRUSH_RULE_EMIT, 0, 0);
  ceph_assert(ret == 0);
  crush.set_rule_name(rno, name);
  return rno;
}
int ErasureCodeLrc::layers_description(const ErasureCodeProfile &profile,
json_spirit::mArray *description,
ostream *ss) const
{
if (profile.count("layers") == 0) {
*ss << "could not find 'layers' in " << profile << std::endl;
return ERROR_LRC_DESCRIPTION;
}
string str = profile.find("layers")->second;
try {
json_spirit::mValue json;
json_spirit::read_or_throw(str, json);
if (json.type() != json_spirit::array_type) {
*ss << "layers='" << str
<< "' must be a JSON array but is of type "
<< json.type() << " instead" << std::endl;
return ERROR_LRC_ARRAY;
}
*description = json.get_array();
} catch (json_spirit::Error_position &e) {
*ss << "failed to parse layers='" << str << "'"
<< " at line " << e.line_ << ", column " << e.column_
<< " : " << e.reason_ << std::endl;
return ERROR_LRC_PARSE_JSON;
}
return 0;
}
// Populate `layers` from the parsed JSON description. Each entry must
// be an array whose first element is the chunks_map string and whose
// optional second element carries the layer profile, either as a
// string (parsed with get_json_str_map) or as a JSON object. Any
// trailing elements are ignored. Returns 0 on success or an
// ERROR_LRC_* code with a diagnostic in *ss.
// Fix: dropped the unused local `ErasureCodeProfile profile` that was
// declared per entry but never read or written.
int ErasureCodeLrc::layers_parse(const string &description_string,
				 json_spirit::mArray description,
				 ostream *ss)
{
  int position = 0;
  for (vector<json_spirit::mValue>::iterator i = description.begin();
       i != description.end();
       ++i, position++) {
    if (i->type() != json_spirit::array_type) {
      stringstream json_string;
      json_spirit::write(*i, json_string);
      *ss << "each element of the array "
	  << description_string << " must be a JSON array but "
	  << json_string.str() << " at position " << position
	  << " is of type " << i->type() << " instead" << std::endl;
      return ERROR_LRC_ARRAY;
    }
    json_spirit::mArray layer_json = i->get_array();
    int index = 0;
    for (vector<json_spirit::mValue>::iterator j = layer_json.begin();
	 j != layer_json.end();
	 ++j, ++index) {
      if (index == 0) {
	// First element: the chunks_map string describing the layer.
	if (j->type() != json_spirit::str_type) {
	  stringstream element;
	  json_spirit::write(*j, element);
	  *ss << "the first element of the entry "
	      << element.str() << " (first is zero) "
	      << position << " in " << description_string
	      << " is of type " << (*j).type() << " instead of string" << std::endl;
	  return ERROR_LRC_STR;
	}
	layers.push_back(Layer(j->get_str()));
	Layer &layer = layers.back();
	layer.chunks_map = j->get_str();
      } else if(index == 1) {
	// Second element: the layer profile, as string or object.
	Layer &layer = layers.back();
	if (j->type() != json_spirit::str_type &&
	    j->type() != json_spirit::obj_type) {
	  stringstream element;
	  json_spirit::write(*j, element);
	  *ss << "the second element of the entry "
	      << element.str() << " (first is zero) "
	      << position << " in " << description_string
	      << " is of type " << (*j).type() << " instead of string or object"
	      << std::endl;
	  return ERROR_LRC_CONFIG_OPTIONS;
	}
	if (j->type() == json_spirit::str_type) {
	  int err = get_json_str_map(j->get_str(), *ss, &layer.profile);
	  if (err)
	    return err;
	} else if (j->type() == json_spirit::obj_type) {
	  json_spirit::mObject o = j->get_obj();
	  for (map<string, json_spirit::mValue>::iterator i = o.begin();
	       i != o.end();
	       ++i) {
	    layer.profile[i->first] = i->second.get_str();
	  }
	}
      } else {
	// ignore trailing elements
      }
    }
  }
  return 0;
}
// Finish initializing every layer: decode its chunks_map into the
// data/coding/chunks_as_set members, fill missing profile entries with
// defaults (k/m derived from the map, jerasure reed_sol_van as the
// backing plugin) and instantiate the layer's erasure code through the
// plugin registry. Returns the first factory() error, 0 on success.
int ErasureCodeLrc::layers_init(ostream *ss)
{
  ErasureCodePluginRegistry &registry = ErasureCodePluginRegistry::instance();
  for (Layer &layer : layers) {
    // 'D' marks a data chunk, 'c' a coding chunk; any other character
    // means the chunk does not belong to this layer.
    int position = 0;
    for (char symbol : layer.chunks_map) {
      switch (symbol) {
      case 'D':
	layer.data.push_back(position);
	layer.chunks_as_set.insert(position);
	break;
      case 'c':
	layer.coding.push_back(position);
	layer.chunks_as_set.insert(position);
	break;
      default:
	break;
      }
      position++;
    }
    // chunks lists the data chunks first, then the coding chunks.
    layer.chunks = layer.data;
    layer.chunks.insert(layer.chunks.end(),
			layer.coding.begin(), layer.coding.end());
    if (layer.profile.find("k") == layer.profile.end())
      layer.profile["k"] = stringify(layer.data.size());
    if (layer.profile.find("m") == layer.profile.end())
      layer.profile["m"] = stringify(layer.coding.size());
    if (layer.profile.find("plugin") == layer.profile.end())
      layer.profile["plugin"] = "jerasure";
    if (layer.profile.find("technique") == layer.profile.end())
      layer.profile["technique"] = "reed_sol_van";
    int err = registry.factory(layer.profile["plugin"],
			       directory,
			       layer.profile,
			       &layer.erasure_code,
			       ss);
    if (err)
      return err;
  }
  return 0;
}
// Verify the parsed layers against whole-code invariants: there must
// be at least one layer, and every layer's chunks_map must describe
// exactly chunk_count chunks. On failure a diagnostic referencing
// description_string is written to *ss and an ERROR_LRC_* code is
// returned; returns 0 on success.
int ErasureCodeLrc::layers_sanity_checks(const string &description_string,
					 ostream *ss) const
{
  if (layers.size() < 1) {
    *ss << "layers parameter has " << layers.size()
	<< " which is less than the minimum of one. "
	<< description_string << std::endl;
    return ERROR_LRC_LAYERS_COUNT;
  }
  // Bug fix: position was declared but never incremented, so the
  // error message always reported "position 0" whichever layer was
  // actually at fault.
  int position = 0;
  for (vector<Layer>::const_iterator layer = layers.begin();
       layer != layers.end();
       ++layer, ++position) {
    if (chunk_count != layer->chunks_map.length()) {
      *ss << "the first element of the array at position "
	  << position << " (starting from zero) "
	  << " is the string '" << layer->chunks_map
	  << " found in the layers parameter "
	  << description_string << ". It is expected to be "
	  << chunk_count << " characters long but is "
	  << layer->chunks_map.length() << " characters long instead "
	  << std::endl;
      return ERROR_LRC_MAPPING_SIZE;
    }
  }
  return 0;
}
// Parse the generic ErasureCode options first, then the LRC specific
// crush rule description. Returns the first error encountered.
int ErasureCodeLrc::parse(ErasureCodeProfile &profile,
			  ostream *ss)
{
  const int err = ErasureCode::parse(profile, ss);
  if (err != 0)
    return err;
  return parse_rule(profile, ss);
}
const string ErasureCodeLrc::DEFAULT_KML("-1");
// Interpret the simplified k/m/l profile form: when all three are set,
// generate the equivalent "mapping", "layers" and rule steps and store
// them back into the profile. When none are set this is a no-op; a
// partial specification or parameters that do not divide evenly are
// rejected with an ERROR_LRC_* code and a diagnostic on *ss.
int ErasureCodeLrc::parse_kml(ErasureCodeProfile &profile,
			      ostream *ss)
{
  int err = ErasureCode::parse(profile, ss);
  const int DEFAULT_INT = -1;
  int k, m, l;
  err |= to_int("k", profile, &k, DEFAULT_KML, ss);
  err |= to_int("m", profile, &m, DEFAULT_KML, ss);
  err |= to_int("l", profile, &l, DEFAULT_KML, ss);
  // None set: the caller provided an explicit mapping/layers instead.
  if (k == DEFAULT_INT && m == DEFAULT_INT && l == DEFAULT_INT)
    return err;
  // Either all of k, m, l are set or none of them.
  if ((k != DEFAULT_INT || m != DEFAULT_INT || l != DEFAULT_INT) &&
      (k == DEFAULT_INT || m == DEFAULT_INT || l == DEFAULT_INT)) {
    *ss << "All of k, m, l must be set or none of them in "
	<< profile << std::endl;
    return ERROR_LRC_ALL_OR_NOTHING;
  }
  // kml is a generator: it must not be mixed with the parameters it
  // would generate.
  const char *generated[] = { "mapping",
			      "layers",
			      "crush-steps" };
  for (int i = 0; i < 3; i++) {
    if (profile.count(generated[i])) {
      *ss << "The " << generated[i] << " parameter cannot be set "
	  << "when k, m, l are set in " << profile << std::endl;
      return ERROR_LRC_GENERATED;
    }
  }
  // The chunks are partitioned into (k + m) / l local groups; k and m
  // must spread evenly over the groups.
  if (l == 0 || (k + m) % l) {
    *ss << "k + m must be a multiple of l in "
	<< profile << std::endl;
    return ERROR_LRC_K_M_MODULO;
  }
  int local_group_count = (k + m) / l;
  if (k % local_group_count) {
    *ss << "k must be a multiple of (k + m) / l in "
	<< profile << std::endl;
    return ERROR_LRC_K_MODULO;
  }
  if (m % local_group_count) {
    *ss << "m must be a multiple of (k + m) / l in "
	<< profile << std::endl;
    return ERROR_LRC_M_MODULO;
  }
  // Build the global mapping: per group, the data chunks, the global
  // coding chunks ('_' here, filled by the global layer) and one
  // trailing slot for the group's local coding chunk.
  string mapping;
  for (int i = 0; i < local_group_count; i++) {
    mapping += string(k / local_group_count, 'D') +
      string(m / local_group_count, '_') + "_";
  }
  profile["mapping"] = mapping;
  string layers = "[ ";
  // global layer
  layers += " [ \"";
  for (int i = 0; i < local_group_count; i++) {
    layers += string(k / local_group_count, 'D') +
      string(m / local_group_count, 'c') + "_";
  }
  layers += "\", \"\" ],";
  // local layers
  for (int i = 0; i < local_group_count; i++) {
    layers += " [ \"";
    for (int j = 0; j < local_group_count; j++) {
      if (i == j)
	layers += string(l, 'D') + "c";
      else
	layers += string(l + 1, '_');
    }
    layers += "\", \"\" ],";
  }
  profile["layers"] = layers + "]";
  // Derive the crush rule steps from the locality/failure-domain
  // options: with a locality, choose one bucket per local group then
  // l+1 leaves inside it; otherwise a flat chooseleaf.
  ErasureCodeProfile::const_iterator parameter;
  string rule_locality;
  parameter = profile.find("crush-locality");
  if (parameter != profile.end())
    rule_locality = parameter->second;
  string rule_failure_domain = "host";
  parameter = profile.find("crush-failure-domain");
  if (parameter != profile.end())
    rule_failure_domain = parameter->second;
  if (rule_locality != "") {
    rule_steps.clear();
    rule_steps.push_back(Step("choose", rule_locality,
			      local_group_count));
    rule_steps.push_back(Step("chooseleaf", rule_failure_domain,
			      l + 1));
  } else if (rule_failure_domain != "") {
    rule_steps.clear();
    rule_steps.push_back(Step("chooseleaf", rule_failure_domain, 0));
  }
  return err;
}
// Read the crush related profile entries: crush-root (default
// "default"), crush-device-class (default "") and the optional
// crush-steps JSON array, each element of which is handed to
// parse_rule_step(). Returns 0 on success or an ERROR_LRC_* code with
// a diagnostic on *ss.
int ErasureCodeLrc::parse_rule(ErasureCodeProfile &profile,
			       ostream *ss)
{
  int err = 0;
  err |= to_string("crush-root", profile,
		   &rule_root,
		   "default", ss);
  err |= to_string("crush-device-class", profile,
		   &rule_device_class,
		   "", ss);
  if (err) {
    return err;
  }
  if (profile.count("crush-steps") != 0) {
    // Explicit steps replace whatever parse_kml() may have derived.
    rule_steps.clear();
    string str = profile.find("crush-steps")->second;
    json_spirit::mArray description;
    try {
      json_spirit::mValue json;
      json_spirit::read_or_throw(str, json);
      if (json.type() != json_spirit::array_type) {
	*ss << "crush-steps='" << str
	    << "' must be a JSON array but is of type "
	    << json.type() << " instead" << std::endl;
	return ERROR_LRC_ARRAY;
      }
      description = json.get_array();
    } catch (json_spirit::Error_position &e) {
      *ss << "failed to parse crush-steps='" << str << "'"
	  << " at line " << e.line_ << ", column " << e.column_
	  << " : " << e.reason_ << std::endl;
      return ERROR_LRC_PARSE_JSON;
    }
    int position = 0;
    for (vector<json_spirit::mValue>::iterator i = description.begin();
	 i != description.end();
	 ++i, position++) {
      if (i->type() != json_spirit::array_type) {
	stringstream json_string;
	json_spirit::write(*i, json_string);
	*ss << "element of the array "
	    << str << " must be a JSON array but "
	    << json_string.str() << " at position " << position
	    << " is of type " << i->type() << " instead" << std::endl;
	return ERROR_LRC_ARRAY;
      }
      int r = parse_rule_step(str, i->get_array(), ss);
      if (r)
	return r;
    }
  }
  return 0;
}
int ErasureCodeLrc::parse_rule_step(const string &description_string,
json_spirit::mArray description,
ostream *ss)
{
stringstream json_string;
json_spirit::write(description, json_string);
string op;
string type;
int n = 0;
int position = 0;
for (vector<json_spirit::mValue>::iterator i = description.begin();
i != description.end();
++i, position++) {
if ((position == 0 || position == 1) &&
i->type() != json_spirit::str_type) {
*ss << "element " << position << " of the array "
<< json_string.str() << " found in " << description_string
<< " must be a JSON string but is of type "
<< i->type() << " instead" << std::endl;
return position == 0 ? ERROR_LRC_RULE_OP : ERROR_LRC_RULE_TYPE;
}
if (position == 2 && i->type() != json_spirit::int_type) {
*ss << "element " << position << " of the array "
<< json_string.str() << " found in " << description_string
<< " must be a JSON int but is of type "
<< i->type() << " instead" << std::endl;
return ERROR_LRC_RULE_N;
}
if (position == 0)
op = i->get_str();
if (position == 1)
type = i->get_str();
if (position == 2)
n = i->get_int();
}
rule_steps.push_back(Step(op, type, n));
return 0;
}
// Initialize the LRC code from the profile: expand the k/m/l shorthand
// if present, parse the crush options, then parse, instantiate and
// sanity-check the layers against the mapping. chunk_count and
// data_chunk_count are derived from the mapping string ('D' marks a
// data chunk). Returns 0 on success or the first error encountered.
int ErasureCodeLrc::init(ErasureCodeProfile &profile,
			 ostream *ss)
{
  int r;
  r = parse_kml(profile, ss);
  if (r)
    return r;
  r = parse(profile, ss);
  if (r)
    return r;
  json_spirit::mArray description;
  r = layers_description(profile, &description, ss);
  if (r)
    return r;
  string description_string = profile.find("layers")->second;
  dout(10) << "init(" << description_string << ")" << dendl;
  r = layers_parse(description_string, description, ss);
  if (r)
    return r;
  r = layers_init(ss);
  if (r)
    return r;
  if (profile.count("mapping") == 0) {
    *ss << "the 'mapping' profile is missing from " << profile;
    return ERROR_LRC_MAPPING;
  }
  string mapping = profile.find("mapping")->second;
  data_chunk_count = count(begin(mapping), end(mapping), 'D');
  chunk_count = mapping.length();
  r = layers_sanity_checks(description_string, ss);
  if (r)
    return r;
  //
  // When initialized with kml, the profile parameters
  // that were generated should not be stored because
  // they would otherwise be exposed to the caller.
  //
  if (profile.find("l") != profile.end() &&
      profile.find("l")->second != DEFAULT_KML) {
    profile.erase("mapping");
    profile.erase("layers");
  }
  ErasureCode::init(profile, ss);
  return 0;
}
// Return the chunks in `want` that are not in `available`, i.e. the
// erasures that must be recovered.
set<int> ErasureCodeLrc::get_erasures(const set<int> &want,
				      const set<int> &available) const
{
  set<int> missing;
  for (set<int>::const_iterator chunk = want.begin();
       chunk != want.end();
       ++chunk) {
    if (available.count(*chunk) == 0)
      missing.insert(*chunk);
  }
  return missing;
}
// Delegate the chunk size computation to the first (global) layer's
// erasure code; all layers operate on chunks of the same size.
unsigned int ErasureCodeLrc::get_chunk_size(unsigned int object_size) const
{
  return layers.front().erasure_code->get_chunk_size(object_size);
}
void p(const set<int> &s) { cerr << s; } // for gdb
// Compute the minimum set of chunks that must be read from
// available_chunks to obtain want_to_read, walking the layers from
// the most local to the global one. Three strategies are tried in
// order: (1) nothing is missing, (2) recover the wanted erasures with
// as few chunks as possible, (3) recover every erasure any layer can
// handle and fall back to reading all available chunks. Returns 0 on
// success with *minimum filled, -EIO when recovery is impossible.
int ErasureCodeLrc::_minimum_to_decode(const set<int> &want_to_read,
				       const set<int> &available_chunks,
				       set<int> *minimum)
{
  dout(20) << __func__ << " want_to_read " << want_to_read
	   << " available_chunks " << available_chunks << dendl;
  {
    set<int> erasures_total;         // all missing chunks
    set<int> erasures_not_recovered; // missing and not yet recoverable
    set<int> erasures_want;          // missing chunks the caller wants
    for (unsigned int i = 0; i < get_chunk_count(); ++i) {
      if (available_chunks.count(i) == 0) {
	erasures_total.insert(i);
	erasures_not_recovered.insert(i);
	if (want_to_read.count(i) != 0)
	  erasures_want.insert(i);
      }
    }
    //
    // Case 1:
    //
    // When no chunk is missing there is no need to read more than what
    // is wanted.
    //
    if (erasures_want.empty()) {
      *minimum = want_to_read;
      dout(20) << __func__ << " minimum == want_to_read == "
	       << want_to_read << dendl;
      return 0;
    }
    //
    // Case 2:
    //
    // Try to recover erasures with as few chunks as possible.
    //
    for (vector<Layer>::reverse_iterator i = layers.rbegin();
	 i != layers.rend();
	 ++i) {
      //
      // If this layer has no chunk that we want, skip it.
      //
      set<int> layer_want;
      set_intersection(want_to_read.begin(), want_to_read.end(),
		       i->chunks_as_set.begin(), i->chunks_as_set.end(),
		       inserter(layer_want, layer_want.end()));
      if (layer_want.empty())
	continue;
      //
      // Are some of the chunks we want missing ?
      //
      set<int> layer_erasures;
      set_intersection(layer_want.begin(), layer_want.end(),
		       erasures_want.begin(), erasures_want.end(),
		       inserter(layer_erasures, layer_erasures.end()));
      set<int> layer_minimum;
      if (layer_erasures.empty()) {
	//
	// The chunks we want are available, this is the minimum we need
	// to read.
	//
	layer_minimum = layer_want;
      } else {
	set<int> erasures;
	set_intersection(i->chunks_as_set.begin(), i->chunks_as_set.end(),
			 erasures_not_recovered.begin(), erasures_not_recovered.end(),
			 inserter(erasures, erasures.end()));
	if (erasures.size() > i->erasure_code->get_coding_chunk_count()) {
	  //
	  // There are too many erasures for this layer to recover: skip
	  // it and hope that an upper layer will be do better.
	  //
	  continue;
	} else {
	  //
	  // Get all available chunks in that layer to recover the
	  // missing one(s).
	  //
	  set_difference(i->chunks_as_set.begin(), i->chunks_as_set.end(),
			 erasures_not_recovered.begin(), erasures_not_recovered.end(),
			 inserter(layer_minimum, layer_minimum.end()));
	  //
	  // Chunks recovered by this layer are removed from the list of
	  // erasures so that upper levels do not attempt to recover
	  // them.
	  //
	  for (set<int>::const_iterator j = erasures.begin();
	       j != erasures.end();
	       ++j) {
	    erasures_not_recovered.erase(*j);
	    erasures_want.erase(*j);
	  }
	}
      }
      minimum->insert(layer_minimum.begin(), layer_minimum.end());
    }
    if (erasures_want.empty()) {
      // Everything wanted can be recovered: also read the available
      // wanted chunks, minus anything that is actually missing.
      minimum->insert(want_to_read.begin(), want_to_read.end());
      for (set<int>::const_iterator i = erasures_total.begin();
	   i != erasures_total.end();
	   ++i) {
	if (minimum->count(*i))
	  minimum->erase(*i);
      }
      dout(20) << __func__ << " minimum = " << *minimum << dendl;
      return 0;
    }
  }
  {
    //
    // Case 3:
    //
    // The previous strategy failed to recover from all erasures.
    //
    // Try to recover as many chunks as possible, even from layers
    // that do not contain chunks that we want, in the hope that it
    // will help the upper layers.
    //
    set<int> erasures_total;
    for (unsigned int i = 0; i < get_chunk_count(); ++i) {
      if (available_chunks.count(i) == 0)
	erasures_total.insert(i);
    }
    for (vector<Layer>::reverse_iterator i = layers.rbegin();
	 i != layers.rend();
	 ++i) {
      set<int> layer_erasures;
      set_intersection(i->chunks_as_set.begin(), i->chunks_as_set.end(),
		       erasures_total.begin(), erasures_total.end(),
		       inserter(layer_erasures, layer_erasures.end()));
      //
      // If this layer has no erasure, skip it
      //
      if (layer_erasures.empty())
	continue;
      if (layer_erasures.size() > 0 &&
	  layer_erasures.size() <= i->erasure_code->get_coding_chunk_count()) {
	//
	// chunks recovered by this layer are removed from the list of
	// erasures so that upper levels know they can rely on their
	// availability
	//
	for (set<int>::const_iterator j = layer_erasures.begin();
	     j != layer_erasures.end();
	     ++j) {
	  erasures_total.erase(*j);
	}
      }
    }
    if (erasures_total.empty()) {
      //
      // Do not try to be smart about what chunks are necessary to
      // recover, use all available chunks.
      //
      *minimum = available_chunks;
      dout(20) << __func__ << " minimum == available_chunks == "
	       << available_chunks << dendl;
      return 0;
    }
  }
  derr << __func__ << " not enough chunks in " << available_chunks
       << " to read " << want_to_read << dendl;
  return -EIO;
}
// Encode the chunks listed in want_to_encode. Layers are scanned from
// the most local backwards to find the first (deepest) layer whose
// chunk set covers everything wanted; encoding then runs from that
// layer to the last one. For each layer the global chunk buffers are
// swapped into a dense 0..n-1 map for the sub-code, encoded, and
// swapped back — swapping avoids copying bufferlists.
int ErasureCodeLrc::encode_chunks(const set<int> &want_to_encode,
				  map<int, bufferlist> *encoded)
{
  unsigned int top = layers.size();
  for (vector<Layer>::reverse_iterator i = layers.rbegin();
       i != layers.rend();
       ++i) {
    --top;
    if (includes(i->chunks_as_set.begin(), i->chunks_as_set.end(),
		 want_to_encode.begin(), want_to_encode.end()))
      break;
  }
  for (unsigned int i = top; i < layers.size(); ++i) {
    const Layer &layer = layers[i];
    set<int> layer_want_to_encode;
    map<int, bufferlist> layer_encoded;
    int j = 0;
    // Map global chunk ids to the layer local 0..n-1 positions.
    for (const auto& c : layer.chunks) {
      std::swap(layer_encoded[j], (*encoded)[c]);
      if (want_to_encode.find(c) != want_to_encode.end())
	layer_want_to_encode.insert(j);
      j++;
    }
    int err = layer.erasure_code->encode_chunks(layer_want_to_encode,
						&layer_encoded);
    // Swap the buffers back into the global map before error handling
    // so *encoded is left consistent either way.
    j = 0;
    for (const auto& c : layer.chunks) {
      std::swap(layer_encoded[j++], (*encoded)[c]);
    }
    if (err) {
      derr << __func__ << " layer " << layer.chunks_map
	   << " failed with " << err << " trying to encode "
	   << layer_want_to_encode << dendl;
      return err;
    }
  }
  return 0;
}
// Decode by walking the layers from the last to the first, letting each
// layer repair whatever erasures it can; chunks recovered by one layer are
// removed from the erasure set so later layers can rely on them.
// Returns 0 when every wanted chunk could be read/recovered, -EIO otherwise.
int ErasureCodeLrc::decode_chunks(const set<int> &want_to_read,
				  const map<int, bufferlist> &chunks,
				  map<int, bufferlist> *decoded)
{
  // Split all chunk positions into available vs erased.
  set<int> available_chunks;
  set<int> erasures;
  for (unsigned int i = 0; i < get_chunk_count(); ++i) {
    if (chunks.count(i) != 0)
      available_chunks.insert(i);
    else
      erasures.insert(i);
  }

  set<int> want_to_read_erasures;

  for (vector<Layer>::reverse_iterator layer = layers.rbegin();
       layer != layers.rend();
       ++layer) {
    // Erasures that fall inside this layer's chunk set.
    set<int> layer_erasures;
    set_intersection(layer->chunks_as_set.begin(), layer->chunks_as_set.end(),
		     erasures.begin(), erasures.end(),
		     inserter(layer_erasures, layer_erasures.end()));

    if (layer_erasures.size() >
	layer->erasure_code->get_coding_chunk_count()) {
      // skip because there are too many erasures for this layer to recover
    } else if(layer_erasures.size() == 0) {
      // skip because all chunks are already available
    } else {
      set<int> layer_want_to_read;
      map<int, bufferlist> layer_chunks;
      map<int, bufferlist> layer_decoded;
      int j = 0;
      for (vector<int>::const_iterator c = layer->chunks.begin();
	   c != layer->chunks.end();
	   ++c) {
	//
	// Pick chunks from *decoded* instead of *chunks* to re-use
	// chunks recovered by previous layers. In other words
	// *chunks* does not change but *decoded* gradually improves
	// as more layers recover from erasures.
	//
	if (erasures.count(*c) == 0)
	  layer_chunks[j] = (*decoded)[*c];
	if (want_to_read.count(*c) != 0)
	  layer_want_to_read.insert(j);
	layer_decoded[j] = (*decoded)[*c];
	++j;
      }
      int err = layer->erasure_code->decode_chunks(layer_want_to_read,
						   layer_chunks,
						   &layer_decoded);
      if (err) {
	derr << __func__ << " layer " << layer->chunks_map
	     << " failed with " << err << " trying to decode "
	     << layer_want_to_read << " with " << available_chunks << dendl;
	return err;
      }
      // Copy the layer's results back to global positions and mark those
      // chunks as no longer erased.
      j = 0;
      for (vector<int>::const_iterator c = layer->chunks.begin();
	   c != layer->chunks.end();
	   ++c) {
	(*decoded)[*c] = layer_decoded[j];
	++j;
	erasures.erase(*c);
      }
      // Stop as soon as every wanted chunk has been recovered.
      want_to_read_erasures.clear();
      set_intersection(erasures.begin(), erasures.end(),
		       want_to_read.begin(), want_to_read.end(),
		       inserter(want_to_read_erasures, want_to_read_erasures.end()));
      if (want_to_read_erasures.size() == 0)
	break;
    }
  }

  if (want_to_read_erasures.size() > 0) {
    derr << __func__ << " want to read " << want_to_read
	 << " with available_chunks = " << available_chunks
	 << " end up being unable to read " << want_to_read_erasures << dendl;
    return -EIO;
  } else {
    return 0;
  }
}
| 25,525 | 28.681395 | 80 | cc |
null | ceph-main/src/erasure-code/lrc/ErasureCodeLrc.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_LRC_H
#define CEPH_ERASURE_CODE_LRC_H
#include "include/err.h"
#include "json_spirit/json_spirit.h"
#include "erasure-code/ErasureCode.h"
#define ERROR_LRC_ARRAY -(MAX_ERRNO + 1)
#define ERROR_LRC_OBJECT -(MAX_ERRNO + 2)
#define ERROR_LRC_INT -(MAX_ERRNO + 3)
#define ERROR_LRC_STR -(MAX_ERRNO + 4)
#define ERROR_LRC_PLUGIN -(MAX_ERRNO + 5)
#define ERROR_LRC_DESCRIPTION -(MAX_ERRNO + 6)
#define ERROR_LRC_PARSE_JSON -(MAX_ERRNO + 7)
#define ERROR_LRC_MAPPING -(MAX_ERRNO + 8)
#define ERROR_LRC_MAPPING_SIZE -(MAX_ERRNO + 9)
#define ERROR_LRC_FIRST_MAPPING -(MAX_ERRNO + 10)
#define ERROR_LRC_COUNT_CONSTRAINT -(MAX_ERRNO + 11)
#define ERROR_LRC_CONFIG_OPTIONS -(MAX_ERRNO + 12)
#define ERROR_LRC_LAYERS_COUNT -(MAX_ERRNO + 13)
#define ERROR_LRC_RULE_OP -(MAX_ERRNO + 14)
#define ERROR_LRC_RULE_TYPE -(MAX_ERRNO + 15)
#define ERROR_LRC_RULE_N -(MAX_ERRNO + 16)
#define ERROR_LRC_ALL_OR_NOTHING -(MAX_ERRNO + 17)
#define ERROR_LRC_GENERATED -(MAX_ERRNO + 18)
#define ERROR_LRC_K_M_MODULO -(MAX_ERRNO + 19)
#define ERROR_LRC_K_MODULO -(MAX_ERRNO + 20)
#define ERROR_LRC_M_MODULO -(MAX_ERRNO + 21)
// Locally Repairable Code (LRC): a stack of erasure-code layers, each
// covering a subset of the global chunk positions, so that common failures
// can be repaired from a small local group instead of the full stripe.
class ErasureCodeLrc final : public ceph::ErasureCode {
public:
  // Default profile for the compact k/m/l flavor (see parse_kml()).
  static const std::string DEFAULT_KML;

  // One layer of the LRC stack: an inner erasure code plus the mapping of
  // its layer-local chunk indices to global chunk positions.
  struct Layer {
    explicit Layer(const std::string &_chunks_map) : chunks_map(_chunks_map) { }
    ceph::ErasureCodeInterfaceRef erasure_code; // inner code for this layer
    std::vector<int> data;       // global positions of the layer's data chunks
    std::vector<int> coding;     // global positions of the layer's coding chunks
    std::vector<int> chunks;     // data + coding positions, layer-local order
    std::set<int> chunks_as_set; // same positions as a set, for set algebra
    std::string chunks_map;      // textual chunk map taken from the profile
    ceph::ErasureCodeProfile profile; // profile used to build erasure_code
  };
  std::vector<Layer> layers;
  std::string directory;          // plugin directory for the inner codes
  unsigned int chunk_count;       // total chunks (data + coding)
  unsigned int data_chunk_count;  // data chunks only
  std::string rule_root;          // crush rule: root bucket name
  std::string rule_device_class;  // crush rule: device class constraint
  // One step of the crush rule generated by create_rule().
  struct Step {
    Step(const std::string &_op, const std::string &_type, int _n) :
      op(_op),
      type(_type),
      n(_n) {}
    std::string op;
    std::string type;
    int n;
  };
  std::vector<Step> rule_steps;
  explicit ErasureCodeLrc(const std::string &dir)
    : directory(dir),
      chunk_count(0), data_chunk_count(0), rule_root("default")
  {
    // Default crush placement: one chunk per host.
    rule_steps.push_back(Step("chooseleaf", "host", 0));
  }
  ~ErasureCodeLrc() override {}
  // Chunks present in `need` but missing from `available`.
  std::set<int> get_erasures(const std::set<int> &need,
			     const std::set<int> &available) const;
  int _minimum_to_decode(const std::set<int> &want_to_read,
			 const std::set<int> &available,
			 std::set<int> *minimum) override;
  int create_rule(const std::string &name,
		  CrushWrapper &crush,
		  std::ostream *ss) const override;
  unsigned int get_chunk_count() const override {
    return chunk_count;
  }
  unsigned int get_data_chunk_count() const override {
    return data_chunk_count;
  }
  unsigned int get_chunk_size(unsigned int object_size) const override;
  int encode_chunks(const std::set<int> &want_to_encode,
		    std::map<int, ceph::buffer::list> *encoded) override;
  int decode_chunks(const std::set<int> &want_to_read,
		    const std::map<int, ceph::buffer::list> &chunks,
		    std::map<int, ceph::buffer::list> *decoded) override;
  int init(ceph::ErasureCodeProfile &profile, std::ostream *ss) override;
  virtual int parse(ceph::ErasureCodeProfile &profile, std::ostream *ss);
  // Profile parsers: compact k/m/l form and crush-rule options.
  int parse_kml(ceph::ErasureCodeProfile &profile, std::ostream *ss);
  int parse_rule(ceph::ErasureCodeProfile &profile, std::ostream *ss);
  int parse_rule_step(const std::string &description_string,
		      json_spirit::mArray description,
		      std::ostream *ss);
  // Helpers for the JSON "layers" profile entry.
  int layers_description(const ceph::ErasureCodeProfile &profile,
			 json_spirit::mArray *description,
			 std::ostream *ss) const;
  int layers_parse(const std::string &description_string,
		   json_spirit::mArray description,
		   std::ostream *ss);
  int layers_init(std::ostream *ss);
  int layers_sanity_checks(const std::string &description_string,
			   std::ostream *ss) const;
};
#endif
| 4,502 | 31.395683 | 80 | h |
null | ceph-main/src/erasure-code/lrc/ErasureCodePluginLrc.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include "ceph_ver.h"
#include "common/debug.h"
#include "ErasureCodePluginLrc.h"
#include "ErasureCodeLrc.h"
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix _prefix(_dout)
// Build an ErasureCodeLrc instance from `profile`, returning it through
// *erasure_code.  On init failure the instance is destroyed and the error
// code is returned unchanged.
int ErasureCodePluginLrc::factory(const std::string &directory,
				  ceph::ErasureCodeProfile &profile,
				  ceph::ErasureCodeInterfaceRef *erasure_code,
				  std::ostream *ss) {
  auto *impl = new ErasureCodeLrc(directory);
  const int r = impl->init(profile, ss);
  if (r != 0) {
    delete impl;
    return r;
  }
  *erasure_code = ceph::ErasureCodeInterfaceRef(impl);
  return 0;
}
// Plugin ABI hook: report the ceph version this plugin was built against.
const char *__erasure_code_version() { return CEPH_GIT_NICE_VER; }
// Plugin ABI hook: register the LRC plugin under `plugin_name`.
// `directory` is unused for LRC.
int __erasure_code_init(char *plugin_name, char *directory)
{
  return ceph::ErasureCodePluginRegistry::instance().add(
    plugin_name, new ErasureCodePluginLrc());
}
| 1,487 | 29.367347 | 70 | cc |
null | ceph-main/src/erasure-code/lrc/ErasureCodePluginLrc.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_PLUGIN_LRC_H
#define CEPH_ERASURE_CODE_PLUGIN_LRC_H
#include "erasure-code/ErasureCodePlugin.h"
// Plugin entry object: builds ErasureCodeLrc instances from a profile.
class ErasureCodePluginLrc : public ceph::ErasureCodePlugin {
public:
  int factory(const std::string &directory,
	      ceph::ErasureCodeProfile &profile,
	      ceph::ErasureCodeInterfaceRef *erasure_code,
	      std::ostream *ss) override;
};
#endif
| 956 | 28.90625 | 71 | h |
null | ceph-main/src/erasure-code/shec/ErasureCodePluginShec.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 FUJITSU LIMITED
* Copyright (C) 2013,2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Takanori Nakao <[email protected]>
* Author: Takeshi Miyamae <[email protected]>
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include "ceph_ver.h"
#include "common/debug.h"
#include "ErasureCodePluginShec.h"
#include "ErasureCodeShecTableCache.h"
#include "ErasureCodeShec.h"
#include "jerasure_init.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix _prefix(_dout)
// Prefix attached to every dout/derr line emitted from this file
// (referenced by the dout_prefix macro above).
static std::ostream& _prefix(std::ostream* _dout)
{
  return *_dout << "ErasureCodePluginShec: ";
}
int ErasureCodePluginShec::factory(const std::string &directory,
ceph::ErasureCodeProfile &profile,
ceph::ErasureCodeInterfaceRef *erasure_code,
std::ostream *ss) {
ErasureCodeShec *interface;
if (profile.find("technique") == profile.end())
profile["technique"] = "multiple";
std::string t = profile.find("technique")->second;
if (t == "single"){
interface = new ErasureCodeShecReedSolomonVandermonde(tcache, ErasureCodeShec::SINGLE);
} else if (t == "multiple"){
interface = new ErasureCodeShecReedSolomonVandermonde(tcache, ErasureCodeShec::MULTIPLE);
} else {
*ss << "technique=" << t << " is not a valid coding technique. "
<< "Choose one of the following: "
<< "single, multiple ";
return -ENOENT;
}
int r = interface->init(profile, ss);
if (r) {
delete interface;
return r;
}
*erasure_code = ceph::ErasureCodeInterfaceRef(interface);
dout(10) << "ErasureCodePluginShec: factory() completed" << dendl;
return 0;
}
// Plugin ABI hook: report the ceph version this plugin was built against.
const char *__erasure_code_version() { return CEPH_GIT_NICE_VER; }
// Plugin ABI hook: initialize jerasure's galois tables for every word size
// SHEC may use (w = 8, 16, 32), then register the plugin.
int __erasure_code_init(char *plugin_name, char *directory = (char *)"")
{
  auto& instance = ceph::ErasureCodePluginRegistry::instance();
  int w[] = { 8, 16, 32 };
  int r = jerasure_init(3, w);
  if (r) {
    // NOTE(review): the failure value is negated here — presumably
    // jerasure_init() reports a positive error code; confirm against its
    // implementation.
    return -r;
  }
  return instance.add(plugin_name, new ErasureCodePluginShec());
}
| 2,560 | 29.855422 | 95 | cc |
null | ceph-main/src/erasure-code/shec/ErasureCodePluginShec.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_PLUGIN_SHEC_H
#define CEPH_ERASURE_CODE_PLUGIN_SHEC_H
#include "ErasureCodeShecTableCache.h"
#include "erasure-code/ErasureCodePlugin.h"
// Plugin entry object for SHEC.  Owns the matrix/table cache shared by
// every ErasureCodeShec instance this plugin creates.
class ErasureCodePluginShec : public ceph::ErasureCodePlugin {
public:
  ErasureCodeShecTableCache tcache; // caches encoding/decoding tables
  int factory(const std::string &directory,
	      ceph::ErasureCodeProfile &profile,
	      ceph::ErasureCodeInterfaceRef *erasure_code,
	      std::ostream *ss) override;
};
#endif
| 1,035 | 28.6 | 71 | h |
null | ceph-main/src/erasure-code/shec/ErasureCodeShec.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 FUJITSU LIMITED
* Copyright (C) 2013,2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Takanori Nakao <[email protected]>
* Author: Takeshi Miyamae <[email protected]>
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cerrno>
#include <algorithm>
#include "common/debug.h"
#include "ErasureCodeShec.h"
extern "C" {
#include "jerasure/include/jerasure.h"
#include "jerasure/include/galois.h"
extern int calc_determinant(int *matrix, int dim);
extern int* reed_sol_vandermonde_coding_matrix(int k, int m, int w);
}
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix _prefix(_dout)
using namespace std;
using namespace ceph;
// Prefix attached to every dout/derr line emitted from this file
// (referenced by the dout_prefix macro above).
static ostream& _prefix(std::ostream* _dout)
{
  return *_dout << "ErasureCodeShec: ";
}
// Consume and validate the profile, build the coding matrix, then let the
// generic ErasureCode base class finish initialization.
int ErasureCodeShec::init(ErasureCodeProfile &profile,
			  ostream *ss)
{
  const int err = parse(profile);
  if (err != 0)
    return err;
  prepare();
  return ErasureCode::init(profile, ss);
}
// Pad the object size up to a multiple of the required alignment and
// spread it evenly over the k data chunks.
unsigned int ErasureCodeShec::get_chunk_size(unsigned int object_size) const
{
  const unsigned alignment = get_alignment();
  const unsigned remainder = object_size % alignment;
  const unsigned padded_length =
    remainder ? object_size + (alignment - remainder) : object_size;
  ceph_assert(padded_length % k == 0);
  return padded_length / k;
}
// Compute the minimum set of chunks that must be read to reconstruct
// want_to_read from available_chunks, by doing a dry run of the decoding
// matrix construction.  Returns -EINVAL on bad arguments, -EIO when no
// recovery is possible.
int ErasureCodeShec::_minimum_to_decode(const set<int> &want_to_read,
					const set<int> &available_chunks,
					set<int> *minimum_chunks)
{
  if (!minimum_chunks) return -EINVAL;

  // Reject chunk ids outside [0, k+m).
  for (set<int>::iterator it = available_chunks.begin(); it != available_chunks.end(); ++it){
    if (*it < 0 || k+m <= *it) return -EINVAL;
  }
  for (set<int>::iterator it = want_to_read.begin(); it != want_to_read.end(); ++it){
    if (*it < 0 || k+m <= *it) return -EINVAL;
  }

  // Convert the sets into 0/1 membership arrays for the matrix helpers.
  int want[k + m];
  int avails[k + m];
  int minimum[k + m];

  memset(want, 0, sizeof(want));
  memset(avails, 0, sizeof(avails));
  memset(minimum, 0, sizeof(minimum));
  (*minimum_chunks).clear();

  for (set<int>::const_iterator i = want_to_read.begin();
       i != want_to_read.end();
       ++i) {
    want[*i] = 1;
  }

  for (set<int>::const_iterator i = available_chunks.begin();
       i != available_chunks.end();
       ++i) {
    avails[*i] = 1;
  }

  {
    // prepare=true: only determine which chunks are needed; the actual
    // matrix inversion is skipped.
    int decoding_matrix[k*k];
    int dm_row[k];
    int dm_column[k];
    memset(decoding_matrix, 0, sizeof(decoding_matrix));
    memset(dm_row, 0, sizeof(dm_row));
    memset(dm_column, 0, sizeof(dm_column));
    if (shec_make_decoding_matrix(true, want, avails, decoding_matrix, dm_row, dm_column, minimum) < 0) {
      return -EIO;
    }
  }

  // Translate the 0/1 flags back into a set of chunk ids.
  for (int i = 0; i < k + m; i++) {
    if (minimum[i] == 1) minimum_chunks->insert(i);
  }

  return 0;
}
// SHEC ignores per-chunk read costs: collapse the cost map to a plain set
// of available chunk ids and delegate to the cost-free computation.
int ErasureCodeShec::minimum_to_decode_with_cost(const set<int> &want_to_read,
						 const map<int, int> &available,
						 set<int> *minimum_chunks)
{
  set<int> available_chunks;
  for (const auto &entry : available)
    available_chunks.insert(entry.first);
  return _minimum_to_decode(want_to_read, available_chunks, minimum_chunks);
}
// Split `in` into k data chunks, compute the m coding chunks, and keep in
// *encoded only the chunks listed in want_to_encode.
//
// Returns -EINVAL when *encoded is missing or non-empty, otherwise any
// error from encode_prepare()/encode_chunks(), 0 on success.
//
// Fix: the return value of encode_chunks() was silently discarded; it is
// now propagated so a failing encode cannot be mistaken for success.
// Also removed the unused local `bufferlist out`.
int ErasureCodeShec::encode(const set<int> &want_to_encode,
			    const bufferlist &in,
			    map<int, bufferlist> *encoded)
{
  unsigned int k = get_data_chunk_count();
  unsigned int m = get_chunk_count() - k;

  if (!encoded || !encoded->empty()){
    return -EINVAL;
  }
  // Pad and split `in` into k+m equally sized, aligned buffers.
  int err = encode_prepare(in, *encoded);
  if (err)
    return err;
  err = encode_chunks(want_to_encode, encoded);
  if (err)
    return err;
  // Drop the chunks the caller did not ask for.
  for (unsigned int i = 0; i < k + m; i++) {
    if (want_to_encode.count(i) == 0)
      encoded->erase(i);
  }
  return 0;
}
// Compute the m coding chunks from the k data chunks in place.  All k+m
// buffers in *encoded must already exist and have equal length (encode()
// guarantees this via encode_prepare()).
int ErasureCodeShec::encode_chunks(const set<int> &want_to_encode,
				   map<int, bufferlist> *encoded)
{
  // NOTE: variable-length array (gcc/clang extension, used throughout
  // this file); k+m is bounded by parse() to at most 20.
  char *chunks[k + m];
  for (int i = 0; i < k + m; i++){
    chunks[i] = (*encoded)[i].c_str();
  }
  shec_encode(&chunks[0], &chunks[k], (*encoded)[0].length());
  return 0;
}
// Reconstruct the chunks in want_to_read from the available `chunks`.
// Fast path: if everything wanted is already present, copy it out without
// decoding.  Otherwise allocate aligned buffers for the missing chunks and
// delegate to decode_chunks().
int ErasureCodeShec::_decode(const set<int> &want_to_read,
			     const map<int, bufferlist> &chunks,
			     map<int, bufferlist> *decoded)
{
  vector<int> have;

  if (!decoded || !decoded->empty()){
    return -EINVAL;
  }
  if (!want_to_read.empty() && chunks.empty()) {
    // i need to get the blocksize from the first element of chunks
    return -1;
  }

  have.reserve(chunks.size());
  for (map<int, bufferlist>::const_iterator i = chunks.begin();
       i != chunks.end();
       ++i) {
    have.push_back(i->first);
  }
  // `have` is sorted (map iteration order), as std::includes requires.
  if (includes(
	have.begin(), have.end(), want_to_read.begin(), want_to_read.end())) {
    // Everything wanted is available: no decoding needed.
    for (set<int>::iterator i = want_to_read.begin();
	 i != want_to_read.end();
	 ++i) {
      (*decoded)[*i] = chunks.find(*i)->second;
    }
    return 0;
  }
  unsigned int k = get_data_chunk_count();
  unsigned int m = get_chunk_count() - k;
  unsigned blocksize = (*chunks.begin()).second.length();
  // Populate *decoded with all k+m chunks: available ones are re-aligned
  // copies, missing ones are freshly allocated aligned buffers that
  // decode_chunks() will fill in.
  for (unsigned int i = 0; i < k + m; i++) {
    if (chunks.find(i) == chunks.end()) {
      bufferlist tmp;
      bufferptr ptr(buffer::create_aligned(blocksize, SIMD_ALIGN));
      tmp.push_back(ptr);
      tmp.claim_append((*decoded)[i]);
      (*decoded)[i].swap(tmp);
    } else {
      (*decoded)[i] = chunks.find(i)->second;
      (*decoded)[i].rebuild_aligned(SIMD_ALIGN);
    }
  }
  return decode_chunks(want_to_read, chunks, decoded);
}
// Decode in place: build the erased/available flag arrays and the raw
// data/coding pointer arrays over *decoded, then run the matrix decoder.
// Only chunks that are both wanted and missing count as erasures.
int ErasureCodeShec::decode_chunks(const set<int> &want_to_read,
				   const map<int, bufferlist> &chunks,
				   map<int, bufferlist> *decoded)
{
  unsigned blocksize = (*chunks.begin()).second.length();
  int erased[k + m];
  int erased_count = 0;
  int avails[k + m];
  char *data[k];
  char *coding[m];

  for (int i = 0; i < k + m; i++) {
    erased[i] = 0;
    if (chunks.find(i) == chunks.end()) {
      // Missing chunk: an erasure only if the caller actually wants it.
      if (want_to_read.count(i) > 0) {
	erased[i] = 1;
	erased_count++;
      }
      avails[i] = 0;
    } else {
      avails[i] = 1;
    }
    // Raw pointers into the (aligned) buffers prepared by _decode().
    if (i < k)
      data[i] = (*decoded)[i].c_str();
    else
      coding[i - k] = (*decoded)[i].c_str();
  }

  if (erased_count > 0) {
    return shec_decode(erased, avails, data, coding, blocksize);
  } else {
    return 0;
  }
}
//
// ErasureCodeShecReedSolomonVandermonde
//
// Encode with jerasure using the cached SHEC coding matrix built by
// prepare().
void ErasureCodeShecReedSolomonVandermonde::shec_encode(char **data,
							char **coding,
							int blocksize)
{
  jerasure_matrix_encode(k, m, w, matrix, data, coding, blocksize);
}
// Decode the erased chunks; delegates to the generic SHEC matrix decoder.
int ErasureCodeShecReedSolomonVandermonde::shec_decode(int *erased,
						       int *avails,
						       char **data,
						       char **coding,
						       int blocksize)
{
  return shec_matrix_decode(erased, avails, data, coding, blocksize);
}
// Alignment used by get_chunk_size() when padding object sizes so every
// data chunk holds a whole number of w-bit words.
unsigned ErasureCodeShecReedSolomonVandermonde::get_alignment() const
{
  return k*w*sizeof(int);
}
int ErasureCodeShecReedSolomonVandermonde::parse(const ErasureCodeProfile &profile)
{
int err = 0;
// k, m, c
if (profile.find("k") == profile.end() &&
profile.find("m") == profile.end() &&
profile.find("c") == profile.end()){
dout(10) << "(k, m, c) default to " << "(" << DEFAULT_K
<< ", " << DEFAULT_M << ", " << DEFAULT_C << ")" << dendl;
k = DEFAULT_K; m = DEFAULT_M; c = DEFAULT_C;
} else if (profile.find("k") == profile.end() ||
profile.find("m") == profile.end() ||
profile.find("c") == profile.end()){
dout(10) << "(k, m, c) must be chosen" << dendl;
err = -EINVAL;
} else {
std::string err_k, err_m, err_c, value_k, value_m, value_c;
value_k = profile.find("k")->second;
value_m = profile.find("m")->second;
value_c = profile.find("c")->second;
k = strict_strtol(value_k.c_str(), 10, &err_k);
m = strict_strtol(value_m.c_str(), 10, &err_m);
c = strict_strtol(value_c.c_str(), 10, &err_c);
if (!err_k.empty() || !err_m.empty() || !err_c.empty()){
if (!err_k.empty()){
derr << "could not convert k=" << value_k << "to int" << dendl;
} else if (!err_m.empty()){
derr << "could not convert m=" << value_m << "to int" << dendl;
} else if (!err_c.empty()){
derr << "could not convert c=" << value_c << "to int" << dendl;
}
err = -EINVAL;
} else if (k <= 0){
derr << "k=" << k
<< " must be a positive number" << dendl;
err = -EINVAL;
} else if (m <= 0){
derr << "m=" << m
<< " must be a positive number" << dendl;
err = -EINVAL;
} else if (c <= 0){
derr << "c=" << c
<< " must be a positive number" << dendl;
err = -EINVAL;
} else if (m < c){
derr << "c=" << c
<< " must be less than or equal to m=" << m << dendl;
err = -EINVAL;
} else if (k > 12){
derr << "k=" << k
<< " must be less than or equal to 12" << dendl;
err = -EINVAL;
} else if (k+m > 20){
derr << "k+m=" << k+m
<< " must be less than or equal to 20" << dendl;
err = -EINVAL;
} else if (k<m){
derr << "m=" << m
<< " must be less than or equal to k=" << k << dendl;
err = -EINVAL;
}
}
if (err) {
derr << "(k, m, c)=(" << k << ", " << m << ", " << c
<< ") is not a valid parameter." << dendl;
return err;
}
dout(10) << "(k, m, c) set to " << "(" << k << ", " << m << ", "
<< c << ")"<< dendl;
// w
if (profile.find("w") == profile.end()){
dout(10) << "w default to " << DEFAULT_W << dendl;
w = DEFAULT_W;
} else {
std::string err_w, value_w;
value_w = profile.find("w")->second;
w = strict_strtol(value_w.c_str(), 10, &err_w);
if (!err_w.empty()){
derr << "could not convert w=" << value_w << "to int" << dendl;
dout(10) << "w default to " << DEFAULT_W << dendl;
w = DEFAULT_W;
} else if (w != 8 && w != 16 && w != 32) {
derr << "w=" << w
<< " must be one of {8, 16, 32}" << dendl;
dout(10) << "w default to " << DEFAULT_W << dendl;
w = DEFAULT_W;
} else {
dout(10) << "w set to " << w << dendl;
}
}
return 0;
}
// Obtain the SHEC coding matrix for the current (technique, k, m, c, w),
// computing it once and sharing it through the table cache.
void ErasureCodeShecReedSolomonVandermonde::prepare()
{
  // setup shared encoding table
  int** p_enc_table =
    tcache.getEncodingTable(technique, k, m, c, w);

  if (!*p_enc_table) {
    dout(10) << "[ cache tables ] creating coeff for k=" <<
      k << " m=" << m << " c=" << c << " w=" << w << dendl;
    matrix = shec_reedsolomon_coding_matrix(technique);
    // either our new created table is stored or if it has been
    // created in the meanwhile the locally allocated table will be
    // freed by setEncodingTable
    matrix = tcache.setEncodingTable(technique, k, m, c, w, matrix);

    // Dump the matrix's zero/non-zero pattern, one row per parity chunk.
    dout(10) << "matrix = " << dendl;
    for (int i=0; i<m; i++) {
      char mat[k+1];
      for (int j=0; j<k; j++) {
        if (matrix[i*k+j] > 0) {
          mat[j] = '1';
        } else {
          mat[j] = '0';
        }
      }
      mat[k] = '\0';
      dout(10) << mat << dendl;
    }
  } else {
    matrix = *p_enc_table;
  }

  dout(10) << " [ technique ] = " <<
    ((technique == MULTIPLE) ? "multiple" : "single") << dendl;

  ceph_assert((technique == SINGLE) || (technique == MULTIPLE));
}
// ErasureCodeShec::
// Mearged from shec.cc.
// Score a candidate split of the m parity chunks into two groups
// (m1 parities covering c1 erasures, m2 covering c2): lower is better.
// The score averages, over all chunks, the number of chunks that must be
// read to recover each one given the windowed parity layout (the same
// windows that shec_reedsolomon_coding_matrix() carves out).
// Returns -1 for an infeasible split.
double ErasureCodeShec::shec_calc_recovery_efficiency1(int k, int m1, int m2, int c1, int c2){
  int r_eff_k[k];
  double r_e1;
  int i, rr, cc, start, end;
  int first_flag;

  // A group cannot tolerate more erasures than it has parities, and a
  // nonzero tolerance needs at least one parity.
  if (m1 < c1 || m2 < c2) return -1;
  if ((m1 == 0 && c1 != 0) || (m2 == 0 && c2 != 0)) return -1;

  // r_eff_k[i]: cheapest-known recovery cost for data chunk i.
  for (i=0; i<k; i++) r_eff_k[i] = 100000000;
  r_e1 = 0;

  // First parity group: each parity rr covers a circular window of data
  // chunks [start, end); track the window width as its recovery cost.
  for (rr=0; rr<m1; rr++){
    start = ((rr*k)/m1) % k;
    end = (((rr+c1)*k)/m1) % k;
    for (cc=start, first_flag=1; first_flag || cc!=end; cc=(cc+1)%k){
      first_flag = 0;
      r_eff_k[cc] = std::min(r_eff_k[cc], ((rr+c1)*k)/m1 - (rr*k)/m1);
    }
    r_e1 += ((rr+c1)*k)/m1 - (rr*k)/m1;
  }

  // Second parity group, same bookkeeping.
  for (rr=0; rr<m2; rr++){
    start = ((rr*k)/m2) % k;
    end = (((rr+c2)*k)/m2) % k;
    for (cc=start, first_flag=1; first_flag || cc!=end; cc=(cc+1)%k){
      first_flag = 0;
      r_eff_k[cc] = std::min(r_eff_k[cc], ((rr+c2)*k)/m2 - (rr*k)/m2);
    }
    r_e1 += ((rr+c2)*k)/m2 - (rr*k)/m2;
  }

  // Average the per-data-chunk costs plus the per-parity window widths
  // over all k+m1+m2 chunks.
  for (i=0; i<k; i++){
    r_e1 += r_eff_k[i];
  }
  r_e1 /= (k+m1+m2);

  return r_e1;
}
// Build the SHEC coding matrix: start from a full Reed-Solomon
// Vandermonde matrix and zero out a circular band of entries in each
// parity row so each parity only covers a window of the data chunks.
// For MULTIPLE, the parities are split into two groups (m1/c1 and m2/c2)
// chosen by exhaustive search to minimize the recovery-efficiency score.
// Returns a newly allocated m x k matrix, or NULL for an unsupported w.
int* ErasureCodeShec::shec_reedsolomon_coding_matrix(int is_single)
{
  int *matrix;
  int rr, cc, start, end;
  int m1, m2, c1, c2;

  if (w != 8 && w != 16 && w != 32) return NULL;

  if (!is_single){
    int c1_best = -1, m1_best = -1;
    double min_r_e1 = 100.0;

    // create all multiple shec pattern and choose best.
    for (c1=0; c1 <= c/2; c1++){
      for (m1=0; m1 <= m; m1++){
	c2 = c-c1;
	m2 = m-m1;

	// Skip infeasible or degenerate splits.
	if (m1 < c1 || m2 < c2) continue;
	if ((m1 == 0 && c1 != 0) || (m2 == 0 && c2 != 0)) continue;
	if ((m1 != 0 && c1 == 0) || (m2 != 0 && c2 == 0)) continue;

	// minimize r_e1
	if (true) {
	  double r_e1;
	  r_e1 = shec_calc_recovery_efficiency1(k, m1, m2, c1, c2);
	  if (min_r_e1 - r_e1 > std::numeric_limits<double>::epsilon() &&
	      r_e1 < min_r_e1) {
	    min_r_e1 = r_e1;
	    c1_best = c1;
	    m1_best = m1;
	  }
	}
      }
    }
    m1 = m1_best;
    c1 = c1_best;
    m2 = m - m1_best;
    c2 = c - c1_best;
  } else {
    // SINGLE: all parities in one group.
    m1 = 0;
    c1 = 0;
    m2 = m;
    c2 = c;
  }

  // create matrix
  matrix = reed_sol_vandermonde_coding_matrix(k, m, w);

  // Zero the complement of each parity's window: row rr of group 1 keeps
  // only columns in the circular range [start, end).
  for (rr=0; rr<m1; rr++){
    end = ((rr*k)/m1) % k;
    start = (((rr+c1)*k)/m1) % k;
    for (cc=start; cc!=end; cc=(cc+1)%k){
      matrix[cc + rr*k] = 0;
    }
  }

  // Same for group 2, whose rows follow group 1's in the matrix.
  for (rr=0; rr<m2; rr++){
    end = ((rr*k)/m2) % k;
    start = (((rr+c2)*k)/m2) % k;
    for (cc=start; cc!=end; cc=(cc+1)%k){
      matrix[cc + (rr+m1)*k] = 0;
    }
  }

  return matrix;
}
// Find a minimal decoding matrix for the erased chunks flagged in want_
// given the availability flags in avails, by enumerating subsets of the
// parity chunks and keeping the smallest invertible submatrix.
//
// Outputs:
//   decoding_matrix - the inverted submatrix (skipped when prepare=true)
//   dm_row/dm_column - the rows (chunk ids, later remapped) and data
//                      columns the submatrix covers, -1 terminated
//   minimum         - 0/1 flags of the chunks required to decode
// Returns 0 on success, -1 when no recovery matrix exists.
int ErasureCodeShec::shec_make_decoding_matrix(bool prepare, int *want_, int *avails,
					       int *decoding_matrix, int *dm_row, int *dm_column,
					       int *minimum)
{
  // mindup: smallest submatrix dimension found so far; minp: fewest
  // parity chunks used by that solution.  k+1 means "none found".
  int mindup = k+1, minp = k+1;
  int want[k + m];

  memset(want, 0, sizeof(want));

  for (int i = 0; i < k + m; ++i) {
    want[i] = want_[i];
  }

  // Re-creating a wanted-but-missing parity chunk requires every data
  // chunk its matrix row touches, so promote those to wanted.
  for (int i = 0; i < m; ++i) {
    if (want[i + k] && !avails[i + k]) {
      for (int j=0; j < k; ++j) {
	if (matrix[i * k + j] > 0) {
	  want[j] = 1;
	}
      }
    }
  }

  // Fast path: a previous call with the same inputs cached the answer.
  if (tcache.getDecodingTableFromCache(decoding_matrix,
				       dm_row, dm_column, minimum,
				       technique,
				       k, m, c, w,
				       want, avails)) {
    return 0;
  }

  // Enumerate every subset of the m parity chunks (bitmask pp).
  for (unsigned long long pp = 0; pp < (1ull << m); ++pp) {

    // select parity chunks
    int ek = 0;
    int p[m];
    for (int i=0; i < m; ++i) {
      if (pp & (1ull << i)) {
	p[ek++] = i;
      }
    }
    // Prune: already have a solution using fewer parities.
    if (ek > minp) {
      continue;
    }

    // Are selected parity chunks avail?
    bool ok = true;
    for (int i = 0; i < ek && ok; i++) {
      if (!avails[k+p[i]]) {
	ok = false;
	break;
      }
    }

    if (!ok) {
      continue;
    }

    // tmprow: chunks (data + parity) feeding the submatrix rows;
    // tmpcolumn: data chunks the submatrix must solve for.
    int tmprow[k + m];
    int tmpcolumn[k];
    for (int i = 0; i < k + m; i++) {
      tmprow[i] = 0;
    }
    for (int i = 0; i < k; i++) {
      tmpcolumn[i] = 0;
    }

    for (int i=0; i < k; i++) {
      if (want[i] && !avails[i]) {
	tmpcolumn[i] = 1;
      }
    }

    // Parity chunks which are used to recovery erased data chunks, are added to tmprow.
    for (int i = 0; i < ek; i++) {
      tmprow[k + p[i]] = 1;
      for (int j = 0; j < k; j++) {
	int element = matrix[(p[i]) * k + j];
	if (element != 0) {
	  tmpcolumn[j] = 1;
	}
	if (element != 0 && avails[j] == 1) {
	  tmprow[j] = 1;
	}
      }
    }

    int dup_row = 0, dup_column = 0, dup = 0;
    for (int i = 0; i < k + m; i++) {
      if (tmprow[i]) {
	dup_row++;
      }
    }

    for (int i = 0; i < k; i++) {
      if (tmpcolumn[i]) {
	dup_column++;
      }
    }

    // The submatrix must be square to be invertible.
    if (dup_row != dup_column) {
      continue;
    }

    dup = dup_row;
    if (dup == 0) {
      // Nothing to solve: all wanted chunks already available.
      mindup = dup;
      for (int i = 0; i < k; i++) {
	dm_row[i] = -1;
      }
      for (int i = 0; i < k; i++) {
	dm_column[i] = -1;
      }
      break;
    }

    // minimum is updated.
    if (dup < mindup) {
      // Extract the dup x dup submatrix: identity rows for available data
      // chunks, coding-matrix rows for the selected parities.
      int tmpmat[dup * dup];
      {
	for (int i = 0, row = 0; i < k + m; i++) {
	  if (tmprow[i]) {
	    for (int j = 0, column = 0; j < k; j++) {
	      if (tmpcolumn[j]) {
		if (i < k) {
		  tmpmat[row * dup + column] = (i == j ? 1 : 0);
		} else {
		  tmpmat[row * dup + column] = matrix[(i - k) * k + j];
		}
		column++;
	      }
	    }
	    row++;
	  }
	}
      }
      // Keep this candidate only if the submatrix is invertible.
      int det = calc_determinant(tmpmat, dup);

      if (det != 0) {
	int row_id = 0;
	int column_id = 0;
	for (int i = 0; i < k; i++) {
	  dm_row[i] = -1;
	}
	for (int i = 0; i < k; i++) {
	  dm_column[i] = -1;
	}

	mindup = dup;
	for (int i=0; i < k + m; i++) {
	  if (tmprow[i]) {
	    dm_row[row_id++] = i;
	  }
	}
	for (int i=0; i < k; i++) {
	  if (tmpcolumn[i]) {
	    dm_column[column_id++] = i;
	  }
	}
	minp = ek;
      }
    }
  }

  if (mindup == k+1) {
    dout(10) << __func__ << ": can't find recover matrix." << dendl;
    return -1;
  }

  // minimum = the submatrix's source chunks plus any wanted chunks that
  // are directly available.
  for (int i = 0; i < k + m; i++) {
    minimum[i] = 0;
  }

  for (int i=0; i < k && dm_row[i] != -1; i++) {
    minimum[dm_row[i]] = 1;
  }

  for (int i = 0; i < k; ++i) {
    if (want[i] && avails[i]) {
      minimum[i] = 1;
    }
  }

  // A wanted, available parity whose row touches data chunks we are not
  // otherwise reading must itself be read.
  for (int i = 0; i < m; ++i) {
    if (want[k + i] && avails[k + i] && !minimum[k + i]) {
      for (int j = 0; j < k; ++j) {
	if (matrix[i * k + j] > 0 && !want[j]) {
	  minimum[k + i] = 1;
	  break;
	}
      }
    }
  }

  if (mindup == 0) {
    return 0;
  }

  // Rebuild the winning submatrix and remap dm_row entries to row indices
  // usable by jerasure_matrix_dotprod (data rows become local indices,
  // parity rows are shifted by k - mindup).
  int tmpmat[mindup * mindup];
  for (int i=0; i < mindup; i++) {
    for (int j=0; j < mindup; j++) {
      if (dm_row[i] < k) {
	tmpmat[i * mindup + j] = (dm_row[i] == dm_column[j] ? 1 : 0);
      } else {
	tmpmat[i * mindup + j] = matrix[(dm_row[i] - k) * k + dm_column[j]];
      }
    }
    if (dm_row[i] < k) {
      for (int j = 0; j < mindup; j++) {
	if (dm_row[i] == dm_column[j]) {
	  dm_row[i] = j;
	}
      }
    } else {
      dm_row[i] -= (k - mindup);
    }
  }

  // Dry run (e.g. _minimum_to_decode): skip the expensive inversion.
  if (prepare) {
    return 0;
  }

  int ret = jerasure_invert_matrix(tmpmat, decoding_matrix, mindup, w);

  tcache.putDecodingTableToCache(decoding_matrix, dm_row, dm_column, minimum, technique,
				 k, m, c, w, want, avails);

  return ret;
}
// Recover the chunks flagged in `want` using the flags in `avails`:
// build/invert the decoding submatrix, solve for the missing data chunks,
// then re-encode any missing-but-wanted coding chunks from the repaired
// data.  Returns 0 on success, -1 on failure.
int ErasureCodeShec::shec_matrix_decode(int *want, int *avails, char **data_ptrs,
					char **coding_ptrs, int size)
{
  int decoding_matrix[k*k];
  int dm_row[k], dm_column[k];
  int minimum[k + m];

  memset(decoding_matrix, 0, sizeof(decoding_matrix));
  memset(dm_row, -1, sizeof(dm_row));
  memset(dm_column, -1, sizeof(dm_column));
  memset(minimum, -1, sizeof(minimum));

  if (w != 8 && w != 16 && w != 32) return -1;

  if (shec_make_decoding_matrix(false, want, avails, decoding_matrix,
				dm_row, dm_column, minimum) < 0) {
    return -1;
  }

  // Get decoding matrix size
  int dm_size = 0;
  for (int i = 0; i < k; i++) {
    if (dm_row[i] == -1) {
      break;
    }
    dm_size++;
  }

  // The data-chunk pointers the submatrix operates on, in column order.
  char *dm_data_ptrs[dm_size];
  for (int i = 0; i < dm_size; i++) {
    dm_data_ptrs[i] = data_ptrs[dm_column[i]];
  }

  // Decode the data drives
  for (int i = 0; i < dm_size; i++) {
    if (!avails[dm_column[i]]) {
      jerasure_matrix_dotprod(dm_size, w, decoding_matrix + (i * dm_size),
			      dm_row, i, dm_data_ptrs, coding_ptrs, size);
    }
  }

  // Re-encode any erased coding devices
  for (int i = 0; i < m; i++) {
    if (want[k+i] && !avails[k+i]) {
      jerasure_matrix_dotprod(k, w, matrix + (i * k), NULL, i+k,
			      data_ptrs, coding_ptrs, size);
    }
  }

  return 0;
}
| 20,702 | 24.371324 | 105 | cc |
null | ceph-main/src/erasure-code/shec/ErasureCodeShec.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 FUJITSU LIMITED
* Copyright (C) 2013, 2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Takanori Nakao <[email protected]>
* Author: Takeshi Miyamae <[email protected]>
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_SHEC_H
#define CEPH_ERASURE_CODE_SHEC_H
#include "erasure-code/ErasureCode.h"
#include "ErasureCodeShecTableCache.h"
// Base class for the SHEC (Shingled Erasure Code) plugin.  Holds the
// layout parameters parsed from the erasure-code profile and declares the
// encode/decode entry points; concrete subclasses supply the actual
// matrix arithmetic (shec_encode/shec_decode/prepare).
class ErasureCodeShec : public ceph::ErasureCode {
public:
  // Coding technique selector (constructor argument of subclasses).
  enum {
    MULTIPLE = 0,
    SINGLE = 1
  };

  ErasureCodeShecTableCache &tcache;  // shared encode/decode matrix cache
  int k;          // number of data chunks (profile "k")
  int DEFAULT_K;
  int m;          // number of coding chunks (profile "m"); chunk count is k+m
  int DEFAULT_M;
  int c;          // SHEC-specific layout parameter (profile "c")
  int DEFAULT_C;
  int w;          // word size; presumably the Galois-field width — TODO confirm
  int DEFAULT_W;
  int technique;  // MULTIPLE or SINGLE
  int *matrix;    // cached generator matrix, owned by tcache (see prepare())

  ErasureCodeShec(const int _technique,
		  ErasureCodeShecTableCache &_tcache) :
    tcache(_tcache),
    k(0),
    DEFAULT_K(4),
    m(0),
    DEFAULT_M(3),
    c(0),
    DEFAULT_C(2),
    w(0),
    DEFAULT_W(8),
    technique(_technique),
    matrix(0)
  {}

  ~ErasureCodeShec() override {}

  // Total number of chunks produced per object (data + coding).
  unsigned int get_chunk_count() const override {
    return k + m;
  }

  // Number of data chunks.
  unsigned int get_data_chunk_count() const override {
    return k;
  }

  unsigned int get_chunk_size(unsigned int object_size) const override;

  // Compute the minimum set of chunks needed to reconstruct want_to_read
  // given available_chunks.
  int _minimum_to_decode(const std::set<int> &want_to_read,
			 const std::set<int> &available_chunks,
			 std::set<int> *minimum);

  int minimum_to_decode_with_cost(const std::set<int> &want_to_read,
				  const std::map<int, int> &available,
				  std::set<int> *minimum) override;

  int encode(const std::set<int> &want_to_encode,
	     const ceph::buffer::list &in,
	     std::map<int, ceph::buffer::list> *encoded) override;
  int encode_chunks(const std::set<int> &want_to_encode,
		    std::map<int, ceph::buffer::list> *encoded) override;

  int _decode(const std::set<int> &want_to_read,
	      const std::map<int, ceph::buffer::list> &chunks,
	      std::map<int, ceph::buffer::list> *decoded) override;
  int decode_chunks(const std::set<int> &want_to_read,
		    const std::map<int, ceph::buffer::list> &chunks,
		    std::map<int, ceph::buffer::list> *decoded) override;

  int init(ceph::ErasureCodeProfile &profile, std::ostream *ss) override;

  // Hooks implemented by the concrete technique (e.g. Vandermonde).
  virtual void shec_encode(char **data,
			   char **coding,
			   int blocksize) = 0;
  virtual int shec_decode(int *erasures,
			  int *avails,
			  char **data,
			  char **coding,
			  int blocksize) = 0;
  virtual unsigned get_alignment() const = 0;
  virtual void prepare() = 0;

  virtual int shec_matrix_decode(int *erased, int *avails,
                                 char **data_ptrs, char **coding_ptrs, int size);
  virtual int* shec_reedsolomon_coding_matrix(int is_single);

private:
  virtual int parse(const ceph::ErasureCodeProfile &profile) = 0;
  virtual double shec_calc_recovery_efficiency1(int k, int m1, int m2, int c1, int c2);
  virtual int shec_make_decoding_matrix(bool prepare,
                                        int *want, int *avails,
                                        int *decoding_matrix,
                                        int *dm_row, int *dm_column,
                                        int *minimum);
};
// SHEC implementation backed by a Reed-Solomon Vandermonde generator
// matrix (the only concrete technique provided by this plugin).
class ErasureCodeShecReedSolomonVandermonde final : public ErasureCodeShec {
public:

  ErasureCodeShecReedSolomonVandermonde(ErasureCodeShecTableCache &_tcache,
					int technique = MULTIPLE) :
    ErasureCodeShec(technique, _tcache)
  {}

  ~ErasureCodeShecReedSolomonVandermonde() override {
  }

  void shec_encode(char **data,
			   char **coding,
			   int blocksize) override;
  int shec_decode(int *erasures,
			  int *avails,
			  char **data,
			  char **coding,
			  int blocksize) override;
  unsigned get_alignment() const override;
  void prepare() override;
private:
  // Parse k/m/c/w out of the erasure-code profile.
  int parse(const ceph::ErasureCodeProfile &profile) override;
};
#endif
| 4,271 | 27.864865 | 87 | h |
null | ceph-main/src/erasure-code/shec/ErasureCodeShecTableCache.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 FUJITSU LIMITED
* Copyright (C) 2014 CERN (Switzerland)
*
* Author: Takanori Nakao <[email protected]>
* Author: Takeshi Miyamae <[email protected]>
* Author: Andreas-Joachim Peters <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
// -----------------------------------------------------------------------------
#include "ErasureCodeShecTableCache.h"
#include "common/debug.h"
// -----------------------------------------------------------------------------
using namespace std;
// -----------------------------------------------------------------------------
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix _tc_prefix(_dout)
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
// Emit the fixed log prefix used by this translation unit's dout() macro.
static ostream&
_tc_prefix(std::ostream* _dout) {
  std::ostream& stream = *_dout;
  stream << "ErasureCodeShecTableCache: ";
  return stream;
}
// -----------------------------------------------------------------------------
// Tear down the cache: free every cached encoding matrix plus the int*
// slot that holds it, then the per-technique decoding-table LRU maps and
// lists.  Runs under the guard mutex in case a racing user still exists.
ErasureCodeShecTableCache::~ErasureCodeShecTableCache()
{
  std::lock_guard lock{codec_tables_guard};

  // clean-up all allocated tables
  {
    // Walk the 5-level encoding table:
    // technique -> k -> m -> c -> w -> (int** slot).
    codec_technique_tables_t::const_iterator ttables_it;
    codec_tables_t::const_iterator tables_it;
    codec_tables_t_::const_iterator tables_it_;
    codec_tables_t__::const_iterator tables_it__;
    codec_table_t::const_iterator table_it;
    for (ttables_it = encoding_table.begin(); ttables_it != encoding_table.end(); ++ttables_it) {
      for (tables_it = ttables_it->second.begin(); tables_it != ttables_it->second.end(); ++tables_it) {
        for (tables_it_ = tables_it->second.begin(); tables_it_ != tables_it->second.end(); ++tables_it_) {
          for (tables_it__ = tables_it_->second.begin(); tables_it__ != tables_it_->second.end(); ++tables_it__) {
            for (table_it = tables_it__->second.begin(); table_it != tables_it__->second.end(); ++table_it) {
              if (table_it->second) {
                if (*(table_it->second)) {
                  // Free the cached matrix itself.
                  // NOTE(review): setEncodingTable() releases duplicate
                  // matrices with free(), but here the stored matrix is
                  // released with delete — one of the two allocators looks
                  // wrong; confirm how callers allocate these tables.
                  delete *(table_it->second);
                }
                // Free the int* slot allocated by getEncodingTableNoLock().
                delete table_it->second;
              }
            }
          }
        }
      }
    }
  }

  {
    // Free the lazily-allocated per-technique LRU maps and lists; their
    // DecodingCacheParameter entries free their own arrays in their dtor.
    std::map<int, lru_map_t*>::const_iterator lru_map_it;
    std::map<int, lru_list_t*>::const_iterator lru_list_it;
    for (lru_map_it = decoding_tables.begin();
         lru_map_it != decoding_tables.end();
         ++lru_map_it) {
      if (lru_map_it->second) {
        delete lru_map_it->second;
      }
    }
    for (lru_list_it = decoding_tables_lru.begin();
         lru_list_it != decoding_tables_lru.end();
         ++lru_list_it) {
      if (lru_list_it->second) {
        delete lru_list_it->second;
      }
    }
  }
}
ErasureCodeShecTableCache::lru_map_t*
ErasureCodeShecTableCache::getDecodingTables(int technique) {
  // Fetch (lazily creating) the decoding-table map for this technique.
  // The caller must already hold codec_tables_guard:
  // => std::lock_guard lock{codec_tables_guard};
  lru_map_t*& slot = decoding_tables[technique];
  if (slot == nullptr) {
    slot = new lru_map_t;
  }
  return slot;
}
ErasureCodeShecTableCache::lru_list_t*
ErasureCodeShecTableCache::getDecodingTablesLru(int technique) {
  // Fetch (lazily creating) the LRU recency list for this technique.
  // The caller must already hold codec_tables_guard:
  // => std::lock_guard lock{codec_tables_guard};
  lru_list_t*& slot = decoding_tables_lru[technique];
  if (slot == nullptr) {
    slot = new lru_list_t;
  }
  return slot;
}
int**
ErasureCodeShecTableCache::getEncodingTable(int technique, int k, int m, int c, int w)
{
  // Thread-safe wrapper: take the table mutex, then delegate to the
  // lock-free worker.
  std::lock_guard lock{codec_tables_guard};
  int** slot = getEncodingTableNoLock(technique, k, m, c, w);
  return slot;
}
// -----------------------------------------------------------------------------
// Return the int* slot for the (technique,k,m,c,w) combination, lazily
// allocating the slot on first access.  The slot initially holds a null
// matrix pointer; setEncodingTable() fills it in later.  The caller must
// hold codec_tables_guard (see getEncodingTable()).
int**
ErasureCodeShecTableCache::getEncodingTableNoLock(int technique, int k, int m, int c, int w)
{
  // create a pointer to store an encoding table address
  if (!encoding_table[technique][k][m][c][w]) {
    encoding_table[technique][k][m][c][w] = new (int*);
    *encoding_table[technique][k][m][c][w] = 0;
  }
  return encoding_table[technique][k][m][c][w];
}
// Publish an encoding matrix for (technique,k,m,c,w).  If another thread
// already published one, the caller's copy is released and the previously
// cached matrix is returned instead, so all users share a single table.
int*
ErasureCodeShecTableCache::setEncodingTable(int technique, int k, int m, int c, int w, int* ec_in_table)
{
  std::lock_guard lock{codec_tables_guard};
  int** ec_out_table = getEncodingTableNoLock(technique, k, m, c, w);
  if (*ec_out_table) {
    // somebody might have deposited this table in the meanwhile, so clean
    // the input table and return the stored one
    // NOTE(review): the duplicate is released with free(), implying the
    // caller malloc()s it, yet the destructor deletes the stored table —
    // one of the two looks wrong; confirm the allocator used by callers.
    free (ec_in_table);
    return *ec_out_table;
  } else {
    // we store the provided input table and return this one
    *encoding_table[technique][k][m][c][w] = ec_in_table;
    return ec_in_table;
  }
}
// Expose the table-guard mutex so callers can lock around the *NoLock
// accessors themselves.
ceph::mutex*
ErasureCodeShecTableCache::getLock()
{
  ceph::mutex* guard = &codec_tables_guard;
  return guard;
}
// Pack the decoding-cache key into one 64-bit value: k/m/c/w occupy the
// low 24 bits (6 bits each), one availability bit per chunk starts at bit
// 24, and one erasure bit per chunk starts at bit 44.
uint64_t
ErasureCodeShecTableCache::getDecodingCacheSignature(int k, int m, int c, int w,
                                                     int *erased, int *avails) {
  uint64_t signature = (uint64_t)k
                     | ((uint64_t)m << 6)
                     | ((uint64_t)c << 12)
                     | ((uint64_t)w << 18);
  for (int i = 0; i < k + m; i++) {
    if (avails[i]) {
      signature |= ((uint64_t)1 << (24 + i));
    }
    if (erased[i]) {
      signature |= ((uint64_t)1 << (44 + i));
    }
  }
  return signature;
}
// Look up a cached decoding table for the given layout and erasure
// pattern.  On a hit the cached matrices are copied into the caller's
// output buffers, the entry is moved to the tail of the LRU list
// (most-recently-used), and true is returned; on a miss nothing is
// touched and false is returned.
bool
ErasureCodeShecTableCache::getDecodingTableFromCache(int* decoding_matrix,
                                                     int* dm_row,
                                                     int* dm_column,
                                                     int* minimum,
                                                     int technique,
                                                     int k,
                                                     int m,
                                                     int c,
                                                     int w,
                                                     int* erased,
                                                     int* avails) {
  // --------------------------------------------------------------------------
  // LRU decoding matrix cache
  // --------------------------------------------------------------------------

  uint64_t signature = getDecodingCacheSignature(k, m, c, w, erased, avails);
  std::lock_guard lock{codec_tables_guard};

  dout(20) << "[ get table ] = " << signature << dendl;

  // we try to fetch a decoding table from an LRU cache
  lru_map_t* decode_tbls_map =
    getDecodingTables(technique);

  lru_list_t* decode_tbls_lru =
    getDecodingTablesLru(technique);

  lru_map_t::iterator decode_tbls_map_it = decode_tbls_map->find(signature);
  if (decode_tbls_map_it == decode_tbls_map->end()) {
    return false;
  }

  dout(20) << "[ cached table ] = " << signature << dendl;
  // copy parameters out of the cache

  memcpy(decoding_matrix,
         decode_tbls_map_it->second.second.decoding_matrix,
         k * k * sizeof(int));
  memcpy(dm_row,
         decode_tbls_map_it->second.second.dm_row,
         k * sizeof(int));
  memcpy(dm_column,
         decode_tbls_map_it->second.second.dm_column,
         k * sizeof(int));
  memcpy(minimum,
         decode_tbls_map_it->second.second.minimum,
         (k+m) * sizeof(int));

  // find item in LRU queue and push back (mark as most recently used)
  decode_tbls_lru->splice(decode_tbls_lru->end(),
                          *decode_tbls_lru,
                          decode_tbls_map_it->second.first);
  return true;
}
// Insert a freshly computed decoding table into the per-technique LRU
// cache.  If the signature is already cached, only its recency is
// refreshed; otherwise the least-recently-used entry is evicted when the
// cache is full and the caller's matrices are deep-copied into a new
// entry (the cache owns its own copies).
void
ErasureCodeShecTableCache::putDecodingTableToCache(int* decoding_matrix,
                                                   int* dm_row,
                                                   int* dm_column,
                                                   int* minimum,
                                                   int technique,
                                                   int k,
                                                   int m,
                                                   int c,
                                                   int w,
                                                   int* erased,
                                                   int* avails) {
  // --------------------------------------------------------------------------
  // LRU decoding matrix cache
  // --------------------------------------------------------------------------

  std::lock_guard lock{codec_tables_guard};

  uint64_t signature = getDecodingCacheSignature(k, m, c, w, erased, avails);
  dout(20) << "[ put table ] = " << signature << dendl;

  // we store a new table to the cache

  //  bufferptr cachetable;

  lru_map_t* decode_tbls_map =
    getDecodingTables(technique);

  lru_list_t* decode_tbls_lru =
    getDecodingTablesLru(technique);

  if (decode_tbls_map->count(signature)) {
    dout(20) << "[ already on table ] = " << signature << dendl;

    // find item in LRU queue and push back (refresh recency only)
    decode_tbls_lru->splice(decode_tbls_lru->end(),
                            *decode_tbls_lru,
                            (*decode_tbls_map)[signature].first);
    return;
  }

  // evt. shrink the LRU queue/map: evict the oldest entry (list front)
  if ((int)decode_tbls_lru->size() >=
      ErasureCodeShecTableCache::decoding_tables_lru_length) {
    dout(20) << "[ shrink lru ] = " << signature << dendl;
    // remove from map
    decode_tbls_map->erase(decode_tbls_lru->front());
    // remove from lru
    decode_tbls_lru->pop_front();
  }

  {
    dout(20) << "[ store table ] = " << signature << dendl;

    decode_tbls_lru->push_back(signature);

    // allocate a new buffer and remember the list position in the map so
    // splice() can refresh it later
    lru_list_t::iterator it_end = decode_tbls_lru->end();
    --it_end;

    lru_entry_t &map_value =
      (*decode_tbls_map)[signature] =
      std::make_pair(it_end, DecodingCacheParameter());
    map_value.second.decoding_matrix = new int[k*k];
    map_value.second.dm_row = new int[k];
    map_value.second.dm_column = new int[k];
    map_value.second.minimum = new int[k+m];

    memcpy(map_value.second.decoding_matrix,
           decoding_matrix,
           k * k * sizeof(int));
    memcpy(map_value.second.dm_row,
           dm_row,
           k * sizeof(int));
    memcpy(map_value.second.dm_column,
           dm_column,
           k * sizeof(int));
    memcpy(map_value.second.minimum,
           minimum,
           (k+m) * sizeof(int));

    dout(20) << "[ cache size ] = " << decode_tbls_lru->size() << dendl;
  }
}
| 10,928 | 33.260188 | 114 | cc |
null | ceph-main/src/erasure-code/shec/ErasureCodeShecTableCache.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 FUJITSU LIMITED
* Copyright (C) 2014 CERN (Switzerland)
*
* Author: Takanori Nakao <[email protected]>
* Author: Takeshi Miyamae <[email protected]>
* Author: Andreas-Joachim Peters <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_ERASURE_CODE_SHEC_TABLE_CACHE_H
#define CEPH_ERASURE_CODE_SHEC_TABLE_CACHE_H
// -----------------------------------------------------------------------------
#include "common/ceph_mutex.h"
#include "erasure-code/ErasureCodeInterface.h"
// -----------------------------------------------------------------------------
#include <list>
// -----------------------------------------------------------------------------
class ErasureCodeShecTableCache {
  // ---------------------------------------------------------------------------
  // This class implements a table cache for encoding and decoding matrices.
  // Encoding matrices are shared for the same (k,m,c,w) combination.
  // It supplies a decoding matrix lru cache which is shared for identical
  // matrix types e.g. there is one cache (lru-list + lru-map)
  // ---------------------------------------------------------------------------

  // Owned copies of one cached decoding table; arrays are allocated by
  // putDecodingTableToCache() and freed here.
  // NOTE(review): this type is copyable but the destructor deletes raw
  // arrays (rule-of-three violation).  The current callers only copy it
  // while the pointers are still null, but any other copy would
  // double-delete — consider deleting copy ctor/assignment.
  class DecodingCacheParameter {
  public:
    int* decoding_matrix; // size: k*k
    int* dm_row; // size: k
    int* dm_column; // size: k
    int* minimum; // size: k+m
    DecodingCacheParameter() {
      decoding_matrix = 0;
      dm_row = 0;
      dm_column = 0;
      minimum = 0;
    }
    ~DecodingCacheParameter() {
      if (decoding_matrix) {
        delete[] decoding_matrix;
      }
      if (dm_row) {
        delete[] dm_row;
      }
      if (dm_column) {
        delete[] dm_column;
      }
      if (minimum) {
        delete[] minimum;
      }
    }
  };

 public:
  // Maximum number of decoding tables kept per technique before eviction.
  static const int decoding_tables_lru_length = 10000;
  // Map value: (position in the LRU list, owned table copies).
  typedef std::pair<std::list<uint64_t>::iterator,
                    DecodingCacheParameter> lru_entry_t;
  typedef std::map< int, int** > codec_table_t;
  typedef std::map< int, codec_table_t > codec_tables_t__;
  typedef std::map< int, codec_tables_t__ > codec_tables_t_;
  typedef std::map< int, codec_tables_t_ > codec_tables_t;
  typedef std::map< int, codec_tables_t > codec_technique_tables_t;
  // int** matrix = codec_technique_tables_t[technique][k][m][c][w]

  typedef std::map< uint64_t, lru_entry_t > lru_map_t;
  typedef std::list< uint64_t > lru_list_t;

  ErasureCodeShecTableCache() = default;
  virtual ~ErasureCodeShecTableCache();

  // mutex used to protect modifications in encoding/decoding table maps
  ceph::mutex codec_tables_guard = ceph::make_mutex("shec-lru-cache");

  // Copy a cached decoding table into the output arrays; true on hit.
  bool getDecodingTableFromCache(int* matrix,
                                 int* dm_row, int* dm_column,
                                 int* minimum,
                                 int technique,
                                 int k, int m, int c, int w,
                                 int* want, int* avails);

  // Deep-copy a freshly computed decoding table into the LRU cache.
  void putDecodingTableToCache(int* matrix,
                               int* dm_row, int* dm_column,
                               int* minimum,
                               int technique,
                               int k, int m, int c, int w,
                               int* want, int* avails);

  int** getEncodingTable(int technique, int k, int m, int c, int w);
  int** getEncodingTableNoLock(int technique, int k, int m, int c, int w);
  // Publish an encoding matrix; returns the matrix actually cached.
  int* setEncodingTable(int technique, int k, int m, int c, int w, int*);

 private:
  // encoding table accessed via table[matrix][k][m][c][w]
  // decoding table cache accessed via map[matrixtype]
  // decoding table lru list accessed via list[matrixtype]
  codec_technique_tables_t encoding_table;
  std::map<int, lru_map_t*> decoding_tables;
  std::map<int, lru_list_t*> decoding_tables_lru;

  lru_map_t* getDecodingTables(int technique);
  lru_list_t* getDecodingTablesLru(int technique);
  uint64_t getDecodingCacheSignature(int k, int m, int c, int w,
                                     int *want, int *avails);
  ceph::mutex* getLock();
};
| 4,490 | 36.115702 | 80 | h |
null | ceph-main/src/exporter/DaemonMetricCollector.cc | #include "DaemonMetricCollector.h"
#include <boost/json/src.hpp>
#include <chrono>
#include <filesystem>
#include <iostream>
#include <map>
#include <memory>
#include <regex>
#include <string>
#include <utility>
#include "common/admin_socket_client.h"
#include "common/debug.h"
#include "common/hostname.h"
#include "common/perf_counters.h"
#include "common/split.h"
#include "global/global_context.h"
#include "global/global_init.h"
#include "include/common_fwd.h"
#include "util.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_ceph_exporter
using json_object = boost::json::object;
using json_value = boost::json::value;
using json_array = boost::json::array;
// Self-rescheduling scrape loop: each timer expiry rescans the admin
// sockets, scrapes all daemons, then re-arms the timer with the current
// exporter_stats_period.  Runs on the io_service driven by main().
void DaemonMetricCollector::request_loop(boost::asio::steady_timer &timer) {
  timer.async_wait([&](const boost::system::error_code &e) {
    // NOTE(review): the error code is printed unconditionally, even on a
    // normal expiry — consider logging only when e is set.
    std::cerr << e << std::endl;
    update_sockets();
    dump_asok_metrics();
    auto stats_period = g_conf().get_val<int64_t>("exporter_stats_period");
    // time to wait before sending requests again
    timer.expires_from_now(std::chrono::seconds(stats_period));
    request_loop(timer);
  });
}
// Entry point of the collector thread: start the scrape loop immediately
// (timer armed with a 0s delay) and block in io.run() for the life of
// the process.
void DaemonMetricCollector::main() {
  // time to wait before sending requests again

  boost::asio::io_service io;
  boost::asio::steady_timer timer{io, std::chrono::seconds(0)};
  request_loop(timer);
  io.run();
}
// Return the latest exposition text; guarded because the scrape loop
// replaces `metrics` concurrently.
std::string DaemonMetricCollector::get_metrics() {
  const std::lock_guard<std::mutex> lock(metrics_mutex);
  std::string snapshot = metrics;
  return snapshot;
}
// Stringify a numeric sample and hand it to the active metrics builder.
template <class T>
void add_metric(std::unique_ptr<MetricsBuilder> &builder, T value,
                std::string name, std::string description, std::string mtype,
                labels_t labels) {
  const std::string rendered = std::to_string(value);
  builder->add(rendered, name, description, mtype, labels);
}
void add_double_or_int_metric(std::unique_ptr<MetricsBuilder> &builder,
json_value value, std::string name,
std::string description, std::string mtype,
labels_t labels) {
if (value.is_int64()) {
int64_t v = value.as_int64();
add_metric(builder, v, name, description, mtype, labels);
} else if (value.is_double()) {
double v = value.as_double();
add_metric(builder, v, name, description, mtype, labels);
}
}
// Convert a boost::json::string to std::string.  Uses the (pointer,
// length) constructor: the original data()-only form stopped at the first
// embedded NUL byte and relied on NUL termination.
std::string boost_string_to_std(boost::json::string js) {
  std::string res(js.data(), js.size());
  return res;
}
// Wrap a label value in double quotes for the Prometheus exposition format.
std::string quote(std::string value) {
  std::string quoted("\"");
  quoted += value;
  quoted += "\"";
  return quoted;
}
// Scrape every known daemon admin socket ("counter dump" + "counter
// schema"), convert the labeled perf counters to Prometheus text via the
// configured builder, collect each daemon's pid for process metrics, and
// finally publish the result into `metrics` under metrics_mutex.
void DaemonMetricCollector::dump_asok_metrics() {
  BlockTimer timer(__FILE__, __FUNCTION__);

  std::vector<std::pair<std::string, int>> daemon_pids;

  int failures = 0;
  // Sorted output costs more; make it opt-in via config.
  bool sort = g_conf().get_val<bool>("exporter_sort_metrics");
  if (sort) {
    builder =
        std::unique_ptr<OrderedMetricsBuilder>(new OrderedMetricsBuilder());
  } else {
    builder =
        std::unique_ptr<UnorderedMetricsBuilder>(new UnorderedMetricsBuilder());
  }
  // Counters below this priority are skipped.
  auto prio_limit = g_conf().get_val<int64_t>("exporter_prio_limit");
  for (auto &[daemon_name, sock_client] : clients) {
    bool ok;
    sock_client.ping(&ok);
    if (!ok) {
      failures++;
      continue;
    }
    std::string counter_dump_response =
        asok_request(sock_client, "counter dump", daemon_name);
    if (counter_dump_response.size() == 0) {
      failures++;
      continue;
    }
    std::string counter_schema_response =
        asok_request(sock_client, "counter schema", daemon_name);
    if (counter_schema_response.size() == 0) {
      failures++;
      continue;
    }

    json_object counter_dump = boost::json::parse(counter_dump_response).as_object();
    json_object counter_schema = boost::json::parse(counter_schema_response).as_object();

    // Schema and dump are parallel structures: iterate them in lockstep,
    // taking metadata (type, description, labels) from the schema and
    // values from the dump.
    for (auto &perf_group_item : counter_schema) {
      std::string perf_group = {perf_group_item.key().begin(),
                                perf_group_item.key().end()};
      json_array perf_group_schema_array = perf_group_item.value().as_array();
      json_array perf_group_dump_array = counter_dump[perf_group].as_array();
      for (auto schema_itr = perf_group_schema_array.begin(),
                dump_itr = perf_group_dump_array.begin();
           schema_itr != perf_group_schema_array.end() &&
           dump_itr != perf_group_dump_array.end();
           ++schema_itr, ++dump_itr) {
        auto counters = schema_itr->at("counters").as_object();
        auto counters_labels = schema_itr->at("labels").as_object();
        auto counters_values = dump_itr->at("counters").as_object();
        labels_t labels;

        for (auto &label: counters_labels) {
          std::string label_key = {label.key().begin(), label.key().end()};
          labels[label_key] = quote(label.value().as_string().c_str());
        }
        for (auto &counter : counters) {
          json_object counter_group = counter.value().as_object();
          if (counter_group["priority"].as_int64() < prio_limit) {
            continue;
          }
          std::string counter_name_init = {counter.key().begin(), counter.key().end()};
          std::string counter_name = perf_group + "_" + counter_name_init;
          promethize(counter_name);

          // Unlabeled counters get daemon-derived labels (e.g. rgw
          // instance_id, rbd-mirror pool/image) instead.
          if (counters_labels.empty()) {
            auto labels_and_name = get_labels_and_metric_name(daemon_name, counter_name);
            labels = labels_and_name.first;
            counter_name = labels_and_name.second;
          }
          // For now this is only required for rgw multi-site metrics
          auto multisite_labels_and_name = add_fixed_name_metrics(counter_name);
          if (!multisite_labels_and_name.first.empty()) {
            labels.insert(multisite_labels_and_name.first.begin(), multisite_labels_and_name.first.end());
            counter_name = multisite_labels_and_name.second;
          }
          labels.insert({"ceph_daemon", quote(daemon_name)});
          auto perf_values = counters_values.at(counter_name_init);
          dump_asok_metric(counter_group, perf_values, counter_name, labels);
        }
      }
    }
    // Resolve the daemon's pid (via its configured pid_file) so
    // get_process_metrics() can read /proc/<pid>/stat below.
    std::string config_show =
        asok_request(sock_client, "config show", daemon_name);
    if (config_show.size() == 0) {
      failures++;
      continue;
    }
    json_object pid_file_json = boost::json::parse(config_show).as_object();
    std::string pid_path =
        boost_string_to_std(pid_file_json["pid_file"].as_string());
    std::string pid_str = read_file_to_string(pid_path);
    if (!pid_path.size()) {
      dout(1) << "pid path is empty; process metrics won't be fetched for: "
              << daemon_name << dendl;
    }
    if (!pid_str.empty()) {
      daemon_pids.push_back({daemon_name, std::stoi(pid_str)});
    }
  }
  dout(10) << "Perf counters retrieved for " << clients.size() - failures << "/"
           << clients.size() << " daemons." << dendl;
  // get time spent on this function
  timer.stop();
  std::string scrap_desc(
      "Time spent scraping and transforming perf counters to metrics");
  labels_t scrap_labels;
  scrap_labels["host"] = quote(ceph_get_hostname());
  scrap_labels["function"] = quote(__FUNCTION__);
  add_metric(builder, timer.get_ms(), "ceph_exporter_scrape_time", scrap_desc,
             "gauge", scrap_labels);

  const std::lock_guard<std::mutex> lock(metrics_mutex);
  // only get metrics if there's pid path for some or all daemons isn't empty
  if (daemon_pids.size() != 0) {
    get_process_metrics(daemon_pids);
  }
  metrics = builder->dump();
}
// Slurp a /proc stat-style file and split its contents on whitespace.
std::vector<std::string> read_proc_stat_file(std::string path) {
  const std::string contents = read_file_to_string(path);
  std::vector<std::string> fields;
  for (auto part : ceph::split(contents)) {
    fields.emplace_back(part);
  }
  return fields;
}
// Parse the fields we need out of /proc/<pid>/stat (see proc(5)).
// Indices below are 0-based positions after whitespace-splitting;
// proc(5) numbers fields from 1, so stats[9] is field 10, etc.
// NOTE(review): this assumes the comm field (field 2, "(name)") contains
// no spaces — true for ceph daemon names, but worth confirming.
struct pstat read_pid_stat(int pid) {
  std::string stat_path("/proc/" + std::to_string(pid) + "/stat");
  std::vector<std::string> stats = read_proc_stat_file(stat_path);
  struct pstat stat;
  stat.minflt = std::stoul(stats[9]);         // field 10: minor faults
  stat.majflt = std::stoul(stats[11]);        // field 12: major faults
  stat.utime = std::stoul(stats[13]);         // field 14: user-mode ticks
  stat.stime = std::stoul(stats[14]);         // field 15: kernel-mode ticks
  stat.num_threads = std::stoul(stats[19]);   // field 20: thread count
  stat.start_time = std::stoul(stats[21]);    // field 22: start time (ticks after boot)
  stat.vm_size = std::stoul(stats[22]);       // field 23: virtual memory size
  stat.resident_size = std::stoi(stats[23]);  // field 24: RSS (pages)
  return stat;
}
// Emit per-daemon process metrics (page faults, threads, CPU usage and
// split kernel/user/idle time, memory) derived from /proc/<pid>/stat and
// /proc/uptime.  Called from dump_asok_metrics() with metrics_mutex held.
void DaemonMetricCollector::get_process_metrics(
    std::vector<std::pair<std::string, int>> daemon_pids) {
  std::string path("/proc");
  std::stringstream ss;
  for (auto &[daemon_name, pid] : daemon_pids) {
    std::vector<std::string> uptimes = read_proc_stat_file("/proc/uptime");
    struct pstat stat = read_pid_stat(pid);
    int clk_tck = sysconf(_SC_CLK_TCK);
    // Convert tick counts to seconds, then derive elapsed/idle time and a
    // lifetime-average CPU usage percentage.
    double start_time_seconds = stat.start_time / (double)clk_tck;
    double user_time = stat.utime / (double)clk_tck;
    double kernel_time = stat.stime / (double)clk_tck;
    double total_time_seconds = user_time + kernel_time;
    double uptime = std::stod(uptimes[0]);
    double elapsed_time = uptime - start_time_seconds;
    double idle_time = elapsed_time - total_time_seconds;
    double usage = total_time_seconds * 100 / elapsed_time;

    labels_t labels;
    labels["ceph_daemon"] = quote(daemon_name);
    add_metric(builder, stat.minflt, "ceph_exporter_minflt_total",
               "Number of minor page faults of daemon", "counter", labels);
    add_metric(builder, stat.majflt, "ceph_exporter_majflt_total",
               "Number of major page faults of daemon", "counter", labels);
    add_metric(builder, stat.num_threads, "ceph_exporter_num_threads",
               "Number of threads used by daemon", "gauge", labels);
    add_metric(builder, usage, "ceph_exporter_cpu_usage",
               "CPU usage of a daemon", "gauge", labels);

    // One time series per CPU mode, distinguished by the "mode" label.
    std::string cpu_time_desc = "Process time in kernel/user/idle mode";
    labels_t cpu_total_labels;
    cpu_total_labels["ceph_daemon"] = quote(daemon_name);
    cpu_total_labels["mode"] = quote("kernel");
    add_metric(builder, kernel_time, "ceph_exporter_cpu_total", cpu_time_desc,
               "counter", cpu_total_labels);
    cpu_total_labels["mode"] = quote("user");
    add_metric(builder, user_time, "ceph_exporter_cpu_total", cpu_time_desc,
               "counter", cpu_total_labels);
    cpu_total_labels["mode"] = quote("idle");
    add_metric(builder, idle_time, "ceph_exporter_cpu_total", cpu_time_desc,
               "counter", cpu_total_labels);
    add_metric(builder, stat.vm_size, "ceph_exporter_vm_size",
               "Virtual memory used in a daemon", "gauge", labels);
    add_metric(builder, stat.resident_size, "ceph_exporter_resident_size",
               "Resident memory in a daemon", "gauge", labels);
  }
}
// Issue a single admin-socket command against one daemon and return the
// raw response, or "" on transport error / "ERROR"-prefixed response.
std::string DaemonMetricCollector::asok_request(AdminSocketClient &asok,
                                                std::string command,
                                                std::string daemon_name) {
  std::string request("{\"prefix\": \"" + command + "\"}");
  std::string response;
  std::string err = asok.do_request(request, &response);
  if (err.length() > 0 || response.substr(0, 5) == "ERROR") {
    // Fixed log spacing: the original emitted
    // "command <cmd>failed for daemon <name>with error: ...".
    dout(1) << "command " << command << " failed for daemon " << daemon_name
            << " with error: " << err << dendl;
    return "";
  }
  return response;
}
// Derive daemon-specific labels (and, for rbd-mirror, a rewritten metric
// name) from the daemon socket name for counters that carry no labels of
// their own.
std::pair<labels_t, std::string>
DaemonMetricCollector::get_labels_and_metric_name(std::string daemon_name,
                                                  std::string metric_name) {
  std::string new_metric_name;
  labels_t labels;
  new_metric_name = metric_name;
  // In vstart cluster socket files for rgw are stored as radosgw.<instance_id>.asok
  if (daemon_name.find("radosgw") != std::string::npos) {
    std::size_t pos = daemon_name.find_last_of('.');
    std::string tmp = daemon_name.substr(pos+1);
    labels["instance_id"] = quote(tmp);
  }
  else if (daemon_name.find("rgw") != std::string::npos) {
    // assumes a fixed 16-char prefix before the instance name (e.g.
    // "ceph-client.rgw.<name>...") — TODO confirm against real socket names
    std::string tmp = daemon_name.substr(16, std::string::npos);
    std::string::size_type pos = tmp.find('.');
    labels["instance_id"] = quote("rgw." + tmp.substr(0, pos));
  }
  else if (daemon_name.find("rbd-mirror") != std::string::npos) {
    // Extract pool, optional namespace and image from replay counter
    // names of the form "rbd_mirror_image_<pool>/[<ns>/]<image>.replay*".
    std::regex re(
        "^rbd_mirror_image_([^/]+)/(?:(?:([^/]+)/"
        ")?)(.*)\\.(replay(?:_bytes|_latency)?)$");
    std::smatch match;
    if (std::regex_search(daemon_name, match, re) == true) {
      new_metric_name = "ceph_rbd_mirror_image_" + match.str(4);
      labels["pool"] = quote(match.str(1));
      labels["namespace"] = quote(match.str(2));
      labels["image"] = quote(match.str(3));
    }
  }
  return {labels, new_metric_name};
}
// Add fixed name metrics from existing ones that have details in their names
// that should be in labels (not in name). For backward compatibility,
// a new fixed name metric is created (instead of replacing)and details are put
// in new labels. Intended for RGW sync perf. counters but extendable as required.
// See: https://tracker.ceph.com/issues/45311
// Rewrite RGW multi-site sync counter names of the form
// "data_sync_from_<zone>.<rest>" to the fixed name "from_zone.<rest>" and
// return the zone as a "source_zone" label; returns an empty pair when no
// rewrite applies.
// Fix: the original passed the Python-style replacement string
// "from_([^.]*)', 'from_zone" verbatim to std::regex_replace, producing
// garbage metric names; the intended replacement is the literal
// "from_zone.".
std::pair<labels_t, std::string>
DaemonMetricCollector::add_fixed_name_metrics(std::string metric_name) {
  std::string new_metric_name;
  labels_t labels;
  new_metric_name = metric_name;

  std::regex re("^data_sync_from_(.*)\\.");
  std::smatch match;
  if (std::regex_search(metric_name, match, re) == true) {
    new_metric_name = std::regex_replace(metric_name, re, "from_zone.");
    labels["source_zone"] = quote(match.str(1));
    return {labels, new_metric_name};
  }
  return {};
}
/*
perf_values can be either a int/double or a json_object. Since
json_value is a wrapper of both we use that class.
*/
// Emit one perf counter.  LONGRUNAVG counters are split into a "_count"
// counter and a "_sum" value; everything else is forwarded as-is with the
// type/description taken from the schema entry.
void DaemonMetricCollector::dump_asok_metric(json_object perf_info,
                                             json_value perf_values,
                                             std::string name,
                                             labels_t labels) {
  int64_t type = perf_info["type"].as_int64();
  std::string metric_type =
      boost_string_to_std(perf_info["metric_type"].as_string());
  std::string description =
      boost_string_to_std(perf_info["description"].as_string());

  if (type & PERFCOUNTER_LONGRUNAVG) {
    // Long-running averages arrive as {"avgcount": N, "sum": S}.
    int64_t count = perf_values.as_object()["avgcount"].as_int64();
    add_metric(builder, count, name + "_count", description + " Count", "counter",
               labels);
    json_value sum_value = perf_values.as_object()["sum"];
    add_double_or_int_metric(builder, sum_value, name + "_sum", description + " Total",
                             metric_type, labels);
  } else {
    add_double_or_int_metric(builder, perf_values, name, description,
                             metric_type, labels);
  }
}
void DaemonMetricCollector::update_sockets() {
std::string sock_dir = g_conf().get_val<std::string>("exporter_sock_dir");
clients.clear();
std::filesystem::path sock_path = sock_dir;
if (!std::filesystem::is_directory(sock_path.parent_path())) {
dout(1) << "ERROR: No such directory exist" << sock_dir << dendl;
return;
}
for (const auto &entry : std::filesystem::directory_iterator(sock_dir)) {
if (entry.path().extension() == ".asok") {
std::string daemon_socket_name = entry.path().filename().string();
std::string daemon_name =
daemon_socket_name.substr(0, daemon_socket_name.size() - 5);
if (clients.find(daemon_name) == clients.end() &&
!(daemon_name.find("mgr") != std::string::npos) &&
!(daemon_name.find("ceph-exporter") != std::string::npos)) {
AdminSocketClient sock(entry.path().string());
clients.insert({daemon_name, std::move(sock)});
}
}
}
}
// Group samples by metric name so dump() emits one HELP/TYPE header per
// family, in sorted (map) order.
void OrderedMetricsBuilder::add(std::string value, std::string name,
                                std::string description, std::string mtype,
                                labels_t labels) {
  auto it = metrics.find(name);
  if (it == metrics.end()) {
    it = metrics.emplace(name, Metric(name, mtype, description)).first;
  }
  it->second.add(labels, value);
}
std::string OrderedMetricsBuilder::dump() {
for (auto &[name, metric] : metrics) {
out += metric.dump() + "\n";
}
return out;
}
void UnorderedMetricsBuilder::add(std::string value, std::string name,
std::string description, std::string mtype,
labels_t labels) {
Metric metric(name, mtype, description);
metric.add(labels, value);
out += metric.dump() + "\n\n";
}
// Return everything already rendered by add().
std::string UnorderedMetricsBuilder::dump() {
  return out;
}
// Record one (labels, value) sample for this metric family.
void Metric::add(labels_t labels, std::string value) {
  entries.push_back({labels, value});
}
// Render this family in Prometheus exposition format:
//   # HELP <name> <description>
//   # TYPE <name> <mtype>
//   <name>{k=v,...} <value>    (one line per sample, newline-separated,
//                               no newline after the last sample)
std::string Metric::dump() {
  std::stringstream rendered;
  rendered << "# HELP " << name << " " << description << "\n";
  rendered << "# TYPE " << name << " " << mtype << "\n";
  bool first_entry = true;
  for (auto &entry : entries) {
    if (!first_entry) {
      rendered << "\n";
    }
    first_entry = false;
    rendered << name << "{";
    const char *sep = "";
    for (auto &[label_name, label_value] : entry.labels) {
      rendered << sep << label_name << "=" << label_value;
      sep = ",";
    }
    rendered << "} " << entry.value;
  }
  return rendered.str();
}
// Meyers singleton: the one collector shared by the HTTP server and main().
DaemonMetricCollector &collector_instance() {
  static DaemonMetricCollector the_collector;
  return the_collector;
}
null | ceph-main/src/exporter/DaemonMetricCollector.h | #pragma once
#include "common/admin_socket_client.h"
#include <map>
#include <string>
#include <vector>
#include <boost/asio.hpp>
#include <boost/json/object.hpp>
#include <filesystem>
#include <map>
#include <string>
#include <vector>
// Snapshot of the fields read from /proc/<pid>/stat (see proc(5));
// populated by read_pid_stat().
struct pstat {
  unsigned long utime;       // user-mode CPU time, in clock ticks
  unsigned long stime;       // kernel-mode CPU time, in clock ticks
  unsigned long minflt;      // minor page faults
  unsigned long majflt;      // major page faults
  unsigned long start_time;  // process start time, ticks since boot
  int num_threads;           // number of threads
  unsigned long vm_size;     // virtual memory size
  int resident_size;         // resident set size (pages)
};
class MetricsBuilder;
class OrderedMetricsBuilder;
class UnorderedMetricsBuilder;
class Metric;
typedef std::map<std::string, std::string> labels_t;
// Periodically scrapes perf counters from every local daemon admin socket
// and renders them as Prometheus exposition text, served via get_metrics().
class DaemonMetricCollector {
public:
  // Blocking entry point: runs the scrape loop forever.
  void main();
  // Latest rendered exposition text (thread-safe snapshot).
  std::string get_metrics();

private:
  std::map<std::string, AdminSocketClient> clients;  // daemon name -> socket
  std::string metrics;        // last rendered output, guarded by metrics_mutex
  std::mutex metrics_mutex;
  std::unique_ptr<MetricsBuilder> builder;  // rebuilt each scrape cycle
  void update_sockets();
  void request_loop(boost::asio::steady_timer &timer);

  void dump_asok_metrics();
  void dump_asok_metric(boost::json::object perf_info,
                        boost::json::value perf_values, std::string name,
                        labels_t labels);
  std::pair<labels_t, std::string>
  get_labels_and_metric_name(std::string daemon_name, std::string metric_name);
  std::pair<labels_t, std::string> add_fixed_name_metrics(std::string metric_name);
  void get_process_metrics(std::vector<std::pair<std::string, int>> daemon_pids);
  std::string asok_request(AdminSocketClient &asok, std::string command, std::string daemon_name);
};
class Metric {
private:
struct metric_entry {
labels_t labels;
std::string value;
};
std::string name;
std::string mtype;
std::string description;
std::vector<metric_entry> entries;
public:
Metric(std::string name, std::string mtype, std::string description)
: name(name), mtype(mtype), description(description) {}
Metric(const Metric &) = default;
Metric() = default;
void add(labels_t labels, std::string value);
std::string dump();
};
class MetricsBuilder {
public:
virtual ~MetricsBuilder() = default;
virtual std::string dump() = 0;
virtual void add(std::string value, std::string name, std::string description,
std::string mtype, labels_t labels) = 0;
protected:
std::string out;
};
class OrderedMetricsBuilder : public MetricsBuilder {
private:
std::map<std::string, Metric> metrics;
public:
std::string dump();
void add(std::string value, std::string name, std::string description,
std::string mtype, labels_t labels);
};
class UnorderedMetricsBuilder : public MetricsBuilder {
public:
std::string dump();
void add(std::string value, std::string name, std::string description,
std::string mtype, labels_t labels);
};
DaemonMetricCollector &collector_instance();
| 2,770 | 25.141509 | 98 | h |
null | ceph-main/src/exporter/ceph_exporter.cc | #include "common/ceph_argparse.h"
#include "common/config.h"
#include "exporter/DaemonMetricCollector.h"
#include "exporter/http_server.h"
#include "global/global_init.h"
#include "global/global_context.h"
#include <boost/thread/thread.hpp>
#include <iostream>
#include <map>
#include <string>
#define dout_context g_ceph_context
// Print command-line help for ceph-exporter, then the generic daemon
// options via generic_server_usage().
static void usage() {
  std::cout << "usage: ceph-exporter [options]\n"
            << "options:\n"
               "  --sock-dir: The path to ceph daemons socket files dir\n"
               "  --addrs: Host ip address where exporter is deployed\n"
               "  --port: Port to deploy exporter on. Default is 9926\n"
               "  --prio-limit: Only perf counters greater than or equal to prio-limit are fetched. Default: 5\n"
               "  --stats-period: Time to wait before sending requests again to exporter server (seconds). Default: 5s"
            << std::endl;
  generic_server_usage();
}
// Entry point: parse exporter flags into config values, then run the HTTP
// server on a dedicated thread while the collector loop runs on this one.
int main(int argc, char **argv) {
  auto args = argv_to_vec(argc, argv);
  if (args.empty()) {
    std::cerr << argv[0] << ": -h or --help for usage" << std::endl;
    exit(1);
  }
  if (ceph_argparse_need_usage(args)) {
    usage();
    exit(0);
  }

  // Bootstrap the CephContext; required before touching cct->_conf below.
  auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
                         CODE_ENVIRONMENT_DAEMON, 0);
  std::string val;
  // Map exporter CLI flags to their config options. The ceph_argparse_*
  // helpers erase consumed tokens from `args`, so `i` is only advanced
  // explicitly when nothing matched.
  for (auto i = args.begin(); i != args.end();) {
    if (ceph_argparse_double_dash(args, i)) {
      break;
    } else if (ceph_argparse_witharg(args, i, &val, "--sock-dir", (char *)NULL)) {
      cct->_conf.set_val("exporter_sock_dir", val);
    } else if (ceph_argparse_witharg(args, i, &val, "--addrs", (char *)NULL)) {
      cct->_conf.set_val("exporter_addr", val);
    } else if (ceph_argparse_witharg(args, i, &val, "--port", (char *)NULL)) {
      cct->_conf.set_val("exporter_http_port", val);
    } else if (ceph_argparse_witharg(args, i, &val, "--prio-limit", (char *)NULL)) {
      cct->_conf.set_val("exporter_prio_limit", val);
    } else if (ceph_argparse_witharg(args, i, &val, "--stats-period", (char *)NULL)) {
      cct->_conf.set_val("exporter_stats_period", val);
    } else {
      ++i;
    }
  }
  common_init_finish(g_ceph_context);

  // Serve /metrics from its own thread; collector.main() blocks until exit.
  boost::thread server_thread(http_server_thread_entrypoint);
  DaemonMetricCollector &collector = collector_instance();
  collector.main();
  server_thread.join();
}
| 2,378 | 35.045455 | 119 | cc |
null | ceph-main/src/exporter/http_server.cc | #include "http_server.h"
#include "common/debug.h"
#include "common/hostname.h"
#include "global/global_init.h"
#include "global/global_context.h"
#include "exporter/DaemonMetricCollector.h"
#include <boost/asio.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/http.hpp>
#include <boost/beast/version.hpp>
#include <boost/thread/thread.hpp>
#include <chrono>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <map>
#include <memory>
#include <string>
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_ceph_exporter
namespace beast = boost::beast; // from <boost/beast.hpp>
namespace http = beast::http; // from <boost/beast/http.hpp>
namespace net = boost::asio; // from <boost/asio.hpp>
using tcp = boost::asio::ip::tcp; // from <boost/asio/ip/tcp.hpp>
// One HTTP connection, kept alive by shared_from_this() across the chain of
// asynchronous operations (read -> process -> write). Fixes relative to the
// previous revision:
//  * an unknown request target now yields 404 Not Found instead of the
//    inconsistent 405 Method Not Allowed with a "File not found" body;
//  * write errors are logged before the error_code is reused (and thus
//    overwritten) by socket shutdown.
class http_connection : public std::enable_shared_from_this<http_connection> {
public:
  http_connection(tcp::socket socket) : socket_(std::move(socket)) {}

  // Initiate the asynchronous operations associated with the connection.
  void start() {
    read_request();
    check_deadline();
  }

private:
  tcp::socket socket_;
  beast::flat_buffer buffer_{8192};
  http::request<http::dynamic_body> request_;
  http::response<http::string_body> response_;

  // Hard cap on connection lifetime; the expired handler closes the socket,
  // which cancels any outstanding read/write.
  net::steady_timer deadline_{socket_.get_executor(), std::chrono::seconds(60)};

  // Asynchronously receive a complete request message.
  void read_request() {
    auto self = shared_from_this();

    http::async_read(socket_, buffer_, request_,
                     [self](beast::error_code ec, std::size_t bytes_transferred) {
                       boost::ignore_unused(bytes_transferred);
                       if (ec) {
                         dout(1) << "ERROR: " << ec.message() << dendl;
                         return;
                       }
                       else {
                         self->process_request();
                       }
                     });
  }

  // Determine what needs to be done with the request message.
  void process_request() {
    response_.version(request_.version());
    response_.keep_alive(request_.keep_alive());

    switch (request_.method()) {
    case http::verb::get:
      response_.result(http::status::ok);
      create_response();
      break;

    default:
      // We return responses indicating an error if
      // we do not recognize the request method.
      response_.result(http::status::method_not_allowed);
      response_.set(http::field::content_type, "text/plain");
      std::string body("Invalid request-method '" +
                       std::string(request_.method_string()) + "'");
      response_.body() = body;
      break;
    }

    write_response();
  }

  // Construct a response message based on the program state.
  void create_response() {
    if (request_.target() == "/") {
      response_.set(http::field::content_type, "text/html; charset=utf-8");
      std::string body("<html>\n"
                       "<head><title>Ceph Exporter</title></head>\n"
                       "<body>\n"
                       "<h1>Ceph Exporter</h1>\n"
                       "<p><a href='/metrics'>Metrics</a></p>"
                       "</body>\n"
                       "</html>\n");
      response_.body() = body;
    } else if (request_.target() == "/metrics") {
      response_.set(http::field::content_type, "text/plain; charset=utf-8");
      DaemonMetricCollector &collector = collector_instance();
      std::string metrics = collector.get_metrics();
      response_.body() = metrics;
    } else {
      // Unknown path: 404, matching the "File not found" body (the previous
      // 405 method_not_allowed status was inconsistent with it).
      response_.result(http::status::not_found);
      response_.set(http::field::content_type, "text/plain");
      response_.body() = "File not found \n";
    }
  }

  // Asynchronously transmit the response message.
  void write_response() {
    auto self = shared_from_this();

    response_.prepare_payload();

    http::async_write(socket_, response_,
                      [self](beast::error_code ec, std::size_t) {
                        // Log the write result before `ec` is reused below:
                        // shutdown() writes into it and would otherwise mask
                        // any write failure.
                        if (ec) {
                          dout(1) << "ERROR: " << ec.message() << dendl;
                        }
                        self->socket_.shutdown(tcp::socket::shutdown_send, ec);
                        self->deadline_.cancel();
                      });
  }

  // Check whether we have spent enough time on this connection.
  void check_deadline() {
    auto self = shared_from_this();

    deadline_.async_wait([self](beast::error_code ec) {
      if (!ec) {
        // Close socket to cancel any outstanding operation.
        self->socket_.close(ec);
      }
    });
  }
};
// "Loop" forever accepting new connections.
// "Loop" forever accepting new connections.
// Each completion handler spawns a connection object (which keeps itself
// alive via shared_from_this) and re-arms the acceptor by calling
// http_server() again. This is not unbounded recursion: the call returns
// immediately after queueing the next async accept.
// NOTE(review): acceptor/socket are captured by reference, so both must
// outlive the io_context run loop (true for the locals in the entrypoint).
void http_server(tcp::acceptor &acceptor, tcp::socket &socket) {
  acceptor.async_accept(socket, [&](beast::error_code ec) {
    if (!ec)
      std::make_shared<http_connection>(std::move(socket))->start();
    http_server(acceptor, socket);
  });
}
// Thread entry point: read the listen address and port from the exporter
// config, start the asynchronous accept loop, and run the io_context until
// process exit. Any setup failure (bad address, port already bound, ...)
// terminates the whole process.
void http_server_thread_entrypoint() {
  try {
    std::string exporter_addr = g_conf().get_val<std::string>("exporter_addr");
    auto const address = net::ip::make_address(exporter_addr);
    unsigned short port = g_conf().get_val<int64_t>("exporter_http_port");

    // Single-threaded io_context: all handlers run on this thread.
    net::io_context ioc{1};

    tcp::acceptor acceptor{ioc, {address, port}};
    tcp::socket socket{ioc};
    http_server(acceptor, socket);
    dout(1) << "Http server running on " << exporter_addr << ":" << port << dendl;
    ioc.run();
  } catch (std::exception const &e) {
    dout(1) << "Error: " << e.what() << dendl;
    exit(EXIT_FAILURE);
  }
}
| 5,561 | 31.717647 | 82 | cc |
null | ceph-main/src/exporter/http_server.h | #pragma once
#include <string>
void http_server_thread_entrypoint();
| 71 | 11 | 37 | h |
null | ceph-main/src/exporter/util.cc | #include "util.h"
#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/replace.hpp>
#include <cctype>
#include <chrono>
#include <fstream>
#include <iostream>
#include <sstream>
#include "common/debug.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_ceph_exporter
// Start timing at construction; the end point is recorded by stop() (or at
// destruction, which logs the elapsed time).
BlockTimer::BlockTimer(std::string file, std::string function)
    : file(file), function(function), stopped(false),
      t1(std::chrono::high_resolution_clock::now()) {}
// Log the measured duration when the timer goes out of scope.
// stop() is called first so that `ms` is always set: previously, if the
// caller never invoked stop(), the destructor read `ms` while it was still
// default-initialized (an uninitialized double inside chrono::duration).
BlockTimer::~BlockTimer() {
  stop();
  dout(20) << file << ":" << function << ": " << ms.count() << "ms" << dendl;
}
// Return the measured duration in milliseconds.
// Only meaningful after stop() has been called; `ms` is computed there.
double BlockTimer::get_ms() {
  return ms.count();
}
// Stop the timer and record the elapsed time into `ms`.
// Idempotent: only the first call takes the end timestamp.
void BlockTimer::stop() {
  if (stopped) {
    return;
  }
  stopped = true;
  t2 = std::chrono::high_resolution_clock::now();
  ms = t2 - t1;
}
// Return true when every character of `s` is a decimal digit.
// An empty string yields true (vacuously), matching the previous behavior.
// Fixes relative to the previous revision: the bounds check now runs
// before the element access, and the value is cast through unsigned char
// because std::isdigit has undefined behavior for negative char values
// (e.g. non-ASCII bytes on platforms where char is signed).
bool string_is_digit(std::string s) {
  for (size_t i = 0; i < s.size(); ++i) {
    if (!std::isdigit(static_cast<unsigned char>(s[i]))) {
      return false;
    }
  }
  return true;
}
// Slurp the whole file at `path` into a string.
// A file that cannot be opened yields an empty string (no error is raised).
std::string read_file_to_string(std::string path) {
  std::ifstream in(path);
  std::ostringstream contents;
  contents << in.rdbuf();
  return contents.str();
}
// Rewrite a raw metric name into a Prometheus-legal one, in place.
// Must be kept in sync with promethize() in src/pybind/mgr/prometheus/module.py
void promethize(std::string &name) {
  // A trailing '-' becomes the "_minus" suffix (e.g. "x-" -> "x_minus").
  // Note: for an empty string, name[name.size()-1] reads the terminating
  // null character (well-defined since C++11) and matches nothing.
  if (name[name.size() - 1] == '-') {
    name[name.size() - 1] = '_';
    name += "minus";
  }

  // Characters Prometheus does not allow in metric names -> '_'.
  auto should_be_underscore = [](char ch) {
    return ch == '.' || ch == '/' || ch == ' ' || ch == '-';
  };
  std::replace_if(name.begin(), name.end(), should_be_underscore, '_');

  boost::replace_all(name, "::", "_");
  boost::replace_all(name, "+", "_plus");

  // Namespace every exported metric under the "ceph_" prefix.
  name = "ceph_" + name;
}
| 1,659 | 22.714286 | 80 | cc |
null | ceph-main/src/exporter/util.h | #include "common/hostname.h"
#include <chrono>
#include <string>
// Convenience macro: time the enclosing scope, logging on destruction.
#define TIMED_FUNCTION() BlockTimer timer(__FILE__, __FUNCTION__)

// Scope timer: starts at construction, stops via stop() or at destruction,
// and logs the elapsed milliseconds in the destructor (see util.cc).
class BlockTimer {
 public:
  BlockTimer(std::string file, std::string function);
  ~BlockTimer();
  void stop();
  // Elapsed milliseconds; meaningful after stop().
  double get_ms();

 private:
  std::chrono::duration<double, std::milli> ms;
  std::string file, function;  // logged to identify the timed scope
  bool stopped;                // guards against double stop()
  std::chrono::time_point<std::chrono::high_resolution_clock> t1, t2;
};

// True iff every character of s is a decimal digit (empty -> true).
bool string_is_digit(std::string s);
// Whole-file read; unopenable files yield "".
std::string read_file_to_string(std::string path);
std::string get_hostname(std::string path);
// Rewrite a metric name into Prometheus-legal form, prefixed "ceph_".
void promethize(std::string &name);
| 606 | 23.28 | 68 | h |
null | ceph-main/src/extblkdev/ExtBlkDevInterface.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* (C) Copyright IBM Corporation 2022
* Author: Martin Ohmacht <[email protected]>
*
* Based on the file ceph/src/erasure-code/ErasureCodeInterface.h
* Copyright (C) 2013 Cloudwatt <[email protected]>
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_EXT_BLK_DEV_INTERFACE_H
#define CEPH_EXT_BLK_DEV_INTERFACE_H
/*! @file ExtBlkDevInterface.h
@brief Interface provided by extended block device plugins
Block devices with verdor specific capabilities rely on plugins implementing
**ExtBlkDevInterface** to provide access to their capabilities.
Methods returning an **int** return **0** on success and a
negative value on error.
*/
#include <string>
#include <map>
#include <ostream>
#include <memory>
#ifdef __linux__
#include <sys/capability.h>
#else
typedef void *cap_t;
#endif
#include "common/PluginRegistry.h"
namespace ceph {
// Capacity/usage snapshot of an extended block device, in bytes.
// "logical" refers to the space as seen by the filesystem on top of the
// device; "physical" refers to the backing storage (the two differ when
// the device compresses or deduplicates data).
class ExtBlkDevState {
  uint64_t logical_total=0;
  uint64_t logical_avail=0;
  uint64_t physical_total=0;
  uint64_t physical_avail=0;
public:
  uint64_t get_logical_total(){return logical_total;}
  uint64_t get_logical_avail(){return logical_avail;}
  uint64_t get_physical_total(){return physical_total;}
  uint64_t get_physical_avail(){return physical_avail;}
  void set_logical_total(uint64_t alogical_total){logical_total=alogical_total;}
  void set_logical_avail(uint64_t alogical_avail){logical_avail=alogical_avail;}
  void set_physical_total(uint64_t aphysical_total){physical_total=aphysical_total;}
  void set_physical_avail(uint64_t aphysical_avail){physical_avail=aphysical_avail;}
};
class ExtBlkDevInterface {
public:
virtual ~ExtBlkDevInterface() {}
/**
* Initialize the instance if device logdevname is supported
*
* Return 0 on success or a negative errno on error
*
* @param [in] logdevname name of device to check for support by this plugin
* @return 0 on success or a negative errno on error.
*/
virtual int init(const std::string& logdevname) = 0;
/**
* Return the name of the underlying device detected by **init** method
*
* @return the name of the underlying device
*/
virtual const std::string& get_devname() const = 0;
/**
* Provide status of underlying physical storage after compression
*
* Return 0 on success or a negative errno on error.
*
* @param [out] state current state of the undelying device
* @return 0 on success or a negative errno on error.
*/
virtual int get_state(ExtBlkDevState& state) = 0;
/**
* Populate property map with meta data of device.
*
* @param [in] prefix prefix to be prepended to all map values by this method
* @param [in,out] pm property map of the device, to be extended by attributes detected by this plugin
* @return 0 on success or a negative errno on error.
*/
virtual int collect_metadata(const std::string& prefix, std::map<std::string,std::string> *pm) = 0;
};
typedef std::shared_ptr<ExtBlkDevInterface> ExtBlkDevInterfaceRef;
class ExtBlkDevPlugin : public Plugin {
public:
explicit ExtBlkDevPlugin(CephContext *cct) : Plugin(cct) {}
virtual ~ExtBlkDevPlugin() {}
/**
* Indicate plugin-required capabilities in permitted set
* If a plugin requires a capability to be active in the
* permitted set when invoked, it must indicate so by setting
* the required flags in the cap_t structure passed into this method.
* The cap_t structure is empty when passed into the method, and only the
* method's modifications to the permitted set are used by ceph.
* The plugin must elevate the capabilities into the effective
* set at a later point when needed during the invocation of its
* other methods, and is responsible to restore the effective set
* before returning from the method
*
* @param [out] caps capability set indicating the necessary capabilities
*/
virtual int get_required_cap_set(cap_t caps) = 0;
/**
* Factory method, creating ExtBlkDev instances
*
* @param [in] logdevname name of logic device, may be composed of physical devices
* @param [out] ext_blk_dev object created on successful device support detection
* @return 0 on success or a negative errno on error.
*/
virtual int factory(const std::string& logdevname,
ExtBlkDevInterfaceRef& ext_blk_dev) = 0;
};
}
#endif
| 4,914 | 33.612676 | 106 | h |
null | ceph-main/src/extblkdev/ExtBlkDevPlugin.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* (C) Copyright IBM Corporation 2022
* Author: Martin Ohmacht <[email protected]>
*
* Based on the file ceph/src/erasure-code/ErasureCodePlugin.cc
* Copyright (C) 2013,2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include <errno.h>
#include "ceph_ver.h"
#include "ExtBlkDevPlugin.h"
#include "common/errno.h"
#include "include/dlfcn_compat.h"
#include "include/str_list.h"
#include "include/ceph_assert.h"
#include "common/ceph_context.h"
#include "common/debug.h"
#define dout_subsys ceph_subsys_bdev
#define dout_context cct
using namespace std;
namespace ceph {
namespace extblkdev {
#ifdef __linux__
// iterate across plugins and determine each capability's reqirement
// merge requirements into merge_caps set
//
// Each plugin reports its needs into a scratch cap set (cleared per plugin);
// every capability found there is OR-ed into the caller-owned merge_caps.
// Returns 0 on success or a negative errno.
int get_required_caps(CephContext *cct, cap_t &merge_caps)
{
  cap_t plugin_caps = nullptr;
  // free the scratch set on every exit path
  auto close_caps_on_return = make_scope_guard([&] {
    if (plugin_caps != nullptr) {
      cap_free(plugin_caps);
    }
  });
  // plugin-private cap set to populate by a plugin
  plugin_caps = cap_init();
  if (plugin_caps == nullptr) {
    return -errno;
  }

  auto registry = cct->get_plugin_registry();
  std::lock_guard l(registry->lock);
  // did we preload any extblkdev type plugins?
  auto ptype = registry->plugins.find("extblkdev");
  if (ptype != registry->plugins.end()) {
    // iterate over all extblkdev plugins
    for (auto& it : ptype->second) {
      // clear cap set before passing to plugin
      if (cap_clear(plugin_caps) < 0) {
	return -errno;
      }
      // let plugin populate set with required caps
      auto ebdplugin = dynamic_cast<ExtBlkDevPlugin*>(it.second);
      if (ebdplugin == nullptr) {
	// registry entry of the wrong type; treat as a hard error
	derr << __func__ << " Is not an extblkdev plugin: " << it.first << dendl;
	return -ENOENT;
      }
      int rc = ebdplugin->get_required_cap_set(plugin_caps);
      if (rc != 0)
	return rc;
      // iterate over capabilities and check for active bits
      for (int i = 0; i <= CAP_LAST_CAP; ++i) {
	cap_flag_value_t val;
	if (cap_get_flag(plugin_caps, i, CAP_PERMITTED, &val) < 0) {
	  return -errno;
	}
	if (val != CAP_CLEAR) {
	  cap_value_t arr[1];
	  arr[0] = i;
	  // set capability in merged set
	  if (cap_set_flag(merge_caps, CAP_PERMITTED, 1, arr, CAP_SET) < 0) {
	    return -errno;
	  }
	}
      }
    }
  }
  return 0;
}
// trim away all capabilities of this process that are not explicitly set in merge_set
//
// For every capability present in the process set but absent from
// merge_caps, clear it from both the PERMITTED and EFFECTIVE sets; the
// reduced set is applied with cap_set_proc only if something changed.
// Returns 0 on success or a negative errno.
int trim_caps(CephContext *cct, cap_t &merge_caps)
{
  cap_t proc_caps = nullptr;
  // free the process set copy on every exit path
  auto close_caps_on_return = make_scope_guard([&] {
    if (proc_caps != nullptr) {
      cap_free(proc_caps);
    }
  });
  bool changed = false;

  // get process capability set
  proc_caps = cap_get_proc();
  if (proc_caps == nullptr) {
    dout(1) << " cap_get_proc failed with errno: " << errno << dendl;
    return -errno;
  }
  {
    // log the current capability set (cap_to_text allocates; must cap_free)
    char *cap_str = cap_to_text(proc_caps, 0);
    if (cap_str != nullptr){
      dout(10) << " cap_get_proc yields: " << cap_str << dendl;
      cap_free(cap_str);
    }
  }

  // iterate over capabilities
  for (int i = 0; i <= CAP_LAST_CAP; ++i) {
    cap_flag_value_t val;
    if (cap_get_flag(merge_caps, i, CAP_PERMITTED, &val) < 0) {
      return -errno;
    }
    if (val == CAP_CLEAR) {
      if (cap_get_flag(proc_caps, i, CAP_PERMITTED, &val) < 0) {
	return -errno;
      }
      if (val != CAP_CLEAR) {
	// if bit clear in merged set, but set in process set, clear in process set
	changed = true;
	cap_value_t arr[1];
	arr[0] = i;
	if (cap_set_flag(proc_caps, CAP_PERMITTED, 1, arr, CAP_CLEAR) < 0) {
	  return -errno;
	}
	if (cap_set_flag(proc_caps, CAP_EFFECTIVE, 1, arr, CAP_CLEAR) < 0) {
	  return -errno;
	}
      }
    }
  }
  // apply reduced capability set to process
  if (changed) {
    char *cap_str = cap_to_text(proc_caps, 0);
    if (cap_str != nullptr){
      dout(10) << " new caps for cap_set_proc: " << cap_str << dendl;
      cap_free(cap_str);
    }
    if (cap_set_proc(proc_caps) < 0) {
      dout(1) << " cap_set_proc failed with errno: " << errno << dendl;
      return -errno;
    }
  }
  return 0;
}
// Reduce this process's capabilities to the union of what the loaded
// extblkdev plugins require (see get_required_caps / trim_caps).
// Returns 0 on success or a negative errno.
int limit_caps(CephContext *cct)
{
  cap_t merge_caps = nullptr;
  // free the merged set on every exit path
  auto close_caps_on_return = make_scope_guard([&] {
    if (merge_caps != nullptr) {
      cap_free(merge_caps);
    }
  });
  // collect required caps in merge_caps
  merge_caps = cap_init();
  if (merge_caps == nullptr) {
    return -errno;
  }
  int rc = get_required_caps(cct, merge_caps);
  if (rc != 0) {
    return rc;
  }
  return trim_caps(cct, merge_caps);
}
#endif
// preload set of extblkdev plugins defined in config
//
// Loads every plugin listed in osd_extblkdev_plugins (each under the
// registry name "ebd_<plugin>"). On Linux, if running unprivileged, the
// process capability set is afterwards trimmed to what the plugins need.
// Returns 0 on success or the first load/cap error.
int preload(CephContext *cct)
{
  const auto& conf = cct->_conf;
  string plugins = conf.get_val<std::string>("osd_extblkdev_plugins");
  dout(10) << "starting preload of extblkdev plugins: " << plugins << dendl;

  list<string> plugins_list;
  get_str_list(plugins, plugins_list);
  auto registry = cct->get_plugin_registry();
  {
    std::lock_guard l(registry->lock);

    for (auto& plg : plugins_list) {
      dout(10) << "starting load of extblkdev plugin: " << plg << dendl;
      int rc = registry->load("extblkdev", std::string("ebd_") + plg);
      if (rc) {
	derr << __func__ << " failed preloading extblkdev plugin: " << plg << dendl;
	return rc;
      }else{
	dout(10) << "successful load of extblkdev plugin: " << plg << dendl;
      }
    }
  }

#ifdef __linux__
  // if we are still running as root, we do not need to trim capabilities
  // as we are intended to use the privileges
  if (geteuid() == 0) {
    return 0;
  }
  return limit_caps(cct);
#else
  return 0;
#endif
}
// scan extblkdev plugins for support of this device
//
// Asks each loaded extblkdev plugin's factory to probe logdevname; the
// first plugin that accepts it populates ebd_impl. Returns 0 on a match,
// -ENOENT when no plugin (or no plugin type) matches.
int detect_device(CephContext *cct,
		  const std::string &logdevname,
		  ExtBlkDevInterfaceRef& ebd_impl)
{
  int rc = -ENOENT;
  std::string plg_name;
  auto registry = cct->get_plugin_registry();
  std::lock_guard l(registry->lock);
  auto ptype = registry->plugins.find("extblkdev");
  if (ptype == registry->plugins.end()) {
    return -ENOENT;
  }

  for (auto& it : ptype->second) {
    dout(10) << __func__ << " Trying to detect block device " << logdevname
	     << " using plugin " << it.first << dendl;
    auto ebdplugin = dynamic_cast<ExtBlkDevPlugin*>(it.second);
    if (ebdplugin == nullptr) {
      // registry entry of the wrong type; treat as a hard error
      derr << __func__ << " Is not an extblkdev plugin: " << it.first << dendl;
      return -ENOENT;
    }
    rc = ebdplugin->factory(logdevname, ebd_impl);
    if (rc == 0) {
      // first matching plugin wins
      plg_name = it.first;
      break;
    }
  }
  if (rc == 0) {
    dout(1) << __func__ << " using plugin " << plg_name << ", " << "volume " << ebd_impl->get_devname()
	    << " maps to " << logdevname << dendl;
  } else {
    dout(10) << __func__ << " no plugin volume maps to " << logdevname << dendl;
  }
  return rc;
}
// Release the reference to a detected device object.
// shared_ptr::reset() is a no-op on an empty pointer, so no guard is needed.
int release_device(ExtBlkDevInterfaceRef& ebd_impl)
{
  ebd_impl.reset();
  return 0;
}
}
}
| 7,639 | 27.401487 | 101 | cc |
null | ceph-main/src/extblkdev/ExtBlkDevPlugin.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* (C) Copyright IBM Corporation 2022
* Author: Martin Ohmacht <[email protected]>
*
* Based on the file ceph/src/erasure-code/ErasureCodePlugin.h
* Copyright (C) 2013,2014 Cloudwatt <[email protected]>
* Copyright (C) 2014 Red Hat <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_EXT_BLK_DEV_PLUGIN_H
#define CEPH_EXT_BLK_DEV_PLUGIN_H
#include "ExtBlkDevInterface.h"
namespace ceph {
namespace extblkdev {
// Load all extblkdev plugins named in osd_extblkdev_plugins and (on Linux,
// when not running as root) trim process capabilities to the plugins' needs.
int preload(CephContext *cct);
// Probe the loaded plugins for one that supports logdevname; on success
// ext_blk_dev holds the matching device object.
int detect_device(CephContext *cct,
		  const std::string &logdevname,
		  ExtBlkDevInterfaceRef& ebd_impl);
// Drop the reference to a previously detected device object.
int release_device(ExtBlkDevInterfaceRef& ebd_impl);
}
}
#endif
| 1,100 | 27.230769 | 71 | h |
null | ceph-main/src/extblkdev/vdo/ExtBlkDevPluginVdo.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* (C) Copyright IBM Corporation 2022
* Author: Martin Ohmacht <[email protected]>
*
* Based on the file src/erasure-code/clay/ErasureCodePluginClay.cc
* Copyright (C) 2018 Indian Institute of Science <[email protected]>
*
* Author: Myna Vajha <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include "ceph_ver.h"
#include "ExtBlkDevPluginVdo.h"
#include "common/ceph_context.h"
// This plugin does not require any capabilities to be set
// (the VDO sysfs statistics it reads are accessible without extra
// privileges), so the passed-in cap set is left empty.
int ExtBlkDevPluginVdo::get_required_cap_set(cap_t caps)
{
  return 0;
}
int ExtBlkDevPluginVdo::factory(const std::string& logdevname,
ceph::ExtBlkDevInterfaceRef& ext_blk_dev)
{
auto vdo = new ExtBlkDevVdo(cct);
int r = vdo->init(logdevname);
if (r != 0) {
delete vdo;
return r;
}
ext_blk_dev.reset(vdo);
return 0;
};
// Plugin ABI hook: report the Ceph version this plugin was built against.
const char *__ceph_plugin_version() { return CEPH_GIT_NICE_VER; }

// Plugin ABI hook: construct the plugin object and register it under
// (type, name); the registry owns it on success.
int __ceph_plugin_init(CephContext *cct,
		       const std::string& type,
		       const std::string& name)
{
  auto plg = new ExtBlkDevPluginVdo(cct);
  // note: `new` throws std::bad_alloc rather than returning null, so this
  // check is effectively dead; kept for symmetry with other plugins.
  if(plg == 0) return -ENOMEM;
  int rc = cct->get_plugin_registry()->add(type, name, plg);
  if(rc != 0){
    delete plg;
  }
  return rc;
}
| 1,559 | 25 | 73 | cc |
null | ceph-main/src/extblkdev/vdo/ExtBlkDevPluginVdo.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* (C) Copyright IBM Corporation 2022
* Author: Martin Ohmacht <[email protected]>
*
* Based on the file src/erasure-code/clay/ErasureCodePluginClay.h
* Copyright (C) 2018 Indian Institute of Science <[email protected]>
*
* Author: Myna Vajha <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#ifndef CEPH_EXT_BLK_DEV_PLUGIN_VDO_H
#define CEPH_EXT_BLK_DEV_PLUGIN_VDO_H
#include "ExtBlkDevVdo.h"
// Plugin wrapper exposing VDO (Virtual Data Optimizer) devices through the
// generic extblkdev plugin interface.
class ExtBlkDevPluginVdo : public ceph::ExtBlkDevPlugin {
public:
  explicit ExtBlkDevPluginVdo(CephContext *cct) : ExtBlkDevPlugin(cct) {}
  // No extra capabilities required; leaves caps empty.
  int get_required_cap_set(cap_t caps) override;
  // Create an ExtBlkDevVdo for logdevname if it is VDO-backed.
  int factory(const std::string& logdevname,
	      ceph::ExtBlkDevInterfaceRef& ext_blk_dev) override;
};
#endif
| 1,103 | 30.542857 | 73 | h |
null | ceph-main/src/extblkdev/vdo/ExtBlkDevVdo.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* (C) Copyright IBM Corporation 2022
* Author: Martin Ohmacht <[email protected]>
*
* Based on the file ceph/src/common/blkdev.cc
* Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "ExtBlkDevVdo.h"
#include "common/blkdev.h"
#include "include/stringify.h"
#include <errno.h>
#include "common/debug.h"
#define dout_subsys ceph_subsys_bdev
#define dout_context cct
#undef dout_prefix
#define dout_prefix *_dout << "vdo(" << this << ") "
// Check whether `devname` (a raw kernel device such as "dm-4") is a VDO
// volume: scan /dev/mapper for a symlink pointing at "../<devname>" and,
// if found, open its /sys/kvdo/<name>/statistics directory. On success,
// stores the volume name and directory fd in the object and returns 0;
// otherwise returns a negative errno (-ENOENT when no mapping matches).
// NOTE(review): an already-open vdo_dir_fd would be overwritten here —
// presumably this is only ever called once per object via init(); confirm.
int ExtBlkDevVdo::_get_vdo_stats_handle(const std::string& devname)
{
  int rc = -ENOENT;
  dout(10) << __func__ << " VDO init checking device: " << devname << dendl;

  // we need to go from the raw devname (e.g., dm-4) to the VDO volume name.
  // currently the best way seems to be to look at /dev/mapper/* ...
  std::string expect = std::string("../") + devname;  // expected symlink target

  DIR *dir = ::opendir("/dev/mapper");
  if (!dir) {
    return -errno;
  }
  struct dirent *de = nullptr;
  while ((de = ::readdir(dir))) {
    if (de->d_name[0] == '.')
      continue;
    char fn[4096], target[4096];
    snprintf(fn, sizeof(fn), "/dev/mapper/%s", de->d_name);
    int r = readlink(fn, target, sizeof(target));
    // readlink does not null-terminate; also skip possibly-truncated targets
    if (r < 0 || r >= (int)sizeof(target))
      continue;
    target[r] = 0;
    if (expect == target) {
      // matched: this mapper entry is our device; try its kvdo sysfs dir
      snprintf(fn, sizeof(fn), "/sys/kvdo/%s/statistics", de->d_name);
      int vdo_fd = ::open(fn, O_RDONLY|O_CLOEXEC); //DIRECTORY
      if (vdo_fd >= 0) {
	name = de->d_name;
	vdo_dir_fd = vdo_fd;
	rc = 0;
	break;
      }
    }
  }
  closedir(dir);
  return rc;
}
// Walk from the logical device down through its device-mapper ancestry
// until some layer turns out to be a VDO volume. Returns 0 when a VDO
// statistics handle was opened, -ENOENT otherwise.
int ExtBlkDevVdo::get_vdo_stats_handle()
{
  std::set<std::string> pending = { logdevname };
  while (!pending.empty()) {
    auto it = pending.begin();
    std::string candidate = *it;
    pending.erase(it);
    if (_get_vdo_stats_handle(candidate) == 0) {
      // yay, it's vdo
      return 0;
    }
    // not vdo itself; queue its device-mapper parents, if any
    if (candidate.find("dm-") == 0) {
      get_dm_parents(candidate, &pending);
    }
  }
  return -ENOENT;
}
// Read one numeric statistic from /sys/kvdo/<name>/statistics/<property>.
// Returns 0 when the attribute cannot be opened or read (callers treat a
// zero value as "statistic unavailable").
int64_t ExtBlkDevVdo::get_vdo_stat(const char *property)
{
  int fd = ::openat(vdo_dir_fd, property, O_RDONLY|O_CLOEXEC);
  if (fd < 0) {
    return 0;
  }
  int64_t value = 0;
  char buf[1024];
  int n = ::read(fd, buf, sizeof(buf) - 1);
  if (n > 0) {
    buf[n] = 0;  // sysfs payload is text; terminate before parsing
    value = atoll(buf);
  }
  VOID_TEMP_FAILURE_RETRY(::close(fd));
  return value;
}
// Remember the logical device name and probe it (and its device-mapper
// ancestry) for a VDO volume; 0 on success, -ENOENT if not VDO-backed.
int ExtBlkDevVdo::init(const std::string& alogdevname)
{
  logdevname = alogdevname;
  // get directory handle for VDO metadata
  return get_vdo_stats_handle();
}
// Populate `state` with logical/physical capacity and free space, all in
// bytes, computed from the kvdo sysfs statistics. Returns -1 when any of
// the required statistics reads as zero (get_vdo_stat() returns 0 both for
// a missing attribute and a genuine zero, and none of the checked values
// can legitimately be zero on a live volume). logical_blocks_used is
// deliberately not part of the check: zero used blocks is valid.
int ExtBlkDevVdo::get_state(ceph::ExtBlkDevState& state)
{
  int64_t block_size = get_vdo_stat("block_size");
  int64_t physical_blocks = get_vdo_stat("physical_blocks");
  int64_t overhead_blocks_used = get_vdo_stat("overhead_blocks_used");
  int64_t data_blocks_used = get_vdo_stat("data_blocks_used");
  int64_t logical_blocks = get_vdo_stat("logical_blocks");
  int64_t logical_blocks_used = get_vdo_stat("logical_blocks_used");
  if (!block_size
      || !physical_blocks
      || !overhead_blocks_used
      || !data_blocks_used
      || !logical_blocks) {
    dout(1) << __func__ << " VDO sysfs provided zero value for at least one statistic: " << dendl;
    dout(1) << __func__ << " VDO block_size: " << block_size << dendl;
    dout(1) << __func__ << " VDO physical_blocks: " << physical_blocks << dendl;
    dout(1) << __func__ << " VDO overhead_blocks_used: " << overhead_blocks_used << dendl;
    dout(1) << __func__ << " VDO data_blocks_used: " << data_blocks_used << dendl;
    dout(1) << __func__ << " VDO logical_blocks: " << logical_blocks << dendl;
    return -1;
  }
  // free physical blocks = total minus VDO overhead minus stored data
  int64_t avail_blocks =
    physical_blocks - overhead_blocks_used - data_blocks_used;
  int64_t logical_avail_blocks =
    logical_blocks - logical_blocks_used;
  state.set_logical_total(block_size * logical_blocks);
  state.set_logical_avail(block_size * logical_avail_blocks);
  state.set_physical_total(block_size * physical_blocks);
  state.set_physical_avail(block_size * avail_blocks);
  return 0;
}
// Add VDO metadata ("<prefix>vdo" flag and physical size in bytes) to *pm.
// Propagates get_state()'s error without touching the map when statistics
// are unavailable.
int ExtBlkDevVdo::collect_metadata(const std::string& prefix, std::map<std::string,std::string> *pm)
{
  ceph::ExtBlkDevState state;
  const int rc = get_state(state);
  if (rc != 0) {
    return rc;
  }
  (*pm)[prefix + "vdo"] = "true";
  (*pm)[prefix + "vdo_physical_size"] = stringify(state.get_physical_total());
  return 0;
}
| 4,693 | 28.898089 | 100 | cc |
null | ceph-main/src/extblkdev/vdo/ExtBlkDevVdo.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* (C) Copyright IBM Corporation 2022
* Author: Martin Ohmacht <[email protected]>
*
* Based on the file ceph/src/common/blkdev.cc
* Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
*
* And also based on the file src/erasure-code/clay/ErasureCodeClay.h
* Copyright (C) 2018 Indian Institute of Science <[email protected]>
*
* Author: Myna Vajha <[email protected]>
*
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_EXT_BLK_DEV_VDO_H
#define CEPH_EXT_BLK_DEV_VDO_H
#include "extblkdev/ExtBlkDevInterface.h"
#include "include/compat.h"
// VDO (Virtual Data Optimizer) backend for the extblkdev interface:
// resolves a logical device to its VDO volume and reads capacity/usage
// statistics from /sys/kvdo/<name>/statistics.
class ExtBlkDevVdo final : public ceph::ExtBlkDevInterface
{
  int vdo_dir_fd = -1; ///< fd for vdo sysfs directory
  std::string name; // name of the underlying vdo device
  std::string logdevname; // name of the top level logical device
  CephContext *cct;
public:
  explicit ExtBlkDevVdo(CephContext *cct) : cct(cct) {}
  ~ExtBlkDevVdo(){
    // release the sysfs statistics directory handle, if one was opened
    if(vdo_dir_fd >= 0)
      VOID_TEMP_FAILURE_RETRY(::close(vdo_dir_fd));
  }
  /// check a single raw device (e.g. "dm-4") for VDO support
  int _get_vdo_stats_handle(const std::string& devname);
  /// walk logdevname's device-mapper ancestry looking for a VDO volume
  int get_vdo_stats_handle();
  /// read one numeric statistic; 0 means unavailable
  int64_t get_vdo_stat(const char *property);
  virtual int init(const std::string& logdevname);
  virtual const std::string& get_devname() const {return name;}
  virtual int get_state(ceph::ExtBlkDevState& state);
  virtual int collect_metadata(const std::string& prefix, std::map<std::string,std::string> *pm);
};
#endif
| 1,741 | 31.867925 | 97 | h |
null | ceph-main/src/global/global_context.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "global/global_context.h"
#include <string.h>
#include "common/ceph_context.h"
#if defined(WITH_SEASTAR) && !defined(WITH_ALIEN)
#include "crimson/common/config_proxy.h"
#endif
/*
* Global variables for use from process context.
*/
namespace TOPNSPC::global {
// The process-wide CephContext singleton, set up by global_init().
CephContext *g_ceph_context = NULL;
// Accessor for the process-wide configuration; under a seastar (crimson)
// build the configuration lives in crimson's config service instead of
// the CephContext.
ConfigProxy& g_conf() {
#if defined(WITH_SEASTAR) && !defined(WITH_ALIEN)
  return crimson::common::local_conf();
#else
  return g_ceph_context->_conf;
#endif
}
// Details of the most recent assertion failure, recorded here so the
// crash handler can include them in the crash dump metadata.
const char *g_assert_file = 0;
int g_assert_line = 0;
const char *g_assert_func = 0;
const char *g_assert_condition = 0;
unsigned long long g_assert_thread = 0;
char g_assert_thread_name[4096] = { 0 };
char g_assert_msg[8096] = { 0 };
char g_process_name[NAME_MAX + 1] = { 0 };
// Details of the most recent I/O error event, filled in by
// note_io_error_event() below.
bool g_eio = false;
char g_eio_devname[1024] = { 0 };
char g_eio_path[PATH_MAX] = { 0 };
int g_eio_error = 0;  // usually -EIO...
int g_eio_iotype = 0; // 1 = read, 2 = write
unsigned long long g_eio_offset = 0;
unsigned long long g_eio_length = 0;
// Record an I/O error event in the globals above.  devname and path may
// be NULL, in which case the previous values are left in place.  Always
// returns 0.
int note_io_error_event(
  const char *devname,
  const char *path,
  int error,
  int iotype,
  unsigned long long offset,
  unsigned long long length)
{
  g_eio = true;
  if (devname) {
    // copy with explicit NUL termination; strncpy alone does not
    // guarantee it when the source fills the buffer
    strncpy(g_eio_devname, devname, sizeof(g_eio_devname) - 1);
    g_eio_devname[sizeof(g_eio_devname) - 1] = '\0';
  }
  if (path) {
    strncpy(g_eio_path, path, sizeof(g_eio_path) - 1);
    g_eio_path[sizeof(g_eio_path) - 1] = '\0';
  }
  g_eio_error = error;
  g_eio_iotype = iotype;
  g_eio_offset = offset;
  g_eio_length = length;
  return 0;
}
}
| 1,957 | 24.102564 | 70 | cc |
null | ceph-main/src/global/global_context.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_GLOBAL_CONTEXT_H
#define CEPH_GLOBAL_CONTEXT_H
#include <limits.h>
#include "common/config_fwd.h"
#include "include/common_fwd.h"
namespace TOPNSPC::global {
// Process-wide CephContext; defined in global_context.cc and set up by
// global_init().
extern CephContext *g_ceph_context;
ConfigProxy& g_conf();
// Most recent assertion-failure details (consumed by the crash handler).
extern const char *g_assert_file;
extern int g_assert_line;
extern const char *g_assert_func;
extern const char *g_assert_condition;
extern unsigned long long g_assert_thread;
extern char g_assert_thread_name[4096];
extern char g_assert_msg[8096];
extern char g_process_name[NAME_MAX + 1];
// Most recent I/O error event; see note_io_error_event().
extern bool g_eio;
extern char g_eio_devname[1024];
extern char g_eio_path[PATH_MAX];
extern int g_eio_error;
extern int g_eio_iotype;   // IOCB_CMD_* from libaio's aio_abh.io
extern unsigned long long g_eio_offset;
extern unsigned long long g_eio_length;
extern int note_io_error_event(
  const char *devname,
  const char *path,
  int error,
  int iotype,
  unsigned long long offset,
  unsigned long long length);
}
// NOTE: injects all of the globals above, unqualified, into every
// translation unit that includes this header.
using namespace TOPNSPC::global;
#endif
| 1,395 | 24.381818 | 70 | h |
null | ceph-main/src/global/global_init.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <filesystem>
#include "common/async/context_pool.h"
#include "common/ceph_argparse.h"
#include "common/code_environment.h"
#include "common/config.h"
#include "common/debug.h"
#include "common/errno.h"
#include "common/signal.h"
#include "common/version.h"
#include "erasure-code/ErasureCodePlugin.h"
#include "extblkdev/ExtBlkDevPlugin.h"
#include "global/global_context.h"
#include "global/global_init.h"
#include "global/pidfile.h"
#include "global/signal_handler.h"
#include "include/compat.h"
#include "include/str_list.h"
#include "mon/MonClient.h"
#ifndef _WIN32
#include <pwd.h>
#include <grp.h>
#endif
#include <errno.h>
#ifdef HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#endif
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_
namespace fs = std::filesystem;
using std::cerr;
using std::string;
// Publish the freshly-created context and our process name through the
// process-wide globals declared in global_context.h.
static void global_init_set_globals(CephContext *cct)
{
  get_process_name(g_process_name, sizeof(g_process_name));
  g_ceph_context = cct;
}
// Log a one-line banner: "<version>, process <name>, pid <pid>".
// Formats through a string stream instead of a fixed 1024-byte stack
// buffer so an unusually long version/process string cannot be silently
// truncated by snprintf.
static void output_ceph_version()
{
  std::ostringstream oss;
  oss << pretty_version_to_str() << ", process "
      << get_process_name_cpp() << ", pid " << getpid();
  generic_dout(0) << oss.str() << dendl;
}
// Return str.c_str(), or nullptr when str is empty.  Several callees
// (e.g. parse_config_files) treat a null pointer as "not provided".
static const char* c_str_or_null(const std::string &str)
{
  return str.empty() ? nullptr : str.c_str();
}
// Best-effort chown of `pathname` to owner:group.  An empty path is a
// no-op; a chown failure is logged (using the human-readable uid/gid
// strings) and the negative errno is returned, but callers treat it as
// non-fatal.  Always succeeds on Windows.
static int chown_path(const std::string &pathname, const uid_t owner, const gid_t group,
		      const std::string &uid_str, const std::string &gid_str)
{
#ifdef _WIN32
  return 0;
#else
  const char *path = c_str_or_null(pathname);
  if (path == nullptr) {
    return 0;
  }
  int ret = ::chown(path, owner, group);
  if (ret != 0) {
    ret = -errno;
    cerr << "warning: unable to chown() " << pathname << " as "
	 << uid_str << ":" << gid_str << ": " << cpp_strerror(ret) << std::endl;
  }
  return ret;
#endif
}
// Early, monitor-independent initialization: create the CephContext,
// parse config file(s), environment and command line, and start the log
// thread.  Called by global_init() (the default) or directly by callers
// that need the config before completing init.  Exits the process on
// config-file parse failure.  NOTE: the parse order below (files, then
// env, then argv) is the documented precedence -- do not reorder.
void global_pre_init(
  const std::map<std::string,std::string> *defaults,
  std::vector < const char* >& args,
  uint32_t module_type, code_environment_t code_env,
  int flags)
{
  std::string conf_file_list;
  std::string cluster = "";

  // ensure environment arguments are included in early processing
  env_to_vec(args);

  CephInitParameters iparams = ceph_argparse_early_args(
    args, module_type,
    &cluster, &conf_file_list);

  CephContext *cct = common_preinit(iparams, code_env, flags);
  cct->_conf->cluster = cluster;
  global_init_set_globals(cct);
  auto& conf = cct->_conf;

  if (flags & (CINIT_FLAG_NO_DEFAULT_CONFIG_FILE|
	       CINIT_FLAG_NO_MON_CONFIG)) {
    conf->no_mon_config = true;
  }

  // alternate defaults
  if (defaults) {
    for (auto& i : *defaults) {
      conf.set_val_default(i.first, i.second);
    }
  }

  if (conf.get_val<bool>("no_config_file")) {
    flags |= CINIT_FLAG_NO_DEFAULT_CONFIG_FILE;
  }

  int ret = conf.parse_config_files(c_str_or_null(conf_file_list),
				    &cerr, flags);
  if (ret == -EDOM) {
    cct->_log->flush();
    cerr << "global_init: error parsing config file." << std::endl;
    _exit(1);
  }
  else if (ret == -ENOENT) {
    // a missing config file is only fatal when one was explicitly named
    if (!(flags & CINIT_FLAG_NO_DEFAULT_CONFIG_FILE)) {
      if (conf_file_list.length()) {
	cct->_log->flush();
	cerr << "global_init: unable to open config file from search list "
	     << conf_file_list << std::endl;
        _exit(1);
      } else {
	cerr << "did not load config file, using default settings."
	     << std::endl;
      }
    }
  }
  else if (ret) {
    cct->_log->flush();
    cerr << "global_init: error reading config file. "
	 << conf.get_parse_error() << std::endl;
    _exit(1);
  }

  // environment variables override (CEPH_ARGS, CEPH_KEYRING)
  conf.parse_env(cct->get_module_type());

  // command line (as passed by caller)
  conf.parse_argv(args);

  if (!cct->_log->is_started()) {
    cct->_log->start();
  }

  // do the --show-config[-val], if present in argv
  conf.do_argv_commands();

  // Now we're ready to complain about config file parse errors
  g_conf().complain_about_parse_error(g_ceph_context);
}
// Full process initialization.  Runs global_pre_init() (unless the caller
// already did), installs signal handlers, optionally drops privileges
// (--setuser/--setgroup), bootstraps config from the monitors, creates
// run_dir, and opens the log.  Returns an owning reference to the new
// CephContext.  NOTE: the ordering here is deliberate and fragile -- in
// particular the mon bootstrap connection must come after setuid() (see
// the rdma comment below).
boost::intrusive_ptr<CephContext>
global_init(const std::map<std::string,std::string> *defaults,
	    std::vector < const char* >& args,
	    uint32_t module_type, code_environment_t code_env,
	    int flags, bool run_pre_init)
{
  // Ensure we're not calling the global init functions multiple times.
  static bool first_run = true;
  if (run_pre_init) {
    // We will run pre_init from here (default).
    ceph_assert(!g_ceph_context && first_run);
    global_pre_init(defaults, args, module_type, code_env, flags);
  } else {
    // Caller should have invoked pre_init manually.
    ceph_assert(g_ceph_context && first_run);
  }
  first_run = false;

  // Verify flags have not changed if global_pre_init() has been called
  // manually. If they have, update them.
  if (g_ceph_context->get_init_flags() != flags) {
    g_ceph_context->set_init_flags(flags);
    if (flags & (CINIT_FLAG_NO_DEFAULT_CONFIG_FILE|
		 CINIT_FLAG_NO_MON_CONFIG)) {
      g_conf()->no_mon_config = true;
    }
  }

#ifndef _WIN32
  // signal stuff
  int siglist[] = { SIGPIPE, 0 };
  block_signals(siglist, NULL);
#endif

  if (g_conf()->fatal_signal_handlers) {
    install_standard_sighandlers();
  }
  ceph::register_assert_context(g_ceph_context);

  if (g_conf()->log_flush_on_exit)
    g_ceph_context->_log->set_flush_on_exit();

  // drop privileges?
  std::ostringstream priv_ss;

#ifndef _WIN32
  // consider --setuser root a no-op, even if we're not root
  if (getuid() != 0) {
    if (g_conf()->setuser.length()) {
      cerr << "ignoring --setuser " << g_conf()->setuser << " since I am not root"
	   << std::endl;
    }
    if (g_conf()->setgroup.length()) {
      cerr << "ignoring --setgroup " << g_conf()->setgroup
	   << " since I am not root" << std::endl;
    }
  } else if (g_conf()->setgroup.length() ||
             g_conf()->setuser.length()) {
    uid_t uid = 0;  // zero means no change; we can only drop privs here.
    gid_t gid = 0;
    std::string uid_string;
    std::string gid_string;
    std::string home_directory;
    if (g_conf()->setuser.length()) {
      char buf[4096];
      struct passwd pa;
      struct passwd *p = 0;

      // --setuser may be a numeric uid or a user name
      uid = atoi(g_conf()->setuser.c_str());
      if (uid) {
        getpwuid_r(uid, &pa, buf, sizeof(buf), &p);
      } else {
	getpwnam_r(g_conf()->setuser.c_str(), &pa, buf, sizeof(buf), &p);
	if (!p) {
	  cerr << "unable to look up user '" << g_conf()->setuser << "'"
	       << std::endl;
	  exit(1);
	}

	uid = p->pw_uid;
	gid = p->pw_gid;
	uid_string = g_conf()->setuser;
      }

      if (p && p->pw_dir != nullptr) {
	home_directory = std::string(p->pw_dir);
      }
    }
    if (g_conf()->setgroup.length() > 0) {
      // --setgroup may be a numeric gid or a group name
      gid = atoi(g_conf()->setgroup.c_str());
      if (!gid) {
	char buf[4096];
	struct group gr;
	struct group *g = 0;
	getgrnam_r(g_conf()->setgroup.c_str(), &gr, buf, sizeof(buf), &g);
	if (!g) {
	  cerr << "unable to look up group '" << g_conf()->setgroup << "'"
	       << ": " << cpp_strerror(errno) << std::endl;
	  exit(1);
	}
	gid = g->gr_gid;
	gid_string = g_conf()->setgroup;
      }
    }
    // only drop privileges if setuser_match_path (when set) is owned by
    // the target uid:gid; this avoids breaking clusters whose data dirs
    // are owned by someone else
    if ((uid || gid) &&
	g_conf()->setuser_match_path.length()) {
      // induce early expansion of setuser_match_path config option
      string match_path = g_conf()->setuser_match_path;
      g_conf().early_expand_meta(match_path, &cerr);
      struct stat st;
      int r = ::stat(match_path.c_str(), &st);
      if (r < 0) {
	cerr << "unable to stat setuser_match_path "
	     << g_conf()->setuser_match_path
	     << ": " << cpp_strerror(errno) << std::endl;
	exit(1);
      }
      if ((uid && uid != st.st_uid) ||
	  (gid && gid != st.st_gid)) {
	cerr << "WARNING: will not setuid/gid: " << match_path
	     << " owned by " << st.st_uid << ":" << st.st_gid
	     << " and not requested " << uid << ":" << gid
	     << std::endl;
	uid = 0;
	gid = 0;
	uid_string.erase();
	gid_string.erase();
      } else {
	priv_ss << "setuser_match_path "
		<< match_path << " owned by "
		<< st.st_uid << ":" << st.st_gid << ". ";
      }
    }
    g_ceph_context->set_uid_gid(uid, gid);
    g_ceph_context->set_uid_gid_strings(uid_string, gid_string);
    if ((flags & CINIT_FLAG_DEFER_DROP_PRIVILEGES) == 0) {
      // drop the group first; setgid() would fail after setuid()
      if (setgid(gid) != 0) {
	cerr << "unable to setgid " << gid << ": " << cpp_strerror(errno)
	     << std::endl;
	exit(1);
      }
#if defined(HAVE_SYS_PRCTL_H)
      if (g_conf().get_val<bool>("set_keepcaps")) {
	if (prctl(PR_SET_KEEPCAPS, 1) == -1) {
	  cerr << "warning: unable to set keepcaps flag: " << cpp_strerror(errno) << std::endl;
	}
      }
#endif
      if (setuid(uid) != 0) {
	cerr << "unable to setuid " << uid << ": " << cpp_strerror(errno)
	     << std::endl;
	exit(1);
      }
      if (setenv("HOME", home_directory.c_str(), 1) != 0) {
	cerr << "warning: unable to set HOME to " << home_directory << ": "
             << cpp_strerror(errno) << std::endl;
      }
      priv_ss << "set uid:gid to " << uid << ":" << gid << " (" << uid_string << ":" << gid_string << ")";
    } else {
      priv_ss << "deferred set uid:gid to " << uid << ":" << gid << " (" << uid_string << ":" << gid_string << ")";
    }
  }
#endif /* _WIN32 */

#if defined(HAVE_SYS_PRCTL_H)
  // re-enable core dumps, which setuid/setgid disable
  if (prctl(PR_SET_DUMPABLE, 1) == -1) {
    cerr << "warning: unable to set dumpable flag: " << cpp_strerror(errno) << std::endl;
  }
#  if defined(PR_SET_THP_DISABLE)
  if (!g_conf().get_val<bool>("thp") && prctl(PR_SET_THP_DISABLE, 1, 0, 0, 0) == -1) {
    cerr << "warning: unable to disable THP: " << cpp_strerror(errno) << std::endl;
  }
#  endif
#endif

  //
  // Utterly important to run first network connection after setuid().
  // In case of rdma transport uverbs kernel module starts returning
  // -EACCESS on each operation if credentials has been changed, see
  // callers of ib_safe_file_access() for details.
  //
  // fork() syscall also matters, so daemonization won't work in case
  // of rdma.
  //
  if (!g_conf()->no_mon_config) {
    // make sure our mini-session gets legacy values
    g_conf().apply_changes(nullptr);

    ceph::async::io_context_pool cp(1);
    MonClient mc_bootstrap(g_ceph_context, cp);
    if (mc_bootstrap.get_monmap_and_config() < 0) {
      cp.stop();
      g_ceph_context->_log->flush();
      cerr << "failed to fetch mon config (--no-mon-config to skip)"
	   << std::endl;
      _exit(1);
    }
    cp.stop();
  }

  // Expand metavariables. Invoke configuration observers. Open log file.
  g_conf().apply_changes(nullptr);

  if (g_conf()->run_dir.length() &&
      code_env == CODE_ENVIRONMENT_DAEMON &&
      !(flags & CINIT_FLAG_NO_DAEMON_ACTIONS)) {
    if (!fs::exists(g_conf()->run_dir.c_str())) {
      std::error_code ec;
      if (!fs::create_directory(g_conf()->run_dir, ec)) {
	cerr << "warning: unable to create " << g_conf()->run_dir
	     << ec.message() << std::endl;
      }
      // 0755 equivalent
      fs::permissions(
        g_conf()->run_dir.c_str(),
        fs::perms::owner_all |
        fs::perms::group_read | fs::perms::group_exec |
        fs::perms::others_read | fs::perms::others_exec);
    }
  }

  // call all observers now.  this has the side-effect of configuring
  // and opening the log file immediately.
  g_conf().call_all_observers();

  if (priv_ss.str().length()) {
    dout(0) << priv_ss.str() << dendl;
  }

  if ((flags & CINIT_FLAG_DEFER_DROP_PRIVILEGES) &&
      (g_ceph_context->get_set_uid() || g_ceph_context->get_set_gid())) {
    // Fix ownership on log files and run directories if needed.
    // Admin socket files are chown()'d during the common init path _after_
    // the service thread has been started. This is sadly a bit of a hack :(
    chown_path(g_conf()->run_dir,
	       g_ceph_context->get_set_uid(),
	       g_ceph_context->get_set_gid(),
	       g_ceph_context->get_set_uid_string(),
	       g_ceph_context->get_set_gid_string());
    g_ceph_context->_log->chown_log_file(
      g_ceph_context->get_set_uid(),
      g_ceph_context->get_set_gid());
  }

  // Now we're ready to complain about config file parse errors
  g_conf().complain_about_parse_error(g_ceph_context);

  // test leak checking
  if (g_conf()->debug_deliberately_leak_memory) {
    derr << "deliberately leaking some memory" << dendl;
    char *s = new char[1234567];
    (void)s;
    // cppcheck-suppress memleak
  }

  if (code_env == CODE_ENVIRONMENT_DAEMON && !(flags & CINIT_FLAG_NO_DAEMON_ACTIONS))
    output_ceph_version();

  if (g_ceph_context->crush_location.init_on_startup()) {
    cerr << " failed to init_on_startup : " << cpp_strerror(errno) << std::endl;
    exit(1);
  }

  return boost::intrusive_ptr<CephContext>{g_ceph_context, false};
}
// Emit the standard startup banner (version, process name, pid) to the
// log for callers that skip the daemon startup path.
void global_print_banner(void)
{
  output_ceph_version();
}
// Pre-fork half of daemonization.  Returns 0 when the caller should
// proceed with the fork (daemon environment, daemonize enabled), or -1
// when no fork will happen -- in the non-daemonize case the pid file is
// written here instead of after the fork.
int global_init_prefork(CephContext *cct)
{
  if (g_code_env != CODE_ENVIRONMENT_DAEMON)
    return -1;

  const auto& conf = cct->_conf;
  if (!conf->daemonize) {
    // not forking: write the pid file now
    if (pidfile_write(conf->pid_file) < 0)
      exit(1);

    if ((cct->get_init_flags() & CINIT_FLAG_DEFER_DROP_PRIVILEGES) &&
	(cct->get_set_uid() || cct->get_set_gid())) {
      chown_path(conf->pid_file, cct->get_set_uid(), cct->get_set_gid(),
		 cct->get_set_uid_string(), cct->get_set_gid_string());
    }

    return -1;
  }

  cct->notify_pre_fork();
  // stop log thread
  cct->_log->flush();
  cct->_log->stop();
  return 0;
}
// Daemonize the process: prefork (stop the log thread), daemon(3), then
// the two postfork steps in the child.  Does nothing when prefork decides
// no fork should happen.
void global_init_daemonize(CephContext *cct)
{
  if (global_init_prefork(cct) < 0)
    return;

#if !defined(_AIX) && !defined(_WIN32)
  // daemon(1, 1): keep cwd and keep std fds open; the postfork steps
  // redirect them to /dev/null themselves
  int ret = daemon(1, 1);
  if (ret) {
    ret = errno;
    derr << "global_init_daemonize: BUG: daemon error: "
	 << cpp_strerror(ret) << dendl;
    exit(1);
  }

  global_init_postfork_start(cct);
  global_init_postfork_finish(cct);
#else
# warning daemon not supported on aix
#endif
}
/* Make file descriptors 0, 1, and possibly 2 point to /dev/null.
 *
 * Instead of just closing fd, we redirect it to /dev/null with dup2().
 * We have to do this because otherwise some arbitrary call to open() later
 * in the program might get back one of these file descriptors. It's hard to
 * guarantee that nobody ever writes to stdout, even though they're not
 * supposed to.
 */
int reopen_as_null(CephContext *cct, int fd)
{
  int newfd = open(DEV_NULL, O_RDWR | O_CLOEXEC);
  if (newfd < 0) {
    int err = errno;
    lderr(cct) << __func__ << " failed to open /dev/null: " << cpp_strerror(err)
	       << dendl;
    return -1;
  }
  // If the target fd was already closed, open() may have handed it straight
  // back to us; dup2(fd, fd) is then a no-op and we must NOT close newfd
  // below, or we would close the very descriptor we just set up.
  // NOTE(review): in this case FD_CLOEXEC remains set on fd (the dup2 path
  // clears it) -- believed harmless for 0/1/2, but worth confirming.
  if (newfd != fd) {
    // atomically dup newfd to target fd.  target fd is implicitly closed if
    // open and atomically replaced; see man dup2
    int r = dup2(newfd, fd);
    if (r < 0) {
      int err = errno;
      lderr(cct) << __func__ << " failed to dup2 " << fd << ": "
		 << cpp_strerror(err) << dendl;
      // don't leak the /dev/null descriptor on failure
      VOID_TEMP_FAILURE_RETRY(close(newfd));
      return -1;
    }
    // close newfd (we cloned it to target fd)
    VOID_TEMP_FAILURE_RETRY(close(newfd));
    // N.B. FD_CLOEXEC is cleared on fd (see dup2(2))
  }
  return 0;
}
// First half of post-fork cleanup in the child: restart the log thread,
// detach stdin, and write (and possibly chown) the pid file.  stderr is
// left alone so startup errors remain visible until _finish is called.
void global_init_postfork_start(CephContext *cct)
{
  // reexpand the meta in child process
  cct->_conf.finalize_reexpand_meta();

  // restart log thread
  cct->_log->start();
  cct->notify_post_fork();

  reopen_as_null(cct, STDIN_FILENO);

  const auto& conf = cct->_conf;
  if (pidfile_write(conf->pid_file) < 0)
    exit(1);

  if ((cct->get_init_flags() & CINIT_FLAG_DEFER_DROP_PRIVILEGES) &&
      (cct->get_set_uid() || cct->get_set_gid())) {
    chown_path(conf->pid_file, cct->get_set_uid(), cct->get_set_gid(),
	       cct->get_set_uid_string(), cct->get_set_gid_string());
  }
}
// Second half of post-fork cleanup: detach stdout and (unless the caller
// asked to keep it) stderr, completing daemonization.
void global_init_postfork_finish(CephContext *cct)
{
  /* We only close stdout+stderr once the caller decides the daemonization
   * process is finished.  This way we can allow error or other messages to be
   * propagated in a manner that the user is able to see.
   */
  if (!(cct->get_init_flags() & CINIT_FLAG_NO_CLOSE_STDERR)) {
    int ret = global_init_shutdown_stderr(cct);
    if (ret) {
      derr << "global_init_daemonize: global_init_shutdown_stderr failed with "
	   << "error code " << ret << dendl;
      exit(1);
    }
  }

  reopen_as_null(cct, STDOUT_FILENO);
  ldout(cct, 1) << "finished global_init_daemonize" << dendl;
}
// chdir into the configured directory (conf->chdir), if any.  A failed
// chdir is logged but is not fatal.
void global_init_chdir(const CephContext *cct)
{
  const auto& conf = cct->_conf;
  if (!conf->chdir.empty() && ::chdir(conf->chdir.c_str()) != 0) {
    const int err = errno;
    derr << "global_init_chdir: failed to chdir to directory: '"
	 << conf->chdir << "': " << cpp_strerror(err) << dendl;
  }
}
int global_init_shutdown_stderr(CephContext *cct)
{
  // point stderr at /dev/null and tell the log subsystem to stop
  // mirroring anything to it
  reopen_as_null(cct, STDERR_FILENO);
  cct->_log->set_stderr_level(-2, -2);
  return 0;  // always succeeds
}
int global_init_preload_erasure_code(const CephContext *cct)
{
const auto& conf = cct->_conf;
string plugins = conf->osd_erasure_code_plugins;
// validate that this is a not a legacy plugin
std::list<string> plugins_list;
get_str_list(plugins, plugins_list);
for (auto i = plugins_list.begin(); i != plugins_list.end(); ++i) {
string plugin_name = *i;
string replacement = "";
if (plugin_name == "jerasure_generic" ||
plugin_name == "jerasure_sse3" ||
plugin_name == "jerasure_sse4" ||
plugin_name == "jerasure_neon") {
replacement = "jerasure";
}
else if (plugin_name == "shec_generic" ||
plugin_name == "shec_sse3" ||
plugin_name == "shec_sse4" ||
plugin_name == "shec_neon") {
replacement = "shec";
}
if (replacement != "") {
dout(0) << "WARNING: osd_erasure_code_plugins contains plugin "
<< plugin_name << " that is now deprecated. Please modify the value "
<< "for osd_erasure_code_plugins to use " << replacement << " instead." << dendl;
}
}
std::stringstream ss;
int r = ceph::ErasureCodePluginRegistry::instance().preload(
plugins,
conf.get_val<std::string>("erasure_code_dir"),
&ss);
if (r)
derr << ss.str() << dendl;
else
dout(0) << ss.str() << dendl;
return r;
}
| 18,241 | 27.68239 | 115 | cc |
null | ceph-main/src/global/global_init.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMMON_GLOBAL_INIT_H
#define CEPH_COMMON_GLOBAL_INIT_H
#include <stdint.h>
#include <vector>
#include <map>
#include <boost/intrusive_ptr.hpp>
#include "include/ceph_assert.h"
#include "common/ceph_context.h"
#include "common/code_environment.h"
#include "common/common_init.h"
/*
 * global_init is the first initialization function that
 * daemons and utility programs need to call. It takes care of a lot of
 * initialization, including setting up g_ceph_context.
 *
 * Returns an owning reference to the newly-created CephContext.
 */
boost::intrusive_ptr<CephContext>
global_init(
  const std::map<std::string,std::string> *defaults,
  std::vector < const char* >& args,
  uint32_t module_type,
  code_environment_t code_env,
  int flags, bool run_pre_init = true);

// just the first half; enough to get config parsed but doesn't start up the
// cct or log.
void global_pre_init(const std::map<std::string,std::string> *defaults,
		     std::vector < const char* >& args,
		     uint32_t module_type, code_environment_t code_env,
		     int flags);

/*
 * perform all of the steps that global_init_daemonize performs just prior
 * to actually forking (via daemon(3)).  return 0 if we are going to proceed
 * with the fork, or -1 otherwise.
 */
int global_init_prefork(CephContext *cct);

/*
 * perform all the steps that global_init_daemonize performs just after
 * the fork, except closing stderr, which we'll do later on.
 */
void global_init_postfork_start(CephContext *cct);

/*
 * close stderr, thus completing the postfork.
 */
void global_init_postfork_finish(CephContext *cct);

/*
 * global_init_daemonize handles daemonizing a process.
 *
 * If this is called, it *must* be called before common_init_finish.
 * Note that this is equivalent to calling _prefork(), daemon(), and
 * _postfork.
 */
void global_init_daemonize(CephContext *cct);

/*
 * global_init_chdir changes the process directory.
 *
 * If this is called, it *must* be called before common_init_finish
 */
void global_init_chdir(const CephContext *cct);

/*
 * Explicitly shut down stderr.  Usually, you don't need to do
 * this, because global_init_daemonize will do it for you.  However, in some
 * rare cases you need to call this explicitly.
 *
 * If this is called, it *must* be called before common_init_finish
 */
int global_init_shutdown_stderr(CephContext *cct);

/*
 * Preload the erasure coding libraries to detect early issues with
 * configuration.
 */
int global_init_preload_erasure_code(const CephContext *cct);

/**
 * print daemon startup banner/warning
 */
void global_print_banner(void);
#endif
| 2,952 | 27.95098 | 76 | h |
null | ceph-main/src/global/pidfile.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/debug.h"
#include "common/errno.h"
#include "common/safe_io.h"
#include "global/pidfile.h"
#include <errno.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#if defined(__FreeBSD__)
#include <sys/param.h>
#endif
#include "include/compat.h"
using std::string;
//
// derr can be used for functions exclusively called from pidfile_write
//
// cerr must be used for functions called by pidfile_remove because
// logging is not functional when it is called. cerr output is lost
// when the caller is daemonized but it will show if not (-f)
//
#define dout_context g_ceph_context
#define dout_prefix *_dout
#define dout_subsys ceph_subsys_
// State for the single process-wide pid file.  The device/inode pair
// recorded when the file is opened lets verify()/remove() detect that the
// path has been replaced behind our back (e.g. by a newer daemon
// instance), so we never delete someone else's pid file.
struct pidfh {
  int pf_fd;        // open fd on the pid file, or -1
  string pf_path;   // path of the pid file; empty when none configured
  dev_t pf_dev;     // st_dev of the file at open() time
  ino_t pf_ino;     // st_ino of the file at open() time

  pidfh() {
    reset();
  }
  ~pidfh() {
    // best-effort removal; see remove() for the safety checks
    remove();
  }

  bool is_open() const {
    return !pf_path.empty() && pf_fd != -1;
  }
  void reset() {
    pf_fd = -1;
    pf_path.clear();
    pf_dev = 0;
    pf_ino = 0;
  }
  int verify();
  int remove();
  int open(std::string_view pid_file);
  int write();
};

// the one-and-only pid file handle, created by pidfile_write()
static pidfh *pfh = nullptr;
// Confirm pf_path still names the very file we opened (same device and
// inode).  Returns 0 on success, -EINVAL when no fd is held, -errno when
// the path cannot be stat'ed, or -ESTALE when the file was replaced.
int pidfh::verify() {
  // check that the file we opened still is the same
  if (pf_fd == -1)
    return -EINVAL;
  struct stat st;
  if (stat(pf_path.c_str(), &st) == -1)
    return -errno;
  return (st.st_dev == pf_dev && st.st_ino == pf_ino) ? 0 : -ESTALE;
}
// Remove the pid file -- but only after double-checking that (a) the path
// still names the file we opened (verify()) and (b) the file's contents
// are our own pid.  This prevents deleting a pid file that a newer
// instance of the daemon has already taken over.  Logging goes to
// std::cerr because the log subsystem may already be shut down here.
int pidfh::remove()
{
  if (pf_path.empty())
    return 0;

  int ret;
  if ((ret = verify()) < 0) {
    if (pf_fd != -1) {
      ::close(pf_fd);
      reset();
    }
    return ret;
  }

  // seek to the beginning of the file before reading
  ret = ::lseek(pf_fd, 0, SEEK_SET);
  if (ret < 0) {
    std::cerr << __func__ << " lseek failed "
	      << cpp_strerror(errno) << std::endl;
    return -errno;
  }

  // check that the pid file still has our pid in it
  char buf[32];
  memset(buf, 0, sizeof(buf));
  ssize_t res = safe_read(pf_fd, buf, sizeof(buf));
  ::close(pf_fd);
  if (res < 0) {
    std::cerr << __func__ << " safe_read failed "
	      << cpp_strerror(-res) << std::endl;
    return res;
  }

  int a = atoi(buf);
  if (a != getpid()) {
    std::cerr << __func__ << " the pid found in the file is "
	      << a << " which is different from getpid() "
	      << getpid() << std::endl;
    return -EDOM;
  }
  ret = ::unlink(pf_path.c_str());
  if (ret < 0) {
    std::cerr << __func__ << " unlink " << pf_path.c_str() << " failed "
	      << cpp_strerror(errno) << std::endl;
    return -errno;
  }
  reset();
  return 0;
}
// Open (creating if necessary) and write-lock the pid file.  On failure
// the handle is reset and a negative errno-style code is returned.
int pidfh::open(std::string_view pid_file)
{
  pf_path = pid_file;

  int fd;
  fd = ::open(pf_path.c_str(), O_CREAT|O_RDWR|O_CLOEXEC, 0644);
  if (fd < 0) {
    int err = errno;
    derr << __func__ << ": failed to open pid file '"
	 << pf_path << "': " << cpp_strerror(err) << dendl;
    reset();
    return -err;
  }
  // remember the identity of the file we opened so verify()/remove() can
  // later detect whether the path was replaced
  struct stat st;
  if (fstat(fd, &st) == -1) {
    int err = errno;
    derr << __func__ << ": failed to fstat pid file '"
	 << pf_path << "': " << cpp_strerror(err) << dendl;
    ::close(fd);
    reset();
    return -err;
  }

  pf_fd = fd;
  pf_dev = st.st_dev;
  pf_ino = st.st_ino;

  // Default Windows file share flags prevent other processes from writing
  // to this file.
#ifndef _WIN32
  // take an exclusive advisory lock on the whole file so two daemons
  // cannot share a pid file
  struct flock l;
  l.l_type = F_WRLCK;
  l.l_whence = SEEK_SET;
  l.l_start = 0;
  l.l_len = 0;
  int r = ::fcntl(pf_fd, F_SETLK, &l);
  if (r < 0) {
    // capture errno immediately: the derr stream operations below may
    // clobber it before we return (the other error paths in this function
    // already capture it first)
    const int lock_errno = errno;
    if (lock_errno == EAGAIN || lock_errno == EACCES) {
      derr << __func__ << ": failed to lock pidfile "
	   << pf_path << " because another process locked it"
	   << "': " << cpp_strerror(lock_errno) << dendl;
    } else {
      derr << __func__ << ": failed to lock pidfile "
	   << pf_path << "': " << cpp_strerror(lock_errno) << dendl;
    }
    ::close(pf_fd);
    reset();
    return -lock_errno;
  }
#endif
  return 0;
}
// Truncate the pid file and write our pid (plus a newline) into it.
// No-op when no pid file is configured/open.  The write lands at offset 0
// because the fd is used only once, right after open() -- if write() were
// ever called twice an lseek would be needed first (TODO confirm).
int pidfh::write()
{
  if (!is_open())
    return 0;

  char buf[32];
  int len = snprintf(buf, sizeof(buf), "%d\n", getpid());
  if (::ftruncate(pf_fd, 0) < 0) {
    int err = errno;
    derr << __func__ << ": failed to ftruncate the pid file '"
	 << pf_path << "': " << cpp_strerror(err) << dendl;
    return -err;
  }
  ssize_t res = safe_write(pf_fd, buf, len);
  if (res < 0) {
    derr << __func__ << ": failed to write to pid file '"
	 << pf_path << "': " << cpp_strerror(-res) << dendl;
    return res;
  }
  return 0;
}
// Tear down the pid file, if one was written.  Safe to call when no pid
// file exists and safe to call more than once (registered via atexit and
// also invoked directly on error paths).
void pidfile_remove()
{
  // `delete` on a null pointer is a no-op, so no guard is needed; the
  // pidfh destructor performs the verified unlink.
  delete pfh;
  pfh = nullptr;
}
// Create, lock, and write the pid file at `pid_file`; an empty path is a
// logged no-op.  Registers pidfile_remove() to run at exit.  Returns 0 on
// success or a negative error code (the partially-built state is torn
// down on failure).
int pidfile_write(std::string_view pid_file)
{
  if (pid_file.empty()) {
    dout(0) << __func__ << ": ignore empty --pid-file" << dendl;
    return 0;
  }

  ceph_assert(pfh == nullptr);

  pfh = new pidfh();
  if (atexit(pidfile_remove)) {
    derr << __func__ << ": failed to set pidfile_remove function "
	 << "to run at exit." << dendl;
    return -EINVAL;
  }

  int ret = pfh->open(pid_file);
  if (ret == 0)
    ret = pfh->write();
  if (ret != 0)
    pidfile_remove();
  return ret;
}
| 5,427 | 20.625498 | 74 | cc |
null | ceph-main/src/global/pidfile.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMMON_PIDFILE_H
#define CEPH_COMMON_PIDFILE_H
#include <string_view>
// Write a pidfile with the current pid, using the configuration in the
// provided conf structure.  Returns 0 on success (including when no pid
// file is configured) or a negative error code.
[[nodiscard]] int pidfile_write(std::string_view pid_file);

// Remove the pid file that was previously written by pidfile_write.
// This is safe to call in a signal handler context.
void pidfile_remove();
#endif
| 808 | 26.896552 | 71 | h |
null | ceph-main/src/global/signal_handler.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <sys/utsname.h>
#include "include/compat.h"
#include "pthread.h"
#include "common/ceph_mutex.h"
#include "common/BackTrace.h"
#include "common/debug.h"
#include "common/safe_io.h"
#include "common/version.h"
#include "include/uuid.h"
#include "global/pidfile.h"
#include "global/signal_handler.h"
#include <poll.h>
#include <signal.h>
#include <sstream>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "common/errno.h"
#if defined(_AIX)
extern char *sys_siglist[];
#endif
#define dout_context g_ceph_context
using std::ostringstream;
using std::string;
using ceph::BackTrace;
using ceph::JSONFormatter;
// Install `handler` for `signum` with the given sigaction flags; the
// process exits if sigaction() itself fails.
void install_sighandler(int signum, signal_handler_t handler, int flags)
{
  int ret;
  struct sigaction oldact;
  struct sigaction act;
  memset(&act, 0, sizeof(act));

  act.sa_handler = handler;
  sigemptyset(&act.sa_mask);
  act.sa_flags = flags;

  ret = sigaction(signum, &act, &oldact);
  if (ret != 0) {
    char buf[1024];
#if defined(__sun)
    // Solaris has no sys_siglist/strsignal-style helper; use sig2str()
    char message[SIG2STR_MAX];
    sig2str(signum,message);
    snprintf(buf, sizeof(buf), "install_sighandler: sigaction returned "
	     "%d when trying to install a signal handler for %s\n",
	     ret, message);
#else
    snprintf(buf, sizeof(buf), "install_sighandler: sigaction returned "
	     "%d when trying to install a signal handler for %s\n",
	     ret, sig_str(signum));
#endif
    dout_emergency(buf);
    exit(1);
  }
}
// SIGHUP handler: ask the context to reopen its log files (typically
// after external log rotation).
void sighup_handler(int signum)
{
  g_ceph_context->reopen_logs();
}
// Restore the default disposition for `signum` and re-raise it so the
// kernel terminates us (and can dump core).  If the process somehow
// survives the re-raise, log what happened and exit.  Runs from
// signal-handler context.
static void reraise_fatal(int signum)
{
  // Use default handler to dump core
  signal(signum, SIG_DFL);
  int ret = raise(signum);

  // Normally, we won't get here. If we do, something is very weird.
  char buf[1024];
  if (ret) {
    snprintf(buf, sizeof(buf), "reraise_fatal: failed to re-raise "
	     "signal %d\n", signum);
    dout_emergency(buf);
  }
  else {
    snprintf(buf, sizeof(buf), "reraise_fatal: default handler for "
	     "signal %d didn't terminate the process?\n", signum);
    dout_emergency(buf);
  }
  exit(1);
}
// /etc/os-release looks like
//
// NAME=Fedora
// VERSION="28 (Server Edition)"
// ID=fedora
// VERSION_ID=28
//
// or
//
// NAME="Ubuntu"
// VERSION="16.04.3 LTS (Xenial Xerus)"
// ID=ubuntu
// ID_LIKE=debian
//
// parse_from_os_release("FOO=bar\nTHIS=\"that\"\n", "FOO=", ...) will
// write "bar\0" to the out buffer, which is assumed to be as large as the
// input file.
// Extract the value of `key` (e.g. "ID=") from /etc/os-release content in
// `file`, stripping surrounding double quotes, and copy it NUL-terminated
// into `out` (which must be at least as large as `file`).  Returns 0 on
// success, -1 when the key is absent or its value is empty/unterminated.
//
// The key must appear at the start of a line: a plain strstr() would let
// "ID=" match inside "VERSION_ID=" when VERSION_ID precedes ID in the
// file, yielding the wrong value.
static int parse_from_os_release(
  const char *file, const char *key,
  char *out)
{
  const char *p = file;
  while (true) {
    p = strstr(p, key);
    if (!p) {
      return -1;
    }
    if (p == file || p[-1] == '\n') {
      break;  // anchored at the beginning of a line
    }
    ++p;      // embedded match (e.g. inside a longer key); keep scanning
  }
  const char *start = p + strlen(key);
  const char *end = strchr(start, '\n');
  if (!end) {
    return -1;
  }
  // strip a matched pair of surrounding quotes
  if (*start == '"' && *(end - 1) == '"') {
    ++start;
    --end;
  }
  if (start >= end) {
    return -1;
  }
  memcpy(out, start, end - start);
  out[end - start] = 0;
  return 0;
}
// Write a crash report directory under the configured crash_dir (no-op if
// crash_dir is unset).  Creates <crash_dir>/<utc-time>_<uuid>/ containing:
//   - "meta": a JSON document with version/uname/os-release info, any
//     assert or EIO details captured in globals, the backtrace `bt`, and
//     optional caller-supplied `extra` key/value pairs;
//   - "done": an empty marker file created last, so collectors can tell a
//     complete report from one interrupted mid-write.
// On success `base` (assumed to be at least PATH_MAX bytes) receives the
// report directory path.  Failures are silently ignored -- this runs from
// the fatal-signal path.
void generate_crash_dump(char *base,
			 const BackTrace& bt,
			 std::map<std::string,std::string> *extra)
{
  if (g_ceph_context &&
      g_ceph_context->_conf->crash_dir.size()) {
    // -- crash dump --
    // id: "<utc time>_<random uuid>", spaces replaced for filesystem use
    ostringstream idss;
    utime_t now = ceph_clock_now();
    now.gmtime(idss);
    uuid_d uuid;
    uuid.generate_random();
    idss << "_" << uuid;
    string id = idss.str();
    std::replace(id.begin(), id.end(), ' ', '_');
    snprintf(base, PATH_MAX, "%s/%s",
	     g_ceph_context->_conf->crash_dir.c_str(),
	     id.c_str());
    int r = ::mkdir(base, 0700);
    if (r >= 0) {
      char fn[PATH_MAX*2];
      snprintf(fn, sizeof(fn)-1, "%s/meta", base);
      int fd = ::open(fn, O_CREAT|O_WRONLY|O_CLOEXEC, 0600);
      if (fd >= 0) {
	JSONFormatter jf(true);
	jf.open_object_section("crash");
	jf.dump_string("crash_id", id);
        now.gmtime(jf.dump_stream("timestamp"));
	jf.dump_string("process_name", g_process_name);
	jf.dump_string("entity_name", g_ceph_context->_conf->name.to_str());
	jf.dump_string("ceph_version", ceph_version_to_str());
	struct utsname u;
	r = uname(&u);
	if (r >= 0) {
	  jf.dump_string("utsname_hostname", u.nodename);
	  jf.dump_string("utsname_sysname", u.sysname);
	  jf.dump_string("utsname_release", u.release);
	  jf.dump_string("utsname_version", u.version);
	  jf.dump_string("utsname_machine", u.machine);
	}
#if defined(__linux__)
	// os-release: best-effort distro identification
	int in = ::open("/etc/os-release", O_RDONLY|O_CLOEXEC);
	if (in >= 0) {
	  char buf[4096];
	  r = safe_read(in, buf, sizeof(buf)-1);
	  if (r >= 0) {
	    buf[r] = 0;
	    char v[4096];
	    if (parse_from_os_release(buf, "NAME=", v) >= 0) {
	      jf.dump_string("os_name", v);
	    }
	    if (parse_from_os_release(buf, "ID=", v) >= 0) {
	      jf.dump_string("os_id", v);
	    }
	    if (parse_from_os_release(buf, "VERSION_ID=", v) >= 0) {
	      jf.dump_string("os_version_id", v);
	    }
	    if (parse_from_os_release(buf, "VERSION=", v) >= 0) {
	      jf.dump_string("os_version", v);
	    }
	  }
	  ::close(in);
	}
#endif
	// assert?  (these globals are filled in by the assert machinery
	// before it aborts)
	if (g_assert_condition) {
	  jf.dump_string("assert_condition", g_assert_condition);
	}
	if (g_assert_func) {
	  jf.dump_string("assert_func", g_assert_func);
	}
	if (g_assert_file) {
	  jf.dump_string("assert_file", g_assert_file);
	}
	if (g_assert_line) {
	  jf.dump_unsigned("assert_line", g_assert_line);
	}
	if (g_assert_thread_name[0]) {
	  jf.dump_string("assert_thread_name", g_assert_thread_name);
	}
	if (g_assert_msg[0]) {
	  jf.dump_string("assert_msg", g_assert_msg);
	}
	// eio?  (set when the crash was triggered by a disk I/O error)
	if (g_eio) {
	  jf.dump_bool("io_error", true);
	  if (g_eio_devname[0]) {
	    jf.dump_string("io_error_devname", g_eio_devname);
	  }
	  if (g_eio_path[0]) {
	    jf.dump_string("io_error_path", g_eio_path);
	  }
	  if (g_eio_error) {
	    jf.dump_int("io_error_code", g_eio_error);
	  }
	  if (g_eio_iotype) {
	    jf.dump_int("io_error_optype", g_eio_iotype);
	  }
	  if (g_eio_offset) {
	    jf.dump_unsigned("io_error_offset", g_eio_offset);
	  }
	  if (g_eio_length) {
	    jf.dump_unsigned("io_error_length", g_eio_length);
	  }
	}
	bt.dump(&jf);
	if (extra) {
	  for (auto& i : *extra) {
	    jf.dump_string(i.first, i.second);
	  }
	}
	jf.close_section();
	ostringstream oss;
	jf.flush(oss);
	string s = oss.str();
	r = safe_write(fd, s.c_str(), s.size());
	(void)r;
	::close(fd);
      }
      // "done" marker written only after "meta" is complete
      snprintf(fn, sizeof(fn)-1, "%s/done", base);
      ::creat(fn, 0444);
    }
  }
}
// Fatal-signal handler (SIGSEGV, SIGABRT, SIGBUS, ...): logs a banner and
// a backtrace, writes a crash dump, dumps recent log entries, then either
// exits (for EIO-induced crashes) or re-raises the signal so the default
// handler produces a core dump.  Only the first thread to fault runs the
// body; see the compare_exchange below.
static void handle_oneshot_fatal_signal(int signum)
{
  constexpr static pid_t NULL_TID{0};
  static std::atomic<pid_t> handler_tid{NULL_TID};
  if (auto expected{NULL_TID};
      !handler_tid.compare_exchange_strong(expected, ceph_gettid())) {
    if (expected == ceph_gettid()) {
      // The handler code may itself trigger a SIGSEGV if the heap is corrupt.
      // In that case, SIG_DFL followed by return specifies that the default
      // signal handler -- presumably dump core -- will handle it.
      signal(signum, SIG_DFL);
    } else {
      // Huh, another thread got into troubles while we are handling the fault.
      // If this is i.e. SIGSEGV handler, returning means retrying the faulty
      // instruction one more time, and thus all those extra threads will run
      // into a busy-wait basically.
    }
    return;
  }
  char buf[1024];
  char pthread_name[16] = {0}; //limited by 16B include terminating null byte.
  int r = ceph_pthread_getname(pthread_self(), pthread_name, sizeof(pthread_name));
  (void)r;
#if defined(__sun)
  char message[SIG2STR_MAX];
  sig2str(signum,message);
  snprintf(buf, sizeof(buf), "*** Caught signal (%s) **\n "
	    "in thread %llx thread_name:%s\n", message, (unsigned long long)pthread_self(),
	    pthread_name);
#else
  snprintf(buf, sizeof(buf), "*** Caught signal (%s) **\n "
	    "in thread %llx thread_name:%s\n", sig_str(signum), (unsigned long long)pthread_self(),
	    pthread_name);
#endif
  dout_emergency(buf);
  pidfile_remove();
  // TODO: don't use an ostringstream here. It could call malloc(), which we
  // don't want inside a signal handler.
  // Also fix the backtrace code not to allocate memory.
  ClibBackTrace bt(1);
  ostringstream oss;
  bt.print(oss);
  dout_emergency(oss.str());
  char crash_base[PATH_MAX] = { 0 };
  generate_crash_dump(crash_base, bt);
  // avoid recursion back into logging code if that is where
  // we got the SEGV.
  if (g_ceph_context &&
      g_ceph_context->_log &&
      !g_ceph_context->_log->is_inside_log_lock()) {
    // dump to log.  this uses the heap extensively, but we're better
    // off trying than not.
    derr << buf << std::endl;
    bt.print(*_dout);
    *_dout << " NOTE: a copy of the executable, or `objdump -rdS <executable>` "
	   << "is needed to interpret this.\n"
	   << dendl;
    g_ceph_context->_log->dump_recent();
    if (crash_base[0]) {
      // also dump the recent log into the crash report directory
      char fn[PATH_MAX*2];
      snprintf(fn, sizeof(fn)-1, "%s/log", crash_base);
      g_ceph_context->_log->set_log_file(fn);
      g_ceph_context->_log->reopen_log_file();
      g_ceph_context->_log->dump_recent();
    }
  }
  if (g_eio) {
    // if this was an EIO crash, we don't need to trigger a core dump,
    // since the problem is hardware, or some layer beneath us.
    _exit(EIO);
  } else {
    reraise_fatal(signum);
  }
}
// Install the one-shot fatal-signal handler for every signal that should
// produce a backtrace + crash dump.  SA_NODEFER lets the handler re-raise
// the same signal to trigger the default core-dump behavior.
void install_standard_sighandlers(void)
{
  static const int fatal_signals[] = {
    SIGSEGV, SIGABRT, SIGBUS, SIGILL,
    SIGFPE, SIGXCPU, SIGXFSZ, SIGSYS,
  };
  for (const int sig : fatal_signals) {
    install_sighandler(sig, handle_oneshot_fatal_signal, SA_NODEFER);
  }
}
/// --- safe handler ---
#include "common/Thread.h"
#include <errno.h>
#ifdef __APPLE__
#include <libproc.h>
// Resolve a PID to a human-readable process name (macOS: executable path
// via proc_pidpath()).  Returns "<unknown>" on failure.
string get_name_by_pid(pid_t pid)
{
  char buf[PROC_PIDPATHINFO_MAXSIZE];
  int ret = proc_pidpath(pid, buf, sizeof(buf));
  if (ret == 0) {
    derr << "Fail to proc_pidpath(" << pid << ")"
	 << " error = " << cpp_strerror(ret)
	 << dendl;
    return "<unknown>";
  }
  return string(buf, ret);
}
#else
// Resolve a PID to a human-readable process name by reading
// /proc/<pid>/cmdline (argv strings joined, NULs replaced by spaces).
// Returns "Kernel" for PID 0 and "<unknown>" on any failure.
string get_name_by_pid(pid_t pid)
{
  // If the PID is 0, its means the sender is the Kernel itself
  if (pid == 0) {
    return "Kernel";
  }
  char proc_pid_path[PATH_MAX] = {0};
  snprintf(proc_pid_path, PATH_MAX, PROCPREFIX "/proc/%d/cmdline", pid);
  int fd = open(proc_pid_path, O_RDONLY);
  if (fd < 0) {
    fd = -errno;
    derr << "Fail to open '" << proc_pid_path
	 << "' error = " << cpp_strerror(fd)
	 << dendl;
    return "<unknown>";
  }
  // assuming the cmdline length does not exceed PATH_MAX. if it
  // really does, it's fine to return a truncated version.
  char buf[PATH_MAX] = {0};
  int ret = read(fd, buf, sizeof(buf));
  close(fd);
  if (ret < 0) {
    ret = -errno;
    derr << "Fail to read '" << proc_pid_path
	 << "' error = " << cpp_strerror(ret)
	 << dendl;
    return "<unknown>";
  }
  // cmdline separates (and terminates) argv entries with NUL bytes
  std::replace(buf, buf + ret, '\0', ' ');
  return string(buf, ret);
}
#endif
/**
* safe async signal handler / dispatcher
*
* This is an async unix signal handler based on the design from
*
* http://evbergen.home.xs4all.nl/unix-signals.html
*
* Features:
* - no unsafe work is done in the signal handler itself
* - callbacks are called from a regular thread
* - signals are not lost, unless multiple instances of the same signal
* are sent twice in quick succession.
*/
// Async signal dispatcher (see design notes above): the actual signal
// handler only stashes the siginfo and writes one byte into a per-signal
// pipe; this worker thread polls the pipes and invokes the registered
// callbacks in normal thread context, where allocation/logging is safe.
struct SignalHandler : public Thread {
  /// to kick the thread, for shutdown, new handlers, etc.
  int pipefd[2]; // write to [1], read from [0]
  /// to signal shutdown
  bool stop = false;
  /// for an individual signal
  struct safe_handler {
    safe_handler() {
      memset(pipefd, 0, sizeof(pipefd));
      memset(&handler, 0, sizeof(handler));
      memset(&info_t, 0, sizeof(info_t));
    }
    // last siginfo received for this signal; overwritten if the signal
    // fires again before the worker thread consumes it
    siginfo_t info_t;
    int pipefd[2]; // write to [1], read from [0]
    signal_handler_t handler;
  };
  /// all handlers, indexed by signal number (signals 0..31)
  safe_handler *handlers[32] = {nullptr};
  /// to protect the handlers array
  ceph::mutex lock = ceph::make_mutex("SignalHandler::lock");
  SignalHandler() {
    // create signal pipe; read end is non-blocking so the worker can
    // drain it opportunistically
    int r = pipe_cloexec(pipefd, 0);
    ceph_assert(r == 0);
    r = fcntl(pipefd[0], F_SETFL, O_NONBLOCK);
    ceph_assert(r == 0);
    // create thread
    create("signal_handler");
  }
  ~SignalHandler() override {
    shutdown();
  }
  // wake the worker thread (for shutdown or a changed handler set)
  void signal_thread() {
    int r = write(pipefd[1], "\0", 1);
    ceph_assert(r == 1);
  }
  void shutdown() {
    stop = true;
    signal_thread();
    join();
  }
  // thread entry point: poll the control pipe plus one pipe per
  // registered signal, and run callbacks as bytes arrive
  void *entry() override {
    while (!stop) {
      // build fd list
      struct pollfd fds[33];
      lock.lock();
      int num_fds = 0;
      fds[num_fds].fd = pipefd[0];
      fds[num_fds].events = POLLIN | POLLERR;
      fds[num_fds].revents = 0;
      ++num_fds;
      for (unsigned i=0; i<32; i++) {
	if (handlers[i]) {
	  fds[num_fds].fd = handlers[i]->pipefd[0];
	  fds[num_fds].events = POLLIN | POLLERR;
	  fds[num_fds].revents = 0;
	  ++num_fds;
	}
      }
      lock.unlock();
      // wait for data on any of those pipes
      int r = poll(fds, num_fds, -1);
      if (stop)
	break;
      if (r > 0) {
	char v;
	// consume byte from signal socket, if any.
	TEMP_FAILURE_RETRY(read(pipefd[0], &v, 1));
	lock.lock();
	for (unsigned signum=0; signum<32; signum++) {
	  if (handlers[signum]) {
	    // non-blocking read: r == 1 iff this signal was queued
	    r = read(handlers[signum]->pipefd[0], &v, 1);
	    if (r == 1) {
	      siginfo_t * siginfo = &handlers[signum]->info_t;
	      ostringstream message;
	      message << "received signal: " << sig_str(signum);
	      switch (siginfo->si_code) {
		case SI_USER:
		  message << " from " << get_name_by_pid(siginfo->si_pid);
		  // If PID is undefined, it doesn't have a meaning to be displayed
		  if (siginfo->si_pid) {
		    message << " (PID: " << siginfo->si_pid << ")";
		  } else {
		    message << " ( Could be generated by pthread_kill(), raise(), abort(), alarm() )";
		  }
		  message << " UID: " << siginfo->si_uid;
		  break;
		default:
		  /* As we have a not expected signal, let's report the structure to help debugging */
		  message << ", si_code : " << siginfo->si_code;
		  message << ", si_value (int): " << siginfo->si_value.sival_int;
		  message << ", si_value (ptr): " << siginfo->si_value.sival_ptr;
		  message << ", si_errno: " << siginfo->si_errno;
		  message << ", si_pid : " << siginfo->si_pid;
		  message << ", si_uid : " << siginfo->si_uid;
		  message << ", si_addr" << siginfo->si_addr;
		  message << ", si_status" << siginfo->si_status;
		  break;
	      }
	      derr << message.str() << dendl;
	      handlers[signum]->handler(signum);
	    }
	  }
	}
	lock.unlock();
      }
    }
    return NULL;
  }
  void queue_signal(int signum) {
    // If this signal handler is registered, the callback must be
    // defined. We can do this without the lock because we will never
    // have the signal handler defined without the handlers entry also
    // being filled in.
    ceph_assert(handlers[signum]);
    int r = write(handlers[signum]->pipefd[1], " ", 1);
    ceph_assert(r == 1);
  }
  // async-signal-safe: called from signal context via handler_signal_hook
  void queue_signal_info(int signum, siginfo_t *siginfo, void * content) {
    // If this signal handler is registered, the callback must be
    // defined. We can do this without the lock because we will never
    // have the signal handler defined without the handlers entry also
    // being filled in.
    ceph_assert(handlers[signum]);
    memcpy(&handlers[signum]->info_t, siginfo, sizeof(siginfo_t));
    int r = write(handlers[signum]->pipefd[1], " ", 1);
    ceph_assert(r == 1);
  }
  void register_handler(int signum, signal_handler_t handler, bool oneshot);
  void unregister_handler(int signum, signal_handler_t handler);
};
// Single global dispatcher instance; created by init_async_signal_handler().
static SignalHandler *g_signal_handler = NULL;
// The low-level hook installed via sigaction(); runs in signal context and
// only forwards the siginfo to the async-signal-safe queue.
static void handler_signal_hook(int signum, siginfo_t * siginfo, void * content) {
  g_signal_handler->queue_signal_info(signum, siginfo, content);
}
void SignalHandler::register_handler(int signum, signal_handler_t handler, bool oneshot)
{
int r;
ceph_assert(signum >= 0 && signum < 32);
safe_handler *h = new safe_handler;
r = pipe_cloexec(h->pipefd, 0);
ceph_assert(r == 0);
r = fcntl(h->pipefd[0], F_SETFL, O_NONBLOCK);
ceph_assert(r == 0);
h->handler = handler;
lock.lock();
handlers[signum] = h;
lock.unlock();
// signal thread so that it sees our new handler
signal_thread();
// install our handler
struct sigaction oldact;
struct sigaction act;
memset(&act, 0, sizeof(act));
act.sa_handler = (signal_handler_t)handler_signal_hook;
sigfillset(&act.sa_mask); // mask all signals in the handler
act.sa_flags = SA_SIGINFO | (oneshot ? SA_RESETHAND : 0);
int ret = sigaction(signum, &act, &oldact);
ceph_assert(ret == 0);
}
// Restore the default disposition for `signum` and tear down our entry.
// `handler` must match what was registered (sanity check only).
void SignalHandler::unregister_handler(int signum, signal_handler_t handler)
{
  ceph_assert(signum >= 0 && signum < 32);
  safe_handler *h = handlers[signum];
  ceph_assert(h);
  ceph_assert(h->handler == handler);
  // restore to default
  signal(signum, SIG_DFL);
  // _then_ remove our handlers entry
  {
    std::lock_guard l{lock};
    handlers[signum] = nullptr;
  }
  // closing the pipe wakes the worker's poll(), so it rebuilds its fd
  // list without this handler
  close(h->pipefd[0]);
  close(h->pipefd[1]);
  delete h;
}
// -------
// Public module-level API: a thin facade over the SignalHandler singleton.
// init must be called before any of the others; shutdown joins the worker
// thread and destroys the singleton.
void init_async_signal_handler()
{
  ceph_assert(!g_signal_handler);
  g_signal_handler = new SignalHandler;
}
void shutdown_async_signal_handler()
{
  ceph_assert(g_signal_handler);
  delete g_signal_handler;
  g_signal_handler = NULL;
}
// manually queue a signal as if it had been delivered (async-signal-safe)
void queue_async_signal(int signum)
{
  ceph_assert(g_signal_handler);
  g_signal_handler->queue_signal(signum);
}
void register_async_signal_handler(int signum, signal_handler_t handler)
{
  ceph_assert(g_signal_handler);
  g_signal_handler->register_handler(signum, handler, false);
}
// as above, but the OS disposition resets to default after one delivery
void register_async_signal_handler_oneshot(int signum, signal_handler_t handler)
{
  ceph_assert(g_signal_handler);
  g_signal_handler->register_handler(signum, handler, true);
}
void unregister_async_signal_handler(int signum, signal_handler_t handler)
{
  ceph_assert(g_signal_handler);
  g_signal_handler->unregister_handler(signum, handler);
}
| 19,070 | 26.128023 | 102 | cc |
null | ceph-main/src/global/signal_handler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_GLOBAL_SIGNAL_HANDLER_H
#define CEPH_GLOBAL_SIGNAL_HANDLER_H
#include <signal.h>
#include "acconfig.h"
#include <map>
#include <string>
typedef void (*signal_handler_t)(int);
namespace ceph {
struct BackTrace;
}
#if defined(HAVE_SIGDESCR_NP)
# define sig_str(signum) sigdescr_np(signum)
#elif defined(HAVE_REENTRANT_STRSIGNAL)
# define sig_str(signum) strsignal(signum)
#else
# define sig_str(signum) sys_siglist[signum]
#endif
void install_sighandler(int signum, signal_handler_t handler, int flags);
// handles SIGHUP
void sighup_handler(int signum);
// Install the standard Ceph signal handlers
void install_standard_sighandlers(void);
/// initialize async signal handler framework
void init_async_signal_handler();
/// shutdown async signal handler framework
void shutdown_async_signal_handler();
/// queue an async signal
void queue_async_signal(int signum);
/// install a safe, async, callback for the given signal
void register_async_signal_handler(int signum, signal_handler_t handler);
void register_async_signal_handler_oneshot(int signum, signal_handler_t handler);
/// uninstall a safe async signal callback
void unregister_async_signal_handler(int signum, signal_handler_t handler);
void generate_crash_dump(char *base,
const ceph::BackTrace& bt,
std::map<std::string,std::string> *extra = 0);
#endif
| 1,758 | 25.651515 | 81 | h |
null | ceph-main/src/global/signal_handler_win32.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (c) 2019 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "global/signal_handler.h"
// Windows has no POSIX signal delivery, so every entry point of the
// signal-handler API is a deliberate no-op stub; callers compile and run
// unchanged on this platform.
void install_sighandler(int signum, signal_handler_t handler, int flags) {}
void sighup_handler(int signum) {}
// Install the standard Ceph signal handlers
void install_standard_sighandlers(void){}
/// initialize async signal handler framework
void init_async_signal_handler(){}
/// shutdown async signal handler framework
void shutdown_async_signal_handler(){}
/// queue an async signal
void queue_async_signal(int signum){}
/// install a safe, async, callback for the given signal
void register_async_signal_handler(int signum, signal_handler_t handler){}
void register_async_signal_handler_oneshot(int signum, signal_handler_t handler){}
/// uninstall a safe async signal callback
void unregister_async_signal_handler(int signum, signal_handler_t handler){}
| 1,210 | 30.868421 | 82 | cc |
null | ceph-main/src/include/CompatSet.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2009 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMPATSET_H
#define CEPH_COMPATSET_H
#include <iostream>
#include <map>
#include <string>
#include "include/buffer.h"
#include "include/encoding.h"
#include "include/types.h"
#include "common/Formatter.h"
// CompatSet tracks three classes of on-disk/protocol feature flags
// (compat / ro_compat / incompat) so implementations can decide whether
// they may read and/or write data written by another implementation.
struct CompatSet {
  // A single named feature bit; valid ids are 1..63 (bit 0 is reserved,
  // see FeatureSet::decode for the legacy-encoding workaround).
  struct Feature {
    uint64_t id;
    std::string name;
    Feature(uint64_t _id, const std::string& _name) : id(_id), name(_name) {}
  };
  // A set of features: a 64-bit presence mask plus an id->name map.
  // Invariant: in memory, bit 0 of `mask` is always set; it is cleared on
  // encode (see encode/decode below for why).
  class FeatureSet {
    uint64_t mask;
    std::map<uint64_t, std::string> names;
  public:
    friend struct CompatSet;
    friend class CephCompatSet_AllSet_Test;
    friend class CephCompatSet_other_Test;
    friend class CephCompatSet_merge_Test;
    friend std::ostream& operator<<(std::ostream& out, const CompatSet::FeatureSet& fs);
    friend std::ostream& operator<<(std::ostream& out, const CompatSet& compat);
    FeatureSet() : mask(1), names() {}
    void insert(const Feature& f) {
      ceph_assert(f.id > 0);
      ceph_assert(f.id < 64);
      mask |= ((uint64_t)1<<f.id);
      names[f.id] = f.name;
    }
    bool contains(const Feature& f) const {
      return names.count(f.id);
    }
    bool contains(uint64_t f) const {
      return names.count(f);
    }
    /**
     * Getter instead of using name[] to be const safe
     */
    std::string get_name(uint64_t const f) const {
      std::map<uint64_t, std::string>::const_iterator i = names.find(f);
      ceph_assert(i != names.end());
      return i->second;
    }
    void remove(uint64_t f) {
      if (names.count(f)) {
	names.erase(f);
	mask &= ~((uint64_t)1<<f);
      }
    }
    void remove(const Feature& f) {
      remove(f.id);
    }
    void encode(ceph::buffer::list& bl) const {
      using ceph::encode;
      /* See below, mask always has the lowest bit set in memory, but
       * unset in the encoding */
      encode(mask & (~(uint64_t)1), bl);
      encode(names, bl);
    }
    void decode(ceph::buffer::list::const_iterator& bl) {
      using ceph::decode;
      decode(mask, bl);
      decode(names, bl);
      /**
       * Previously, there was a bug where insert did
       * mask |= f.id rather than mask |= (1 << f.id).
       * In FeatureSets from those version, mask always
       * has the lowest bit set.  Since then, masks always
       * have the lowest bit unset.
       *
       * When we encounter such a FeatureSet, we have to
       * reconstruct the mask from the names map.
       */
      if (mask & 1) {
	mask = 1;
	std::map<uint64_t, std::string> temp_names;
	temp_names.swap(names);
	for (auto i = temp_names.begin(); i != temp_names.end(); ++i) {
	  insert(Feature(i->first, i->second));
	}
      } else {
	mask |= 1;
      }
    }
    void dump(ceph::Formatter *f) const {
      for (auto p = names.cbegin(); p != names.cend(); ++p) {
	char s[18];
	// NOTE(review): s[] fits "feature_" plus up to 9 digits; ids are
	// asserted < 64 on insert so this is enough in practice -- confirm
	// before allowing wider ids.
	snprintf(s, sizeof(s), "feature_%llu", (unsigned long long)p->first);
	f->dump_string(s, p->second);
      }
    }
  };
  // These features have no impact on the read / write status
  FeatureSet compat;
  // If any of these features are missing, read is possible ( as long
  // as no incompat feature is missing ) but it is not possible to write
  FeatureSet ro_compat;
  // If any of these features are missing, read or write is not possible
  FeatureSet incompat;
  CompatSet(FeatureSet& _compat, FeatureSet& _ro_compat, FeatureSet& _incompat) :
    compat(_compat), ro_compat(_ro_compat), incompat(_incompat) {}
  CompatSet() : compat(), ro_compat(), incompat() { }
  /* does this filesystem implementation have the
     features required to read the other? */
  bool readable(CompatSet const& other) const {
    return !((other.incompat.mask ^ incompat.mask) & other.incompat.mask);
  }
  /* does this filesystem implementation have the
     features required to write the other? */
  bool writeable(CompatSet const& other) const {
    return readable(other) &&
      !((other.ro_compat.mask ^ ro_compat.mask) & other.ro_compat.mask);
  }
  /* Compare this CompatSet to another.
   * CAREFULLY NOTE: This operation is NOT commutative.
   * a > b DOES NOT imply that b < a.
   * If returns:
   * 0: The CompatSets have the same feature set.
   * 1: This CompatSet's features are a strict superset of the other's.
   * -1: This CompatSet is missing at least one feature
   *     described in the other. It may still have more features, though.
   */
  int compare(const CompatSet& other) const {
    if ((other.compat.mask == compat.mask) &&
	(other.ro_compat.mask == ro_compat.mask) &&
	(other.incompat.mask == incompat.mask)) return 0;
    //okay, they're not the same
    //if we're writeable we have a superset of theirs on incompat and ro_compat
    if (writeable(other) && !((other.compat.mask ^ compat.mask)
			      & other.compat.mask)) return 1;
    //if we make it here, we weren't writeable or had a difference compat set
    return -1;
  }
  /* Get the features supported by other CompatSet but not this one,
   * as a CompatSet.
   */
  CompatSet unsupported(const CompatSet& other) const {
    CompatSet diff;
    uint64_t other_compat =
      ((other.compat.mask ^ compat.mask) & other.compat.mask);
    uint64_t other_ro_compat =
      ((other.ro_compat.mask ^ ro_compat.mask) & other.ro_compat.mask);
    uint64_t other_incompat =
      ((other.incompat.mask ^ incompat.mask) & other.incompat.mask);
    for (int id = 1; id < 64; ++id) {
      uint64_t mask = (uint64_t)1 << id;
      if (mask & other_compat) {
	diff.compat.insert( Feature(id, other.compat.names.at(id)));
      }
      if (mask & other_ro_compat) {
	diff.ro_compat.insert(Feature(id, other.ro_compat.names.at(id)));
      }
      if (mask & other_incompat) {
	diff.incompat.insert( Feature(id, other.incompat.names.at(id)));
      }
    }
    return diff;
  }
  /* Merge features supported by other CompatSet into this one.
   * Return: true if some features were merged
   */
  bool merge(CompatSet const & other) {
    uint64_t other_compat =
      ((other.compat.mask ^ compat.mask) & other.compat.mask);
    uint64_t other_ro_compat =
      ((other.ro_compat.mask ^ ro_compat.mask) & other.ro_compat.mask);
    uint64_t other_incompat =
      ((other.incompat.mask ^ incompat.mask) & other.incompat.mask);
    if (!other_compat && !other_ro_compat && !other_incompat)
      return false;
    for (int id = 1; id < 64; ++id) {
      uint64_t mask = (uint64_t)1 << id;
      if (mask & other_compat) {
	compat.insert( Feature(id, other.compat.get_name(id)));
      }
      if (mask & other_ro_compat) {
	ro_compat.insert(Feature(id, other.ro_compat.get_name(id)));
      }
      if (mask & other_incompat) {
	incompat.insert( Feature(id, other.incompat.get_name(id)));
      }
    }
    return true;
  }
  // compact hex rendering of the three masks (no feature names)
  std::ostream& printlite(std::ostream& o) const {
    o << "{c=[" << std::hex << compat.mask << "]";
    o << ",r=[" << std::hex << ro_compat.mask << "]";
    o << ",i=[" << std::hex << incompat.mask << "]}";
    o << std::dec;
    return o;
  }
  void encode(ceph::buffer::list& bl) const {
    compat.encode(bl);
    ro_compat.encode(bl);
    incompat.encode(bl);
  }
  void decode(ceph::buffer::list::const_iterator& bl) {
    compat.decode(bl);
    ro_compat.decode(bl);
    incompat.decode(bl);
  }
  void dump(ceph::Formatter *f) const {
    f->open_object_section("compat");
    compat.dump(f);
    f->close_section();
    f->open_object_section("ro_compat");
    ro_compat.dump(f);
    f->close_section();
    f->open_object_section("incompat");
    incompat.dump(f);
    f->close_section();
  }
  static void generate_test_instances(std::list<CompatSet*>& o) {
    o.push_back(new CompatSet);
    o.push_back(new CompatSet);
    o.back()->compat.insert(Feature(1, "one"));
    o.back()->compat.insert(Feature(2, "two"));
    o.back()->ro_compat.insert(Feature(4, "four"));
    o.back()->incompat.insert(Feature(3, "three"));
  }
};
WRITE_CLASS_ENCODER(CompatSet)
// Render a feature as F(<id>, "<name>") for logs and debugging.
inline std::ostream& operator<<(std::ostream& out, const CompatSet::Feature& f)
{
  out << "F(" << f.id << ", \"" << f.name << "\")";
  return out;
}
// A FeatureSet prints as its id->name map; the mask is implied by the ids.
inline std::ostream& operator<<(std::ostream& out, const CompatSet::FeatureSet& fs)
{
  return out << fs.names;
}
// Full rendering of all three feature classes, e.g.
// "compat={...},rocompat={...},incompat={...}".
inline std::ostream& operator<<(std::ostream& out, const CompatSet& compat)
{
  out << "compat=" << compat.compat;
  out << ",rocompat=" << compat.ro_compat;
  out << ",incompat=" << compat.incompat;
  return out;
}
#endif
| 8,745 | 29.58042 | 88 | h |
null | ceph-main/src/include/Context.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_CONTEXT_H
#define CEPH_CONTEXT_H
#include "common/dout.h"
#include <functional>
#include <list>
#include <memory>
#include <set>
#include <boost/function.hpp>
#include <boost/system/error_code.hpp>
#include "common/error_code.h"
#include "include/ceph_assert.h"
#include "common/ceph_mutex.h"
#define mydout(cct, v) lgeneric_subdout(cct, context, v)
/*
* GenContext - abstract callback class
*/
// Abstract one-shot callback parameterized on its argument type.
// complete() invokes finish() and then deletes the object, so instances
// must be heap-allocated and completed at most once.  Non-copyable.
template <typename T>
class GenContext {
  GenContext(const GenContext& other);
  const GenContext& operator=(const GenContext& other);
 protected:
  virtual void finish(T t) = 0;
 public:
  GenContext() {}
  virtual ~GenContext() {}       // we want a virtual destructor!!!
  template <typename C>
  void complete(C &&t) {
    finish(std::forward<C>(t));
    delete this;
  }
  // allow a GenContext to be used directly as a callable
  template <typename C>
  void operator()(C &&t) noexcept {
    complete(std::forward<C>(t));
  }
  // zero-argument call form, available only when T is default-constructible
  template<typename U = T>
  auto operator()() noexcept
    -> typename std::enable_if<std::is_default_constructible<U>::value,
			       void>::type {
    complete(T{});
  }
  // a copyable callable that refers to (but does not own) this context
  std::reference_wrapper<GenContext> func() {
    return std::ref(*this);
  }
};
template <typename T>
using GenContextURef = std::unique_ptr<GenContext<T> >;
/*
* Context - abstract callback class
*/
class Finisher;
// Abstract one-shot callback taking an int result code.  complete()
// invokes finish() and then deletes the object, so a Context must be
// heap-allocated and completed at most once.  Non-copyable.
class Context {
  Context(const Context& other);
  const Context& operator=(const Context& other);
 protected:
  virtual void finish(int r) = 0;
  // variant of finish that is safe to call "synchronously."  override should
  // return true.
  virtual bool sync_finish(int r) {
    return false;
  }
 public:
  Context() {}
  virtual ~Context() {}       // we want a virtual destructor!!!
  virtual void complete(int r) {
    finish(r);
    delete this;
  }
  // returns true (and consumes *this) only if the subclass supports
  // synchronous completion via sync_finish()
  virtual bool sync_complete(int r) {
    if (sync_finish(r)) {
      delete this;
      return true;
    }
    return false;
  }
  // boost/asio-style completion: translate the error code to an int
  void complete(boost::system::error_code ec) {
    complete(ceph::from_error_code(ec));
  }
  void operator()(boost::system::error_code ec) noexcept {
    complete(ec);
  }
  void operator()() noexcept {
    complete({});
  }
  // a copyable callable that refers to (but does not own) this context
  std::reference_wrapper<Context> func() {
    return std::ref(*this);
  }
};
/**
* Simple context holding a single object
*/
template<class T>
class ContainerContext : public Context {
  T obj;  // held copy; destroyed when the context completes
public:
  // NOTE(review): takes a non-const lvalue reference and copies it, so
  // this cannot bind to temporaries -- confirm before passing rvalues.
  ContainerContext(T &obj) : obj(obj) {}
  void finish(int r) override {}  // no-op: keeping obj alive is the point
};
// Convenience factory for ContainerContext.
// NOTE(review): ContainerContext's constructor takes T&, so forwarding an
// rvalue here does not compile -- presumably only lvalue use is intended;
// confirm against callers.
template <typename T>
ContainerContext<T> *make_container_context(T &&t) {
  return new ContainerContext<T>(std::forward<T>(t));
}
// Context that carries a value of type T alongside an inner context;
// completing the wrapper completes (and thus deletes) the inner context.
template <class T>
struct Wrapper : public Context {
  Context *to_run;  // may be null, in which case finish is a no-op
  T val;            // payload kept alive for the duration of the wrapper
  Wrapper(Context *to_run, T val) : to_run(to_run), val(val) {}
  void finish(int r) override {
    if (to_run)
      to_run->complete(r);
  }
};
// RAII helper: completes `to_run` (with result 0) when destroyed.
// Typically held via RunOnDeleteRef so the callback fires when the last
// reference goes away.
struct RunOnDelete {
  Context *to_run;
  RunOnDelete(Context *to_run) : to_run(to_run) {}
  ~RunOnDelete() {
    if (to_run)
      to_run->complete(0);
  }
};
typedef std::shared_ptr<RunOnDelete> RunOnDeleteRef;
// Adapts any callable into a Context.  The callable may either take the
// int result code or take no arguments (selected at compile time).
template <typename T>
class LambdaContext : public Context {
public:
  LambdaContext(T &&t) : t(std::forward<T>(t)) {}
  void finish(int r) override {
    if constexpr (std::is_invocable_v<T, int>)
      t(r);
    else
      t();
  }
private:
  T t;
};
// Wrap a callable in a heap-allocated LambdaContext.
// Uses std::forward, not std::move: `t` is a forwarding reference, and
// std::move(t) fails to compile when T is deduced as an lvalue reference
// (LambdaContext<U&>'s constructor then expects an lvalue).  Rvalue
// callers behave exactly as before; lvalue callers now work too, with the
// closure holding a reference the caller must keep alive.
template <typename T>
LambdaContext<T> *make_lambda_context(T &&t) {
  return new LambdaContext<T>(std::forward<T>(t));
}
// Adapts a callable F into a GenContext<T>; the callable receives the
// completion value forwarded as-is.
template <typename F, typename T>
struct LambdaGenContext : GenContext<T> {
  F f;
  LambdaGenContext(F &&f) : f(std::forward<F>(f)) {}
  void finish(T t) override {
    f(std::forward<T>(t));
  }
};
// Wrap a callable in a uniquely-owned LambdaGenContext.
// Uses std::forward, not std::move: `f` is a forwarding reference, and
// std::move(f) fails to compile when F is deduced as an lvalue reference
// (LambdaGenContext<U&, T>'s constructor then expects an lvalue).  Rvalue
// callers are unchanged; lvalue callers now work, with the closure holding
// a reference the caller must keep alive.
template <typename T, typename F>
GenContextURef<T> make_gen_lambda_context(F &&f) {
  return GenContextURef<T>(new LambdaGenContext<F, T>(std::forward<F>(f)));
}
/*
* finish and destroy a list of Contexts
*/
// Complete (and thereby delete) every Context in `finished` with `result`.
// The container is swapped into a local first, so callbacks that re-add
// contexts to `finished` do not interfere with this pass.  `cct` may be
// null, in which case no debug logging is done.
template<class C>
inline void finish_contexts(CephContext *cct, C& finished, int result = 0)
{
  if (finished.empty())
    return;
  C ls;
  ls.swap(finished); // swap out of place to avoid weird loops
  if (cct)
    mydout(cct,10) << ls.size() << " contexts to finish with " << result << dendl;
  for (Context* c : ls) {
    if (cct)
      mydout(cct,10) << "---- " << c << dendl;
    c->complete(result);
  }
}
// A Context that does nothing on completion (placeholder callback).
class C_NoopContext : public Context {
public:
  void finish(int r) override { }
};
// Completes an inner context while holding the given mutex.  If this
// wrapper is destroyed without being completed, the inner context is
// deleted (not run).
struct C_Lock : public Context {
  ceph::mutex *lock;
  Context *fin;
  C_Lock(ceph::mutex *l, Context *c) : lock(l), fin(c) {}
  ~C_Lock() override {
    delete fin;
  }
  void finish(int r) override {
    if (fin) {
      std::lock_guard l{*lock};
      fin->complete(r);
      fin = NULL;  // cleared so the destructor doesn't double-handle it
    }
  }
};
/*
* C_Contexts - set of Contexts
*
* ContextType must be an ancestor class of ContextInstanceType, or the same class.
* ContextInstanceType must be default-constructable.
*/
// A context that fans completion out to a collection of sub-contexts:
// completing it completes every added context with the same result.
// Contexts never run are deleted by the destructor.
template <class ContextType, class ContextInstanceType, class Container = std::list<ContextType *>>
class C_ContextsBase : public ContextInstanceType {
public:
  CephContext *cct;
  Container contexts;
  C_ContextsBase(CephContext *cct_)
    : cct(cct_)
  {
  }
  ~C_ContextsBase() override {
    for (auto c : contexts) {
      delete c;
    }
  }
  void add(ContextType* c) {
    contexts.push_back(c);
  }
  // take ownership of all contexts in ls, leaving ls empty
  void take(Container& ls) {
    Container c;
    c.swap(ls);
    if constexpr (std::is_same_v<Container, std::list<ContextType *>>) {
      contexts.splice(contexts.end(), c);
    } else {
      contexts.insert(contexts.end(), c.begin(), c.end());
    }
  }
  void complete(int r) override {
    // Neuter any ContextInstanceType custom complete(), because although
    // I want to look like it, I don't actually want to run its code.
    Context::complete(r);
  }
  void finish(int r) override {
    finish_contexts(cct, contexts, r);
  }
  bool empty() { return contexts.empty(); }
  // Collapse a container of contexts to a single context:
  // null for none, the context itself for one, or a C_ContextsBase
  // wrapping all of them.  The input container is emptied.
  template<class C>
  static ContextType *list_to_context(C& cs) {
    if (cs.size() == 0) {
      return 0;
    } else if (cs.size() == 1) {
      ContextType *c = cs.front();
      cs.clear();
      return c;
    } else {
      C_ContextsBase<ContextType, ContextInstanceType> *c(new C_ContextsBase<ContextType, ContextInstanceType>(0));
      c->take(cs);
      return c;
    }
  }
};
typedef C_ContextsBase<Context, Context> C_Contexts;
/*
* C_Gather
*
* ContextType must be an ancestor class of ContextInstanceType, or the same class.
* ContextInstanceType must be default-constructable.
*
* BUG:? only reports error from last sub to have an error return
*/
// Gather: hands out sub-contexts via new_sub(); once activate() has been
// called and every sub has completed (or been deleted), the finisher is
// completed with the first negative result seen (see the BUG note above
// about error reporting) and the gather deletes itself.
template <class ContextType, class ContextInstanceType>
class C_GatherBase {
private:
  CephContext *cct;
  int result = 0;           // first error (r < 0) reported by any sub
  ContextType *onfinish;
#ifdef DEBUG_GATHER
  std::set<ContextType*> waitfor;
#endif
  int sub_created_count = 0;
  int sub_existing_count = 0;   // subs handed out but not yet finished
  mutable ceph::recursive_mutex lock =
    ceph::make_recursive_mutex("C_GatherBase::lock"); // disable lockdep
  bool activated = false;
  void sub_finish(ContextType* sub, int r) {
    lock.lock();
#ifdef DEBUG_GATHER
    ceph_assert(waitfor.count(sub));
    waitfor.erase(sub);
#endif
    --sub_existing_count;
    mydout(cct,10) << "C_GatherBase " << this << ".sub_finish(r=" << r << ") " << sub
#ifdef DEBUG_GATHER
		   << " (remaining " << waitfor << ")"
#endif
		   << dendl;
    if (r < 0 && result == 0)
      result = r;
    // only fire once activated AND the last sub has finished
    if ((activated == false) || (sub_existing_count != 0)) {
      lock.unlock();
      return;
    }
    lock.unlock();
    delete_me();
  }
  // run the finisher (if any) and destroy the gather
  void delete_me() {
    if (onfinish) {
      onfinish->complete(result);
      onfinish = 0;
    }
    delete this;
  }
  class C_GatherSub : public ContextInstanceType {
    C_GatherBase *gather;
  public:
    C_GatherSub(C_GatherBase *g) : gather(g) {}
    void complete(int r) override {
      // Cancel any customized complete() functionality
      // from the Context subclass we're templated for,
      // we only want to hit that in onfinish, not at each
      // sub finish.  e.g. MDSInternalContext.
      Context::complete(r);
    }
    void finish(int r) override {
      gather->sub_finish(this, r);
      gather = 0;
    }
    ~C_GatherSub() override {
      // a sub deleted without running still counts as finished (r=0)
      if (gather)
	gather->sub_finish(this, 0);
    }
  };
public:
  C_GatherBase(CephContext *cct_, ContextType *onfinish_)
    : cct(cct_), onfinish(onfinish_)
  {
    mydout(cct,10) << "C_GatherBase " << this << ".new" << dendl;
  }
  ~C_GatherBase() {
    mydout(cct,10) << "C_GatherBase " << this << ".delete" << dendl;
  }
  // may only be called once, and only if no finisher was given at construction
  void set_finisher(ContextType *onfinish_) {
    std::lock_guard l{lock};
    ceph_assert(!onfinish);
    onfinish = onfinish_;
  }
  // no more subs may be created after this; if all subs already finished,
  // fires (and self-destructs) immediately
  void activate() {
    lock.lock();
    ceph_assert(activated == false);
    activated = true;
    if (sub_existing_count != 0) {
      lock.unlock();
      return;
    }
    lock.unlock();
    delete_me();
  }
  ContextType *new_sub() {
    std::lock_guard l{lock};
    ceph_assert(activated == false);
    sub_created_count++;
    sub_existing_count++;
    ContextType *s = new C_GatherSub(this);
#ifdef DEBUG_GATHER
    waitfor.insert(s);
#endif
    mydout(cct,10) << "C_GatherBase " << this << ".new_sub is " << sub_created_count << " " << s << dendl;
    return s;
  }
  inline int get_sub_existing_count() const {
    std::lock_guard l{lock};
    return sub_existing_count;
  }
  inline int get_sub_created_count() const {
    std::lock_guard l{lock};
    return sub_created_count;
  }
};
/*
* The C_GatherBuilder remembers each C_Context created by
* C_GatherBuilder.new_sub() in a C_Gather. When a C_Context created
* by new_sub() is complete(), C_Gather forgets about it. When
* C_GatherBuilder notices that there are no C_Context left in
* C_Gather, it calls complete() on the C_Context provided as the
* second argument of the constructor (finisher).
*
* How to use C_GatherBuilder:
*
* 1. Create a C_GatherBuilder on the stack
* 2. Call gather_bld.new_sub() as many times as you want to create new subs
* It is safe to call this 0 times, or 100, or anything in between.
* 3. If you didn't supply a finisher in the C_GatherBuilder constructor,
* set one with gather_bld.set_finisher(my_finisher)
* 4. Call gather_bld.activate()
*
* Example:
*
* C_SaferCond all_done;
* C_GatherBuilder gb(g_ceph_context, all_done);
* j.submit_entry(1, first, 0, gb.new_sub()); // add a C_Context to C_Gather
* j.submit_entry(2, first, 0, gb.new_sub()); // add a C_Context to C_Gather
* gb.activate(); // consume C_Context as soon as they complete()
* all_done.wait(); // all_done is complete() after all new_sub() are complete()
*
* The finisher may be called at any point after step 4, including immediately
* from the activate() function.
* The finisher will never be called before activate().
*
* Note: Currently, subs must be manually freed by the caller (for some reason.)
*/
// Builder wrapper around a GatherType (see the usage comment above):
// lazily constructs the gather on the first new_sub(), and hands the
// finisher over to it.
template <class ContextType, class GatherType>
class C_GatherBuilderBase
{
public:
  C_GatherBuilderBase(CephContext *cct_)
    : cct(cct_), c_gather(nullptr), finisher(nullptr), activated(false)
  {
  }
  C_GatherBuilderBase(CephContext *cct_, ContextType *finisher_)
    : cct(cct_), c_gather(nullptr), finisher(finisher_), activated(false)
  {
  }
  ~C_GatherBuilderBase() {
    if (c_gather == nullptr) {
      // No sub was ever created, so the gather never took ownership
      // of the finisher; drop it here.
      delete finisher;
      return;
    }
    ceph_assert(activated); // Don't forget to activate your C_Gather!
  }
  // Hand out one more sub-context, creating the gather on first use.
  ContextType *new_sub() {
    if (c_gather == nullptr)
      c_gather = new GatherType(cct, finisher);
    return c_gather->new_sub();
  }
  // Arm the gather.  A finisher must have been supplied by now.
  void activate() {
    if (c_gather == nullptr)
      return; // no subs were ever created; nothing to wait for
    ceph_assert(finisher != nullptr);
    activated = true;
    c_gather->activate();
  }
  void set_finisher(ContextType *finisher_) {
    finisher = finisher_;
    if (c_gather != nullptr)
      c_gather->set_finisher(finisher);
  }
  GatherType *get() const {
    return c_gather;
  }
  bool has_subs() const {
    return c_gather != nullptr;
  }
  int num_subs_created() {
    ceph_assert(!activated);
    return c_gather == nullptr ? 0 : c_gather->get_sub_created_count();
  }
  int num_subs_remaining() {
    ceph_assert(!activated);
    return c_gather == nullptr ? 0 : c_gather->get_sub_existing_count();
  }

private:
  CephContext *cct;
  GatherType *c_gather;
  ContextType *finisher;
  bool activated;
};
typedef C_GatherBase<Context, Context> C_Gather;
typedef C_GatherBuilderBase<Context, C_Gather > C_GatherBuilder;
// Abstract factory: implementations hand out a fresh ContextType
// instance per build() call.
template <class ContextType>
class ContextFactory {
public:
  virtual ~ContextFactory() {}
  virtual ContextType *build() = 0;
};
// Wrap a Context* in a move-only closure usable as a
// boost::system::error_code handler.  The closure owns the Context
// until invoked; release() hands ownership back just before
// complete() runs (complete() is presumably responsible for freeing
// the context — NOTE(review): relies on Context::complete deleting
// itself; confirm against Context's definition).
inline auto lambdafy(Context *c) {
  return [fin = std::unique_ptr<Context>(c)]
    (boost::system::error_code ec) mutable {
    fin.release()->complete(ceph::from_error_code(ec));
  };
}
#undef mydout
#endif
| 13,026 | 23.304104 | 115 | h |
null | ceph-main/src/include/Distribution.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_DISTRIBUTION_H
#define CEPH_DISTRIBUTION_H
#include <vector>
// A small discrete probability distribution: parallel vectors of
// outcome values and their probabilities (expected to sum to 1).
class Distribution {
  std::vector<float> p; // probability of each outcome
  std::vector<int> v;   // outcome values, parallel to p

public:
  /// Number of (value, probability) entries.  Now const-qualified.
  unsigned get_width() const {
    return p.size();
  }

  void clear() {
    p.clear();
    v.clear();
  }

  /// Append outcome @a val with probability @a pr.
  void add(int val, float pr) {
    p.push_back(pr);
    v.push_back(val);
  }

  /// Fill in random probabilities, normalized to sum to 1.
  void random() {
    float sum = 0.0;
    for (unsigned i = 0; i < p.size(); i++) {
      p[i] = (float)(rand() % 10000);
      sum += p[i];
    }
    if (sum == 0.0) {
      // Degenerate draw (every weight came out zero): fall back to a
      // uniform distribution instead of dividing by zero below.
      for (unsigned i = 0; i < p.size(); i++)
	p[i] = 1.0 / (float)p.size();
      return;
    }
    for (unsigned i = 0; i < p.size(); i++)
      p[i] /= sum;
  }

  /// Draw one value according to the stored probabilities.  Aborts if
  /// the probabilities do not cover the drawn point in [0, 1).
  int sample() {
    float s = (float)(rand() % 10000) / 10000.0;
    for (unsigned i = 0; i < p.size(); i++) {
      if (s < p[i]) return v[i];
      s -= p[i];
    }
    ceph_abort();
    return v[p.size() - 1]; // hmm. :/  (unreachable; placates the compiler)
  }

  /// Normalize probabilities in place so they sum to 1; returns the
  /// pre-normalization sum.  A zero sum leaves the entries untouched
  /// (previously this produced NaNs via 0/0).
  float normalize() {
    float s = 0.0;
    for (unsigned i = 0; i < p.size(); i++)
      s += p[i];
    if (s != 0.0) {
      for (unsigned i = 0; i < p.size(); i++)
	p[i] /= s;
    }
    return s;
  }
};
#endif
| 1,418 | 18.175676 | 71 | h |
null | ceph-main/src/include/addr_parsing.h | /*
* addr_parsing.h
*
* Created on: Sep 14, 2010
* Author: gregf
* contains functions used by Ceph to convert named addresses
* (eg ceph.com) into IP addresses (ie 127.0.0.1).
*/
#ifndef ADDR_PARSING_H_
#define ADDR_PARSING_H_
#ifdef __cplusplus
extern "C" {
#endif
int safe_cat(char **pstr, int *plen, int pos, const char *str2);
/*
* returns a string allocated by malloc; caller must free
*/
char *resolve_addrs(const char *orig_str);
#ifdef __cplusplus
}
#endif
#endif /* ADDR_PARSING_H_ */
| 525 | 17.137931 | 66 | h |
null | ceph-main/src/include/alloc_ptr.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_ALLOC_PTR_H
#define CEPH_ALLOC_PTR_H
#include <memory>
// alloc_ptr<T>: a unique_ptr-like owner that lazily default-constructs
// its pointee on first get()/dereference.  Copying is disallowed;
// moving transfers ownership.
//
// Fixes over the previous revision:
//  - move operations now take alloc_ptr&& (they took
//    alloc_ptr<pointer>&&, a *different* specialization whose private
//    `ptr` member is inaccessible);
//  - assignment operators return *this (they previously fell off the
//    end of a value-returning function: undefined behaviour);
//  - copy assignment is explicitly deleted (the old body copied a
//    std::unique_ptr, which cannot compile, and the copy constructor
//    was already deleted);
//  - ordered comparisons invoke std::less<...>()(a, b) etc. instead of
//    trying to construct std::less with two arguments (ill-formed).
template <class T>
class alloc_ptr
{
public:
  typedef typename std::pointer_traits< std::unique_ptr<T> >::pointer pointer;
  typedef typename std::pointer_traits< std::unique_ptr<T> >::element_type element_type;

  alloc_ptr() : ptr() {}
  // Forward anything a unique_ptr can be built from (raw pointer, ...).
  template<class U>
  alloc_ptr(U&& u) : ptr(std::forward<U>(u)) {}

  alloc_ptr(alloc_ptr&& rhs) noexcept : ptr(std::move(rhs.ptr)) {}
  alloc_ptr(const alloc_ptr& rhs) = delete;
  alloc_ptr& operator=(alloc_ptr&& rhs) noexcept {
    ptr = std::move(rhs.ptr);
    return *this;
  }
  alloc_ptr& operator=(const alloc_ptr& rhs) = delete;

  void swap(alloc_ptr& rhs) {
    ptr.swap(rhs.ptr);
  }
  // Give up ownership of the pointee (may be null if never touched).
  element_type* release() {
    return ptr.release();
  }
  void reset(element_type *p = nullptr) {
    ptr.reset(p);
  }
  // The accessors allocate a default-constructed element on first use;
  // `ptr` is mutable so this works through const references too.
  element_type* get() const {
    if (!ptr)
      ptr.reset(new element_type);
    return ptr.get();
  }
  element_type& operator*() const {
    if (!ptr)
      ptr.reset(new element_type);
    return *ptr;
  }
  element_type* operator->() const {
    if (!ptr)
      ptr.reset(new element_type);
    return ptr.get();
  }
  // True when an element has been allocated.
  operator bool() const {
    return !!ptr;
  }

  // Comparisons compare the pointees (allocating them if necessary),
  // not the pointers.
  friend bool operator< (const alloc_ptr& lhs, const alloc_ptr& rhs) {
    return std::less<element_type>()(*lhs, *rhs);
  }
  friend bool operator<=(const alloc_ptr& lhs, const alloc_ptr& rhs) {
    return std::less_equal<element_type>()(*lhs, *rhs);
  }
  friend bool operator> (const alloc_ptr& lhs, const alloc_ptr& rhs) {
    return std::greater<element_type>()(*lhs, *rhs);
  }
  friend bool operator>=(const alloc_ptr& lhs, const alloc_ptr& rhs) {
    return std::greater_equal<element_type>()(*lhs, *rhs);
  }
  friend bool operator==(const alloc_ptr& lhs, const alloc_ptr& rhs) {
    return *lhs == *rhs;
  }
  friend bool operator!=(const alloc_ptr& lhs, const alloc_ptr& rhs) {
    return *lhs != *rhs;
  }
private:
  mutable std::unique_ptr<element_type> ptr;
};
#endif
| 2,562 | 26.858696 | 90 | h |
null | ceph-main/src/include/any.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Adam C. Emerson <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef INCLUDE_STATIC_ANY
#define INCLUDE_STATIC_ANY
#include <any>
#include <cstddef>
#include <initializer_list>
#include <memory>
#include <typeinfo>
#include <type_traits>
#include <boost/smart_ptr/shared_ptr.hpp>
#include <boost/smart_ptr/make_shared.hpp>
namespace ceph {
namespace _any {
// Shared Functionality
// --------------------
//
// Common implementation details. Most functionality is here. We
// assume that destructors do not throw. Some of them might and
// they'll invoke terminate and that's fine.
//
// We are using the Curiously Recurring Template Pattern! We require
// that all classes inheriting from us provide:
//
// - `static constexpr size_t capacity`: Maximum capacity. No object
// larger than this may be
// stored. `dynamic` for dynamic.
// - `void* ptr() const noexcept`: returns a pointer to storage.
// (`alloc_storage` must have been called.
// `free_storage` must not have been called
// since.)
// - `void* alloc_storage(const std::size_t)`: allocate storage
// - `void free_storage() noexcept`: free storage. Must be idempotent.
//
// We provide most of the public interface, as well as the operator function,
// cast_helper, and the type() call.
// Set `capacity` to this value to indicate that there is no fixed
// capacity.
//
inline constexpr std::size_t dynamic = ~0;
// Driver Function
// ---------------
//
// The usual type-erasure control function trick. This one is simpler
// than usual since we punt on moving and copying. We could dispense
// with this and just store a deleter and a pointer to a typeinfo, but
// that would be twice the space.
//
// Moved out here so the type of `func_t` isn't dependent on the
// enclosing class.
//
enum class op { type, destroy };
// One driver instantiation per stored type: either report the type
// (write a type_info pointer through p) or destroy the object at p
// in place, leaving its storage alone.
template<typename T>
inline void op_func(const op o, void* p) noexcept {
  static const std::type_info& type = typeid(T);
  if (o == op::type) {
    *(reinterpret_cast<const std::type_info**>(p)) = &type;
  } else if (o == op::destroy) {
    reinterpret_cast<T*>(p)->~T();
  }
}
using func_t = void (*)(const op, void* p) noexcept;
// The base class
// --------------
//
// The `storage_t` parameter gives the type of the value that manages
// storage and allocation. We use it to create a protected data member
// (named `storage`). This allows us to sidestep the problem in
// initialization order where, where exposed constructors were using
// trying to allocate or free storage *before* the data members of the
// derived class were initialized.
//
// Making storage_t a member type of the derived class won't work, due
// to C++'s rules for nested types being *horrible*. Just downright
// *horrible*.
//
// CRTP base implementing the shared `any` machinery; D supplies the
// storage policy (see the requirements listed above).
template<typename D, typename storage_t>
class base {
  // Make definitions from our superclass visible
  // --------------------------------------------
  //
  // And check that they fit the requirements. At least those that are
  // statically checkable.
  //
  static constexpr std::size_t capacity = D::capacity;

  void* ptr() const noexcept {
    static_assert(
      noexcept(static_cast<const D*>(this)->ptr()) &&
      std::is_same_v<decltype(static_cast<const D*>(this)->ptr()), void*>,
      "‘void* ptr() const noexcept’ missing from superclass");
    return static_cast<const D*>(this)->ptr();
  }

  void* alloc_storage(const std::size_t z) {
    static_assert(
      std::is_same_v<decltype(static_cast<D*>(this)->alloc_storage(z)), void*>,
      "‘void* alloc_storage(const size_t)’ missing from superclass.");
    return static_cast<D*>(this)->alloc_storage(z);
  }

  void free_storage() noexcept {
    static_assert(
      noexcept(static_cast<D*>(this)->free_storage()) &&
      std::is_void_v<decltype(static_cast<D*>(this)->free_storage())>,
      "‘void free_storage() noexcept’ missing from superclass.");
    static_cast<D*>(this)->free_storage();
  }

  // Pile O' Templates
  // -----------------
  //
  // These are just verbose and better typed once than twice. They're
  // used for SFINAE and declaring noexcept.
  //
  template<class T>
  struct is_in_place_type_helper : std::false_type {};
  template<class T>
  struct is_in_place_type_helper<std::in_place_type_t<T>> : std::true_type {};

  template<class T>
  static constexpr bool is_in_place_type_v =
    is_in_place_type_helper<std::decay_t<T>>::value;

  // SFINAE condition for value initialized
  // constructors/assigners. This is analogous to the standard's
  // requirement that this overload only participate in overload
  // resolution if std::decay_t<T> is not the same type as the
  // any-type, nor a specialization of std::in_place_type_t
  //
  template<typename T>
  using value_condition_t = std::enable_if_t<
    !std::is_same_v<std::decay_t<T>, D> &&
    !is_in_place_type_v<std::decay_t<T>>>;

  // This `noexcept` condition for value construction lets
  // `immobile_any`'s value constructor/assigner be noexcept, so long
  // as the type's copy or move constructor cooperates.
  //
  template<typename T>
  static constexpr bool value_noexcept_v =
    std::is_nothrow_constructible_v<std::decay_t<T>, T> && capacity != dynamic;

  // SFINAE condition for in-place constructors/assigners
  //
  template<typename T, typename... Args>
  using in_place_condition_t = std::enable_if_t<std::is_constructible_v<
    std::decay_t<T>, Args...>>;

  // Analogous to the above. Give noexcept to immobile_any::emplace
  // when possible.
  //
  template<typename T, typename... Args>
  static constexpr bool in_place_noexcept_v =
    std::is_nothrow_constructible_v<std::decay_t<T>, Args...> &&
    capacity != dynamic;

private:
  // Functionality!
  // --------------

  // The driver function for the currently stored object. Whether this
  // is null is the canonical way to know whether an instance has a
  // value.
  //
  func_t func = nullptr;

  // Construct an object within ourselves. As you can see we give the
  // weak exception safety guarantee.
  //
  // NOTE(review): func is set before Td's constructor runs; if
  // alloc_storage or the constructor throws, reset() will invoke
  // op::destroy on storage that never held a constructed Td — looks
  // like UB; verify.
  //
  template<typename T, typename ...Args>
  std::decay_t<T>& construct(Args&& ...args) {
    using Td = std::decay_t<T>;
    static_assert(capacity == dynamic || sizeof(Td) <= capacity,
		  "Supplied type is too large for this specialization.");
    try {
      func = &op_func<Td>;
      return *new (reinterpret_cast<Td*>(alloc_storage(sizeof(Td))))
	Td(std::forward<Args>(args)...);
    } catch (...) {
      reset();
      throw;
    }
  }

protected:
  // We hold the storage, even if the superclass class manipulates it,
  // so that its default initialization comes soon enough for us to
  // use it in our constructors.
  //
  storage_t storage;

public:
  base() noexcept = default;
  ~base() noexcept {
    reset();
  }

protected:
  // Since some of our derived classes /can/ be copied or moved.
  //
  base(const base& rhs) noexcept : func(rhs.func) {
    if constexpr (std::is_copy_assignable_v<storage_t>) {
      storage = rhs.storage;
    }
  }
  base& operator =(const base& rhs) noexcept {
    reset();
    func = rhs.func;
    if constexpr (std::is_copy_assignable_v<storage_t>) {
      storage = rhs.storage;
    }
    return *this;
  }
  base(base&& rhs) noexcept : func(std::move(rhs.func)) {
    if constexpr (std::is_move_assignable_v<storage_t>) {
      storage = std::move(rhs.storage);
    }
    rhs.func = nullptr;
  }
  base& operator =(base&& rhs) noexcept {
    reset();
    func = rhs.func;
    if constexpr (std::is_move_assignable_v<storage_t>) {
      storage = std::move(rhs.storage);
    }
    rhs.func = nullptr;
    return *this;
  }

public:
  // Value construct/assign
  // ----------------------
  //
  template<typename T,
	   typename = value_condition_t<T>>
  base(T&& t) noexcept(value_noexcept_v<T>) {
    construct<T>(std::forward<T>(t));
  }

  // On exception, *this is set to empty.
  //
  template<typename T,
	   typename = value_condition_t<T>>
  base& operator =(T&& t) noexcept(value_noexcept_v<T>) {
    reset();
    construct<T>(std::forward<T>(t));
    return *this;
  }

  // In-place construct/assign
  // -------------------------
  //
  // I really hate the way the C++ standard library treats references
  // as if they were stepchildren in a Charles Dickens novel. I am
  // quite upset that std::optional lacks a specialization for
  // references. There's no legitimate reason for it. The whole
  // 're-seat or refuse' debate is simply a canard. The optional is
  // effectively a container, so of course it can be emptied or
  // reassigned. No, pointers are not an acceptable substitute. A
  // pointer gives an address in memory which may be null and which
  // may represent an object or may a location in which an object is
  // to be created. An optional reference, on the other hand, is a
  // reference to an initialized, live object or /empty/. This is an
  // obvious difference that should be communicable to any programmer
  // reading the code through the type system.
  //
  // `std::any`, even in the case of in-place construction,
  // only stores the decayed type. I suspect this was to get around
  // the question of whether, for a std::any holding a T&,
  // std::any_cast<T> should return a copy or throw
  // std::bad_any_cast.
  //
  // I think the appropriate response in that case would be to make a
  // copy if the type supports it and fail otherwise. Once a concrete
  // type is known the problem solves itself.
  //
  // If one were inclined, one could easily load the driver function
  // with a heavy subset of the type traits (those that depend only on
  // the type in question) and simply /ask/ whether it's a reference.
  //
  // At the moment, I'm maintaining compatibility with the standard
  // library except for copy/move semantics.
  //
  template<typename T,
	   typename... Args,
	   typename = in_place_condition_t<T, Args...>>
  base(std::in_place_type_t<T>,
       Args&& ...args) noexcept(in_place_noexcept_v<T, Args...>) {
    construct<T>(std::forward<Args>(args)...);
  }

  // On exception, *this is set to empty.
  //
  // NOTE(review): this emplace's constraint omits Args...
  // (in_place_condition_t<T>, cf. the in-place constructor above which
  // uses in_place_condition_t<T, Args...>) — likely unintended;
  // confirm.
  //
  template<typename T,
	   typename... Args,
	   typename = in_place_condition_t<T>>
  std::decay_t<T>& emplace(Args&& ...args) noexcept(in_place_noexcept_v<
						    T, Args...>) {
    reset();
    return construct<T>(std::forward<Args>(args)...);
  }

  template<typename T,
	   typename U,
	   typename... Args,
	   typename = in_place_condition_t<T, std::initializer_list<U>,
					   Args...>>
  base(std::in_place_type_t<T>,
       std::initializer_list<U> i,
       Args&& ...args) noexcept(in_place_noexcept_v<T, std::initializer_list<U>,
						    Args...>) {
    construct<T>(i, std::forward<Args>(args)...);
  }

  // On exception, *this is set to empty.
  //
  template<typename T,
	   typename U,
	   typename... Args,
	   typename = in_place_condition_t<T, std::initializer_list<U>,
					   Args...>>
  std::decay_t<T>& emplace(std::initializer_list<U> i,
			   Args&& ...args) noexcept(in_place_noexcept_v<T,
									std::initializer_list<U>,
									Args...>) {
    reset();
    return construct<T>(i,std::forward<Args>(args)...);
  }

  // Empty ourselves, using the subclass to free any storage.
  //
  void reset() noexcept {
    if (has_value()) {
      func(op::destroy, ptr());
      func = nullptr;
    }
    free_storage();
  }

  // NOTE(review): std::enable_if here lacks ::type (or the _t alias),
  // so it names a (always valid) type and never SFINAEs this overload
  // away — the swappability constraint is inert; confirm intent.
  //
  template<typename U = storage_t,
	   typename = std::enable_if<std::is_swappable_v<storage_t>>>
  void swap(base& rhs) {
    using std::swap;
    swap(func, rhs.func);
    swap(storage, rhs.storage);
  }

  // All other functions should use this function to test emptiness
  // rather than examining `func` directly.
  //
  bool has_value() const noexcept {
    return !!func;
  }

  // Returns the type of the value stored, if any.
  //
  const std::type_info& type() const noexcept {
    if (has_value()) {
      const std::type_info* t;
      func(op::type, reinterpret_cast<void*>(&t));
      return *t;
    } else {
      return typeid(void);
    }
  }

  template<typename T, typename U, typename V>
  friend inline void* cast_helper(const base<U, V>& b) noexcept;
};
// Function used by all `any_cast` functions
//
// Returns a void* to the contents if they exist and match the
// requested type, otherwise `nullptr`.
//
// Returns a pointer to the stored value when one exists and matches T,
// otherwise nullptr.  Fast path: identical driver instantiation; slow
// path: compare type_info (covers cross-DSO duplicates of op_func<T>).
template<typename T, typename U, typename V>
inline void* cast_helper(const base<U, V>& b) noexcept {
  if (!b.func)
    return nullptr;
  const bool match = (&op_func<T> == b.func) || (b.type() == typeid(T));
  return match ? b.ptr() : nullptr;
}
}
// `any_cast`
// ==========
//
// Just the usual gamut of `any_cast` overloads. These get a bit
// repetitive and it would be nice to think of a way to collapse them
// down a bit.
//
// The pointer pair!
//
// Pointer forms: nullptr in, or type mismatch, yields nullptr out.
template<typename T, typename U, typename V>
inline T* any_cast(_any::base<U, V>* a) noexcept {
  return a ? static_cast<T*>(_any::cast_helper<std::decay_t<T>>(*a)) : nullptr;
}

template<typename T, typename U, typename V>
inline const T* any_cast(const _any::base<U, V>* a) noexcept {
  return a ? static_cast<T*>(_any::cast_helper<std::decay_t<T>>(*a)) : nullptr;
}

// Reference forms: throw std::bad_any_cast on mismatch.  While the
// any itself may be uncopyable, extracting a value works for any type
// the requested T supports.
template<typename T, typename U, typename V>
inline T any_cast(_any::base<U, V>& a) {
  static_assert(std::is_reference_v<T> ||
		std::is_copy_constructible_v<T>,
		"The supplied type must be either a reference or "
		"copy constructible.");
  if (auto p = any_cast<std::decay_t<T>>(&a); p != nullptr)
    return static_cast<T>(*p);
  throw std::bad_any_cast();
}

template<typename T, typename U, typename V>
inline T any_cast(const _any::base<U, V>& a) {
  static_assert(std::is_reference_v<T> ||
		std::is_copy_constructible_v<T>,
		"The supplied type must be either a reference or "
		"copy constructible.");
  if (auto p = any_cast<std::decay_t<T>>(&a); p != nullptr)
    return static_cast<T>(*p);
  throw std::bad_any_cast();
}

// Rvalue forms: move the value out of a dying any...
template<typename T, typename U, typename V>
inline std::enable_if_t<(std::is_move_constructible_v<T> ||
			 std::is_copy_constructible_v<T>) &&
			!std::is_rvalue_reference_v<T>, T>
any_cast(_any::base<U, V>&& a) {
  if (auto p = any_cast<std::decay_t<T>>(&a); p != nullptr)
    return std::move(*p);
  throw std::bad_any_cast();
}

// ...or bind an rvalue reference to it when T is one.
template<typename T, typename U, typename V>
inline std::enable_if_t<std::is_rvalue_reference_v<T>, T>
any_cast(_any::base<U, V>&& a) {
  if (auto p = any_cast<std::decay_t<T>>(&a); p != nullptr)
    return static_cast<T>(*p);
  throw std::bad_any_cast();
}
// `immobile_any`
// ==============
//
// Sometimes, uncopyable objects exist and I want to do things with
// them. The C++ standard library is really quite keen on insisting
// things be copyable before it deigns to work. I find this annoying.
//
// Also, the allocator, while useful, is really not considerate of
// other people's time. Every time we go to visit it, it takes us
// quite an awfully long time to get away again. As such, I've been
// trying to avoid its company whenever it is convenient and seemly.
//
// We accept any type that will fit in the declared capacity. You may
// store types with throwing destructors, but terminate will be
// invoked when they throw.
//
// Fixed-capacity any: the value lives in an inline aligned buffer, so
// no allocation ever happens; the price is that the any itself can be
// neither copied nor moved.
template<std::size_t S>
class immobile_any : public _any::base<immobile_any<S>,
				       std::aligned_storage_t<S>> {
  using base = _any::base<immobile_any<S>, std::aligned_storage_t<S>>;
  friend base;

  using _any::base<immobile_any<S>, std::aligned_storage_t<S>>::storage;

  // Superclass requirements!
  // ------------------------
  //
  // Simple as anything. We have a buffer of fixed size and return the
  // pointer to it when asked.
  //
  static constexpr std::size_t capacity = S;
  void* ptr() const noexcept {
    return const_cast<void*>(static_cast<const void*>(&storage));
  }
  void* alloc_storage(std::size_t) noexcept {
    return ptr();
  }
  void free_storage() noexcept {}

  static_assert(capacity != _any::dynamic,
		"That is not a valid size for an immobile_any.");

public:
  immobile_any() noexcept = default;

  immobile_any(const immobile_any&) = delete;
  immobile_any& operator =(const immobile_any&) = delete;
  immobile_any(immobile_any&&) = delete;
  immobile_any& operator =(immobile_any&&) = delete;

  using base::base;
  using base::operator =;

  void swap(immobile_any&) = delete;
};
// Convenience factories mirroring std::make_any.
template<typename T, std::size_t S, typename... Args>
inline immobile_any<S> make_immobile_any(Args&& ...args) {
  return immobile_any<S>(std::in_place_type<T>, std::forward<Args>(args)...);
}

template<typename T, std::size_t S, typename U, typename... Args>
inline immobile_any<S> make_immobile_any(std::initializer_list<U> i, Args&& ...args) {
  return immobile_any<S>(std::in_place_type<T>, i, std::forward<Args>(args)...);
}
// `unique_any`
// ============
//
// Oh dear. Now we're getting back into allocation. You don't think
// the allocator noticed all those mean things we said about it, do
// you?
//
// Well. Okay, allocator. Sometimes when it's the middle of the night
// and you're writing template code you say things you don't exactly
// mean. If it weren't for you, we wouldn't have any memory to run all
// our programs in at all. Really, I'm just being considerate of
// *your* needs, trying to avoid having to run to you every time we
// instantiate a type, making a few that can be self-sufficient…uh…
//
// **Anyway**, this is movable but not copyable, as you should expect
// from anything with ‘unique’ in the name.
//
// An any backed by a single heap allocation; movable but not copyable
// (ownership is unique, as the name says).
//
// Fix: ptr() previously contained an unreachable `return nullptr;`
// after its return statement — dead code removed.
class unique_any : public _any::base<unique_any, std::unique_ptr<std::byte[]>> {
  using base = _any::base<unique_any, std::unique_ptr<std::byte[]>>;
  friend base;

  using base::storage;

  // Superclass requirements
  // -----------------------
  //
  // Our storage is a single chunk of RAM owned by a
  // `std::unique_ptr`.
  //
  static constexpr std::size_t capacity = _any::dynamic;
  void* ptr() const noexcept {
    return static_cast<void*>(storage.get());
  }

  void* alloc_storage(const std::size_t z) {
    storage.reset(new std::byte[z]);
    return ptr();
  }

  void free_storage() noexcept {
    storage.reset();
  }

public:
  unique_any() noexcept = default;
  ~unique_any() noexcept = default;

  unique_any(const unique_any&) = delete;
  unique_any& operator =(const unique_any&) = delete;

  // We can rely on the behavior of `unique_ptr` and the base class to
  // give us a default move constructor that does the right thing.
  //
  unique_any(unique_any&& rhs) noexcept = default;
  unique_any& operator =(unique_any&& rhs) = default;

  using base::base;
  using base::operator =;
};
// ADL swap, forwarding to the member swap.
inline void swap(unique_any& lhs, unique_any& rhs) noexcept {
  lhs.swap(rhs);
}

// Convenience factories mirroring std::make_any.
template<typename T, typename... Args>
inline unique_any make_unique_any(Args&& ...args) {
  return unique_any(std::in_place_type<T>, std::forward<Args>(args)...);
}

template<typename T, typename U, typename... Args>
inline unique_any make_unique_any(std::initializer_list<U> i, Args&& ...args) {
  return unique_any(std::in_place_type<T>, i, std::forward<Args>(args)...);
}
// `shared_any`
// ============
//
// Once more with feeling!
//
// This is both copyable *and* movable. In case you need that sort of
// thing. It seemed a reasonable completion.
//
// An any that is both copyable and movable; copies share ownership of
// the underlying storage via boost::shared_ptr.
class shared_any : public _any::base<shared_any, boost::shared_ptr<std::byte[]>> {
  using base = _any::base<shared_any, boost::shared_ptr<std::byte[]>>;
  friend base;

  using base::storage;

  // Superclass requirements
  // -----------------------
  //
  // Our storage is a single chunk of RAM allocated from the
  // heap. This time it's owned by a `boost::shared_ptr` so we can use
  // `boost::make_shared_noinit`. (This lets us get the optimization
  // that allocates array and control block in one without wasting
  // time on `memset`.)
  //
  static constexpr std::size_t capacity = _any::dynamic;
  void* ptr() const noexcept {
    return static_cast<void*>(storage.get());
  }

  void* alloc_storage(std::size_t n) {
    storage = boost::make_shared_noinit<std::byte[]>(n);
    return ptr();
  }

  void free_storage() noexcept {
    storage.reset();
  }

public:
  shared_any() noexcept = default;
  ~shared_any() noexcept = default;

  shared_any(const shared_any& rhs) noexcept = default;
  shared_any& operator =(const shared_any&) noexcept = default;

  shared_any(shared_any&& rhs) noexcept = default;
  shared_any& operator =(shared_any&& rhs) noexcept = default;

  using base::base;
  using base::operator =;
};
// ADL swap, forwarding to the member swap.
inline void swap(shared_any& lhs, shared_any& rhs) noexcept {
  lhs.swap(rhs);
}

// Convenience factories mirroring std::make_any.
template<typename T, typename... Args>
inline shared_any make_shared_any(Args&& ...args) {
  return shared_any(std::in_place_type<T>, std::forward<Args>(args)...);
}

template<typename T, typename U, typename... Args>
inline shared_any make_shared_any(std::initializer_list<U> i, Args&& ...args) {
  return shared_any(std::in_place_type<T>, i, std::forward<Args>(args)...);
}
}
#endif // INCLUDE_STATIC_ANY
| 21,732 | 29.82695 | 86 | h |
null | ceph-main/src/include/bitmapper.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_BITMAPPER_H
#define CEPH_BITMAPPER_H
// Thin bit-addressed view over a caller-owned byte buffer.  Bit b
// lives in byte b/8 at position b%8; no bounds checking is performed.
class bitmapper {
  char *_data;
  int _len;

  // byte index and in-byte mask for bit |b|
  static int byte_of(int b) { return b >> 3; }
  static int mask_of(int b) { return 1 << (b & 7); }

 public:
  bitmapper() : _data(0), _len(0) { }
  bitmapper(char *data, int len) : _data(data), _len(len) { }

  void set_data(char *data, int len) { _data = data; _len = len; }

  int bytes() const { return _len; }
  int bits() const { return _len * 8; }

  bool operator[](int b) const {
    return get(b);
  }
  bool get(int b) const {
    return (_data[byte_of(b)] & mask_of(b)) != 0;
  }
  void set(int b) {
    _data[byte_of(b)] |= mask_of(b);
  }
  void clear(int b) {
    _data[byte_of(b)] &= ~mask_of(b);
  }
  void toggle(int b) {
    _data[byte_of(b)] ^= mask_of(b);
  }
};
#endif
| 1,099 | 21.44898 | 71 | h |
null | ceph-main/src/include/blobhash.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_BLOBHASH_H
#define CEPH_BLOBHASH_H
#include <cstdint>
#include "hash.h"
class blobhash {
public:
uint32_t operator()(const void* p, size_t len) {
static rjhash<std::uint32_t> H;
std::uint32_t acc = 0;
auto buf = static_cast<const unsigned char*>(p);
while (len >= sizeof(acc)) {
acc ^= unaligned_load(buf);
buf += sizeof(std::uint32_t);
len -= sizeof(std::uint32_t);
}
// handle the last few bytes of p[-(len % 4):]
switch (len) {
case 3:
acc ^= buf[2] << 16;
[[fallthrough]];
case 2:
acc ^= buf[1] << 8;
[[fallthrough]];
case 1:
acc ^= buf[0];
}
return H(acc);
}
private:
static inline std::uint32_t unaligned_load(const unsigned char* p) {
std::uint32_t result;
__builtin_memcpy(&result, p, sizeof(result));
return result;
}
};
#endif
| 1,272 | 22.574074 | 71 | h |
null | ceph-main/src/include/btree_map.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_INCLUDE_BTREE_MAP_H
#define CEPH_INCLUDE_BTREE_MAP_H
#include "include/cpp-btree/btree.h"
#include "include/cpp-btree/btree_map.h"
#include "include/ceph_assert.h" // cpp-btree uses system assert, blech
#include "include/encoding.h"
// Encode a btree_map as a 32-bit element count followed by each
// (key, value) pair in order.
template<class T, class U>
inline void encode(const btree::btree_map<T,U>& m, ceph::buffer::list& bl)
{
  using ceph::encode;
  __u32 n = (__u32)(m.size());
  encode(n, bl);
  for (typename btree::btree_map<T,U>::const_iterator p = m.begin(); p != m.end(); ++p) {
    encode(p->first, bl);
    encode(p->second, bl);
  }
}
template<class T, class U>
inline void encode(const btree::btree_map<T,U>& m, ceph::buffer::list& bl, uint64_t features)
{
  using ceph::encode;
  // feature-aware variant: count, then feature-dependent key/value encodings
  encode(static_cast<__u32>(m.size()), bl);
  for (const auto& kv : m) {
    encode(kv.first, bl, features);
    encode(kv.second, bl, features);
  }
}
template<class T, class U>
inline void decode(btree::btree_map<T,U>& m, ceph::buffer::list::const_iterator& p)
{
  using ceph::decode;
  // leading count, then that many key/value pairs; replaces m's contents
  __u32 n;
  decode(n, p);
  m.clear();
  for (__u32 i = 0; i < n; ++i) {
    T k;
    decode(k, p);
    decode(m[k], p);
  }
}
template<class T, class U>
inline void encode_nohead(const btree::btree_map<T,U>& m, ceph::buffer::list& bl)
{
  using ceph::encode;
  // like encode() but without the element count; caller records it separately
  for (const auto& kv : m) {
    encode(kv.first, bl);
    encode(kv.second, bl);
  }
}
template<class T, class U>
inline void decode_nohead(int n, btree::btree_map<T,U>& m, ceph::buffer::list::const_iterator& p)
{
  using ceph::decode;
  // counterpart of encode_nohead(): the element count n is supplied by the caller
  m.clear();
  while (n--) {
    T key;
    decode(key, p);
    decode(m[key], p);
  }
}
#endif
| 1,801 | 25.115942 | 97 | h |
null | ceph-main/src/include/buffer.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_BUFFER_H
#define CEPH_BUFFER_H
#if defined(__linux__) || defined(__FreeBSD__)
#include <stdlib.h>
#endif
#include <limits.h>
#ifndef _XOPEN_SOURCE
# define _XOPEN_SOURCE 600
#endif
#include <stdio.h>
#include <sys/uio.h>
#if defined(__linux__) // For malloc(2).
#include <malloc.h>
#endif
#include <inttypes.h>
#include <stdint.h>
#include <string.h>
#if !defined(__CYGWIN__) && !defined(_WIN32)
# include <sys/mman.h>
#endif
#include <iosfwd>
#include <iomanip>
#include <list>
#include <memory>
#include <vector>
#include <string>
#if __cplusplus >= 201703L
#include <string_view>
#endif // __cplusplus >= 201703L
#include <exception>
#include <type_traits>
#include "page.h"
#include "crc32c.h"
#include "buffer_fwd.h"
#ifdef __CEPH__
# include "include/ceph_assert.h"
#else
# include <assert.h>
#endif
#include "inline_memory.h"
#define CEPH_BUFFER_API
#ifdef HAVE_SEASTAR
namespace seastar {
template <typename T> class temporary_buffer;
namespace net {
class packet;
}
}
#endif // HAVE_SEASTAR
class deleter;
template<typename T> class DencDumper;
namespace ceph {
template <class T>
struct nop_delete {
  // Deleter that deliberately does nothing: the pointee's lifetime is
  // managed (or intentionally leaked) elsewhere.
  void operator()(T* /* unmanaged */) {
  }
};
// This is not unique_ptr-like smart pointer! It just signalizes ownership
// but DOES NOT manage the resource. It WILL LEAK if not manually deleted.
// It's rather a replacement for raw pointer than any other smart one.
//
// Considered options:
// * unique_ptr with custom deleter implemented in .cc (would provide
// the non-zero-cost resource management),
// * GSL's owner<T*> (pretty neat but would impose an extra depedency),
// * unique_ptr with nop deleter,
// * raw pointer (doesn't embed ownership enforcement - std::move).
template <class T>
struct unique_leakable_ptr : public std::unique_ptr<T, ceph::nop_delete<T>> {
  // Reuse all of unique_ptr's constructors; only the no-op deleter differs.
  using std::unique_ptr<T, ceph::nop_delete<T>>::unique_ptr;
};
namespace buffer CEPH_BUFFER_API {
inline namespace v15_2_0 {
/// Actual definitions in common/error_code.h
struct error;
struct bad_alloc;
struct end_of_buffer;
struct malformed_input;
struct error_code;
/// count of cached crc hits (matching input)
int get_cached_crc();
/// count of cached crc hits (mismatching input, required adjustment)
int get_cached_crc_adjusted();
/// count of crc cache misses
int get_missed_crc();
/// enable/disable tracking of cached crcs
void track_cached_crc(bool b);
/*
* an abstract raw buffer. with a reference count.
*/
class raw;
class raw_malloc;
class raw_static;
class raw_posix_aligned;
class raw_hack_aligned;
class raw_claimed_char;
class raw_unshareable; // diagnostic, unshareable char buffer
class raw_combined;
class raw_claim_buffer;
/*
* named constructors
*/
ceph::unique_leakable_ptr<raw> copy(const char *c, unsigned len);
ceph::unique_leakable_ptr<raw> create(unsigned len);
ceph::unique_leakable_ptr<raw> create(unsigned len, char c);
ceph::unique_leakable_ptr<raw> create_in_mempool(unsigned len, int mempool);
ceph::unique_leakable_ptr<raw> claim_char(unsigned len, char *buf);
ceph::unique_leakable_ptr<raw> create_malloc(unsigned len);
ceph::unique_leakable_ptr<raw> claim_malloc(unsigned len, char *buf);
ceph::unique_leakable_ptr<raw> create_static(unsigned len, char *buf);
ceph::unique_leakable_ptr<raw> create_aligned(unsigned len, unsigned align);
ceph::unique_leakable_ptr<raw> create_aligned_in_mempool(unsigned len, unsigned align, int mempool);
ceph::unique_leakable_ptr<raw> create_page_aligned(unsigned len);
ceph::unique_leakable_ptr<raw> create_small_page_aligned(unsigned len);
ceph::unique_leakable_ptr<raw> claim_buffer(unsigned len, char *buf, deleter del);
#ifdef HAVE_SEASTAR
/// create a raw buffer to wrap seastar cpu-local memory, using foreign_ptr to
/// make it safe to share between cpus
ceph::unique_leakable_ptr<buffer::raw> create(seastar::temporary_buffer<char>&& buf);
/// create a raw buffer to wrap seastar cpu-local memory, without the safety
/// of foreign_ptr. the caller must otherwise guarantee that the buffer ptr is
/// destructed on this cpu
ceph::unique_leakable_ptr<buffer::raw> create_local(seastar::temporary_buffer<char>&& buf);
#endif
/*
* a buffer pointer. references (a subsequence of) a raw buffer.
*/
  class CEPH_BUFFER_API ptr {
    friend class list;
  protected:
    // _raw may be shared between several ptrs; _off/_len select the
    // subsequence of it that this ptr references.
    raw *_raw;
    unsigned _off, _len;
  private:
    // drop our reference on _raw (may free it; see the destructor note below)
    void release();
    // byte iterator within this ptr; when `deep` is set, get_ptr() returns
    // copies instead of shallow (shared-raw) ptrs
    template<bool is_const>
    class iterator_impl {
      const ptr *bp; ///< parent ptr
      const char *start; ///< starting pointer into bp->c_str()
      const char *pos; ///< pointer into bp->c_str()
      const char *end_ptr; ///< pointer to bp->end_c_str()
      const bool deep; ///< if true, do not allow shallow ptr copies
      iterator_impl(typename std::conditional<is_const, const ptr*, ptr*>::type p,
                    size_t offset, bool d)
        : bp(p),
          start(p->c_str() + offset),
          pos(start),
          end_ptr(p->end_c_str()),
          deep(d)
      {}
      friend class ptr;
    public:
      using pointer = typename std::conditional<is_const, const char*, char *>::type;
      // return the current position, then advance n bytes
      pointer get_pos_add(size_t n) {
        auto r = pos;
        *this += n;
        return r;
      }
      ptr get_ptr(size_t len) {
        if (deep) {
          return buffer::copy(get_pos_add(len), len);
        } else {
          size_t off = pos - bp->c_str();
          *this += len;
          return ptr(*bp, off, len);
        }
      }
      iterator_impl& operator+=(size_t len);
      const char *get_pos() {
        return pos;
      }
      const char *get_end() {
        return end_ptr;
      }
      size_t get_offset() {
        return pos - start;
      }
      bool end() const {
        return pos == end_ptr;
      }
    };
  public:
    using const_iterator = iterator_impl<true>;
    using iterator = iterator_impl<false>;
    ptr() : _raw(nullptr), _off(0), _len(0) {}
    ptr(ceph::unique_leakable_ptr<raw> r);
    // cppcheck-suppress noExplicitConstructor
    ptr(unsigned l);
    ptr(const char *d, unsigned l);
    ptr(const ptr& p);
    ptr(ptr&& p) noexcept;
    ptr(const ptr& p, unsigned o, unsigned l);
    ptr(const ptr& p, ceph::unique_leakable_ptr<raw> r);
    ptr& operator= (const ptr& p);
    ptr& operator= (ptr&& p) noexcept;
    ~ptr() {
      // BE CAREFUL: this destructor is called also for hypercombined ptr_node.
      // After freeing underlying raw, `*this` can become inaccessible as well!
      release();
    }
    bool have_raw() const { return _raw ? true:false; }
    void swap(ptr& other) noexcept;
    iterator begin(size_t offset=0) {
      return iterator(this, offset, false);
    }
    const_iterator begin(size_t offset=0) const {
      return const_iterator(this, offset, false);
    }
    const_iterator cbegin() const {
      return begin();
    }
    // begin_deep(): iterator whose get_ptr() always copies (no raw sharing)
    const_iterator begin_deep(size_t offset=0) const {
      return const_iterator(this, offset, true);
    }
    // misc
    bool is_aligned(unsigned align) const {
      return ((uintptr_t)c_str() & (align-1)) == 0;
    }
    bool is_page_aligned() const { return is_aligned(CEPH_PAGE_SIZE); }
    bool is_n_align_sized(unsigned align) const
    {
      return (length() % align) == 0;
    }
    bool is_n_page_sized() const { return is_n_align_sized(CEPH_PAGE_SIZE); }
    // true if this ptr references only part of its raw buffer
    bool is_partial() const {
      return have_raw() && (start() > 0 || end() < raw_length());
    }
    int get_mempool() const;
    void reassign_to_mempool(int pool);
    void try_assign_to_mempool(int pool);
    // accessors
    const char *c_str() const;
    char *c_str();
    const char *end_c_str() const;
    char *end_c_str();
    unsigned length() const { return _len; }
    unsigned offset() const { return _off; }
    unsigned start() const { return _off; }
    unsigned end() const { return _off + _len; }
    unsigned unused_tail_length() const;
    const char& operator[](unsigned n) const;
    char& operator[](unsigned n);
    const char *raw_c_str() const;
    unsigned raw_length() const;
    int raw_nref() const;
    void copy_out(unsigned o, unsigned l, char *dest) const;
    unsigned wasted() const;
    int cmp(const ptr& o) const;
    bool is_zero() const;
    // modifiers
    // new offset/length must not exceed the underlying raw's capacity
    void set_offset(unsigned o) {
#ifdef __CEPH__
      ceph_assert(raw_length() >= o);
#else
      assert(raw_length() >= o);
#endif
      _off = o;
    }
    void set_length(unsigned l) {
#ifdef __CEPH__
      ceph_assert(raw_length() >= l);
#else
      assert(raw_length() >= l);
#endif
      _len = l;
    }
    unsigned append(char c);
    unsigned append(const char *p, unsigned l);
#if __cplusplus >= 201703L
    inline unsigned append(std::string_view s) {
      return append(s.data(), s.length());
    }
#endif // __cplusplus >= 201703L
    void copy_in(unsigned o, unsigned l, const char *src, bool crc_reset = true);
    void zero(bool crc_reset = true);
    void zero(unsigned o, unsigned l, bool crc_reset = true);
    unsigned append_zeros(unsigned l);
#ifdef HAVE_SEASTAR
    /// create a temporary_buffer, copying the ptr as its deleter
    operator seastar::temporary_buffer<char>() &;
    /// convert to temporary_buffer, stealing the ptr as its deleter
    operator seastar::temporary_buffer<char>() &&;
#endif // HAVE_SEASTAR
  };
struct ptr_hook {
mutable ptr_hook* next;
ptr_hook() = default;
ptr_hook(ptr_hook* const next)
: next(next) {
}
};
  class ptr_node : public ptr_hook, public ptr {
  public:
    // deep-copies a node (used by list::buffers_t::clone_from)
    struct cloner {
      ptr_node* operator()(const ptr_node& clone_this);
    };
    // frees a node; hypercombined nodes need special handling
    // (see dispose_if_hypercombined)
    struct disposer {
      void operator()(ptr_node* const delete_this) {
        if (!__builtin_expect(dispose_if_hypercombined(delete_this), 0)) {
          delete delete_this;
        }
      }
    };
    ~ptr_node() = default;
    static std::unique_ptr<ptr_node, disposer>
    create(ceph::unique_leakable_ptr<raw> r) {
      return create_hypercombined(std::move(r));
    }
    static std::unique_ptr<ptr_node, disposer>
    create(const unsigned l) {
      return create_hypercombined(buffer::create(l));
    }
    template <class... Args>
    static std::unique_ptr<ptr_node, disposer>
    create(Args&&... args) {
      return std::unique_ptr<ptr_node, disposer>(
        new ptr_node(std::forward<Args>(args)...));
    }
    static ptr_node* copy_hypercombined(const ptr_node& copy_this);
  private:
    friend list;
    template <class... Args>
    ptr_node(Args&&... args) : ptr(std::forward<Args>(args)...) {
    }
    ptr_node(const ptr_node&) = default;
    // assignment/swap are deleted: a node's identity is tied to its list links
    ptr& operator= (const ptr& p) = delete;
    ptr& operator= (ptr&& p) noexcept = delete;
    ptr_node& operator= (const ptr_node& p) = delete;
    ptr_node& operator= (ptr_node&& p) noexcept = delete;
    void swap(ptr& other) noexcept = delete;
    void swap(ptr_node& other) noexcept = delete;
    static bool dispose_if_hypercombined(ptr_node* delete_this);
    static std::unique_ptr<ptr_node, disposer> create_hypercombined(
      ceph::unique_leakable_ptr<raw> r);
  };
/*
* list - the useful bit!
*/
  class CEPH_BUFFER_API list {
  public:
    // this the very low-level implementation of singly linked list
    // ceph::buffer::list is built on. We don't use intrusive slist
    // of Boost (or any other 3rd party) to save extra dependencies
    // in our public headers.
    class buffers_t {
      // _root.next can be thought as _head
      ptr_hook _root;
      ptr_hook* _tail;
    public:
      template <class T>
      class buffers_iterator {
        typename std::conditional<
          std::is_const<T>::value, const ptr_hook*, ptr_hook*>::type cur;
        template <class U> friend class buffers_iterator;
      public:
        using value_type = T;
        using reference = typename std::add_lvalue_reference<T>::type;
        using pointer = typename std::add_pointer<T>::type;
        using difference_type = std::ptrdiff_t;
        using iterator_category = std::forward_iterator_tag;
        template <class U>
        buffers_iterator(U* const p)
          : cur(p) {
        }
        // copy constructor
        buffers_iterator(const buffers_iterator<T>& other)
          : cur(other.cur) {
        }
        // converting constructor, from iterator -> const_iterator only
        template <class U, typename std::enable_if<
          std::is_const<T>::value && !std::is_const<U>::value, int>::type = 0>
        buffers_iterator(const buffers_iterator<U>& other)
          : cur(other.cur) {
        }
        buffers_iterator() = default;
        T& operator*() const {
          return *reinterpret_cast<T*>(cur);
        }
        T* operator->() const {
          return reinterpret_cast<T*>(cur);
        }
        buffers_iterator& operator++() {
          cur = cur->next;
          return *this;
        }
        buffers_iterator operator++(int) {
          const auto temp(*this);
          ++*this;
          return temp;
        }
        template <class U>
        buffers_iterator& operator=(buffers_iterator<U>& other) {
          cur = other.cur;
          return *this;
        }
        bool operator==(const buffers_iterator& rhs) const {
          return cur == rhs.cur;
        }
        bool operator!=(const buffers_iterator& rhs) const {
          return !(*this==rhs);
        }
      };
      typedef buffers_iterator<const ptr_node> const_iterator;
      typedef buffers_iterator<ptr_node> iterator;
      typedef const ptr_node& const_reference;
      typedef ptr_node& reference;
      // empty list: _root points at itself and _tail points at _root
      buffers_t()
        : _root(&_root),
          _tail(&_root) {
      }
      buffers_t(const buffers_t&) = delete;
      buffers_t(buffers_t&& other)
        : _root(other._root.next == &other._root ? &_root : other._root.next),
          _tail(other._tail == &other._root ? &_root : other._tail) {
        other._root.next = &other._root;
        other._tail = &other._root;
        _tail->next = &_root;
      }
      buffers_t& operator=(buffers_t&& other) {
        if (&other != this) {
          clear_and_dispose();
          swap(other);
        }
        return *this;
      }
      void push_back(reference item) {
        item.next = &_root;
        // this updates _root.next when called on empty
        _tail->next = &item;
        _tail = &item;
      }
      void push_front(reference item) {
        item.next = _root.next;
        _root.next = &item;
        _tail = _tail == &_root ? &item : _tail;
      }
      // *_after
      iterator erase_after(const_iterator it) {
        const auto* to_erase = it->next;
        it->next = to_erase->next;
        _root.next = _root.next == to_erase ? to_erase->next : _root.next;
        _tail = _tail == to_erase ? (ptr_hook*)&*it : _tail;
        return it->next;
      }
      void insert_after(const_iterator it, reference item) {
        item.next = it->next;
        it->next = &item;
        _root.next = it == end() ? &item : _root.next;
        _tail = const_iterator(_tail) == it ? &item : _tail;
      }
      // move all of other's nodes onto our tail, leaving other empty
      void splice_back(buffers_t& other) {
        if (other.empty()) {
          return;
        }
        other._tail->next = &_root;
        // will update root.next if empty() == true
        _tail->next = other._root.next;
        _tail = other._tail;
        other._root.next = &other._root;
        other._tail = &other._root;
      }
      bool empty() const { return _tail == &_root; }
      const_iterator begin() const {
        return _root.next;
      }
      const_iterator before_begin() const {
        return &_root;
      }
      const_iterator end() const {
        return &_root;
      }
      iterator begin() {
        return _root.next;
      }
      iterator before_begin() {
        return &_root;
      }
      iterator end() {
        return &_root;
      }
      reference front() {
        return reinterpret_cast<reference>(*_root.next);
      }
      reference back() {
        return reinterpret_cast<reference>(*_tail);
      }
      const_reference front() const {
        return reinterpret_cast<const_reference>(*_root.next);
      }
      const_reference back() const {
        return reinterpret_cast<const_reference>(*_tail);
      }
      void clone_from(const buffers_t& other) {
        clear_and_dispose();
        for (auto& node : other) {
          ptr_node* clone = ptr_node::cloner()(node);
          push_back(*clone);
        }
      }
      void clear_and_dispose() {
        ptr_node::disposer dispose;
        for (auto it = begin(), e = end(); it != e; /* nop */) {
          auto& node = *it++;
          dispose(&node);
        }
        _tail = &_root;
        _root.next = _tail;
      }
      iterator erase_after_and_dispose(iterator it) {
        auto* to_dispose = &*std::next(it);
        auto ret = erase_after(it);
        ptr_node::disposer()(to_dispose);
        return ret;
      }
      void swap(buffers_t& other) {
        const auto copy_root = _root;
        _root.next = \
          other._root.next == &other._root ? &this->_root : other._root.next;
        other._root.next = \
          copy_root.next == &_root ? &other._root : copy_root.next;
        const auto copy_tail = _tail;
        _tail = other._tail == &other._root ? &this->_root : other._tail;
        other._tail = copy_tail == &_root ? &other._root : copy_tail;
        _tail->next = &_root;
        other._tail->next = &other._root;
      }
    };
    class iterator;
  private:
    // my private bits
    buffers_t _buffers;
    // track bufferptr we can modify (especially ::append() to). Not all bptrs
    // bufferlist holds have this trait -- if somebody ::push_back(const ptr&),
    // he expects it won't change.
    ptr_node* _carriage;
    // cached total byte length and buffer count (kept in sync by modifiers)
    unsigned _len, _num;
    template <bool is_const>
    class CEPH_BUFFER_API iterator_impl {
    protected:
      typedef typename std::conditional<is_const,
                                        const list,
                                        list>::type bl_t;
      typedef typename std::conditional<is_const,
                                        const buffers_t,
                                        buffers_t >::type list_t;
      typedef typename std::conditional<is_const,
                                        typename buffers_t::const_iterator,
                                        typename buffers_t::iterator>::type list_iter_t;
      bl_t* bl;
      list_t* ls; // meh.. just here to avoid an extra pointer dereference..
      list_iter_t p;
      unsigned off; // in bl
      unsigned p_off; // in *p
      friend class iterator_impl<true>;
    public:
      using iterator_category = std::forward_iterator_tag;
      using value_type = typename std::conditional<is_const, const char, char>::type;
      using difference_type = std::ptrdiff_t;
      using pointer = typename std::add_pointer<value_type>::type;
      using reference = typename std::add_lvalue_reference<value_type>::type;
      // constructor. position.
      iterator_impl()
        : bl(0), ls(0), off(0), p_off(0) {}
      iterator_impl(bl_t *l, unsigned o=0);
      iterator_impl(bl_t *l, unsigned o, list_iter_t ip, unsigned po)
        : bl(l), ls(&bl->_buffers), p(ip), off(o), p_off(po) {}
      iterator_impl(const list::iterator& i);
      /// get current iterator offset in buffer::list
      unsigned get_off() const { return off; }
      /// get number of bytes remaining from iterator position to the end of the buffer::list
      unsigned get_remaining() const { return bl->length() - off; }
      /// true if iterator is at the end of the buffer::list
      bool end() const {
        return p == ls->end();
        //return off == bl->length();
      }
      void seek(unsigned o);
      char operator*() const;
      iterator_impl& operator+=(unsigned o);
      iterator_impl& operator++();
      ptr get_current_ptr() const;
      bool is_pointing_same_raw(const ptr& other) const;
      bl_t& get_bl() const { return *bl; }
      // copy data out.
      // note that these all _append_ to dest!
      void copy(unsigned len, char *dest);
      // deprecated, use copy_deep()
      void copy(unsigned len, ptr &dest) __attribute__((deprecated));
      void copy_deep(unsigned len, ptr &dest);
      void copy_shallow(unsigned len, ptr &dest);
      void copy(unsigned len, list &dest);
      void copy(unsigned len, std::string &dest);
      void copy_all(list &dest);
      // get a pointer to the currenet iterator position, return the
      // number of bytes we can read from that position (up to want),
      // and advance the iterator by that amount.
      size_t get_ptr_and_advance(size_t want, const char **p);
      /// calculate crc from iterator position
      uint32_t crc32c(size_t length, uint32_t crc);
      friend bool operator==(const iterator_impl& lhs,
                             const iterator_impl& rhs) {
        return &lhs.get_bl() == &rhs.get_bl() && lhs.get_off() == rhs.get_off();
      }
      friend bool operator!=(const iterator_impl& lhs,
                             const iterator_impl& rhs) {
        return &lhs.get_bl() != &rhs.get_bl() || lhs.get_off() != rhs.get_off();
      }
    };
  public:
    typedef iterator_impl<true> const_iterator;
    class CEPH_BUFFER_API iterator : public iterator_impl<false> {
    public:
      iterator() = default;
      iterator(bl_t *l, unsigned o=0);
      iterator(bl_t *l, unsigned o, list_iter_t ip, unsigned po);
      // copy data in
      void copy_in(unsigned len, const char *src, bool crc_reset = true);
      void copy_in(unsigned len, const list& otherl);
    };
    // raw handles into the current append buffer; handed out by
    // obtain_contiguous_space()
    struct reserve_t {
      char* bp_data;
      unsigned* bp_len;
      unsigned* bl_len;
    };
    class contiguous_appender {
      ceph::bufferlist& bl;
      ceph::bufferlist::reserve_t space;
      char* pos;
      bool deep;
      /// running count of bytes appended that are not reflected by @pos
      size_t out_of_band_offset = 0;
      contiguous_appender(bufferlist& bl, size_t len, bool d)
        : bl(bl),
          space(bl.obtain_contiguous_space(len)),
          pos(space.bp_data),
          deep(d) {
      }
      // publish bytes written so far into the bufferptr/bufferlist lengths
      void flush_and_continue() {
        const size_t l = pos - space.bp_data;
        *space.bp_len += l;
        *space.bl_len += l;
        space.bp_data = pos;
      }
      friend class list;
      template<typename Type> friend class ::DencDumper;
    public:
      ~contiguous_appender() {
        flush_and_continue();
      }
      size_t get_out_of_band_offset() const {
        return out_of_band_offset;
      }
      void append(const char* __restrict__ p, size_t l) {
        maybe_inline_memcpy(pos, p, l, 16);
        pos += l;
      }
      char *get_pos_add(size_t len) {
        char *r = pos;
        pos += len;
        return r;
      }
      char *get_pos() const {
        return pos;
      }
      void append(const bufferptr& p) {
        const auto plen = p.length();
        if (!plen) {
          return;
        }
        if (deep) {
          append(p.c_str(), plen);
        } else {
          flush_and_continue();
          bl.append(p);
          space = bl.obtain_contiguous_space(0);
          out_of_band_offset += plen;
        }
      }
      void append(const bufferlist& l) {
        if (deep) {
          for (const auto &p : l._buffers) {
            append(p.c_str(), p.length());
          }
        } else {
          flush_and_continue();
          bl.append(l);
          space = bl.obtain_contiguous_space(0);
          out_of_band_offset += l.length();
        }
      }
      size_t get_logical_offset() const {
        return out_of_band_offset + (pos - space.bp_data);
      }
    };
    contiguous_appender get_contiguous_appender(size_t len, bool deep=false) {
      return contiguous_appender(*this, len, deep);
    }
    class contiguous_filler {
      friend buffer::list;
      char* pos;
      contiguous_filler(char* const pos) : pos(pos) {}
    public:
      void advance(const unsigned len) {
        pos += len;
      }
      void copy_in(const unsigned len, const char* const src) {
        memcpy(pos, src, len);
        advance(len);
      }
      char* c_str() {
        return pos;
      }
    };
    // The contiguous_filler is supposed to be not costlier than a single
    // pointer. Keep it dumb, please.
    static_assert(sizeof(contiguous_filler) == sizeof(char*),
                  "contiguous_filler should be no costlier than pointer");
    class page_aligned_appender {
      bufferlist& bl;
      unsigned min_alloc;
      page_aligned_appender(list *l, unsigned min_pages)
        : bl(*l),
          min_alloc(min_pages * CEPH_PAGE_SIZE) {
      }
      void _refill(size_t len);
      // run impl_f over the free tail of the current buffer, refilling once
      // if len does not fit
      template <class Func>
      void _append_common(size_t len, Func&& impl_f) {
        const auto free_in_last = bl.get_append_buffer_unused_tail_length();
        const auto first_round = std::min(len, free_in_last);
        if (first_round) {
          impl_f(first_round);
        }
        // no C++17 for the sake of the C++11 guarantees of librados, sorry.
        const auto second_round = len - first_round;
        if (second_round) {
          _refill(second_round);
          impl_f(second_round);
        }
      }
      friend class list;
    public:
      void append(const bufferlist& l) {
        bl.append(l);
        bl.obtain_contiguous_space(0);
      }
      void append(const char* buf, size_t entire_len) {
        _append_common(entire_len,
                       [buf, this] (const size_t chunk_len) mutable {
                         bl.append(buf, chunk_len);
                         buf += chunk_len;
                       });
      }
      void append_zero(size_t entire_len) {
        _append_common(entire_len, [this] (const size_t chunk_len) {
          bl.append_zero(chunk_len);
        });
      }
      void substr_of(const list& bl, unsigned off, unsigned len) {
        for (const auto& bptr : bl.buffers()) {
          if (off >= bptr.length()) {
            off -= bptr.length();
            continue;
          }
          const auto round_size = std::min(bptr.length() - off, len);
          append(bptr.c_str() + off, round_size);
          len -= round_size;
          off = 0;
        }
      }
    };
    page_aligned_appender get_page_aligned_appender(unsigned min_pages=1) {
      return page_aligned_appender(this, min_pages);
    }
  private:
    // always_empty_bptr has no underlying raw but its _len is always 0.
    // This is useful for e.g. get_append_buffer_unused_tail_length() as
    // it allows to avoid conditionals on hot paths.
    static ptr_node always_empty_bptr;
    ptr_node& refill_append_space(const unsigned len);
    // for page_aligned_appender; never ever expose this publicly!
    // carriage / append_buffer is just an implementation's detail.
    ptr& get_append_buffer() {
      return *_carriage;
    }
  public:
    // cons/des
    list()
      : _carriage(&always_empty_bptr),
        _len(0),
        _num(0) {
    }
    // cppcheck-suppress noExplicitConstructor
    list(unsigned prealloc)
      : _carriage(&always_empty_bptr),
        _len(0),
        _num(0) {
      reserve(prealloc);
    }
    list(const list& other)
      : _carriage(&always_empty_bptr),
        _len(other._len),
        _num(other._num) {
      _buffers.clone_from(other._buffers);
    }
    list(list&& other) noexcept
      : _buffers(std::move(other._buffers)),
        _carriage(other._carriage),
        _len(other._len),
        _num(other._num) {
      other.clear();
    }
    ~list() {
      _buffers.clear_and_dispose();
    }
    list& operator= (const list& other) {
      if (this != &other) {
        _carriage = &always_empty_bptr;
        _buffers.clone_from(other._buffers);
        _len = other._len;
        _num = other._num;
      }
      return *this;
    }
    list& operator= (list&& other) noexcept {
      _buffers = std::move(other._buffers);
      _carriage = other._carriage;
      _len = other._len;
      _num = other._num;
      other.clear();
      return *this;
    }
    uint64_t get_wasted_space() const;
    unsigned get_num_buffers() const { return _num; }
    const ptr_node& front() const { return _buffers.front(); }
    const ptr_node& back() const { return _buffers.back(); }
    int get_mempool() const;
    void reassign_to_mempool(int pool);
    void try_assign_to_mempool(int pool);
    size_t get_append_buffer_unused_tail_length() const {
      return _carriage->unused_tail_length();
    }
    const buffers_t& buffers() const { return _buffers; }
    buffers_t& mut_buffers() { return _buffers; }
    void swap(list& other) noexcept;
    unsigned length() const {
#if 0
      // DEBUG: verify _len
      unsigned len = 0;
      for (std::list<ptr>::const_iterator it = _buffers.begin();
           it != _buffers.end();
           it++) {
        len += (*it).length();
      }
#ifdef __CEPH__
      ceph_assert(len == _len);
#else
      assert(len == _len);
#endif // __CEPH__
#endif
      return _len;
    }
    bool contents_equal(const buffer::list& other) const;
    bool contents_equal(const void* other, size_t length) const;
    bool is_provided_buffer(const char *dst) const;
    bool is_aligned(unsigned align) const;
    bool is_page_aligned() const;
    bool is_n_align_sized(unsigned align) const;
    bool is_n_page_sized() const;
    bool is_aligned_size_and_memory(unsigned align_size,
                                    unsigned align_memory) const;
    bool is_zero() const;
    // modifiers
    void clear() noexcept {
      _carriage = &always_empty_bptr;
      _buffers.clear_and_dispose();
      _len = 0;
      _num = 0;
    }
    void push_back(const ptr& bp) {
      if (bp.length() == 0)
        return;
      _buffers.push_back(*ptr_node::create(bp).release());
      _len += bp.length();
      _num += 1;
    }
    void push_back(ptr&& bp) {
      if (bp.length() == 0)
        return;
      _len += bp.length();
      _num += 1;
      _buffers.push_back(*ptr_node::create(std::move(bp)).release());
      _carriage = &always_empty_bptr;
    }
    void push_back(const ptr_node&) = delete;
    void push_back(ptr_node&) = delete;
    void push_back(ptr_node&&) = delete;
    void push_back(std::unique_ptr<ptr_node, ptr_node::disposer> bp) {
      _carriage = bp.get();
      _len += bp->length();
      _num += 1;
      _buffers.push_back(*bp.release());
    }
    void push_back(raw* const r) = delete;
    void push_back(ceph::unique_leakable_ptr<raw> r) {
      _buffers.push_back(*ptr_node::create(std::move(r)).release());
      _carriage = &_buffers.back();
      _len += _buffers.back().length();
      _num += 1;
    }
    void zero();
    void zero(unsigned o, unsigned l);
    bool is_contiguous() const;
    void rebuild();
    void rebuild(std::unique_ptr<ptr_node, ptr_node::disposer> nb);
    bool rebuild_aligned(unsigned align);
    // max_buffers = 0 mean don't care _buffers.size(), other
    // must make _buffers.size() <= max_buffers after rebuilding.
    bool rebuild_aligned_size_and_memory(unsigned align_size,
                                         unsigned align_memory,
                                         unsigned max_buffers = 0);
    bool rebuild_page_aligned();
    void reserve(size_t prealloc);
    [[deprecated("in favor of operator=(list&&)")]] void claim(list& bl) {
      *this = std::move(bl);
    }
    void claim_append(list& bl);
    void claim_append(list&& bl) {
      claim_append(bl);
    }
    // copy with explicit volatile-sharing semantics
    void share(const list& bl)
    {
      if (this != &bl) {
        clear();
        for (const auto& bp : bl._buffers) {
          _buffers.push_back(*ptr_node::create(bp).release());
        }
        _len = bl._len;
        _num = bl._num;
      }
    }
#ifdef HAVE_SEASTAR
    /// convert the bufferlist into a network packet
    operator seastar::net::packet() &&;
#endif
    iterator begin(size_t offset=0) {
      return iterator(this, offset);
    }
    iterator end() {
      return iterator(this, _len, _buffers.end(), 0);
    }
    const_iterator begin(size_t offset=0) const {
      return const_iterator(this, offset);
    }
    const_iterator cbegin(size_t offset=0) const {
      return begin(offset);
    }
    const_iterator end() const {
      return const_iterator(this, _len, _buffers.end(), 0);
    }
    void append(char c);
    void append(const char *data, unsigned len);
    void append(std::string s) {
      append(s.data(), s.length());
    }
#if __cplusplus >= 201703L
    // To forcibly disambiguate between string and string_view in the
    // case of arrays
    template<std::size_t N>
    void append(const char (&s)[N]) {
      append(s, N);
    }
    void append(const char* s) {
      append(s, strlen(s));
    }
    void append(std::string_view s) {
      append(s.data(), s.length());
    }
#endif // __cplusplus >= 201703L
    void append(const ptr& bp);
    void append(ptr&& bp);
    void append(const ptr& bp, unsigned off, unsigned len);
    void append(const list& bl);
    /// append each non-empty line from the stream and add '\n',
    /// so a '\n' will be added even the stream does not end with EOL.
    ///
    /// For example, if the stream contains "ABC\n\nDEF", "ABC\nDEF\n" is
    /// actually appended.
    void append(std::istream& in);
    contiguous_filler append_hole(unsigned len);
    void append_zero(unsigned len);
    void prepend_zero(unsigned len);
    reserve_t obtain_contiguous_space(const unsigned len);
    /*
     * get a char
     */
    const char& operator[](unsigned n) const;
    char *c_str();
    std::string to_str() const;
    void substr_of(const list& other, unsigned off, unsigned len);
    // funky modifer
    void splice(unsigned off, unsigned len, list *claim_by=0 /*, bufferlist& replace_with */);
    void write(int off, int len, std::ostream& out) const;
    void encode_base64(list& o);
    void decode_base64(list& o);
    void write_stream(std::ostream &out) const;
    void hexdump(std::ostream &out, bool trailing_newline = true) const;
    ssize_t pread_file(const char *fn, uint64_t off, uint64_t len, std::string *error);
    int read_file(const char *fn, std::string *error);
    ssize_t read_fd(int fd, size_t len);
    ssize_t recv_fd(int fd, size_t len);
    int write_file(const char *fn, int mode=0644);
    int write_fd(int fd) const;
    int write_fd(int fd, uint64_t offset) const;
    int send_fd(int fd) const;
    template<typename VectorT>
    void prepare_iov(VectorT *piov) const {
#ifdef __CEPH__
      ceph_assert(_num <= IOV_MAX);
#else
      assert(_num <= IOV_MAX);
#endif
      piov->resize(_num);
      unsigned n = 0;
      for (auto& p : _buffers) {
        (*piov)[n].iov_base = (void *)p.c_str();
        (*piov)[n].iov_len = p.length();
        ++n;
      }
    }
    struct iovec_t {
      uint64_t offset;
      uint64_t length;
      std::vector<iovec> iov;
    };
    using iov_vec_t = std::vector<iovec_t>;
    iov_vec_t prepare_iovs() const;
    uint32_t crc32c(uint32_t crc) const;
    void invalidate_crc();
    // These functions return a bufferlist with a pointer to a single
    // static buffer. They /must/ not outlive the memory they
    // reference.
    static list static_from_mem(char* c, size_t l);
    static list static_from_cstring(char* c);
    static list static_from_string(std::string& s);
  };
} // inline namespace v15_2_0
/*
* efficient hash of one or more bufferlists
*/
class hash {
  uint32_t crc = 0;  // running crc32c over all bufferlists seen so far
public:
  hash() = default;
  // cppcheck-suppress noExplicitConstructor
  hash(uint32_t init) : crc(init) { }
  /// fold another bufferlist into the running digest
  void update(const buffer::list& bl) {
    crc = bl.crc32c(crc);
  }
  /// current accumulated digest
  uint32_t digest() {
    return crc;
  }
};
// equal iff same length and same byte sequence
inline bool operator==(const bufferlist &lhs, const bufferlist &rhs) {
  return lhs.length() == rhs.length() &&
    std::equal(lhs.begin(), lhs.end(), rhs.begin());
}
// lexicographic byte-wise comparison
inline bool operator<(const bufferlist& lhs, const bufferlist& rhs) {
  auto lit = lhs.begin();
  auto rit = rhs.begin();
  while (lit != lhs.end() && rit != rhs.end()) {
    const char lc = *lit, rc = *rit;
    if (lc != rc)
      return lc < rc;
    ++lit;
    ++rit;
  }
  // equal prefix: lhs is smaller iff it ran out of bytes first
  return lit == lhs.end() && rit != rhs.end();
}
// lexicographic byte-wise comparison, non-strict
inline bool operator<=(const bufferlist& lhs, const bufferlist& rhs) {
  auto lit = lhs.begin();
  auto rit = rhs.begin();
  while (lit != lhs.end() && rit != rhs.end()) {
    const char lc = *lit, rc = *rit;
    if (lc != rc)
      return lc < rc;
    ++lit;
    ++rit;
  }
  // equal prefix: lhs <= rhs iff lhs has no bytes left over
  return lit == lhs.end();
}
// Remaining comparisons are derived from the primitives above.
inline bool operator!=(const bufferlist &l, const bufferlist &r) {
return !(l == r);
}
inline bool operator>(const bufferlist& lhs, const bufferlist& rhs) {
return rhs < lhs;
}
inline bool operator>=(const bufferlist& lhs, const bufferlist& rhs) {
return rhs <= lhs;
}
// Stream-output helpers (defined in buffer.cc).
std::ostream& operator<<(std::ostream& out, const buffer::ptr& bp);
std::ostream& operator<<(std::ostream& out, const buffer::raw &r);
std::ostream& operator<<(std::ostream& out, const buffer::list& bl);
// Allow `bufferhash << bufferlist` to accumulate a checksum.
inline bufferhash& operator<<(bufferhash& l, const bufferlist &r) {
l.update(r);
return l;
}
} // namespace buffer
} // namespace ceph
#endif
| 35,734 | 26.594595 | 102 | h |
null | ceph-main/src/include/buffer_fwd.h | #ifndef BUFFER_FWD_H
#define BUFFER_FWD_H
namespace ceph {
namespace buffer {
inline namespace v15_2_0 {
class ptr;
class list;
}
class hash;
}
using bufferptr = buffer::ptr;
using bufferlist = buffer::list;
using bufferhash = buffer::hash;
}
#endif
| 288 | 13.45 | 34 | h |
null | ceph-main/src/include/buffer_raw.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_BUFFER_RAW_H
#define CEPH_BUFFER_RAW_H
#include <map>
#include <utility>
#include <type_traits>
#include "common/ceph_atomic.h"
#include "include/buffer.h"
#include "include/mempool.h"
#include "include/spinlock.h"
namespace ceph::buffer {
inline namespace v15_2_0 {
// Reference-counted backing store for buffer::ptr. A raw owns (or
// wraps) one contiguous memory extent, keeps its size accounted in a
// mempool, and caches the most recently computed CRC over a byte range.
class raw {
public:
// In the future we might want to have a slab allocator here with few
// embedded slots. This would allow to avoid the "if" in dtor of ptr_node.
std::aligned_storage<sizeof(ptr_node),
alignof(ptr_node)>::type bptr_storage;
protected:
char *data;
unsigned len;
public:
ceph::atomic<unsigned> nref { 0 };
int mempool;
// (from, to) offsets of the cached CRC range; size_t::max sentinels
// mean no CRC is cached. Guarded by crc_spinlock.
std::pair<size_t, size_t> last_crc_offset {std::numeric_limits<size_t>::max(), std::numeric_limits<size_t>::max()};
std::pair<uint32_t, uint32_t> last_crc_val;
mutable ceph::spinlock crc_spinlock;
// Account `l` bytes against `mempool`; data starts null (subclass is
// expected to provide the storage).
explicit raw(unsigned l, int mempool=mempool::mempool_buffer_anon)
: data(nullptr), len(l), nref(0), mempool(mempool) {
mempool::get_pool(mempool::pool_index_t(mempool)).adjust_count(1, len);
}
// Wrap an existing extent `c` of `l` bytes.
raw(char *c, unsigned l, int mempool=mempool::mempool_buffer_anon)
: data(c), len(l), nref(0), mempool(mempool) {
mempool::get_pool(mempool::pool_index_t(mempool)).adjust_count(1, len);
}
virtual ~raw() {
mempool::get_pool(mempool::pool_index_t(mempool)).adjust_count(
-1, -(int)len);
}
// Change the accounted length, keeping mempool bookkeeping balanced.
void _set_len(unsigned l) {
mempool::get_pool(mempool::pool_index_t(mempool)).adjust_count(
-1, -(int)len);
len = l;
mempool::get_pool(mempool::pool_index_t(mempool)).adjust_count(1, len);
}
// Move this buffer's accounting from its current mempool to `pool`.
void reassign_to_mempool(int pool) {
if (pool == mempool) {
return;
}
mempool::get_pool(mempool::pool_index_t(mempool)).adjust_count(
-1, -(int)len);
mempool = pool;
mempool::get_pool(mempool::pool_index_t(pool)).adjust_count(1, len);
}
// Like reassign_to_mempool(), but only if still in the anonymous pool.
void try_assign_to_mempool(int pool) {
if (mempool == mempool::mempool_buffer_anon) {
reassign_to_mempool(pool);
}
}
private:
// no copying.
// cppcheck-suppress noExplicitConstructor
raw(const raw &other) = delete;
const raw& operator=(const raw &other) = delete;
public:
char *get_data() const {
return data;
}
unsigned get_len() const {
return len;
}
// Retrieve the cached CRC for the range `fromto` if present; returns
// false (leaving *crc untouched) otherwise.
bool get_crc(const std::pair<size_t, size_t> &fromto,
std::pair<uint32_t, uint32_t> *crc) const {
std::lock_guard lg(crc_spinlock);
if (last_crc_offset == fromto) {
*crc = last_crc_val;
return true;
}
return false;
}
// Remember the CRC computed for range `fromto`.
void set_crc(const std::pair<size_t, size_t> &fromto,
const std::pair<uint32_t, uint32_t> &crc) {
std::lock_guard lg(crc_spinlock);
last_crc_offset = fromto;
last_crc_val = crc;
}
// Forget any cached CRC range (reset offsets to the sentinels).
void invalidate_crc() {
std::lock_guard lg(crc_spinlock);
last_crc_offset.first = std::numeric_limits<size_t>::max();
last_crc_offset.second = std::numeric_limits<size_t>::max();
}
};
} // inline namespace v15_2_0
} // namespace ceph::buffer
#endif // CEPH_BUFFER_RAW_H
| 3,507 | 27.991736 | 119 | h |
null | ceph-main/src/include/byteorder.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
#pragma once
#include <boost/endian/conversion.hpp>
#include "int_types.h"
// Reverse the byte order of an integral value.
template<typename T>
inline T swab(T val) {
return boost::endian::endian_reverse(val);
}
// Fixed little-endian storage wrapper: the value is kept little-endian
// in memory and converted to/from native order on every access. Used
// for on-wire and on-disk structures shared across architectures.
template<typename T>
struct ceph_le {
private:
T v;
public:
ceph_le() = default;
explicit ceph_le(T nv)
: v{boost::endian::native_to_little(nv)}
{}
ceph_le<T>& operator=(T nv) {
v = boost::endian::native_to_little(nv);
return *this;
}
// Implicit read access converts back to native byte order.
constexpr operator T() const { return boost::endian::little_to_native(v); }
// Comparing raw representations suffices: same bytes <=> same value.
friend inline bool operator==(ceph_le a, ceph_le b) {
return a.v == b.v;
}
} __attribute__ ((packed));
// Convenience aliases for the common fixed-width cases.
using ceph_le64 = ceph_le<__u64>;
using ceph_le32 = ceph_le<__u32>;
using ceph_le16 = ceph_le<__u16>;
using ceph_les64 = ceph_le<__s64>;
using ceph_les32 = ceph_le<__s32>;
using ceph_les16 = ceph_le<__s16>;
// Build a little-endian signed 64-bit value from a native one.
// Constructs directly instead of default-construct-then-assign; the
// stored representation is identical either way.
inline ceph_les64 init_les64(__s64 x) {
  return ceph_les64(x);
}
// Build a little-endian signed 32-bit value from a native one.
// Constructs directly instead of default-construct-then-assign; the
// stored representation is identical either way.
inline ceph_les32 init_les32(__s32 x) {
  return ceph_les32(x);
}
// Build a little-endian signed 16-bit value from a native one.
// Constructs directly instead of default-construct-then-assign; the
// stored representation is identical either way.
inline ceph_les16 init_les16(__s16 x) {
  return ceph_les16(x);
}
| 1,137 | 19.321429 | 77 | h |
null | ceph-main/src/include/ceph_assert.h | #ifndef CEPH_ASSERT_H
#define CEPH_ASSERT_H
#include <cstdlib>
#include <string>
#ifndef __STRING
# define __STRING(x) #x
#endif
#if defined(__linux__)
#include <features.h>
#elif defined(__FreeBSD__)
#include <sys/cdefs.h>
#define __GNUC_PREREQ(minor, major) __GNUC_PREREQ__(minor, major)
#elif defined(__sun) || defined(_AIX)
#include "include/compat.h"
#include <assert.h>
#endif
#ifdef __CEPH__
# include "acconfig.h"
#endif
#include "include/common_fwd.h"
namespace ceph {
struct BackTrace;
/*
* Select a function-name variable based on compiler tests, and any compiler
* specific overrides.
*/
#if defined(HAVE_PRETTY_FUNC)
# define __CEPH_ASSERT_FUNCTION __PRETTY_FUNCTION__
#elif defined(HAVE_FUNC)
# define __CEPH_ASSERT_FUNCTION __func__
#else
# define __CEPH_ASSERT_FUNCTION ((__const char *) 0)
#endif
// Hook a CephContext into assertion-failure handling.
extern void register_assert_context(CephContext *cct);
// Static description of an assertion site; built once per site so the
// strings need not be reassembled on every evaluation.
struct assert_data {
const char *assertion;
const char *file;
const int line;
const char *function;
};
// Fatal assertion reporters (never return).
extern void __ceph_assert_fail(const char *assertion, const char *file, int line, const char *function)
__attribute__ ((__noreturn__));
extern void __ceph_assert_fail(const assert_data &ctx)
__attribute__ ((__noreturn__));
extern void __ceph_assertf_fail(const char *assertion, const char *file, int line, const char *function, const char* msg, ...)
__attribute__ ((__noreturn__));
// Non-fatal variant: not marked noreturn, so execution continues.
extern void __ceph_assert_warn(const char *assertion, const char *file, int line, const char *function);
[[noreturn]] void __ceph_abort(const char *file, int line, const char *func,
const std::string& msg);
[[noreturn]] void __ceph_abortf(const char *file, int line, const char *func,
const char* msg, ...);
#define _CEPH_ASSERT_VOID_CAST static_cast<void>
// assert_warn(expr): report a failed expression without aborting.
#define assert_warn(expr) \
((expr) \
? _CEPH_ASSERT_VOID_CAST (0) \
: ::ceph::__ceph_assert_warn (__STRING(expr), __FILE__, __LINE__, __CEPH_ASSERT_FUNCTION))
}
using namespace ceph;
/*
* ceph_abort aborts the program with a nice backtrace.
*
* Currently, it's the same as assert(0), but we may one day make assert a
* debug-only thing, like it is in many projects.
*/
// NOTE: ceph_abort()'s msg/varargs are ignored; the reported message is
// always the fixed string "abort() called". Use ceph_abort_msg[f] to
// pass a real message.
#define ceph_abort(msg, ...) \
::ceph::__ceph_abort( __FILE__, __LINE__, __CEPH_ASSERT_FUNCTION, "abort() called")
#define ceph_abort_msg(msg) \
::ceph::__ceph_abort( __FILE__, __LINE__, __CEPH_ASSERT_FUNCTION, msg)
#define ceph_abort_msgf(...) \
::ceph::__ceph_abortf( __FILE__, __LINE__, __CEPH_ASSERT_FUNCTION, __VA_ARGS__)
// Under ASan, pass the site strings directly; otherwise cache the site
// metadata in a function-local static assert_data.
#ifdef __SANITIZE_ADDRESS__
#define ceph_assert(expr) \
do { \
((expr)) \
? _CEPH_ASSERT_VOID_CAST (0) \
: ::ceph::__ceph_assert_fail(__STRING(expr), __FILE__, __LINE__, __CEPH_ASSERT_FUNCTION); \
} while (false)
#else
#define ceph_assert(expr) \
do { static const ceph::assert_data assert_data_ctx = \
{__STRING(expr), __FILE__, __LINE__, __CEPH_ASSERT_FUNCTION}; \
((expr) \
? _CEPH_ASSERT_VOID_CAST (0) \
: ::ceph::__ceph_assert_fail(assert_data_ctx)); } while(false)
#endif
// this variant will *never* get compiled out to NDEBUG in the future.
// (ceph_assert currently doesn't either, but in the future it might.)
#ifdef __SANITIZE_ADDRESS__
#define ceph_assert_always(expr) \
do { \
((expr)) \
? _CEPH_ASSERT_VOID_CAST (0) \
: ::ceph::__ceph_assert_fail(__STRING(expr), __FILE__, __LINE__, __CEPH_ASSERT_FUNCTION); \
} while(false)
#else
#define ceph_assert_always(expr) \
do { static const ceph::assert_data assert_data_ctx = \
{__STRING(expr), __FILE__, __LINE__, __CEPH_ASSERT_FUNCTION}; \
((expr) \
? _CEPH_ASSERT_VOID_CAST (0) \
: ::ceph::__ceph_assert_fail(assert_data_ctx)); } while(false)
#endif
// Named by analogy with printf. Along with an expression, takes a format
// string and parameters which are printed if the assertion fails.
#define assertf(expr, ...) \
((expr) \
? _CEPH_ASSERT_VOID_CAST (0) \
: ::ceph::__ceph_assertf_fail (__STRING(expr), __FILE__, __LINE__, __CEPH_ASSERT_FUNCTION, __VA_ARGS__))
#define ceph_assertf(expr, ...) \
((expr) \
? _CEPH_ASSERT_VOID_CAST (0) \
: ::ceph::__ceph_assertf_fail (__STRING(expr), __FILE__, __LINE__, __CEPH_ASSERT_FUNCTION, __VA_ARGS__))
// this variant will *never* get compiled out to NDEBUG in the future.
// (ceph_assertf currently doesn't either, but in the future it might.)
#define ceph_assertf_always(expr, ...) \
((expr) \
? _CEPH_ASSERT_VOID_CAST (0) \
: ::ceph::__ceph_assertf_fail (__STRING(expr), __FILE__, __LINE__, __CEPH_ASSERT_FUNCTION, __VA_ARGS__))
#endif
| 5,066 | 33.236486 | 126 | h |
null | ceph-main/src/include/ceph_features.h | #ifndef __CEPH_FEATURES
#define __CEPH_FEATURES
#include "sys/types.h"
/*
* Each time we reclaim bits for reuse we need to specify another
* bitmask that, if all bits are set, indicates we have the new
* incarnation of that feature. Base case is 1 (first use)
*/
#define CEPH_FEATURE_INCARNATION_1 (0ull)
#define CEPH_FEATURE_INCARNATION_2 (1ull<<57) // SERVER_JEWEL
#define CEPH_FEATURE_INCARNATION_3 ((1ull<<57)|(1ull<<28)) // SERVER_MIMIC
// Declare a feature bit plus a mask that also carries the incarnation
// marker bit(s); HAVE_FEATURE() tests against the mask so a reused bit
// from an older incarnation is never mistaken for the new meaning.
#define DEFINE_CEPH_FEATURE(bit, incarnation, name) \
const static uint64_t CEPH_FEATURE_##name = (1ULL<<bit); \
const static uint64_t CEPH_FEATUREMASK_##name = \
(1ULL<<bit | CEPH_FEATURE_INCARNATION_##incarnation);
// this bit is ignored but still advertised by release *when*
#define DEFINE_CEPH_FEATURE_DEPRECATED(bit, incarnation, name, when) \
const static uint64_t DEPRECATED_CEPH_FEATURE_##name = (1ULL<<bit); \
const static uint64_t DEPRECATED_CEPH_FEATUREMASK_##name = \
(1ULL<<bit | CEPH_FEATURE_INCARNATION_##incarnation);
// this bit is ignored by release *unused* and not advertised by
// release *unadvertised*
#define DEFINE_CEPH_FEATURE_RETIRED(bit, inc, name, unused, unadvertised)
// test for a feature. this test is safer than a typical mask against
// the bit because it ensures that we have the bit AND the marker for the
// bit's incarnation. this must be used in any case where the features
// bits may include an old meaning of the bit.
#define HAVE_FEATURE(x, name) \
(((x) & (CEPH_FEATUREMASK_##name)) == (CEPH_FEATUREMASK_##name))
/*
* Notes on deprecation:
*
* For feature bits used *only* on the server-side:
*
* - In the first phase we indicate that a feature is DEPRECATED as of
* a particular release. This is the first major release X (say,
* mimic) that does not depend on its peers advertising the feature.
* That is, it safely assumes its peers all have the feature. We
* indicate this with the DEPRECATED macro. For example,
*
* DEFINE_CEPH_FEATURE_DEPRECATED( 2, 1, MON_METADATA, MIMIC)
*
* because 13.2.z (mimic) did not care if its peers advertised this
* feature bit.
*
* - In the second phase we stop advertising the bit and call it
* RETIRED. This can normally be done 2 major releases
* following the one in which we marked the feature DEPRECATED. In
* the above example, for 15.0.z (octopus) we can say:
*
* DEFINE_CEPH_FEATURE_RETIRED( 2, 1, MON_METADATA, MIMIC, OCTOPUS)
*
* - The bit can be reused in the next release that will never talk to
* a pre-octopus daemon (13 mimic or 14 nautilus) that advertises the
* bit: in this case, the 16.y.z (P-release).
*
* This ensures that no two versions who have different meanings for
* the bit ever speak to each other.
*/
/*
* Notes on the kernel client:
*
* - "X" means that the feature bit has been advertised and supported
* since kernel X
*
* - "X req" means that the feature bit has been advertised and required
* since kernel X
*
* The remaining feature bits are not and have never been used by the
* kernel client.
*/
DEFINE_CEPH_FEATURE( 0, 1, UID)
DEFINE_CEPH_FEATURE( 1, 1, NOSRCADDR) // 2.6.35 req
DEFINE_CEPH_FEATURE_RETIRED( 2, 1, MONCLOCKCHECK, JEWEL, LUMINOUS)
DEFINE_CEPH_FEATURE( 2, 3, SERVER_NAUTILUS)
DEFINE_CEPH_FEATURE( 3, 1, FLOCK) // 2.6.36
DEFINE_CEPH_FEATURE( 4, 1, SUBSCRIBE2) // 4.6 req
DEFINE_CEPH_FEATURE( 5, 1, MONNAMES)
DEFINE_CEPH_FEATURE( 6, 1, RECONNECT_SEQ) // 3.10 req
DEFINE_CEPH_FEATURE( 7, 1, DIRLAYOUTHASH) // 2.6.38
DEFINE_CEPH_FEATURE( 8, 1, OBJECTLOCATOR)
DEFINE_CEPH_FEATURE( 9, 1, PGID64) // 3.9 req
DEFINE_CEPH_FEATURE(10, 1, INCSUBOSDMAP)
DEFINE_CEPH_FEATURE(11, 1, PGPOOL3) // 3.9 req
DEFINE_CEPH_FEATURE(12, 1, OSDREPLYMUX)
DEFINE_CEPH_FEATURE(13, 1, OSDENC) // 3.9 req
DEFINE_CEPH_FEATURE_RETIRED(14, 1, OMAP, HAMMER, JEWEL)
DEFINE_CEPH_FEATURE(14, 2, SERVER_KRAKEN)
DEFINE_CEPH_FEATURE(15, 1, MONENC)
DEFINE_CEPH_FEATURE_RETIRED(16, 1, QUERY_T, JEWEL, LUMINOUS)
DEFINE_CEPH_FEATURE(16, 3, SERVER_OCTOPUS)
DEFINE_CEPH_FEATURE(16, 3, OSD_REPOP_MLCOD)
DEFINE_CEPH_FEATURE_RETIRED(17, 1, INDEP_PG_MAP, JEWEL, LUMINOUS)
DEFINE_CEPH_FEATURE(17, 3, OS_PERF_STAT_NS)
DEFINE_CEPH_FEATURE(18, 1, CRUSH_TUNABLES) // 3.6
DEFINE_CEPH_FEATURE_RETIRED(19, 1, CHUNKY_SCRUB, JEWEL, LUMINOUS)
DEFINE_CEPH_FEATURE(19, 2, OSD_PGLOG_HARDLIMIT)
DEFINE_CEPH_FEATURE_RETIRED(20, 1, MON_NULLROUTE, JEWEL, LUMINOUS)
DEFINE_CEPH_FEATURE(20, 3, SERVER_PACIFIC)
DEFINE_CEPH_FEATURE_RETIRED(21, 1, MON_GV, HAMMER, JEWEL)
DEFINE_CEPH_FEATURE(21, 2, SERVER_LUMINOUS) // 4.13
DEFINE_CEPH_FEATURE(21, 2, RESEND_ON_SPLIT) // overlap
DEFINE_CEPH_FEATURE(21, 2, RADOS_BACKOFF) // overlap
DEFINE_CEPH_FEATURE(21, 2, OSDMAP_PG_UPMAP) // overlap
DEFINE_CEPH_FEATURE(21, 2, CRUSH_CHOOSE_ARGS) // overlap
DEFINE_CEPH_FEATURE_RETIRED(22, 1, BACKFILL_RESERVATION, JEWEL, LUMINOUS)
DEFINE_CEPH_FEATURE(22, 2, OSD_FIXED_COLLECTION_LIST)
DEFINE_CEPH_FEATURE(23, 1, MSG_AUTH) // 3.19 req (unless nocephx_require_signatures)
DEFINE_CEPH_FEATURE_RETIRED(24, 1, RECOVERY_RESERVATION, JEWEL, LUMINOUS)
DEFINE_CEPH_FEATURE(24, 2, RECOVERY_RESERVATION_2)
DEFINE_CEPH_FEATURE(25, 1, CRUSH_TUNABLES2) // 3.9
DEFINE_CEPH_FEATURE(26, 1, CREATEPOOLID)
DEFINE_CEPH_FEATURE(27, 1, REPLY_CREATE_INODE) // 3.9
DEFINE_CEPH_FEATURE_RETIRED(28, 1, OSD_HBMSGS, HAMMER, JEWEL)
DEFINE_CEPH_FEATURE(28, 2, SERVER_MIMIC)
DEFINE_CEPH_FEATURE(29, 1, MDSENC) // 4.7
DEFINE_CEPH_FEATURE(30, 1, OSDHASHPSPOOL) // 3.9
DEFINE_CEPH_FEATURE_RETIRED(31, 1, MON_SINGLE_PAXOS, NAUTILUS, PACIFIC)
DEFINE_CEPH_FEATURE(31, 3, SERVER_REEF)
DEFINE_CEPH_FEATURE_RETIRED(32, 1, OSD_SNAPMAPPER, JEWEL, LUMINOUS)
DEFINE_CEPH_FEATURE(32, 3, STRETCH_MODE)
DEFINE_CEPH_FEATURE_RETIRED(33, 1, MON_SCRUB, JEWEL, LUMINOUS)
DEFINE_CEPH_FEATURE(33, 3, SERVER_QUINCY)
DEFINE_CEPH_FEATURE_RETIRED(34, 1, OSD_PACKED_RECOVERY, JEWEL, LUMINOUS)
DEFINE_CEPH_FEATURE(34, 3, RANGE_BLOCKLIST)
DEFINE_CEPH_FEATURE(35, 1, OSD_CACHEPOOL) // 3.14
DEFINE_CEPH_FEATURE(36, 1, CRUSH_V2) // 3.14
DEFINE_CEPH_FEATURE(37, 1, EXPORT_PEER) // 3.14
DEFINE_CEPH_FEATURE_RETIRED(38, 1, OSD_ERASURE_CODES, MIMIC, OCTOPUS)
// available
DEFINE_CEPH_FEATURE(39, 1, OSDMAP_ENC) // 3.15
DEFINE_CEPH_FEATURE(40, 1, MDS_INLINE_DATA) // 3.19
DEFINE_CEPH_FEATURE(41, 1, CRUSH_TUNABLES3) // 3.15
DEFINE_CEPH_FEATURE(41, 1, OSD_PRIMARY_AFFINITY) // overlap
DEFINE_CEPH_FEATURE(42, 1, MSGR_KEEPALIVE2) // 4.3 (for consistency)
DEFINE_CEPH_FEATURE(43, 1, OSD_POOLRESEND) // 4.13
DEFINE_CEPH_FEATURE_RETIRED(44, 1, ERASURE_CODE_PLUGINS_V2, MIMIC, OCTOPUS)
// available
DEFINE_CEPH_FEATURE_RETIRED(45, 1, OSD_SET_ALLOC_HINT, JEWEL, LUMINOUS)
// available
DEFINE_CEPH_FEATURE(46, 1, OSD_FADVISE_FLAGS)
DEFINE_CEPH_FEATURE_RETIRED(46, 1, OSD_REPOP, JEWEL, LUMINOUS) // overlap
DEFINE_CEPH_FEATURE_RETIRED(46, 1, OSD_OBJECT_DIGEST, JEWEL, LUMINOUS) // overlap
DEFINE_CEPH_FEATURE_RETIRED(46, 1, OSD_TRANSACTION_MAY_LAYOUT, JEWEL, LUMINOUS) // overlap
DEFINE_CEPH_FEATURE(47, 1, MDS_QUOTA) // 4.17
DEFINE_CEPH_FEATURE(48, 1, CRUSH_V4) // 4.1
DEFINE_CEPH_FEATURE_RETIRED(49, 1, OSD_MIN_SIZE_RECOVERY, JEWEL, LUMINOUS)
DEFINE_CEPH_FEATURE_RETIRED(49, 1, OSD_PROXY_FEATURES, JEWEL, LUMINOUS) // overlap
// available
DEFINE_CEPH_FEATURE_RETIRED(50, 1, MON_METADATA, MIMIC, OCTOPUS)
// available
DEFINE_CEPH_FEATURE_RETIRED(51, 1, OSD_BITWISE_HOBJ_SORT, MIMIC, OCTOPUS)
// available
DEFINE_CEPH_FEATURE_RETIRED(52, 1, OSD_PROXY_WRITE_FEATURES, MIMIC, OCTOPUS)
// available
DEFINE_CEPH_FEATURE_RETIRED(53, 1, ERASURE_CODE_PLUGINS_V3, MIMIC, OCTOPUS)
// available
DEFINE_CEPH_FEATURE_RETIRED(54, 1, OSD_HITSET_GMT, MIMIC, OCTOPUS)
// available
DEFINE_CEPH_FEATURE_RETIRED(55, 1, HAMMER_0_94_4, MIMIC, OCTOPUS)
// available
DEFINE_CEPH_FEATURE(56, 1, NEW_OSDOP_ENCODING) // 4.13 (for pg_pool_t >= v25)
DEFINE_CEPH_FEATURE(57, 1, MON_STATEFUL_SUB) // 4.13
DEFINE_CEPH_FEATURE_RETIRED(57, 1, MON_ROUTE_OSDMAP, MIMIC, OCTOPUS) // overlap
DEFINE_CEPH_FEATURE(57, 1, SERVER_JEWEL) // overlap
DEFINE_CEPH_FEATURE(58, 1, CRUSH_TUNABLES5) // 4.5
DEFINE_CEPH_FEATURE(58, 1, NEW_OSDOPREPLY_ENCODING) // overlap
DEFINE_CEPH_FEATURE(58, 1, FS_FILE_LAYOUT_V2) // overlap
DEFINE_CEPH_FEATURE(59, 1, FS_BTIME)
DEFINE_CEPH_FEATURE(59, 1, FS_CHANGE_ATTR) // overlap
DEFINE_CEPH_FEATURE(59, 1, MSG_ADDR2) // overlap
DEFINE_CEPH_FEATURE(60, 1, OSD_RECOVERY_DELETES) // *do not share this bit*
DEFINE_CEPH_FEATURE(61, 1, CEPHX_V2) // 4.19, *do not share this bit*
DEFINE_CEPH_FEATURE(62, 1, RESERVED) // do not use; used as a sentinel
DEFINE_CEPH_FEATURE_RETIRED(63, 1, RESERVED_BROKEN, LUMINOUS, QUINCY) // client-facing
// available
/*
* Features supported. Should be everything above.
*/
#define CEPH_FEATURES_ALL \
(CEPH_FEATURE_UID | \
CEPH_FEATURE_NOSRCADDR | \
CEPH_FEATURE_FLOCK | \
CEPH_FEATURE_SUBSCRIBE2 | \
CEPH_FEATURE_MONNAMES | \
CEPH_FEATURE_RECONNECT_SEQ | \
CEPH_FEATURE_DIRLAYOUTHASH | \
CEPH_FEATURE_OBJECTLOCATOR | \
CEPH_FEATURE_PGID64 | \
CEPH_FEATURE_INCSUBOSDMAP | \
CEPH_FEATURE_PGPOOL3 | \
CEPH_FEATURE_OSDREPLYMUX | \
CEPH_FEATURE_OSDENC | \
CEPH_FEATURE_MONENC | \
CEPH_FEATURE_CRUSH_TUNABLES | \
CEPH_FEATURE_MSG_AUTH | \
CEPH_FEATURE_CRUSH_TUNABLES2 | \
CEPH_FEATURE_CREATEPOOLID | \
CEPH_FEATURE_REPLY_CREATE_INODE | \
CEPH_FEATURE_MDSENC | \
CEPH_FEATURE_OSDHASHPSPOOL | \
CEPH_FEATURE_NEW_OSDOP_ENCODING | \
CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \
CEPH_FEATURE_OSD_CACHEPOOL | \
CEPH_FEATURE_CRUSH_V2 | \
CEPH_FEATURE_EXPORT_PEER | \
CEPH_FEATURE_OSDMAP_ENC | \
CEPH_FEATURE_MDS_INLINE_DATA | \
CEPH_FEATURE_CRUSH_TUNABLES3 | \
CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \
CEPH_FEATURE_MSGR_KEEPALIVE2 | \
CEPH_FEATURE_OSD_POOLRESEND | \
CEPH_FEATURE_OSD_FADVISE_FLAGS | \
CEPH_FEATURE_MDS_QUOTA | \
CEPH_FEATURE_CRUSH_V4 | \
CEPH_FEATURE_MON_STATEFUL_SUB | \
CEPH_FEATURE_CRUSH_TUNABLES5 | \
CEPH_FEATURE_SERVER_JEWEL | \
CEPH_FEATURE_FS_FILE_LAYOUT_V2 | \
CEPH_FEATURE_SERVER_KRAKEN | \
CEPH_FEATURE_FS_BTIME | \
CEPH_FEATURE_FS_CHANGE_ATTR | \
CEPH_FEATURE_MSG_ADDR2 | \
CEPH_FEATURE_SERVER_LUMINOUS | \
CEPH_FEATURE_RESEND_ON_SPLIT | \
CEPH_FEATURE_RADOS_BACKOFF | \
CEPH_FEATURE_OSD_RECOVERY_DELETES | \
CEPH_FEATURE_SERVER_MIMIC | \
CEPH_FEATURE_RECOVERY_RESERVATION_2 | \
CEPH_FEATURE_SERVER_NAUTILUS | \
CEPH_FEATURE_CEPHX_V2 | \
CEPH_FEATURE_OSD_PGLOG_HARDLIMIT | \
CEPH_FEATUREMASK_SERVER_OCTOPUS | \
CEPH_FEATUREMASK_STRETCH_MODE | \
CEPH_FEATUREMASK_OSD_REPOP_MLCOD | \
CEPH_FEATUREMASK_SERVER_PACIFIC | \
CEPH_FEATURE_OSD_FIXED_COLLECTION_LIST | \
CEPH_FEATUREMASK_SERVER_QUINCY | \
CEPH_FEATURE_RANGE_BLOCKLIST | \
CEPH_FEATUREMASK_SERVER_REEF | \
0ULL)
#define CEPH_FEATURES_SUPPORTED_DEFAULT CEPH_FEATURES_ALL
/*
* crush related features
*/
#define CEPH_FEATURES_CRUSH \
(CEPH_FEATURE_CRUSH_TUNABLES | \
CEPH_FEATURE_CRUSH_TUNABLES2 | \
CEPH_FEATURE_CRUSH_TUNABLES3 | \
CEPH_FEATURE_CRUSH_TUNABLES5 | \
CEPH_FEATURE_CRUSH_V2 | \
CEPH_FEATURE_CRUSH_V4 | \
CEPH_FEATUREMASK_CRUSH_CHOOSE_ARGS)
/*
* make sure we don't try to use the reserved features
*/
// Pre-C++11-style compile-time assertion: when x is zero/false the
// array size evaluates to -1, which fails to compile.
#define CEPH_STATIC_ASSERT(x) (void)(sizeof(int[((x)==0) ? -1 : 0]))
static inline void ____build_time_check_for_reserved_bits(void) {
// fails to compile if CEPH_FEATURES_ALL ever includes the RESERVED bit
CEPH_STATIC_ASSERT((CEPH_FEATURES_ALL & CEPH_FEATURE_RESERVED) == 0);
}
| 11,605 | 40.302491 | 92 | h |
null | ceph-main/src/include/ceph_frag.h | #ifndef FS_CEPH_FRAG_H
#define FS_CEPH_FRAG_H
/*
* "Frags" are a way to describe a subset of a 32-bit number space,
* using a mask and a value to match against that mask. Any given frag
* (subset of the number space) can be partitioned into 2^n sub-frags.
*
* Frags are encoded into a 32-bit word:
* 8 upper bits = "bits"
* 24 lower bits = "value"
* (We could go to 5+27 bits, but who cares.)
*
* We use the _most_ significant bits of the 24 bit value. This makes
* values logically sort.
*
* Unfortunately, because the "bits" field is still in the high bits, we
* can't sort encoded frags numerically. However, it does allow you
* to feed encoded frags as values into frag_contains_value.
*/
// Build a frag from split depth `b` and value `v`; bits of v below the
// mask are cleared so the encoding is canonical.
static inline __u32 ceph_frag_make(__u32 b, __u32 v)
{
return (b << 24) |
(v & (0xffffffu << (24-b)) & 0xffffffu);
}
// Number of significant (mask) bits, stored in the top byte.
static inline __u32 ceph_frag_bits(__u32 f)
{
return f >> 24;
}
// The 24-bit value portion.
static inline __u32 ceph_frag_value(__u32 f)
{
return f & 0xffffffu;
}
// Mask selecting the significant high bits of the 24-bit value space.
static inline __u32 ceph_frag_mask(__u32 f)
{
return (0xffffffu << (24-ceph_frag_bits(f))) & 0xffffffu;
}
// Count of low (don't-care) bits below the mask.
static inline __u32 ceph_frag_mask_shift(__u32 f)
{
return 24 - ceph_frag_bits(f);
}
// Does value v (in the 24-bit space) fall inside frag f?
static inline int ceph_frag_contains_value(__u32 f, __u32 v)
{
return (v & ceph_frag_mask(f)) == ceph_frag_value(f);
}
static inline int ceph_frag_contains_frag(__u32 f, __u32 sub)
{
/* is sub as specific as us, and contained by us? */
return ceph_frag_bits(sub) >= ceph_frag_bits(f) &&
(ceph_frag_value(sub) & ceph_frag_mask(f)) == ceph_frag_value(f);
}
// The frag one split level up (one fewer significant bit).
static inline __u32 ceph_frag_parent(__u32 f)
{
return ceph_frag_make(ceph_frag_bits(f) - 1,
ceph_frag_value(f) & (ceph_frag_mask(f) << 1));
}
// A left child has 0 in its lowest significant value bit.
static inline int ceph_frag_is_left_child(__u32 f)
{
return ceph_frag_bits(f) > 0 &&
(ceph_frag_value(f) & (0x1000000 >> ceph_frag_bits(f))) == 0;
}
// A right child has 1 in its lowest significant value bit.
// Fix: the previous code compared the masked bit against the literal 1,
// which only holds when ceph_frag_bits(f) == 24 (mask == 0x1); at any
// shallower depth the masked result is a larger power of two, so right
// children were misreported as 0. Test the bit for non-zero instead,
// mirroring the == 0 test in ceph_frag_is_left_child().
static inline int ceph_frag_is_right_child(__u32 f)
{
  return ceph_frag_bits(f) > 0 &&
    (ceph_frag_value(f) & (0x1000000 >> ceph_frag_bits(f))) != 0;
}
// The other child of this frag's parent (flip the lowest significant bit).
static inline __u32 ceph_frag_sibling(__u32 f)
{
return ceph_frag_make(ceph_frag_bits(f),
ceph_frag_value(f) ^ (0x1000000 >> ceph_frag_bits(f)));
}
// Children take one more significant bit: left keeps the value,
// right sets the newly significant bit.
static inline __u32 ceph_frag_left_child(__u32 f)
{
return ceph_frag_make(ceph_frag_bits(f)+1, ceph_frag_value(f));
}
static inline __u32 ceph_frag_right_child(__u32 f)
{
return ceph_frag_make(ceph_frag_bits(f)+1,
ceph_frag_value(f) | (0x1000000 >> (1+ceph_frag_bits(f))));
}
// Descendant number `i` of f after splitting `by` further levels.
static inline __u32 ceph_frag_make_child(__u32 f, int by, int i)
{
int newbits = ceph_frag_bits(f) + by;
return ceph_frag_make(newbits,
ceph_frag_value(f) | (i << (24 - newbits)));
}
// Leftmost frag at this depth: all significant value bits are 0.
static inline int ceph_frag_is_leftmost(__u32 f)
{
return ceph_frag_value(f) == 0;
}
// Rightmost frag at this depth: all significant value bits are 1.
static inline int ceph_frag_is_rightmost(__u32 f)
{
return ceph_frag_value(f) == ceph_frag_mask(f);
}
// Next frag at the same depth, in ascending value order.
static inline __u32 ceph_frag_next(__u32 f)
{
return ceph_frag_make(ceph_frag_bits(f),
ceph_frag_value(f) + (0x1000000 >> ceph_frag_bits(f)));
}
/*
* comparator to sort frags logically, as when traversing the
* number space in ascending order...
*/
int ceph_frag_compare(__u32 a, __u32 b);
#endif
| 3,114 | 27.318182 | 73 | h |
null | ceph-main/src/include/ceph_fs.h | /*
* ceph_fs.h - Ceph constants and data types to share between kernel and
* user space.
*
* Most types in this file are defined as little-endian, and are
* primarily intended to describe data structures that pass over the
* wire or that are stored on disk.
*
* LGPL-2.1 or LGPL-3.0
*/
#ifndef CEPH_FS_H
#define CEPH_FS_H
#include "msgr.h"
#include "rados.h"
#include "include/encoding.h"
#include "include/denc.h"
/*
* The data structures defined here are shared between Linux kernel and
* user space. Also, those data structures are maintained always in
* little-endian byte order, even on big-endian systems. This is handled
* differently in kernel vs. user space. For use as kernel headers, the
* little-endian fields need to use the __le16/__le32/__le64 types. These
* are markers that indicate endian conversion routines must be used
* whenever such fields are accessed, which can be verified by checker
* tools like "sparse". For use as user-space headers, the little-endian
* fields instead use types ceph_le16/ceph_le32/ceph_le64, which are C++
* classes that implement automatic endian conversion on every access.
* To still allow for header sharing, this file uses the __le types, but
* redefines those to the ceph_ types when compiled in user space.
*/
#ifndef __KERNEL__
#include "byteorder.h"
#define __le16 ceph_le16
#define __le32 ceph_le32
#define __le64 ceph_le64
#endif
/*
* subprotocol versions. when specific messages types or high-level
* protocols change, bump the affected components. we keep rev
* internal cluster protocols separately from the public,
* client-facing protocol.
*/
#define CEPH_OSDC_PROTOCOL 24 /* server/client */
#define CEPH_MDSC_PROTOCOL 32 /* server/client */
#define CEPH_MONC_PROTOCOL 15 /* server/client */
#define CEPH_INO_ROOT 1
/*
* hidden .ceph dir, which is no longer created but
* recognised in existing filesystems so that we
* don't try to fragment it.
*/
#define CEPH_INO_CEPH 2
#define CEPH_INO_GLOBAL_SNAPREALM 3
#define CEPH_INO_LOST_AND_FOUND 4 /* reserved ino for use in recovery */
/* arbitrary limit on max # of monitors (cluster of 3 is typical) */
#define CEPH_MAX_MON 31
/*
* ceph_file_layout - describe data layout for a file/inode
*/
// On-wire/on-disk layout descriptor; all multi-byte fields are
// little-endian (see the __le type note at the top of this header).
struct ceph_file_layout {
/* file -> object mapping */
__le32 fl_stripe_unit; /* stripe unit, in bytes. must be multiple
of page size. */
__le32 fl_stripe_count; /* over this many objects */
__le32 fl_object_size; /* until objects are this big, then move to
new objects */
__le32 fl_cas_hash; /* UNUSED. 0 = none; 1 = sha256 */
/* pg -> disk layout */
__le32 fl_object_stripe_unit; /* UNUSED. for per-object parity, if any */
/* object -> pg layout */
__le32 fl_unused; /* unused; used to be preferred primary for pg (-1 for none) */
__le32 fl_pg_pool; /* namespace, crush rule, rep level */
} __attribute__ ((packed));
#define CEPH_MIN_STRIPE_UNIT 65536
// Directory layout: only the hash-function field is meaningful; the
// remaining fields pad the struct and are unused.
struct ceph_dir_layout {
__u8 dl_dir_hash; /* see ceph_hash.h for ids */
__u8 dl_unused1;
__u16 dl_unused2;
__u32 dl_unused3;
} __attribute__ ((packed));
/* crypto algorithms */
#define CEPH_CRYPTO_NONE 0x0
#define CEPH_CRYPTO_AES 0x1
#define CEPH_AES_IV "cephsageyudagreg"
/* security/authentication protocols */
#define CEPH_AUTH_UNKNOWN 0x0
#define CEPH_AUTH_NONE 0x1
#define CEPH_AUTH_CEPHX 0x2
/* msgr2 protocol modes */
#define CEPH_CON_MODE_UNKNOWN 0x0
#define CEPH_CON_MODE_CRC 0x1
#define CEPH_CON_MODE_SECURE 0x2
extern const char *ceph_con_mode_name(int con_mode);
/* For options with "_", like: GSS_GSS
which means: Mode/Protocol to validate "authentication_authorization",
where:
- Authentication: Verifying the identity of an entity.
- Authorization: Verifying that an authenticated entity has
the right to access a particular resource.
*/
#define CEPH_AUTH_GSS 0x4
#define CEPH_AUTH_GSS_GSS CEPH_AUTH_GSS
#define CEPH_AUTH_UID_DEFAULT ((__u64) -1)
/*********************************************
* message layer
*/
/*
* message types
*/
/* misc */
#define CEPH_MSG_SHUTDOWN 1
#define CEPH_MSG_PING 2
/* client <-> monitor */
#define CEPH_MSG_MON_MAP 4
#define CEPH_MSG_MON_GET_MAP 5
#define CEPH_MSG_MON_GET_OSDMAP 6
#define CEPH_MSG_MON_METADATA 7
#define CEPH_MSG_STATFS 13
#define CEPH_MSG_STATFS_REPLY 14
#define CEPH_MSG_MON_SUBSCRIBE 15
#define CEPH_MSG_MON_SUBSCRIBE_ACK 16
#define CEPH_MSG_AUTH 17
#define CEPH_MSG_AUTH_REPLY 18
#define CEPH_MSG_MON_GET_VERSION 19
#define CEPH_MSG_MON_GET_VERSION_REPLY 20
/* client <-> mds */
#define CEPH_MSG_MDS_MAP 21
#define CEPH_MSG_CLIENT_SESSION 22
#define CEPH_MSG_CLIENT_RECONNECT 23
#define CEPH_MSG_CLIENT_REQUEST 24
#define CEPH_MSG_CLIENT_REQUEST_FORWARD 25
#define CEPH_MSG_CLIENT_REPLY 26
#define CEPH_MSG_CLIENT_RECLAIM 27
#define CEPH_MSG_CLIENT_RECLAIM_REPLY 28
#define CEPH_MSG_CLIENT_METRICS 29
#define CEPH_MSG_CLIENT_CAPS 0x310
#define CEPH_MSG_CLIENT_LEASE 0x311
#define CEPH_MSG_CLIENT_SNAP 0x312
#define CEPH_MSG_CLIENT_CAPRELEASE 0x313
#define CEPH_MSG_CLIENT_QUOTA 0x314
/* pool ops */
#define CEPH_MSG_POOLOP_REPLY 48
#define CEPH_MSG_POOLOP 49
/* osd */
#define CEPH_MSG_OSD_MAP 41
#define CEPH_MSG_OSD_OP 42
#define CEPH_MSG_OSD_OPREPLY 43
#define CEPH_MSG_WATCH_NOTIFY 44
#define CEPH_MSG_OSD_BACKOFF 61
/* FSMap subscribers (see all MDS clusters at once) */
#define CEPH_MSG_FS_MAP 45
/* FSMapUser subscribers (get MDS clusters name->ID mapping) */
#define CEPH_MSG_FS_MAP_USER 103
/* watch-notify operations */
enum {
CEPH_WATCH_EVENT_NOTIFY = 1, /* notifying watcher */
CEPH_WATCH_EVENT_NOTIFY_COMPLETE = 2, /* notifier notified when done */
CEPH_WATCH_EVENT_DISCONNECT = 3, /* we were disconnected */
};
const char *ceph_watch_event_name(int o);
/* pool operations */
enum {
POOL_OP_CREATE = 0x01,
POOL_OP_DELETE = 0x02,
POOL_OP_AUID_CHANGE = 0x03,
POOL_OP_CREATE_SNAP = 0x11,
POOL_OP_DELETE_SNAP = 0x12,
POOL_OP_CREATE_UNMANAGED_SNAP = 0x21,
POOL_OP_DELETE_UNMANAGED_SNAP = 0x22,
};
// Header common to client->mon requests (little-endian wire format).
struct ceph_mon_request_header {
__le64 have_version;
__le16 session_mon;
__le64 session_mon_tid;
} __attribute__ ((packed));
// statfs request: identifies the target cluster by fsid.
struct ceph_mon_statfs {
struct ceph_mon_request_header monhdr;
struct ceph_fsid fsid;
} __attribute__ ((packed));
// Cluster-wide usage totals (in KB) plus an object count.
struct ceph_statfs {
__le64 kb, kb_used, kb_avail;
__le64 num_objects;
} __attribute__ ((packed));
struct ceph_mon_statfs_reply {
struct ceph_fsid fsid;
__le64 version;
struct ceph_statfs st;
} __attribute__ ((packed));
const char *ceph_pool_op_name(int op);
struct ceph_mon_poolop {
struct ceph_mon_request_header monhdr;
struct ceph_fsid fsid;
__le32 pool;
__le32 op;
__le64 __old_auid; // obsolete
__le64 snapid;
__le32 name_len;
} __attribute__ ((packed));
struct ceph_mon_poolop_reply {
struct ceph_mon_request_header monhdr;
struct ceph_fsid fsid;
__le32 reply_code;
__le32 epoch;
char has_data;
char data[0];
} __attribute__ ((packed));
struct ceph_mon_unmanaged_snap {
__le64 snapid;
} __attribute__ ((packed));
struct ceph_osd_getmap {
struct ceph_mon_request_header monhdr;
struct ceph_fsid fsid;
__le32 start;
} __attribute__ ((packed));
struct ceph_mds_getmap {
struct ceph_mon_request_header monhdr;
struct ceph_fsid fsid;
} __attribute__ ((packed));
struct ceph_client_mount {
struct ceph_mon_request_header monhdr;
} __attribute__ ((packed));
#define CEPH_SUBSCRIBE_ONETIME 1 /* i want only 1 update after have */
struct ceph_mon_subscribe_item {
__le64 start;
__u8 flags;
} __attribute__ ((packed));
struct ceph_mon_subscribe_ack {
__le32 duration; /* seconds */
struct ceph_fsid fsid;
} __attribute__ ((packed));
/*
 * mdsmap flags
 */
#define CEPH_MDSMAP_NOT_JOINABLE (1<<0) /* standbys cannot join */
#define CEPH_MDSMAP_DOWN (CEPH_MDSMAP_NOT_JOINABLE) /* backwards compat */
#define CEPH_MDSMAP_ALLOW_SNAPS (1<<1) /* cluster allowed to create snapshots */
/* deprecated #define CEPH_MDSMAP_ALLOW_MULTIMDS (1<<2) cluster allowed to have >1 active MDS */
/* deprecated #define CEPH_MDSMAP_ALLOW_DIRFRAGS (1<<3) cluster allowed to fragment directories */
#define CEPH_MDSMAP_ALLOW_MULTIMDS_SNAPS (1<<4) /* cluster allowed to enable MULTIMDS
and SNAPS at the same time */
#define CEPH_MDSMAP_ALLOW_STANDBY_REPLAY (1<<5) /* cluster allowed to enable standby-replay */
#define CEPH_MDSMAP_REFUSE_CLIENT_SESSION (1<<6) /* cluster allowed to refuse client session
request */
#define CEPH_MDSMAP_DEFAULTS (CEPH_MDSMAP_ALLOW_SNAPS | \
CEPH_MDSMAP_ALLOW_MULTIMDS_SNAPS)
/*
* mds states
* > 0 -> in
* <= 0 -> out
*/
#define CEPH_MDS_STATE_DNE 0 /* down, does not exist. */
#define CEPH_MDS_STATE_STOPPED -1 /* down, once existed, but no subtrees.
empty log. */
#define CEPH_MDS_STATE_BOOT -4 /* up, boot announcement. */
#define CEPH_MDS_STATE_STANDBY -5 /* up, idle. waiting for assignment. */
#define CEPH_MDS_STATE_CREATING -6 /* up, creating MDS instance. */
#define CEPH_MDS_STATE_STARTING -7 /* up, starting previously stopped mds */
#define CEPH_MDS_STATE_STANDBY_REPLAY -8 /* up, tailing active node's journal */
#define CEPH_MDS_STATE_REPLAYONCE -9 /* Legacy, unused */
#define CEPH_MDS_STATE_NULL -10
#define CEPH_MDS_STATE_REPLAY 8 /* up, replaying journal. */
#define CEPH_MDS_STATE_RESOLVE 9 /* up, disambiguating distributed
operations (import, rename, etc.) */
#define CEPH_MDS_STATE_RECONNECT 10 /* up, reconnect to clients */
#define CEPH_MDS_STATE_REJOIN 11 /* up, rejoining distributed cache */
#define CEPH_MDS_STATE_CLIENTREPLAY 12 /* up, replaying client operations */
#define CEPH_MDS_STATE_ACTIVE 13 /* up, active */
#define CEPH_MDS_STATE_STOPPING 14 /* up, but exporting metadata */
#define CEPH_MDS_STATE_DAMAGED 15 /* rank not replayable, need repair */
extern const char *ceph_mds_state_name(int s);
/*
* metadata lock types.
* - these are bitmasks.. we can compose them
* - they also define the lock ordering by the MDS
* - a few of these are internal to the mds
*/
#define CEPH_LOCK_DN (1 << 0)
#define CEPH_LOCK_DVERSION (1 << 1)
#define CEPH_LOCK_ISNAP (1 << 4) /* snapshot lock. MDS internal */
#define CEPH_LOCK_IPOLICY (1 << 5) /* policy lock on dirs. MDS internal */
#define CEPH_LOCK_IFILE (1 << 6)
#define CEPH_LOCK_INEST (1 << 7) /* mds internal */
#define CEPH_LOCK_IDFT (1 << 8) /* dir frag tree */
#define CEPH_LOCK_IAUTH (1 << 9)
#define CEPH_LOCK_ILINK (1 << 10)
#define CEPH_LOCK_IXATTR (1 << 11)
#define CEPH_LOCK_IFLOCK (1 << 12) /* advisory file locks */
#define CEPH_LOCK_IVERSION (1 << 13) /* mds internal */
#define CEPH_LOCK_IFIRST CEPH_LOCK_ISNAP
/* client_session ops */
enum {
	CEPH_SESSION_REQUEST_OPEN,
	CEPH_SESSION_OPEN,
	CEPH_SESSION_REQUEST_CLOSE,
	CEPH_SESSION_CLOSE,
	CEPH_SESSION_REQUEST_RENEWCAPS,
	CEPH_SESSION_RENEWCAPS,
	CEPH_SESSION_STALE,
	CEPH_SESSION_RECALL_STATE,
	CEPH_SESSION_FLUSHMSG,
	CEPH_SESSION_FLUSHMSG_ACK,
	CEPH_SESSION_FORCE_RO,
	// A response to REQUEST_OPEN indicating that the client should
	// permanently desist from contacting the MDS
	CEPH_SESSION_REJECT,
	CEPH_SESSION_REQUEST_FLUSH_MDLOG
};

// flags for state reclaim
#define CEPH_RECLAIM_RESET	1

extern const char *ceph_session_op_name(int op);

/* client <-> mds session message body */
struct ceph_mds_session_head {
	__le32 op;                    /* CEPH_SESSION_* */
	__le64 seq;
	struct ceph_timespec stamp;
	__le32 max_caps, max_leases;
} __attribute__ ((packed));
/* client_request */
/*
 * metadata ops.
 *  & 0x001000 -> write op
 *  & 0x010000 -> follow symlink (e.g. stat(), not lstat()).
 *  & 0x100000 -> use weird ino/path trace
 */
#define CEPH_MDS_OP_WRITE        0x001000
enum {
	CEPH_MDS_OP_LOOKUP     = 0x00100,
	CEPH_MDS_OP_GETATTR    = 0x00101,
	CEPH_MDS_OP_LOOKUPHASH = 0x00102,
	CEPH_MDS_OP_LOOKUPPARENT = 0x00103,
	CEPH_MDS_OP_LOOKUPINO  = 0x00104,
	CEPH_MDS_OP_LOOKUPNAME = 0x00105,
	CEPH_MDS_OP_GETVXATTR  = 0x00106,
	CEPH_MDS_OP_DUMMY      = 0x00107,

	CEPH_MDS_OP_SETXATTR   = 0x01105,
	CEPH_MDS_OP_RMXATTR    = 0x01106,
	CEPH_MDS_OP_SETLAYOUT  = 0x01107,
	CEPH_MDS_OP_SETATTR    = 0x01108,
	CEPH_MDS_OP_SETFILELOCK= 0x01109,
	CEPH_MDS_OP_GETFILELOCK= 0x00110,
	CEPH_MDS_OP_SETDIRLAYOUT=0x0110a,

	CEPH_MDS_OP_MKNOD      = 0x01201,
	CEPH_MDS_OP_LINK       = 0x01202,
	CEPH_MDS_OP_UNLINK     = 0x01203,
	CEPH_MDS_OP_RENAME     = 0x01204,
	CEPH_MDS_OP_MKDIR      = 0x01220,
	CEPH_MDS_OP_RMDIR      = 0x01221,
	CEPH_MDS_OP_SYMLINK    = 0x01222,

	CEPH_MDS_OP_CREATE     = 0x01301,
	CEPH_MDS_OP_OPEN       = 0x00302,
	CEPH_MDS_OP_READDIR    = 0x00305,

	CEPH_MDS_OP_LOOKUPSNAP = 0x00400,
	CEPH_MDS_OP_MKSNAP     = 0x01400,
	CEPH_MDS_OP_RMSNAP     = 0x01401,
	CEPH_MDS_OP_LSSNAP     = 0x00402,
	CEPH_MDS_OP_RENAMESNAP = 0x01403,
	CEPH_MDS_OP_READDIR_SNAPDIFF   = 0x01404,

	// internal op
	CEPH_MDS_OP_FRAGMENTDIR= 0x01500,
	CEPH_MDS_OP_EXPORTDIR  = 0x01501,
	CEPH_MDS_OP_FLUSH      = 0x01502,
	CEPH_MDS_OP_ENQUEUE_SCRUB  = 0x01503,
	CEPH_MDS_OP_REPAIR_FRAGSTATS = 0x01504,
	CEPH_MDS_OP_REPAIR_INODESTATS = 0x01505,
	CEPH_MDS_OP_RDLOCK_FRAGSSTATS = 0x01507
};

extern const char *ceph_mds_op_name(int op);

// setattr mask is an int
#ifndef CEPH_SETATTR_MODE
#define CEPH_SETATTR_MODE         (1 << 0)
#define CEPH_SETATTR_UID          (1 << 1)
#define CEPH_SETATTR_GID          (1 << 2)
#define CEPH_SETATTR_MTIME        (1 << 3)
#define CEPH_SETATTR_ATIME        (1 << 4)
#define CEPH_SETATTR_SIZE         (1 << 5)
#define CEPH_SETATTR_CTIME        (1 << 6)
#define CEPH_SETATTR_MTIME_NOW    (1 << 7)
#define CEPH_SETATTR_ATIME_NOW    (1 << 8)
#define CEPH_SETATTR_BTIME        (1 << 9)
#define CEPH_SETATTR_KILL_SGUID   (1 << 10)
#define CEPH_SETATTR_FSCRYPT_AUTH (1 << 11)
#define CEPH_SETATTR_FSCRYPT_FILE (1 << 12)
#define CEPH_SETATTR_KILL_SUID    (1 << 13)
#define CEPH_SETATTR_KILL_SGID    (1 << 14)
#endif

/*
 * open request flags
 */
#define CEPH_O_RDONLY          00000000
#define CEPH_O_WRONLY          00000001
#define CEPH_O_RDWR            00000002
#define CEPH_O_CREAT           00000100
#define CEPH_O_EXCL            00000200
#define CEPH_O_TRUNC           00001000
#define CEPH_O_LAZY            00020000
#define CEPH_O_DIRECTORY       00200000
#define CEPH_O_NOFOLLOW        00400000

int ceph_flags_sys2wire(int flags);

/*
 * Ceph setxattr request flags.
 */
#define CEPH_XATTR_CREATE  (1 << 0)
#define CEPH_XATTR_REPLACE (1 << 1)
#define CEPH_XATTR_REMOVE  (1 << 31)

/*
 * readdir/readdir_snapdiff request flags;
 */
#define CEPH_READDIR_REPLY_BITFLAGS	(1<<0)

/*
 * readdir/readdir_snapdiff reply flags.
 */
#define CEPH_READDIR_FRAG_END		(1<<0)
#define CEPH_READDIR_FRAG_COMPLETE	(1<<8)
#define CEPH_READDIR_HASH_ORDER		(1<<9)
#define CEPH_READDIR_OFFSET_HASH	(1<<10)
/* Note that this is embedded within ceph_mds_request_head_legacy. */
union ceph_mds_request_args_legacy {
	struct {
		__le32 mask;            /* CEPH_CAP_* */
	} __attribute__ ((packed)) getattr;
	struct {
		__le32 mode;
		__le32 uid;
		__le32 gid;
		struct ceph_timespec mtime;
		struct ceph_timespec atime;
		__le64 size, old_size;  /* old_size needed by truncate */
		__le32 mask;            /* CEPH_SETATTR_* */
	} __attribute__ ((packed)) setattr;
	struct {
		__le32 frag;            /* which dir fragment */
		__le32 max_entries;     /* how many dentries to grab */
		__le32 max_bytes;
		__le16 flags;
		__le32 offset_hash;
	} __attribute__ ((packed)) readdir;
	struct {
		__le32 mode;
		__le32 rdev;
	} __attribute__ ((packed)) mknod;
	struct {
		__le32 mode;
	} __attribute__ ((packed)) mkdir;
	struct {
		__le32 flags;
		__le32 mode;
		__le32 stripe_unit;     /* layout for newly created file */
		__le32 stripe_count;    /* ... */
		__le32 object_size;
		__le32 pool;            /* if >= 0 and CREATEPOOLID feature */
		__le32 mask;            /* CEPH_CAP_* */
		__le64 old_size;        /* if O_TRUNC */
	} __attribute__ ((packed)) open;
	struct {
		__le32 flags;
		__le32 osdmap_epoch;    /* use for set file/dir layout */
	} __attribute__ ((packed)) setxattr;
	struct {
		struct ceph_file_layout layout;
	} __attribute__ ((packed)) setlayout;
	struct {
		__u8 rule;              /* currently fcntl or flock */
		__u8 type;              /* shared, exclusive, remove*/
		__le64 owner;           /* who requests/holds the lock */
		__le64 pid;             /* process id requesting the lock */
		__le64 start;           /* initial location to lock */
		__le64 length;          /* num bytes to lock from start */
		__u8 wait;              /* will caller wait for lock to become available? */
	} __attribute__ ((packed)) filelock_change;
} __attribute__ ((packed));
#define CEPH_MDS_FLAG_REPLAY		1  /* this is a replayed op */
#define CEPH_MDS_FLAG_WANT_DENTRY	2  /* want dentry in reply */
#define CEPH_MDS_FLAG_ASYNC		4  /* request is async */

/*
 * Pre-versioning request head wire layout; kept for talking to old
 * peers.  ceph_mds_request_head embeds this layout verbatim starting
 * at its oldest_client_tid field.
 */
struct ceph_mds_request_head_legacy {
	__le64 oldest_client_tid;
	__le32 mdsmap_epoch;        /* on client */
	__le32 flags;               /* CEPH_MDS_FLAG_* */
	__u8 num_retry, num_fwd;    /* count retry, fwd attempts */
	__le16 num_releases;        /* # include cap/lease release records */
	__le32 op;                  /* mds op code */
	__le32 caller_uid, caller_gid;
	__le64 ino;                 /* use this ino for openc, mkdir, mknod,
				       etc. (if replaying) */
	union ceph_mds_request_args_legacy args;
} __attribute__ ((packed));
/*
 * Note that this is embedded within ceph_mds_request_head. Also, compatibility
 * with the ceph_mds_request_args_legacy must be maintained!
 */
union ceph_mds_request_args {
	struct {
		__le32 mask;            /* CEPH_CAP_* */
	} __attribute__ ((packed)) getattr;
	struct {
		__le32 mode;
		__le32 uid;
		__le32 gid;
		struct ceph_timespec mtime;
		struct ceph_timespec atime;
		__le64 size, old_size;  /* old_size needed by truncate */
		__le32 mask;            /* CEPH_SETATTR_* */
		struct ceph_timespec btime;
	} __attribute__ ((packed)) setattr;
	struct {
		__le32 frag;            /* which dir fragment */
		__le32 max_entries;     /* how many dentries to grab */
		__le32 max_bytes;
		__le16 flags;
		__le32 offset_hash;
	} __attribute__ ((packed)) readdir;
	struct {
		__le32 mode;
		__le32 rdev;
	} __attribute__ ((packed)) mknod;
	struct {
		__le32 mode;
	} __attribute__ ((packed)) mkdir;
	struct {
		__le32 flags;
		__le32 mode;
		__le32 stripe_unit;     /* layout for newly created file */
		__le32 stripe_count;    /* ... */
		__le32 object_size;
		__le32 pool;            /* if >= 0 and CREATEPOOLID feature */
		__le32 mask;            /* CEPH_CAP_* */
		__le64 old_size;        /* if O_TRUNC */
	} __attribute__ ((packed)) open;
	struct {
		__le32 flags;
		__le32 osdmap_epoch;    /* use for set file/dir layout */
	} __attribute__ ((packed)) setxattr;
	struct {
		struct ceph_file_layout layout;
	} __attribute__ ((packed)) setlayout;
	struct {
		__u8 rule;              /* currently fcntl or flock */
		__u8 type;              /* shared, exclusive, remove*/
		__le64 owner;           /* who requests/holds the lock */
		__le64 pid;             /* process id requesting the lock */
		__le64 start;           /* initial location to lock */
		__le64 length;          /* num bytes to lock from start */
		__u8 wait;              /* will caller wait for lock to become available? */
	} __attribute__ ((packed)) filelock_change;
	struct {
		__le32 mask;            /* CEPH_CAP_* */
		__le64 snapid;
		__le64 parent;
		__le32 hash;
	} __attribute__ ((packed)) lookupino;
	struct {
		__le32 frag;            /* which dir fragment */
		__le32 max_entries;     /* how many dentries to grab */
		__le32 max_bytes;
		__le16 flags;
		__le32 offset_hash;
		__le64 snap_other;
	} __attribute__ ((packed)) snapdiff;
} __attribute__ ((packed));
#define CEPH_MDS_REQUEST_HEAD_VERSION  2

/*
 * Note that any change to this structure must ensure that it is compatible
 * with ceph_mds_request_head_legacy.
 */
struct ceph_mds_request_head {
	__le16 version;
	__le64 oldest_client_tid;
	__le32 mdsmap_epoch;        /* on client */
	__le32 flags;               /* CEPH_MDS_FLAG_* */
	__u8 num_retry, num_fwd;    /* legacy count retry and fwd attempts */
	__le16 num_releases;        /* # include cap/lease release records */
	__le32 op;                  /* mds op code */
	__le32 caller_uid, caller_gid;
	__le64 ino;                 /* use this ino for openc, mkdir, mknod,
				       etc. (if replaying) */
	union ceph_mds_request_args args;

	__le32 ext_num_retry;       /* new count retry attempts */
	__le32 ext_num_fwd;         /* new count fwd attempts */
} __attribute__ ((packed));
/*
 * Encode a ceph_mds_request_head for the wire.
 *
 * The legacy layout carries only one-byte retry/forward counters, so
 * the 32-bit ext_num_retry/ext_num_fwd values are narrowed into those
 * bytes (values above 255 wrap) for old MDS daemons; the full 32-bit
 * counters are appended only when the peer understands the newer
 * encoding (!old_version).  The packed args union is appended raw to
 * preserve its exact wire layout.
 */
void inline encode(const struct ceph_mds_request_head& h, ceph::buffer::list& bl, bool old_version) {
  using ceph::encode;
  encode(h.version, bl);
  encode(h.oldest_client_tid, bl);
  encode(h.mdsmap_epoch, bl);
  encode(h.flags, bl);

  // For old MDS daemons: deliberate narrowing of the 32-bit counters
  // to the legacy one-byte fields.
  __u8 num_retry = __u32(h.ext_num_retry);
  __u8 num_fwd = __u32(h.ext_num_fwd);
  encode(num_retry, bl);
  encode(num_fwd, bl);

  encode(h.num_releases, bl);
  encode(h.op, bl);
  encode(h.caller_uid, bl);
  encode(h.caller_gid, bl);
  encode(h.ino, bl);
  bl.append((char*)&h.args, sizeof(h.args));  // raw copy of packed union
  if (!old_version) {
    encode(h.ext_num_retry, bl);
    encode(h.ext_num_fwd, bl);
  }
}
/*
 * Decode a ceph_mds_request_head from the wire.
 *
 * The legacy one-byte num_retry/num_fwd fields are always present.
 * The 32-bit ext_* counters exist only from struct version 2 on; for
 * older encodings they are synthesized from the legacy bytes so
 * callers can always read the ext_* fields.
 */
void inline decode(struct ceph_mds_request_head& h, ceph::buffer::list::const_iterator& bl) {
  using ceph::decode;
  decode(h.version, bl);
  decode(h.oldest_client_tid, bl);
  decode(h.mdsmap_epoch, bl);
  decode(h.flags, bl);
  decode(h.num_retry, bl);
  decode(h.num_fwd, bl);
  decode(h.num_releases, bl);
  decode(h.op, bl);
  decode(h.caller_uid, bl);
  decode(h.caller_gid, bl);
  decode(h.ino, bl);
  bl.copy(sizeof(h.args), (char*)&(h.args));  // raw copy of packed union
  if (h.version >= 2) {
    decode(h.ext_num_retry, bl);
    decode(h.ext_num_fwd, bl);
  } else {
    // old encoder: mirror the legacy 8-bit counters into the wide fields
    h.ext_num_retry = h.num_retry;
    h.ext_num_fwd = h.num_fwd;
  }
}
/* cap/lease release record */
struct ceph_mds_request_release {
	__le64 ino, cap_id;     /* ino and unique cap id */
	__le32 caps, wanted;    /* new issued, wanted */
	__le32 seq, issue_seq, mseq;
	__le32 dname_seq;       /* if releasing a dentry lease, a */
	__le32 dname_len;       /* string follows. */
} __attribute__ ((packed));
/*
 * Overwrite the legacy-layout portion of a full request head with the
 * contents of *legacy.  The legacy struct's layout lives verbatim in
 * ceph_mds_request_head starting at the oldest_client_tid field.
 */
static inline void
copy_from_legacy_head(struct ceph_mds_request_head *head,
		      struct ceph_mds_request_head_legacy *legacy)
{
	*(struct ceph_mds_request_head_legacy *)&head->oldest_client_tid =
		*legacy;
}
/*
 * Extract the legacy-layout portion of a full request head into
 * *legacy.  Mirror of copy_from_legacy_head: the legacy layout lives
 * verbatim at &head->oldest_client_tid.
 */
static inline void
copy_to_legacy_head(struct ceph_mds_request_head_legacy *legacy,
		    struct ceph_mds_request_head *head)
{
	*legacy =
		*(struct ceph_mds_request_head_legacy *)&head->oldest_client_tid;
}
/* client reply */
struct ceph_mds_reply_head {
	__le32 op;
	__le32 result;
	__le32 mdsmap_epoch;
	__u8 safe;                  /* true if committed to disk */
	__u8 is_dentry, is_target;  /* true if dentry, target inode records
				       are included with reply */
} __attribute__ ((packed));

/* one for each node split */
struct ceph_frag_tree_split {
	__le32 frag;                /* this frag splits... */
	__le32 by;                  /* ...by this many bits */
} __attribute__ ((packed));

struct ceph_frag_tree_head {
	__le32 nsplits;             /* num ceph_frag_tree_split records */
	struct ceph_frag_tree_split splits[];
} __attribute__ ((packed));

/* capability issue, for bundling with mds reply */
struct ceph_mds_reply_cap {
	__le32 caps, wanted;        /* caps issued, wanted */
	__le64 cap_id;
	__le32 seq, mseq;
	__le64 realm;               /* snap realm */
	__u8 flags;                 /* CEPH_CAP_FLAG_* */
} __attribute__ ((packed));

#define CEPH_CAP_FLAG_AUTH     (1 << 0)  /* cap is issued by auth mds */
#define CEPH_CAP_FLAG_RELEASE  (1 << 1)  /* ask client to release the cap */

/* reply_lease follows dname, and reply_inode */
struct ceph_mds_reply_lease {
	__le16 mask;                /* lease type(s) */
	__le32 duration_ms;         /* lease duration */
	__le32 seq;
} __attribute__ ((packed));

#define CEPH_LEASE_VALID        (1 | 2)  /* old and new bit values */
#define CEPH_LEASE_PRIMARY_LINK 4        /* primary linkage */

struct ceph_mds_reply_dirfrag {
	__le32 frag;                /* fragment */
	__le32 auth;                /* auth mds, if this is a delegation point */
	__le32 ndist;               /* number of mds' this is replicated on */
	__le32 dist[];
} __attribute__ ((packed));

/* lock rules: fcntl vs flock, plus interruptible variants */
#define CEPH_LOCK_FCNTL		1
#define CEPH_LOCK_FLOCK		2
#define CEPH_LOCK_FCNTL_INTR	3
#define CEPH_LOCK_FLOCK_INTR	4

/* lock types: shared, exclusive, or unlock */
#define CEPH_LOCK_SHARED	1
#define CEPH_LOCK_EXCL		2
#define CEPH_LOCK_UNLOCK	4

struct ceph_filelock {
	__le64 start;               /* file offset to start lock at */
	__le64 length;              /* num bytes to lock; 0 for all following start */
	__le64 client;              /* which client holds the lock */
	__le64 owner;               /* who requests/holds the lock */
	__le64 pid;                 /* process id holding the lock on the client */
	__u8 type;                  /* shared lock, exclusive lock, or unlock */
} __attribute__ ((packed));
/* file access modes */
#define CEPH_FILE_MODE_PIN       0
#define CEPH_FILE_MODE_RD        1
#define CEPH_FILE_MODE_WR        2
#define CEPH_FILE_MODE_RDWR      3  /* RD | WR */
#define CEPH_FILE_MODE_LAZY      4  /* lazy io */
#define CEPH_FILE_MODE_NUM       8  /* bc these are bit fields.. mostly */

int ceph_flags_to_mode(int flags);

/* inline data state */
#define CEPH_INLINE_NONE	((__u64)-1)
#define CEPH_INLINE_MAX_SIZE	CEPH_MIN_STRIPE_UNIT

/* capability bits */
#define CEPH_CAP_PIN         1  /* no specific capabilities beyond the pin */

/* generic cap bits */
/* note: these definitions are duplicated in mds/locks.c */
#define CEPH_CAP_GSHARED     1  /* client can reads */
#define CEPH_CAP_GEXCL       2  /* client can read and update */
#define CEPH_CAP_GCACHE      4  /* (file) client can cache reads */
#define CEPH_CAP_GRD         8  /* (file) client can read */
#define CEPH_CAP_GWR        16  /* (file) client can write */
#define CEPH_CAP_GBUFFER    32  /* (file) client can buffer writes */
#define CEPH_CAP_GWREXTEND  64  /* (file) client can extend EOF */
#define CEPH_CAP_GLAZYIO   128  /* (file) client can perform lazy io */

#define CEPH_CAP_SIMPLE_BITS  2
#define CEPH_CAP_FILE_BITS    8

/* per-lock shift */
#define CEPH_CAP_SAUTH      2
#define CEPH_CAP_SLINK      4
#define CEPH_CAP_SXATTR     6
#define CEPH_CAP_SFILE      8

/* composed values */
#define CEPH_CAP_AUTH_SHARED   (CEPH_CAP_GSHARED   << CEPH_CAP_SAUTH)
#define CEPH_CAP_AUTH_EXCL     (CEPH_CAP_GEXCL     << CEPH_CAP_SAUTH)
#define CEPH_CAP_LINK_SHARED   (CEPH_CAP_GSHARED   << CEPH_CAP_SLINK)
#define CEPH_CAP_LINK_EXCL     (CEPH_CAP_GEXCL     << CEPH_CAP_SLINK)
#define CEPH_CAP_XATTR_SHARED  (CEPH_CAP_GSHARED   << CEPH_CAP_SXATTR)
#define CEPH_CAP_XATTR_EXCL    (CEPH_CAP_GEXCL     << CEPH_CAP_SXATTR)
#define CEPH_CAP_FILE(x)       ((x) << CEPH_CAP_SFILE)
#define CEPH_CAP_FILE_SHARED   (CEPH_CAP_GSHARED   << CEPH_CAP_SFILE)
#define CEPH_CAP_FILE_EXCL     (CEPH_CAP_GEXCL     << CEPH_CAP_SFILE)
#define CEPH_CAP_FILE_CACHE    (CEPH_CAP_GCACHE    << CEPH_CAP_SFILE)
#define CEPH_CAP_FILE_RD       (CEPH_CAP_GRD       << CEPH_CAP_SFILE)
#define CEPH_CAP_FILE_WR       (CEPH_CAP_GWR       << CEPH_CAP_SFILE)
#define CEPH_CAP_FILE_BUFFER   (CEPH_CAP_GBUFFER   << CEPH_CAP_SFILE)
#define CEPH_CAP_FILE_WREXTEND (CEPH_CAP_GWREXTEND << CEPH_CAP_SFILE)
#define CEPH_CAP_FILE_LAZYIO   (CEPH_CAP_GLAZYIO   << CEPH_CAP_SFILE)

/* cap masks (for getattr) */
#define CEPH_STAT_CAP_INODE    CEPH_CAP_PIN
#define CEPH_STAT_CAP_TYPE     CEPH_CAP_PIN  /* mode >> 12 */
#define CEPH_STAT_CAP_SYMLINK  CEPH_CAP_PIN
#define CEPH_STAT_CAP_UID      CEPH_CAP_AUTH_SHARED
#define CEPH_STAT_CAP_GID      CEPH_CAP_AUTH_SHARED
#define CEPH_STAT_CAP_MODE     CEPH_CAP_AUTH_SHARED
#define CEPH_STAT_CAP_NLINK    CEPH_CAP_LINK_SHARED
#define CEPH_STAT_CAP_LAYOUT   CEPH_CAP_FILE_SHARED
#define CEPH_STAT_CAP_MTIME    CEPH_CAP_FILE_SHARED
#define CEPH_STAT_CAP_SIZE     CEPH_CAP_FILE_SHARED
#define CEPH_STAT_CAP_ATIME    CEPH_CAP_FILE_SHARED  /* fixme */
#define CEPH_STAT_CAP_XATTR    CEPH_CAP_XATTR_SHARED
#define CEPH_STAT_CAP_INODE_ALL (CEPH_CAP_PIN |       \
				 CEPH_CAP_AUTH_SHARED | \
				 CEPH_CAP_LINK_SHARED | \
				 CEPH_CAP_FILE_SHARED | \
				 CEPH_CAP_XATTR_SHARED)
#define CEPH_STAT_CAP_INLINE_DATA (CEPH_CAP_FILE_SHARED | \
				   CEPH_CAP_FILE_RD)
#define CEPH_STAT_RSTAT        CEPH_CAP_FILE_WREXTEND

#define CEPH_CAP_ANY_SHARED (CEPH_CAP_AUTH_SHARED | \
			     CEPH_CAP_LINK_SHARED | \
			     CEPH_CAP_XATTR_SHARED | \
			     CEPH_CAP_FILE_SHARED)
#define CEPH_CAP_ANY_RD   (CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_RD | \
			   CEPH_CAP_FILE_CACHE)
#define CEPH_CAP_ANY_EXCL (CEPH_CAP_AUTH_EXCL | \
			   CEPH_CAP_LINK_EXCL | \
			   CEPH_CAP_XATTR_EXCL | \
			   CEPH_CAP_FILE_EXCL)
#define CEPH_CAP_ANY_FILE_RD (CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE | \
			      CEPH_CAP_FILE_SHARED)
#define CEPH_CAP_ANY_FILE_WR (CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER | \
			      CEPH_CAP_FILE_EXCL)
#define CEPH_CAP_ANY_WR   (CEPH_CAP_ANY_EXCL | CEPH_CAP_ANY_FILE_WR)
#define CEPH_CAP_ANY      (CEPH_CAP_ANY_RD | CEPH_CAP_ANY_EXCL | \
			   CEPH_CAP_ANY_FILE_WR | CEPH_CAP_FILE_LAZYIO | \
			   CEPH_CAP_PIN)

#define CEPH_CAP_LOCKS (CEPH_LOCK_IFILE | CEPH_LOCK_IAUTH | CEPH_LOCK_ILINK | \
			CEPH_LOCK_IXATTR)

/* cap masks async dir operations */
#define CEPH_CAP_DIR_CREATE    CEPH_CAP_FILE_CACHE
#define CEPH_CAP_DIR_UNLINK    CEPH_CAP_FILE_RD
#define CEPH_CAP_ANY_DIR_OPS   (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD | \
				CEPH_CAP_FILE_WREXTEND | CEPH_CAP_FILE_LAZYIO)

int ceph_caps_for_mode(int mode);

enum {
	CEPH_CAP_OP_GRANT,          /* mds->client grant */
	CEPH_CAP_OP_REVOKE,         /* mds->client revoke */
	CEPH_CAP_OP_TRUNC,          /* mds->client trunc notify */
	CEPH_CAP_OP_EXPORT,         /* mds has exported the cap */
	CEPH_CAP_OP_IMPORT,         /* mds has imported the cap */
	CEPH_CAP_OP_UPDATE,         /* client->mds update */
	CEPH_CAP_OP_DROP,           /* client->mds drop cap bits */
	CEPH_CAP_OP_FLUSH,          /* client->mds cap writeback */
	CEPH_CAP_OP_FLUSH_ACK,      /* mds->client flushed */
	CEPH_CAP_OP_FLUSHSNAP,      /* client->mds flush snapped metadata */
	CEPH_CAP_OP_FLUSHSNAP_ACK,  /* mds->client flushed snapped metadata */
	CEPH_CAP_OP_RELEASE,        /* client->mds release (clean) cap */
	CEPH_CAP_OP_RENEW,          /* client->mds renewal request */
};

extern const char *ceph_cap_op_name(int op);
/* extra info for cap import/export */
struct ceph_mds_cap_peer {
	__le64 cap_id;
	__le32 seq;
	__le32 mseq;
	__le32 mds;
	__u8   flags;
} __attribute__ ((packed));

/*
 * caps message, used for capability callbacks, acks, requests, etc.
 */
struct ceph_mds_caps_head {
	__le32 op;                  /* CEPH_CAP_OP_* */
	__le64 ino, realm;
	__le64 cap_id;
	__le32 seq, issue_seq;
	__le32 caps, wanted, dirty; /* latest issued/wanted/dirty */
	__le32 migrate_seq;
	__le64 snap_follows;
	__le32 snap_trace_len;

	/* authlock */
	__le32 uid, gid, mode;

	/* linklock */
	__le32 nlink;

	/* xattrlock */
	__le32 xattr_len;
	__le64 xattr_version;
} __attribute__ ((packed));

struct ceph_mds_caps_non_export_body {
	/* all except export */
	/* filelock */
	__le64 size, max_size, truncate_size;
	__le32 truncate_seq;
	struct ceph_timespec mtime, atime, ctime;
	struct ceph_file_layout layout;
	__le32 time_warp_seq;
} __attribute__ ((packed));

struct ceph_mds_caps_export_body {
	/* export message */
	struct ceph_mds_cap_peer peer;
} __attribute__ ((packed));

/* cap release msg head */
struct ceph_mds_cap_release {
	__le32 num;                 /* number of cap_items that follow */
} __attribute__ ((packed));

struct ceph_mds_cap_item {
	__le64 ino;
	__le64 cap_id;
	__le32 migrate_seq, seq;
} __attribute__ ((packed));

#define CEPH_MDS_LEASE_REVOKE      1  /*    mds  -> client */
#define CEPH_MDS_LEASE_RELEASE     2  /* client  -> mds    */
#define CEPH_MDS_LEASE_RENEW       3  /* client <-> mds    */
#define CEPH_MDS_LEASE_REVOKE_ACK  4  /* client  -> mds    */

extern const char *ceph_lease_op_name(int o);

/* lease msg header */
struct ceph_mds_lease {
	__u8 action;                /* CEPH_MDS_LEASE_* */
	__le16 mask;                /* which lease */
	__le64 ino;
	__le64 first, last;         /* snap range */
	__le32 seq;
	__le32 duration_ms;         /* duration of renewal */
} __attribute__ ((packed));
/* followed by a __le32+string for dname */

/* client reconnect */
struct ceph_mds_cap_reconnect {
	__le64 cap_id;
	__le32 wanted;
	__le32 issued;
	__le64 snaprealm;
	__le64 pathbase;            /* base ino for our path to this ino */
	__le32 flock_len;           /* size of flock state blob, if any */
} __attribute__ ((packed));
/* followed by flock blob */

struct ceph_mds_cap_reconnect_v1 {
	__le64 cap_id;
	__le32 wanted;
	__le32 issued;
	__le64 size;
	struct ceph_timespec mtime, atime;
	__le64 snaprealm;
	__le64 pathbase;            /* base ino for our path to this ino */
} __attribute__ ((packed));

struct ceph_mds_snaprealm_reconnect {
	__le64 ino;                 /* snap realm base */
	__le64 seq;                 /* snap seq for this snap realm */
	__le64 parent;              /* parent realm */
} __attribute__ ((packed));

/*
 * snaps
 */
enum {
	CEPH_SNAP_OP_UPDATE,        /* CREATE or DESTROY */
	CEPH_SNAP_OP_CREATE,
	CEPH_SNAP_OP_DESTROY,
	CEPH_SNAP_OP_SPLIT,
};

extern const char *ceph_snap_op_name(int o);

/* snap msg header */
struct ceph_mds_snap_head {
	__le32 op;                  /* CEPH_SNAP_OP_* */
	__le64 split;               /* ino to split off, if any */
	__le32 num_split_inos;      /* # inos belonging to new child realm */
	__le32 num_split_realms;    /* # child realms under new child realm */
	__le32 trace_len;           /* size of snap trace blob */
} __attribute__ ((packed));
/* followed by split ino list, then split realms, then the trace blob */

/*
 * encode info about a snaprealm, as viewed by a client
 */
struct ceph_mds_snap_realm {
	__le64 ino;                 /* ino */
	__le64 created;             /* snap: when created */
	__le64 parent;              /* ino: parent realm */
	__le64 parent_since;        /* snap: same parent since */
	__le64 seq;                 /* snap: version */
	__le32 num_snaps;
	__le32 num_prior_parent_snaps;
} __attribute__ ((packed));
/* followed by my snap list, then prior parent snap list */

/* userspace: drop the __le* names again (presumably macros defined
   earlier in this header -- #undef is a no-op if they are not) */
#ifndef __KERNEL__
#undef __le16
#undef __le32
#undef __le64
#endif

#endif
| 35,052 | 31.516698 | 101 | h |
null | ceph-main/src/include/ceph_fuse.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2012 Inktank Storage, Inc.
* Copyright (C) 2014 Red Hat <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef CEPH_FUSE_H
#define CEPH_FUSE_H
/*
 * The API version that we want to use, regardless of what the
 * library version is. Note that this must be defined before
 * fuse.h is included (libfuse selects its API based on it).
 */
#ifndef FUSE_USE_VERSION
#define FUSE_USE_VERSION 312
#endif
#include <fuse.h>
#include "acconfig.h"
/*
 * Redefine the FUSE_VERSION macro from the "fuse_common.h" header,
 * because the MINOR number there was not kept up to date from
 * libfuse 3.2 through 3.8.  We instead build the version from the
 * MAJOR/MINOR numbers fetched from the pkgconfig file.
 */
#ifdef FUSE_VERSION
#undef FUSE_VERSION
#define FUSE_VERSION FUSE_MAKE_VERSION(CEPH_FUSE_MAJOR_VERSION, CEPH_FUSE_MINOR_VERSION)
#endif
/*
 * Call a FUSE readdir filler portably: the FUSE 3.x API grew a flags
 * argument, so pass an empty flags value there and omit it on older
 * API versions.
 */
static inline int filler_compat(fuse_fill_dir_t filler,
                                void *buf, const char *name,
                                const struct stat *stbuf,
                                off_t off)
{
#if FUSE_VERSION >= FUSE_MAKE_VERSION(3, 0)
  return filler(buf, name, stbuf, off,
                static_cast<enum fuse_fill_dir_flags>(0));
#else
  return filler(buf, name, stbuf, off);
#endif
}
#endif /* CEPH_FUSE_H */
| 1,529 | 28.423077 | 88 | h |
null | ceph-main/src/include/ceph_hash.h | #ifndef FS_CEPH_HASH_H
#define FS_CEPH_HASH_H
/* string hash algorithm selectors */
#define CEPH_STR_HASH_LINUX      0x1  /* linux dcache hash */
#define CEPH_STR_HASH_RJENKINS   0x2  /* robert jenkins' */

extern unsigned ceph_str_hash_linux(const char *s, unsigned len);
extern unsigned ceph_str_hash_rjenkins(const char *s, unsigned len);

/* hash s with the algorithm selected by one of the CEPH_STR_HASH_* ids */
extern unsigned ceph_str_hash(int type, const char *s, unsigned len);
extern const char *ceph_str_hash_name(int type);
extern bool ceph_str_hash_valid(int type);
#endif
| 476 | 30.8 | 69 | h |
null | ceph-main/src/include/color.h | #ifndef CEPH_COLOR_H
#define CEPH_COLOR_H
/* ANSI terminal escape sequences for colored console output */
#define TEXT_NORMAL  "\033[0m"      /* reset attributes */
/*#define TEXT_HAZARD "\033[5;31m"*/
#define TEXT_RED     "\033[0;31m"
#define TEXT_GREEN   "\033[0;32m"
#define TEXT_YELLOW  "\033[0;33m"
#define TEXT_BLUE    "\033[0;34m"
#define TEXT_MAGENTA "\033[0;35m"
#define TEXT_CYAN    "\033[0;36m"
#endif
| 309 | 21.142857 | 36 | h |
null | ceph-main/src/include/common_fwd.h | #pragma once
/*
 * Choose the namespace the shared "common" types live in: crimson
 * when built WITH_SEASTAR and without the alien bridge, ceph
 * otherwise.
 */
#if defined(WITH_SEASTAR) && !defined(WITH_ALIEN)
#define TOPNSPC crimson
#else
#define TOPNSPC ceph
#endif

/* forward declarations only -- keep this header lightweight */
namespace TOPNSPC::common {
  class CephContext;
  class PerfCounters;
  class PerfCountersBuilder;
  class PerfCountersCollection;
  class PerfCountersCollectionImpl;
  class PerfGuard;
  class RefCountedObject;
  class RefCountedObjectSafe;
  class RefCountedCond;
  class RefCountedWaitObject;
  class ConfigProxy;
}

/* re-export into the global namespace so code can use the
   unqualified names regardless of which TOPNSPC was selected */
using TOPNSPC::common::CephContext;
using TOPNSPC::common::PerfCounters;
using TOPNSPC::common::PerfCountersBuilder;
using TOPNSPC::common::PerfCountersCollection;
using TOPNSPC::common::PerfCountersCollectionImpl;
using TOPNSPC::common::PerfGuard;
using TOPNSPC::common::RefCountedObject;
using TOPNSPC::common::RefCountedObjectSafe;
using TOPNSPC::common::RefCountedCond;
using TOPNSPC::common::RefCountedWaitObject;
using TOPNSPC::common::ConfigProxy;
| 898 | 26.242424 | 50 | h |
null | ceph-main/src/include/compact_map.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMPACT_MAP_H
#define CEPH_COMPACT_MAP_H
#include "buffer.h"
#include "encoding.h"
#include <map>
#include <memory>
#include "include/encoding.h"
template <class Key, class T, class Map>
class compact_map_base {
protected:
std::unique_ptr<Map> map;
void alloc_internal() {
if (!map)
map.reset(new Map);
}
void free_internal() {
map.reset();
}
template <class It>
class const_iterator_base {
const compact_map_base *map;
It it;
const_iterator_base() : map(0) { }
const_iterator_base(const compact_map_base* m) : map(m) { }
const_iterator_base(const compact_map_base *m, const It& i) : map(m), it(i) { }
friend class compact_map_base;
friend class iterator_base;
public:
const_iterator_base(const const_iterator_base& o) {
map = o.map;
it = o.it;
}
bool operator==(const const_iterator_base& o) const {
return (map == o.map) && (!map->map || it == o.it);
}
bool operator!=(const const_iterator_base& o) const {
return !(*this == o);;
}
const_iterator_base& operator=(const const_iterator_base& o) {
map = o.map;
it = o.it;
return *this;
}
const_iterator_base& operator++() {
++it;
return *this;
}
const_iterator_base& operator--() {
--it;
return *this;
}
const std::pair<const Key,T>& operator*() {
return *it;
}
const std::pair<const Key,T>* operator->() {
return it.operator->();
}
};
template <class It>
class iterator_base {
private:
const compact_map_base* map;
It it;
iterator_base() : map(0) { }
iterator_base(compact_map_base* m) : map(m) { }
iterator_base(compact_map_base* m, const It& i) : map(m), it(i) { }
friend class compact_map_base;
public:
iterator_base(const iterator_base& o) {
map = o.map;
it = o.it;
}
bool operator==(const iterator_base& o) const {
return (map == o.map) && (!map->map || it == o.it);
}
bool operator!=(const iterator_base& o) const {
return !(*this == o);;
}
iterator_base& operator=(const iterator_base& o) {
map = o.map;
it = o.it;
return *this;
}
iterator_base& operator++() {
++it;
return *this;
}
iterator_base operator++(int) {
iterator_base tmp = *this;
++it;
return tmp;
}
iterator_base& operator--() {
--it;
return *this;
}
std::pair<const Key,T>& operator*() {
return *it;
}
std::pair<const Key,T>* operator->() {
return it.operator->();
}
operator const_iterator_base<It>() const {
return const_iterator_base<It>(map, it);
}
};
public:
class iterator : public iterator_base<typename Map::iterator> {
public:
iterator() { }
iterator(const iterator_base<typename Map::iterator>& o)
: iterator_base<typename Map::iterator>(o) { }
iterator(compact_map_base* m) : iterator_base<typename Map::iterator>(m) { }
iterator(compact_map_base* m, const typename Map::iterator& i)
: iterator_base<typename Map::iterator>(m, i) { }
};
class const_iterator : public const_iterator_base<typename Map::const_iterator> {
public:
const_iterator() { }
const_iterator(const iterator_base<typename Map::const_iterator>& o)
: const_iterator_base<typename Map::const_iterator>(o) { }
const_iterator(const compact_map_base* m) : const_iterator_base<typename Map::const_iterator>(m) { }
const_iterator(const compact_map_base* m, const typename Map::const_iterator& i)
: const_iterator_base<typename Map::const_iterator>(m, i) { }
};
class reverse_iterator : public iterator_base<typename Map::reverse_iterator> {
public:
reverse_iterator() { }
reverse_iterator(const iterator_base<typename Map::reverse_iterator>& o)
: iterator_base<typename Map::reverse_iterator>(o) { }
reverse_iterator(compact_map_base* m) : iterator_base<typename Map::reverse_iterator>(m) { }
reverse_iterator(compact_map_base* m, const typename Map::reverse_iterator& i)
: iterator_base<typename Map::reverse_iterator>(m, i) { }
};
class const_reverse_iterator : public const_iterator_base<typename Map::const_reverse_iterator> {
public:
const_reverse_iterator() { }
const_reverse_iterator(const iterator_base<typename Map::const_reverse_iterator>& o)
: iterator_base<typename Map::const_reverse_iterator>(o) { }
const_reverse_iterator(const compact_map_base* m) : const_iterator_base<typename Map::const_reverse_iterator>(m) { }
const_reverse_iterator(const compact_map_base* m, const typename Map::const_reverse_iterator& i)
: const_iterator_base<typename Map::const_reverse_iterator>(m, i) { }
};
compact_map_base(const compact_map_base& o) {
if (o.map) {
alloc_internal();
*map = *o.map;
}
}
compact_map_base() {}
~compact_map_base() {}
  // Observers: every accessor must tolerate the unallocated (empty) state.
  bool empty() const {
    return !map || map->empty();
  }
  size_t size() const {
    return map ? map->size() : 0;
  }
  // Two containers are equal when both are empty, or both are allocated and
  // their backing maps compare equal.
  bool operator==(const compact_map_base& o) const {
    return (empty() && o.empty()) || (map && o.map && *map == *o.map);
  }
  bool operator!=(const compact_map_base& o) const {
    return !(*this == o);
  }
  size_t count (const Key& k) const {
    return map ? map->count(k) : 0;
  }
  // Erase by iterator.  If this removes the last element the backing map is
  // freed and an end()-equivalent iterator is returned; any other outstanding
  // iterators become invalid at that point.
  iterator erase (iterator p) {
    if (map) {
      // p must point into *this* container.
      ceph_assert(this == p.map);
      auto it = map->erase(p.it);
      if (map->empty()) {
        free_internal();
        return iterator(this);
      } else {
        return iterator(this, it);
      }
    } else {
      return iterator(this);
    }
  }
  // Erase by key; returns the number of elements removed.  Frees the backing
  // map when the container becomes empty.
  size_t erase (const Key& k) {
    if (!map)
      return 0;
    size_t r = map->erase(k);
    if (map->empty())
      free_internal();
    return r;
  }
  // Drops the backing map entirely (the container returns to its minimal,
  // pointer-sized representation).
  void clear() {
    free_internal();
  }
  // O(1) swap: just exchanges the backing-map pointers.
  void swap(compact_map_base& o) {
    map.swap(o.map);
  }
  // Copy assignment: deep-copies the source's map, or frees ours if the
  // source is empty.
  compact_map_base& operator=(const compact_map_base& o) {
    if (o.map) {
      alloc_internal();
      *map = *o.map;
    } else
      free_internal();
    return *this;
  }
iterator insert(const std::pair<const Key, T>& val) {
alloc_internal();
return iterator(this, map->insert(val));
}
  // Construct an element in place, materializing the backing map on demand.
  // NOTE(review): std::multimap::emplace returns a plain iterator (no .first),
  // so this member can only be instantiated for compact_map — verify before
  // calling it on a compact_multimap.
  template <class... Args>
  std::pair<iterator,bool> emplace ( Args&&... args ) {
    alloc_internal();
    auto em = map->emplace(std::forward<Args>(args)...);
    return std::pair<iterator,bool>(iterator(this, em.first), em.second);
  }
  // Iteration and lookup (mutable).  When there is no backing map every
  // accessor returns a default iterator, so begin() == end() holds for an
  // empty container and find()/bound lookups report "not found".
  iterator begin() {
    if (!map)
      return iterator(this);
    return iterator(this, map->begin());
  }
  iterator end() {
    if (!map)
      return iterator(this);
    return iterator(this, map->end());
  }
  reverse_iterator rbegin() {
    if (!map)
      return reverse_iterator(this);
    return reverse_iterator(this, map->rbegin());
  }
  reverse_iterator rend() {
    if (!map)
      return reverse_iterator(this);
    return reverse_iterator(this, map->rend());
  }
  iterator find(const Key& k) {
    if (!map)
      return iterator(this);
    return iterator(this, map->find(k));
  }
  iterator lower_bound(const Key& k) {
    if (!map)
      return iterator(this);
    return iterator(this, map->lower_bound(k));
  }
  iterator upper_bound(const Key& k) {
    if (!map)
      return iterator(this);
    return iterator(this, map->upper_bound(k));
  }
  // Iteration and lookup (const).  Same unallocated-state semantics as the
  // mutable overloads above.
  const_iterator begin() const {
    if (!map)
      return const_iterator(this);
    return const_iterator(this, map->begin());
  }
  const_iterator end() const {
    if (!map)
      return const_iterator(this);
    return const_iterator(this, map->end());
  }
  const_reverse_iterator rbegin() const {
    if (!map)
      return const_reverse_iterator(this);
    return const_reverse_iterator(this, map->rbegin());
  }
  const_reverse_iterator rend() const {
    if (!map)
      return const_reverse_iterator(this);
    return const_reverse_iterator(this, map->rend());
  }
  const_iterator find(const Key& k) const {
    if (!map)
      return const_iterator(this);
    return const_iterator(this, map->find(k));
  }
  const_iterator lower_bound(const Key& k) const {
    if (!map)
      return const_iterator(this);
    return const_iterator(this, map->lower_bound(k));
  }
  const_iterator upper_bound(const Key& k) const {
    if (!map)
      return const_iterator(this);
    return const_iterator(this, map->upper_bound(k));
  }
  // Wire format is identical to a plain map's: an empty compact_map encodes
  // as a 32-bit zero element count, so compact_map and the underlying map
  // type are encoding-compatible.
  void encode(ceph::buffer::list &bl) const {
    using ceph::encode;
    if (map)
      encode(*map, bl);
    else
      encode((uint32_t)0, bl);
  }
  // Feature-dependent variant for value types whose encoding varies with the
  // peer's feature bits.
  void encode(ceph::buffer::list &bl, uint64_t features) const {
    using ceph::encode;
    if (map)
      encode(*map, bl, features);
    else
      encode((uint32_t)0, bl);
  }
  // Decoding zero elements frees the backing map rather than leaving an
  // allocated-but-empty one behind.
  void decode(ceph::buffer::list::const_iterator& p) {
    using ceph::decode;
    using ceph::decode_nohead;
    uint32_t n;
    decode(n, p);
    if (n > 0) {
      alloc_internal();
      decode_nohead(n, *map, p);
    } else
      free_internal();
  }
};
// Free-function adaptors so compact_map works with the generic
// encode()/decode() machinery; they just forward to the members above.
template<class Key, class T, class Map>
inline void encode(const compact_map_base<Key, T, Map>& m, ceph::buffer::list& bl) {
  m.encode(bl);
}
template<class Key, class T, class Map>
inline void encode(const compact_map_base<Key, T, Map>& m, ceph::buffer::list& bl,
		   uint64_t features) {
  m.encode(bl, features);
}
template<class Key, class T, class Map>
inline void decode(compact_map_base<Key, T, Map>& m, ceph::buffer::list::const_iterator& p) {
  m.decode(p);
}
// Space-efficient drop-in for std::map<Key,T>: the backing map is allocated
// lazily, so an empty instance costs a single pointer.
template <class Key, class T, class Compare = std::less<Key>, class Alloc = std::allocator< std::pair<const Key, T> > >
class compact_map : public compact_map_base<Key, T, std::map<Key,T,Compare,Alloc> > {
public:
  // Default-constructs the mapped value if absent; allocates the backing
  // map first when needed.
  T& operator[](const Key& key) {
    this->alloc_internal();
    auto& backing = *(this->map);
    return backing[key];
  }
};
// Debug printer: renders the map as {k1=v1,k2=v2,...}.
template <class Key, class T, class Compare = std::less<Key>, class Alloc = std::allocator< std::pair<const Key, T> > >
inline std::ostream& operator<<(std::ostream& out, const compact_map<Key, T, Compare, Alloc>& m)
{
  out << "{";
  const char* sep = "";
  for (const auto& entry : m) {
    out << sep << entry.first << "=" << entry.second;
    sep = ",";
  }
  out << "}";
  return out;
}
// Space-efficient drop-in for std::multimap<Key,T>; all behaviour comes from
// compact_map_base (no operator[] — multimaps do not have one).
template <class Key, class T, class Compare = std::less<Key>, class Alloc = std::allocator< std::pair<const Key, T> > >
class compact_multimap : public compact_map_base<Key, T, std::multimap<Key,T,Compare,Alloc> > {
};
// Debug printer: renders the multimap as {{k1=v1,k2=v2,...}} (double braces
// distinguish it from compact_map's output).
template <class Key, class T, class Compare = std::less<Key>, class Alloc = std::allocator< std::pair<const Key, T> > >
inline std::ostream& operator<<(std::ostream& out, const compact_multimap<Key, T, Compare, Alloc>& m)
{
  out << "{{";
  const char* sep = "";
  for (const auto& entry : m) {
    out << sep << entry.first << "=" << entry.second;
    sep = ",";
  }
  out << "}}";
  return out;
}
#endif
| 11,062 | 27.809896 | 122 | h |
null | ceph-main/src/include/compact_set.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COMPACT_SET_H
#define CEPH_COMPACT_SET_H
#include "buffer.h"
#include "encoding.h"
#include <memory>
#include <set>
// A std::set-style wrapper whose backing Set is allocated lazily: an empty
// compact_set costs only one (null) unique_ptr.  The backing set is created
// on first insertion and freed again as soon as the container becomes empty.
template <class T, class Set>
class compact_set_base {
protected:
  // Backing set; nullptr while the container is empty.
  std::unique_ptr<Set> set;
  void alloc_internal() {
    if (!set)
      set.reset(new Set);
  }
  void free_internal() {
    set.reset();
  }
  // Common machinery for all four iterator flavours.  `set` identifies the
  // owning container (needed because an empty compact_set has no underlying
  // iterator at all); `it` is only meaningful while set->set is allocated.
  template <class It>
  class iterator_base {
  private:
    const compact_set_base* set;
    It it;
    iterator_base() : set(nullptr) { }
    iterator_base(const compact_set_base* s) : set(s) { }
    iterator_base(const compact_set_base* s, const It& i) : set(s), it(i) { }
    friend class compact_set_base;
  public:
    iterator_base(const iterator_base& o) {
      set = o.set;
      it = o.it;
    }
    bool operator==(const iterator_base& o) const {
      // When the owning container has no backing set, all of its iterators
      // compare equal (so begin() == end() for an empty container).
      return (set == o.set) && (!set->set || it == o.it);
    }
    bool operator!=(const iterator_base& o) const {
      return !(*this == o);
    }
    iterator_base& operator=(const iterator_base& o) {
      // Fixed: this used to read `set->set = o.set;`, which assigns a
      // compact_set_base* to the container's unique_ptr<Set> member — a type
      // error that never compiled (the member was simply never instantiated).
      // Assignment must mirror the copy constructor above.
      set = o.set;
      it = o.it;
      return *this;
    }
    iterator_base& operator++() {
      ++it;
      return *this;
    }
    iterator_base operator++(int) {
      iterator_base tmp = *this;
      ++it;
      return tmp;
    }
    iterator_base& operator--() {
      --it;
      return *this;
    }
    const T& operator*() {
      return *it;
    }
  };
public:
  class const_iterator : public iterator_base<typename Set::const_iterator> {
  public:
    const_iterator() { }
    const_iterator(const iterator_base<typename Set::const_iterator>& o)
      : iterator_base<typename Set::const_iterator>(o) { }
    const_iterator(const compact_set_base* s) : iterator_base<typename Set::const_iterator>(s) { }
    const_iterator(const compact_set_base* s, const typename Set::const_iterator& i)
      : iterator_base<typename Set::const_iterator>(s, i) { }
  };
  class iterator : public iterator_base<typename Set::iterator> {
  public:
    iterator() { }
    iterator(const iterator_base<typename Set::iterator>& o)
      : iterator_base<typename Set::iterator>(o) { }
    iterator(compact_set_base* s) : iterator_base<typename Set::iterator>(s) { }
    iterator(compact_set_base* s, const typename Set::iterator& i)
      : iterator_base<typename Set::iterator>(s, i) { }
    operator const_iterator() const {
      return const_iterator(this->set, this->it);
    }
  };
  class const_reverse_iterator : public iterator_base<typename Set::const_reverse_iterator> {
  public:
    const_reverse_iterator() { }
    const_reverse_iterator(const iterator_base<typename Set::const_reverse_iterator>& o)
      : iterator_base<typename Set::const_reverse_iterator>(o) { }
    const_reverse_iterator(const compact_set_base* s) : iterator_base<typename Set::const_reverse_iterator>(s) { }
    const_reverse_iterator(const compact_set_base* s, const typename Set::const_reverse_iterator& i)
      : iterator_base<typename Set::const_reverse_iterator>(s, i) { }
  };
  class reverse_iterator : public iterator_base<typename Set::reverse_iterator> {
  public:
    reverse_iterator() { }
    reverse_iterator(const iterator_base<typename Set::reverse_iterator>& o)
      : iterator_base<typename Set::reverse_iterator>(o) { }
    reverse_iterator(compact_set_base* s) : iterator_base<typename Set::reverse_iterator>(s) { }
    reverse_iterator(compact_set_base* s, const typename Set::reverse_iterator& i)
      : iterator_base<typename Set::reverse_iterator>(s, i) { }
    // NOTE(review): this converts a *reverse* iterator to a forward
    // const_iterator built from the reverse underlying iterator;
    // const_reverse_iterator looks intended.  It is never instantiated
    // anywhere, so it is left untouched here — confirm before using.
    operator const_iterator() const {
      return const_iterator(this->set, this->it);
    }
  };
  compact_set_base() {}
  // Copy: deep-copies the backing set only when the source has one.
  compact_set_base(const compact_set_base& o) {
    if (o.set) {
      alloc_internal();
      *set = *o.set;
    }
  }
  ~compact_set_base() {}

  // Observers: all tolerate the unallocated (empty) state.
  bool empty() const {
    return !set || set->empty();
  }
  size_t size() const {
    return set ? set->size() : 0;
  }
  bool operator==(const compact_set_base& o) const {
    return (empty() && o.empty()) || (set && o.set && *set == *o.set);
  }
  bool operator!=(const compact_set_base& o) const {
    return !(*this == o);
  }
  size_t count(const T& t) const {
    return set ? set->count(t) : 0;
  }
  // Erase by iterator.  If this removes the last element the backing set is
  // freed and an end()-equivalent iterator is returned; any other
  // outstanding iterators become invalid at that point.
  iterator erase (iterator p) {
    if (set) {
      ceph_assert(this == p.set);
      auto it = set->erase(p.it);
      if (set->empty()) {
        free_internal();
        return iterator(this);
      } else {
        return iterator(this, it);
      }
    } else {
      return iterator(this);
    }
  }
  // Erase by value; frees the backing set when it becomes empty.
  size_t erase (const T& t) {
    if (!set)
      return 0;
    size_t r = set->erase(t);
    if (set->empty())
      free_internal();
    return r;
  }
  void clear() {
    free_internal();
  }
  // O(1) swap of the backing-set pointers.
  void swap(compact_set_base& o) {
    set.swap(o.set);
  }
  compact_set_base& operator=(const compact_set_base& o) {
    if (o.set) {
      alloc_internal();
      *set = *o.set;
    } else
      free_internal();
    return *this;
  }
  // Insertion materializes the backing set on demand.
  std::pair<iterator,bool> insert(const T& t) {
    alloc_internal();
    std::pair<typename Set::iterator,bool> r = set->insert(t);
    return std::make_pair(iterator(this, r.first), r.second);
  }
  template <class... Args>
  std::pair<iterator,bool> emplace ( Args&&... args ) {
    alloc_internal();
    auto em = set->emplace(std::forward<Args>(args)...);
    return std::pair<iterator,bool>(iterator(this, em.first), em.second);
  }
  // Iteration and lookup: when there is no backing set every accessor
  // returns a default (end-equivalent) iterator.
  iterator begin() {
    if (!set)
      return iterator(this);
    return iterator(this, set->begin());
  }
  iterator end() {
    if (!set)
      return iterator(this);
    return iterator(this, set->end());
  }
  reverse_iterator rbegin() {
    if (!set)
      return reverse_iterator(this);
    return reverse_iterator(this, set->rbegin());
  }
  reverse_iterator rend() {
    if (!set)
      return reverse_iterator(this);
    return reverse_iterator(this, set->rend());
  }
  iterator find(const T& t) {
    if (!set)
      return iterator(this);
    return iterator(this, set->find(t));
  }
  iterator lower_bound(const T& t) {
    if (!set)
      return iterator(this);
    return iterator(this, set->lower_bound(t));
  }
  iterator upper_bound(const T& t) {
    if (!set)
      return iterator(this);
    return iterator(this, set->upper_bound(t));
  }
  const_iterator begin() const {
    if (!set)
      return const_iterator(this);
    return const_iterator(this, set->begin());
  }
  const_iterator end() const {
    if (!set)
      return const_iterator(this);
    return const_iterator(this, set->end());
  }
  const_reverse_iterator rbegin() const {
    if (!set)
      return const_reverse_iterator(this);
    return const_reverse_iterator(this, set->rbegin());
  }
  const_reverse_iterator rend() const {
    if (!set)
      return const_reverse_iterator(this);
    return const_reverse_iterator(this, set->rend());
  }
  const_iterator find(const T& t) const {
    if (!set)
      return const_iterator(this);
    return const_iterator(this, set->find(t));
  }
  const_iterator lower_bound(const T& t) const {
    if (!set)
      return const_iterator(this);
    return const_iterator(this, set->lower_bound(t));
  }
  const_iterator upper_bound(const T& t) const {
    if (!set)
      return const_iterator(this);
    return const_iterator(this, set->upper_bound(t));
  }
  // Wire format matches a plain set's: an empty compact_set encodes as a
  // 32-bit zero element count, so compact_set and the underlying set type
  // are encoding-compatible.
  void encode(ceph::buffer::list &bl) const {
    using ceph::encode;
    if (set)
      encode(*set, bl);
    else
      encode((uint32_t)0, bl);
  }
  // Decoding zero elements frees the backing set rather than leaving an
  // allocated-but-empty one behind.
  void decode(ceph::buffer::list::const_iterator& p) {
    using ceph::decode;
    uint32_t n;
    decode(n, p);
    if (n > 0) {
      alloc_internal();
      ceph::decode_nohead(n, *set, p);
    } else
      free_internal();
  }
};
// Free-function adaptors so compact_set works with the generic
// encode()/decode() machinery; they just forward to the members above.
template<class T, class Set>
inline void encode(const compact_set_base<T, Set>& m, ceph::buffer::list& bl) {
  m.encode(bl);
}
template<class T, class Set>
inline void decode(compact_set_base<T, Set>& m, ceph::buffer::list::const_iterator& p) {
  m.decode(p);
}
// Space-efficient drop-in for std::set<T>: see compact_set_base for the
// lazy-allocation semantics.
template <class T, class Compare = std::less<T>, class Alloc = std::allocator<T> >
class compact_set : public compact_set_base<T, std::set<T, Compare, Alloc> > {
};
// Debug printer: renders the set as v1,v2,... (historically without braces,
// unlike compact_map's {..} output — kept for log compatibility).
template <class T, class Compare = std::less<T>, class Alloc = std::allocator<T> >
inline std::ostream& operator<<(std::ostream& out, const compact_set<T,Compare,Alloc>& s)
{
  const char* sep = "";
  for (const auto& elem : s) {
    out << sep << elem;
    sep = ",";
  }
  return out;
}
#endif
| 8,630 | 27.205882 | 116 | h |
null | ceph-main/src/include/compat.h | /*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 Stanislav Sedov <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef CEPH_COMPAT_H
#define CEPH_COMPAT_H
#include "acconfig.h"
#include <sys/types.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#if defined(__linux__)
#define PROCPREFIX
#endif
#include <fcntl.h>
#ifndef F_OFD_SETLK
#define F_OFD_SETLK F_SETLK
#endif
#include <sys/stat.h>
#ifdef _WIN32
#include "include/win32/fs_compat.h"
#endif
#ifndef ACCESSPERMS
#define ACCESSPERMS (S_IRWXU|S_IRWXG|S_IRWXO)
#endif
#ifndef ALLPERMS
#define ALLPERMS (S_ISUID|S_ISGID|S_ISVTX|S_IRWXU|S_IRWXG|S_IRWXO)
#endif
#if defined(__FreeBSD__)
// FreeBSD supports Linux procfs with its compatibility module
// And all compatibility stuff is standard mounted on this
#define PROCPREFIX "/compat/linux"
#ifndef MSG_MORE
#define MSG_MORE 0
#endif
#ifndef O_DSYNC
#define O_DSYNC O_SYNC
#endif
/* And include the extra required include file */
#include <pthread_np.h>
#include <sys/param.h>
#include <sys/cpuset.h>
#define cpu_set_t cpuset_t
int sched_setaffinity(pid_t pid, size_t cpusetsize,
cpu_set_t *mask);
#endif /* __FreeBSD__ */
#if defined(__APPLE__)
struct cpu_set_t;
#endif
#if defined(__APPLE__) || defined(__FreeBSD__)
/* Make sure that ENODATA is defined in the correct way */
#ifdef ENODATA
#if (ENODATA == 9919)
// #warning ENODATA already defined to be 9919, redefining to fix
// Silencing this warning because it fires at all files where compat.h
// is included after boost files.
//
// This value stems from the definition in the boost library
// And when this case occurs it is due to the fact that boost files
// are included before this file. Redefinition might not help in this
// case since already parsed code has evaluated to the wrong value.
// This would warrant a definition that is actually evaluated
// at the location of usage and report a possible conflict.
// This is left up to a future improvement
#elif (ENODATA != 87)
// #warning ENODATA already defined to a value different from 87 (ENOATRR), refining to fix
#endif
#undef ENODATA
#endif
#define ENODATA ENOATTR
// Fix clock accuracy
#if !defined(CLOCK_MONOTONIC_COARSE)
#if defined(CLOCK_MONOTONIC_FAST)
#define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC_FAST
#else
#define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC
#endif
#endif
#if !defined(CLOCK_REALTIME_COARSE)
#if defined(CLOCK_REALTIME_FAST)
#define CLOCK_REALTIME_COARSE CLOCK_REALTIME_FAST
#else
#define CLOCK_REALTIME_COARSE CLOCK_REALTIME
#endif
#endif
/* get PATH_MAX */
#include <limits.h>
#ifndef EUCLEAN
#define EUCLEAN 117
#endif
#ifndef EREMOTEIO
#define EREMOTEIO 121
#endif
#ifndef EKEYREJECTED
#define EKEYREJECTED 129
#endif
#ifndef XATTR_CREATE
#define XATTR_CREATE 1
#endif
#endif /* __APPLE__ */
#ifndef HOST_NAME_MAX
#ifdef MAXHOSTNAMELEN
#define HOST_NAME_MAX MAXHOSTNAMELEN
#else
#define HOST_NAME_MAX 255
#endif
#endif /* HOST_NAME_MAX */
/* O_LARGEFILE is not defined/required on OSX/FreeBSD */
#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif
/* Could be relevant for other platforms */
#ifndef ERESTART
#define ERESTART EINTR
#endif
#ifndef TEMP_FAILURE_RETRY
#define TEMP_FAILURE_RETRY(expression) ({ \
__typeof(expression) __result; \
do { \
__result = (expression); \
} while (__result == -1 && errno == EINTR); \
__result; })
#endif
#ifdef __cplusplus
# define VOID_TEMP_FAILURE_RETRY(expression) \
static_cast<void>(TEMP_FAILURE_RETRY(expression))
#else
# define VOID_TEMP_FAILURE_RETRY(expression) \
do { (void)TEMP_FAILURE_RETRY(expression); } while (0)
#endif
#if defined(__FreeBSD__) || defined(__APPLE__)
#define lseek64(fd, offset, whence) lseek(fd, offset, whence)
#endif
#if defined(__sun) || defined(_AIX)
#define LOG_AUTHPRIV (10<<3)
#define LOG_FTP (11<<3)
#define __STRING(x) "x"
#endif
#if defined(__sun) || defined(_AIX) || defined(_WIN32)
#define IFTODT(mode) (((mode) & 0170000) >> 12)
#endif
#if defined(_AIX)
#define MSG_DONTWAIT MSG_NONBLOCK
#endif
#if defined(HAVE_PTHREAD_SETNAME_NP)
#if defined(__APPLE__)
#define ceph_pthread_setname(thread, name) ({ \
int __result = 0; \
if (thread == pthread_self()) \
__result = pthread_setname_np(name); \
__result; })
#else
#define ceph_pthread_setname pthread_setname_np
#endif
#elif defined(HAVE_PTHREAD_SET_NAME_NP)
/* Fix a small name diff and return 0 */
#define ceph_pthread_setname(thread, name) ({ \
pthread_set_name_np(thread, name); \
0; })
#else
/* compiler warning free success noop */
#define ceph_pthread_setname(thread, name) ({ \
int __i = 0; \
__i; })
#endif
#if defined(HAVE_PTHREAD_GETNAME_NP)
#define ceph_pthread_getname pthread_getname_np
#elif defined(HAVE_PTHREAD_GET_NAME_NP)
#define ceph_pthread_getname(thread, name, len) ({ \
pthread_get_name_np(thread, name, len); \
0; })
#else
/* compiler warning free success noop */
#define ceph_pthread_getname(thread, name, len) ({ \
if (name != NULL) \
*name = '\0'; \
0; })
#endif
int ceph_posix_fallocate(int fd, off_t offset, off_t len);
#ifdef __cplusplus
extern "C" {
#endif
int pipe_cloexec(int pipefd[2], int flags);
char *ceph_strerror_r(int errnum, char *buf, size_t buflen);
unsigned get_page_size();
// On success, returns the number of bytes written to the buffer. On
// failure, returns -1.
ssize_t get_self_exe_path(char* path, int buff_length);
int ceph_memzero_s(void *dest, size_t destsz, size_t count);
#ifdef __cplusplus
}
#endif
#if defined(_WIN32)
#include "include/win32/winsock_compat.h"
#include <windows.h>
#include <time.h>
#include "include/win32/win32_errno.h"
// There are a few name collisions between Windows headers and Ceph.
// Updating Ceph definitions would be the preferable fix in order to avoid
// confusion, unless it requires too many changes, in which case we're going
// to redefine Windows values by adding the "WIN32_" prefix.
#define WIN32_DELETE 0x00010000L
#undef DELETE
#define WIN32_ERROR 0
#undef ERROR
#ifndef uint
typedef unsigned int uint;
#endif
typedef _sigset_t sigset_t;
typedef unsigned int blksize_t;
typedef unsigned __int64 blkcnt_t;
typedef unsigned short nlink_t;
typedef long long loff_t;
#define CPU_SETSIZE (sizeof(size_t)*8)
typedef union
{
char cpuset[CPU_SETSIZE/8];
size_t _align;
} cpu_set_t;
struct iovec {
void *iov_base;
size_t iov_len;
};
#define SHUT_RD SD_RECEIVE
#define SHUT_WR SD_SEND
#define SHUT_RDWR SD_BOTH
#ifndef SIGINT
#define SIGINT 2
#endif
#ifndef SIGKILL
#define SIGKILL 9
#endif
#define IOV_MAX 1024
#ifdef __cplusplus
extern "C" {
#endif
ssize_t readv(int fd, const struct iovec *iov, int iov_cnt);
ssize_t writev(int fd, const struct iovec *iov, int iov_cnt);
int fsync(int fd);
ssize_t pread(int fd, void *buf, size_t count, off_t offset);
ssize_t pwrite(int fd, const void *buf, size_t count, off_t offset);
long int lrand48(void);
int random();
int pipe(int pipefd[2]);
int posix_memalign(void **memptr, size_t alignment, size_t size);
char *strptime(const char *s, const char *format, struct tm *tm);
int chown(const char *path, uid_t owner, gid_t group);
int fchown(int fd, uid_t owner, gid_t group);
int lchown(const char *path, uid_t owner, gid_t group);
int setenv(const char *name, const char *value, int overwrite);
int geteuid();
int getegid();
int getuid();
int getgid();
#define unsetenv(name) _putenv_s(name, "")
int win_socketpair(int socks[2]);
#ifdef __MINGW32__
extern _CRTIMP errno_t __cdecl _putenv_s(const char *_Name,const char *_Value);
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#define htobe16(x) __builtin_bswap16(x)
#define htole16(x) (x)
#define be16toh(x) __builtin_bswap16(x)
#define le16toh(x) (x)
#define htobe32(x) __builtin_bswap32(x)
#define htole32(x) (x)
#define be32toh(x) __builtin_bswap32(x)
#define le32toh(x) (x)
#define htobe64(x) __builtin_bswap64(x)
#define htole64(x) (x)
#define be64toh(x) __builtin_bswap64(x)
#define le64toh(x) (x)
#endif // defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#endif // __MINGW32__
#ifdef __cplusplus
}
#endif
#define compat_closesocket closesocket
// Use "aligned_free" when freeing memory allocated using posix_memalign or
// _aligned_malloc. Using "free" will crash.
// Windows: memory from _aligned_malloc()/posix_memalign() must be released
// with _aligned_free(); plain free() crashes (see note above).
static inline void aligned_free(void* ptr) {
  _aligned_free(ptr);
}
// O_CLOEXEC is not defined on Windows. Since handles aren't inherited
// with subprocesses unless explicitly requested, we'll define this
// flag as a no-op.
#define O_CLOEXEC 0
#define SOCKOPT_VAL_TYPE char*
#define DEV_NULL "nul"
#else /* WIN32 */
#define SOCKOPT_VAL_TYPE void*
// POSIX: posix_memalign() memory is ordinary heap memory, so free() is fine.
static inline void aligned_free(void* ptr) {
  free(ptr);
}
// POSIX sockets are plain file descriptors; close() works.
static inline int compat_closesocket(int fildes) {
  return close(fildes);
}
#define DEV_NULL "/dev/null"
#endif /* WIN32 */
/* Supplies code to be run at startup time before invoking main().
* Use as:
*
* CEPH_CONSTRUCTOR(my_constructor) {
* ...some code...
* }
*/
#ifdef _MSC_VER
#pragma section(".CRT$XCU",read)
#define CEPH_CONSTRUCTOR(f) \
static void __cdecl f(void); \
__declspec(allocate(".CRT$XCU")) static void (__cdecl*f##_)(void) = f; \
static void __cdecl f(void)
#else
#define CEPH_CONSTRUCTOR(f) \
static void f(void) __attribute__((constructor)); \
static void f(void)
#endif
/* This should only be used with the socket API. */
static inline int ceph_sock_errno() {
#ifdef _WIN32
  // Winsock reports errors via WSAGetLastError(), not errno; translate the
  // WSAE* code to the closest errno value.
  return wsae_to_errno(WSAGetLastError());
#else
  return errno;
#endif
}
// Needed on Windows when handling binary files. Without it, line
// endings will be replaced and certain characters can be treated as
// EOF.
#ifndef O_BINARY
#define O_BINARY 0
#endif
#endif /* !CEPH_COMPAT_H */
| 10,220 | 23.27791 | 91 | h |
null | ceph-main/src/include/coredumpctl.h | #pragma once
#include "acconfig.h"
#ifdef HAVE_SYS_PRCTL_H
#include <iostream>
#include <sys/prctl.h>
#include "common/errno.h"
// RAII guard that sets the process "dumpable" flag (PR_SET_DUMPABLE) for its
// lifetime and restores the previous value on destruction.  Used in tests to
// suppress (or allow) core dumps around code that deliberately crashes.
class PrCtl {
  int saved_state = -1;  // previous PR_GET_DUMPABLE value; -1 = nothing to restore
  // Returns the current dumpable flag, or -1 on failure (a warning is
  // printed).  Fixed: on failure this used to return errno (a positive
  // value), so the callers' `r == -1` checks could never detect the error
  // and a bogus saved_state could be recorded.
  static int get_dumpable() {
    int r = prctl(PR_GET_DUMPABLE);
    if (r == -1) {
      std::cerr << "warning: unable to get dumpable flag: "
                << cpp_strerror(errno) << std::endl;
    }
    return r;
  }
  // Sets the dumpable flag; returns 0 on success, -errno on failure (with a
  // warning printed).
  static int set_dumpable(bool new_state) {
    int r = prctl(PR_SET_DUMPABLE, new_state);
    if (r) {
      r = -errno;
      std::cerr << "warning: unable to " << (new_state ? "set" : "unset")
                << " dumpable flag: " << cpp_strerror(r)
                << std::endl;
    }
    return r;
  }
public:
  // Switches the dumpable flag to new_state (default: not dumpable),
  // remembering the old value only if the switch actually succeeded.
  PrCtl(int new_state = 0) {
    int r = get_dumpable();
    if (r == -1) {
      return;
    }
    if (r != new_state) {
      if (!set_dumpable(new_state)) {
        saved_state = r;
      }
    }
  }
  ~PrCtl() {
    if (saved_state < 0) {
      return;
    }
    set_dumpable(saved_state);
  }
};
#else
#ifdef RLIMIT_CORE
#include <sys/resource.h>
#include <iostream>
#include <sys/resource.h>
#include "common/errno.h"
// Fallback for platforms without prctl(): emulate dumpability control by
// raising or zeroing the RLIMIT_CORE soft limit for the guard's lifetime.
class PrCtl {
  rlimit saved_lim;  // limit in effect at construction, restored by the dtor
  // Fetches RLIMIT_CORE into *saved; returns getrlimit()'s result (0 on
  // success, -1 on failure, with a warning printed).  Fixed: on failure this
  // used to return errno (a positive value), so the constructor's `r == -1`
  // check could never detect the error.
  static int get_dumpable(rlimit* saved) {
    int r = getrlimit(RLIMIT_CORE, saved);
    if (r) {
      std::cerr << "warning: unable to getrlimit(): " << cpp_strerror(errno)
                << std::endl;
    }
    return r;
  }
  // Applies the given RLIMIT_CORE; failures are reported but not fatal.
  static void set_dumpable(const rlimit& rlim) {
    int r = setrlimit(RLIMIT_CORE, &rlim);
    if (r) {
      r = -errno;
      std::cerr << "warning: unable to setrlimit(): " << cpp_strerror(r)
                << std::endl;
    }
  }
public:
  // new_state != 0: allow cores (soft limit = hard limit);
  // new_state == 0: forbid cores (soft and hard limit = 0).
  PrCtl(int new_state = 0) {
    int r = get_dumpable(&saved_lim);
    if (r == -1) {
      return;
    }
    rlimit new_lim;
    if (new_state) {
      new_lim.rlim_cur = saved_lim.rlim_max;
    } else {
      new_lim.rlim_cur = new_lim.rlim_max = 0;
    }
    if (new_lim.rlim_cur == saved_lim.rlim_cur) {
      return;
    }
    set_dumpable(new_lim);
  }
  ~PrCtl() {
    set_dumpable(saved_lim);
  }
};
#else
// Platforms with neither prctl() nor RLIMIT_CORE: dumpability control is a
// no-op.
struct PrCtl {
  // to silence the Wunused-variable warning
  PrCtl() {}
};
#endif // RLIMIT_CORE
#endif
| 2,187 | 19.641509 | 78 | h |
null | ceph-main/src/include/counter.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_COUNTER_H
#define CEPH_COUNTER_H
#include <atomic>
// Instance-counting mixin: tracks, per tag type T, how many objects are
// currently alive (count()) and how many have ever been constructed
// (increments()).  Counters are process-wide, zero-initialized statics and
// thread-safe (std::atomic).
template <typename T>
class Counter {
public:
  Counter() {
    _count()++;
    _increments()++;
  }
  Counter(const Counter &rhs) {
    _count()++;
    _increments()++;
  }
  // Fixed: the move constructor was a no-op, but the destructor of the new
  // (moved-to) object still decrements _count, so any move made count()
  // drift low (eventually underflowing) and broke the
  // increments() == count() + decrements() invariant.  A move still creates
  // a new live object, so it must be counted like any other construction.
  Counter(Counter &&rhs) {
    _count()++;
    _increments()++;
  }
  ~Counter() {
    _count()--;
  }
  // Number of currently-live instances tagged with T.
  static uint64_t count() {
    return _count();
  }
  // Total constructions (default, copy or move) so far.
  static uint64_t increments() {
    return _increments();
  }
  // Total destructions so far, derived from the other two counters.
  static uint64_t decrements() {
    return increments()-count();
  }

private:
  static std::atomic<uint64_t> &_count() {
    static std::atomic<uint64_t> c;  // zero-initialized (static storage)
    return c;
  }
  static std::atomic<uint64_t> &_increments() {
    static std::atomic<uint64_t> i;
    return i;
  }
};
#endif
| 1,114 | 18.561404 | 70 | h |
null | ceph-main/src/include/cpp_lib_backport.h | #pragma once
#include <cstring>
#include <type_traits>
namespace std {
#ifndef __cpp_lib_bit_cast
#define __cpp_lib_bit_cast 201806L
/// Create a value of type `To` from the bits of `from`.
template<typename To, typename From>
requires (sizeof(To) == sizeof(From)) &&
std::is_trivially_copyable_v<From> &&
std::is_trivially_copyable_v<To>
[[nodiscard]] constexpr To
bit_cast(const From& from) noexcept {
#if __has_builtin(__builtin_bit_cast)
return __builtin_bit_cast(To, from);
#else
static_assert(std::is_trivially_constructible_v<To>);
To to;
std::memcpy(&to, &from, sizeof(To));
return to;
#endif
}
#endif // __cpp_lib_bit_cast
} // namespace std
| 685 | 21.129032 | 56 | h |
null | ceph-main/src/include/crc32c.h | #ifndef CEPH_CRC32C_H
#define CEPH_CRC32C_H
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef uint32_t (*ceph_crc32c_func_t)(uint32_t crc, unsigned char const *data, unsigned length);
/*
* this is a static global with the chosen crc32c implementation for
* the given architecture.
*/
extern ceph_crc32c_func_t ceph_crc32c_func;
extern ceph_crc32c_func_t ceph_choose_crc32(void);
/**
* calculate crc32c for data that is entirely 0 (ZERO)
*
* Note: works the same as ceph_crc32c_func for data == nullptr,
* but faster than the optimized assembly on certain architectures.
* This is faster than intel optimized assembly, but not as fast as
* ppc64le optimized assembly.
*
* @param crc initial value
* @param length length of buffer
*/
uint32_t ceph_crc32c_zeros(uint32_t crc, unsigned length);
/**
* calculate crc32c
*
* Note: if the data pointer is NULL, we calculate a crc value as if
* it were zero-filled.
*
* @param crc initial value
* @param data pointer to data buffer
* @param length length of buffer
*/
static inline uint32_t ceph_crc32c(uint32_t crc, unsigned char const *data, unsigned length)
{
#ifndef HAVE_POWER8
  // NULL data means "as if zero-filled"; use the dedicated zeros routine for
  // non-trivial lengths.  Skipped on POWER8, where (per the note above) the
  // ppc64le-optimized assembly is faster than the zeros shortcut.
  if (!data && length > 16)
    return ceph_crc32c_zeros(crc, length);
#endif /* HAVE_POWER8 */
  // Dispatch to the implementation chosen for this architecture.
  return ceph_crc32c_func(crc, data, length);
}
#ifdef __cplusplus
}
#endif
#endif
| 1,356 | 22.396552 | 97 | h |
null | ceph-main/src/include/demangle.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Allen Samuels <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_INCLUDE_DEMANGLE
#define CEPH_INCLUDE_DEMANGLE
//// Stole this code from http://stackoverflow.com/questions/281818/unmangling-the-result-of-stdtype-infoname
#ifdef __GNUG__
#include <cstdlib>
#include <memory>
#include <cxxabi.h>
// Demangle an Itanium-ABI (g++/clang) symbol name.  Returns the demangled
// form on success; if `name` is not a valid mangled name it is returned
// unchanged.
static std::string ceph_demangle(const char* name)
{
  int status = -4;  // arbitrary initial value; __cxa_demangle overwrites it
  char* demangled = abi::__cxa_demangle(name, nullptr, nullptr, &status);
  // __cxa_demangle returns malloc()ed storage; free it on every exit path.
  std::unique_ptr<char, void (*)(void*)> guard{demangled, std::free};
  if (status != 0)
    return name;
  return demangled;
}
#else
// does nothing if not g++
// Fallback for non-GNU compilers: no demangling support, just return the
// name unchanged.  Fixed: this was named `demangle`, inconsistent with the
// __GNUG__ branch above, so ceph_demangle() could never resolve on non-GNU
// builds.
static std::string ceph_demangle(const char* name)
{
  return name;
}
#endif
#endif
| 1,146 | 22.408163 | 109 | h |
null | ceph-main/src/include/denc.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Allen Samuels <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
// If you #include "include/encoding.h" you get the old-style *and*
// the new-style definitions. (The old-style needs denc_traits<> in
// order to disable the container helpers when new-style traits are
// present.)
// You can also just #include "include/denc.h" and get only the
// new-style helpers. The eventual goal is to drop the legacy
// definitions.
#ifndef _ENC_DEC_H
#define _ENC_DEC_H
#include <array>
#include <bit>
#include <cstring>
#include <concepts>
#include <map>
#include <optional>
#include <set>
#include <string>
#include <type_traits>
#include <vector>
#include <boost/container/flat_map.hpp>
#include <boost/container/flat_set.hpp>
#include <boost/container/small_vector.hpp>
#include <boost/intrusive/set.hpp>
#include <boost/optional.hpp>
#include "include/cpp_lib_backport.h"
#include "include/compat.h"
#include "include/int_types.h"
#include "include/scope_guard.h"
#include "buffer.h"
#include "byteorder.h"
#include "common/convenience.h"
#include "common/error_code.h"
// Primary denc_traits template: any type without a specialization is not
// denc-supported (supported == false), which lets the legacy encoding
// helpers take over.  The second (void) parameter is a SFINAE hook for
// partial specializations.  Flag meanings (per the design comment below):
// featured  - encoding depends on a feature-bit argument;
// bounded   - bound_encode gives an upper bound rather than an exact size;
// need_contiguous - decode requires a contiguous buffer.
template<typename T, typename=void>
struct denc_traits {
  static constexpr bool supported = false;
  static constexpr bool featured = false;
  static constexpr bool bounded = false;
  static constexpr bool need_contiguous = true;
};
// Convenience variable template: true iff T has new-style denc support.
template<typename T>
inline constexpr bool denc_supported = denc_traits<T>::supported;
// hack for debug only; FIXME
//#include <iostream>
//using std::cout;
// Define this to compile in a dump of all encoded objects to disk to
// populate ceph-object-corpus. Note that there is an almost
// identical implementation in encoding.h, but you only need to define
// ENCODE_DUMP_PATH here.
//
// See src/test/encoding/generate-corpus-objects.sh.
//
//#define ENCODE_DUMP_PATH /tmp/something
#ifdef ENCODE_DUMP_PATH
# include <cstdio>
# include <sys/types.h>
# include <sys/stat.h>
# include <fcntl.h>
# define ENCODE_STR(x) #x
# define ENCODE_STRINGIFY(x) ENCODE_STR(x)
// Debug-only helper (compiled in only when ENCODE_DUMP_PATH is defined):
// captures a sample of encoded objects to files under ENCODE_DUMP_PATH so
// they can populate ceph-object-corpus.  Construct it at the start of an
// encode; the destructor decides (probabilistically) whether to dump the
// bytes appended since construction.
template<typename T>
class DencDumper {
public:
  DencDumper(const char* name,
	     const ceph::bufferlist::contiguous_appender& appender)
    : name{name},
      appender{appender},
      bl_offset{appender.bl.length()},
      space_offset{space_size()},
      start{appender.get_pos()}
  {}
  ~DencDumper() {
    if (do_sample()) {
      dump();
    }
  }
private:
  // Decide whether this encode should be dumped: sample when the call
  // counter has at most two set bits.
  static bool do_sample() {
    // this hackery with bits below is just to get a semi-reasonable
    // distribution across time.  it is somewhat exponential but not
    // quite.
    i++;
    int bits = 0;
    for (unsigned t = i; t; bits++)
      t &= t - 1;
    return bits <= 2;
  }
  // Bytes reserved in the appender but not yet accounted to the bufferlist.
  size_t space_size() const {
    return appender.get_logical_offset() - appender.get_out_of_band_offset();
  }
  // Write the bytes encoded since construction to a uniquely-named file;
  // errors are silently ignored (this is best-effort debug tooling).
  void dump() const {
    char fn[PATH_MAX];
    ::snprintf(fn, sizeof(fn),
	       ENCODE_STRINGIFY(ENCODE_DUMP_PATH) "/%s__%d.%x", name,
	       getpid(), i++);
    int fd = ::open(fn, O_WRONLY|O_TRUNC|O_CREAT|O_CLOEXEC|O_BINARY, 0644);
    if (fd < 0) {
      return;
    }
    auto close_fd = make_scope_guard([fd] { ::close(fd); });
    if (auto bl_delta = appender.bl.length() - bl_offset; bl_delta > 0) {
      // The bufferlist grew: stitch together the committed bytes plus the
      // still-pending appender space.
      ceph::bufferlist dump_bl;
      appender.bl.begin(bl_offset + space_offset).copy(bl_delta - space_offset, dump_bl);
      const size_t space_len = space_size();
      dump_bl.append(appender.get_pos() - space_len, space_len);
      dump_bl.write_fd(fd);
    } else {
      // Everything still sits in the appender's contiguous buffer.
      size_t len = appender.get_pos() - start;
      [[maybe_unused]] int r = ::write(fd, start, len);
    }
  }
  const char* name;
  const ceph::bufferlist::contiguous_appender& appender;
  const size_t bl_offset;     // bufferlist length at construction
  const size_t space_offset;  // pending appender space at construction
  const char* start;          // appender write position at construction
  static int i;               // per-type sample counter
};
template<typename T> int DencDumper<T>::i = 0;
# define DENC_DUMP_PRE(Type) \
DencDumper<Type> _denc_dumper{#Type, p};
#else
# define DENC_DUMP_PRE(Type)
#endif
/*
top level level functions look like so
======================================
inline void denc(const T& o, size_t& p, uint64_t features=0);
inline void denc(const T& o, ceph::buffer::list::contiguous_appender& p,
uint64_t features=0);
inline void denc(T& o, ceph::buffer::ptr::const_iterator& p, uint64_t features=0);
or (for featured objects)
inline void denc(const T& o, size_t& p, uint64_t features);
inline void denc(const T& o, ceph::buffer::list::contiguous_appender& p,
uint64_t features);
inline void denc(T& o, ceph::buffer::ptr::const_iterator& p, uint64_t features);
- These are symmetrical, so that they can be used from the magic DENC
method of writing the bound_encode/encode/decode methods all in one go;
they differ only in the type of p.
- These are automatically fabricated via a template that calls into
the denc_traits<> methods (see below), provided denc_traits<T>::supported
is defined and true. They never need to be written explicitly.
static denc_traits<> definitions look like so
=============================================
template<>
struct denc_traits<T> {
static constexpr bool supported = true;
static constexpr bool bounded = false;
static constexpr bool featured = false;
static constexpr bool need_contiguous = true;
static void bound_encode(const T &o, size_t& p, uint64_t f=0);
static void encode(const T &o, ceph::buffer::list::contiguous_appender& p,
uint64_t f=0);
static void decode(T& o, ceph::buffer::ptr::const_iterator &p, uint64_t f=0);
};
or (for featured objects)
template<>
struct denc_traits<T> {
static constexpr bool supported = true;
static constexpr bool bounded = false;
static constexpr bool featured = true;
static constexpr bool need_contiguous = true;
static void bound_encode(const T &o, size_t& p, uint64_t f);
static void encode(const T &o, ceph::buffer::list::contiguous_appender& p,
uint64_t f);
static void decode(T& o, ceph::buffer::ptr::const_iterator &p, uint64_t f=0);
};
- denc_traits<T> is normally declared via the WRITE_CLASS_DENC(type) macro,
which is used in place of the old-style WRITE_CLASS_ENCODER(type) macro.
There are _FEATURED and _BOUNDED variants. The class traits simply call
into class methods of the same name (see below).
- denc_traits<T> can also be written explicitly for some type to indicate
how it should be encoded. This is the "source of truth" for how a type
is encoded.
- denc_traits<T> are declared for the base integer types, string, ceph::buffer::ptr,
and ceph::buffer::list base types.
- denc_traits<std::foo<T>>-like traits are declared for standard container
types.
class methods look like so
==========================
void bound_encode(size_t& p) const;
void encode(ceph::buffer::list::contiguous_appender& p) const;
void decode(ceph::buffer::ptr::const_iterator &p);
or (for featured objects)
void bound_encode(size_t& p, uint64_t f) const;
void encode(ceph::buffer::list::contiguous_appender& p, uint64_t f) const;
void decode(ceph::buffer::ptr::const_iterator &p);
- These are normally invoked by the denc_traits<> methods that are
declared via WRITE_CLASS_DENC, although you can also invoke them explicitly
in your code.
 - These methods are optimised for a contiguous buffer, but denc() will try to
   rebuild a contiguous one if the decoded ceph::buffer::list is segmented. If you are
concerned about the cost, you might want to define yet another method:
void decode(ceph::buffer::list::iterator &p);
- These can be defined either explicitly (as above), or can be "magically"
defined all in one go using the DENC macro and DENC_{START,FINISH} helpers
(which work like the legacy {ENCODE,DECODE}_{START,FINISH} macros):
class foo_t {
...
DENC(foo_t, v, p) {
DENC_START(1, 1, p);
denc(v.foo, p);
denc(v.bar, p);
denc(v.baz, p);
DENC_FINISH(p);
}
...
};
WRITE_CLASS_DENC(foo_t)
*/
// ---------------------------------------------------------------------
// raw types
namespace _denc {
// True iff T is the same type as one of U... .
template<typename T, typename... U>
concept is_any_of = (std::same_as<T, U> || ...);
// Maps an enum to its underlying integer type; identity otherwise.
template<typename T, typename=void> struct underlying_type {
  using type = T;
};
template<typename T>
struct underlying_type<T, std::enable_if_t<std::is_enum_v<T>>> {
  using type = std::underlying_type_t<T>;
};
template<typename T>
using underlying_type_t = typename underlying_type<T>::type;
}
// An iterator is a "const" (decode-side) iterator when get_pos_add
// yields const char*; non-const iterators are encode sinks.
template<class It>
concept is_const_iterator = requires(It& it, size_t n) {
  { it.get_pos_add(n) } -> std::same_as<const char*>;
};
// Read a T in place from a decode iterator: advance by sizeof(T) and
// reinterpret the raw little-endian bytes.
template<typename T, is_const_iterator It>
const T& get_pos_add(It& i) {
  return *reinterpret_cast<const T*>(i.get_pos_add(sizeof(T)));
}
// Reserve sizeof(T) bytes in an encode sink and return them as T& so
// the caller can assign the value directly into the buffer.
template<typename T, class It>
requires (!is_const_iterator<It>)
T& get_pos_add(It& i) {
  return *reinterpret_cast<T*>(i.get_pos_add(sizeof(T)));
}
// Traits for types whose in-memory representation already equals the
// wire representation (explicit little-endian wrappers and single
// bytes): encode/decode are a fixed-size raw copy.
template<typename T>
requires _denc::is_any_of<_denc::underlying_type_t<T>,
			  ceph_le64, ceph_le32, ceph_le16, uint8_t
#ifndef _CHAR_IS_SIGNED
			  , int8_t
#endif
			  >
struct denc_traits<T> {
  static constexpr bool supported = true;
  static constexpr bool featured = false;
  static constexpr bool bounded = true;
  static constexpr bool need_contiguous = false;
  static void bound_encode(const T &o, size_t& p, uint64_t f=0) {
    p += sizeof(T);
  }
  template<class It>
  requires (!is_const_iterator<It>)
  static void encode(const T &o, It& p, uint64_t f=0) {
    get_pos_add<T>(p) = o;
  }
  template<is_const_iterator It>
  static void decode(T& o, It& p, uint64_t f=0) {
    o = get_pos_add<T>(p);
  }
  // segmented-buffer decode: copy the bytes out instead of aliasing
  static void decode(T& o, ceph::buffer::list::const_iterator &p) {
    p.copy(sizeof(T), reinterpret_cast<char*>(&o));
  }
};
// -----------------------------------------------------------------------
// integer types
// itype == internal type
// otype == external type, i.e., the type on the wire
// NOTE: the overload resolution ensures that the legacy encode/decode methods
// defined for int types is preferred to the ones defined using the specialized
// template, and hence get selected. This machinery prevents these these from
// getting glued into the legacy encode/decode methods; the overhead of setting
// up a contiguous_appender etc is likely to be slower.
namespace _denc {
// Maps a host integer type to its external (wire) type: an explicit
// little-endian type of the same width; void means "no mapping".
template<typename T> struct ExtType {
  using type = void;
};
template<typename T>
requires _denc::is_any_of<T,
			  int16_t, uint16_t>
struct ExtType<T> {
  using type = ceph_le16;
};
template<typename T>
requires _denc::is_any_of<T,
			  int32_t, uint32_t>
struct ExtType<T> {
  using type = ceph_le32;
};
template<typename T>
requires _denc::is_any_of<T,
			  int64_t, uint64_t>
struct ExtType<T> {
  using type = ceph_le64;
};
// bool travels as a single byte
template<>
struct ExtType<bool> {
  using type = uint8_t;
};
template<typename T>
using ExtType_t = typename ExtType<T>::type;
} // namespace _denc
// Traits for host integer types (and bool): convert to/from the
// fixed-width little-endian external type on the wire.
template<typename T>
requires (!std::is_void_v<_denc::ExtType_t<T>>)
struct denc_traits<T>
{
  static constexpr bool supported = true;
  static constexpr bool featured = false;
  static constexpr bool bounded = true;
  static constexpr bool need_contiguous = false;
  using etype = _denc::ExtType_t<T>;
  static void bound_encode(const T &o, size_t& p, uint64_t f=0) {
    p += sizeof(etype);
  }
  template<class It>
  requires (!is_const_iterator<It>)
  static void encode(const T &o, It& p, uint64_t f=0) {
    get_pos_add<etype>(p) = o;
  }
  template<is_const_iterator It>
  static void decode(T& o, It &p, uint64_t f=0) {
    o = get_pos_add<etype>(p);
  }
  // segmented-buffer decode: stage through the external type
  static void decode(T& o, ceph::buffer::list::const_iterator &p) {
    etype e;
    p.copy(sizeof(etype), reinterpret_cast<char*>(&e));
    o = e;
  }
};
// varint
//
// high bit of each byte indicates another byte follows.
// Upper bound on the varint encoding of v.  Each encoded byte carries
// 7 payload bits, so an 8-byte value can need up to ceil(64/7) = 10
// bytes; the previous bound of sizeof(T) + 1 (9 for 64-bit types)
// under-counted that worst case.  sizeof(T) + 2 is a true upper bound
// for every supported width (2/4/8 bytes need at most 3/5/10 bytes).
template<typename T>
inline void denc_varint(T v, size_t& p) {
  p += sizeof(T) + 2;
}
// Append the varint encoding of v: 7 payload bits per byte,
// little-endian groups, with the high bit set on every byte except the
// last to signal continuation.
template<typename T>
inline void denc_varint(T v, ceph::buffer::list::contiguous_appender& p) {
  for (;;) {
    uint8_t b = v & 0x7f;
    v >>= 7;
    if (!v) {
      get_pos_add<__u8>(p) = b;         // final byte: high bit clear
      return;
    }
    get_pos_add<__u8>(p) = b | 0x80;    // more bytes follow
  }
}
// Decode a varint: accumulate 7 bits per byte (least-significant group
// first) until a byte with the continuation (high) bit clear is seen.
template<typename T>
inline void denc_varint(T& v, ceph::buffer::ptr::const_iterator& p) {
  uint8_t byte = *(__u8*)p.get_pos_add(1);
  v = byte & 0x7f;
  int shift = 7;
  while (byte & 0x80) {
    byte = get_pos_add<__u8>(p);
    v |= (T)(byte & 0x7f) << shift;
    shift += 7;
  }
}
// signed varint encoding
//
// low bit = 1 = negative, 0 = positive
// high bit of every byte indicates whether another byte follows.
// Upper bound for a signed varint: the value is shifted left one bit
// to make room for the sign flag, giving up to 65 significant bits ->
// ceil(65/7) = 10 bytes = sizeof(v) + 2.
inline void denc_signed_varint(int64_t v, size_t& p) {
  p += sizeof(v) + 2;
}
// Encode a signed value as a varint with the sign in the low bit:
// v >= 0 encodes as 2*v, v < 0 encodes as 2*|v| + 1 (note: this is not
// quite zigzag, which uses 2*|v| - 1 for negatives).  The magnitude is
// computed in unsigned arithmetic so that INT64_MIN -- where both -v
// and the subsequent left shift overflow int64_t, which is undefined
// behaviour -- takes a defined (wrap-around) path; the wire bytes for
// all representable values are unchanged.
template<class It>
requires (!is_const_iterator<It>)
void denc_signed_varint(int64_t v, It& p) {
  const bool negative = v < 0;
  // two's-complement negation in the unsigned domain: defined for all v
  const uint64_t magnitude = negative ? ~static_cast<uint64_t>(v) + 1
                                      : static_cast<uint64_t>(v);
  const uint64_t wire = (magnitude << 1) | static_cast<uint64_t>(negative);
  denc_varint(wire, p);
}
// Decode a signed varint: low bit of the decoded value is the sign
// flag, the remaining bits are the magnitude.
template<typename T, is_const_iterator It>
inline void denc_signed_varint(T& v, It& p)
{
  int64_t i = 0;
  denc_varint(i, p);
  if (i & 1) {
    v = -(i >> 1);
  } else {
    v = i >> 1;
  }
}
// varint + lowz encoding
//
// first(low) 2 bits = how many low zero bits (nibbles)
// high bit of each byte = another byte follows
// (so, 5 bits data in first byte, 7 bits data thereafter)
// Upper bound for a lowz varint: the value gains 2 flag bits, so up to
// 66 significant bits -> ceil(66/7) = 10 bytes = sizeof(v) + 2.
inline void denc_varint_lowz(uint64_t v, size_t& p) {
  p += sizeof(v) + 2;
}
// Encode v as a varint after stripping up to 3 trailing zero nibbles;
// the count of stripped nibbles travels in the low 2 bits.  This makes
// aligned values (common for offsets/lengths) encode shorter.
inline void denc_varint_lowz(uint64_t v,
			     ceph::buffer::list::contiguous_appender& p) {
  int lowznib = v ? (std::countr_zero(v) / 4) : 0;
  if (lowznib > 3)
    lowznib = 3;     // only 2 bits available to record the count
  v >>= lowznib * 4;
  v <<= 2;
  v |= lowznib;
  denc_varint(v, p);
}
// Decode a lowz varint: low 2 bits give the number of stripped zero
// nibbles to restore, the rest is the value.
template<typename T>
inline void denc_varint_lowz(T& v, ceph::buffer::ptr::const_iterator& p)
{
  uint64_t i = 0;
  denc_varint(i, p);
  int lowznib = (i & 3);
  i >>= 2;
  i <<= lowznib * 4;
  v = i;
}
// signed varint + lowz encoding
//
// first low bit = 1 for negative, 0 for positive
// next 2 bits = how many low zero bits (nibbles)
// high bit of each byte = another byte follows
// (so, 4 bits data in first byte, 7 bits data thereafter)
// Upper bound for a signed lowz varint: sign flag + 2 lowz bits add up
// to 3 bits -> at most 67 significant bits -> ceil(67/7) = 10 bytes.
inline void denc_signed_varint_lowz(int64_t v, size_t& p) {
  p += sizeof(v) + 2;
}
// Encode a signed value as a lowz varint: low bit is the sign flag,
// the next 2 bits record how many trailing zero nibbles (up to 3) were
// stripped from the magnitude, and the remaining bits are the shifted
// magnitude.  All arithmetic on the magnitude is done in uint64_t:
// the previous int64_t form invoked undefined behaviour for INT64_MIN
// (negation overflow) and for large magnitudes (signed left shift
// overflow).  Wire bytes for all representable values are unchanged.
template<class It>
requires (!is_const_iterator<It>)
inline void denc_signed_varint_lowz(int64_t v, It& p) {
  const bool negative = v < 0;
  // two's-complement negation in the unsigned domain: defined for all v
  uint64_t u = negative ? ~static_cast<uint64_t>(v) + 1
                        : static_cast<uint64_t>(v);
  unsigned lowznib = u ? (std::countr_zero(u) / 4) : 0u;
  if (lowznib > 3)
    lowznib = 3;     // only 2 bits available to record the count
  u >>= lowznib * 4;
  u <<= 3;
  u |= uint64_t(lowznib) << 1;
  u |= static_cast<uint64_t>(negative);
  denc_varint(u, p);
}
// Decode a signed lowz varint: bit 0 is the sign, bits 1-2 the number
// of stripped zero nibbles, the rest the magnitude.
template<typename T, is_const_iterator It>
inline void denc_signed_varint_lowz(T& v, It& p)
{
  int64_t i = 0;
  denc_varint(i, p);
  int lowznib = (i & 6) >> 1;
  if (i & 1) {
    i >>= 3;
    i <<= lowznib * 4;
    v = -i;
  } else {
    i >>= 3;
    i <<= lowznib * 4;
    v = i;
  }
}
// LBA
//
// first 1-3 bits = how many low zero bits
// *0 = 12 (common 4 K alignment case)
// *01 = 16
// *011 = 20
// *111 = byte
// then 28-30 bits of data
// then last bit = another byte follows
// high bit of each subsequent byte = another byte follows
// Upper bound for the LBA encoding: a 4-byte word plus up to 6
// continuation bytes -> sizeof(v) + 2 = 10 bytes is always enough.
inline void denc_lba(uint64_t v, size_t& p) {
  p += sizeof(v) + 2;
}
// Encode an LBA (see format comment above): the leading bits of the
// first 32-bit word select the alignment (12/16/20 zero bits, or raw
// bytes), followed by the value bits; bit 31 of the word and the high
// bit of each following byte signal continuation.
template<class It>
requires (!is_const_iterator<It>)
inline void denc_lba(uint64_t v, It& p) {
  int low_zero_nibbles = v ? std::countr_zero(v) / 4 : 0;
  int pos;         // number of prefix bits used by the alignment tag
  uint32_t word;   // first 32-bit unit being assembled
  int t = low_zero_nibbles - 3;
  if (t < 0) {
    // not even 12 zero bits: byte-aligned encoding, tag 0b111
    pos = 3;
    word = 0x7;
  } else if (t < 3) {
    // 12/16/20 zero bits stripped; tag *0 / *01 / *011
    v >>= (low_zero_nibbles * 4);
    pos = t + 1;
    word = (1 << t) - 1;
  } else {
    // more than 20 zero bits: cap at the 20-bit case, tag 0b011
    v >>= 20;
    pos = 3;
    word = 0x3;
  }
  word |= (v << pos) & 0x7fffffff;
  v >>= 31 - pos;
  if (!v) {
    // fits in the single word; bit 31 stays clear (no continuation)
    *(ceph_le32*)p.get_pos_add(sizeof(uint32_t)) = word;
    return;
  }
  word |= 0x80000000;   // continuation: more bytes follow the word
  *(ceph_le32*)p.get_pos_add(sizeof(uint32_t)) = word;
  // remaining bits as a standard varint tail
  uint8_t byte = v & 0x7f;
  v >>= 7;
  while (v) {
    byte |= 0x80;
    *(__u8*)p.get_pos_add(1) = byte;
    byte = (v & 0x7f);
    v >>= 7;
  }
  *(__u8*)p.get_pos_add(1) = byte;
}
// Decode an LBA: inspect the low 3 tag bits of the first word to find
// the alignment, rebuild the value, then fold in any varint
// continuation bytes (bit 31 of the word signals them).
template<is_const_iterator It>
inline void denc_lba(uint64_t& v, It& p) {
  uint32_t word = *(ceph_le32*)p.get_pos_add(sizeof(uint32_t));
  int shift = 0;   // bit position where continuation bytes continue
  switch (word & 7) {
  case 0:
  case 2:
  case 4:
  case 6:
    // tag *0: value was 4K-aligned (12 low zero bits)
    v = (uint64_t)(word & 0x7ffffffe) << (12 - 1);
    shift = 12 + 30;
    break;
  case 1:
  case 5:
    // tag *01: 16 low zero bits
    v = (uint64_t)(word & 0x7ffffffc) << (16 - 2);
    shift = 16 + 29;
    break;
  case 3:
    // tag 011: 20 low zero bits
    v = (uint64_t)(word & 0x7ffffff8) << (20 - 3);
    shift = 20 + 28;
    break;
  case 7:
    // tag 111: byte-aligned (no stripped zeros)
    v = (uint64_t)(word & 0x7ffffff8) >> 3;
    shift = 28;
  }
  // bit 31 of the word (== high bit of its top byte) signals more bytes
  uint8_t byte = word >> 24;
  while (byte & 0x80) {
    byte = *(__u8*)p.get_pos_add(1);
    v |= (uint64_t)(byte & 0x7f) << shift;
    shift += 7;
  }
}
// ---------------------------------------------------------------------
// denc top-level methods that call into denc_traits<T> methods
// Top-level bound_encode dispatcher: adds an upper bound for T's
// encoding to p, forwarding the feature bits only for featured types.
template<typename T, typename traits=denc_traits<T>>
inline std::enable_if_t<traits::supported> denc(
  const T& o,
  size_t& p,
  uint64_t f=0)
{
  if constexpr (traits::featured) {
    traits::bound_encode(o, p, f);
  } else {
    traits::bound_encode(o, p);
  }
}
// Top-level encode dispatcher: writes o into the (non-const) encode
// sink, forwarding feature bits only for featured types.
template<typename T, class It, typename traits=denc_traits<T>>
requires traits::supported && (!is_const_iterator<It>)
inline void
denc(const T& o,
     It& p,
     uint64_t features=0)
{
  if constexpr (traits::featured) {
    traits::encode(o, p, features);
  } else {
    traits::encode(o, p);
  }
}
// Top-level decode dispatcher: reads o from a const (decode) iterator,
// forwarding feature bits only for featured types.
template<typename T, is_const_iterator It, typename traits=denc_traits<T>>
requires traits::supported
inline void
denc(T& o,
     It& p,
     uint64_t features=0)
{
  if constexpr (traits::featured) {
    traits::decode(o, p, features);
  } else {
    traits::decode(o, p);
  }
}
namespace _denc {
// Detects whether T can be decoded from a (possibly segmented)
// bufferlist iterator -- either via a member decode() taking that
// iterator, or because its traits do not require a contiguous buffer.
// When detected, exposes a uniform static decode() shim.
template<typename T, typename = void>
struct has_legacy_denc : std::false_type {};
template<typename T>
struct has_legacy_denc<T, decltype(std::declval<T&>()
				   .decode(std::declval<
					   ceph::buffer::list::const_iterator&>()))>
  : std::true_type {
  static void decode(T& v, ceph::buffer::list::const_iterator& p) {
    v.decode(p);
  }
};
template<typename T>
struct has_legacy_denc<T,
		       std::enable_if_t<
			 !denc_traits<T>::need_contiguous>> : std::true_type {
  static void decode(T& v, ceph::buffer::list::const_iterator& p) {
    denc_traits<T>::decode(v, p);
  }
};
}
// Decode straight from a bufferlist iterator when T supports it
// (see has_legacy_denc); avoids rebuilding a contiguous buffer.
template<typename T,
	 typename traits=denc_traits<T>,
	 typename has_legacy_denc=_denc::has_legacy_denc<T>>
inline std::enable_if_t<traits::supported &&
			has_legacy_denc::value> denc(
  T& o,
  ceph::buffer::list::const_iterator& p)
{
  has_legacy_denc::decode(o, p);
}
// ---------------------------------------------------------------------
// base types and containers
//
// std::string
//
// Traits for std::basic_string<char,...>: encoded as a 32-bit length
// followed by the raw bytes (no terminator).
template<typename A>
struct denc_traits<std::basic_string<char,std::char_traits<char>,A>> {
private:
  using value_type = std::basic_string<char,std::char_traits<char>,A>;
public:
  static constexpr bool supported = true;
  static constexpr bool featured = false;
  static constexpr bool bounded = false;
  static constexpr bool need_contiguous = false;
  static void bound_encode(const value_type& s, size_t& p, uint64_t f=0) {
    p += sizeof(uint32_t) + s.size();
  }
  template<class It>
  static void encode(const value_type& s,
		     It& p,
		     uint64_t f=0) {
    denc((uint32_t)s.size(), p);
    memcpy(p.get_pos_add(s.size()), s.data(), s.size());
  }
  template<class It>
  static void decode(value_type& s,
		     It& p,
		     uint64_t f=0) {
    uint32_t len;
    denc(len, p);
    decode_nohead(len, s, p);
  }
  static void decode(value_type& s, ceph::buffer::list::const_iterator& p)
  {
    uint32_t len;
    denc(len, p);
    decode_nohead(len, s, p);
  }
  // decode exactly len bytes (length header already consumed)
  template<class It>
  static void decode_nohead(size_t len, value_type& s, It& p) {
    s.clear();
    if (len) {
      s.append(p.get_pos_add(len), len);
    }
  }
  static void decode_nohead(size_t len, value_type& s,
			    ceph::buffer::list::const_iterator& p) {
    if (len) {
      if constexpr (std::is_same_v<value_type, std::string>) {
	s.clear();
	p.copy(len, s);
      } else {
	// non-default allocator: copy() can't target it directly
	s.resize(len);
	p.copy(len, s.data());
      }
    } else {
      s.clear();
    }
  }
  // encode the bytes without the length header
  template<class It>
  requires (!is_const_iterator<It>)
  static void
  encode_nohead(const value_type& s, It& p) {
    auto len = s.length();
    maybe_inline_memcpy(p.get_pos_add(len), s.data(), len, 16);
  }
};
//
// ceph::buffer::ptr
//
// Traits for ceph::buffer::ptr: 32-bit length followed by the raw
// bytes; contiguous decode aliases the source buffer (zero-copy).
template<>
struct denc_traits<ceph::buffer::ptr> {
  static constexpr bool supported = true;
  static constexpr bool featured = false;
  static constexpr bool bounded = false;
  static constexpr bool need_contiguous = false;
  static void bound_encode(const ceph::buffer::ptr& v, size_t& p, uint64_t f=0) {
    p += sizeof(uint32_t) + v.length();
  }
  template <class It>
  requires (!is_const_iterator<It>)
  static void
  encode(const ceph::buffer::ptr& v, It& p, uint64_t f=0) {
    denc((uint32_t)v.length(), p);
    p.append(v);
  }
  template <is_const_iterator It>
  static void
  decode(ceph::buffer::ptr& v, It& p, uint64_t f=0) {
    uint32_t len;
    denc(len, p);
    v = p.get_ptr(len);   // zero-copy reference into the source
  }
  static void decode(ceph::buffer::ptr& v, ceph::buffer::list::const_iterator& p) {
    uint32_t len;
    denc(len, p);
    ceph::buffer::list s;
    p.copy(len, s);
    if (len) {
      if (s.get_num_buffers() == 1)
	v = s.front();
      else
	// segmented source: flatten into a single contiguous ptr
	v = ceph::buffer::copy(s.c_str(), s.length());
    }
  }
};
//
// ceph::buffer::list
//
// Traits for ceph::buffer::list: 32-bit length followed by the bytes;
// *_nohead variants omit the length header for callers that encode the
// count separately.
template<>
struct denc_traits<ceph::buffer::list> {
  static constexpr bool supported = true;
  static constexpr bool featured = false;
  static constexpr bool bounded = false;
  static constexpr bool need_contiguous = false;
  static void bound_encode(const ceph::buffer::list& v, size_t& p, uint64_t f=0) {
    p += sizeof(uint32_t) + v.length();
  }
  static void encode(const ceph::buffer::list& v, ceph::buffer::list::contiguous_appender& p,
	      uint64_t f=0) {
    denc((uint32_t)v.length(), p);
    p.append(v);
  }
  static void decode(ceph::buffer::list& v, ceph::buffer::ptr::const_iterator& p, uint64_t f=0) {
    uint32_t len = 0;
    denc(len, p);
    v.clear();
    v.push_back(p.get_ptr(len));   // zero-copy reference into the source
  }
  static void decode(ceph::buffer::list& v, ceph::buffer::list::const_iterator& p) {
    uint32_t len;
    denc(len, p);
    v.clear();
    p.copy(len, v);
  }
  static void encode_nohead(const ceph::buffer::list& v,
			    ceph::buffer::list::contiguous_appender& p) {
    p.append(v);
  }
  static void decode_nohead(size_t len, ceph::buffer::list& v,
			    ceph::buffer::ptr::const_iterator& p) {
    v.clear();
    if (len) {
      v.append(p.get_ptr(len));
    }
  }
  static void decode_nohead(size_t len, ceph::buffer::list& v,
			    ceph::buffer::list::const_iterator& p) {
    v.clear();
    p.copy(len, v);
  }
};
//
// std::pair<A, B>
//
// Traits for std::pair<A,B>: first then second, back to back.  A may
// be const-qualified (as in map value_types); decode casts it away.
template<typename A, typename B>
struct denc_traits<
  std::pair<A, B>,
  std::enable_if_t<denc_supported<std::remove_const_t<A>> && denc_supported<B>>> {
  typedef denc_traits<A> a_traits;
  typedef denc_traits<B> b_traits;
  static constexpr bool supported = true;
  static constexpr bool featured = a_traits::featured || b_traits::featured ;
  static constexpr bool bounded = a_traits::bounded && b_traits::bounded;
  static constexpr bool need_contiguous = (a_traits::need_contiguous ||
					   b_traits::need_contiguous);
  static void bound_encode(const std::pair<A,B>& v, size_t& p, uint64_t f = 0) {
    if constexpr (featured) {
      denc(v.first, p, f);
      denc(v.second, p, f);
    } else {
      denc(v.first, p);
      denc(v.second, p);
    }
  }
  static void encode(const std::pair<A,B>& v, ceph::buffer::list::contiguous_appender& p,
		     uint64_t f = 0) {
    if constexpr (featured) {
      denc(v.first, p, f);
      denc(v.second, p, f);
    } else {
      denc(v.first, p);
      denc(v.second, p);
    }
  }
  static void decode(std::pair<A,B>& v, ceph::buffer::ptr::const_iterator& p, uint64_t f=0) {
    // cast away const on .first so map value_types can be decoded in place
    denc(const_cast<std::remove_const_t<A>&>(v.first), p, f);
    denc(v.second, p, f);
  }
  // segmented decode, only when neither member needs a contiguous buffer
  template<typename AA=A>
  static std::enable_if_t<!!sizeof(AA) && !need_contiguous>
  decode(std::pair<A,B>& v, ceph::buffer::list::const_iterator& p,
	  uint64_t f = 0) {
    denc(const_cast<std::remove_const_t<AA>&>(v.first), p);
    denc(v.second, p);
  }
};
namespace _denc {
// Shared traits implementation for variable-length containers: a
// 32-bit element count followed by the encoded elements.  The Details
// policy supplies the element type, reserve() and insert() strategy.
template<template<class...> class C, typename Details, typename ...Ts>
struct container_base {
private:
  using container = C<Ts...>;
  using T = typename Details::T;
public:
  using traits = denc_traits<T>;
  static constexpr bool supported = true;
  static constexpr bool featured = traits::featured;
  static constexpr bool bounded = false;
  static constexpr bool need_contiguous = traits::need_contiguous;
  template<typename U=T>
  static void bound_encode(const container& s, size_t& p, uint64_t f = 0) {
    p += sizeof(uint32_t);
    if constexpr (traits::bounded) {
      // bounded elements: size of one element times the count
#if _GLIBCXX_USE_CXX11_ABI
      // intentionally not calling container's empty() method to not prohibit
      // compiler from optimizing the check if it and the ::size() operate on
      // different memory (observed when std::list::empty() works on pointers,
      // not the size field).
      if (const auto elem_num = s.size(); elem_num > 0) {
#else
      if (!s.empty()) {
	const auto elem_num = s.size();
#endif
	// STL containers use weird element types like std::pair<const K, V>;
	// cast to something we have denc_traits for.
	size_t elem_size = 0;
	if constexpr (traits::featured) {
          denc(static_cast<const T&>(*s.begin()), elem_size, f);
	} else {
	  denc(static_cast<const T&>(*s.begin()), elem_size);
	}
	p += elem_size * elem_num;
      }
    } else {
      // unbounded elements: must bound each one individually
      for (const T& e : s) {
	if constexpr (traits::featured) {
	  denc(e, p, f);
	} else {
	  denc(e, p);
	}
      }
    }
  }
  template<typename U=T>
  static void encode(const container& s,
		     ceph::buffer::list::contiguous_appender& p,
		     uint64_t f = 0) {
    denc((uint32_t)s.size(), p);
    if constexpr (traits::featured) {
      encode_nohead(s, p, f);
    } else {
      encode_nohead(s, p);
    }
  }
  static void decode(container& s, ceph::buffer::ptr::const_iterator& p,
		     uint64_t f = 0) {
    uint32_t num;
    denc(num, p);
    decode_nohead(num, s, p, f);
  }
  // segmented decode, only when elements don't need a contiguous buffer
  template<typename U=T>
  static std::enable_if_t<!!sizeof(U) && !need_contiguous>
  decode(container& s, ceph::buffer::list::const_iterator& p) {
    uint32_t num;
    denc(num, p);
    decode_nohead(num, s, p);
  }
  // nohead variants: count travels separately
  static void encode_nohead(const container& s, ceph::buffer::list::contiguous_appender& p,
			    uint64_t f = 0) {
    for (const T& e : s) {
      if constexpr (traits::featured) {
	denc(e, p, f);
      } else {
	denc(e, p);
      }
    }
  }
  static void decode_nohead(size_t num, container& s,
			    ceph::buffer::ptr::const_iterator& p,
			    uint64_t f=0) {
    s.clear();
    Details::reserve(s, num);
    while (num--) {
      T t;
      denc(t, p, f);
      Details::insert(s, std::move(t));
    }
  }
  template<typename U=T>
  static std::enable_if_t<!!sizeof(U) && !need_contiguous>
  decode_nohead(size_t num, container& s,
		ceph::buffer::list::const_iterator& p) {
    s.clear();
    Details::reserve(s, num);
    while (num--) {
      T t;
      denc(t, p);
      Details::insert(s, std::move(t));
    }
  }
};
// Detects whether Container provides reserve(size_type), so decode
// paths can preallocate before inserting `num` elements.  The previous
// SFINAE probe tested &denc_traits<T>::reserve (a member that never
// exists) with a mismatched signature, so it always yielded false and
// reserve() was never called; this detection-idiom version fixes that
// while keeping the container_has_reserve<T>::value interface (the
// second parameter is defaulted).
template<typename T, typename = void>
class container_has_reserve : public std::false_type {};
template<typename T>
class container_has_reserve<
  T, std::void_t<decltype(std::declval<T&>().reserve(
			    std::declval<typename T::size_type>()))>>
  : public std::true_type {};
// Convenience alias for container_has_reserve<T>::value.
template<typename T>
inline constexpr bool container_has_reserve_v =
  container_has_reserve<T>::value;
// Common policy base: exposes the element type and a reserve() that is
// a no-op for containers without reserve support.
template<typename Container>
struct container_details_base {
  using T = typename Container::value_type;
  static void reserve(Container& c, size_t s) {
    if constexpr (container_has_reserve_v<Container>) {
      c.reserve(s);
    }
  }
};
// Insertion policy for sequence containers: append at the end.
template<typename Container>
struct pushback_details : public container_details_base<Container> {
  template<typename ...Args>
  static void insert(Container& c, Args&& ...args) {
    c.emplace_back(std::forward<Args>(args)...);
  }
};
}
// std::list: count + elements, appended in order.
template<typename T, typename ...Ts>
struct denc_traits<
  std::list<T, Ts...>,
  typename std::enable_if_t<denc_traits<T>::supported>>
  : public _denc::container_base<std::list,
				 _denc::pushback_details<std::list<T, Ts...>>,
				 T, Ts...> {};
// std::vector: count + elements, appended in order (reserved up front).
template<typename T, typename ...Ts>
struct denc_traits<
  std::vector<T, Ts...>,
  typename std::enable_if_t<denc_traits<T>::supported>>
  : public _denc::container_base<std::vector,
				 _denc::pushback_details<std::vector<T, Ts...>>,
				 T, Ts...> {};
// boost::container::small_vector: same wire format as std::vector
// (count + elements); written out by hand because the extra non-type N
// parameter doesn't fit container_base's template-template parameter.
template<typename T, std::size_t N, typename ...Ts>
struct denc_traits<
  boost::container::small_vector<T, N, Ts...>,
  typename std::enable_if_t<denc_traits<T>::supported>> {
private:
  using container = boost::container::small_vector<T, N, Ts...>;
public:
  using traits = denc_traits<T>;
  static constexpr bool supported = true;
  static constexpr bool featured = traits::featured;
  static constexpr bool bounded = false;
  static constexpr bool need_contiguous = traits::need_contiguous;
  template<typename U=T>
  static void bound_encode(const container& s, size_t& p, uint64_t f = 0) {
    p += sizeof(uint32_t);
    if constexpr (traits::bounded) {
      // bounded elements: one element's size times the count
      if (!s.empty()) {
	const auto elem_num = s.size();
	size_t elem_size = 0;
	if constexpr (traits::featured) {
	  denc(*s.begin(), elem_size, f);
	} else {
	  denc(*s.begin(), elem_size);
	}
	p += elem_size * elem_num;
      }
    } else {
      for (const T& e : s) {
	if constexpr (traits::featured) {
	  denc(e, p, f);
	} else {
	  denc(e, p);
	}
      }
    }
  }
  template<typename U=T>
  static void encode(const container& s,
		     ceph::buffer::list::contiguous_appender& p,
		     uint64_t f = 0) {
    denc((uint32_t)s.size(), p);
    if constexpr (traits::featured) {
      encode_nohead(s, p, f);
    } else {
      encode_nohead(s, p);
    }
  }
  static void decode(container& s, ceph::buffer::ptr::const_iterator& p,
		     uint64_t f = 0) {
    uint32_t num;
    denc(num, p);
    decode_nohead(num, s, p, f);
  }
  // segmented decode, only when elements don't need a contiguous buffer
  template<typename U=T>
  static std::enable_if_t<!!sizeof(U) && !need_contiguous>
  decode(container& s, ceph::buffer::list::const_iterator& p) {
    uint32_t num;
    denc(num, p);
    decode_nohead(num, s, p);
  }
  // nohead variants: count travels separately
  static void encode_nohead(const container& s, ceph::buffer::list::contiguous_appender& p,
			    uint64_t f = 0) {
    for (const T& e : s) {
      if constexpr (traits::featured) {
	denc(e, p, f);
      } else {
	denc(e, p);
      }
    }
  }
  static void decode_nohead(size_t num, container& s,
			    ceph::buffer::ptr::const_iterator& p,
			    uint64_t f=0) {
    s.clear();
    s.reserve(num);
    while (num--) {
      T t;
      denc(t, p, f);
      s.push_back(std::move(t));
    }
  }
  template<typename U=T>
  static std::enable_if_t<!!sizeof(U) && !need_contiguous>
  decode_nohead(size_t num, container& s,
		ceph::buffer::list::const_iterator& p) {
    s.clear();
    s.reserve(num);
    while (num--) {
      T t;
      denc(t, p);
      s.push_back(std::move(t));
    }
  }
};
namespace _denc {
// Insertion policy for set-like containers: hint at cend(), which is
// optimal because decoded elements arrive in sorted order.
template<typename Container>
struct setlike_details : public container_details_base<Container> {
  using T = typename Container::value_type;
  template<typename ...Args>
  static void insert(Container& c, Args&& ...args) {
    c.emplace_hint(c.cend(), std::forward<Args>(args)...);
  }
};
}
// std::set: count + elements (encoded in sorted order).
template<typename T, typename ...Ts>
struct denc_traits<
  std::set<T, Ts...>,
  std::enable_if_t<denc_traits<T>::supported>>
  : public _denc::container_base<std::set,
				 _denc::setlike_details<std::set<T, Ts...>>,
				 T, Ts...> {};
// boost::container::flat_set: same wire format as std::set.
template<typename T, typename ...Ts>
struct denc_traits<
  boost::container::flat_set<T, Ts...>,
  std::enable_if_t<denc_traits<T>::supported>>
  : public _denc::container_base<
  boost::container::flat_set,
  _denc::setlike_details<boost::container::flat_set<T, Ts...>>,
  T, Ts...> {};
namespace _denc {
// Insertion policy for map-like containers: hint at cend(), optimal
// because decoded key/value pairs arrive in sorted key order.
template<typename Container>
struct maplike_details : public container_details_base<Container> {
  using T = typename Container::value_type;
  template<typename ...Args>
  static void insert(Container& c, Args&& ...args) {
    c.emplace_hint(c.cend(), std::forward<Args>(args)...);
  }
};
}
// std::map: count + (key,value) pairs in sorted key order.
template<typename A, typename B, typename ...Ts>
struct denc_traits<
  std::map<A, B, Ts...>,
  std::enable_if_t<denc_traits<A>::supported &&
		   denc_traits<B>::supported>>
  : public _denc::container_base<std::map,
				 _denc::maplike_details<std::map<A, B, Ts...>>,
				 A, B, Ts...> {};
// boost::container::flat_map: same wire format as std::map.
template<typename A, typename B, typename ...Ts>
struct denc_traits<
  boost::container::flat_map<A, B, Ts...>,
  std::enable_if_t<denc_traits<A>::supported &&
		   denc_traits<B>::supported>>
  : public _denc::container_base<
  boost::container::flat_map,
  _denc::maplike_details<boost::container::flat_map<
			   A, B, Ts...>>,
  A, B, Ts...> {};
// Traits for std::array<T, N>: exactly N elements back to back, with
// no length header (the count is part of the type).
template<typename T, size_t N>
struct denc_traits<
  std::array<T, N>,
  std::enable_if_t<denc_traits<T>::supported>> {
private:
  using container = std::array<T, N>;
public:
  using traits = denc_traits<T>;
  static constexpr bool supported = true;
  static constexpr bool featured = traits::featured;
  static constexpr bool bounded = traits::bounded;
  static constexpr bool need_contiguous = traits::need_contiguous;
  static void bound_encode(const container& s, size_t& p, uint64_t f = 0) {
    if constexpr (traits::bounded) {
      // The N == 0 guard matters: dereferencing s.begin() on a
      // zero-length array is undefined behaviour (the non-featured
      // branch previously did so unconditionally).
      if constexpr (N > 0) {
	if constexpr (traits::featured) {
	  if (!s.empty()) {
	    size_t elem_size = 0;
	    denc(*s.begin(), elem_size, f);
	    p += elem_size * s.size();
	  }
	} else {
	  size_t elem_size = 0;
	  denc(*s.begin(), elem_size);
	  p += elem_size * N;
	}
      }
    } else {
      for (const auto& e : s) {
	if constexpr (traits::featured) {
	  denc(e, p, f);
	} else {
	  denc(e, p);
	}
      }
    }
  }
  static void encode(const container& s, ceph::buffer::list::contiguous_appender& p,
		     uint64_t f = 0) {
    for (const auto& e : s) {
      if constexpr (traits::featured) {
	denc(e, p, f);
      } else {
	denc(e, p);
      }
    }
  }
  static void decode(container& s, ceph::buffer::ptr::const_iterator& p,
		     uint64_t f = 0) {
    for (auto& e : s)
      denc(e, p, f);
  }
  // segmented decode, only when elements don't need a contiguous buffer
  template<typename U=T>
  static std::enable_if_t<!!sizeof(U) &&
			  !need_contiguous>
  decode(container& s, ceph::buffer::list::const_iterator& p) {
    for (auto& e : s) {
      denc(e, p);
    }
  }
};
// Traits for std::tuple<Ts...>: elements encoded in order with no
// header.  Note: the ptr::const_iterator decode does not forward the
// feature bits to elements (it passes none), matching historic
// behaviour.
template<typename... Ts>
struct denc_traits<
  std::tuple<Ts...>,
  std::enable_if_t<(denc_traits<Ts>::supported && ...)>> {
private:
  static_assert(sizeof...(Ts) > 0,
		"Zero-length tuples are not supported.");
  using container = std::tuple<Ts...>;
public:
  static constexpr bool supported = true;
  static constexpr bool featured = (denc_traits<Ts>::featured || ...);
  static constexpr bool bounded = (denc_traits<Ts>::bounded && ...);
  static constexpr bool need_contiguous =
      (denc_traits<Ts>::need_contiguous || ...);
  template<typename U = container>
  static std::enable_if_t<denc_traits<U>::featured>
  bound_encode(const container& s, size_t& p, uint64_t f) {
    ceph::for_each(s, [&p, f] (const auto& e) {
	if constexpr (denc_traits<std::decay_t<decltype(e)>>::featured) {
	  denc(e, p, f);
	} else {
	  denc(e, p);
	}
      });
  }
  template<typename U = container>
  static std::enable_if_t<!denc_traits<U>::featured>
  bound_encode(const container& s, size_t& p) {
    ceph::for_each(s, [&p] (const auto& e) {
	denc(e, p);
      });
  }
  template<typename U = container>
  static std::enable_if_t<denc_traits<U>::featured>
  encode(const container& s, ceph::buffer::list::contiguous_appender& p,
	 uint64_t f) {
    ceph::for_each(s, [&p, f] (const auto& e) {
	if constexpr (denc_traits<std::decay_t<decltype(e)>>::featured) {
	  denc(e, p, f);
	} else {
	  denc(e, p);
	}
      });
  }
  template<typename U = container>
  static std::enable_if_t<!denc_traits<U>::featured>
  encode(const container& s, ceph::buffer::list::contiguous_appender& p) {
    ceph::for_each(s, [&p] (const auto& e) {
	denc(e, p);
      });
  }
  static void decode(container& s, ceph::buffer::ptr::const_iterator& p,
		     uint64_t f = 0) {
    ceph::for_each(s, [&p] (auto& e) {
	denc(e, p);
      });
  }
  template<typename U = container>
  static std::enable_if_t<!denc_traits<U>::need_contiguous>
  decode(container& s, ceph::buffer::list::const_iterator& p, uint64_t f = 0) {
    ceph::for_each(s, [&p] (auto& e) {
	denc(e, p);
      });
  }
};
//
// boost::optional<T>
//
// Traits for boost::optional<T>: a presence byte (bool) followed by
// the value iff present.
template<typename T>
struct denc_traits<
  boost::optional<T>,
  std::enable_if_t<denc_traits<T>::supported>> {
  using traits = denc_traits<T>;
  static constexpr bool supported = true;
  static constexpr bool featured = traits::featured;
  static constexpr bool bounded = false;
  static constexpr bool need_contiguous = traits::need_contiguous;
  static void bound_encode(const boost::optional<T>& v, size_t& p,
			   uint64_t f = 0) {
    p += sizeof(bool);
    if (v) {
      if constexpr (featured) {
	denc(*v, p, f);
      } else {
	denc(*v, p);
      }
    }
  }
  static void encode(const boost::optional<T>& v,
		     ceph::buffer::list::contiguous_appender& p,
		     uint64_t f = 0) {
    denc((bool)v, p);
    if (v) {
      if constexpr (featured) {
	denc(*v, p, f);
      } else {
	denc(*v, p);
      }
    }
  }
  static void decode(boost::optional<T>& v, ceph::buffer::ptr::const_iterator& p,
		     uint64_t f = 0) {
    bool x;
    denc(x, p, f);
    if (x) {
      // default-construct first, then decode in place
      v = T{};
      denc(*v, p, f);
    } else {
      v = boost::none;
    }
  }
  // segmented decode, only when T doesn't need a contiguous buffer
  template<typename U = T>
  static std::enable_if_t<!!sizeof(U) && !need_contiguous>
  decode(boost::optional<T>& v, ceph::buffer::list::const_iterator& p) {
    bool x;
    denc(x, p);
    if (x) {
      v = T{};
      denc(*v, p);
    } else {
      v = boost::none;
    }
  }
  // nohead variants: the presence flag travels separately
  template<typename U = T>
  static void encode_nohead(const boost::optional<T>& v,
			    ceph::buffer::list::contiguous_appender& p,
			    uint64_t f = 0) {
    if (v) {
      if constexpr (featured) {
	denc(*v, p, f);
      } else {
	denc(*v, p);
      }
    }
  }
  static void decode_nohead(bool num, boost::optional<T>& v,
			    ceph::buffer::ptr::const_iterator& p, uint64_t f = 0) {
    if (num) {
      v = T();
      denc(*v, p, f);
    } else {
      v = boost::none;
    }
  }
};
template<>
struct denc_traits<boost::none_t> {
static constexpr bool supported = true;
static constexpr bool featured = false;
static constexpr bool bounded = true;
static constexpr bool need_contiguous = false;
static void bound_encode(const boost::none_t& v, size_t& p) {
p += sizeof(bool);
}
static void encode(const boost::none_t& v,
ceph::buffer::list::contiguous_appender& p) {
denc(false, p);
}
};
//
// std::optional<T>
//
template<typename T>
struct denc_traits<
  std::optional<T>,
  std::enable_if_t<denc_traits<T>::supported>> {
  using traits = denc_traits<T>;

  static constexpr bool supported = true;
  static constexpr bool featured = traits::featured;
  static constexpr bool bounded = false;
  static constexpr bool need_contiguous = traits::need_contiguous;

  // Wire format: one bool presence flag, followed by the encoded T when
  // the optional is engaged.
  static void bound_encode(const std::optional<T>& o, size_t& sz,
			   uint64_t f = 0) {
    sz += sizeof(bool);
    if (!o) {
      return;
    }
    if constexpr (traits::featured) {
      denc(*o, sz, f);
    } else {
      denc(*o, sz);
    }
  }

  static void encode(const std::optional<T>& o,
		     ceph::buffer::list::contiguous_appender& app,
		     uint64_t f = 0) {
    denc(o.has_value(), app);
    if (!o) {
      return;
    }
    if constexpr (traits::featured) {
      denc(*o, app, f);
    } else {
      denc(*o, app);
    }
  }

  static void decode(std::optional<T>& o,
		     ceph::buffer::ptr::const_iterator& it,
		     uint64_t f = 0) {
    bool present;
    denc(present, it, f);
    if (!present) {
      o = std::nullopt;
      return;
    }
    // engage with a default-constructed value, then decode into it
    o = T{};
    denc(*o, it, f);
  }

  // Non-contiguous decode; only available when T itself does not require
  // a contiguous buffer.
  template<typename U = T>
  static std::enable_if_t<!!sizeof(U) && !need_contiguous>
  decode(std::optional<T>& o, ceph::buffer::list::const_iterator& it) {
    bool present;
    denc(present, it);
    if (!present) {
      o = std::nullopt;
      return;
    }
    o = T{};
    denc(*o, it);
  }

  // "nohead" variants omit the presence flag; the caller tracks presence
  // out of band (e.g. as an element count).
  static void encode_nohead(const std::optional<T>& o,
			    ceph::buffer::list::contiguous_appender& app,
			    uint64_t f = 0) {
    if (!o) {
      return;
    }
    if constexpr (traits::featured) {
      denc(*o, app, f);
    } else {
      denc(*o, app);
    }
  }

  static void decode_nohead(bool num, std::optional<T>& o,
			    ceph::buffer::ptr::const_iterator& it,
			    uint64_t f = 0) {
    if (!num) {
      o = std::nullopt;
      return;
    }
    o = T();
    denc(*o, it, f);
  }
};
template<>
struct denc_traits<std::nullopt_t> {
static constexpr bool supported = true;
static constexpr bool featured = false;
static constexpr bool bounded = true;
static constexpr bool need_contiguous = false;
static void bound_encode(const std::nullopt_t& v, size_t& p) {
p += sizeof(bool);
}
static void encode(const std::nullopt_t& v,
ceph::buffer::list::contiguous_appender& p) {
denc(false, p);
}
};
// ----------------------------------------------------------------------
// class helpers
// Write denc_traits<> for a class that defines bound_encode/encode/decode
// methods.
//
// WRITE_CLASS_DENC(T): T's encoded size varies with the value (unbounded).
// WRITE_CLASS_DENC_BOUNDED(T): T's bound_encode() yields a fixed upper
// bound, independent of the value.
#define WRITE_CLASS_DENC(T) _DECLARE_CLASS_DENC(T, false)
#define WRITE_CLASS_DENC_BOUNDED(T) _DECLARE_CLASS_DENC(T, true)
// Shared non-featured implementation: forwards to T's member
// bound_encode/encode/decode (feature bits are accepted and ignored).
// need_contiguous is false only when T also provides a legacy bufferlist
// decode, detected via _denc::has_legacy_denc<T>.
#define _DECLARE_CLASS_DENC(T, b)					\
  template<> struct denc_traits<T> {					\
    static constexpr bool supported = true;				\
    static constexpr bool featured = false;				\
    static constexpr bool bounded = b;					\
    static constexpr bool need_contiguous = !_denc::has_legacy_denc<T>::value;\
    static void bound_encode(const T& v, size_t& p, uint64_t f=0) {	\
      v.bound_encode(p);						\
    }									\
    static void encode(const T& v, ::ceph::buffer::list::contiguous_appender& p, \
		       uint64_t f=0) {					\
      v.encode(p);							\
    }									\
    static void decode(T& v, ::ceph::buffer::ptr::const_iterator& p, uint64_t f=0) { \
      v.decode(p);							\
    }									\
  };
// Featured variants: T's bound_encode/encode receive the feature bits,
// which may change what (or how much) gets encoded.
#define WRITE_CLASS_DENC_FEATURED(T) _DECLARE_CLASS_DENC_FEATURED(T, false)
#define WRITE_CLASS_DENC_FEATURED_BOUNDED(T) _DECLARE_CLASS_DENC_FEATURED(T, true)
#define _DECLARE_CLASS_DENC_FEATURED(T, b)				\
  template<> struct denc_traits<T> {					\
    static constexpr bool supported = true;				\
    static constexpr bool featured = true;				\
    static constexpr bool bounded = b;					\
    static constexpr bool need_contiguous = !_denc::has_legacy_denc<T>::value;\
    static void bound_encode(const T& v, size_t& p, uint64_t f) {	\
      v.bound_encode(p, f);						\
    }									\
    static void encode(const T& v, ::ceph::buffer::list::contiguous_appender& p, \
		       uint64_t f) {					\
      v.encode(p, f);							\
    }									\
    static void decode(T& v, ::ceph::buffer::ptr::const_iterator& p, uint64_t f=0) { \
      v.decode(p, f);							\
    }									\
  };
// ----------------------------------------------------------------------
// encoded_sizeof_wrapper
namespace ceph {

// Compile-time upper bound on the encoded size of a bounded type
// (bound_encode() of a default-constructed T, evaluated as a constexpr).
template <typename T, typename traits=denc_traits<T>>
constexpr std::enable_if_t<traits::supported && traits::bounded, size_t>
encoded_sizeof_bounded() {
  size_t sz = 0;
  traits::bound_encode(T(), sz);
  return sz;
}

// Runtime, possibly value-dependent, upper bound on t's encoded size.
template <typename T, typename traits=denc_traits<T>>
std::enable_if_t<traits::supported, size_t>
encoded_sizeof(const T &t) {
  size_t sz = 0;
  traits::bound_encode(t, sz);
  return sz;
}

} // namespace ceph
// ----------------------------------------------------------------------
// encode/decode wrappers
// These glue the new-style denc world into old-style calls to encode
// and decode by calling into denc_traits<> methods (when present).
namespace ceph {
// encode() for a non-featured type: size the appender up front via
// bound_encode(), then encode in one pass.  The trailing features
// argument is accepted (and ignored) so callers can pass features
// uniformly.
template<typename T, typename traits=denc_traits<T>>
inline std::enable_if_t<traits::supported && !traits::featured> encode(
  const T& o,
  ceph::buffer::list& bl,
  uint64_t features_unused=0)
{
  size_t len = 0;
  traits::bound_encode(o, len);
  auto a = bl.get_contiguous_appender(len);
  traits::encode(o, a);
}
// encode() for a featured type: features participate in both the size
// estimate and the encoding itself.
template<typename T, typename traits=denc_traits<T>>
inline std::enable_if_t<traits::supported && traits::featured> encode(
  const T& o, ::ceph::buffer::list& bl,
  uint64_t features)
{
  size_t len = 0;
  traits::bound_encode(o, len, features);
  auto a = bl.get_contiguous_appender(len);
  traits::encode(o, a, features);
}
// decode() for types that can consume a (possibly) non-contiguous
// bufferlist iterator directly.
template<typename T,
	 typename traits=denc_traits<T>>
inline std::enable_if_t<traits::supported && !traits::need_contiguous> decode(
  T& o,
  ::ceph::buffer::list::const_iterator& p)
{
  if (p.end())
    throw ::ceph::buffer::end_of_buffer();
  const auto& bl = p.get_bl();
  const auto remaining = bl.length() - p.get_off();
  // it is expensive to rebuild a contiguous buffer and drop it, so avoid this.
  if (!p.is_pointing_same_raw(bl.back()) && remaining > CEPH_PAGE_SIZE) {
    traits::decode(o, p);
  } else {
    // ensure we get a contiguous buffer... until the end of the
    // ceph::buffer::list. we don't really know how much we'll need here,
    // unfortunately. hopefully it is already contiguous and we're just
    // bumping the raw ref and initializing the ptr tmp fields.
    ceph::buffer::ptr tmp;
    auto t = p;
    t.copy_shallow(remaining, tmp);
    auto cp = std::cbegin(tmp);
    traits::decode(o, cp);
    // advance the caller's iterator past what the contiguous decode
    // actually consumed
    p += cp.get_offset();
  }
}
// decode() for types that require a contiguous buffer: always rebuild
// (or shallow-share) a contiguous ptr covering the rest of the list.
template<typename T,
	 typename traits=denc_traits<T>>
inline std::enable_if_t<traits::supported && traits::need_contiguous> decode(
  T& o,
  ceph::buffer::list::const_iterator& p)
{
  if (p.end())
    throw ceph::buffer::end_of_buffer();
  // ensure we get a contiguous buffer... until the end of the
  // ceph::buffer::list. we don't really know how much we'll need here,
  // unfortunately. hopefully it is already contiguous and we're just
  // bumping the raw ref and initializing the ptr tmp fields.
  ceph::buffer::ptr tmp;
  auto t = p;
  t.copy_shallow(p.get_bl().length() - p.get_off(), tmp);
  auto cp = std::cbegin(tmp);
  traits::decode(o, cp);
  p += cp.get_offset();
}
// nohead variants
// encode_nohead(): encode without the element-count/presence header;
// the caller records that out of band.
template<typename T, typename traits=denc_traits<T>>
inline std::enable_if_t<traits::supported &&
			!traits::featured> encode_nohead(
  const T& o,
  ceph::buffer::list& bl)
{
  size_t len = 0;
  traits::bound_encode(o, len);
  auto a = bl.get_contiguous_appender(len);
  traits::encode_nohead(o, a);
}
// decode_nohead(): decode `num` elements whose count was stored out of
// band.  For need_contiguous types, shallow-copy just enough of the
// list: num * element-size when the element encoding is bounded,
// otherwise everything to the end of the list.
template<typename T, typename traits=denc_traits<T>>
inline std::enable_if_t<traits::supported && !traits::featured> decode_nohead(
  size_t num,
  T& o,
  ceph::buffer::list::const_iterator& p)
{
  if (!num)
    return;
  if (p.end())
    throw ceph::buffer::end_of_buffer();
  if constexpr (traits::need_contiguous) {
    ceph::buffer::ptr tmp;
    auto t = p;
    if constexpr (denc_traits<typename T::value_type>::bounded) {
      // bounded elements: the exact extent is computable up front
      size_t element_size = 0;
      typename T::value_type v;
      denc_traits<typename T::value_type>::bound_encode(v, element_size);
      t.copy_shallow(num * element_size, tmp);
    } else {
      t.copy_shallow(p.get_bl().length() - p.get_off(), tmp);
    }
    auto cp = std::cbegin(tmp);
    traits::decode_nohead(num, o, cp);
    p += cp.get_offset();
  } else {
    traits::decode_nohead(num, o, p);
  }
}
}
// ----------------------------------------------------------------
// DENC
// These are some class methods we need to do the version and length
// wrappers for DENC_{START,FINISH} for inter-version
// interoperability.
#define DENC_HELPERS \
/* bound_encode */ \
static void _denc_start(size_t& p, \
__u8 *struct_v, \
__u8 *struct_compat, \
char **, uint32_t *) { \
p += 2 + 4; \
} \
static void _denc_finish(size_t& p, \
__u8 *struct_v, \
__u8 *struct_compat, \
char **, uint32_t *) { } \
/* encode */ \
static void _denc_start(::ceph::buffer::list::contiguous_appender& p, \
__u8 *struct_v, \
__u8 *struct_compat, \
char **len_pos, \
uint32_t *start_oob_off) { \
denc(*struct_v, p); \
denc(*struct_compat, p); \
*len_pos = p.get_pos_add(4); \
*start_oob_off = p.get_out_of_band_offset(); \
} \
static void _denc_finish(::ceph::buffer::list::contiguous_appender& p, \
__u8 *struct_v, \
__u8 *struct_compat, \
char **len_pos, \
uint32_t *start_oob_off) { \
*(ceph_le32*)*len_pos = p.get_pos() - *len_pos - sizeof(uint32_t) + \
p.get_out_of_band_offset() - *start_oob_off; \
} \
/* decode */ \
static void _denc_start(::ceph::buffer::ptr::const_iterator& p, \
__u8 *struct_v, \
__u8 *struct_compat, \
char **start_pos, \
uint32_t *struct_len) { \
denc(*struct_v, p); \
denc(*struct_compat, p); \
denc(*struct_len, p); \
*start_pos = const_cast<char*>(p.get_pos()); \
} \
static void _denc_finish(::ceph::buffer::ptr::const_iterator& p, \
__u8 *struct_v, __u8 *struct_compat, \
char **start_pos, \
uint32_t *struct_len) { \
const char *pos = p.get_pos(); \
char *end = *start_pos + *struct_len; \
if (pos > end) { \
throw ::ceph::buffer::malformed_input(__PRETTY_FUNCTION__); \
} \
if (pos < end) { \
p += end - pos; \
} \
}
// Helpers for versioning the encoding.  These correspond to the
// {ENCODE,DECODE}_{START,FINISH} macros.
//
// DENC_START/DENC_FINISH must bracket the body as a matched pair; the
// do { ... } while (false) makes the bracketed body a single scope.
#define DENC_START(v, compat, p)					\
  __u8 struct_v = v;							\
  __u8 struct_compat = compat;						\
  char *_denc_pchar;							\
  uint32_t _denc_u32;							\
  _denc_start(p, &struct_v, &struct_compat, &_denc_pchar, &_denc_u32);	\
  do {
#define DENC_FINISH(p)							\
  } while (false);							\
  _denc_finish(p, &struct_v, &struct_compat, &_denc_pchar, &_denc_u32);
// ----------------------------------------------------------------------
// Helpers for writing a unified bound_encode/encode/decode
// implementation that won't screw up buffer size estimations.
//
// DENC(Type, v, p) expands to the three member functions plus the
// declaration of a friend template _denc_friend(v, p); the user writes
// one body (the friend's) that serves all three phases, because the
// denc() calls inside it overload on the type of p (size_t&, appender,
// or const_iterator).  DENC_FEATURED is the same with feature bits f
// threaded through.
#define DENC(Type, v, p)						\
  DENC_HELPERS							\
  void bound_encode(size_t& p) const {				\
    _denc_friend(*this, p);						\
  }									\
  void encode(::ceph::buffer::list::contiguous_appender& p) const {	\
    DENC_DUMP_PRE(Type);						\
    _denc_friend(*this, p);						\
  }									\
  void decode(::ceph::buffer::ptr::const_iterator& p) {		\
    _denc_friend(*this, p);						\
  }									\
  template<typename T, typename P>					\
  friend std::enable_if_t<std::is_same_v<T, Type> ||			\
  std::is_same_v<T, const Type>>					\
  _denc_friend(T& v, P& p)
#define DENC_FEATURED(Type, v, p, f)					\
  DENC_HELPERS							\
  void bound_encode(size_t& p, uint64_t f) const {			\
    _denc_friend(*this, p, f);					\
  }									\
  void encode(::ceph::buffer::list::contiguous_appender& p, uint64_t f) const { \
    DENC_DUMP_PRE(Type);						\
    _denc_friend(*this, p, f);					\
  }									\
  void decode(::ceph::buffer::ptr::const_iterator& p, uint64_t f=0) {	\
    _denc_friend(*this, p, f);					\
  }									\
  template<typename T, typename P>					\
  friend std::enable_if_t<std::is_same_v<T, Type> ||			\
  std::is_same_v<T, const Type>>					\
  _denc_friend(T& v, P& p, uint64_t f)
#endif
| 52,631 | 26.759494 | 97 | h |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.