repo
stringlengths 1
152
⌀ | file
stringlengths 15
205
| code
stringlengths 0
41.6M
| file_length
int64 0
41.6M
| avg_line_length
float64 0
1.81M
| max_line_length
int64 0
12.7M
| extension_type
stringclasses 90
values |
---|---|---|---|---|---|---|
null | ceph-main/src/librbd/cache/pwl/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_PWL_TYPES_H
#define CEPH_LIBRBD_CACHE_PWL_TYPES_H
#include "acconfig.h"
#ifdef WITH_RBD_RWL
#include "libpmemobj.h"
#endif
#include <vector>
#include "librbd/BlockGuard.h"
#include "librbd/io/Types.h"
namespace ceph {
class Formatter;
}
class Context;
/* Performance counter indices for the persistent write log (PWL).
 * These are registered with / incremented on a PerfCounters instance;
 * valid indices lie in [l_librbd_pwl_first, l_librbd_pwl_last). */
enum {
l_librbd_pwl_first = 26500,
// All read requests
l_librbd_pwl_rd_req, // read requests
l_librbd_pwl_rd_bytes, // bytes read
l_librbd_pwl_rd_latency, // average req completion latency
// Read requests completed from RWL (no misses)
l_librbd_pwl_rd_hit_req, // read requests
l_librbd_pwl_rd_hit_bytes, // bytes read
l_librbd_pwl_rd_hit_latency, // average req completion latency
// Read requests with hit and miss extents
l_librbd_pwl_rd_part_hit_req, // read ops
// Per SyncPoint's LogEntry number and write bytes distribution
l_librbd_pwl_syncpoint_hist,
// All write requests
l_librbd_pwl_wr_req, // write requests
l_librbd_pwl_wr_bytes, // bytes written
l_librbd_pwl_wr_req_def, // write requests deferred for resources
l_librbd_pwl_wr_req_def_lanes, // write requests deferred for lanes
l_librbd_pwl_wr_req_def_log, // write requests deferred for log entries
l_librbd_pwl_wr_req_def_buf, // write requests deferred for buffer space
l_librbd_pwl_wr_req_overlap, // write requests detained for overlap
l_librbd_pwl_wr_req_queued, // write requests queued for prior barrier
// Write log operations (1 .. n per request that appends to the log)
l_librbd_pwl_log_ops, // log append ops
l_librbd_pwl_log_op_bytes, // average bytes written per log op
/*
Req and op average latencies to the beginning of and over various phases:
+------------------------------+------+-------------------------------+
| Phase | Name | Description |
+------------------------------+------+-------------------------------+
| Arrive at RWL | arr |Arrives as a request |
+------------------------------+------+-------------------------------+
| Allocate resources | all |time spent in block guard for |
| | |overlap sequencing occurs |
| | |before this point |
+------------------------------+------+-------------------------------+
| Dispatch | dis |Op lifetime begins here. time |
| | |spent in allocation waiting for|
| | |resources occurs before this |
| | |point |
+------------------------------+------+-------------------------------+
| Payload buffer persist and | buf |time spent queued for |
|replicate | |replication occurs before here |
+------------------------------+------+-------------------------------+
| Payload buffer persist | bufc |bufc - buf is just the persist |
|complete | |time |
+------------------------------+------+-------------------------------+
| Log append | app |time spent queued for append |
| | |occurs before here |
+------------------------------+------+-------------------------------+
| Append complete | appc |appc - app is just the time |
| | |spent in the append operation |
+------------------------------+------+-------------------------------+
| Complete | cmp |write persisted, replicated, |
| | |and globally visible |
+------------------------------+------+-------------------------------+
*/
/* Request times */
l_librbd_pwl_req_arr_to_all_t, // arrival to allocation elapsed time - same as time deferred in block guard
l_librbd_pwl_req_arr_to_dis_t, // arrival to dispatch elapsed time
l_librbd_pwl_req_all_to_dis_t, // Time spent allocating or waiting to allocate resources
l_librbd_pwl_wr_latency, // average req (persist) completion latency
l_librbd_pwl_wr_latency_hist, // Histogram of write req (persist) completion latency vs. bytes written
l_librbd_pwl_wr_caller_latency, // average req completion (to caller) latency
/* Request times for requests that never waited for space*/
l_librbd_pwl_nowait_req_arr_to_all_t, // arrival to allocation elapsed time - same as time deferred in block guard
l_librbd_pwl_nowait_req_arr_to_dis_t, // arrival to dispatch elapsed time
l_librbd_pwl_nowait_req_all_to_dis_t, // Time spent allocating or waiting to allocate resources
l_librbd_pwl_nowait_wr_latency, // average req (persist) completion latency
l_librbd_pwl_nowait_wr_latency_hist, // Histogram of write req (persist) completion latency vs. bytes written
l_librbd_pwl_nowait_wr_caller_latency, // average req completion (to caller) latency
/* Log operation times */
l_librbd_pwl_log_op_alloc_t, // elapsed time of pmemobj_reserve()
l_librbd_pwl_log_op_alloc_t_hist, // Histogram of elapsed time of pmemobj_reserve()
l_librbd_pwl_log_op_dis_to_buf_t, // dispatch to buffer persist elapsed time
l_librbd_pwl_log_op_dis_to_app_t, // dispatch to log append elapsed time
l_librbd_pwl_log_op_dis_to_cmp_t, // dispatch to persist completion elapsed time
l_librbd_pwl_log_op_dis_to_cmp_t_hist, // Histogram of dispatch to persist completion elapsed time
l_librbd_pwl_log_op_buf_to_app_t, // data buf persist + append wait time
l_librbd_pwl_log_op_buf_to_bufc_t,// data buf persist / replicate elapsed time
l_librbd_pwl_log_op_buf_to_bufc_t_hist,// data buf persist time vs bytes histogram
l_librbd_pwl_log_op_app_to_cmp_t, // log entry append + completion wait time
l_librbd_pwl_log_op_app_to_appc_t, // log entry append / replicate elapsed time
l_librbd_pwl_log_op_app_to_appc_t_hist, // log entry append time (vs. op bytes) histogram
/* Discard/flush/writesame/compare-and-write counters */
l_librbd_pwl_discard,
l_librbd_pwl_discard_bytes,
l_librbd_pwl_discard_latency,
l_librbd_pwl_aio_flush,
l_librbd_pwl_aio_flush_def,
l_librbd_pwl_aio_flush_latency,
l_librbd_pwl_ws,
l_librbd_pwl_ws_bytes, // Bytes modified by write same, probably much larger than WS payload bytes
l_librbd_pwl_ws_latency,
l_librbd_pwl_cmp,
l_librbd_pwl_cmp_bytes,
l_librbd_pwl_cmp_latency,
l_librbd_pwl_cmp_fails,
/* Cache maintenance counters */
l_librbd_pwl_internal_flush,
l_librbd_pwl_writeback_latency,
l_librbd_pwl_invalidate_cache,
l_librbd_pwl_invalidate_discard_cache,
l_librbd_pwl_append_tx_t,
l_librbd_pwl_retire_tx_t,
l_librbd_pwl_append_tx_t_hist,
l_librbd_pwl_retire_tx_t_hist,
l_librbd_pwl_last,
};
/* Flag bits stored in WriteLogCacheEntry::flags.  Queried/set via the
 * is_*()/set_*() accessors on that struct. */
enum {
WRITE_LOG_CACHE_ENTRY_VALID = 1U << 0, /* if 0, this entry is free */
WRITE_LOG_CACHE_ENTRY_SYNC_POINT = 1U << 1, /* No data. No write sequence number.
Marks sync point for this sync gen number */
WRITE_LOG_CACHE_ENTRY_SEQUENCED = 1U << 2, /* write sequence number is valid */
WRITE_LOG_CACHE_ENTRY_HAS_DATA = 1U << 3, /* write_data field is valid (else ignore) */
WRITE_LOG_CACHE_ENTRY_DISCARD = 1U << 4, /* has_data will be 0 if this is a discard */
WRITE_LOG_CACHE_ENTRY_WRITESAME = 1U << 5, /* ws_datalen indicates length of data at write_bytes */
};
namespace librbd {
namespace cache {
namespace pwl {
class ImageExtentBuf;
/* Limits on concurrently in-flight writeback (flush) work */
const int IN_FLIGHT_FLUSH_WRITE_LIMIT = 64;
const int IN_FLIGHT_FLUSH_BYTES_LIMIT = (1 * 1024 * 1024);
/* Limit work between sync points */
const uint64_t MAX_WRITES_PER_SYNC_POINT = 256;
const uint64_t MAX_BYTES_PER_SYNC_POINT = (1024 * 1024 * 8);
/* Minimum data-buffer allocation granularity, per backend */
const uint32_t MIN_WRITE_ALLOC_SIZE = 512;
const uint32_t MIN_WRITE_ALLOC_SSD_SIZE = 4096;
const uint32_t LOG_STATS_INTERVAL_SECONDS = 5;
/**** Write log entries ****/
const unsigned long int MAX_ALLOC_PER_TRANSACTION = 8;
const unsigned long int MAX_FREE_PER_TRANSACTION = 1;
const unsigned int MAX_CONCURRENT_WRITES = (1024 * 1024);
/* Cache pool sizing: 1 GiB default and minimum, 1 MiB alignment */
const uint64_t DEFAULT_POOL_SIZE = 1u<<30;
const uint64_t MIN_POOL_SIZE = DEFAULT_POOL_SIZE;
const uint64_t POOL_SIZE_ALIGN = 1 << 20;
/* presumably the fraction of the pool usable for log data -- TODO confirm */
constexpr double USABLE_SIZE = (7.0 / 10);
const uint64_t BLOCK_ALLOC_OVERHEAD_BYTES = 16;
/* On-media layout versions for the pmem (RWL) and SSD backends */
const uint8_t RWL_LAYOUT_VERSION = 1;
const uint8_t SSD_LAYOUT_VERSION = 1;
const uint64_t MAX_LOG_ENTRIES = (1024 * 1024);
/* Log retirement water marks (fractions of log usage) */
const double AGGRESSIVE_RETIRE_HIGH_WATER = 0.75;
const double RETIRE_HIGH_WATER = 0.50;
const double RETIRE_LOW_WATER = 0.40;
const int RETIRE_BATCH_TIME_LIMIT_MS = 250;
const uint64_t CONTROL_BLOCK_MAX_LOG_ENTRIES = 32;
const uint64_t SPAN_MAX_DATA_LEN = (16 * 1024 * 1024);
/* offset of ring on SSD */
const uint64_t DATA_RING_BUFFER_OFFSET = 8192;
/* Defer a set of Contexts until destruct/exit. Used for deferring
 * work on a given thread until a required lock is dropped.
 * NOTE(review): the out-of-line destructor is expected to complete the
 * accumulated contexts -- confirm in the .cc. */
class DeferredContexts {
private:
std::vector<Context*> contexts;  // contexts accumulated via add()
public:
~DeferredContexts();
void add(Context* ctx);
};
/* Pmem structures */
#ifdef WITH_RBD_RWL
POBJ_LAYOUT_BEGIN(rbd_pwl);
POBJ_LAYOUT_ROOT(rbd_pwl, struct WriteLogPoolRoot);
POBJ_LAYOUT_TOID(rbd_pwl, uint8_t);
POBJ_LAYOUT_TOID(rbd_pwl, struct WriteLogCacheEntry);
POBJ_LAYOUT_END(rbd_pwl);
#endif
/* One log entry record as persisted in the cache: in the pmem pool for
 * the RWL backend (write_data TOID) or encoded via DENC for the SSD
 * backend (write_data_pos offset).  Do not reorder/resize fields
 * casually -- this is an on-media format. */
struct WriteLogCacheEntry {
uint64_t sync_gen_number = 0;
uint64_t write_sequence_number = 0;
uint64_t image_offset_bytes;  /* image byte offset this entry covers */
uint64_t write_bytes;         /* image bytes this entry covers */
#ifdef WITH_RBD_RWL
TOID(uint8_t) write_data;     /* pmem object holding the data payload */
#endif
#ifdef WITH_RBD_SSD_CACHE
uint64_t write_data_pos = 0; /* SSD data offset */
#endif
uint8_t flags = 0;            /* WRITE_LOG_CACHE_ENTRY_* bits */
uint32_t ws_datalen = 0; /* Length of data buffer (writesame only) */
uint32_t entry_index = 0; /* For debug consistency check. Can be removed if
* we need the space */
WriteLogCacheEntry(uint64_t image_offset_bytes=0, uint64_t write_bytes=0)
: image_offset_bytes(image_offset_bytes), write_bytes(write_bytes) {}
BlockExtent block_extent();
uint64_t get_offset_bytes();
uint64_t get_write_bytes();
/* Flag accessors (see the WRITE_LOG_CACHE_ENTRY_* enum) */
bool is_entry_valid() const {
return flags & WRITE_LOG_CACHE_ENTRY_VALID;
}
bool is_sync_point() const {
return flags & WRITE_LOG_CACHE_ENTRY_SYNC_POINT;
}
bool is_sequenced() const {
return flags & WRITE_LOG_CACHE_ENTRY_SEQUENCED;
}
bool has_data() const {
return flags & WRITE_LOG_CACHE_ENTRY_HAS_DATA;
}
bool is_discard() const {
return flags & WRITE_LOG_CACHE_ENTRY_DISCARD;
}
bool is_writesame() const {
return flags & WRITE_LOG_CACHE_ENTRY_WRITESAME;
}
bool is_write() const {
/* Log entry is a basic write */
return !is_sync_point() && !is_discard() && !is_writesame();
}
bool is_writer() const {
/* Log entry is any type that writes data */
return is_write() || is_discard() || is_writesame();
}
/* Flag mutators: set or clear one WRITE_LOG_CACHE_ENTRY_* bit */
void set_entry_valid(bool flag) {
if (flag) {
flags |= WRITE_LOG_CACHE_ENTRY_VALID;
} else {
flags &= ~WRITE_LOG_CACHE_ENTRY_VALID;
}
}
void set_sync_point(bool flag) {
if (flag) {
flags |= WRITE_LOG_CACHE_ENTRY_SYNC_POINT;
} else {
flags &= ~WRITE_LOG_CACHE_ENTRY_SYNC_POINT;
}
}
void set_sequenced(bool flag) {
if (flag) {
flags |= WRITE_LOG_CACHE_ENTRY_SEQUENCED;
} else {
flags &= ~WRITE_LOG_CACHE_ENTRY_SEQUENCED;
}
}
void set_has_data(bool flag) {
if (flag) {
flags |= WRITE_LOG_CACHE_ENTRY_HAS_DATA;
} else {
flags &= ~WRITE_LOG_CACHE_ENTRY_HAS_DATA;
}
}
void set_discard(bool flag) {
if (flag) {
flags |= WRITE_LOG_CACHE_ENTRY_DISCARD;
} else {
flags &= ~WRITE_LOG_CACHE_ENTRY_DISCARD;
}
}
void set_writesame(bool flag) {
if (flag) {
flags |= WRITE_LOG_CACHE_ENTRY_WRITESAME;
} else {
flags &= ~WRITE_LOG_CACHE_ENTRY_WRITESAME;
}
}
friend std::ostream& operator<<(std::ostream& os,
const WriteLogCacheEntry &entry);
#ifdef WITH_RBD_SSD_CACHE
/* SSD backend on-media encoding of this entry */
DENC(WriteLogCacheEntry, v, p) {
DENC_START(1, 1, p);
denc(v.sync_gen_number, p);
denc(v.write_sequence_number, p);
denc(v.image_offset_bytes, p);
denc(v.write_bytes, p);
denc(v.write_data_pos, p);
denc(v.flags, p);
denc(v.ws_datalen, p);
denc(v.entry_index, p);
DENC_FINISH(p);
}
#endif
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<WriteLogCacheEntry*>& ls);
};
/* Root object of the cache pool: layout version plus the bounds of the
 * valid region of the circular log of WriteLogCacheEntry records.
 * This is an on-media format -- do not reorder/resize fields casually. */
struct WriteLogPoolRoot {
#ifdef WITH_RBD_RWL
/* layout_version padded to a 64-bit word via the union */
union {
struct {
uint8_t layout_version;
};
uint64_t _u64;
} header;
TOID(struct WriteLogCacheEntry) log_entries; /* contiguous array of log entries */
#endif
#ifdef WITH_RBD_SSD_CACHE
uint64_t layout_version = 0;
uint64_t cur_sync_gen = 0; /* TODO: remove it when changing disk format */
#endif
uint64_t pool_size;
uint64_t flushed_sync_gen; /* All writing entries with this or a lower
* sync gen number are flushed. */
uint32_t block_size;
uint32_t num_log_entries;
uint64_t first_free_entry; /* The free entry following the latest valid
* entry, which is going to be written */
uint64_t first_valid_entry; /* The oldest valid entry to be retired */
#ifdef WITH_RBD_SSD_CACHE
/* SSD backend on-media encoding of this root block */
DENC(WriteLogPoolRoot, v, p) {
DENC_START(1, 1, p);
denc(v.layout_version, p);
denc(v.cur_sync_gen, p);
denc(v.pool_size, p);
denc(v.flushed_sync_gen, p);
denc(v.block_size, p);
denc(v.num_log_entries, p);
denc(v.first_free_entry, p);
denc(v.first_valid_entry, p);
DENC_FINISH(p);
}
#endif
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<WriteLogPoolRoot*>& ls);
};
/* Bookkeeping for one in-flight write's data-buffer allocation. */
struct WriteBufferAllocation {
unsigned int allocation_size = 0;       /* bytes reserved for the payload */
#ifdef WITH_RBD_RWL
pobj_action buffer_alloc_action;        /* libpmemobj reservation action */
TOID(uint8_t) buffer_oid = OID_NULL;    /* pmem object holding the data */
#endif
bool allocated = false;                 /* true once the reservation succeeded */
utime_t allocation_lat;                 /* time taken to obtain the allocation */
};
/* Convert a BlockExtent byte range (start/end) into the io::Extent
 * (offset, length) representation used by the I/O path. */
static inline io::Extent image_extent(const BlockExtent& block_extent) {
  const auto offset = block_extent.block_start;
  const auto length = block_extent.block_end - offset;
  return io::Extent(offset, length);
}
/* Byte-range summary of a collection of image extents: total bytes
 * plus the first and last image byte touched.  The summarizing
 * constructor is defined out-of-line per ExtentsType. */
template <typename ExtentsType>
class ExtentsSummary {
public:
uint64_t total_bytes;
uint64_t first_image_byte;
uint64_t last_image_byte;
explicit ExtentsSummary(const ExtentsType &extents);
friend std::ostream &operator<<(std::ostream &os,
const ExtentsSummary &s) {
os << "total_bytes=" << s.total_bytes
<< ", first_image_byte=" << s.first_image_byte
<< ", last_image_byte=" << s.last_image_byte;
return os;
}
/* NOTE(review): last_image_byte is passed as BlockExtent's end, while
 * image_extent() computes length as end - start -- confirm whether
 * the end bound is inclusive or exclusive here. */
BlockExtent block_extent() {
return BlockExtent(first_image_byte, last_image_byte);
}
io::Extent image_extent() {
return librbd::cache::pwl::image_extent(block_extent());
}
};
io::Extent whole_volume_extent();
BlockExtent block_extent(const io::Extent& image_extent);
Context * override_ctx(int r, Context *ctx);
/* An image extent (io::Extent = offset/length) optionally paired with
 * the data buffer read for it, plus writesame/truncation hints used
 * when assembling read results. */
class ImageExtentBuf : public io::Extent {
public:
  bufferlist m_bl;
  /* Default member initializers: previously a default-constructed
   * instance left these three members indeterminate (reading them was
   * undefined behavior). */
  bool need_to_truncate = false;
  /* NOTE(review): callers pass truncate_offset as uint64_t but it is
   * narrowed to int here -- confirm offsets stay below INT_MAX. */
  int truncate_offset = 0;
  bool writesame = false;
  ImageExtentBuf() {}
  ImageExtentBuf(io::Extent extent,
                 bool need_to_truncate = false, uint64_t truncate_offset = 0,
                 bool writesame = false)
    : io::Extent(extent), need_to_truncate(need_to_truncate),
      truncate_offset(truncate_offset), writesame(writesame) {}
  ImageExtentBuf(io::Extent extent, bufferlist bl,
                 bool need_to_truncate = false, uint64_t truncate_offset = 0,
                 bool writesame = false)
    : io::Extent(extent), m_bl(bl), need_to_truncate(need_to_truncate),
      truncate_offset(truncate_offset), writesame(writesame) {}
};
std::string unique_lock_name(const std::string &name, void *address);
} // namespace pwl
} // namespace cache
} // namespace librbd
#ifdef WITH_RBD_SSD_CACHE
WRITE_CLASS_DENC(librbd::cache::pwl::WriteLogCacheEntry)
WRITE_CLASS_DENC(librbd::cache::pwl::WriteLogPoolRoot)
#endif
#endif // CEPH_LIBRBD_CACHE_PWL_TYPES_H
| 16,288 | 35.522422 | 118 | h |
null | ceph-main/src/librbd/cache/pwl/rwl/Builder.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_PWL_RWL_BUILDER_H
#define CEPH_LIBRBD_CACHE_PWL_RWL_BUILDER_H
#include <iostream>
#include "LogEntry.h"
#include "ReadRequest.h"
#include "Request.h"
#include "LogOperation.h"
#include "librbd/cache/ImageWriteback.h"
#include "librbd/cache/pwl/Builder.h"
namespace librbd {
namespace cache {
namespace pwl {
namespace rwl {
/* Concrete factory for the RWL (pmem) backend: builds the rwl::
 * variants of log entries, write/writesame/compare-and-write requests,
 * log operations and read requests on behalf of the generic PWL code.
 * NOTE(review): the create_*_log_operation, create_discard_log_operation
 * and create_read_request methods below are declared without `override`
 * -- confirm they match virtuals in pwl::Builder<T> and mark them. */
template <typename T>
class Builder : public pwl::Builder<T> {
public:
/* Log entry factories */
std::shared_ptr<pwl::WriteLogEntry> create_write_log_entry(
uint64_t image_offset_bytes, uint64_t write_bytes) override {
return std::make_shared<WriteLogEntry>(image_offset_bytes, write_bytes);
}
std::shared_ptr<pwl::WriteLogEntry> create_write_log_entry(
std::shared_ptr<SyncPointLogEntry> sync_point_entry,
uint64_t image_offset_bytes, uint64_t write_bytes) override {
return std::make_shared<WriteLogEntry>(
sync_point_entry, image_offset_bytes, write_bytes);
}
std::shared_ptr<pwl::WriteLogEntry> create_writesame_log_entry(
uint64_t image_offset_bytes, uint64_t write_bytes,
uint32_t data_length) override {
return std::make_shared<WriteSameLogEntry>(
image_offset_bytes, write_bytes, data_length);
}
std::shared_ptr<pwl::WriteLogEntry> create_writesame_log_entry(
std::shared_ptr<SyncPointLogEntry> sync_point_entry,
uint64_t image_offset_bytes, uint64_t write_bytes,
uint32_t data_length) override {
return std::make_shared<WriteSameLogEntry>(
sync_point_entry, image_offset_bytes, write_bytes, data_length);
}
/* Request factories (ownership passes to the caller) */
pwl::C_WriteRequest<T> *create_write_request(
T &pwl, utime_t arrived, io::Extents &&image_extents,
bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
PerfCounters *perfcounter, Context *user_req) override {
return new C_WriteRequest<T>(
pwl, arrived, std::move(image_extents), std::move(bl),
fadvise_flags, lock, perfcounter, user_req);
}
pwl::C_WriteSameRequest<T> *create_writesame_request(
T &pwl, utime_t arrived, io::Extents &&image_extents,
bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
PerfCounters *perfcounter, Context *user_req) override {
return new C_WriteSameRequest<T>(
pwl, arrived, std::move(image_extents), std::move(bl),
fadvise_flags, lock, perfcounter, user_req);
}
pwl::C_WriteRequest<T> *create_comp_and_write_request(
T &pwl, utime_t arrived, io::Extents &&image_extents,
bufferlist&& cmp_bl, bufferlist&& bl, uint64_t *mismatch_offset,
const int fadvise_flags, ceph::mutex &lock,
PerfCounters *perfcounter, Context *user_req) override {
return new rwl::C_CompAndWriteRequest<T>(
pwl, arrived, std::move(image_extents), std::move(cmp_bl),
std::move(bl), mismatch_offset, fadvise_flags,
lock, perfcounter, user_req);
}
/* Log operation factories */
std::shared_ptr<pwl::WriteLogOperation> create_write_log_operation(
WriteLogOperationSet &set, uint64_t image_offset_bytes,
uint64_t write_bytes, CephContext *cct,
std::shared_ptr<pwl::WriteLogEntry> write_log_entry) {
return std::make_shared<WriteLogOperation>(
set, image_offset_bytes, write_bytes, cct, write_log_entry);
}
std::shared_ptr<pwl::WriteLogOperation> create_write_log_operation(
WriteLogOperationSet &set, uint64_t image_offset_bytes,
uint64_t write_bytes, uint32_t data_len, CephContext *cct,
std::shared_ptr<pwl::WriteLogEntry> writesame_log_entry) {
return std::make_shared<WriteLogOperation>(
set, image_offset_bytes, write_bytes, data_len, cct,
writesame_log_entry);
}
std::shared_ptr<pwl::DiscardLogOperation> create_discard_log_operation(
std::shared_ptr<SyncPoint> sync_point, uint64_t image_offset_bytes,
uint64_t write_bytes, uint32_t discard_granularity_bytes,
utime_t dispatch_time, PerfCounters *perfcounter, CephContext *cct) {
return std::make_shared<DiscardLogOperation>(
sync_point, image_offset_bytes, write_bytes, discard_granularity_bytes,
dispatch_time, perfcounter, cct);
}
C_ReadRequest *create_read_request(CephContext *cct, utime_t arrived,
PerfCounters *perfcounter, ceph::bufferlist *bl, Context *on_finish) {
return new C_ReadRequest(cct, arrived, perfcounter, bl, on_finish);
}
};
} // namespace rwl
} // namespace pwl
} // namespace cache
} // namespace librbd
#endif // CEPH_LIBRBD_CACHE_PWL_RWL_BUILDER_H
| 4,540 | 41.046296 | 79 | h |
null | ceph-main/src/librbd/cache/pwl/rwl/LogEntry.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/cache/ImageWriteback.h"
#include "LogEntry.h"
#define dout_subsys ceph_subsys_rbd_pwl
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::pwl::rwl::WriteLogEntry: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace cache {
namespace pwl {
namespace rwl {
/* Replay this entry to the image: deep-copy the pmem payload and hand
 * it to ImageWriteback as an aio_write covering the entry's extent. */
void WriteLogEntry::writeback(
librbd::cache::ImageWritebackInterface &image_writeback, Context *ctx) {
/* Pass a copy of the pmem buffer to ImageWriteback (which may hang on to the
 * bl even after flush()). */
bufferlist entry_bl;
buffer::list entry_bl_copy;
copy_cache_bl(&entry_bl_copy);
/* Trim the copy down to exactly write_bytes() before submitting */
entry_bl_copy.begin(0).copy(write_bytes(), entry_bl);
image_writeback.aio_write({{ram_entry.image_offset_bytes,
ram_entry.write_bytes}},
std::move(entry_bl), 0, ctx);
}
/* Wrap the entry's pmem data buffer in a non-owning (static) buffer::ptr.
 * Asserts this is only done once per entry. */
void WriteLogEntry::init_cache_bp() {
ceph_assert(!this->cache_bp.have_raw());
cache_bp = buffer::ptr(buffer::create_static(this->write_bytes(),
(char*)this->cache_buffer));
}
/* Build a bufferlist over the entry's data buffer.  A plain write
 * references the buffer once; a writesame replicates the
 * ws_datalen-byte pattern until write_bytes total are covered
 * (including any trailing partial repetition). */
void WriteLogEntry::init_bl(buffer::ptr &bp, buffer::list &bl) {
if(!is_writesame) {
bl.append(bp);
return;
}
for (uint64_t i = 0; i < ram_entry.write_bytes / ram_entry.ws_datalen; i++) {
bl.append(bp);
}
/* NOTE(review): the uint64 remainder is narrowed to int -- fine while
 * ws_datalen stays below INT_MAX, since the remainder is < ws_datalen. */
int trailing_partial = ram_entry.write_bytes % ram_entry.ws_datalen;
if (trailing_partial) {
bl.append(bp, 0, trailing_partial);
}
}
/* Adopt the pmem object reserved for this write and cache a direct
 * writable pointer to its data (D_RW resolves the TOID). */
void WriteLogEntry::init_cache_buffer(
std::vector<WriteBufferAllocation>::iterator allocation) {
this->ram_entry.write_data = allocation->buffer_oid;
ceph_assert(!TOID_IS_NULL(this->ram_entry.write_data));
cache_buffer = D_RW(this->ram_entry.write_data);
}
/* Lazily build (once) and return the bufferlist over the pmem buffer.
 * bl_refs records how many raw references the constructed bufferlist
 * itself holds on the buffer, so reader_count() can subtract them out.
 * NOTE(review): the outer bl_refs check runs without m_entry_bl_lock
 * (double-checked locking) -- confirm concurrent first calls are safe. */
buffer::list& WriteLogEntry::get_cache_bl() {
if (0 == bl_refs) {
std::lock_guard locker(m_entry_bl_lock);
if (0 == bl_refs) {
//init pmem bufferlist
cache_bl.clear();
init_cache_bp();
ceph_assert(cache_bp.have_raw());
/* Count the raw refs init_bl() adds; that delta is bl_refs */
int before_bl = cache_bp.raw_nref();
this->init_bl(cache_bp, cache_bl);
int after_bl = cache_bp.raw_nref();
bl_refs = after_bl - before_bl;
}
ceph_assert(0 != bl_refs);
}
return cache_bl;
}
/* Deep-copy the pmem-backed data into a caller-owned bufferlist so it
 * remains valid independent of this cache entry's lifetime. */
void WriteLogEntry::copy_cache_bl(bufferlist *out_bl) {
this->get_cache_bl();
// cache_bp is now initialized
ceph_assert(cache_bp.length() == cache_bp.raw_length());
buffer::ptr cloned_bp = cache_bp.begin_deep().get_ptr(cache_bp.length());
out_bl->clear();
/* Rebuild the (possibly writesame-replicated) layout over the clone */
this->init_bl(cloned_bp, *out_bl);
}
/* Number of external readers of this entry's data buffer: raw refs on
 * the buffer beyond the bl_refs held by the entry's own bufferlist and
 * the one held by cache_bp itself.  Zero before the buffer is wrapped. */
unsigned int WriteLogEntry::reader_count() const {
  return cache_bp.have_raw() ? (cache_bp.raw_nref() - bl_refs - 1) : 0;
}
/* Replay this entry to the image as a writesame.  Mirrors
 * WriteLogEntry::writeback(): a deep copy of the buffer is handed to
 * ImageWriteback, which may keep the bl past flush(). */
void WriteSameLogEntry::writeback(
librbd::cache::ImageWritebackInterface &image_writeback, Context *ctx) {
bufferlist entry_bl;
buffer::list entry_bl_copy;
copy_cache_bl(&entry_bl_copy);
entry_bl_copy.begin(0).copy(write_bytes(), entry_bl);
image_writeback.aio_writesame(ram_entry.image_offset_bytes,
ram_entry.write_bytes,
std::move(entry_bl), 0, ctx);
}
} // namespace rwl
} // namespace pwl
} // namespace cache
} // namespace librbd
| 3,299 | 29.841121 | 79 | cc |
null | ceph-main/src/librbd/cache/pwl/rwl/LogEntry.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_PWL_RWL_LOG_ENTRY_H
#define CEPH_LIBRBD_CACHE_PWL_RWL_LOG_ENTRY_H
#include "librbd/cache/pwl/LogEntry.h"
namespace librbd {
namespace cache {
class ImageWritebackInterface;
namespace pwl {
namespace rwl {
/* pmem-backed write log entry.  Overrides the buffer-management hooks
 * of pwl::WriteLogEntry to operate on a pmem-resident data buffer and
 * a lazily built bufferlist over it (definitions in LogEntry.cc). */
class WriteLogEntry : public pwl::WriteLogEntry {
public:
/* Constructors forward to the pwl base; the data_length overloads are
 * used for writesame-shaped entries. */
WriteLogEntry(std::shared_ptr<SyncPointLogEntry> sync_point_entry,
uint64_t image_offset_bytes, uint64_t write_bytes)
: pwl::WriteLogEntry(sync_point_entry, image_offset_bytes, write_bytes) {}
WriteLogEntry(uint64_t image_offset_bytes, uint64_t write_bytes)
: pwl::WriteLogEntry(image_offset_bytes, write_bytes) {}
WriteLogEntry(std::shared_ptr<SyncPointLogEntry> sync_point_entry,
uint64_t image_offset_bytes, uint64_t write_bytes,
uint32_t data_length)
: pwl::WriteLogEntry(sync_point_entry, image_offset_bytes, write_bytes,
data_length) {}
WriteLogEntry(uint64_t image_offset_bytes, uint64_t write_bytes,
uint32_t data_length)
: pwl::WriteLogEntry(image_offset_bytes, write_bytes, data_length) {}
~WriteLogEntry() {}
/* Non-copyable: the entry owns buffer bookkeeping state */
WriteLogEntry(const WriteLogEntry&) = delete;
WriteLogEntry &operator=(const WriteLogEntry&) = delete;
void writeback(librbd::cache::ImageWritebackInterface &image_writeback,
Context *ctx) override;
void init_cache_bp() override;
void init_bl(buffer::ptr &bp, buffer::list &bl) override;
void init_cache_buffer(
std::vector<WriteBufferAllocation>::iterator allocation) override;
buffer::list &get_cache_bl() override;
void copy_cache_bl(bufferlist *out_bl) override;
unsigned int reader_count() const override;
};
/* Writesame variant of the pmem log entry: reuses WriteLogEntry's
 * buffer management, overriding only writeback() to replay via
 * aio_writesame() (see LogEntry.cc). */
class WriteSameLogEntry : public WriteLogEntry {
public:
WriteSameLogEntry(std::shared_ptr<SyncPointLogEntry> sync_point_entry,
uint64_t image_offset_bytes, uint64_t write_bytes,
uint32_t data_length)
: WriteLogEntry(sync_point_entry, image_offset_bytes, write_bytes,
data_length) {}
WriteSameLogEntry(uint64_t image_offset_bytes, uint64_t write_bytes,
uint32_t data_length)
: WriteLogEntry(image_offset_bytes, write_bytes, data_length) {}
~WriteSameLogEntry() {}
/* Non-copyable, as the base */
WriteSameLogEntry(const WriteSameLogEntry&) = delete;
WriteSameLogEntry &operator=(const WriteSameLogEntry&) = delete;
void writeback(librbd::cache::ImageWritebackInterface &image_writeback,
Context *ctx) override;
};
} // namespace rwl
} // namespace pwl
} // namespace cache
} // namespace librbd
#endif // CEPH_LIBRBD_CACHE_PWL_RWL_LOG_ENTRY_H
| 2,712 | 38.318841 | 78 | h |
null | ceph-main/src/librbd/cache/pwl/rwl/LogOperation.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "LogOperation.h"
#define dout_subsys ceph_subsys_rbd_pwl
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::pwl::rwl::LogOperation: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace cache {
namespace pwl {
namespace rwl {
/* Bind the log entry to its reserved pmem buffer, then copy the
 * request's data (bl) into that buffer, accounting the bytes written
 * to the log-op perf counter. */
void WriteLogOperation::copy_bl_to_cache_buffer(
std::vector<WriteBufferAllocation>::iterator allocation) {
/* operation is a shared_ptr, so write_op is only good as long as operation is
 * in scope */
bufferlist::iterator i(&bl);
m_perfcounter->inc(l_librbd_pwl_log_op_bytes, log_entry->write_bytes());
ldout(m_cct, 20) << bl << dendl;
log_entry->init_cache_buffer(allocation);
i.copy((unsigned)log_entry->write_bytes(), (char*)log_entry->cache_buffer);
}
/* Stamp the discard entry with its sync-gen / sequence identity and
 * record the contexts to complete for the persist and append phases. */
void DiscardLogOperation::init_op(
  uint64_t current_sync_gen, bool persist_on_flush,
  uint64_t last_op_sequence_num, Context *write_persist,
  Context *write_append) {
  log_entry->init(current_sync_gen, persist_on_flush, last_op_sequence_num);
  this->on_write_persist = write_persist;
  this->on_write_append = write_append;
}
} // namespace rwl
} // namespace pwl
} // namespace cache
} // namespace librbd
| 1,289 | 31.25 | 80 | cc |
null | ceph-main/src/librbd/cache/pwl/rwl/LogOperation.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_PWL_RWL_LOG_OPERATION_H
#define CEPH_LIBRBD_CACHE_PWL_RWL_LOG_OPERATION_H
#include "librbd/cache/pwl/LogOperation.h"
namespace librbd {
namespace cache {
namespace pwl {
namespace rwl {
/* RWL-specific write log operation: adds the pmem copy step
 * (copy_bl_to_cache_buffer, defined in LogOperation.cc); construction
 * forwards to the pwl base. */
class WriteLogOperation : public pwl::WriteLogOperation {
public:
WriteLogOperation(
WriteLogOperationSet &set, uint64_t image_offset_bytes,
uint64_t write_bytes, CephContext *cct,
std::shared_ptr<pwl::WriteLogEntry> write_log_entry)
: pwl::WriteLogOperation(set, image_offset_bytes, write_bytes, cct,
write_log_entry) {}
/* Writesame flavor.
 * NOTE(review): data_len is accepted but not forwarded to the base --
 * presumably derivable from writesame_log_entry; confirm. */
WriteLogOperation(
WriteLogOperationSet &set, uint64_t image_offset_bytes,
uint64_t write_bytes, uint32_t data_len, CephContext *cct,
std::shared_ptr<pwl::WriteLogEntry> writesame_log_entry)
: pwl::WriteLogOperation(set, image_offset_bytes, write_bytes, cct,
writesame_log_entry) {}
void copy_bl_to_cache_buffer(
std::vector<WriteBufferAllocation>::iterator allocation) override;
};
/* RWL-specific discard log operation; overrides init_op (defined in
 * LogOperation.cc) to stamp the entry and record both the persist and
 * append completion contexts. */
class DiscardLogOperation : public pwl::DiscardLogOperation {
public:
DiscardLogOperation(
std::shared_ptr<SyncPoint> sync_point, uint64_t image_offset_bytes,
uint64_t write_bytes, uint32_t discard_granularity_bytes,
utime_t dispatch_time, PerfCounters *perfcounter, CephContext *cct)
: pwl::DiscardLogOperation(sync_point, image_offset_bytes, write_bytes,
discard_granularity_bytes, dispatch_time,
perfcounter, cct) {}
void init_op(
uint64_t current_sync_gen, bool persist_on_flush,
uint64_t last_op_sequence_num, Context *write_persist,
Context *write_append) override;
};
} // namespace rwl
} // namespace pwl
} // namespace cache
} // namespace librbd
#endif // CEPH_LIBRBD_CACHE_PWL_RWL_LOG_OPERATION_H
| 1,957 | 33.964286 | 76 | h |
null | ceph-main/src/librbd/cache/pwl/rwl/ReadRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "ReadRequest.h"
#define dout_subsys ceph_subsys_rbd_pwl
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::pwl::rwl::ReadRequest: " << this << " " \
<< __func__ << ": "
namespace librbd {
namespace cache {
namespace pwl {
namespace rwl {
/* Completion for an RWL read.  On success (r >= 0) *m_out_bl is
 * assembled in extent order from per-extent hit buffers and slices of
 * the single miss read (miss_bl); the caller is then completed and the
 * read hit/miss perf counters updated.
 * @param r result of the miss read; passed through to the caller. */
void C_ReadRequest::finish(int r) {
ldout(m_cct, 20) << "(" << get_name() << "): r=" << r << dendl;
int hits = 0;
int misses = 0;
int hit_bytes = 0;
int miss_bytes = 0;
if (r >= 0) {
/*
* At this point the miss read has completed. We'll iterate through
* read_extents and produce *m_out_bl by assembling pieces of miss_bl
* and the individual hit extent bufs in the read extents that represent
* hits.
*/
uint64_t miss_bl_offset = 0;
/* Iterate by const reference: the previous by-value form copied each
* element of read_extents on every iteration for no benefit. */
for (const auto &extent : read_extents) {
if (extent->m_bl.length()) {
/* This was a hit */
ceph_assert(extent->second == extent->m_bl.length());
++hits;
hit_bytes += extent->second;
m_out_bl->claim_append(extent->m_bl);
} else {
/* This was a miss. */
++misses;
miss_bytes += extent->second;
bufferlist miss_extent_bl;
miss_extent_bl.substr_of(miss_bl, miss_bl_offset, extent->second);
/* Add this read miss bufferlist to the output bufferlist */
m_out_bl->claim_append(miss_extent_bl);
/* Consume these bytes in the read miss bufferlist */
miss_bl_offset += extent->second;
}
}
}
ldout(m_cct, 20) << "(" << get_name() << "): r=" << r << " bl=" << *m_out_bl << dendl;
utime_t now = ceph_clock_now();
ceph_assert((int)m_out_bl->length() == hit_bytes + miss_bytes);
/* Complete the caller first; the counter updates below touch only
* members of this request, which remains alive until finish() returns. */
m_on_finish->complete(r);
m_perfcounter->inc(l_librbd_pwl_rd_bytes, hit_bytes + miss_bytes);
m_perfcounter->inc(l_librbd_pwl_rd_hit_bytes, hit_bytes);
m_perfcounter->tinc(l_librbd_pwl_rd_latency, now - m_arrived_time);
if (!misses) {
m_perfcounter->inc(l_librbd_pwl_rd_hit_req, 1);
m_perfcounter->tinc(l_librbd_pwl_rd_hit_latency, now - m_arrived_time);
} else {
if (hits) {
m_perfcounter->inc(l_librbd_pwl_rd_part_hit_req, 1);
}
}
}
} // namespace rwl
} // namespace pwl
} // namespace cache
} // namespace librbd
| 2,326 | 31.774648 | 88 | cc |
null | ceph-main/src/librbd/cache/pwl/rwl/ReadRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_PWL_RWL_READ_REQUEST_H
#define CEPH_LIBRBD_CACHE_PWL_RWL_READ_REQUEST_H
#include "librbd/cache/pwl/ReadRequest.h"
namespace librbd {
namespace cache {
namespace pwl {
namespace rwl {
typedef std::vector<pwl::ImageExtentBuf> ImageExtentBufs;
/* RWL read-request completion: its finish() (ReadRequest.cc) stitches
 * *m_out_bl together from per-extent hit buffers and the miss read,
 * then updates the read perf counters. */
class C_ReadRequest : public pwl::C_ReadRequest {
protected:
using pwl::C_ReadRequest::m_cct;
using pwl::C_ReadRequest::m_on_finish;
using pwl::C_ReadRequest::m_out_bl;
using pwl::C_ReadRequest::m_arrived_time;
using pwl::C_ReadRequest::m_perfcounter;
public:
C_ReadRequest(CephContext *cct, utime_t arrived, PerfCounters *perfcounter, bufferlist *out_bl, Context *on_finish)
: pwl::C_ReadRequest(cct, arrived, perfcounter, out_bl, on_finish) {}
void finish(int r) override;
};
} // namespace rwl
} // namespace pwl
} // namespace cache
} // namespace librbd
#endif // CEPH_LIBRBD_CACHE_PWL_RWL_READ_REQUEST_H
| 1,000 | 27.6 | 117 | h |
null | ceph-main/src/librbd/cache/pwl/rwl/Request.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "Request.h"
#include "librbd/cache/pwl/AbstractWriteLog.h"
#define dout_subsys ceph_subsys_rbd_pwl
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::pwl::rwl::Request: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace cache {
namespace pwl {
namespace rwl {
/* Size one pmem buffer reservation per image extent; each extent also
 * consumes one lane, one log entry and one unpublished reserve.  All
 * six out-parameters are (re)assigned here. */
template <typename T>
void C_WriteRequest<T>::setup_buffer_resources(
  uint64_t *bytes_cached, uint64_t *bytes_dirtied, uint64_t *bytes_allocated,
  uint64_t *number_lanes, uint64_t *number_log_entries,
  uint64_t *number_unpublished_reserves) {
  ceph_assert(!this->m_resources.allocated);
  auto &buffers = this->m_resources.buffers;
  const auto extent_count = this->image_extents.size();
  buffers.reserve(extent_count);
  *number_lanes = extent_count;
  *number_log_entries = extent_count;
  *number_unpublished_reserves = extent_count;
  uint64_t cached = 0;
  uint64_t allocated = 0;
  for (auto &extent : this->image_extents) {
    buffers.emplace_back();
    auto &buffer = buffers.back();
    buffer.allocated = false;
    /* Never reserve less than the minimum write allocation unit */
    buffer.allocation_size = (extent.second > MIN_WRITE_ALLOC_SIZE)
                               ? extent.second
                               : MIN_WRITE_ALLOC_SIZE;
    cached += extent.second;
    allocated += buffer.allocation_size;
  }
  *bytes_cached = cached;
  *bytes_allocated = allocated;
  /* Every cached byte is dirty until written back */
  *bytes_dirtied = cached;
}
template <typename T>
std::ostream &operator<<(std::ostream &os,
const C_CompAndWriteRequest<T> &req) {
os << (C_WriteRequest<T>&)req
<< " cmp_bl=" << req.cmp_bl
<< ", read_bl=" << req.read_bl
<< ", compare_succeeded=" << req.compare_succeeded
<< ", mismatch_offset=" << req.mismatch_offset;
return os;
}
// Resource accounting for a writesame: the request dirties the whole
// (single) extent but only caches/allocates space for the repeating
// pattern held in this->bl. Exactly one log entry and one buffer.
template <typename T>
void C_WriteSameRequest<T>::setup_buffer_resources(
    uint64_t *bytes_cached, uint64_t *bytes_dirtied, uint64_t *bytes_allocated,
    uint64_t *number_lanes, uint64_t *number_log_entries,
    uint64_t *number_unpublished_reserves) {
  ceph_assert(this->image_extents.size() == 1);
  *number_log_entries = 1;
  // The full extent becomes dirty even though only the pattern is stored.
  *bytes_dirtied += this->image_extents[0].second;

  auto pattern_len = this->bl.length();
  *bytes_cached += pattern_len;

  this->m_resources.buffers.emplace_back();
  auto &buffer = this->m_resources.buffers.back();
  buffer.allocated = false;
  // At least the minimum pmem allocation unit, grown to fit the pattern.
  buffer.allocation_size = MIN_WRITE_ALLOC_SIZE;
  if (pattern_len > buffer.allocation_size) {
    buffer.allocation_size = pattern_len;
  }
  *bytes_allocated += buffer.allocation_size;
}
} // namespace rwl
} // namespace pwl
} // namespace cache
} // namespace librbd
// Explicit instantiations for the concrete write-log type used by librbd.
template class librbd::cache::pwl::rwl::C_WriteRequest<librbd::cache::pwl::AbstractWriteLog<librbd::ImageCtx> >;
template class librbd::cache::pwl::rwl::C_WriteSameRequest<librbd::cache::pwl::AbstractWriteLog<librbd::ImageCtx> >;
template class librbd::cache::pwl::rwl::C_CompAndWriteRequest<librbd::cache::pwl::AbstractWriteLog<librbd::ImageCtx> >;
| 3,092 | 34.551724 | 119 | cc |
null | ceph-main/src/librbd/cache/pwl/rwl/Request.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_RWL_REQUEST_H
#define CEPH_LIBRBD_CACHE_RWL_REQUEST_H
#include "librbd/cache/pwl/Request.h"
namespace librbd {
class BlockGuardCell;
namespace cache {
namespace pwl {
namespace rwl {
// RWL (pmem) plain-write request. Both constructors forward to the
// generic pwl::C_WriteRequest; the second overload (no cmp_bl) is the
// ordinary write path, the first carries compare-and-write arguments
// for use by the C_CompAndWriteRequest subclass below.
template <typename T>
class C_WriteRequest : public pwl::C_WriteRequest<T> {
public:
  C_WriteRequest(
      T &pwl, const utime_t arrived, io::Extents &&image_extents,
      bufferlist&& cmp_bl, bufferlist&& bl, uint64_t *mismatch_offset,
      const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req)
    : pwl::C_WriteRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(cmp_bl),
        std::move(bl), mismatch_offset, fadvise_flags,
        lock, perfcounter, user_req) {}

  C_WriteRequest(
      T &pwl, const utime_t arrived, io::Extents &&image_extents,
      bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req)
    : pwl::C_WriteRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(bl),
        fadvise_flags, lock, perfcounter, user_req) {}

protected:
  // Plain writes will allocate one buffer per request extent.
  void setup_buffer_resources(
      uint64_t *bytes_cached, uint64_t *bytes_dirtied,
      uint64_t *bytes_allocated, uint64_t *number_lanes,
      uint64_t *number_log_entries,
      uint64_t *number_unpublished_reserves) override;
};
// Compare-and-write request: behaves as a plain write once the compare
// phase succeeds. cmp_bl/read_bl/mismatch_offset live in the pwl base;
// this class only names itself and provides a stream operator.
template <typename T>
class C_CompAndWriteRequest : public C_WriteRequest<T> {
public:
  C_CompAndWriteRequest(
      T &pwl, const utime_t arrived, io::Extents &&image_extents,
      bufferlist&& cmp_bl, bufferlist&& bl, uint64_t *mismatch_offset,
      const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req)
    : C_WriteRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(cmp_bl),
        std::move(bl), mismatch_offset, fadvise_flags,
        lock, perfcounter, user_req) {}

  const char *get_name() const override {
    return "C_CompAndWriteRequest";
  }
  template <typename U>
  friend std::ostream &operator<<(std::ostream &os,
                                  const C_CompAndWriteRequest<U> &req);
};
// RWL writesame request. Overrides resource setup because a writesame
// stores only the repeating pattern (one buffer, one log entry) rather
// than one buffer per extent.
template <typename T>
class C_WriteSameRequest : public pwl::C_WriteSameRequest<T> {
public:
  C_WriteSameRequest(
      T &pwl, const utime_t arrived, io::Extents &&image_extents,
      bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req)
    : pwl::C_WriteSameRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(bl), fadvise_flags,
        lock, perfcounter, user_req) {}

  void setup_buffer_resources(
      uint64_t *bytes_cached, uint64_t *bytes_dirtied,
      uint64_t *bytes_allocated, uint64_t *number_lanes,
      uint64_t *number_log_entries,
      uint64_t *number_unpublished_reserves) override;
};
} // namespace rwl
} // namespace pwl
} // namespace cache
} // namespace librbd
#endif // CEPH_LIBRBD_CACHE_RWL_REQUEST_H
| 3,097 | 33.043956 | 77 | h |
null | ceph-main/src/librbd/cache/pwl/rwl/WriteLog.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "WriteLog.h"
#include "include/buffer.h"
#include "include/Context.h"
#include "include/ceph_assert.h"
#include "common/deleter.h"
#include "common/dout.h"
#include "common/environment.h"
#include "common/errno.h"
#include "common/WorkQueue.h"
#include "common/Timer.h"
#include "common/perf_counters.h"
#include "librbd/ImageCtx.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/cache/pwl/ImageCacheState.h"
#include "librbd/cache/pwl/LogEntry.h"
#include "librbd/plugin/Api.h"
#include <map>
#include <vector>
#undef dout_subsys
#define dout_subsys ceph_subsys_rbd_pwl
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::pwl::rwl::WriteLog: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace cache {
namespace pwl {
using namespace std;
using namespace librbd::cache::pwl;
namespace rwl {
const unsigned long int OPS_APPENDED_TOGETHER = MAX_ALLOC_PER_TRANSACTION;
// Create the RWL-specific entry/operation builder. The pointer is kept
// in m_builderobj (freed in the destructor) and also returned so the
// constructor below can pass it to the AbstractWriteLog base.
template <typename I>
Builder<AbstractWriteLog<I>>* WriteLog<I>::create_builder() {
  m_builderobj = new Builder<This>();
  return m_builderobj;
}
// Construct the pmemobj-backed write log. create_builder() is invoked
// inside the base-class argument list so the builder exists before the
// AbstractWriteLog constructor runs.
template <typename I>
WriteLog<I>::WriteLog(
    I &image_ctx, librbd::cache::pwl::ImageCacheState<I>* cache_state,
    ImageWritebackInterface& image_writeback,
    plugin::Api<I>& plugin_api)
: AbstractWriteLog<I>(image_ctx, cache_state, create_builder(), image_writeback,
                      plugin_api),
  m_pwl_pool_layout_name(POBJ_LAYOUT_NAME(rbd_pwl))
{
}
// Destructor. The pool itself is closed in remove_pool_file() during
// shutdown; here we only drop the stale handle and free the builder.
template <typename I>
WriteLog<I>::~WriteLog() {
  m_log_pool = nullptr;
  delete m_builderobj;
}
// Build the bufferlist for one read hit and append it to the read
// request. The hit data is copied out of the log entry's cache buffer
// (copy_cache_bl), then the hit sub-range is sliced into hit_bl, so the
// read result does not reference pmem after this returns. The
// log_entries_to_read/bls_to_read vectors are unused in the RWL
// (pmem) backend — reads complete synchronously here.
template <typename I>
void WriteLog<I>::collect_read_extents(
    uint64_t read_buffer_offset, LogMapEntry<GenericWriteLogEntry> map_entry,
    std::vector<std::shared_ptr<GenericWriteLogEntry>> &log_entries_to_read,
    std::vector<bufferlist*> &bls_to_read, uint64_t entry_hit_length,
    Extent hit_extent, pwl::C_ReadRequest *read_ctx) {
  /* Make a bl for this hit extent. This will add references to the
   * write_entry->pmem_bp */
  buffer::list hit_bl;
  /* Create buffer object referring to pmem pool for this read hit */
  auto write_entry = map_entry.log_entry;
  buffer::list entry_bl_copy;
  write_entry->copy_cache_bl(&entry_bl_copy);
  // Slice the hit range out of the copied entry data.
  entry_bl_copy.begin(read_buffer_offset).copy(entry_hit_length, hit_bl);
  ceph_assert(hit_bl.length() == entry_hit_length);
  /* Add hit extent to read extents */
  auto hit_extent_buf = std::make_shared<ImageExtentBuf>(hit_extent, hit_bl);
  read_ctx->read_extents.push_back(hit_extent_buf);
}
// Complete a read. In the pmem backend all hit data was already copied
// in collect_read_extents(), so there is nothing to wait for — the two
// vectors are unused and the context completes immediately with success.
template <typename I>
void WriteLog<I>::complete_read(
    std::vector<std::shared_ptr<GenericWriteLogEntry>> &log_entries_to_read,
    std::vector<bufferlist*> &bls_to_read, Context *ctx) {
  ctx->complete(0);
}
/*
* Allocate the (already reserved) write log entries for a set of operations.
*
* Locking:
* Acquires lock
*/
template <typename I>
void WriteLog<I>::alloc_op_log_entries(GenericLogOperations &ops)
{
  TOID(struct WriteLogPoolRoot) pool_root;
  pool_root = POBJ_ROOT(m_log_pool, struct WriteLogPoolRoot);
  struct WriteLogCacheEntry *pmem_log_entries = D_RW(D_RW(pool_root)->log_entries);
  // Caller must already hold the append lock; entry indices are handed
  // out in append order.
  ceph_assert(ceph_mutex_is_locked_by_me(this->m_log_append_lock));
  /* Allocate the (already reserved) log entries */
  std::unique_lock locker(m_lock);
  for (auto &operation : ops) {
    // Claim the next ring slot and advance the free pointer (mod ring size).
    uint32_t entry_index = this->m_first_free_entry;
    this->m_first_free_entry = (this->m_first_free_entry + 1) % this->m_total_log_entries;
    auto &log_entry = operation->get_log_entry();
    log_entry->log_entry_index = entry_index;
    log_entry->ram_entry.entry_index = entry_index;
    // Point the in-RAM entry at its persistent slot in the pool.
    log_entry->cache_entry = &pmem_log_entries[entry_index];
    log_entry->ram_entry.set_entry_valid(true);
    m_log_entries.push_back(log_entry);
    ldout(m_image_ctx.cct, 20) << "operation=[" << *operation << "]" << dendl;
  }
  // First entry makes the cache non-empty; persist that state change.
  if (m_cache_state->empty && !m_log_entries.empty()) {
    m_cache_state->empty = false;
    this->update_image_cache_state();
    this->write_image_cache_state(locker);
  }
}
/*
* Write and persist the (already allocated) write log entries and
* data buffer allocations for a set of ops. The data buffer for each
* of these must already have been persisted to its reserved area.
*/
template <typename I>
int WriteLog<I>::append_op_log_entries(GenericLogOperations &ops)
{
  CephContext *cct = m_image_ctx.cct;
  GenericLogOperationsVector entries_to_flush;
  TOID(struct WriteLogPoolRoot) pool_root;
  pool_root = POBJ_ROOT(m_log_pool, struct WriteLogPoolRoot);
  int ret = 0;
  ceph_assert(ceph_mutex_is_locked_by_me(this->m_log_append_lock));
  if (ops.empty()) {
    return 0;
  }
  entries_to_flush.reserve(OPS_APPENDED_TOGETHER);
  /* Write log entries to ring and persist */
  utime_t now = ceph_clock_now();
  for (auto &operation : ops) {
    if (!entries_to_flush.empty()) {
      /* Flush these and reset the list if the current entry wraps to the
       * tail of the ring */
      // pmemobj_flush below needs a contiguous range, so a batch may not
      // span the ring wrap-around.
      if (entries_to_flush.back()->get_log_entry()->log_entry_index >
          operation->get_log_entry()->log_entry_index) {
        ldout(m_image_ctx.cct, 20) << "entries to flush wrap around the end of the ring at "
                                   << "operation=[" << *operation << "]" << dendl;
        flush_op_log_entries(entries_to_flush);
        entries_to_flush.clear();
        now = ceph_clock_now();
      }
    }
    ldout(m_image_ctx.cct, 20) << "Copying entry for operation at index="
                               << operation->get_log_entry()->log_entry_index
                               << " from " << &operation->get_log_entry()->ram_entry
                               << " to " << operation->get_log_entry()->cache_entry
                               << " operation=[" << *operation << "]" << dendl;
    operation->log_append_start_time = now;
    // Copy the staged RAM entry into its persistent ring slot.
    *operation->get_log_entry()->cache_entry = operation->get_log_entry()->ram_entry;
    ldout(m_image_ctx.cct, 20) << "APPENDING: index="
                               << operation->get_log_entry()->log_entry_index
                               << " pmem_entry=[" << *operation->get_log_entry()->cache_entry
                               << "]" << dendl;
    entries_to_flush.push_back(operation);
  }
  flush_op_log_entries(entries_to_flush);
  /* Drain once for all */
  pmemobj_drain(m_log_pool);
  /*
   * Atomically advance the log head pointer and publish the
   * allocations for all the data buffers they refer to.
   */
  utime_t tx_start = ceph_clock_now();
  TX_BEGIN(m_log_pool) {
    D_RW(pool_root)->first_free_entry = this->m_first_free_entry;
    for (auto &operation : ops) {
      if (operation->reserved_allocated()) {
        auto write_op = (std::shared_ptr<WriteLogOperation>&) operation;
        // Make the reserved data-buffer allocation permanent within this tx.
        pmemobj_tx_publish(&write_op->buffer_alloc->buffer_alloc_action, 1);
      } else {
        ldout(m_image_ctx.cct, 20) << "skipping non-write op: " << *operation << dendl;
      }
    }
  } TX_ONCOMMIT {
  } TX_ONABORT {
    // Transaction abort here means the log is inconsistent; treat as fatal.
    lderr(cct) << "failed to commit " << ops.size()
               << " log entries (" << this->m_log_pool_name << ")" << dendl;
    ceph_assert(false);
    ret = -EIO;
  } TX_FINALLY {
  } TX_END;
  utime_t tx_end = ceph_clock_now();
  m_perfcounter->tinc(l_librbd_pwl_append_tx_t, tx_end - tx_start);
  m_perfcounter->hinc(
    l_librbd_pwl_append_tx_t_hist, utime_t(tx_end - tx_start).to_nsec(), ops.size());
  for (auto &operation : ops) {
    operation->log_append_comp_time = tx_end;
  }
  return ret;
}
/*
* Flush the persistent write log entries set of ops. The entries must
* be contiguous in persistent memory.
*/
template <typename I>
void WriteLog<I>::flush_op_log_entries(GenericLogOperationsVector &ops)
{
  if (ops.empty()) {
    return;
  }
  // The ops must occupy ascending, contiguous ring slots: a single
  // pmemobj_flush of [first, first + n*entry_size) covers them all.
  if (ops.size() > 1) {
    ceph_assert(ops.front()->get_log_entry()->cache_entry < ops.back()->get_log_entry()->cache_entry);
  }
  ldout(m_image_ctx.cct, 20) << "entry count=" << ops.size()
                             << " start address="
                             << ops.front()->get_log_entry()->cache_entry
                             << " bytes="
                             << ops.size() * sizeof(*(ops.front()->get_log_entry()->cache_entry))
                             << dendl;
  // Flush only; the caller issues a single pmemobj_drain for all batches.
  pmemobj_flush(m_log_pool,
                ops.front()->get_log_entry()->cache_entry,
                ops.size() * sizeof(*(ops.front()->get_log_entry()->cache_entry)));
}
// Close the pmem pool (if open) and delete its backing file, but only
// when the cache is clean — a dirty cache's pool file must survive so
// the log can be replayed on the next open.
template <typename I>
void WriteLog<I>::remove_pool_file() {
  if (m_log_pool) {
    ldout(m_image_ctx.cct, 6) << "closing pmem pool" << dendl;
    pmemobj_close(m_log_pool);
  }
  if (m_cache_state->clean) {
    ldout(m_image_ctx.cct, 5) << "Removing empty pool file: " << this->m_log_pool_name << dendl;
    if (remove(this->m_log_pool_name.c_str()) != 0) {
      lderr(m_image_ctx.cct) << "failed to remove empty pool \"" << this->m_log_pool_name << "\": "
                             << pmemobj_errormsg() << dendl;
    } else {
      // Only mark the cache absent once the file is actually gone.
      m_cache_state->present = false;
    }
  } else {
    ldout(m_image_ctx.cct, 5) << "Not removing pool file: " << this->m_log_pool_name << dendl;
  }
}
// Create a new pmemobj pool or open an existing one, initializing the
// in-memory log bookkeeping (total/free entry counts, ring pointers,
// allocation cap) from the pool root. On any failure the pool is closed,
// on_finish is completed with an error, and false is returned; on
// success later-completed contexts may be queued via `later`.
template <typename I>
bool WriteLog<I>::initialize_pool(Context *on_finish, pwl::DeferredContexts &later) {
  CephContext *cct = m_image_ctx.cct;
  int r = -EINVAL;
  TOID(struct WriteLogPoolRoot) pool_root;
  ceph_assert(ceph_mutex_is_locked_by_me(m_lock));
  if (access(this->m_log_pool_name.c_str(), F_OK) != 0) {
    // No pool file yet: create one.
    if ((m_log_pool =
         pmemobj_create(this->m_log_pool_name.c_str(),
                        this->m_pwl_pool_layout_name,
                        this->m_log_pool_size,
                        (S_IWUSR | S_IRUSR))) == NULL) {
      lderr(cct) << "failed to create pool: " << this->m_log_pool_name
                 << ". error: " << pmemobj_errormsg() << dendl;
      m_cache_state->present = false;
      m_cache_state->clean = true;
      m_cache_state->empty = true;
      /* TODO: filter/replace errnos that are meaningless to the caller */
      on_finish->complete(-errno);
      return false;
    }
    m_cache_state->present = true;
    m_cache_state->clean = true;
    m_cache_state->empty = true;
    pool_root = POBJ_ROOT(m_log_pool, struct WriteLogPoolRoot);
    /* new pool, calculate and store metadata */
    // Size the ring by the worst case: every entry is a minimum-sized
    // write plus its allocator overhead and log-entry struct.
    size_t effective_pool_size = (size_t)(this->m_log_pool_size * USABLE_SIZE);
    size_t small_write_size = MIN_WRITE_ALLOC_SIZE + BLOCK_ALLOC_OVERHEAD_BYTES + sizeof(struct WriteLogCacheEntry);
    uint64_t num_small_writes = (uint64_t)(effective_pool_size / small_write_size);
    if (num_small_writes > MAX_LOG_ENTRIES) {
      num_small_writes = MAX_LOG_ENTRIES;
    }
    if (num_small_writes <= 2) {
      lderr(cct) << "num_small_writes needs to > 2" << dendl;
      goto err_close_pool;
    }
    this->m_bytes_allocated_cap = effective_pool_size;
    /* Log ring empty */
    m_first_free_entry = 0;
    m_first_valid_entry = 0;
    // Persist the initial pool metadata atomically.
    TX_BEGIN(m_log_pool) {
      TX_ADD(pool_root);
      D_RW(pool_root)->header.layout_version = RWL_LAYOUT_VERSION;
      D_RW(pool_root)->log_entries =
        TX_ZALLOC(struct WriteLogCacheEntry,
                  sizeof(struct WriteLogCacheEntry) * num_small_writes);
      D_RW(pool_root)->pool_size = this->m_log_pool_size;
      D_RW(pool_root)->flushed_sync_gen = this->m_flushed_sync_gen;
      D_RW(pool_root)->block_size = MIN_WRITE_ALLOC_SIZE;
      D_RW(pool_root)->num_log_entries = num_small_writes;
      D_RW(pool_root)->first_free_entry = m_first_free_entry;
      D_RW(pool_root)->first_valid_entry = m_first_valid_entry;
    } TX_ONCOMMIT {
      this->m_total_log_entries = D_RO(pool_root)->num_log_entries;
      this->m_free_log_entries = D_RO(pool_root)->num_log_entries - 1; // leave one free
    } TX_ONABORT {
      this->m_total_log_entries = 0;
      this->m_free_log_entries = 0;
      lderr(cct) << "failed to initialize pool: " << this->m_log_pool_name
                 << ". pmemobj TX errno: " << pmemobj_tx_errno() << dendl;
      r = -pmemobj_tx_errno();
      goto err_close_pool;
    } TX_FINALLY {
    } TX_END;
  } else {
    ceph_assert(m_cache_state->present);
    /* Open existing pool */
    if ((m_log_pool =
         pmemobj_open(this->m_log_pool_name.c_str(),
                      this->m_pwl_pool_layout_name)) == NULL) {
      lderr(cct) << "failed to open pool (" << this->m_log_pool_name << "): "
                 << pmemobj_errormsg() << dendl;
      on_finish->complete(-errno);
      return false;
    }
    pool_root = POBJ_ROOT(m_log_pool, struct WriteLogPoolRoot);
    // Reject pools written by a different layout version or block size.
    if (D_RO(pool_root)->header.layout_version != RWL_LAYOUT_VERSION) {
      // TODO: will handle upgrading version in the future
      lderr(cct) << "pool layout version is "
                 << D_RO(pool_root)->header.layout_version
                 << " expected " << RWL_LAYOUT_VERSION << dendl;
      goto err_close_pool;
    }
    if (D_RO(pool_root)->block_size != MIN_WRITE_ALLOC_SIZE) {
      lderr(cct) << "pool block size is " << D_RO(pool_root)->block_size
                 << " expected " << MIN_WRITE_ALLOC_SIZE << dendl;
      goto err_close_pool;
    }
    this->m_log_pool_size = D_RO(pool_root)->pool_size;
    this->m_flushed_sync_gen = D_RO(pool_root)->flushed_sync_gen;
    this->m_total_log_entries = D_RO(pool_root)->num_log_entries;
    m_first_free_entry = D_RO(pool_root)->first_free_entry;
    m_first_valid_entry = D_RO(pool_root)->first_valid_entry;
    if (m_first_free_entry < m_first_valid_entry) {
      /* Valid entries wrap around the end of the ring, so first_free is lower
       * than first_valid. If first_valid was == first_free+1, the entry at
       * first_free would be empty. The last entry is never used, so in
       * that case there would be zero free log entries. */
      this->m_free_log_entries = this->m_total_log_entries - (m_first_valid_entry - m_first_free_entry) -1;
    } else {
      /* first_valid is <= first_free. If they are == we have zero valid log
       * entries, and n-1 free log entries */
      this->m_free_log_entries = this->m_total_log_entries - (m_first_free_entry - m_first_valid_entry) -1;
    }
    size_t effective_pool_size = (size_t)(this->m_log_pool_size * USABLE_SIZE);
    this->m_bytes_allocated_cap = effective_pool_size;
    // Rebuild in-memory state (entries, sync points, dirty list) from pmem.
    load_existing_entries(later);
    m_cache_state->clean = this->m_dirty_log_entries.empty();
    m_cache_state->empty = m_log_entries.empty();
  }
  return true;
err_close_pool:
  pmemobj_close(m_log_pool);
  on_finish->complete(r);
  return false;
}
/*
* Loads the log entries from an existing log.
*
* Creates the in-memory structures to represent the state of the
* re-opened log.
*
* Finds the last appended sync point, and any sync points referred to
* in log entries, but missing from the log. These missing sync points
* are created and scheduled for append. Some rudimentary consistency
* checking is done.
*
* Rebuilds the m_blocks_to_log_entries map, to make log entries
* readable.
*
* Places all writes on the dirty entries list, which causes them all
* to be flushed.
*
*/
template <typename I>
void WriteLog<I>::load_existing_entries(DeferredContexts &later) {
  TOID(struct WriteLogPoolRoot) pool_root;
  pool_root = POBJ_ROOT(m_log_pool, struct WriteLogPoolRoot);
  struct WriteLogCacheEntry *pmem_log_entries = D_RW(D_RW(pool_root)->log_entries);
  uint64_t entry_index = m_first_valid_entry;
  /* The map below allows us to find sync point log entries by sync
   * gen number, which is necessary so write entries can be linked to
   * their sync points. */
  std::map<uint64_t, std::shared_ptr<SyncPointLogEntry>> sync_point_entries;
  /* The map below tracks sync points referred to in writes but not
   * appearing in the sync_point_entries map. We'll use this to
   * determine which sync points are missing and need to be
   * created. */
  std::map<uint64_t, bool> missing_sync_points;
  /*
   * Read the existing log entries. Construct an in-memory log entry
   * object of the appropriate type for each. Add these to the global
   * log entries list.
   *
   * Write entries will not link to their sync points yet. We'll do
   * that in the next pass. Here we'll accumulate a map of sync point
   * gen numbers that are referred to in writes but do not appearing in
   * the log.
   */
  // Walk the ring from first_valid to first_free (exclusive).
  while (entry_index != m_first_free_entry) {
    WriteLogCacheEntry *pmem_entry = &pmem_log_entries[entry_index];
    std::shared_ptr<GenericLogEntry> log_entry = nullptr;
    ceph_assert(pmem_entry->entry_index == entry_index);
    this->update_entries(&log_entry, pmem_entry, missing_sync_points,
                         sync_point_entries, entry_index);
    // Mirror the persistent entry into RAM and link back to its pmem slot.
    log_entry->ram_entry = *pmem_entry;
    log_entry->cache_entry = pmem_entry;
    log_entry->log_entry_index = entry_index;
    log_entry->completed = true;
    m_log_entries.push_back(log_entry);
    entry_index = (entry_index + 1) % this->m_total_log_entries;
  }
  // Second pass: create/schedule any sync points referenced but missing.
  this->update_sync_points(missing_sync_points, sync_point_entries, later);
}
// Account the pool space consumed by one recovered log entry. Only
// write entries carry data; their allocation is rounded up to the
// minimum pmem allocation unit while the cached-bytes counter tracks
// the exact payload size.
template <typename I>
void WriteLog<I>::inc_allocated_cached_bytes(
    std::shared_ptr<pwl::GenericLogEntry> log_entry) {
  if (!log_entry->is_write_entry()) {
    return;
  }
  auto payload_bytes = log_entry->write_bytes();
  this->m_bytes_allocated += std::max(payload_bytes, MIN_WRITE_ALLOC_SIZE);
  this->m_bytes_cached += payload_bytes;
}
// Point a recovered writesame entry's cache_buffer at its data blob in
// the pmem pool (D_RW resolves the persistent OID to a direct pointer).
template <typename I>
void WriteLog<I>::write_data_to_buffer(
    std::shared_ptr<pwl::WriteLogEntry> ws_entry,
    WriteLogCacheEntry *pmem_entry) {
  ws_entry->cache_buffer = D_RW(pmem_entry->write_data);
}
/**
* Retire up to MAX_ALLOC_PER_TRANSACTION of the oldest log entries
* that are eligible to be retired. Returns true if anything was
* retired.
*/
/**
 * Retire up to `frees_per_tx` of the oldest log entries that are
 * eligible to be retired (written back and no longer referenced).
 *
 * Fix: the TX_ONABORT error message was missing a trailing space
 * ("failed to commit free of" + count rendered as "free of5 log
 * entries").
 *
 * Returns true if anything was retired, false if nothing was eligible.
 */
template <typename I>
bool WriteLog<I>::retire_entries(const unsigned long int frees_per_tx) {
  CephContext *cct = m_image_ctx.cct;
  GenericLogEntriesVector retiring_entries;
  uint32_t initial_first_valid_entry;
  uint32_t first_valid_entry;
  std::lock_guard retire_locker(this->m_log_retire_lock);
  ldout(cct, 20) << "Look for entries to retire" << dendl;
  {
    /* Entry readers can't be added while we hold m_entry_reader_lock */
    RWLock::WLocker entry_reader_locker(this->m_entry_reader_lock);
    std::lock_guard locker(m_lock);
    initial_first_valid_entry = this->m_first_valid_entry;
    first_valid_entry = this->m_first_valid_entry;
    // Pop retireable entries from the tail of the ring, in order.
    while (!m_log_entries.empty() && retiring_entries.size() < frees_per_tx &&
           this->can_retire_entry(m_log_entries.front())) {
      auto entry = m_log_entries.front();
      if (entry->log_entry_index != first_valid_entry) {
        lderr(cct) << "retiring entry index (" << entry->log_entry_index
                   << ") and first valid log entry index (" << first_valid_entry
                   << ") must be ==." << dendl;
      }
      ceph_assert(entry->log_entry_index == first_valid_entry);
      first_valid_entry = (first_valid_entry + 1) % this->m_total_log_entries;
      m_log_entries.pop_front();
      retiring_entries.push_back(entry);
      /* Remove entry from map so there will be no more readers */
      if ((entry->write_bytes() > 0) || (entry->bytes_dirty() > 0)) {
        auto gen_write_entry = static_pointer_cast<GenericWriteLogEntry>(entry);
        if (gen_write_entry) {
          this->m_blocks_to_log_entries.remove_log_entry(gen_write_entry);
        }
      }
    }
  }
  if (retiring_entries.size()) {
    ldout(cct, 20) << "Retiring " << retiring_entries.size() << " entries" << dendl;
    TOID(struct WriteLogPoolRoot) pool_root;
    pool_root = POBJ_ROOT(m_log_pool, struct WriteLogPoolRoot);
    utime_t tx_start;
    utime_t tx_end;
    /* Advance first valid entry and release buffers */
    {
      uint64_t flushed_sync_gen;
      std::lock_guard append_locker(this->m_log_append_lock);
      {
        std::lock_guard locker(m_lock);
        flushed_sync_gen = this->m_flushed_sync_gen;
      }
      tx_start = ceph_clock_now();
      // One transaction frees the data buffers and advances first_valid.
      TX_BEGIN(m_log_pool) {
        if (D_RO(pool_root)->flushed_sync_gen < flushed_sync_gen) {
          ldout(m_image_ctx.cct, 20) << "flushed_sync_gen in log updated from "
                                     << D_RO(pool_root)->flushed_sync_gen << " to "
                                     << flushed_sync_gen << dendl;
          D_RW(pool_root)->flushed_sync_gen = flushed_sync_gen;
        }
        D_RW(pool_root)->first_valid_entry = first_valid_entry;
        for (auto &entry: retiring_entries) {
          if (entry->write_bytes()) {
            ldout(cct, 20) << "Freeing " << entry->ram_entry.write_data.oid.pool_uuid_lo
                           << "." << entry->ram_entry.write_data.oid.off << dendl;
            TX_FREE(entry->ram_entry.write_data);
          } else {
            ldout(cct, 20) << "Retiring non-write: " << *entry << dendl;
          }
        }
      } TX_ONCOMMIT {
      } TX_ONABORT {
        // Abort leaves the log inconsistent; treat as fatal.
        lderr(cct) << "failed to commit free of " << retiring_entries.size()
                   << " log entries (" << this->m_log_pool_name << ")" << dendl;
        ceph_assert(false);
      } TX_FINALLY {
      } TX_END;
      tx_end = ceph_clock_now();
    }
    m_perfcounter->tinc(l_librbd_pwl_retire_tx_t, tx_end - tx_start);
    m_perfcounter->hinc(l_librbd_pwl_retire_tx_t_hist, utime_t(tx_end - tx_start).to_nsec(),
                        retiring_entries.size());
    bool need_update_state = false;
    /* Update runtime copy of first_valid, and free entries counts */
    {
      std::lock_guard locker(m_lock);
      ceph_assert(this->m_first_valid_entry == initial_first_valid_entry);
      this->m_first_valid_entry = first_valid_entry;
      this->m_free_log_entries += retiring_entries.size();
      if (!m_cache_state->empty && m_log_entries.empty()) {
        m_cache_state->empty = true;
        this->update_image_cache_state();
        need_update_state = true;
      }
      // Return the retired entries' buffer space to the accounting caps.
      for (auto &entry: retiring_entries) {
        if (entry->write_bytes()) {
          ceph_assert(this->m_bytes_cached >= entry->write_bytes());
          this->m_bytes_cached -= entry->write_bytes();
          uint64_t entry_allocation_size = entry->write_bytes();
          if (entry_allocation_size < MIN_WRITE_ALLOC_SIZE) {
            entry_allocation_size = MIN_WRITE_ALLOC_SIZE;
          }
          ceph_assert(this->m_bytes_allocated >= entry_allocation_size);
          this->m_bytes_allocated -= entry_allocation_size;
        }
      }
      this->m_alloc_failed_since_retire = false;
      // Freed space may unblock deferred writes.
      this->wake_up();
    }
    if (need_update_state) {
      std::unique_lock locker(m_lock);
      this->write_image_cache_state(locker);
    }
  } else {
    ldout(cct, 20) << "Nothing to retire" << dendl;
    return false;
  }
  return true;
}
// Queue writeback for a batch of dirty log entries. Each entry is
// detained behind a flush guard; when the guard admits it, the generic
// flush context runs. When not invalidating, the actual image writeback
// is bounced through op_work_queue so it does not run in this thread.
// NOTE: has_write_entry and post_unlock are unused in this (pmem)
// backend.
template <typename I>
void WriteLog<I>::construct_flush_entries(pwl::GenericLogEntries entries_to_flush,
					  DeferredContexts &post_unlock,
					  bool has_write_entry) {
  bool invalidating = this->m_invalidating; // snapshot so we behave consistently
  for (auto &log_entry : entries_to_flush) {
    GuardedRequestFunctionContext *guarded_ctx =
      new GuardedRequestFunctionContext([this, log_entry, invalidating]
        (GuardedRequestFunctionContext &guard_ctx) {
          log_entry->m_cell = guard_ctx.cell;
          Context *ctx = this->construct_flush_entry(log_entry, invalidating);
          // When invalidating, entries are dropped rather than written back.
          if (!invalidating) {
            ctx = new LambdaContext(
              [this, log_entry, ctx](int r) {
                m_image_ctx.op_work_queue->queue(new LambdaContext(
	          [this, log_entry, ctx](int r) {
	            ldout(m_image_ctx.cct, 15) << "flushing:" << log_entry
                                               << " " << *log_entry << dendl;
	            log_entry->writeback(this->m_image_writeback, ctx);
	          }), 0);
	      });
          }
          ctx->complete(0);
        });
    this->detain_flush_guard_request(log_entry, guarded_ctx);
  }
}
const unsigned long int ops_flushed_together = 4;
/*
* Performs the pmem buffer flush on all scheduled ops, then schedules
* the log event append operation for all of them.
*/
template <typename I>
void WriteLog<I>::flush_then_append_scheduled_ops(void)
{
  GenericLogOperations ops;
  bool ops_remain = false;
  ldout(m_image_ctx.cct, 20) << dendl;
  do {
    {
      ops.clear();
      std::lock_guard locker(m_lock);
      if (m_ops_to_flush.size()) {
        // Take up to ops_flushed_together ops from the head of the queue.
        auto last_in_batch = m_ops_to_flush.begin();
        unsigned int ops_to_flush = m_ops_to_flush.size();
        if (ops_to_flush > ops_flushed_together) {
          ops_to_flush = ops_flushed_together;
        }
        ldout(m_image_ctx.cct, 20) << "should flush " << ops_to_flush << dendl;
        std::advance(last_in_batch, ops_to_flush);
        ops.splice(ops.end(), m_ops_to_flush, m_ops_to_flush.begin(), last_in_batch);
        ops_remain = !m_ops_to_flush.empty();
        ldout(m_image_ctx.cct, 20) << "flushing " << ops.size() << ", remain "
                                   << m_ops_to_flush.size() << dendl;
      } else {
        ops_remain = false;
      }
    }
    // More work queued than one batch: enlist another flusher to help.
    if (ops_remain) {
      enlist_op_flusher();
    }
    /* Ops subsequently scheduled for flush may finish before these,
     * which is fine. We're unconcerned with completion order until we
     * get to the log message append step. */
    if (ops.size()) {
      flush_pmem_buffer(ops);
      schedule_append_ops(ops, nullptr);
    }
  } while (ops_remain);
  // Payloads are persisted; now append their log entries.
  append_scheduled_ops();
}
/*
* Performs the log event append operation for all of the scheduled
* events.
*/
template <typename I>
void WriteLog<I>::append_scheduled_ops(void) {
  GenericLogOperations ops;
  int append_result = 0;
  bool ops_remain = false;
  bool appending = false; /* true if we set m_appending */
  ldout(m_image_ctx.cct, 20) << dendl;
  do {
    ops.clear();
    // Claim a batch of scheduled ops (base class manages m_appending).
    this->append_scheduled(ops, ops_remain, appending, true);
    if (ops.size()) {
      // Assign ring slots, then copy+persist the entries under the
      // append lock so log order matches ring order.
      std::lock_guard locker(this->m_log_append_lock);
      alloc_op_log_entries(ops);
      append_result = append_op_log_entries(ops);
    }
    int num_ops = ops.size();
    if (num_ops) {
      /* New entries may be flushable. Completion will wake up flusher. */
      this->complete_op_log_entries(std::move(ops), append_result);
    }
  } while (ops_remain);
}
// Queue one asynchronous flush-then-append pass on the work queue.
// The tracker/counter are bumped before queueing and dropped after the
// pass completes so shutdown can wait for in-flight flushers.
template <typename I>
void WriteLog<I>::enlist_op_flusher()
{
  this->m_async_flush_ops++;
  this->m_async_op_tracker.start_op();
  Context *flush_ctx = new LambdaContext([this](int r) {
      flush_then_append_scheduled_ops();
      this->m_async_flush_ops--;
      this->m_async_op_tracker.finish_op();
    });
  this->m_work_queue.queue(flush_ctx);
}
// Choose how to persist a request's data buffers: inline on the
// caller's thread (latency-sensitive path) or deferred to a flusher.
// NOTE: `req` is unused in this (pmem) backend.
template <typename I>
void WriteLog<I>::setup_schedule_append(
    pwl::GenericLogOperationsVector &ops, bool do_early_flush,
    C_BlockIORequestT *req) {
  if (do_early_flush) {
    /* This caller is waiting for persist, so we'll use their thread to
     * expedite it */
    flush_pmem_buffer(ops);
    this->schedule_append(ops);
  } else {
    /* This is probably not still the caller's thread, so do the payload
     * flushing/replicating later. */
    schedule_flush_and_append(ops);
  }
}
/*
* Takes custody of ops. They'll all get their log entries appended,
* and have their on_write_persist contexts completed once they and
* all prior log entries are persisted everywhere.
*/
template <typename I>
void WriteLog<I>::schedule_append_ops(GenericLogOperations &ops, C_BlockIORequestT *req)
{
  bool need_finisher;
  GenericLogOperationsVector appending;
  // Copy before the splice empties `ops`; we still need the list to
  // notify each op below. NOTE: `req` is unused in this backend.
  std::copy(std::begin(ops), std::end(ops), std::back_inserter(appending));
  {
    std::lock_guard locker(m_lock);
    // Only the first batch onto an idle queue needs to start an appender.
    need_finisher = this->m_ops_to_append.empty() && !this->m_appending;
    this->m_ops_to_append.splice(this->m_ops_to_append.end(), ops);
  }
  if (need_finisher) {
    //enlist op appender
    this->m_async_append_ops++;
    this->m_async_op_tracker.start_op();
    Context *append_ctx = new LambdaContext([this](int r) {
        append_scheduled_ops();
        this->m_async_append_ops--;
        this->m_async_op_tracker.finish_op();
      });
    this->m_work_queue.queue(append_ctx);
  }
  // Tell each op it has entered the append phase.
  for (auto &op : appending) {
    op->appending();
  }
}
/*
* Takes custody of ops. They'll all get their pmem blocks flushed,
* then get their log entries appended.
*/
// Take custody of the ops and place them on the flush queue. A flusher
// is enlisted only when the queue was empty — an already-running
// flusher will pick up anything appended while it drains.
template <typename I>
void WriteLog<I>::schedule_flush_and_append(GenericLogOperationsVector &ops)
{
  GenericLogOperations queued(ops.begin(), ops.end());
  bool kick_flusher = false;
  ldout(m_image_ctx.cct, 20) << dendl;
  {
    std::lock_guard locker(m_lock);
    kick_flusher = m_ops_to_flush.empty();
    m_ops_to_flush.splice(m_ops_to_flush.end(), queued);
  }
  if (kick_flusher) {
    enlist_op_flusher();
  }
}
// Background worker: retire old entries when usage crosses the high
// water marks (or after an allocation failure / during invalidate),
// then dispatch deferred writes and write back dirty entries. Loops up
// to max_iterations while wake-ups keep arriving, and reschedules
// itself if another wake-up was requested meanwhile.
template <typename I>
void WriteLog<I>::process_work() {
  CephContext *cct = m_image_ctx.cct;
  int max_iterations = 4;
  bool wake_up_requested = false;
  uint64_t aggressive_high_water_bytes = this->m_bytes_allocated_cap * AGGRESSIVE_RETIRE_HIGH_WATER;
  uint64_t high_water_bytes = this->m_bytes_allocated_cap * RETIRE_HIGH_WATER;
  uint64_t low_water_bytes = this->m_bytes_allocated_cap * RETIRE_LOW_WATER;
  uint64_t aggressive_high_water_entries = this->m_total_log_entries * AGGRESSIVE_RETIRE_HIGH_WATER;
  uint64_t high_water_entries = this->m_total_log_entries * RETIRE_HIGH_WATER;
  uint64_t low_water_entries = this->m_total_log_entries * RETIRE_LOW_WATER;
  ldout(cct, 20) << dendl;
  do {
    {
      std::lock_guard locker(m_lock);
      this->m_wake_up_requested = false;
    }
    if (this->m_alloc_failed_since_retire || this->m_invalidating ||
        this->m_bytes_allocated > high_water_bytes ||
        (m_log_entries.size() > high_water_entries)) {
      int retired = 0;
      utime_t started = ceph_clock_now();
      ldout(m_image_ctx.cct, 10) << "alloc_fail=" << this->m_alloc_failed_since_retire
                                 << ", allocated > high_water="
                                 << (this->m_bytes_allocated > high_water_bytes)
                                 << ", allocated_entries > high_water="
                                 << (m_log_entries.size() > high_water_entries)
                                 << dendl;
      // Retire until below the low water marks, but cap time spent once
      // we're below the high water marks (RETIRE_BATCH_TIME_LIMIT_MS).
      while (this->m_alloc_failed_since_retire || this->m_invalidating ||
            (this->m_bytes_allocated > high_water_bytes) ||
            (m_log_entries.size() > high_water_entries) ||
            (((this->m_bytes_allocated > low_water_bytes) ||
              (m_log_entries.size() > low_water_entries)) &&
            (utime_t(ceph_clock_now() - started).to_msec() < RETIRE_BATCH_TIME_LIMIT_MS))) {
        // Larger batches under pressure (shutdown/invalidate/aggressive marks).
        if (!retire_entries((this->m_shutting_down || this->m_invalidating ||
           (this->m_bytes_allocated > aggressive_high_water_bytes) ||
           (m_log_entries.size() > aggressive_high_water_entries) ||
            this->m_alloc_failed_since_retire)
            ? MAX_ALLOC_PER_TRANSACTION
            : MAX_FREE_PER_TRANSACTION)) {
          break;
        }
        retired++;
        this->dispatch_deferred_writes();
        this->process_writeback_dirty_entries();
      }
      ldout(m_image_ctx.cct, 10) << "Retired " << retired << " times" << dendl;
    }
    this->dispatch_deferred_writes();
    this->process_writeback_dirty_entries();
    {
      std::lock_guard locker(m_lock);
      wake_up_requested = this->m_wake_up_requested;
    }
  } while (wake_up_requested && --max_iterations > 0);
  {
    std::lock_guard locker(m_lock);
    this->m_wake_up_scheduled = false;
    /* Reschedule if it's still requested */
    if (this->m_wake_up_requested) {
      this->wake_up();
    }
  }
}
/*
 * Flush the pmem regions for the data blocks of a set of operations
 *
 * V is expected to be GenericLogOperations<I>, or GenericLogOperationsVector<I>
 */
template <typename I>
template <typename V>
void WriteLog<I>::flush_pmem_buffer(V& ops)
{
  /* Phase 1: stamp the persist start time on every op that owns a data
   * buffer; non-write ops have nothing to persist. */
  utime_t start_time = ceph_clock_now();
  for (auto &op : ops) {
    if (op->reserved_allocated()) {
      op->buf_persist_start_time = start_time;
    } else {
      ldout(m_image_ctx.cct, 20) << "skipping non-write op: "
                                 << *op << dendl;
    }
  }
  /* Phase 2: issue a (non-draining) pmem flush for each write payload. */
  for (auto &op : ops) {
    if (op->is_writing_op()) {
      auto log_entry = std::static_pointer_cast<WriteLogEntry>(op->get_log_entry());
      pmemobj_flush(m_log_pool, log_entry->cache_buffer, log_entry->write_bytes());
    }
  }
  /* Drain once for all */
  pmemobj_drain(m_log_pool);
  /* Phase 3: record the completion time now that the data is durable. */
  utime_t comp_time = ceph_clock_now();
  for (auto &op : ops) {
    if (op->reserved_allocated()) {
      op->buf_persist_comp_time = comp_time;
    } else {
      ldout(m_image_ctx.cct, 20) << "skipping non-write op: "
                                 << *op << dendl;
    }
  }
}
/**
 * Update/persist the last flushed sync point in the log.
 *
 * Reads the in-memory m_flushed_sync_gen (under m_lock) and, if it is ahead
 * of the value stored in the pool root, writes it back in a pmem transaction.
 * Holds m_log_append_lock for the duration so log appends cannot race with
 * the root update.
 */
template <typename I>
void WriteLog<I>::persist_last_flushed_sync_gen()
{
  TOID(struct WriteLogPoolRoot) pool_root;
  pool_root = POBJ_ROOT(m_log_pool, struct WriteLogPoolRoot);
  uint64_t flushed_sync_gen;
  std::lock_guard append_locker(this->m_log_append_lock);
  {
    /* m_lock guards the in-memory generation; take it only for the read. */
    std::lock_guard locker(m_lock);
    flushed_sync_gen = this->m_flushed_sync_gen;
  }
  /* Only persist when the in-memory generation is ahead of what's on media. */
  if (D_RO(pool_root)->flushed_sync_gen < flushed_sync_gen) {
    ldout(m_image_ctx.cct, 15) << "flushed_sync_gen in log updated from "
                               << D_RO(pool_root)->flushed_sync_gen << " to "
                               << flushed_sync_gen << dendl;
    /* A failed transaction here leaves the log inconsistent, hence the
     * assert on abort. */
    TX_BEGIN(m_log_pool) {
      D_RW(pool_root)->flushed_sync_gen = flushed_sync_gen;
    } TX_ONCOMMIT {
    } TX_ONABORT {
      lderr(m_image_ctx.cct) << "failed to commit update of flushed sync point" << dendl;
      ceph_assert(false);
    } TX_FINALLY {
    } TX_END;
  }
}
/*
 * Reserve a pmem data buffer for every allocation this request needs.
 *
 * Stops at the first reservation failure, signalling the caller (via
 * no_space) that log entries need to be retired before retrying.
 */
template <typename I>
void WriteLog<I>::reserve_cache(C_BlockIORequestT *req,
                                bool &alloc_succeeds, bool &no_space) {
  for (auto &buffer : req->get_resources_buffers()) {
    const utime_t reserve_start = ceph_clock_now();
    buffer.buffer_oid = pmemobj_reserve(m_log_pool,
                                        &buffer.buffer_alloc_action,
                                        buffer.allocation_size,
                                        0 /* Object type */);
    buffer.allocation_lat = ceph_clock_now() - reserve_start;
    if (!TOID_IS_NULL(buffer.buffer_oid)) {
      buffer.allocated = true;
      ldout(m_image_ctx.cct, 20) << "Allocated " << buffer.buffer_oid.oid.pool_uuid_lo
                                 << "." << buffer.buffer_oid.oid.off
                                 << ", size=" << buffer.allocation_size << dendl;
      continue;
    }
    /* Reservation failed: give up on the remaining buffers. */
    ldout(m_image_ctx.cct, 5) << "can't allocate all data buffers: "
                              << pmemobj_errormsg() << ". "
                              << *req << dendl;
    alloc_succeeds = false;
    no_space = true; /* Entries need to be retired */
    if (this->m_free_log_entries == this->m_total_log_entries - 1) {
      /* When the cache is empty, there is still no space to allocate.
       * Defragment. */
      pmemobj_defrag(m_log_pool, NULL, 0, NULL);
    }
    break;
  }
}
/*
 * Copy each operation's data into its matching reserved cache buffer.
 * Operations and buffer allocations are parallel sequences, walked in step.
 */
template<typename I>
void WriteLog<I>::copy_bl_to_buffer(
    WriteRequestResources *resources, std::unique_ptr<WriteLogOperationSet> &op_set) {
  auto alloc_it = resources->buffers.begin();
  for (auto &op : op_set->operations) {
    op->copy_bl_to_cache_buffer(alloc_it++);
  }
}
/*
 * Attempt to allocate all resources (log entries, lanes, pmem data buffers)
 * a block IO request needs before it can be dispatched.
 *
 * Returns true (and marks the request allocated) on success.  On failure any
 * pmem buffer reservations made along the way are cancelled so nothing leaks.
 */
template <typename I>
bool WriteLog<I>::alloc_resources(C_BlockIORequestT *req) {
  bool alloc_succeeds = true;
  uint64_t bytes_allocated = 0;
  uint64_t bytes_cached = 0;
  uint64_t bytes_dirtied = 0;
  uint64_t num_lanes = 0;
  uint64_t num_unpublished_reserves = 0;
  uint64_t num_log_entries = 0;
  ldout(m_image_ctx.cct, 20) << dendl;
  // Setup buffer, and get all the number of required resources
  req->setup_buffer_resources(&bytes_cached, &bytes_dirtied, &bytes_allocated,
                              &num_lanes, &num_log_entries, &num_unpublished_reserves);
  /* check_allocation also performs the buffer reservations (via
   * reserve_cache), so partial allocations can exist on failure. */
  alloc_succeeds = this->check_allocation(req, bytes_cached, bytes_dirtied,
                                          bytes_allocated, num_lanes, num_log_entries,
                                          num_unpublished_reserves);
  std::vector<WriteBufferAllocation>& buffers = req->get_resources_buffers();
  if (!alloc_succeeds) {
    /* On alloc failure, free any buffers we did allocate */
    for (auto &buffer : buffers) {
      if (buffer.allocated) {
        pmemobj_cancel(m_log_pool, &buffer.buffer_alloc_action, 1);
      }
    }
  }
  req->set_allocated(alloc_succeeds);
  return alloc_succeeds;
}
/*
 * Complete the user's context with result r, then clear the caller-visible
 * pointer so the (now deleted) context cannot be touched again.
 */
template <typename I>
void WriteLog<I>::complete_user_request(Context *&user_req, int r) {
  Context *ctx = user_req;
  ctx->complete(r);
  user_req = nullptr;
}
} // namespace rwl
} // namespace pwl
} // namespace cache
} // namespace librbd
template class librbd::cache::pwl::rwl::WriteLog<librbd::ImageCtx>;
| 36,933 | 35.496047 | 116 | cc |
null | ceph-main/src/librbd/cache/pwl/rwl/WriteLog.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_REPLICATED_WRITE_LOG
#define CEPH_LIBRBD_CACHE_REPLICATED_WRITE_LOG
#include <functional>
#include <libpmemobj.h>
#include <list>
#include "common/Timer.h"
#include "common/RWLock.h"
#include "common/WorkQueue.h"
#include "common/AsyncOpTracker.h"
#include "librbd/cache/ImageWriteback.h"
#include "librbd/Utils.h"
#include "librbd/BlockGuard.h"
#include "librbd/cache/Types.h"
#include "librbd/cache/pwl/AbstractWriteLog.h"
#include "librbd/cache/pwl/LogMap.h"
#include "librbd/cache/pwl/LogOperation.h"
#include "librbd/cache/pwl/Request.h"
#include "librbd/cache/pwl/rwl/Builder.h"
class Context;
namespace librbd {
struct ImageCtx;
namespace cache {
namespace pwl {
namespace rwl {
/**
 * Replicated (pmem/RWL) flavour of the persistent write-back cache.
 *
 * Specializes AbstractWriteLog with a libpmemobj-backed log pool: log
 * entries and data buffers live in a PMEMobjpool and are persisted with
 * pmem flush/drain and pmemobj transactions.
 */
template <typename ImageCtxT>
class WriteLog : public AbstractWriteLog<ImageCtxT> {
public:
  WriteLog(
      ImageCtxT &image_ctx, librbd::cache::pwl::ImageCacheState<ImageCtxT>* cache_state,
      ImageWritebackInterface& image_writeback,
      plugin::Api<ImageCtxT>& plugin_api);
  ~WriteLog();
  WriteLog(const WriteLog&) = delete;
  WriteLog &operator=(const WriteLog&) = delete;
  typedef io::Extent Extent;
  using This = AbstractWriteLog<ImageCtxT>;
  using C_WriteRequestT = pwl::C_WriteRequest<This>;
  using C_WriteSameRequestT = pwl::C_WriteSameRequest<This>;
  void copy_bl_to_buffer(
      WriteRequestResources *resources, std::unique_ptr<WriteLogOperationSet> &op_set) override;
  void complete_user_request(Context *&user_req, int r) override;
private:
  using C_BlockIORequestT = pwl::C_BlockIORequest<This>;
  using C_FlushRequestT = pwl::C_FlushRequest<This>;
  using C_DiscardRequestT = pwl::C_DiscardRequest<This>;
  // Handle to the libpmemobj pool backing this log; null until initialized.
  PMEMobjpool *m_log_pool = nullptr;
  Builder<This> *m_builderobj;
  const char* m_pwl_pool_layout_name;
  // Largest extent a single log entry may cover (1 MiB).
  const uint64_t MAX_EXTENT_SIZE = 1048576;
  Builder<This>* create_builder();
  void remove_pool_file();
  void load_existing_entries(pwl::DeferredContexts &later);
  void alloc_op_log_entries(pwl::GenericLogOperations &ops);
  int append_op_log_entries(pwl::GenericLogOperations &ops);
  void flush_then_append_scheduled_ops(void);
  void enlist_op_flusher();
  void flush_op_log_entries(pwl::GenericLogOperationsVector &ops);
  // V is GenericLogOperations<I> or GenericLogOperationsVector<I>.
  template <typename V>
  void flush_pmem_buffer(V& ops);
  void inc_allocated_cached_bytes(
      std::shared_ptr<pwl::GenericLogEntry> log_entry) override;
protected:
  using AbstractWriteLog<ImageCtxT>::m_lock;
  using AbstractWriteLog<ImageCtxT>::m_log_entries;
  using AbstractWriteLog<ImageCtxT>::m_image_ctx;
  using AbstractWriteLog<ImageCtxT>::m_perfcounter;
  using AbstractWriteLog<ImageCtxT>::m_ops_to_flush;
  using AbstractWriteLog<ImageCtxT>::m_cache_state;
  using AbstractWriteLog<ImageCtxT>::m_first_free_entry;
  using AbstractWriteLog<ImageCtxT>::m_first_valid_entry;
  // Overrides of the AbstractWriteLog media-specific hooks.
  void process_work() override;
  void schedule_append_ops(pwl::GenericLogOperations &ops, C_BlockIORequestT *req) override;
  void append_scheduled_ops(void) override;
  void reserve_cache(C_BlockIORequestT *req,
                     bool &alloc_succeeds, bool &no_space) override;
  void collect_read_extents(
      uint64_t read_buffer_offset, LogMapEntry<GenericWriteLogEntry> map_entry,
      std::vector<std::shared_ptr<GenericWriteLogEntry>> &log_entries_to_read,
      std::vector<bufferlist*> &bls_to_read, uint64_t entry_hit_length,
      Extent hit_extent, pwl::C_ReadRequest *read_ctx) override;
  void complete_read(
      std::vector<std::shared_ptr<GenericWriteLogEntry>> &log_entries_to_read,
      std::vector<bufferlist*> &bls_to_read, Context *ctx) override;
  bool retire_entries(const unsigned long int frees_per_tx) override;
  void persist_last_flushed_sync_gen() override;
  bool alloc_resources(C_BlockIORequestT *req) override;
  void schedule_flush_and_append(pwl::GenericLogOperationsVector &ops) override;
  void setup_schedule_append(
      pwl::GenericLogOperationsVector &ops, bool do_early_flush,
      C_BlockIORequestT *req) override;
  void construct_flush_entries(pwl::GenericLogEntries entries_to_flush,
                               DeferredContexts &post_unlock,
                               bool has_write_entry) override;
  bool initialize_pool(Context *on_finish, pwl::DeferredContexts &later) override;
  void write_data_to_buffer(
      std::shared_ptr<pwl::WriteLogEntry> ws_entry,
      pwl::WriteLogCacheEntry *pmem_entry) override;
  uint64_t get_max_extent() override {
    return MAX_EXTENT_SIZE;
  }
};
} // namespace rwl
} // namespace pwl
} // namespace cache
} // namespace librbd
extern template class librbd::cache::pwl::rwl::WriteLog<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CACHE_REPLICATED_WRITE_LOG
| 4,695 | 36.568 | 96 | h |
null | ceph-main/src/librbd/cache/pwl/ssd/Builder.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_PWL_SSD_BUILDER_H
#define CEPH_LIBRBD_CACHE_PWL_SSD_BUILDER_H
#include <iostream>
#include "LogEntry.h"
#include "ReadRequest.h"
#include "Request.h"
#include "LogOperation.h"
#include "librbd/cache/ImageWriteback.h"
#include "librbd/cache/pwl/Builder.h"
namespace librbd {
namespace cache {
namespace pwl {
namespace ssd {
/**
 * Factory for the SSD-specific flavours of log entries, requests and log
 * operations.  AbstractWriteLog calls through this interface so the shared
 * code never names the media-specific concrete types.
 *
 * NOTE(review): the operation/read-request factories below are not marked
 * `override` while the entry/request factories are — presumably all are
 * virtual in pwl::Builder<T>; confirm against the base class.
 */
template <typename T>
class Builder : public pwl::Builder<T> {
public:
  // --- log entry factories ---
  std::shared_ptr<pwl::WriteLogEntry> create_write_log_entry(
      uint64_t image_offset_bytes, uint64_t write_bytes) override {
    return std::make_shared<WriteLogEntry>(image_offset_bytes, write_bytes);
  }
  std::shared_ptr<pwl::WriteLogEntry> create_write_log_entry(
      std::shared_ptr<SyncPointLogEntry> sync_point_entry,
      uint64_t image_offset_bytes, uint64_t write_bytes) override {
    return std::make_shared<WriteLogEntry>(
        sync_point_entry, image_offset_bytes, write_bytes);
  }
  std::shared_ptr<pwl::WriteLogEntry> create_writesame_log_entry(
      uint64_t image_offset_bytes, uint64_t write_bytes,
      uint32_t data_length) override {
    return std::make_shared<WriteSameLogEntry>(
        image_offset_bytes, write_bytes, data_length);
  }
  std::shared_ptr<pwl::WriteLogEntry> create_writesame_log_entry(
      std::shared_ptr<SyncPointLogEntry> sync_point_entry,
      uint64_t image_offset_bytes, uint64_t write_bytes,
      uint32_t data_length) override {
    return std::make_shared<WriteSameLogEntry>(
        sync_point_entry, image_offset_bytes, write_bytes, data_length);
  }
  // --- request factories ---
  pwl::C_WriteRequest<T> *create_write_request(
      T &pwl, utime_t arrived, io::Extents &&image_extents,
      bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req) override {
    return new C_WriteRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(bl),
        fadvise_flags, lock, perfcounter, user_req);
  }
  pwl::C_WriteSameRequest<T> *create_writesame_request(
      T &pwl, utime_t arrived, io::Extents &&image_extents,
      bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req) override {
    return new C_WriteSameRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(bl),
        fadvise_flags, lock, perfcounter, user_req);
  }
  pwl::C_WriteRequest<T> *create_comp_and_write_request(
      T &pwl, utime_t arrived, io::Extents &&image_extents,
      bufferlist&& cmp_bl, bufferlist&& bl, uint64_t *mismatch_offset,
      const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req) override {
    return new C_CompAndWriteRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(cmp_bl),
        std::move(bl), mismatch_offset, fadvise_flags,
        lock, perfcounter, user_req);
  }
  // --- log operation factories ---
  std::shared_ptr<pwl::WriteLogOperation> create_write_log_operation(
      WriteLogOperationSet &set, uint64_t image_offset_bytes,
      uint64_t write_bytes, CephContext *cct,
      std::shared_ptr<pwl::WriteLogEntry> write_log_entry) {
    return std::make_shared<WriteLogOperation>(
        set, image_offset_bytes, write_bytes, cct, write_log_entry);
  }
  std::shared_ptr<pwl::WriteLogOperation> create_write_log_operation(
      WriteLogOperationSet &set, uint64_t image_offset_bytes,
      uint64_t write_bytes, uint32_t data_len, CephContext *cct,
      std::shared_ptr<pwl::WriteLogEntry> writesame_log_entry) {
    return std::make_shared<WriteLogOperation>(
        set, image_offset_bytes, write_bytes, data_len, cct,
        writesame_log_entry);
  }
  std::shared_ptr<pwl::DiscardLogOperation> create_discard_log_operation(
      std::shared_ptr<SyncPoint> sync_point, uint64_t image_offset_bytes,
      uint64_t write_bytes, uint32_t discard_granularity_bytes,
      utime_t dispatch_time, PerfCounters *perfcounter, CephContext *cct) {
    return std::make_shared<DiscardLogOperation>(
        sync_point, image_offset_bytes, write_bytes, discard_granularity_bytes,
        dispatch_time, perfcounter, cct);
  }
  C_ReadRequest *create_read_request(CephContext *cct, utime_t arrived,
      PerfCounters *perfcounter, ceph::bufferlist *bl, Context *on_finish) {
    return new C_ReadRequest(cct, arrived, perfcounter, bl, on_finish);
  }
};
} // namespace ssd
} // namespace pwl
} // namespace cache
} // namespace librbd
#endif // CEPH_LIBRBD_CACHE_PWL_SSD_BUILDER_H
| 4,536 | 40.623853 | 79 | h |
null | ceph-main/src/librbd/cache/pwl/ssd/LogEntry.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/cache/ImageWriteback.h"
#include "librbd/cache/pwl/ssd/LogEntry.h"
#define dout_subsys ceph_subsys_rbd_pwl
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::pwl::ssd::WriteLogEntry: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace cache {
namespace pwl {
namespace ssd {
// Point cache_bl at a [off, off+len) slice of src_bl (shares buffers, no
// data copy).  NOTE(review): unlike copy/remove below this does not take
// m_entry_bl_lock — presumably callers invoke it before the entry is visible
// to readers; confirm.
void WriteLogEntry::init_cache_bl(
    bufferlist &src_bl, uint64_t off, uint64_t len) {
  cache_bl.clear();
  cache_bl.substr_of(src_bl, off, len);
}
// Return a reference to the in-memory copy of this entry's data (unlocked
// access; use copy_cache_bl() for a locked snapshot).
buffer::list& WriteLogEntry::get_cache_bl() {
  return cache_bl;
}
// Snapshot this entry's cached data into *out under m_entry_bl_lock.
void WriteLogEntry::copy_cache_bl(bufferlist *out) {
  std::lock_guard locker(m_entry_bl_lock);
  *out = cache_bl;
}
// Drop the in-memory copy of this entry's data (the on-SSD copy remains),
// serialized against concurrent readers via m_entry_bl_lock.
void WriteLogEntry::remove_cache_bl() {
  std::lock_guard locker(m_entry_bl_lock);
  cache_bl.clear();
}
// Size of this entry's data rounded up to the SSD allocation unit.  Uses the
// in-memory buffer length when one is held, otherwise the logical write size.
unsigned int WriteLogEntry::get_aligned_data_size() const {
  const uint64_t unaligned =
      cache_bl.length() ? cache_bl.length() : write_bytes();
  return round_up_to(unaligned, MIN_WRITE_ALLOC_SSD_SIZE);
}
// Write this entry's data back to the RBD image at the offset/length recorded
// in the on-media entry; ctx is completed when the image write finishes.
void WriteLogEntry::writeback_bl(
    librbd::cache::ImageWritebackInterface &image_writeback,
    Context *ctx, ceph::bufferlist&& bl) {
  image_writeback.aio_write({{ram_entry.image_offset_bytes,
                              ram_entry.write_bytes}},
                            std::move(bl), 0, ctx);
}
// Write back a write-same entry: bl holds only the repeating pattern, so the
// image layer replicates it across the full [offset, offset+write_bytes)
// range via aio_writesame.
void WriteSameLogEntry::writeback_bl(
    librbd::cache::ImageWritebackInterface &image_writeback,
    Context *ctx, ceph::bufferlist &&bl) {
  image_writeback.aio_writesame(ram_entry.image_offset_bytes,
                                ram_entry.write_bytes,
                                std::move(bl), 0, ctx);
}
} // namespace ssd
} // namespace pwl
} // namespace cache
} // namespace librbd
| 1,847 | 27.875 | 74 | cc |
null | ceph-main/src/librbd/cache/pwl/ssd/LogEntry.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// // vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_PWL_SSD_LOG_ENTRY_H
#define CEPH_LIBRBD_CACHE_PWL_SSD_LOG_ENTRY_H
#include "librbd/cache/pwl/LogEntry.h"
namespace librbd {
namespace cache {
class ImageWritebackInterface;
namespace pwl {
namespace ssd {
/**
 * SSD-backed write log entry: data lives in cache_bl (and on the SSD ring
 * buffer) rather than in a pmem buffer.  Adds simple reference counting
 * (bl_refs) so the cached bufferlist is kept while reads are in flight.
 */
class WriteLogEntry : public pwl::WriteLogEntry {
public:
  WriteLogEntry(
      std::shared_ptr<SyncPointLogEntry> sync_point_entry,
      uint64_t image_offset_bytes, uint64_t write_bytes)
    : pwl::WriteLogEntry(sync_point_entry, image_offset_bytes, write_bytes) {}
  WriteLogEntry(
      uint64_t image_offset_bytes, uint64_t write_bytes)
    : pwl::WriteLogEntry(image_offset_bytes, write_bytes) {}
  WriteLogEntry(
      std::shared_ptr<SyncPointLogEntry> sync_point_entry,
      uint64_t image_offset_bytes, uint64_t write_bytes,
      uint32_t data_length)
    : pwl::WriteLogEntry(sync_point_entry, image_offset_bytes,
                         write_bytes, data_length) {}
  WriteLogEntry(
      uint64_t image_offset_bytes, uint64_t write_bytes,
      uint32_t data_length)
    : pwl::WriteLogEntry(image_offset_bytes, write_bytes, data_length) {}
  ~WriteLogEntry() {}
  WriteLogEntry(const WriteLogEntry&) = delete;
  WriteLogEntry &operator=(const WriteLogEntry&) = delete;
  void writeback_bl(librbd::cache::ImageWritebackInterface &image_writeback,
                    Context *ctx, ceph::bufferlist &&bl) override;
  void init_cache_bl(bufferlist &src_bl, uint64_t off, uint64_t len) override;
  buffer::list &get_cache_bl() override;
  void copy_cache_bl(bufferlist *out) override;
  void remove_cache_bl() override;
  unsigned int get_aligned_data_size() const override;
  // Track in-flight readers of cache_bl.
  void inc_bl_refs() { bl_refs++; };
  void dec_bl_refs() { bl_refs--; };
  unsigned int reader_count() const override {
    return bl_refs;
  }
};
/**
 * SSD write-same entry: stores only the repeating pattern and overrides
 * writeback to replicate it across the target range.
 */
class WriteSameLogEntry : public WriteLogEntry {
public:
  WriteSameLogEntry(
      std::shared_ptr<SyncPointLogEntry> sync_point_entry,
      uint64_t image_offset_bytes, uint64_t write_bytes,
      uint32_t data_length)
    : WriteLogEntry(sync_point_entry, image_offset_bytes,
                    write_bytes, data_length) {}
  WriteSameLogEntry(
      uint64_t image_offset_bytes, uint64_t write_bytes,
      uint32_t data_length)
    : WriteLogEntry(image_offset_bytes, write_bytes, data_length) {}
  ~WriteSameLogEntry() {}
  WriteSameLogEntry(const WriteSameLogEntry&) = delete;
  WriteSameLogEntry &operator=(const WriteSameLogEntry&) = delete;
  void writeback_bl(librbd::cache::ImageWritebackInterface &image_writeback,
                    Context *ctx, ceph::bufferlist &&bl) override;
};
} // namespace ssd
} // namespace pwl
} // namespace cache
} // namespace librbd
#endif // CEPH_LIBRBD_CACHE_PWL_SSD_LOG_ENTRY_H
| 2,804 | 35.907895 | 78 | h |
null | ceph-main/src/librbd/cache/pwl/ssd/LogOperation.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "LogOperation.h"
#define dout_subsys ceph_subsys_rbd_pwl
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::pwl::ssd::LogOperation: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace cache {
namespace pwl {
namespace ssd {
// Initialize the discard's log entry and wire up its completion contexts.
// A discard carries no data payload, so in persist-on-flush mode there is no
// separate persist step: both contexts fire from on_write_append (append
// first, then persist) and on_write_persist stays unset.
void DiscardLogOperation::init_op(
    uint64_t current_sync_gen, bool persist_on_flush,
    uint64_t last_op_sequence_num, Context *write_persist,
    Context *write_append) {
  log_entry->init(current_sync_gen, persist_on_flush, last_op_sequence_num);
  if (persist_on_flush) {
    this->on_write_append = new LambdaContext(
      [write_persist, write_append] (int r) {
        write_append->complete(r);
        write_persist->complete(r);
      });
  } else {
    this->on_write_append = write_append;
    this->on_write_persist = write_persist;
  }
}
} // namespace ssd
} // namespace pwl
} // namespace cache
} // namespace librbd
| 1,036 | 27.027027 | 76 | cc |
null | ceph-main/src/librbd/cache/pwl/ssd/LogOperation.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_PWL_SSD_LOG_OPERATION_H
#define CEPH_LIBRBD_CACHE_PWL_SSD_LOG_OPERATION_H
#include "librbd/cache/pwl/LogOperation.h"
namespace librbd {
namespace cache {
namespace pwl {
namespace ssd {
/**
 * SSD discard operation: overrides init_op to collapse the persist step into
 * the append completion (a discard has no data payload to persist).
 */
class DiscardLogOperation : public pwl::DiscardLogOperation {
public:
  DiscardLogOperation(
      std::shared_ptr<SyncPoint> sync_point, uint64_t image_offset_bytes,
      uint64_t write_bytes, uint32_t discard_granularity_bytes,
      utime_t dispatch_time, PerfCounters *perfcounter, CephContext *cct)
    : pwl::DiscardLogOperation(sync_point, image_offset_bytes, write_bytes,
                               discard_granularity_bytes, dispatch_time,
                               perfcounter, cct) {}
  void init_op(
      uint64_t current_sync_gen, bool persist_on_flush,
      uint64_t last_op_sequence_num, Context *write_persist,
      Context *write_append) override;
};
} // namespace ssd
} // namespace pwl
} // namespace cache
} // namespace librbd
#endif // CEPH_LIBRBD_CACHE_PWL_SSD_LOG_OPERATION_H
| 1,131 | 30.444444 | 75 | h |
null | ceph-main/src/librbd/cache/pwl/ssd/ReadRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "ReadRequest.h"
#define dout_subsys ceph_subsys_rbd_pwl
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::pwl::ssd::ReadRequest: " << this << " " \
<< __func__ << ": "
namespace librbd {
namespace cache {
namespace pwl {
namespace ssd {
/*
 * Assemble the final read result and complete the user's read.
 *
 * On success (r >= 0), walks read_extents in order: extents whose m_bl was
 * populated (RAM or SSD hit) contribute their cached data; the rest are
 * satisfied from miss_bl, consumed sequentially.  Fixes:
 *  - guard the writesame replication loop against unsigned underflow when the
 *    needed byte count is not an exact multiple of the pattern length (the
 *    old `total_left_bytes - data_len` wrapped and looped unboundedly);
 *  - use ceph_assert (consistent with the length check below, and active in
 *    release builds) instead of bare assert;
 *  - iterate read_extents by reference to avoid a shared_ptr copy per extent.
 */
void C_ReadRequest::finish(int r) {
  ldout(m_cct, 20) << "(" << get_name() << "): r=" << r << dendl;
  int hits = 0;
  int misses = 0;
  int hit_bytes = 0;
  int miss_bytes = 0;
  if (r >= 0) {
    /*
     * At this point the miss read has completed. We'll iterate through
     * m_read_extents and produce *m_out_bl by assembling pieces of m_miss_bl
     * and the individual hit extent bufs in the read extents that represent
     * hits.
     */
    uint64_t miss_bl_offset = 0;
    for (auto &extent : read_extents) {
      if (extent->m_bl.length()) {
        /* This was a hit */
        bufferlist data_bl;
        if (extent->writesame) {
          int data_len = extent->m_bl.length();
          int read_buffer_offset = extent->truncate_offset;
          if (extent->need_to_truncate && extent->truncate_offset >= data_len) {
            read_buffer_offset = (extent->truncate_offset) % data_len;
          }
          /* Replicate the writesame pattern until it covers the requested
           * region, then cut out the [read_buffer_offset, +second) span. */
          bufferlist temp_bl;
          uint64_t total_left_bytes = read_buffer_offset + extent->second;
          while (total_left_bytes > 0) {
            temp_bl.append(extent->m_bl);
            /* Clamp at zero: the remainder need not be a multiple of the
             * pattern length, and uint64_t subtraction would wrap. */
            total_left_bytes =
                (total_left_bytes > static_cast<uint64_t>(data_len))
                    ? total_left_bytes - data_len : 0;
          }
          data_bl.substr_of(temp_bl, read_buffer_offset, extent->second);
          m_out_bl->claim_append(data_bl);
        } else if (extent->need_to_truncate) {
          ceph_assert(extent->m_bl.length() >= extent->truncate_offset + extent->second);
          data_bl.substr_of(extent->m_bl, extent->truncate_offset, extent->second);
          m_out_bl->claim_append(data_bl);
        } else {
          ceph_assert(extent->second == extent->m_bl.length());
          m_out_bl->claim_append(extent->m_bl);
        }
        ++hits;
        hit_bytes += extent->second;
      } else {
        /* This was a miss. */
        ++misses;
        miss_bytes += extent->second;
        bufferlist miss_extent_bl;
        miss_extent_bl.substr_of(miss_bl, miss_bl_offset, extent->second);
        /* Add this read miss bufferlist to the output bufferlist */
        m_out_bl->claim_append(miss_extent_bl);
        /* Consume these bytes in the read miss bufferlist */
        miss_bl_offset += extent->second;
      }
    }
  }
  ldout(m_cct, 20) << "(" << get_name() << "): r=" << r << " bl=" << *m_out_bl << dendl;
  utime_t now = ceph_clock_now();
  ceph_assert((int)m_out_bl->length() == hit_bytes + miss_bytes);
  m_on_finish->complete(r);
  m_perfcounter->inc(l_librbd_pwl_rd_bytes, hit_bytes + miss_bytes);
  m_perfcounter->inc(l_librbd_pwl_rd_hit_bytes, hit_bytes);
  m_perfcounter->tinc(l_librbd_pwl_rd_latency, now - m_arrived_time);
  if (!misses) {
    m_perfcounter->inc(l_librbd_pwl_rd_hit_req, 1);
    m_perfcounter->tinc(l_librbd_pwl_rd_hit_latency, now - m_arrived_time);
  } else {
    if (hits) {
      m_perfcounter->inc(l_librbd_pwl_rd_part_hit_req, 1);
    }
  }
}
} // namespace ssd
} // namespace pwl
} // namespace cache
} // namespace librbd
| 3,374 | 35.290323 | 88 | cc |
null | ceph-main/src/librbd/cache/pwl/ssd/ReadRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_PWL_SSD_READ_REQUEST_H
#define CEPH_LIBRBD_CACHE_PWL_SSD_READ_REQUEST_H
#include "librbd/cache/pwl/ReadRequest.h"
namespace librbd {
namespace cache {
namespace pwl {
namespace ssd {
typedef std::vector<pwl::ImageExtentBuf> ImageExtentBufs;
/**
 * SSD read request: overrides finish() to assemble the result bufferlist
 * from cached hit extents (including writesame pattern expansion) and the
 * miss-read buffer.
 */
class C_ReadRequest : public pwl::C_ReadRequest {
protected:
  using pwl::C_ReadRequest::m_cct;
  using pwl::C_ReadRequest::m_on_finish;
  using pwl::C_ReadRequest::m_out_bl;
  using pwl::C_ReadRequest::m_arrived_time;
  using pwl::C_ReadRequest::m_perfcounter;
public:
  C_ReadRequest(CephContext *cct, utime_t arrived, PerfCounters *perfcounter, bufferlist *out_bl, Context *on_finish)
    : pwl::C_ReadRequest(cct, arrived, perfcounter, out_bl, on_finish) {}
  void finish(int r) override;
};
} // namespace ssd
} // namespace pwl
} // namespace cache
} // namespace librbd
#endif // CEPH_LIBRBD_CACHE_PWL_SSD_READ_REQUEST_H
| 1,000 | 27.6 | 117 | h |
null | ceph-main/src/librbd/cache/pwl/ssd/Request.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "Request.h"
#define dout_subsys ceph_subsys_rbd_pwl
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::pwl::ssd::Request: " << this << " " \
<< __func__ << ": "
namespace librbd {
namespace cache {
namespace pwl {
namespace ssd {
/*
 * Report the resources this write needs: one log entry per image extent,
 * the sum of extent lengths as cached/dirtied bytes, and the SSD-aligned
 * sum as allocated bytes.  Lanes/unpublished reserves are untouched (not
 * used by the SSD cache).
 */
template <typename T>
void C_WriteRequest<T>::setup_buffer_resources(
    uint64_t *bytes_cached, uint64_t *bytes_dirtied, uint64_t *bytes_allocated,
    uint64_t *number_lanes, uint64_t *number_log_entries,
    uint64_t *number_unpublished_reserves) {
  *number_log_entries = this->image_extents.size();
  uint64_t cached = 0;
  uint64_t allocated = 0;
  for (auto &extent : this->image_extents) {
    cached += extent.second;
    allocated += round_up_to(extent.second, MIN_WRITE_ALLOC_SSD_SIZE);
  }
  *bytes_cached = cached;
  *bytes_allocated = allocated;
  *bytes_dirtied = cached;
}
template <typename T>
std::ostream &operator<<(std::ostream &os,
const C_CompAndWriteRequest<T> &req) {
os << (C_WriteRequest<T>&)req
<< " cmp_bl=" << req.cmp_bl
<< ", read_bl=" << req.read_bl
<< ", compare_succeeded=" << req.compare_succeeded
<< ", mismatch_offset=" << req.mismatch_offset;
return os;
}
/*
 * Report the resources a write-same needs.  Exactly one extent and one log
 * entry; the whole extent becomes dirty, but only the (short) pattern is
 * cached and allocated (SSD-aligned).
 */
template <typename T>
void C_WriteSameRequest<T>::setup_buffer_resources(
    uint64_t *bytes_cached, uint64_t *bytes_dirtied, uint64_t *bytes_allocated,
    uint64_t *number_lanes, uint64_t *number_log_entries,
    uint64_t *number_unpublished_reserves) {
  ceph_assert(this->image_extents.size() == 1);
  *number_log_entries = 1;
  *bytes_dirtied = this->image_extents[0].second;
  const uint64_t pattern_len = this->bl.length();
  *bytes_cached = pattern_len;
  *bytes_allocated = round_up_to(pattern_len, MIN_WRITE_ALLOC_SSD_SIZE);
}
} // namespace ssd
} // namespace pwl
} // namespace cache
} // namespace librbd
template class librbd::cache::pwl::ssd::C_WriteRequest<librbd::cache::pwl::AbstractWriteLog<librbd::ImageCtx> >;
template class librbd::cache::pwl::ssd::C_WriteSameRequest<librbd::cache::pwl::AbstractWriteLog<librbd::ImageCtx> >;
template class librbd::cache::pwl::ssd::C_CompAndWriteRequest<librbd::cache::pwl::AbstractWriteLog<librbd::ImageCtx> >;
| 2,219 | 33.6875 | 119 | cc |
null | ceph-main/src/librbd/cache/pwl/ssd/Request.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_SSD_REQUEST_H
#define CEPH_LIBRBD_CACHE_SSD_REQUEST_H
#include "librbd/cache/pwl/Request.h"
namespace librbd {
class BlockGuardCell;
namespace cache {
namespace pwl {
template<typename T>
class AbstractWriteLog;
namespace ssd {
/**
 * SSD write request: forwards construction to the generic pwl write request
 * and supplies SSD-specific buffer-resource accounting.
 */
template <typename T>
class C_WriteRequest : public pwl::C_WriteRequest<T> {
public:
  // Compare-and-write form (cmp_bl is the expected data).
  C_WriteRequest(
      T &pwl, const utime_t arrived, io::Extents &&image_extents,
      bufferlist&& cmp_bl, bufferlist&& bl, uint64_t *mismatch_offset,
      const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req)
    : pwl::C_WriteRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(cmp_bl),
        std::move(bl), mismatch_offset, fadvise_flags,
        lock, perfcounter, user_req) {}
  // Plain write form.
  C_WriteRequest(
      T &pwl, const utime_t arrived, io::Extents &&image_extents,
      bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req)
    : pwl::C_WriteRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(bl),
        fadvise_flags, lock, perfcounter, user_req) {}
protected:
  void setup_buffer_resources(
      uint64_t *bytes_cached, uint64_t *bytes_dirtied,
      uint64_t *bytes_allocated, uint64_t *number_lanes,
      uint64_t *number_log_entries,
      uint64_t *number_unpublished_reserves) override;
};
/**
 * SSD compare-and-write request: same behavior as C_WriteRequest's
 * compare-and-write form, with its own name and stream operator.
 */
template <typename T>
class C_CompAndWriteRequest : public C_WriteRequest<T> {
public:
  C_CompAndWriteRequest(
      T &pwl, const utime_t arrived, io::Extents &&image_extents,
      bufferlist&& cmp_bl, bufferlist&& bl, uint64_t *mismatch_offset,
      const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req)
    : C_WriteRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(cmp_bl),
        std::move(bl), mismatch_offset,fadvise_flags,
        lock, perfcounter, user_req) {}
  const char *get_name() const override {
    return "C_CompAndWriteRequest";
  }
  template <typename U>
  friend std::ostream &operator<<(std::ostream &os,
                                  const C_CompAndWriteRequest<U> &req);
};
/**
 * SSD write-same request: single extent, pattern-only payload; supplies
 * SSD-specific buffer-resource accounting.
 */
template <typename T>
class C_WriteSameRequest : public pwl::C_WriteSameRequest<T> {
public:
  C_WriteSameRequest(
      T &pwl, const utime_t arrived, io::Extents &&image_extents,
      bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
      PerfCounters *perfcounter, Context *user_req)
    : pwl::C_WriteSameRequest<T>(
        pwl, arrived, std::move(image_extents), std::move(bl), fadvise_flags,
        lock, perfcounter, user_req) {}
  void setup_buffer_resources(
      uint64_t *bytes_cached, uint64_t *bytes_dirtied,
      uint64_t *bytes_allocated, uint64_t *number_lanes,
      uint64_t *number_log_entries,
      uint64_t *number_unpublished_reserves) override;
};
} // namespace ssd
} // namespace pwl
} // namespace cache
} // namespace librbd
#endif // CEPH_LIBRBD_CACHE_SSD_REQUEST_H
| 3,081 | 32.139785 | 77 | h |
null | ceph-main/src/librbd/cache/pwl/ssd/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_SSD_TYPES_H
#define CEPH_LIBRBD_CACHE_SSD_TYPES_H
#include "acconfig.h"
#include "librbd/io/Types.h"
#include "librbd/cache/pwl/Types.h"
namespace librbd {
namespace cache {
namespace pwl {
namespace ssd {
// On-media superblock of the SSD cache: just the pool-root metadata wrapped
// with DENC encode/decode support for persistence.
struct SuperBlock{
  WriteLogPoolRoot root;
  DENC(SuperBlock, v, p) {
    DENC_START(1, 1, p);
    denc(v.root, p);
    DENC_FINISH(p);
  }
  void dump(Formatter *f) const {
    f->dump_object("super", root);
  }
  // Sample instances for the ceph encode/decode test harness.
  static void generate_test_instances(std::list<SuperBlock*>& ls) {
    ls.push_back(new SuperBlock());
    ls.push_back(new SuperBlock);
    ls.back()->root.layout_version = 3;
    ls.back()->root.cur_sync_gen = 1;
    ls.back()->root.pool_size = 10737418240;
    ls.back()->root.flushed_sync_gen = 1;
    ls.back()->root.block_size = 4096;
    ls.back()->root.num_log_entries = 0;
    ls.back()->root.first_free_entry = 30601;
    ls.back()->root.first_valid_entry = 2;
  }
};
} // namespace ssd
} // namespace pwl
} // namespace cache
} // namespace librbd
WRITE_CLASS_DENC(librbd::cache::pwl::ssd::SuperBlock)
#endif // CEPH_LIBRBD_CACHE_SSD_TYPES_H
| 1,221 | 22.5 | 70 | h |
null | ceph-main/src/librbd/cache/pwl/ssd/WriteLog.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "WriteLog.h"
#include "include/buffer.h"
#include "include/Context.h"
#include "include/ceph_assert.h"
#include "common/deleter.h"
#include "common/dout.h"
#include "common/environment.h"
#include "common/errno.h"
#include "common/WorkQueue.h"
#include "common/Timer.h"
#include "common/perf_counters.h"
#include "librbd/ImageCtx.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/cache/pwl/ImageCacheState.h"
#include "librbd/cache/pwl/LogEntry.h"
#include <map>
#include <vector>
#undef dout_subsys
#define dout_subsys ceph_subsys_rbd_pwl
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::pwl::ssd::WriteLog: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace cache {
namespace pwl {
namespace ssd {
using namespace std;
using namespace librbd::cache::pwl;
// Sanity-check a pool root read from media: the pool size must be a multiple
// of the allocation unit, and both ring-buffer pointers must be aligned and
// lie within [DATA_RING_BUFFER_OFFSET, pool_size).
static bool is_valid_pool_root(const WriteLogPoolRoot& root) {
  if (root.pool_size % MIN_WRITE_ALLOC_SSD_SIZE != 0) {
    return false;
  }
  auto pointer_ok = [&root](uint64_t entry) {
    return entry >= DATA_RING_BUFFER_OFFSET &&
           entry < root.pool_size &&
           entry % MIN_WRITE_ALLOC_SSD_SIZE == 0;
  };
  return pointer_ok(root.first_valid_entry) &&
         pointer_ok(root.first_free_entry);
}
// Construct the SSD-specific factory object; the instance is owned by this
// WriteLog (freed in the destructor) and handed to the AbstractWriteLog base.
template <typename I>
Builder<AbstractWriteLog<I>>* WriteLog<I>::create_builder() {
  m_builderobj = new Builder<This>();
  return m_builderobj;
}
// Construct the SSD write log; create_builder() runs first so the base class
// receives the media-specific factory.
template <typename I>
WriteLog<I>::WriteLog(
  I &image_ctx, librbd::cache::pwl::ImageCacheState<I>* cache_state,
  cache::ImageWritebackInterface& image_writeback,
  plugin::Api<I>& plugin_api)
  : AbstractWriteLog<I>(image_ctx, cache_state, create_builder(),
                        image_writeback, plugin_api)
{
}
// Release the builder allocated in create_builder().
template <typename I>
WriteLog<I>::~WriteLog() {
  delete m_builderobj;
}
/*
 * Record one cache-hit extent for a read.  The entry's cached data (if still
 * held in RAM) is snapshotted into the read context; when the RAM copy has
 * already been dropped, the entry is queued for an SSD read into the
 * extent's buffer instead, with a bl reference taken so the entry is not
 * retired meanwhile.
 */
template <typename I>
void WriteLog<I>::collect_read_extents(
    uint64_t read_buffer_offset, LogMapEntry<GenericWriteLogEntry> map_entry,
    std::vector<std::shared_ptr<GenericWriteLogEntry>> &log_entries_to_read,
    std::vector<bufferlist*> &bls_to_read,
    uint64_t entry_hit_length, Extent hit_extent,
    pwl::C_ReadRequest *read_ctx) {
  // Make a bl for this hit extent. This will add references to the
  // write_entry->cache_bl */
  ldout(m_image_ctx.cct, 5) << dendl;
  auto write_entry = std::static_pointer_cast<WriteLogEntry>(map_entry.log_entry);
  buffer::list hit_bl;
  write_entry->copy_cache_bl(&hit_bl);
  bool writesame = write_entry->is_writesame_entry();
  auto hit_extent_buf = std::make_shared<ImageExtentBuf>(
      hit_extent, hit_bl, true, read_buffer_offset, writesame);
  read_ctx->read_extents.push_back(hit_extent_buf);
  if (!hit_bl.length()) {
    /* RAM copy already evicted: read the data from the SSD into this
     * extent's buffer later (see complete_read). */
    ldout(m_image_ctx.cct, 5) << "didn't hit RAM" << dendl;
    auto read_extent = read_ctx->read_extents.back();
    write_entry->inc_bl_refs();
    log_entries_to_read.push_back(std::move(write_entry));
    bls_to_read.push_back(&read_extent->m_bl);
  }
}
// Finish a read request: if any extents missed RAM, fetch them from the
// SSD (ctx completes when the reads land); otherwise complete immediately.
template <typename I>
void WriteLog<I>::complete_read(
    std::vector<std::shared_ptr<GenericWriteLogEntry>> &log_entries_to_read,
    std::vector<bufferlist*> &bls_to_read,
    Context *ctx) {
  if (log_entries_to_read.empty()) {
    // Every extent was served from RAM; nothing left to wait for.
    ctx->complete(0);
    return;
  }
  aio_read_data_blocks(log_entries_to_read, bls_to_read, ctx);
}
// Create and open the BlockDevice backing the cache pool file.
// Returns 0 on success or a negative errno; on failure `bdev` is freed
// and left dangling — callers must not touch it after an error.
template <typename I>
int WriteLog<I>::create_and_open_bdev() {
  CephContext *cct = m_image_ctx.cct;
  bdev = BlockDevice::create(cct, this->m_log_pool_name, aio_cache_cb,
                             nullptr, nullptr, nullptr);
  int r = bdev->open(this->m_log_pool_name);
  if (r < 0) {
    lderr(cct) << "failed to open bdev" << dendl;
    delete bdev;
    return r;
  }
  ceph_assert(this->m_log_pool_size % MIN_WRITE_ALLOC_SSD_SIZE == 0);
  // The device must match the configured pool size exactly, otherwise the
  // ring-buffer offsets computed elsewhere would be wrong.
  if (bdev->get_size() != this->m_log_pool_size) {
    lderr(cct) << "size mismatch: bdev size " << bdev->get_size()
               << " (block size " << bdev->get_block_size()
               << ") != pool size " << this->m_log_pool_size << dendl;
    bdev->close();
    delete bdev;
    return -EINVAL;
  }
  return 0;
}
/**
 * Initialize the SSD cache pool, creating it if absent or replaying an
 * existing one.
 *
 * New pool: create + truncate the file, open the bdev, and write a fresh
 * superblock with an empty ring buffer. Existing pool: open the bdev,
 * decode and validate the superblock, then replay log entries via
 * load_existing_entries().
 *
 * Caller must hold m_lock. Returns true on success; on failure completes
 * on_finish with a negative errno and returns false.
 */
template <typename I>
bool WriteLog<I>::initialize_pool(Context *on_finish,
                                  pwl::DeferredContexts &later) {
  int r;
  CephContext *cct = m_image_ctx.cct;
  ceph_assert(ceph_mutex_is_locked_by_me(m_lock));
  if (access(this->m_log_pool_name.c_str(), F_OK) != 0) {
    // Pool file does not exist: create it at the configured size.
    int fd = ::open(this->m_log_pool_name.c_str(), O_RDWR|O_CREAT, 0644);
    bool succeed = true;
    if (fd >= 0) {
      if (truncate(this->m_log_pool_name.c_str(),
                   this->m_log_pool_size) != 0) {
        succeed = false;
      }
      ::close(fd);
    } else {
      succeed = false;
    }
    if (!succeed) {
      m_cache_state->present = false;
      m_cache_state->clean = true;
      m_cache_state->empty = true;
      /* TODO: filter/replace errnos that are meaningless to the caller */
      on_finish->complete(-errno);
      return false;
    }
    r = create_and_open_bdev();
    if (r < 0) {
      on_finish->complete(r);
      return false;
    }
    m_cache_state->present = true;
    m_cache_state->clean = true;
    m_cache_state->empty = true;
    /* new pool, calculate and store metadata */

    /* Keep ring buffer at least MIN_WRITE_ALLOC_SSD_SIZE bytes free.
     * In this way, when all ring buffer spaces are allocated,
     * m_first_free_entry and m_first_valid_entry will not be equal.
     * Equal only means the cache is empty. */
    this->m_bytes_allocated_cap = this->m_log_pool_size -
                                  DATA_RING_BUFFER_OFFSET -
                                  MIN_WRITE_ALLOC_SSD_SIZE;
    /* Log ring empty */
    m_first_free_entry = DATA_RING_BUFFER_OFFSET;
    m_first_valid_entry = DATA_RING_BUFFER_OFFSET;
    auto new_root = std::make_shared<WriteLogPoolRoot>(pool_root);
    new_root->layout_version = SSD_LAYOUT_VERSION;
    new_root->pool_size = this->m_log_pool_size;
    new_root->flushed_sync_gen = this->m_flushed_sync_gen;
    new_root->block_size = MIN_WRITE_ALLOC_SSD_SIZE;
    new_root->first_free_entry = m_first_free_entry;
    new_root->first_valid_entry = m_first_valid_entry;
    new_root->num_log_entries = 0;
    pool_root = *new_root;
    // Persist the fresh superblock synchronously before declaring success.
    r = update_pool_root_sync(new_root);
    if (r != 0) {
      lderr(cct) << "failed to initialize pool ("
                 << this->m_log_pool_name << ")" << dendl;
      bdev->close();
      delete bdev;
      on_finish->complete(r);
      return false;
    }
  } else {
    // Existing pool: open it and recover state from the superblock.
    ceph_assert(m_cache_state->present);
    r = create_and_open_bdev();
    if (r < 0) {
      on_finish->complete(r);
      return false;
    }
    bufferlist bl;
    SuperBlock superblock;
    ::IOContext ioctx(cct, nullptr);
    // The superblock lives in the first block of the device.
    r = bdev->read(0, MIN_WRITE_ALLOC_SSD_SIZE, &bl, &ioctx, false);
    if (r < 0) {
      lderr(cct) << "read ssd cache superblock failed " << dendl;
      goto err_close_bdev;
    }
    auto p = bl.cbegin();
    decode(superblock, p);
    pool_root = superblock.root;
    ldout(cct, 1) << "Decoded root: pool_size=" << pool_root.pool_size
                  << " first_valid_entry=" << pool_root.first_valid_entry
                  << " first_free_entry=" << pool_root.first_free_entry
                  << " flushed_sync_gen=" << pool_root.flushed_sync_gen
                  << dendl;
    ceph_assert(is_valid_pool_root(pool_root));
    if (pool_root.layout_version != SSD_LAYOUT_VERSION) {
      lderr(cct) << "pool layout version is "
                 << pool_root.layout_version
                 << " expected " << SSD_LAYOUT_VERSION
                 << dendl;
      goto err_close_bdev;
    }
    if (pool_root.block_size != MIN_WRITE_ALLOC_SSD_SIZE) {
      lderr(cct) << "pool block size is " << pool_root.block_size
                 << " expected " << MIN_WRITE_ALLOC_SSD_SIZE
                 << dendl;
      goto err_close_bdev;
    }
    // Adopt the persisted state, then replay all valid log entries.
    this->m_log_pool_size = pool_root.pool_size;
    this->m_flushed_sync_gen = pool_root.flushed_sync_gen;
    this->m_first_valid_entry = pool_root.first_valid_entry;
    this->m_first_free_entry = pool_root.first_free_entry;
    this->m_bytes_allocated_cap = this->m_log_pool_size -
                                  DATA_RING_BUFFER_OFFSET -
                                  MIN_WRITE_ALLOC_SSD_SIZE;
    load_existing_entries(later);
    m_cache_state->clean = this->m_dirty_log_entries.empty();
    m_cache_state->empty = m_log_entries.empty();
  }
  return true;

err_close_bdev:
  bdev->close();
  delete bdev;
  on_finish->complete(-EINVAL);
  return false;
}
// Close the backing block device and, if the cache is clean, delete the
// pool file (marking the cache as no longer present). A dirty cache keeps
// its file so it can be replayed on the next open.
template <typename I>
void WriteLog<I>::remove_pool_file() {
  ceph_assert(bdev);
  bdev->close();
  delete bdev;
  bdev = nullptr;
  ldout(m_image_ctx.cct, 5) << "block device is closed" << dendl;
  if (m_cache_state->clean) {
    ldout(m_image_ctx.cct, 5) << "Removing empty pool file: "
                              << this->m_log_pool_name << dendl;
    if (remove(this->m_log_pool_name.c_str()) != 0) {
      // NOTE(review): message ends without an error reason — errno /
      // cpp_strerror is presumably intended here; confirm upstream.
      lderr(m_image_ctx.cct) << "failed to remove empty pool \""
                             << this->m_log_pool_name << "\": " << dendl;
    } else {
      m_cache_state->present = false;
    }
  } else {
    ldout(m_image_ctx.cct, 5) << "Not removing pool file: "
                              << this->m_log_pool_name << dendl;
  }
}
/**
 * Replay the persisted log on startup: walk the ring buffer from
 * first_valid_entry to first_free_entry, decoding each control block
 * (one MIN_WRITE_ALLOC_SSD_SIZE block holding a vector of cache entries)
 * and rebuilding the in-memory entry list, sync-point map and byte
 * accounting.
 */
template <typename I>
void WriteLog<I>::load_existing_entries(pwl::DeferredContexts &later) {
  CephContext *cct = m_image_ctx.cct;
  std::map<uint64_t, std::shared_ptr<SyncPointLogEntry>> sync_point_entries;
  std::map<uint64_t, bool> missing_sync_points;

  // Iterate through the log_entries and append all the write_bytes
  // of each entry to fetch the pos of next 4k of log_entries. Iterate
  // through the log entries and append them to the in-memory vector
  for (uint64_t next_log_pos = this->m_first_valid_entry;
       next_log_pos != this->m_first_free_entry; ) {
    // read the entries from SSD cache and decode
    bufferlist bl_entries;
    ::IOContext ioctx_entry(cct, nullptr);
    bdev->read(next_log_pos, MIN_WRITE_ALLOC_SSD_SIZE, &bl_entries,
               &ioctx_entry, false);
    std::vector<WriteLogCacheEntry> ssd_log_entries;
    auto pl = bl_entries.cbegin();
    decode(ssd_log_entries, pl);
    ldout(cct, 5) << "decoded ssd log entries" << dendl;
    uint64_t curr_log_pos = next_log_pos;
    std::shared_ptr<GenericLogEntry> log_entry = nullptr;
    // All entries decoded from this block share the same control-block
    // position (log_entry_index); their data follows the block.
    for (auto it = ssd_log_entries.begin(); it != ssd_log_entries.end(); ++it) {
      this->update_entries(&log_entry, &*it, missing_sync_points,
                           sync_point_entries, curr_log_pos);
      log_entry->ram_entry = *it;
      log_entry->log_entry_index = curr_log_pos;
      log_entry->completed = true;
      m_log_entries.push_back(log_entry);
      // Advance past this entry's (block-aligned) data payload.
      next_log_pos += round_up_to(it->write_bytes, MIN_WRITE_ALLOC_SSD_SIZE);
    }
    // along with the write_bytes, add control block size too
    next_log_pos += MIN_WRITE_ALLOC_SSD_SIZE;
    if (next_log_pos >= this->m_log_pool_size) {
      // Wrap around the ring buffer, skipping the superblock region.
      next_log_pos = next_log_pos % this->m_log_pool_size + DATA_RING_BUFFER_OFFSET;
    }
  }
  this->update_sync_points(missing_sync_points, sync_point_entries, later);
  // Allocated bytes is simply the occupied span of the ring buffer,
  // accounting for wrap-around.
  if (m_first_valid_entry > m_first_free_entry) {
    m_bytes_allocated = this->m_log_pool_size - m_first_valid_entry +
      m_first_free_entry - DATA_RING_BUFFER_OFFSET;
  } else {
    m_bytes_allocated = m_first_free_entry - m_first_valid_entry;
  }
}
/* SSD variant only accounts cached bytes here; m_bytes_allocated is
 * maintained by the allocation/retire paths instead. */
template <typename I>
void WriteLog<I>::inc_allocated_cached_bytes(
    std::shared_ptr<pwl::GenericLogEntry> log_entry) {
  if (!log_entry->is_write_entry()) {
    return;
  }
  this->m_bytes_cached += log_entry->write_bytes();
}
/**
 * Reserve cache resources for a block IO request.
 *
 * The request reports its needs via setup_buffer_resources(); for the SSD
 * cache, lanes and unpublished reserves are never used (asserted zero),
 * and each log entry is charged as one control block's worth of allocated
 * bytes instead of a discrete entry slot.
 *
 * Returns true (and marks the request allocated) if check_allocation()
 * could satisfy the reservation.
 */
template <typename I>
bool WriteLog<I>::alloc_resources(C_BlockIORequestT *req) {
  bool alloc_succeeds = true;
  uint64_t bytes_allocated = 0;
  uint64_t bytes_cached = 0;
  uint64_t bytes_dirtied = 0;
  uint64_t num_lanes = 0;
  uint64_t num_unpublished_reserves = 0;
  uint64_t num_log_entries = 0;

  // Setup buffer, and get all the number of required resources
  req->setup_buffer_resources(&bytes_cached, &bytes_dirtied, &bytes_allocated,
                              &num_lanes, &num_log_entries,
                              &num_unpublished_reserves);

  ceph_assert(!num_lanes);
  if (num_log_entries) {
    // Fold entry slots into the byte budget: one control block per entry.
    bytes_allocated += num_log_entries * MIN_WRITE_ALLOC_SSD_SIZE;
    num_log_entries = 0;
  }
  ceph_assert(!num_unpublished_reserves);

  alloc_succeeds = this->check_allocation(req, bytes_cached, bytes_dirtied,
                                          bytes_allocated, num_lanes,
                                          num_log_entries,
                                          num_unpublished_reserves);
  req->set_allocated(alloc_succeeds);
  return alloc_succeeds;
}
/**
 * Return true if any operation in @ops carries a sync point log entry.
 *
 * Sync points force the appender to flush to the SSD promptly, so their
 * presence makes schedule_append_ops() enlist the appender even when the
 * batch is otherwise too small.
 */
template <typename I>
bool WriteLog<I>::has_sync_point_logs(GenericLogOperations &ops) {
  for (auto &op : ops) {
    if (op->get_log_entry()->is_sync_point()) {
      // (dead `break;` after this return removed)
      return true;
    }
  }
  return false;
}
// Queue one run of append_scheduled_ops() on the work queue. The async-op
// counters incremented here are decremented by that run (directly when
// there is nothing to append, otherwise via its completion path).
template<typename I>
void WriteLog<I>::enlist_op_appender() {
  this->m_async_append_ops++;
  this->m_async_op_tracker.start_op();
  Context *append_ctx = new LambdaContext([this](int r) {
    append_scheduled_ops();
  });
  this->m_work_queue.queue(append_ctx);
}
/*
 * Takes custody of ops. They'll all get their log entries appended,
 * and have their on_write_persist contexts completed once they and
 * all prior log entries are persisted everywhere.
 */
template<typename I>
void WriteLog<I>::schedule_append_ops(GenericLogOperations &ops, C_BlockIORequestT *req) {
  bool need_finisher = false;
  GenericLogOperationsVector appending;

  // Copy (not splice) so we can still mark each op "appending" after
  // ops has been moved onto m_ops_to_append below.
  std::copy(std::begin(ops), std::end(ops), std::back_inserter(appending));
  {
    std::lock_guard locker(m_lock);

    bool persist_on_flush = this->get_persist_on_flush();
    // Kick the appender when a full control block's worth of entries is
    // queued, or immediately in persist-on-write mode.
    need_finisher = !this->m_appending &&
       ((this->m_ops_to_append.size() >= CONTROL_BLOCK_MAX_LOG_ENTRIES) ||
        !persist_on_flush);

    // Only flush logs into SSD when there is internal/external flush request
    if (!need_finisher) {
      need_finisher = has_sync_point_logs(ops);
    }
    this->m_ops_to_append.splice(this->m_ops_to_append.end(), ops);

    // To preserve the order of overlapping IOs, release_cell() may be
    // called only after the ops are added to m_ops_to_append.
    // As soon as m_lock is released, the appended ops can be picked up
    // by append_scheduled_ops() in another thread and req can be freed.
    if (req != nullptr) {
      if (persist_on_flush) {
        req->complete_user_request(0);
      }
      req->release_cell();
    }
  }

  if (need_finisher) {
    this->enlist_op_appender();
  }

  for (auto &op : appending) {
    op->appending();
  }
}
// SSD implementation ignores do_early_flush (only meaningful for the PMEM
// backend) and simply schedules the ops for appending.
template <typename I>
void WriteLog<I>::setup_schedule_append(pwl::GenericLogOperationsVector &ops,
                                        bool do_early_flush,
                                        C_BlockIORequestT *req) {
  this->schedule_append(ops, req);
}
// Drain ops scheduled by schedule_append_ops() and append them to the SSD.
// If nothing was picked up, drop the async-op accounting taken by
// enlist_op_appender(); otherwise the append completion path does it.
template <typename I>
void WriteLog<I>::append_scheduled_ops(void) {
  GenericLogOperations ops;
  ldout(m_image_ctx.cct, 20) << dendl;

  bool ops_remain = false;  // unused, no-op variable for SSD
  bool appending = false;   // unused, no-op variable for SSD
  this->append_scheduled(ops, ops_remain, appending);

  if (ops.size()) {
    alloc_op_log_entries(ops);
    append_op_log_entries(ops);
  } else {
    this->m_async_append_ops--;
    this->m_async_op_tracker.finish_op();
  }
}
/*
 * Write and persist the (already allocated) write log entries and
 * data buffer allocations for a set of ops. The data buffer for each
 * of these must already have been persisted to its reserved area.
 *
 * Flow: append_ops() issues the AIO writes; append_ctx fires when they
 * land, advances first_free_entry and schedules a superblock update; ctx
 * fires after the superblock is persisted, completes the ops, and decides
 * whether another appender run is needed.
 */
template <typename I>
void WriteLog<I>::append_op_log_entries(GenericLogOperations &ops) {
  ceph_assert(!ops.empty());
  ldout(m_image_ctx.cct, 20) << dendl;
  // Stage 2: superblock update finished — complete ops, maybe re-enlist.
  Context *ctx = new LambdaContext([this, ops](int r) {
    assert(r == 0);
    ldout(m_image_ctx.cct, 20) << "Finished root update " << dendl;

    auto captured_ops = std::move(ops);
    this->complete_op_log_entries(std::move(captured_ops), r);

    bool need_finisher = false;
    {
      std::lock_guard locker1(m_lock);
      bool persist_on_flush = this->get_persist_on_flush();
      need_finisher = ((this->m_ops_to_append.size() >= CONTROL_BLOCK_MAX_LOG_ENTRIES) ||
                       !persist_on_flush);

      if (!need_finisher) {
        need_finisher = has_sync_point_logs(this->m_ops_to_append);
      }
    }

    if (need_finisher) {
      this->enlist_op_appender();
    }
    this->m_async_update_superblock--;
    this->m_async_op_tracker.finish_op();
  });
  // Out-param for append_ops(); freed inside append_ctx once consumed.
  uint64_t *new_first_free_entry = new(uint64_t);
  // Stage 1: log-entry AIO writes finished — publish the new free cursor.
  Context *append_ctx = new LambdaContext(
    [this, new_first_free_entry, ops, ctx](int r) {
      std::shared_ptr<WriteLogPoolRoot> new_root;
      {
        ldout(m_image_ctx.cct, 20) << "Finished appending at "
                                   << *new_first_free_entry << dendl;
        utime_t now = ceph_clock_now();
        for (auto &operation : ops) {
          operation->log_append_comp_time = now;
        }

        std::lock_guard locker(this->m_log_append_lock);
        std::lock_guard locker1(m_lock);
        assert(this->m_appending);
        this->m_appending = false;
        new_root = std::make_shared<WriteLogPoolRoot>(pool_root);
        pool_root.first_free_entry = *new_first_free_entry;
        new_root->first_free_entry = *new_first_free_entry;
        delete new_first_free_entry;
        schedule_update_root(new_root, ctx);
      }
      this->m_async_append_ops--;
      this->m_async_op_tracker.finish_op();
  });
  // Append logs and update first_free_update
  append_ops(ops, append_ctx, new_first_free_entry);

  if (ops.size()) {
    this->dispatch_deferred_writes();
  }
}
template <typename I>
void WriteLog<I>::release_ram(std::shared_ptr<GenericLogEntry> log_entry) {
  // Drop the in-RAM copy of the entry's data; it remains readable from SSD.
  log_entry->remove_cache_bl();
}
// Mark each op's log entry valid and append it to the in-memory log.
// Transitions the cache out of the "empty" state (and persists that)
// the first time entries appear.
template <typename I>
void WriteLog<I>::alloc_op_log_entries(GenericLogOperations &ops) {
  std::unique_lock locker(m_lock);

  for (auto &operation : ops) {
    auto &log_entry = operation->get_log_entry();
    log_entry->ram_entry.set_entry_valid(true);
    m_log_entries.push_back(log_entry);
    ldout(m_image_ctx.cct, 20) << "operation=[" << *operation << "]" << dendl;
  }
  if (m_cache_state->empty && !m_log_entries.empty()) {
    m_cache_state->empty = false;
    this->update_image_cache_state();
    this->write_image_cache_state(locker);
  }
}
/**
 * Build writeback contexts for a batch of dirty log entries.
 *
 * When invalidating (or when the batch has no write entries) each entry
 * just gets a guarded flush context. Otherwise the write entries' data is
 * first read back from the SSD in one batched AIO, and each entry's
 * writeback carries its own bufferlist. Flush ordering is enforced by
 * detain_flush_guard_request() in both paths.
 */
template <typename I>
void WriteLog<I>::construct_flush_entries(pwl::GenericLogEntries entries_to_flush,
					  DeferredContexts &post_unlock,
					  bool has_write_entry) {
  // snapshot so we behave consistently
  bool invalidating = this->m_invalidating;

  if (invalidating || !has_write_entry) {
    for (auto &log_entry : entries_to_flush) {
      GuardedRequestFunctionContext *guarded_ctx =
	new GuardedRequestFunctionContext([this, log_entry, invalidating]
          (GuardedRequestFunctionContext &guard_ctx) {
            log_entry->m_cell = guard_ctx.cell;
	    Context *ctx = this->construct_flush_entry(log_entry, invalidating);

	    if (!invalidating) {
	      // Hop to the image work queue before issuing the writeback.
	      ctx = new LambdaContext([this, log_entry, ctx](int r) {
		m_image_ctx.op_work_queue->queue(new LambdaContext(
		  [this, log_entry, ctx](int r) {
		    ldout(m_image_ctx.cct, 15) << "flushing:" << log_entry
					       << " " << *log_entry << dendl;
		    log_entry->writeback(this->m_image_writeback, ctx);
		  }), 0);
	      });
	    }
	    ctx->complete(0);
	});
      this->detain_flush_guard_request(log_entry, guarded_ctx);
    }
  } else {
    int count = entries_to_flush.size();
    std::vector<std::shared_ptr<GenericWriteLogEntry>> write_entries;
    std::vector<bufferlist *> read_bls;

    write_entries.reserve(count);
    read_bls.reserve(count);

    // Collect the write entries whose data must be read back from SSD.
    // bl_refs taken here are released by aio_read_data_blocks().
    for (auto &log_entry : entries_to_flush) {
      if (log_entry->is_write_entry()) {
	bufferlist *bl = new bufferlist;
	auto write_entry = static_pointer_cast<WriteLogEntry>(log_entry);
	write_entry->inc_bl_refs();
	write_entries.push_back(write_entry);
	read_bls.push_back(bl);
      }
    }

    // Runs after all SSD reads complete; read_bls[i] pairs with the i-th
    // write entry in iteration order.
    Context *ctx = new LambdaContext(
      [this, entries_to_flush, read_bls](int r) {
	int i = 0;
	GuardedRequestFunctionContext *guarded_ctx = nullptr;

	for (auto &log_entry : entries_to_flush) {
	  if (log_entry->is_write_entry()) {
	    bufferlist captured_entry_bl;
	    captured_entry_bl.claim_append(*read_bls[i]);
	    delete read_bls[i++];

	    guarded_ctx = new GuardedRequestFunctionContext([this, log_entry, captured_entry_bl]
	      (GuardedRequestFunctionContext &guard_ctx) {
		log_entry->m_cell = guard_ctx.cell;
		Context *ctx = this->construct_flush_entry(log_entry, false);

		m_image_ctx.op_work_queue->queue(new LambdaContext(
		  [this, log_entry, entry_bl=std::move(captured_entry_bl), ctx](int r) {
		    auto captured_entry_bl = std::move(entry_bl);
		    ldout(m_image_ctx.cct, 15) << "flushing:" << log_entry
					       << " " << *log_entry << dendl;
		    log_entry->writeback_bl(this->m_image_writeback, ctx,
					    std::move(captured_entry_bl));
		  }), 0);
	      });
	  } else {
	    // Non-write entries (e.g. discards) need no data buffer.
	    guarded_ctx = new GuardedRequestFunctionContext([this, log_entry]
	      (GuardedRequestFunctionContext &guard_ctx) {
		log_entry->m_cell = guard_ctx.cell;
		Context *ctx = this->construct_flush_entry(log_entry, false);
		m_image_ctx.op_work_queue->queue(new LambdaContext(
		  [this, log_entry, ctx](int r) {
		    ldout(m_image_ctx.cct, 15) << "flushing:" << log_entry
					       << " " << *log_entry << dendl;
		    log_entry->writeback(this->m_image_writeback, ctx);
		  }), 0);
	      });
	  }
	  this->detain_flush_guard_request(log_entry, guarded_ctx);
	}
      });

    aio_read_data_blocks(write_entries, read_bls, ctx);
  }
}
// Background work pump: retire old entries when space pressure or
// shutdown/invalidate demands it, dispatch deferred writes, and write back
// dirty entries. Loops while more work keeps being requested (bounded by
// max_iterations), then reschedules itself if a request raced in.
template <typename I>
void WriteLog<I>::process_work() {
  CephContext *cct = m_image_ctx.cct;
  int max_iterations = 4;
  bool wake_up_requested = false;
  uint64_t aggressive_high_water_bytes =
      this->m_bytes_allocated_cap * AGGRESSIVE_RETIRE_HIGH_WATER;
  uint64_t high_water_bytes = this->m_bytes_allocated_cap * RETIRE_HIGH_WATER;

  ldout(cct, 20) << dendl;

  do {
    {
      std::lock_guard locker(m_lock);
      this->m_wake_up_requested = false;
    }
    if (this->m_alloc_failed_since_retire || (this->m_shutting_down) ||
        this->m_invalidating || m_bytes_allocated > high_water_bytes) {
      ldout(m_image_ctx.cct, 10) << "alloc_fail=" << this->m_alloc_failed_since_retire
                                 << ", allocated > high_water="
                                 << (m_bytes_allocated > high_water_bytes)
                                 << dendl;
      // Retire more per pass when under heavy pressure.
      retire_entries((this->m_shutting_down || this->m_invalidating ||
         m_bytes_allocated > aggressive_high_water_bytes)
        ? MAX_ALLOC_PER_TRANSACTION : MAX_FREE_PER_TRANSACTION);
    }
    this->dispatch_deferred_writes();
    this->process_writeback_dirty_entries();
    {
      std::lock_guard locker(m_lock);
      wake_up_requested = this->m_wake_up_requested;
    }
  } while (wake_up_requested && --max_iterations > 0);

  {
    std::lock_guard locker(m_lock);
    this->m_wake_up_scheduled = false;
    // Reschedule if it's still requested
    if (this->m_wake_up_requested) {
      this->wake_up();
    }
  }
}
/**
 * Retire up to MAX_ALLOC_PER_TRANSACTION of the oldest log entries
 * that are eligible to be retired. Returns true if anything was
 * retired.
 *
 * Entries are retired in whole "spans" (one control block plus its data
 * payload) so the first_valid_entry cursor always advances by complete,
 * block-aligned spans of the ring buffer. The superblock update and the
 * byte-accounting release happen asynchronously via schedule_update_root().
 */
template <typename I>
bool WriteLog<I>::retire_entries(const unsigned long int frees_per_tx) {
  CephContext *cct = m_image_ctx.cct;
  GenericLogEntriesVector retiring_entries;
  uint64_t initial_first_valid_entry;
  uint64_t first_valid_entry;

  std::lock_guard retire_locker(this->m_log_retire_lock);
  ldout(cct, 20) << "Look for entries to retire" << dendl;
  {
    // Entry readers can't be added while we hold m_entry_reader_lock
    RWLock::WLocker entry_reader_locker(this->m_entry_reader_lock);
    std::lock_guard locker(m_lock);
    initial_first_valid_entry = m_first_valid_entry;
    first_valid_entry = m_first_valid_entry;
    while (retiring_entries.size() < frees_per_tx && !m_log_entries.empty()) {
      GenericLogEntriesVector retiring_subentries;
      uint64_t control_block_pos = m_log_entries.front()->log_entry_index;
      uint64_t data_length = 0;
      // Gather entries sharing the same control block; stop at the first
      // entry from the next control block (and sanity-check its position).
      for (auto it = m_log_entries.begin(); it != m_log_entries.end(); ++it) {
        if (this->can_retire_entry(*it)) {
          // log_entry_index is valid after appending to SSD
          if ((*it)->log_entry_index != control_block_pos) {
            ldout(cct, 20) << "Old log_entry_index is " << control_block_pos
                           << ",New log_entry_index is "
                           << (*it)->log_entry_index
                           << ",data length is " << data_length << dendl;
            ldout(cct, 20) << "The log entry is " << *(*it) << dendl;
            if ((*it)->log_entry_index < control_block_pos) {
              // Next control block wrapped around the ring buffer.
              ceph_assert((*it)->log_entry_index ==
                  (control_block_pos + data_length + MIN_WRITE_ALLOC_SSD_SIZE) %
                  this->m_log_pool_size + DATA_RING_BUFFER_OFFSET);
            } else {
              ceph_assert((*it)->log_entry_index == control_block_pos +
                  data_length + MIN_WRITE_ALLOC_SSD_SIZE);
            }
            break;
          } else {
            retiring_subentries.push_back(*it);
            if ((*it)->is_write_entry()) {
              data_length += (*it)->get_aligned_data_size();
            }
          }
        } else {
          // A non-retirable entry poisons the whole span: retire nothing.
          retiring_subentries.clear();
          break;
        }
      }
      // SSD: retiring_subentries in a span
      if (!retiring_subentries.empty()) {
        for (auto it = retiring_subentries.begin();
            it != retiring_subentries.end(); it++) {
          ceph_assert(m_log_entries.front() == *it);
          m_log_entries.pop_front();
          if ((*it)->write_bytes() > 0 || (*it)->bytes_dirty() > 0) {
            auto gen_write_entry = static_pointer_cast<GenericWriteLogEntry>(*it);
            if (gen_write_entry) {
              this->m_blocks_to_log_entries.remove_log_entry(gen_write_entry);
            }
          }
        }

        ldout(cct, 20) << "span with " << retiring_subentries.size()
                       << " entries: control_block_pos=" << control_block_pos
                       << " data_length=" << data_length
                       << dendl;
        retiring_entries.insert(
            retiring_entries.end(), retiring_subentries.begin(),
            retiring_subentries.end());

        first_valid_entry = control_block_pos + data_length +
            MIN_WRITE_ALLOC_SSD_SIZE;
        if (first_valid_entry >= this->m_log_pool_size) {
          first_valid_entry = first_valid_entry % this->m_log_pool_size +
              DATA_RING_BUFFER_OFFSET;
        }
      } else {
        break;
      }
    }
  }
  if (retiring_entries.size()) {
    ldout(cct, 20) << "Retiring " << retiring_entries.size() << " entries"
                   << dendl;

    // Advance first valid entry and release buffers
    uint64_t flushed_sync_gen;
    std::lock_guard append_locker(this->m_log_append_lock);
    {
      std::lock_guard locker(m_lock);
      flushed_sync_gen = this->m_flushed_sync_gen;
    }

    ceph_assert(first_valid_entry != initial_first_valid_entry);
    auto new_root = std::make_shared<WriteLogPoolRoot>(pool_root);
    new_root->flushed_sync_gen = flushed_sync_gen;
    new_root->first_valid_entry = first_valid_entry;
    pool_root.flushed_sync_gen = flushed_sync_gen;
    pool_root.first_valid_entry = first_valid_entry;

    // Runs after the superblock update persists: release the byte
    // accounting and wake deferred work.
    Context *ctx = new LambdaContext(
      [this, first_valid_entry, initial_first_valid_entry,
       retiring_entries](int r) {
        uint64_t allocated_bytes = 0;
        uint64_t cached_bytes = 0;
        uint64_t former_log_pos = 0;
        for (auto &entry : retiring_entries) {
          ceph_assert(entry->log_entry_index != 0);
          if (entry->log_entry_index != former_log_pos ) {
            // Space for control blocks
            allocated_bytes += MIN_WRITE_ALLOC_SSD_SIZE;
            former_log_pos = entry->log_entry_index;
          }
          if (entry->is_write_entry()) {
            cached_bytes += entry->write_bytes();
            // space for userdata
            allocated_bytes += entry->get_aligned_data_size();
          }
        }
        bool need_update_state = false;
        {
          std::lock_guard locker(m_lock);
          m_first_valid_entry = first_valid_entry;
          ceph_assert(m_first_valid_entry % MIN_WRITE_ALLOC_SSD_SIZE == 0);
          ceph_assert(this->m_bytes_allocated >= allocated_bytes);
          this->m_bytes_allocated -= allocated_bytes;
          ceph_assert(this->m_bytes_cached >= cached_bytes);
          this->m_bytes_cached -= cached_bytes;
          if (!m_cache_state->empty && m_log_entries.empty()) {
            m_cache_state->empty = true;
            this->update_image_cache_state();
            need_update_state = true;
          }

          ldout(m_image_ctx.cct, 20)
            << "Finished root update: initial_first_valid_entry="
            << initial_first_valid_entry << ", m_first_valid_entry="
            << m_first_valid_entry << ", release space = "
            << allocated_bytes << ", m_bytes_allocated="
            << m_bytes_allocated << ", release cached space="
            << cached_bytes << ", m_bytes_cached="
            << this->m_bytes_cached << dendl;

          this->m_alloc_failed_since_retire = false;
          this->wake_up();
        }
        if (need_update_state) {
          std::unique_lock locker(m_lock);
          this->write_image_cache_state(locker);
        }

        this->dispatch_deferred_writes();
        this->process_writeback_dirty_entries();
        m_async_update_superblock--;
        this->m_async_op_tracker.finish_op();
      });

    std::lock_guard locker(m_lock);
    schedule_update_root(new_root, ctx);
  } else {
    ldout(cct, 20) << "Nothing to retire" << dendl;
    return false;
  }
  return true;
}
/**
 * Issue the AIO writes that append @ops to the SSD log, batching entries
 * into control blocks of up to CONTROL_BLOCK_MAX_LOG_ENTRIES entries /
 * SPAN_MAX_DATA_LEN payload bytes. Batching entries into shared control
 * blocks frees the per-entry blocks that alloc_resources() charged, hence
 * the m_bytes_allocated refund. @new_first_free_entry is advanced past
 * everything written; @ctx fires when all AIOs complete.
 */
template <typename I>
void WriteLog<I>::append_ops(GenericLogOperations &ops, Context *ctx,
                             uint64_t* new_first_free_entry) {
  GenericLogEntriesVector log_entries;
  CephContext *cct = m_image_ctx.cct;
  uint64_t span_payload_len = 0;
  uint64_t bytes_to_free = 0;
  ldout(cct, 20) << "Appending " << ops.size() << " log entries." << dendl;

  *new_first_free_entry = pool_root.first_free_entry;
  AioTransContext* aio = new AioTransContext(cct, ctx);

  utime_t now = ceph_clock_now();
  for (auto &operation : ops) {
    operation->log_append_start_time = now;
    auto log_entry = operation->get_log_entry();

    // Flush the current batch when it is full before adding this entry.
    if (log_entries.size() == CONTROL_BLOCK_MAX_LOG_ENTRIES ||
        span_payload_len >= SPAN_MAX_DATA_LEN) {
      if (log_entries.size() > 1) {
        bytes_to_free += (log_entries.size() - 1) * MIN_WRITE_ALLOC_SSD_SIZE;
      }
      write_log_entries(log_entries, aio, new_first_free_entry);
      log_entries.clear();
      span_payload_len = 0;
    }
    log_entries.push_back(log_entry);
    span_payload_len += log_entry->write_bytes();
  }
  // Write out the trailing (possibly payload-less, e.g. sync point) batch.
  if (!span_payload_len || !log_entries.empty()) {
    if (log_entries.size() > 1) {
      bytes_to_free += (log_entries.size() - 1) * MIN_WRITE_ALLOC_SSD_SIZE;
    }
    write_log_entries(log_entries, aio, new_first_free_entry);
  }

  {
    std::lock_guard locker1(m_lock);
    m_first_free_entry = *new_first_free_entry;
    m_bytes_allocated -= bytes_to_free;
  }

  bdev->aio_submit(&aio->ioc);
}
/**
 * Queue the AIO write for one control-block span: a MIN_WRITE_ALLOC_SSD_SIZE
 * block holding the encoded entries, followed by each write entry's
 * block-aligned data. @pos is the ring-buffer cursor; it is advanced past
 * the span (wrapping past the superblock region as needed), and a write
 * that crosses the pool end is split into two AIOs.
 */
template <typename I>
void WriteLog<I>::write_log_entries(GenericLogEntriesVector log_entries,
                                    AioTransContext *aio, uint64_t *pos) {
  CephContext *cct = m_image_ctx.cct;
  ldout(m_image_ctx.cct, 20) << "pos=" << *pos << dendl;
  ceph_assert(*pos >= DATA_RING_BUFFER_OFFSET &&
              *pos < this->m_log_pool_size &&
              *pos % MIN_WRITE_ALLOC_SSD_SIZE == 0);

  // The first block is for log entries
  uint64_t control_block_pos = *pos;
  *pos += MIN_WRITE_ALLOC_SSD_SIZE;
  if (*pos == this->m_log_pool_size) {
    *pos = DATA_RING_BUFFER_OFFSET;
  }

  std::vector<WriteLogCacheEntry> persist_log_entries;
  bufferlist data_bl;
  for (auto &log_entry : log_entries) {
    log_entry->log_entry_index = control_block_pos;
    // Append data buffer for write operations
    if (log_entry->is_write_entry()) {
      auto write_entry = static_pointer_cast<WriteLogEntry>(log_entry);
      auto cache_bl = write_entry->get_cache_bl();
      auto align_size = write_entry->get_aligned_data_size();
      data_bl.append(cache_bl);
      data_bl.append_zero(align_size - cache_bl.length());

      write_entry->ram_entry.write_data_pos = *pos;
      *pos += align_size;
      if (*pos >= this->m_log_pool_size) {
        *pos = *pos % this->m_log_pool_size + DATA_RING_BUFFER_OFFSET;
      }
    }
    // push_back _after_ setting write_data_pos
    persist_log_entries.push_back(log_entry->ram_entry);
  }

  //aio write
  bufferlist bl;
  encode(persist_log_entries, bl);
  ceph_assert(bl.length() <= MIN_WRITE_ALLOC_SSD_SIZE);
  bl.append_zero(MIN_WRITE_ALLOC_SSD_SIZE - bl.length());
  bl.append(data_bl);
  ceph_assert(bl.length() % MIN_WRITE_ALLOC_SSD_SIZE == 0);
  if (control_block_pos + bl.length() > this->m_log_pool_size) {
    //exceeds border, need to split
    uint64_t size = bl.length();
    bufferlist bl1;
    bl.splice(0, this->m_log_pool_size - control_block_pos, &bl1);
    ceph_assert(bl.length() == (size - bl1.length()));

    ldout(cct, 20) << "write " << control_block_pos << "~"
                   << size << " spans boundary, split into "
                   << control_block_pos << "~" << bl1.length()
                   << " and " << DATA_RING_BUFFER_OFFSET << "~"
                   << bl.length() << dendl;
    bdev->aio_write(control_block_pos, bl1, &aio->ioc, false,
                    WRITE_LIFE_NOT_SET);
    bdev->aio_write(DATA_RING_BUFFER_OFFSET, bl, &aio->ioc, false,
                    WRITE_LIFE_NOT_SET);
  } else {
    ldout(cct, 20) << "write " << control_block_pos << "~"
                   << bl.length() << dendl;
    bdev->aio_write(control_block_pos, bl, &aio->ioc, false,
                    WRITE_LIFE_NOT_SET);
  }
}
// Queue an asynchronous superblock (pool root) update; @ctx completes
// after the update persists. Updates are serialized: the pump is only
// enlisted when no update is queued or in flight. Caller must hold m_lock.
template <typename I>
void WriteLog<I>::schedule_update_root(
    std::shared_ptr<WriteLogPoolRoot> root, Context *ctx) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 15) << "New root: pool_size=" << root->pool_size
                 << " first_valid_entry=" << root->first_valid_entry
                 << " first_free_entry=" << root->first_free_entry
                 << " flushed_sync_gen=" << root->flushed_sync_gen
                 << dendl;
  ceph_assert(is_valid_pool_root(*root));

  bool need_finisher;
  {
    ceph_assert(ceph_mutex_is_locked_by_me(m_lock));
    need_finisher = m_poolroot_to_update.empty() && !m_updating_pool_root;
    std::shared_ptr<WriteLogPoolRootUpdate> entry =
      std::make_shared<WriteLogPoolRootUpdate>(root, ctx);
    this->m_async_update_superblock++;
    this->m_async_op_tracker.start_op();
    m_poolroot_to_update.emplace_back(entry);
  }
  if (need_finisher) {
    enlist_op_update_root();
  }
}
// Run the pool-root update pump on the work queue rather than inline.
template <typename I>
void WriteLog<I>::enlist_op_update_root() {
  this->m_work_queue.queue(new LambdaContext([this](int r) {
    update_root_scheduled_ops();
  }));
}
// Pool-root update pump: drain all queued updates, persist only the most
// recent root (later roots subsume earlier ones), then complete every
// queued context. Re-enlists itself if more updates arrived meanwhile.
template <typename I>
void WriteLog<I>::update_root_scheduled_ops() {
  ldout(m_image_ctx.cct, 20) << dendl;

  std::shared_ptr<WriteLogPoolRoot> root;
  WriteLogPoolRootUpdateList root_updates;
  Context *ctx = nullptr;
  {
    std::lock_guard locker(m_lock);
    if (m_updating_pool_root) {
      /* Another thread is appending */
      ldout(m_image_ctx.cct, 15) << "Another thread is updating pool root"
                                 << dendl;
      return;
    }
    if (m_poolroot_to_update.size()) {
      m_updating_pool_root = true;
      root_updates.swap(m_poolroot_to_update);
    }
  }
  ceph_assert(!root_updates.empty());
  ldout(m_image_ctx.cct, 15) << "Update root number: " << root_updates.size()
                             << dendl;
  // We just update the last one, and call all the completions.
  auto entry = root_updates.back();
  root = entry->root;

  // Completes every queued update's context once the write lands.
  ctx = new LambdaContext([this, updates = std::move(root_updates)](int r) {
    ldout(m_image_ctx.cct, 15) << "Start to callback." << dendl;
    for (auto it = updates.begin(); it != updates.end(); it++) {
      Context *it_ctx = (*it)->ctx;
      it_ctx->complete(r);
    }
  });
  // Clears the in-flight flag and re-enlists if more updates queued up.
  Context *append_ctx = new LambdaContext([this, ctx](int r) {
    ldout(m_image_ctx.cct, 15) << "Finish the update of pool root." << dendl;
    bool need_finisher = false;
    assert(r == 0);
    {
      std::lock_guard locker(m_lock);
      m_updating_pool_root = false;
      need_finisher = !m_poolroot_to_update.empty();
    }
    if (need_finisher) {
      enlist_op_update_root();
    }
    ctx->complete(r);
  });
  AioTransContext* aio = new AioTransContext(m_image_ctx.cct, append_ctx);
  update_pool_root(root, aio);
}
// Asynchronously persist the superblock (block 0) containing @root;
// completion is signalled through @aio.
template <typename I>
void WriteLog<I>::update_pool_root(std::shared_ptr<WriteLogPoolRoot> root,
                                   AioTransContext *aio) {
  bufferlist bl;
  SuperBlock superblock;
  superblock.root = *root;
  encode(superblock, bl);
  // Pad to a full device block.
  bl.append_zero(MIN_WRITE_ALLOC_SSD_SIZE - bl.length());
  ceph_assert(bl.length() % MIN_WRITE_ALLOC_SSD_SIZE == 0);
  bdev->aio_write(0, bl, &aio->ioc, false, WRITE_LIFE_NOT_SET);
  bdev->aio_submit(&aio->ioc);
}
// Synchronous counterpart of update_pool_root(): write the superblock to
// block 0 and return the bdev write result. Used during pool creation.
template <typename I>
int WriteLog<I>::update_pool_root_sync(
    std::shared_ptr<WriteLogPoolRoot> root) {
  bufferlist bl;
  SuperBlock superblock;
  superblock.root = *root;
  encode(superblock, bl);
  // Pad to a full device block.
  bl.append_zero(MIN_WRITE_ALLOC_SSD_SIZE - bl.length());
  ceph_assert(bl.length() % MIN_WRITE_ALLOC_SSD_SIZE == 0);
  return bdev->write(0, bl, false);
}
// Single-entry convenience wrapper: delegate to the batched read path.
template <typename I>
void WriteLog<I>::aio_read_data_block(std::shared_ptr<GenericWriteLogEntry> log_entry,
                                      bufferlist *bl, Context *ctx) {
  std::vector<std::shared_ptr<GenericWriteLogEntry>> log_entries;
  log_entries.push_back(std::move(log_entry));
  std::vector<bufferlist *> bls;
  bls.push_back(bl);
  aio_read_data_blocks(log_entries, bls, ctx);
}
/**
 * Read the data payloads of @log_entries from the SSD into @bls (parallel
 * vectors). Reads are block-aligned and split in two when they wrap the
 * ring buffer; the completion trims each bufferlist down to the valid
 * (unpadded) length and releases the bl reference taken by the caller.
 * @ctx completes after all reads finish.
 */
template <typename I>
void WriteLog<I>::aio_read_data_blocks(
    std::vector<std::shared_ptr<GenericWriteLogEntry>> &log_entries,
    std::vector<bufferlist *> &bls, Context *ctx) {
  ceph_assert(log_entries.size() == bls.size());

  //get the valid part
  Context *read_ctx = new LambdaContext(
    [log_entries, bls, ctx](int r) {
      for (unsigned int i = 0; i < log_entries.size(); i++) {
        bufferlist valid_data_bl;
        auto write_entry = static_pointer_cast<WriteLogEntry>(log_entries[i]);
        auto length = write_entry->ram_entry.is_write() ? write_entry->ram_entry.write_bytes
                                                        : write_entry->ram_entry.ws_datalen;
        // Strip block-alignment padding down to the real payload length.
        valid_data_bl.substr_of(*bls[i], 0, length);
        bls[i]->clear();
        bls[i]->append(valid_data_bl);
        // Balances the inc_bl_refs() done by the caller.
        write_entry->dec_bl_refs();
      }
      ctx->complete(r);
    });

  CephContext *cct = m_image_ctx.cct;
  AioTransContext *aio = new AioTransContext(cct, read_ctx);
  for (unsigned int i = 0; i < log_entries.size(); i++) {
    WriteLogCacheEntry *log_entry = &log_entries[i]->ram_entry;

    ceph_assert(log_entry->is_write() || log_entry->is_writesame());
    uint64_t len = log_entry->is_write() ? log_entry->write_bytes :
                                           log_entry->ws_datalen;
    uint64_t align_len = round_up_to(len, MIN_WRITE_ALLOC_SSD_SIZE);

    ldout(cct, 20) << "entry i=" << i << " " << log_entry->write_data_pos
                   << "~" << len << dendl;
    ceph_assert(log_entry->write_data_pos >= DATA_RING_BUFFER_OFFSET &&
                log_entry->write_data_pos < pool_root.pool_size);
    ceph_assert(align_len);
    if (log_entry->write_data_pos + align_len > pool_root.pool_size) {
      // spans boundary, need to split
      uint64_t len1 = pool_root.pool_size - log_entry->write_data_pos;
      uint64_t len2 = align_len - len1;

      ldout(cct, 20) << "read " << log_entry->write_data_pos << "~"
                     << align_len << " spans boundary, split into "
                     << log_entry->write_data_pos << "~" << len1
                     << " and " << DATA_RING_BUFFER_OFFSET << "~"
                     << len2 << dendl;
      bdev->aio_read(log_entry->write_data_pos, len1, bls[i], &aio->ioc);
      bdev->aio_read(DATA_RING_BUFFER_OFFSET, len2, bls[i], &aio->ioc);
    } else {
      ldout(cct, 20) << "read " << log_entry->write_data_pos << "~"
                     << align_len << dendl;
      bdev->aio_read(log_entry->write_data_pos, align_len, bls[i], &aio->ioc);
    }
  }
  bdev->aio_submit(&aio->ioc);
}
// Defer completion of the user's request to the image context's op work
// queue rather than invoking the callback inline.
template <typename I>
void WriteLog<I>::complete_user_request(Context *&user_req, int r) {
  m_image_ctx.op_work_queue->queue(user_req, r);
}
} // namespace ssd
} // namespace pwl
} // namespace cache
} // namespace librbd
template class librbd::cache::pwl::ssd::WriteLog<librbd::ImageCtx>;
| 41,635 | 34.862188 | 92 | cc |
null | ceph-main/src/librbd/cache/pwl/ssd/WriteLog.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_PWL_SSD_WRITE_LOG
#define CEPH_LIBRBD_CACHE_PWL_SSD_WRITE_LOG
#include "blk/BlockDevice.h"
#include "common/AsyncOpTracker.h"
#include "common/Checksummer.h"
#include "common/environment.h"
#include "common/RWLock.h"
#include "common/WorkQueue.h"
#include "librbd/BlockGuard.h"
#include "librbd/Utils.h"
#include "librbd/cache/ImageWriteback.h"
#include "librbd/cache/Types.h"
#include "librbd/cache/pwl/AbstractWriteLog.h"
#include "librbd/cache/pwl/LogMap.h"
#include "librbd/cache/pwl/LogOperation.h"
#include "librbd/cache/pwl/Request.h"
#include "librbd/cache/pwl/ssd/Builder.h"
#include "librbd/cache/pwl/ssd/Types.h"
#include <functional>
#include <list>
namespace librbd {
struct ImageCtx;
namespace cache {
namespace pwl {
namespace ssd {
// SSD-backed persistent write-back cache: write log stored on a raw block
// device (via blk::BlockDevice) rather than PMEM.
template <typename ImageCtxT>
class WriteLog : public AbstractWriteLog<ImageCtxT> {
public:
  WriteLog(ImageCtxT &image_ctx,
           librbd::cache::pwl::ImageCacheState<ImageCtxT>* cache_state,
           cache::ImageWritebackInterface& image_writeback,
           plugin::Api<ImageCtxT>& plugin_api);
  ~WriteLog();
  WriteLog(const WriteLog&) = delete;
  WriteLog &operator=(const WriteLog&) = delete;

  typedef io::Extent Extent;
  using This = AbstractWriteLog<ImageCtxT>;
  using C_BlockIORequestT = pwl::C_BlockIORequest<This>;
  using C_WriteRequestT = pwl::C_WriteRequest<This>;
  using C_WriteSameRequestT = pwl::C_WriteSameRequest<This>;

  bool alloc_resources(C_BlockIORequestT *req) override;
  void setup_schedule_append(
      pwl::GenericLogOperationsVector &ops, bool do_early_flush,
      C_BlockIORequestT *req) override;
  void complete_user_request(Context *&user_req, int r) override;

protected:
  using AbstractWriteLog<ImageCtxT>::m_lock;
  using AbstractWriteLog<ImageCtxT>::m_log_entries;
  using AbstractWriteLog<ImageCtxT>::m_image_ctx;
  using AbstractWriteLog<ImageCtxT>::m_cache_state;
  using AbstractWriteLog<ImageCtxT>::m_first_free_entry;
  using AbstractWriteLog<ImageCtxT>::m_first_valid_entry;
  using AbstractWriteLog<ImageCtxT>::m_bytes_allocated;

  bool initialize_pool(Context *on_finish,
                       pwl::DeferredContexts &later) override;
  void process_work() override;
  void append_scheduled_ops(void) override;
  void schedule_append_ops(pwl::GenericLogOperations &ops, C_BlockIORequestT *req) override;
  void remove_pool_file() override;
  void release_ram(std::shared_ptr<GenericLogEntry> log_entry) override;

private:
  // Glue between BlockDevice's aio callback and a Context: completes
  // `on_finish` with the IOContext's return value, then self-destructs.
  class AioTransContext {
  public:
    Context *on_finish;
    ::IOContext ioc;
    explicit AioTransContext(CephContext* cct, Context *cb)
      : on_finish(cb), ioc(cct, this) {}

    ~AioTransContext(){}

    void aio_finish() {
      on_finish->complete(ioc.get_return_value());
      delete this;
    }
  }; //class AioTransContext

  // A pending pool-root update plus the Context to complete once the new
  // root has been persisted.
  struct WriteLogPoolRootUpdate {
    std::shared_ptr<pwl::WriteLogPoolRoot> root;
    Context *ctx;
    WriteLogPoolRootUpdate(std::shared_ptr<pwl::WriteLogPoolRoot> r,
                           Context* c)
      : root(r), ctx(c) {}
  };

  using WriteLogPoolRootUpdateList = std::list<std::shared_ptr<WriteLogPoolRootUpdate>>;
  WriteLogPoolRootUpdateList m_poolroot_to_update; /* pool roots queued for persisting to SSD */
  bool m_updating_pool_root = false;  // a root update write is in flight

  std::atomic<int> m_async_update_superblock = {0};
  BlockDevice *bdev = nullptr;        // raw cache device
  pwl::WriteLogPoolRoot pool_root;    // in-memory copy of the on-disk root
  Builder<This> *m_builderobj;

  Builder<This>* create_builder();
  int create_and_open_bdev();
  void load_existing_entries(pwl::DeferredContexts &later);
  void inc_allocated_cached_bytes(
      std::shared_ptr<pwl::GenericLogEntry> log_entry) override;
  void collect_read_extents(
      uint64_t read_buffer_offset, LogMapEntry<GenericWriteLogEntry> map_entry,
      std::vector<std::shared_ptr<GenericWriteLogEntry>> &log_entries_to_read,
      std::vector<bufferlist*> &bls_to_read, uint64_t entry_hit_length,
      Extent hit_extent, pwl::C_ReadRequest *read_ctx) override;
  void complete_read(
      std::vector<std::shared_ptr<GenericWriteLogEntry>> &log_entries_to_read,
      std::vector<bufferlist*> &bls_to_read, Context *ctx) override;
  void enlist_op_appender();
  bool retire_entries(const unsigned long int frees_per_tx);
  bool has_sync_point_logs(GenericLogOperations &ops);
  void append_op_log_entries(GenericLogOperations &ops);
  void alloc_op_log_entries(GenericLogOperations &ops);
  // NOTE: parameter name "entires_to_flush" is a historical typo kept for
  // consistency with the out-of-line definition.
  void construct_flush_entries(pwl::GenericLogEntries entires_to_flush,
                               DeferredContexts &post_unlock,
                               bool has_write_entry) override;
  void append_ops(GenericLogOperations &ops, Context *ctx,
                  uint64_t* new_first_free_entry);
  void write_log_entries(GenericLogEntriesVector log_entries,
                         AioTransContext *aio, uint64_t *pos);
  void schedule_update_root(std::shared_ptr<WriteLogPoolRoot> root,
                            Context *ctx);
  void enlist_op_update_root();
  void update_root_scheduled_ops();
  int update_pool_root_sync(std::shared_ptr<pwl::WriteLogPoolRoot> root);
  void update_pool_root(std::shared_ptr<WriteLogPoolRoot> root,
                        AioTransContext *aio);
  void aio_read_data_block(std::shared_ptr<GenericWriteLogEntry> log_entry,
                           bufferlist *bl, Context *ctx);
  void aio_read_data_blocks(std::vector<std::shared_ptr<GenericWriteLogEntry>> &log_entries,
                            std::vector<bufferlist *> &bls, Context *ctx);
  // Registered with the IOContext; forwards device-aio completion to the
  // owning AioTransContext.
  static void aio_cache_cb(void *priv, void *priv2) {
    AioTransContext *c = static_cast<AioTransContext*>(priv2);
    c->aio_finish();
  }
};//class WriteLog
} // namespace ssd
} // namespace pwl
} // namespace cache
} // namespace librbd
extern template class librbd::cache::pwl::ssd::WriteLog<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CACHE_PWL_SSD_WRITE_LOG
| 5,926 | 36.751592 | 92 | h |
null | ceph-main/src/librbd/crypto/BlockCrypto.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/crypto/BlockCrypto.h"
#include "include/byteorder.h"
#include "include/ceph_assert.h"
#include "include/scope_guard.h"
#include <bit>
#include <stdlib.h>
namespace librbd {
namespace crypto {
// Construct a block-based crypto engine over `data_cryptor`.
// `block_size` must be a power of two, a multiple of the cipher's block
// size, and a multiple of the 512-byte sector size (the IV is derived from
// the 512-byte sector number).
template <typename T>
BlockCrypto<T>::BlockCrypto(CephContext* cct, DataCryptor<T>* data_cryptor,
                            uint64_t block_size, uint64_t data_offset)
    : m_cct(cct), m_data_cryptor(data_cryptor), m_block_size(block_size),
      m_data_offset(data_offset), m_iv_size(data_cryptor->get_iv_size()) {
  ceph_assert(std::has_single_bit(block_size));
  ceph_assert((block_size % data_cryptor->get_block_size()) == 0);
  ceph_assert((block_size % 512) == 0);
}
// Destructor: BlockCrypto owns the data cryptor and releases it here.
// `delete` on a null pointer is a no-op, so no guard is needed, and
// re-nulling the member in a destructor is pointless.
template <typename T>
BlockCrypto<T>::~BlockCrypto() {
  delete m_data_cryptor;
}
// Encrypt or decrypt `data` in place, one crypto block at a time.
// Both `image_offset` and the data length must be block-aligned.  The IV
// for each block is its 512-byte sector number, little-endian (plain64
// style).  Input bufferlist segments need not be block-aligned: partial
// blocks are staged in `leftover_block` until a full block is assembled.
// Returns 0 on success, a negative error code otherwise.
template <typename T>
int BlockCrypto<T>::crypt(ceph::bufferlist* data, uint64_t image_offset,
                          CipherMode mode) {
  if (image_offset % m_block_size != 0) {
    lderr(m_cct) << "image offset: " << image_offset
                 << " not aligned to block size: " << m_block_size << dendl;
    return -EINVAL;
  }
  if (data->length() % m_block_size != 0) {
    lderr(m_cct) << "data length: " << data->length()
                 << " not aligned to block size: " << m_block_size << dendl;
    return -EINVAL;
  }

  unsigned char* iv = (unsigned char*)alloca(m_iv_size);
  memset(iv, 0, m_iv_size);

  // keep the input aside; `data` is rebuilt with the transformed output
  bufferlist src = *data;
  data->clear();

  auto ctx = m_data_cryptor->get_context(mode);
  if (ctx == nullptr) {
    lderr(m_cct) << "unable to get crypt context" << dendl;
    return -EIO;
  }

  // return the context to the cryptor on every exit path
  auto sg = make_scope_guard([&] {
      m_data_cryptor->return_context(ctx, mode); });

  auto sector_number = image_offset / 512;
  auto appender = data->get_contiguous_appender(src.length());
  unsigned char* out_buf_ptr = nullptr;
  unsigned char* leftover_block = (unsigned char*)alloca(m_block_size);
  uint32_t leftover_size = 0;
  for (auto buf = src.buffers().begin(); buf != src.buffers().end(); ++buf) {
    auto in_buf_ptr = reinterpret_cast<const unsigned char*>(buf->c_str());
    auto remaining_buf_bytes = buf->length();
    while (remaining_buf_bytes > 0) {
      if (leftover_size == 0) {
        // start of a new crypto block: set IV from the sector number and
        // reserve contiguous output space for one block
        auto block_offset_le = ceph_le64(sector_number);
        memcpy(iv, &block_offset_le, sizeof(block_offset_le));
        auto r = m_data_cryptor->init_context(ctx, iv, m_iv_size);
        if (r != 0) {
          lderr(m_cct) << "unable to init cipher's IV" << dendl;
          return r;
        }

        out_buf_ptr = reinterpret_cast<unsigned char*>(
                appender.get_pos_add(m_block_size));
        sector_number += m_block_size / 512;
      }

      if (leftover_size > 0 || remaining_buf_bytes < m_block_size) {
        // block straddles bufferlist segments: stage bytes until full
        auto copy_size = std::min(
                (uint32_t)m_block_size - leftover_size, remaining_buf_bytes);
        memcpy(leftover_block + leftover_size, in_buf_ptr, copy_size);
        in_buf_ptr += copy_size;
        leftover_size += copy_size;
        remaining_buf_bytes -= copy_size;
      }

      int crypto_output_length = 0;
      if (leftover_size == 0) {
        // full block available directly in the input segment
        crypto_output_length = m_data_cryptor->update_context(
                ctx, in_buf_ptr, out_buf_ptr, m_block_size);

        in_buf_ptr += m_block_size;
        remaining_buf_bytes -= m_block_size;
      } else if (leftover_size == m_block_size) {
        // staged block is complete; transform it
        crypto_output_length = m_data_cryptor->update_context(
                ctx, leftover_block, out_buf_ptr, m_block_size);
        leftover_size = 0;
      }

      if (crypto_output_length < 0) {
        lderr(m_cct) << "crypt update failed" << dendl;
        return crypto_output_length;
      }

      out_buf_ptr += crypto_output_length;
    }
  }

  return 0;
}
// Encrypt `data` in place; see crypt() for alignment requirements.
template <typename T>
int BlockCrypto<T>::encrypt(ceph::bufferlist* data, uint64_t image_offset) {
  return crypt(data, image_offset, CipherMode::CIPHER_MODE_ENC);
}
// Decrypt `data` in place; see crypt() for alignment requirements.
template <typename T>
int BlockCrypto<T>::decrypt(ceph::bufferlist* data, uint64_t image_offset) {
  return crypt(data, image_offset, CipherMode::CIPHER_MODE_DEC);
}
} // namespace crypto
} // namespace librbd
template class librbd::crypto::BlockCrypto<EVP_CIPHER_CTX>;
| 4,344 | 31.669173 | 77 | cc |
null | ceph-main/src/librbd/crypto/BlockCrypto.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CRYPTO_BLOCK_CRYPTO_H
#define CEPH_LIBRBD_CRYPTO_BLOCK_CRYPTO_H
#include "include/Context.h"
#include "librbd/crypto/CryptoInterface.h"
#include "librbd/crypto/openssl/DataCryptor.h"
namespace librbd {
namespace crypto {
// Block-granular CryptoInterface implementation backed by a DataCryptor.
// Owns the cryptor (released in the destructor).
template <typename T>
class BlockCrypto : public CryptoInterface {

public:
    // Factory helper.  `block_size` is uint64_t for consistency with the
    // constructor and m_block_size (the previous uint32_t parameter
    // silently narrowed; widening is backward-compatible for callers).
    static BlockCrypto* create(CephContext* cct, DataCryptor<T>* data_cryptor,
                               uint64_t block_size, uint64_t data_offset) {
      return new BlockCrypto(cct, data_cryptor, block_size, data_offset);
    }

    BlockCrypto(CephContext* cct, DataCryptor<T>* data_cryptor,
                uint64_t block_size, uint64_t data_offset);
    ~BlockCrypto();

    int encrypt(ceph::bufferlist* data, uint64_t image_offset) override;
    int decrypt(ceph::bufferlist* data, uint64_t image_offset) override;

    uint64_t get_block_size() const override {
      return m_block_size;
    }

    // Offset at which encrypted image data begins (past the crypto header).
    uint64_t get_data_offset() const override {
      return m_data_offset;
    }

    const unsigned char* get_key() const override {
      return m_data_cryptor->get_key();
    }

    int get_key_length() const override {
      return m_data_cryptor->get_key_length();
    }

private:
    CephContext* m_cct;
    DataCryptor<T>* m_data_cryptor;  // owned
    uint64_t m_block_size;
    uint64_t m_data_offset;
    uint32_t m_iv_size;

    // Shared encrypt/decrypt implementation.
    int crypt(ceph::bufferlist* data, uint64_t image_offset, CipherMode mode);
};
} // namespace crypto
} // namespace librbd
extern template class librbd::crypto::BlockCrypto<EVP_CIPHER_CTX>;
#endif //CEPH_LIBRBD_CRYPTO_BLOCK_CRYPTO_H
| 1,693 | 26.770492 | 78 | h |
null | ceph-main/src/librbd/crypto/CryptoContextPool.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/crypto/CryptoContextPool.h"
namespace librbd {
namespace crypto {
// Build a pool holding up to `pool_size` cipher contexts per direction
// (encrypt and decrypt), delegating creation/destruction to `data_cryptor`.
template <typename T>
CryptoContextPool<T>::CryptoContextPool(DataCryptor<T>* data_cryptor,
                                        uint32_t pool_size)
    : m_data_cryptor(data_cryptor), m_encrypt_contexts(pool_size),
      m_decrypt_contexts(pool_size) {
}
// Drain both queues, returning every pooled context to the underlying
// cryptor for disposal.
template <typename T>
CryptoContextPool<T>::~CryptoContextPool() {
  T* ctx;
  while (m_encrypt_contexts.pop(ctx)) {
    m_data_cryptor->return_context(ctx, CipherMode::CIPHER_MODE_ENC);
  }
  while (m_decrypt_contexts.pop(ctx)) {
    m_data_cryptor->return_context(ctx, CipherMode::CIPHER_MODE_DEC);
  }
}
// Hand out a cipher context for `mode`: reuse a pooled one when available,
// otherwise create a fresh context via the wrapped cryptor.
template <typename T>
T* CryptoContextPool<T>::get_context(CipherMode mode) {
  T* ctx = nullptr;
  if (get_contexts(mode).pop(ctx)) {
    return ctx;
  }
  return m_data_cryptor->get_context(mode);
}
// Put a context back into the pool; if the (bounded) queue is full, let the
// wrapped cryptor dispose of it instead.
template <typename T>
void CryptoContextPool<T>::return_context(T* ctx, CipherMode mode) {
  bool pooled = get_contexts(mode).push(ctx);
  if (!pooled) {
    m_data_cryptor->return_context(ctx, mode);
  }
}
} // namespace crypto
} // namespace librbd
| 1,174 | 25.111111 | 70 | cc |
null | ceph-main/src/librbd/crypto/CryptoContextPool.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CRYPTO_CRYPTO_CONTEXT_POOL_H
#define CEPH_LIBRBD_CRYPTO_CRYPTO_CONTEXT_POOL_H
#include "librbd/crypto/DataCryptor.h"
#include "include/ceph_assert.h"
#include <boost/lockfree/queue.hpp>
namespace librbd {
namespace crypto {
// DataCryptor decorator that recycles cipher contexts through lock-free
// queues (one per direction) to avoid repeated context setup/teardown.
// All other operations are forwarded to the wrapped cryptor, which is NOT
// owned by the pool.
template <typename T>
class CryptoContextPool : public DataCryptor<T> {
public:
    CryptoContextPool(DataCryptor<T>* data_cryptor, uint32_t pool_size);
    ~CryptoContextPool();

    T* get_context(CipherMode mode) override;
    void return_context(T* ctx, CipherMode mode) override;

    inline uint32_t get_block_size() const override {
      return m_data_cryptor->get_block_size();
    }

    inline uint32_t get_iv_size() const override {
      return m_data_cryptor->get_iv_size();
    }

    inline int get_key_length() const override {
      return m_data_cryptor->get_key_length();
    }

    inline const unsigned char* get_key() const override {
      return m_data_cryptor->get_key();
    }

    inline int init_context(T* ctx, const unsigned char* iv,
                            uint32_t iv_length) const override {
      return m_data_cryptor->init_context(ctx, iv, iv_length);
    }

    inline int update_context(T* ctx, const unsigned char* in,
                              unsigned char* out,
                              uint32_t len) const override {
      return m_data_cryptor->update_context(ctx, in, out, len);
    }

    using ContextQueue = boost::lockfree::queue<T*>;

private:
    DataCryptor<T>* m_data_cryptor;  // not owned
    ContextQueue m_encrypt_contexts;
    ContextQueue m_decrypt_contexts;

    // Select the queue for the given direction.
    // NOTE(review): the default branch asserts and then falls off the end
    // of a value-returning function — UB if ceph_assert is compiled out;
    // consider ceph_abort() here.
    inline ContextQueue& get_contexts(CipherMode mode) {
      switch(mode) {
        case CIPHER_MODE_ENC:
          return m_encrypt_contexts;
        case CIPHER_MODE_DEC:
          return m_decrypt_contexts;
        default:
          ceph_assert(false);
      }
    }
};
} // namespace crypto
} // namespace librbd
#endif // CEPH_LIBRBD_CRYPTO_CRYPTO_CONTEXT_POOL_H
| 2,036 | 28.521739 | 72 | h |
null | ceph-main/src/librbd/crypto/CryptoImageDispatch.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/crypto/CryptoImageDispatch.h"
namespace librbd {
namespace crypto {
// Record where encrypted image data begins; the crypto header occupies
// [0, data_offset) of the raw image.
CryptoImageDispatch::CryptoImageDispatch(
    uint64_t data_offset) : m_data_offset(data_offset) {
}
// Translate logical extents to raw (physical) image offsets: data-area
// extents are shifted past the crypto header, header extents map directly.
void CryptoImageDispatch::remap_to_physical(io::Extents& image_extents,
                                            io::ImageArea area) {
  switch (area) {
  case io::ImageArea::DATA:
    for (auto& [off, _] : image_extents) {
      off += m_data_offset;
    }
    break;
  case io::ImageArea::CRYPTO_HEADER:
    // direct mapping
    break;
  default:
    ceph_abort();
  }
}
// Translate raw image extents back to logical offsets and report which
// area they belong to.  Offsets at or past the data start are shifted back
// below the crypto header; a batch must not mix header and data extents.
io::ImageArea CryptoImageDispatch::remap_to_logical(
    io::Extents& image_extents) {
  size_t header_extents = 0;
  size_t data_extents = 0;
  for (auto& [off, _] : image_extents) {
    if (off < m_data_offset) {
      ++header_extents;
    } else {
      off -= m_data_offset;
      ++data_extents;
    }
  }
  if (header_extents > 0) {
    ceph_assert(data_extents == 0);
    return io::ImageArea::CRYPTO_HEADER;
  }
  return io::ImageArea::DATA;
}
} // namespace crypto
} // namespace librbd
| 1,172 | 22.46 | 71 | cc |
null | ceph-main/src/librbd/crypto/CryptoImageDispatch.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CRYPTO_CRYPTO_IMAGE_DISPATCH_H
#define CEPH_LIBRBD_CRYPTO_CRYPTO_IMAGE_DISPATCH_H
#include "librbd/io/ImageDispatchInterface.h"
namespace librbd {
namespace crypto {
// Placeholder crypto layer in the image dispatch chain.  Every dispatch
// hook returns false (pass-through); the layer only exists so the
// ImageDispatcher can call the offset remapping helpers directly.
class CryptoImageDispatch : public io::ImageDispatchInterface {
public:
  static CryptoImageDispatch* create(uint64_t data_offset) {
    return new CryptoImageDispatch(data_offset);
  }

  CryptoImageDispatch(uint64_t data_offset);

  io::ImageDispatchLayer get_dispatch_layer() const override {
    return io::IMAGE_DISPATCH_LAYER_CRYPTO;
  }

  // nothing to tear down
  void shut_down(Context* on_finish) override {
    on_finish->complete(0);
  }

  bool read(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      io::ReadResult &&read_result, IOContext io_context, int op_flags,
      int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

  bool write(
      io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

  bool discard(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

  bool write_same(
      io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

  bool compare_and_write(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

  bool flush(
      io::AioCompletion* aio_comp, io::FlushSource flush_source,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

  bool list_snaps(
      io::AioCompletion* aio_comp, io::Extents&& image_extents,
      io::SnapIds&& snap_ids, int list_snaps_flags,
      io::SnapshotDelta* snapshot_delta, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

  bool invalidate_cache(Context* on_finish) override {
    return false;
  }

  // called directly by ImageDispatcher
  // TODO: hoist these out and remove CryptoImageDispatch since it's
  // just a placeholder
  void remap_to_physical(io::Extents& image_extents, io::ImageArea area);
  io::ImageArea remap_to_logical(io::Extents& image_extents);

private:
  uint64_t m_data_offset;  // start of encrypted data past the crypto header

};
} // namespace crypto
} // namespace librbd
#endif // CEPH_LIBRBD_CRYPTO_CRYPTO_IMAGE_DISPATCH_H
| 3,772 | 32.6875 | 80 | h |
null | ceph-main/src/librbd/crypto/CryptoInterface.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CRYPTO_CRYPTO_INTERFACE_H
#define CEPH_LIBRBD_CRYPTO_CRYPTO_INTERFACE_H
#include "include/buffer.h"
#include "include/intarith.h"
#include "librbd/io/Types.h"
namespace librbd {
namespace crypto {
// Abstract encrypt/decrypt engine plus inline helpers for aligning I/O
// extents to the crypto block size.
class CryptoInterface {

public:
  virtual ~CryptoInterface() = default;

  virtual int encrypt(ceph::bufferlist* data, uint64_t image_offset) = 0;
  virtual int decrypt(ceph::bufferlist* data, uint64_t image_offset) = 0;
  virtual uint64_t get_block_size() const = 0;
  virtual uint64_t get_data_offset() const = 0;
  virtual const unsigned char* get_key() const = 0;
  virtual int get_key_length() const = 0;

  // Bytes needed before `off` and after `off + len` to reach crypto-block
  // boundaries; (0, 0) for a zero-length range.
  inline std::pair<uint64_t, uint64_t> get_pre_and_post_align(
      uint64_t off, uint64_t len) {
    if (len == 0) {
      return std::make_pair(0, 0);
    }
    auto block_size = get_block_size();
    return std::make_pair(p2phase(off, block_size),
                          p2nphase(off + len, block_size));
  }

  // Expand (off, len) to the smallest enclosing block-aligned range.
  inline std::pair<uint64_t, uint64_t> align(uint64_t off, uint64_t len) {
    auto aligns = get_pre_and_post_align(off, len);
    return std::make_pair(off - aligns.first,
                          len + aligns.first + aligns.second);
  }

  inline bool is_aligned(uint64_t off, uint64_t len) {
    auto aligns = get_pre_and_post_align(off, len);
    return aligns.first == 0 && aligns.second == 0;
  }

  inline bool is_aligned(const io::ReadExtents& extents) {
    for (const auto& extent: extents) {
      if (!is_aligned(extent.offset, extent.length)) {
        return false;
      }
    }
    return true;
  }

  inline void align_extents(const io::ReadExtents& extents,
                            io::ReadExtents* aligned_extents) {
    for (const auto& extent: extents) {
      auto aligned = align(extent.offset, extent.length);
      aligned_extents->emplace_back(aligned.first, aligned.second);
    }
  }

  // Decrypt a block-aligned (possibly sparse) read extent in place.
  // Sparse sub-extents are grouped into runs of contiguous aligned blocks,
  // zero-filled between pieces so whole blocks can be decrypted, and the
  // extent's bl / extent_map are rewritten to the decrypted runs.
  // `image_offset` is the image position of `extent.offset`.
  inline int decrypt_aligned_extent(io::ReadExtent& extent,
                                    uint64_t image_offset) {
    if (extent.length == 0 || extent.bl.length() == 0) {
      return 0;
    }

    if (extent.extent_map.empty()) {
      extent.extent_map.emplace_back(extent.offset, extent.bl.length());
    }

    ceph::bufferlist result_bl;
    io::Extents result_extent_map;

    ceph::bufferlist curr_block_bl;
    auto curr_offset = extent.offset;
    auto curr_block_start_offset = curr_offset;
    auto curr_block_end_offset = curr_offset;

    // this will add a final loop iteration for decrypting the last extent
    extent.extent_map.emplace_back(
            extent.offset + extent.length + get_block_size(), 0);

    for (auto [off, len]: extent.extent_map) {
      auto [aligned_off, aligned_len] = align(off, len);
      if (aligned_off > curr_block_end_offset) {
        // gap reaches past the current run: flush and decrypt it
        curr_block_bl.append_zero(curr_block_end_offset - curr_offset);
        auto curr_block_length = curr_block_bl.length();
        if (curr_block_length > 0) {
          auto r = decrypt(
                  &curr_block_bl,
                  image_offset + curr_block_start_offset - extent.offset);
          if (r != 0) {
            return r;
          }

          curr_block_bl.splice(0, curr_block_length, &result_bl);
          result_extent_map.emplace_back(
                  curr_block_start_offset, curr_block_length);
        }

        // start a new run at this piece's aligned position
        curr_block_start_offset = aligned_off;
        curr_block_end_offset = aligned_off + aligned_len;
        curr_offset = aligned_off;
      }

      // zero-fill up to this piece, then move its data into the run
      curr_block_bl.append_zero(off - curr_offset);
      extent.bl.splice(0, len, &curr_block_bl);
      curr_offset = off + len;
      curr_block_end_offset = aligned_off + aligned_len;
    }

    extent.bl = std::move(result_bl);
    extent.extent_map = std::move(result_extent_map);

    return 0;
  }
};
} // namespace crypto
} // namespace librbd
#endif // CEPH_LIBRBD_CRYPTO_CRYPTO_INTERFACE_H
| 3,953 | 30.380952 | 74 | h |
null | ceph-main/src/librbd/crypto/CryptoObjectDispatch.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/crypto/CryptoObjectDispatch.h"
#include "include/ceph_assert.h"
#include "include/neorados/RADOS.hpp"
#include "common/dout.h"
#include "osdc/Striper.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/crypto/CryptoInterface.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/io/ObjectDispatchSpec.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::crypto::CryptoObjectDispatch: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace crypto {
using librbd::util::create_context_callback;
using librbd::util::data_object_name;
// Map an (object_no, object_off) pair back to its image data-area offset
// via the striping layout; crypto objects always hold DATA-area bytes.
template <typename I>
uint64_t get_file_offset(I* image_ctx, uint64_t object_no,
                         uint64_t object_off) {
  auto off = io::util::raw_to_area_offset(
      *image_ctx, Striper::get_file_offset(image_ctx->cct, &image_ctx->layout,
                                           object_no, object_off));
  ceph_assert(off.second == io::ImageArea::DATA);
  return off.first;
}
// Read request whose extents are already crypto-block aligned: forwards the
// read to this layer's own dispatch entry (parent reads disabled), then
// decrypts each extent in place.  On -ENOENT, falls back to reading from
// the parent image if the caller allowed it.
template <typename I>
struct C_AlignedObjectReadRequest : public Context {
  I* image_ctx;
  CryptoInterface* crypto;
  uint64_t object_no;
  io::ReadExtents* extents;
  IOContext io_context;
  const ZTracer::Trace parent_trace;
  uint64_t* version;
  Context* on_finish;
  io::ObjectDispatchSpec* req;
  bool disable_read_from_parent;

  C_AlignedObjectReadRequest(
          I* image_ctx, CryptoInterface* crypto,
          uint64_t object_no, io::ReadExtents* extents, IOContext io_context,
          int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
          uint64_t* version, int* object_dispatch_flags,
          Context* on_dispatched
          ) : image_ctx(image_ctx), crypto(crypto), object_no(object_no),
              extents(extents), io_context(io_context),
              parent_trace(parent_trace), version(version),
              on_finish(on_dispatched) {
    // remember the caller's preference, then force parent reads off so the
    // fallback (with decryption) is handled here instead
    disable_read_from_parent =
            ((read_flags & io::READ_FLAG_DISABLE_READ_FROM_PARENT) != 0);
    read_flags |= io::READ_FLAG_DISABLE_READ_FROM_PARENT;

    auto ctx = create_context_callback<
            C_AlignedObjectReadRequest<I>,
            &C_AlignedObjectReadRequest<I>::handle_read>(this);
    req = io::ObjectDispatchSpec::create_read(
            image_ctx, io::OBJECT_DISPATCH_LAYER_CRYPTO, object_no,
            extents, io_context, op_flags, read_flags, parent_trace,
            version, ctx);
  }

  void send() {
    req->send();
  }

  void finish(int r) override {
    ldout(image_ctx->cct, 20) << "aligned read r=" << r << dendl;
    on_finish->complete(r);
  }

  void handle_read(int r) {
    auto cct = image_ctx->cct;
    ldout(cct, 20) << "aligned read r=" << r << dendl;
    if (r >= 0) {
      // decrypt each extent; on success r becomes the total byte count
      r = 0;
      for (auto& extent: *extents) {
        auto crypto_ret = crypto->decrypt_aligned_extent(
                extent,
                get_file_offset(image_ctx, object_no, extent.offset));
        if (crypto_ret != 0) {
          ceph_assert(crypto_ret < 0);
          r = crypto_ret;
          break;
        }
        r += extent.length;
      }
    }

    if (r == -ENOENT && !disable_read_from_parent) {
      // object absent: satisfy the read from the parent image
      io::util::read_parent<I>(
              image_ctx, object_no, extents,
              io_context->read_snap().value_or(CEPH_NOSNAP),
              parent_trace, this);
    } else {
      complete(r);
    }
  }
};
// Read request for extents that are not crypto-block aligned: expands each
// extent to block boundaries, sends the aligned read to the previous layer
// (so it passes back through the crypto layer for decryption), then trims
// the alignment padding from the results.
template <typename I>
struct C_UnalignedObjectReadRequest : public Context {
  CephContext* cct;
  io::ReadExtents* extents;
  Context* on_finish;
  io::ReadExtents aligned_extents;
  io::ObjectDispatchSpec* req;

  C_UnalignedObjectReadRequest(
          I* image_ctx, CryptoInterface* crypto,
          uint64_t object_no, io::ReadExtents* extents, IOContext io_context,
          int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
          uint64_t* version, int* object_dispatch_flags,
          Context* on_dispatched) : cct(image_ctx->cct), extents(extents),
                                    on_finish(on_dispatched) {
    crypto->align_extents(*extents, &aligned_extents);

    // send the aligned read back to get decrypted
    req = io::ObjectDispatchSpec::create_read(
            image_ctx,
            io::util::get_previous_layer(io::OBJECT_DISPATCH_LAYER_CRYPTO),
            object_no, &aligned_extents, io_context, op_flags, read_flags,
            parent_trace, version, this);
  }

  void send() {
    req->send();
  }

  // Copy each requested range out of its aligned (decrypted) counterpart,
  // dropping the pre/post alignment bytes and rebuilding sparse extent maps.
  void remove_alignment_data() {
    for (uint64_t i = 0; i < extents->size(); ++i) {
      auto& extent = (*extents)[i];
      auto& aligned_extent = aligned_extents[i];
      if (aligned_extent.extent_map.empty()) {
        // dense result: pad a short read, then cut out the requested range
        uint64_t cut_offset = extent.offset - aligned_extent.offset;
        int64_t padding_count =
                cut_offset + extent.length - aligned_extent.bl.length();
        if (padding_count > 0) {
          aligned_extent.bl.append_zero(padding_count);
        }
        aligned_extent.bl.splice(cut_offset, extent.length, &extent.bl);
      } else {
        // sparse result: intersect each piece with the requested range
        for (auto [off, len]: aligned_extent.extent_map) {
          ceph::bufferlist tmp;
          aligned_extent.bl.splice(0, len, &tmp);

          uint64_t bytes_to_skip = 0;
          if (off < extent.offset) {
            // piece starts before the requested range
            bytes_to_skip = extent.offset - off;
            if (len <= bytes_to_skip) {
              continue;
            }
            off += bytes_to_skip;
            len -= bytes_to_skip;
          }

          // clip the piece to the end of the requested range
          len = std::min(len, extent.offset + extent.length - off);
          if (len == 0) {
            continue;
          }

          if (len > 0) {
            tmp.splice(bytes_to_skip, len, &extent.bl);
            extent.extent_map.emplace_back(off, len);
          }
        }
      }
    }
  }

  void finish(int r) override {
    ldout(cct, 20) << "unaligned read r=" << r << dendl;
    if (r >= 0) {
      remove_alignment_data();

      // on success, report the total number of requested bytes
      r = 0;
      for (auto& extent: *extents) {
        r += extent.length;
      }
    }
    on_finish->complete(r);
  }
};
template <typename I>
struct C_UnalignedObjectWriteRequest : public Context {
I* image_ctx;
CryptoInterface* crypto;
uint64_t object_no;
uint64_t object_off;
ceph::bufferlist data;
ceph::bufferlist cmp_data;
uint64_t* mismatch_offset;
IOContext io_context;
int op_flags;
int write_flags;
std::optional<uint64_t> assert_version;
const ZTracer::Trace parent_trace;
int* object_dispatch_flags;
uint64_t* journal_tid;
Context* on_finish;
bool may_copyup;
ceph::bufferlist aligned_data;
io::ReadExtents extents;
uint64_t version;
C_UnalignedObjectReadRequest<I>* read_req;
bool object_exists;
C_UnalignedObjectWriteRequest(
I* image_ctx, CryptoInterface* crypto,
uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
ceph::bufferlist&& cmp_data, uint64_t* mismatch_offset,
IOContext io_context, int op_flags, int write_flags,
std::optional<uint64_t> assert_version,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, Context* on_dispatched, bool may_copyup
) : image_ctx(image_ctx), crypto(crypto), object_no(object_no),
object_off(object_off), data(data), cmp_data(cmp_data),
mismatch_offset(mismatch_offset), io_context(io_context),
op_flags(op_flags), write_flags(write_flags),
assert_version(assert_version), parent_trace(parent_trace),
object_dispatch_flags(object_dispatch_flags),
journal_tid(journal_tid), on_finish(on_dispatched),
may_copyup(may_copyup) {
// build read extents
auto [pre_align, post_align] = crypto->get_pre_and_post_align(
object_off, data.length());
if (pre_align != 0) {
extents.emplace_back(object_off - pre_align, pre_align);
}
if (post_align != 0) {
extents.emplace_back(object_off + data.length(), post_align);
}
if (cmp_data.length() != 0) {
extents.emplace_back(object_off, cmp_data.length());
}
auto ctx = create_context_callback<
C_UnalignedObjectWriteRequest<I>,
&C_UnalignedObjectWriteRequest<I>::handle_read>(this);
read_req = new C_UnalignedObjectReadRequest<I>(
image_ctx, crypto, object_no, &extents, io_context,
0, io::READ_FLAG_DISABLE_READ_FROM_PARENT, parent_trace,
&version, 0, ctx);
}
void send() {
read_req->send();
}
bool check_cmp_data() {
if (cmp_data.length() == 0) {
return true;
}
auto& cmp_extent = extents.back();
io::util::unsparsify(image_ctx->cct, &cmp_extent.bl,
cmp_extent.extent_map, cmp_extent.offset,
cmp_extent.length);
std::optional<uint64_t> found_mismatch = std::nullopt;
auto it1 = cmp_data.cbegin();
auto it2 = cmp_extent.bl.cbegin();
for (uint64_t idx = 0; idx < cmp_data.length(); ++idx) {
if (*it1 != *it2) {
found_mismatch = std::make_optional(idx);
break;
}
++it1;
++it2;
}
extents.pop_back();
if (found_mismatch.has_value()) {
if (mismatch_offset != nullptr) {
*mismatch_offset = found_mismatch.value();
}
complete(-EILSEQ);
return false;
}
return true;
}
bool check_create_exclusive() {
bool exclusive =
((write_flags & io::OBJECT_WRITE_FLAG_CREATE_EXCLUSIVE) != 0);
if (exclusive && object_exists) {
complete(-EEXIST);
return false;
}
return true;
}
bool check_version() {
int r = 0;
if (assert_version.has_value()) {
if (!object_exists) {
r = -ENOENT;
} else if (assert_version.value() < version) {
r = -ERANGE;
} else if (assert_version.value() > version) {
r = -EOVERFLOW;
}
}
if (r != 0) {
complete(r);
return false;
}
return true;
}
  // Assemble the block-aligned payload: [pre-align bytes read from disk]
  // + [caller data] + [post-align bytes read from disk].  The constructor
  // queued the pre-align extent first and the post-align extent last, so
  // front()/back() below match that ordering (the compare extent, if any,
  // was already popped by check_cmp_data()).
  void build_aligned_data() {
    auto [pre_align, post_align] = crypto->get_pre_and_post_align(
            object_off, data.length());
    if (pre_align != 0) {
      auto &extent = extents.front();
      io::util::unsparsify(image_ctx->cct, &extent.bl, extent.extent_map,
                           extent.offset, extent.length);
      // move the leading pad bytes into the output buffer
      extent.bl.splice(0, pre_align, &aligned_data);
    }
    aligned_data.append(data);
    if (post_align != 0) {
      auto &extent = extents.back();
      io::util::unsparsify(image_ctx->cct, &extent.bl, extent.extent_map,
                           extent.offset, extent.length);
      // move the trailing pad bytes into the output buffer
      extent.bl.splice(0, post_align, &aligned_data);
    }
  }
void handle_copyup(int r) {
ldout(image_ctx->cct, 20) << "r=" << r << dendl;
if (r < 0) {
complete(r);
} else {
restart_request(false);
}
}
  // Completion of the initial read.  Resolves object existence (possibly
  // triggering a parent copyup first), validates the write constraints,
  // then re-issues the request as an aligned write to the layer below
  // the crypto layer (where it gets encrypted).
  void handle_read(int r) {
    ldout(image_ctx->cct, 20) << "unaligned write r=" << r << dendl;
    if (r == -ENOENT) {
      if (may_copyup) {
        // object is missing - try to populate it from the parent so the
        // read-modify-write observes the parent's data
        auto ctx = create_context_callback<
                C_UnalignedObjectWriteRequest<I>,
                &C_UnalignedObjectWriteRequest<I>::handle_copyup>(this);
        if (io::util::trigger_copyup(
                image_ctx, object_no, io_context, ctx)) {
          return;
        }
        // no copyup was started; release the unused callback
        delete ctx;
      }
      object_exists = false;
    } else if (r < 0) {
      complete(r);
      return;
    } else {
      object_exists = true;
    }
    // each check completes the request itself on failure
    if (!check_create_exclusive() || !check_version() || !check_cmp_data()) {
      return;
    }
    build_aligned_data();
    auto aligned_off = crypto->align(object_off, data.length()).first;
    auto new_write_flags = write_flags;
    // guard the write against races: require the version we just read,
    // or exclusive creation if the object did not exist
    auto new_assert_version = std::make_optional(version);
    if (!object_exists) {
      new_write_flags |= io::OBJECT_WRITE_FLAG_CREATE_EXCLUSIVE;
      new_assert_version = std::nullopt;
    }
    auto ctx = create_context_callback<
            C_UnalignedObjectWriteRequest<I>,
            &C_UnalignedObjectWriteRequest<I>::handle_write>(this);
    // send back aligned write back to get encrypted and committed
    auto write_req = io::ObjectDispatchSpec::create_write(
            image_ctx,
            io::util::get_previous_layer(io::OBJECT_DISPATCH_LAYER_CRYPTO),
            object_no, aligned_off, std::move(aligned_data), io_context,
            op_flags, new_write_flags, new_assert_version,
            journal_tid == nullptr ? 0 : *journal_tid, parent_trace, ctx);
    write_req->send();
  }
  // Re-issue this request from scratch.  data and cmp_data are moved into
  // the new request; this object stays alive as the new request's
  // completion context and finishes when it does.
  void restart_request(bool may_copyup) {
    auto req = new C_UnalignedObjectWriteRequest<I>(
            image_ctx, crypto, object_no, object_off,
            std::move(data), std::move(cmp_data),
            mismatch_offset, io_context, op_flags, write_flags,
            assert_version, parent_trace,
            object_dispatch_flags, journal_tid, this, may_copyup);
    req->send();
  }
void handle_write(int r) {
ldout(image_ctx->cct, 20) << "r=" << r << dendl;
bool exclusive = write_flags & io::OBJECT_WRITE_FLAG_CREATE_EXCLUSIVE;
bool restart = false;
if (r == -ERANGE && !assert_version.has_value()) {
restart = true;
} else if (r == -EEXIST && !exclusive) {
restart = true;
}
if (restart) {
restart_request(may_copyup);
} else {
complete(r);
}
}
  // Propagate the final result to the dispatcher's completion context.
  void finish(int r) override {
    ldout(image_ctx->cct, 20) << "unaligned write r=" << r << dendl;
    on_finish->complete(r);
  }
};
template <typename I>
CryptoObjectDispatch<I>::CryptoObjectDispatch(
    I* image_ctx, CryptoInterface* crypto)
  : m_image_ctx(image_ctx), m_crypto(crypto) {
  // objects below this index hold the encryption header (before the
  // crypto data offset) and must bypass the crypto layer entirely
  m_data_offset_object_no = Striper::get_num_objects(image_ctx->layout,
                                                     crypto->get_data_offset());
}
// No asynchronous teardown is required for this layer.
template <typename I>
void CryptoObjectDispatch<I>::shut_down(Context* on_finish) {
  on_finish->complete(0);
}
// Dispatch an object read through the crypto layer: aligned reads are
// decrypted in place, unaligned reads go through the aligned
// read-and-extract path.  Returns false to pass header objects through.
template <typename I>
bool CryptoObjectDispatch<I>::read(
    uint64_t object_no, io::ReadExtents* extents, IOContext io_context,
    int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
    uint64_t* version, int* object_dispatch_flags,
    io::DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  if (object_no < m_data_offset_object_no) {
    // encryption-header objects are not encrypted
    return false;
  }

  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                 << *extents << dendl;
  ceph_assert(m_crypto != nullptr);

  *dispatch_result = io::DISPATCH_RESULT_COMPLETE;
  if (m_crypto->is_aligned(*extents)) {
    auto aligned_req = new C_AlignedObjectReadRequest<I>(
            m_image_ctx, m_crypto, object_no, extents, io_context,
            op_flags, read_flags, parent_trace, version,
            object_dispatch_flags, on_dispatched);
    aligned_req->send();
  } else {
    auto unaligned_req = new C_UnalignedObjectReadRequest<I>(
            m_image_ctx, m_crypto, object_no, extents, io_context,
            op_flags, read_flags, parent_trace, version,
            object_dispatch_flags, on_dispatched);
    unaligned_req->send();
  }

  return true;
}
// Dispatch an object write: block-aligned payloads are encrypted in
// place and continue to lower layers; unaligned payloads are converted
// to a read-modify-write via C_UnalignedObjectWriteRequest.
template <typename I>
bool CryptoObjectDispatch<I>::write(
    uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
    IOContext io_context, int op_flags, int write_flags,
    std::optional<uint64_t> assert_version,
    const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
    uint64_t* journal_tid, io::DispatchResult* dispatch_result,
    Context** on_finish, Context* on_dispatched) {
  if (object_no < m_data_offset_object_no) {
    // encryption-header objects are not encrypted
    return false;
  }
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                 << object_off << "~" << data.length() << dendl;
  ceph_assert(m_crypto != nullptr);
  if (m_crypto->is_aligned(object_off, data.length())) {
    auto r = m_crypto->encrypt(
            &data, get_file_offset(m_image_ctx, object_no, object_off));
    // on success the (now encrypted) write continues down the stack; on
    // failure the request is completed here with the encryption error
    *dispatch_result = r == 0 ? io::DISPATCH_RESULT_CONTINUE
                              : io::DISPATCH_RESULT_COMPLETE;
    on_dispatched->complete(r);
  } else {
    *dispatch_result = io::DISPATCH_RESULT_COMPLETE;
    auto req = new C_UnalignedObjectWriteRequest<I>(
            m_image_ctx, m_crypto, object_no, object_off, std::move(data), {},
            nullptr, io_context, op_flags, write_flags, assert_version,
            parent_trace, object_dispatch_flags, journal_tid, on_dispatched,
            true);
    req->send();
  }
  return true;
}
// Write-same cannot be encrypted as a repeating pattern (each block gets
// a different ciphertext), so expand it into a regular write and resend
// it through this layer.
template <typename I>
bool CryptoObjectDispatch<I>::write_same(
    uint64_t object_no, uint64_t object_off, uint64_t object_len,
    io::LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
    IOContext io_context, int op_flags,
    const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
    uint64_t* journal_tid, io::DispatchResult* dispatch_result,
    Context** on_finish, Context* on_dispatched) {
  if (object_no < m_data_offset_object_no) {
    // encryption-header objects are not encrypted
    return false;
  }
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                 << object_off << "~" << object_len << dendl;
  ceph_assert(m_crypto != nullptr);
  // convert to regular write
  io::LightweightObjectExtent extent(object_no, object_off, object_len, 0);
  extent.buffer_extents = std::move(buffer_extents);
  // materialize the repeated pattern into a full-length buffer
  bufferlist ws_data;
  io::util::assemble_write_same_extent(extent, data, &ws_data, true);
  auto ctx = new LambdaContext(
      [on_finish_ctx=on_dispatched](int r) {
          on_finish_ctx->complete(r);
      });
  *dispatch_result = io::DISPATCH_RESULT_COMPLETE;
  // re-enter the dispatcher one layer above crypto so the expanded write
  // gets encrypted by this layer's write() path
  auto req = io::ObjectDispatchSpec::create_write(
          m_image_ctx,
          io::util::get_previous_layer(io::OBJECT_DISPATCH_LAYER_CRYPTO),
          object_no, object_off, std::move(ws_data), io_context, op_flags, 0,
          std::nullopt, 0, parent_trace, ctx);
  req->send();
  return true;
}
// Compare-and-write always needs the plaintext on-disk data to compare
// against, so it is always handled by the unaligned write path (which
// performs the read, the compare, and the mismatch-offset reporting).
template <typename I>
bool CryptoObjectDispatch<I>::compare_and_write(
    uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
    ceph::bufferlist&& write_data, IOContext io_context, int op_flags,
    const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset,
    int* object_dispatch_flags, uint64_t* journal_tid,
    io::DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  if (object_no < m_data_offset_object_no) {
    // encryption-header objects are not encrypted
    return false;
  }
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                 << object_off << "~" << write_data.length()
                 << dendl;
  ceph_assert(m_crypto != nullptr);
  *dispatch_result = io::DISPATCH_RESULT_COMPLETE;
  auto req = new C_UnalignedObjectWriteRequest<I>(
          m_image_ctx, m_crypto, object_no, object_off, std::move(write_data),
          std::move(cmp_data), mismatch_offset, io_context, op_flags, 0,
          std::nullopt, parent_trace, object_dispatch_flags, journal_tid,
          on_dispatched, true);
  req->send();
  return true;
}
// Discarded ranges must read back as zeros after decryption, which means
// the stored ciphertext cannot simply be deallocated - so convert the
// discard into a write-same of zeros (encrypted by this layer on resend).
template <typename I>
bool CryptoObjectDispatch<I>::discard(
    uint64_t object_no, uint64_t object_off, uint64_t object_len,
    IOContext io_context, int discard_flags,
    const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
    uint64_t* journal_tid, io::DispatchResult* dispatch_result,
    Context** on_finish, Context* on_dispatched) {
  if (object_no < m_data_offset_object_no) {
    // encryption-header objects are not encrypted
    return false;
  }
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                 << object_off << "~" << object_len << dendl;
  ceph_assert(m_crypto != nullptr);
  // convert to write-same
  auto ctx = new LambdaContext(
      [on_finish_ctx=on_dispatched](int r) {
          on_finish_ctx->complete(r);
      });
  // 4 KiB zero pattern, repeated across the range by write-same
  bufferlist bl;
  const int buffer_size = 4096;
  bl.append_zero(buffer_size);
  *dispatch_result = io::DISPATCH_RESULT_COMPLETE;
  // NOTE(review): *object_dispatch_flags is forwarded in the op_flags
  // position of create_write_same - confirm against its signature
  auto req = io::ObjectDispatchSpec::create_write_same(
          m_image_ctx,
          io::util::get_previous_layer(io::OBJECT_DISPATCH_LAYER_CRYPTO),
          object_no, object_off, object_len, {{0, object_len}}, std::move(bl),
          io_context, *object_dispatch_flags, 0, parent_trace, ctx);
  req->send();
  return true;
}
// Encrypt copyup data before it is written out.  For each snapshot's
// sparse bufferlist: fold its extents into a full-object plaintext view
// (accumulated across snapshots in current_bl), encrypt each extent's
// block-aligned enclosure, and replace the plaintext extent map with the
// encrypted one.
template <typename I>
int CryptoObjectDispatch<I>::prepare_copyup(
    uint64_t object_no,
    io::SnapshotSparseBufferlist* snapshot_sparse_bufferlist) {
  if (object_no < m_data_offset_object_no) {
    // encryption-header objects are not encrypted
    return 0;
  }
  // running plaintext image of the whole object, carried from one
  // snapshot to the next
  ceph::bufferlist current_bl;
  current_bl.append_zero(m_image_ctx->get_object_size());
  for (auto& [key, extent_map]: *snapshot_sparse_bufferlist) {
    // update current_bl with data from extent_map
    for (auto& extent : extent_map) {
      auto &sbe = extent.get_val();
      if (sbe.state == io::SPARSE_EXTENT_STATE_DATA) {
        current_bl.begin(extent.get_off()).copy_in(extent.get_len(), sbe.bl);
      } else if (sbe.state == io::SPARSE_EXTENT_STATE_ZEROED) {
        ceph::bufferlist zeros;
        zeros.append_zero(extent.get_len());
        current_bl.begin(extent.get_off()).copy_in(extent.get_len(), zeros);
      }
    }
    // encrypt
    io::SparseBufferlist encrypted_sparse_bufferlist;
    for (auto& extent : extent_map) {
      // widen to cipher-block alignment so whole blocks are encrypted
      auto [aligned_off, aligned_len] = m_crypto->align(
              extent.get_off(), extent.get_len());
      // map the object range to image-level offsets (encryption IVs are
      // derived from the image offset)
      auto [image_extents, _] = io::util::object_to_area_extents(
              m_image_ctx, object_no, {{aligned_off, aligned_len}});
      ceph::bufferlist encrypted_bl;
      uint64_t position = 0;
      for (auto [image_offset, image_length]: image_extents) {
        ceph::bufferlist aligned_bl;
        aligned_bl.substr_of(current_bl, aligned_off + position, image_length);
        aligned_bl.rebuild(); // to deep copy aligned_bl from current_bl
        position += image_length;
        auto r = m_crypto->encrypt(&aligned_bl, image_offset);
        if (r != 0) {
          return r;
        }
        encrypted_bl.append(aligned_bl);
      }
      encrypted_sparse_bufferlist.insert(
              aligned_off, aligned_len, {io::SPARSE_EXTENT_STATE_DATA,
                                         aligned_len,
                                         std::move(encrypted_bl)});
    }
    // replace original plaintext sparse bufferlist with encrypted one
    extent_map.clear();
    extent_map.insert(std::move(encrypted_sparse_bufferlist));
  }
  return 0;
}
} // namespace crypto
} // namespace librbd
template class librbd::crypto::CryptoObjectDispatch<librbd::ImageCtx>;
| 23,404 | 32.822254 | 80 | cc |
null | ceph-main/src/librbd/crypto/CryptoObjectDispatch.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CRYPTO_CRYPTO_OBJECT_DISPATCH_H
#define CEPH_LIBRBD_CRYPTO_CRYPTO_OBJECT_DISPATCH_H
#include "librbd/crypto/CryptoInterface.h"
#include "librbd/io/Types.h"
#include "librbd/io/ObjectDispatchInterface.h"
namespace librbd {
struct ImageCtx;
namespace crypto {
template <typename ImageCtxT = librbd::ImageCtx>
class CryptoObjectDispatch : public io::ObjectDispatchInterface {
public:
static CryptoObjectDispatch* create(
ImageCtxT* image_ctx, CryptoInterface* crypto) {
return new CryptoObjectDispatch(image_ctx, crypto);
}
CryptoObjectDispatch(ImageCtxT* image_ctx,
CryptoInterface* crypto);
io::ObjectDispatchLayer get_dispatch_layer() const override {
return io::OBJECT_DISPATCH_LAYER_CRYPTO;
}
void shut_down(Context* on_finish) override;
bool read(
uint64_t object_no, io::ReadExtents* extents, IOContext io_context,
int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
uint64_t* version, int* object_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override;
bool discard(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
IOContext io_context, int discard_flags,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) override;
bool write(
uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
IOContext io_context, int op_flags, int write_flags,
std::optional<uint64_t> assert_version,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) override;
bool write_same(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
io::LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
IOContext io_context, int op_flags,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) override;
bool compare_and_write(
uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
ceph::bufferlist&& write_data, IOContext io_context, int op_flags,
const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset,
int* object_dispatch_flags, uint64_t* journal_tid,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override;
bool flush(
io::FlushSource flush_source, const ZTracer::Trace &parent_trace,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) override {
return false;
}
bool list_snaps(
uint64_t object_no, io::Extents&& extents, io::SnapIds&& snap_ids,
int list_snap_flags, const ZTracer::Trace &parent_trace,
io::SnapshotDelta* snapshot_delta, int* object_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override {
return false;
}
bool invalidate_cache(Context* on_finish) override {
return false;
}
bool reset_existence_cache(Context* on_finish) override {
return false;
}
void extent_overwritten(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
uint64_t journal_tid, uint64_t new_journal_tid) override {
}
int prepare_copyup(
uint64_t object_no,
io::SnapshotSparseBufferlist* snapshot_sparse_bufferlist) override;
private:
ImageCtxT* m_image_ctx;
CryptoInterface* m_crypto;
uint64_t m_data_offset_object_no;
};
} // namespace crypto
} // namespace librbd
extern template class librbd::crypto::CryptoObjectDispatch<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CRYPTO_CRYPTO_OBJECT_DISPATCH_H
| 4,094 | 34.301724 | 77 | h |
null | ceph-main/src/librbd/crypto/DataCryptor.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CRYPTO_DATA_CRYPTOR_H
#define CEPH_LIBRBD_CRYPTO_DATA_CRYPTOR_H
#include "include/int_types.h"
#include "librbd/crypto/Types.h"
namespace librbd {
namespace crypto {
template <typename T>
class DataCryptor {
public:
virtual ~DataCryptor() = default;
virtual uint32_t get_block_size() const = 0;
virtual uint32_t get_iv_size() const = 0;
virtual const unsigned char* get_key() const = 0;
virtual int get_key_length() const = 0;
virtual T* get_context(CipherMode mode) = 0;
virtual void return_context(T* ctx, CipherMode mode) = 0;
virtual int init_context(T* ctx, const unsigned char* iv,
uint32_t iv_length) const = 0;
virtual int update_context(T* ctx, const unsigned char* in,
unsigned char* out, uint32_t len) const = 0;
};
} // namespace crypto
} // namespace librbd
#endif // CEPH_LIBRBD_CRYPTO_DATA_CRYPTOR_H
| 1,018 | 25.815789 | 73 | h |
null | ceph-main/src/librbd/crypto/EncryptionFormat.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CRYPTO_ENCRYPTION_FORMAT_H
#define CEPH_LIBRBD_CRYPTO_ENCRYPTION_FORMAT_H
#include <memory>
struct Context;
namespace librbd {
namespace crypto {
struct CryptoInterface;
template <typename ImageCtxT>
struct EncryptionFormat {
virtual ~EncryptionFormat() {
}
virtual std::unique_ptr<EncryptionFormat<ImageCtxT>> clone() const = 0;
virtual void format(ImageCtxT* ictx, Context* on_finish) = 0;
virtual void load(ImageCtxT* ictx, std::string* detected_format_name,
Context* on_finish) = 0;
virtual void flatten(ImageCtxT* ictx, Context* on_finish) = 0;
virtual CryptoInterface* get_crypto() = 0;
};
} // namespace crypto
} // namespace librbd
#endif // CEPH_LIBRBD_CRYPTO_ENCRYPTION_FORMAT_H
| 851 | 24.058824 | 73 | h |
null | ceph-main/src/librbd/crypto/FormatRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "FormatRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/crypto/EncryptionFormat.h"
#include "librbd/crypto/ShutDownCryptoRequest.h"
#include "librbd/crypto/Utils.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/io/Types.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::crypto::FormatRequest: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace crypto {
using librbd::util::create_context_callback;
// Takes ownership of the format descriptor; on_finish is completed when
// the image has been formatted (or on error).
template <typename I>
FormatRequest<I>::FormatRequest(
        I* image_ctx, EncryptionFormat format,
        Context* on_finish) : m_image_ctx(image_ctx),
                              m_format(std::move(format)),
                              m_on_finish(on_finish) {
}
// Entry point: validate preconditions (no journaling; re-format of a
// clone with loaded encryption is not allowed), unload any currently
// loaded crypto, then write the new format header.
template <typename I>
void FormatRequest<I>::send() {
  if (m_image_ctx->test_features(RBD_FEATURE_JOURNALING)) {
    lderr(m_image_ctx->cct) << "cannot use encryption with journal" << dendl;
    finish(-ENOTSUP);
    return;
  }
  if (m_image_ctx->encryption_format.get() == nullptr) {
    // nothing loaded - format directly
    format();
    return;
  } else if (m_image_ctx->parent != nullptr) {
    lderr(m_image_ctx->cct) << "cannot format a cloned image "
                               "while encryption is loaded"
                            << dendl;
    finish(-EINVAL);
    return;
  }
  // unload the currently active crypto before re-formatting
  auto ctx = create_context_callback<
          FormatRequest<I>, &FormatRequest<I>::handle_shutdown_crypto>(this);
  auto *req = ShutDownCryptoRequest<I>::create(m_image_ctx, ctx);
  req->send();
}
// Completion of the crypto unload: proceed to formatting on success.
template <typename I>
void FormatRequest<I>::handle_shutdown_crypto(int r) {
  ldout(m_image_ctx->cct, 20) << "r=" << r << dendl;
  if (r == 0) {
    format();
    return;
  }
  lderr(m_image_ctx->cct) << "unable to unload existing crypto: "
                          << cpp_strerror(r) << dendl;
  finish(r);
}
// Delegate header creation to the format implementation.
template <typename I>
void FormatRequest<I>::format() {
  auto ctx = create_context_callback<
          FormatRequest<I>, &FormatRequest<I>::handle_format>(this);
  m_format->format(m_image_ctx, ctx);
}
// Completion of the header write: flush the image on success.
template <typename I>
void FormatRequest<I>::handle_format(int r) {
  ldout(m_image_ctx->cct, 20) << "r=" << r << dendl;
  if (r == 0) {
    flush();
    return;
  }
  lderr(m_image_ctx->cct) << "unable to format image: " << cpp_strerror(r)
                          << dendl;
  finish(r);
}
// Flush the image so the freshly written format header is persisted
// before crypto is activated.
template <typename I>
void FormatRequest<I>::flush() {
  auto ctx = create_context_callback<
          FormatRequest<I>, &FormatRequest<I>::handle_flush>(this);
  auto aio_comp = io::AioCompletion::create_and_start(
          ctx, librbd::util::get_image_ctx(m_image_ctx), io::AIO_TYPE_FLUSH);
  auto req = io::ImageDispatchSpec::create_flush(
          *m_image_ctx, io::IMAGE_DISPATCH_LAYER_INTERNAL_START, aio_comp,
          io::FLUSH_SOURCE_INTERNAL, {});
  req->send();
}
// Completion of the post-format flush; either way the request finishes.
template <typename I>
void FormatRequest<I>::handle_flush(int r) {
  ldout(m_image_ctx->cct, 20) << "r=" << r << dendl;
  if (r == 0) {
    finish(0);
    return;
  }
  lderr(m_image_ctx->cct) << "unable to flush image: " << cpp_strerror(r)
                          << dendl;
  finish(r);
}
// Complete the request.  On success the new format is activated
// immediately, but only for flat images.
template <typename I>
void FormatRequest<I>::finish(int r) {
  ldout(m_image_ctx->cct, 20) << "r=" << r << dendl;
  if (r == 0 && m_image_ctx->parent == nullptr) {
    // only load on flat images, to avoid a case where encryption
    // is wrongfully loaded only on the child image
    util::set_crypto(m_image_ctx, std::move(m_format));
  }
  m_on_finish->complete(r);
  delete this;
}
} // namespace crypto
} // namespace librbd
template class librbd::crypto::FormatRequest<librbd::ImageCtx>;
| 3,873 | 27.277372 | 77 | cc |
null | ceph-main/src/librbd/crypto/FormatRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CRYPTO_FORMAT_REQUEST_H
#define CEPH_LIBRBD_CRYPTO_FORMAT_REQUEST_H
#include "include/rbd/librbd.hpp"
#include "librbd/ImageCtx.h"
struct Context;
namespace librbd {
class ImageCtx;
namespace crypto {
template <typename I>
class FormatRequest {
public:
using EncryptionFormat = decltype(I::encryption_format);
static FormatRequest* create(
I* image_ctx, EncryptionFormat format, Context* on_finish) {
return new FormatRequest(image_ctx, std::move(format), on_finish);
}
FormatRequest(I* image_ctx, EncryptionFormat format, Context* on_finish);
void send();
void handle_shutdown_crypto(int r);
void format();
void handle_format(int r);
void flush();
void handle_flush(int r);
void finish(int r);
private:
I* m_image_ctx;
EncryptionFormat m_format;
Context* m_on_finish;
};
} // namespace crypto
} // namespace librbd
extern template class librbd::crypto::FormatRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CRYPTO_FORMAT_REQUEST_H
| 1,139 | 21.8 | 77 | h |
null | ceph-main/src/librbd/crypto/LoadRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "LoadRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/Utils.h"
#include "librbd/ImageCtx.h"
#include "librbd/crypto/EncryptionFormat.h"
#include "librbd/crypto/Types.h"
#include "librbd/crypto/Utils.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/Types.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::crypto::LoadRequest: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace crypto {
using librbd::util::create_context_callback;
// Takes ownership of one format descriptor per image in the clone chain
// (child first); on_finish is completed once all are loaded.
template <typename I>
LoadRequest<I>::LoadRequest(
        I* image_ctx, std::vector<EncryptionFormat>&& formats,
        Context* on_finish) : m_image_ctx(image_ctx),
                              m_on_finish(on_finish),
                              m_format_idx(0),
                              m_is_current_format_cloned(false),
                              m_formats(std::move(formats)) {
}
// Entry point: validate inputs (at least one format, nothing already
// loaded, no journaling anywhere in the clone chain), then start loading
// from the child image.
template <typename I>
void LoadRequest<I>::send() {
  if (m_formats.empty()) {
    lderr(m_image_ctx->cct) << "no encryption formats were specified" << dendl;
    finish(-EINVAL);
    return;
  }
  ldout(m_image_ctx->cct, 20) << "got " << m_formats.size() << " formats"
                              << dendl;
  if (m_image_ctx->encryption_format.get() != nullptr) {
    lderr(m_image_ctx->cct) << "encryption already loaded" << dendl;
    finish(-EEXIST);
    return;
  }
  // journaling is incompatible with encryption on every image in the chain
  auto ictx = m_image_ctx;
  while (ictx != nullptr) {
    if (ictx->test_features(RBD_FEATURE_JOURNALING)) {
      lderr(m_image_ctx->cct) << "cannot use encryption with journal."
                              << " image name: " << ictx->name << dendl;
      finish(-ENOTSUP);
      return;
    }
    ictx = ictx->parent;
  }
  m_current_image_ctx = m_image_ctx;
  flush();
}
// Flush in-flight I/O so no plaintext writes race the crypto activation.
template <typename I>
void LoadRequest<I>::flush() {
  auto ctx = create_context_callback<
          LoadRequest<I>, &LoadRequest<I>::handle_flush>(this);
  auto aio_comp = io::AioCompletion::create_and_start(
          ctx, librbd::util::get_image_ctx(m_image_ctx), io::AIO_TYPE_FLUSH);
  auto req = io::ImageDispatchSpec::create_flush(
          *m_image_ctx, io::IMAGE_DISPATCH_LAYER_INTERNAL_START, aio_comp,
          io::FLUSH_SOURCE_INTERNAL, {});
  req->send();
}
// Completion of the pre-load flush: begin loading formats on success.
template <typename I>
void LoadRequest<I>::handle_flush(int r) {
  ldout(m_image_ctx->cct, 20) << "r=" << r << dendl;
  if (r >= 0) {
    load();
    return;
  }
  lderr(m_image_ctx->cct) << "failed to flush image" << dendl;
  finish(r);
}
// Load the current format against the current image in the chain; the
// implementation reports the detected format name for diagnostics.
template <typename I>
void LoadRequest<I>::load() {
  ldout(m_image_ctx->cct, 20) << "format_idx=" << m_format_idx << dendl;
  m_detected_format_name = "";
  auto ctx = create_context_callback<
          LoadRequest<I>, &LoadRequest<I>::handle_load>(this);
  m_formats[m_format_idx]->load(m_current_image_ctx, &m_detected_format_name,
                                ctx);
}
// Completion of one format load.  Walks up the clone chain, cloning the
// last supplied format for ancestors when fewer formats than ancestors
// were given; a cloned format that detects no header falls back to
// treating that ancestor as plaintext.
template <typename I>
void LoadRequest<I>::handle_load(int r) {
  ldout(m_image_ctx->cct, 20) << "r=" << r << dendl;
  if (r < 0) {
    if (m_is_current_format_cloned &&
        m_detected_format_name == UNKNOWN_FORMAT) {
      // encryption format was not detected, assume plaintext
      ldout(m_image_ctx->cct, 5) << "assuming plaintext for image "
                                 << m_current_image_ctx->name << dendl;
      m_formats.pop_back();
      invalidate_cache();
      return;
    }
    lderr(m_image_ctx->cct) << "failed to load encryption. image name: "
                            << m_current_image_ctx->name << dendl;
    finish(r);
    return;
  }
  ldout(m_image_ctx->cct, 5) << "loaded format " << m_detected_format_name
                             << (m_is_current_format_cloned ? " (cloned)" : "")
                             << " for image " << m_current_image_ctx->name
                             << dendl;
  m_format_idx++;
  m_current_image_ctx = m_current_image_ctx->parent;
  if (m_current_image_ctx != nullptr) {
    // move on to loading parent
    if (m_format_idx >= m_formats.size()) {
      // try to load next ancestor using the same format
      ldout(m_image_ctx->cct, 20) << "cloning format" << dendl;
      m_is_current_format_cloned = true;
      m_formats.push_back(m_formats[m_formats.size() - 1]->clone());
    }
    load();
  } else {
    // end of the chain - every supplied format must have been consumed
    if (m_formats.size() != m_format_idx) {
      lderr(m_image_ctx->cct) << "got " << m_formats.size()
                              << " encryption specs to load, "
                              << "but image has " << m_format_idx - 1
                              << " ancestors" << dendl;
      finish(-EINVAL);
      return;
    }
    invalidate_cache();
  }
}
// Drop cached (plaintext-era) data before crypto takes effect.
template <typename I>
void LoadRequest<I>::invalidate_cache() {
  auto ctx = create_context_callback<
          LoadRequest<I>, &LoadRequest<I>::handle_invalidate_cache>(this);
  m_image_ctx->io_image_dispatcher->invalidate_cache(ctx);
}
// Completion of the cache invalidation; finishes with its result.
template <typename I>
void LoadRequest<I>::handle_invalidate_cache(int r) {
  ldout(m_image_ctx->cct, 20) << "r=" << r << dendl;
  if (r < 0) {
    lderr(m_image_ctx->cct) << "failed to invalidate image cache" << dendl;
  }
  finish(r);
}
// Complete the request.  On success each loaded format is activated on
// its corresponding image in the clone chain (child first).
template <typename I>
void LoadRequest<I>::finish(int r) {
  ldout(m_image_ctx->cct, 20) << "r=" << r << dendl;
  if (r == 0) {
    auto ictx = m_image_ctx;
    for (auto& format : m_formats) {
      util::set_crypto(ictx, std::move(format));
      ictx = ictx->parent;
    }
  }
  m_on_finish->complete(r);
  delete this;
}
} // namespace crypto
} // namespace librbd
template class librbd::crypto::LoadRequest<librbd::ImageCtx>;
| 5,769 | 28.438776 | 79 | cc |
null | ceph-main/src/librbd/crypto/LoadRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CRYPTO_LOAD_REQUEST_H
#define CEPH_LIBRBD_CRYPTO_LOAD_REQUEST_H
#include "include/rbd/librbd.hpp"
#include "librbd/ImageCtx.h"
struct Context;
namespace librbd {
class ImageCtx;
namespace crypto {
template <typename I>
class LoadRequest {
public:
using EncryptionFormat = decltype(I::encryption_format);
static constexpr char UNKNOWN_FORMAT[] = "<unknown>";
static LoadRequest* create(
I* image_ctx, std::vector<EncryptionFormat>&& formats,
Context* on_finish) {
return new LoadRequest(image_ctx, std::move(formats), on_finish);
}
LoadRequest(I* image_ctx, std::vector<EncryptionFormat>&& formats,
Context* on_finish);
void send();
void flush();
void handle_flush(int r);
void load();
void handle_load(int r);
void invalidate_cache();
void handle_invalidate_cache(int r);
void finish(int r);
private:
I* m_image_ctx;
Context* m_on_finish;
size_t m_format_idx;
bool m_is_current_format_cloned;
std::vector<EncryptionFormat> m_formats;
I* m_current_image_ctx;
std::string m_detected_format_name;
};
} // namespace crypto
} // namespace librbd
extern template class librbd::crypto::LoadRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CRYPTO_LOAD_REQUEST_H
| 1,413 | 22.966102 | 71 | h |
null | ceph-main/src/librbd/crypto/ShutDownCryptoRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "ShutDownCryptoRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/Utils.h"
#include "librbd/crypto/CryptoImageDispatch.h"
#include "librbd/crypto/CryptoObjectDispatch.h"
#include "librbd/crypto/EncryptionFormat.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::crypto::ShutDownCryptoRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace crypto {
using librbd::util::create_context_callback;
// on_finish is completed once crypto is unloaded from the whole chain.
template <typename I>
ShutDownCryptoRequest<I>::ShutDownCryptoRequest(I* image_ctx,
                                                Context* on_finish)
  : m_image_ctx(image_ctx), m_on_finish(on_finish) {}
// Entry point: start by removing the crypto object dispatch layer.
template <typename I>
void ShutDownCryptoRequest<I>::send() {
  shut_down_object_dispatch();
}
// Remove the crypto object dispatch layer if present; if it was never
// registered, skip straight to completion.
template <typename I>
void ShutDownCryptoRequest<I>::shut_down_object_dispatch() {
  if (!m_image_ctx->io_object_dispatcher->exists(
          io::OBJECT_DISPATCH_LAYER_CRYPTO)) {
    finish(0);
    return;
  }
  auto ctx = create_context_callback<
          ShutDownCryptoRequest<I>,
          &ShutDownCryptoRequest<I>::handle_shut_down_object_dispatch>(this);
  m_image_ctx->io_object_dispatcher->shut_down_dispatch(
          io::OBJECT_DISPATCH_LAYER_CRYPTO, ctx);
}
// Completion of the object-dispatch teardown: continue with the image
// dispatch layer on success.
template <typename I>
void ShutDownCryptoRequest<I>::handle_shut_down_object_dispatch(int r) {
  ldout(m_image_ctx->cct, 20) << "r=" << r << dendl;
  if (r >= 0) {
    shut_down_image_dispatch();
    return;
  }
  lderr(m_image_ctx->cct) << "failed to shut down object dispatch: "
                          << cpp_strerror(r) << dendl;
  finish(r);
}
// Remove the crypto image dispatch layer if present; if it was never
// registered, complete immediately.
template <typename I>
void ShutDownCryptoRequest<I>::shut_down_image_dispatch() {
  if (!m_image_ctx->io_image_dispatcher->exists(
          io::IMAGE_DISPATCH_LAYER_CRYPTO)) {
    finish(0);
    return;
  }
  auto ctx = create_context_callback<
          ShutDownCryptoRequest<I>,
          &ShutDownCryptoRequest<I>::handle_shut_down_image_dispatch>(this);
  m_image_ctx->io_image_dispatcher->shut_down_dispatch(
          io::IMAGE_DISPATCH_LAYER_CRYPTO, ctx);
}
// Completion of the image-dispatch teardown; finishes with its result.
template <typename I>
void ShutDownCryptoRequest<I>::handle_shut_down_image_dispatch(int r) {
  ldout(m_image_ctx->cct, 20) << "r=" << r << dendl;
  if (r < 0) {
    lderr(m_image_ctx->cct) << "failed to shut down image dispatch: "
                            << cpp_strerror(r) << dendl;
  }
  finish(r);
}
// On success for the current image: drop its format, then recurse into
// the parent (restarting the teardown sequence there) until the whole
// clone chain is clean.  Self-deleting on final completion.
template <typename I>
void ShutDownCryptoRequest<I>::finish(int r) {
  ldout(m_image_ctx->cct, 20) << "r=" << r << dendl;

  if (r == 0) {
    {
      std::unique_lock image_locker{m_image_ctx->image_lock};
      m_image_ctx->encryption_format.reset();
    }

    if (m_image_ctx->parent != nullptr) {
      // move to shutting down parent crypto
      m_image_ctx = m_image_ctx->parent;
      shut_down_object_dispatch();
      return;
    }
  }

  m_on_finish->complete(r);
  delete this;
}
} // namespace crypto
} // namespace librbd
template class librbd::crypto::ShutDownCryptoRequest<librbd::ImageCtx>;
| 3,252 | 26.803419 | 77 | cc |
null | ceph-main/src/librbd/crypto/ShutDownCryptoRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CRYPTO_SHUT_DOWN_CRYPTO_REQUEST_H
#define CEPH_LIBRBD_CRYPTO_SHUT_DOWN_CRYPTO_REQUEST_H
#include "librbd/ImageCtx.h"
struct Context;
namespace librbd {
class ImageCtx;
namespace crypto {
template <typename I>
class ShutDownCryptoRequest {
public:
static ShutDownCryptoRequest* create(I* image_ctx, Context* on_finish) {
return new ShutDownCryptoRequest(image_ctx, on_finish);
}
ShutDownCryptoRequest(I* image_ctx, Context* on_finish);
void send();
void shut_down_object_dispatch();
void handle_shut_down_object_dispatch(int r);
void shut_down_image_dispatch();
void handle_shut_down_image_dispatch(int r);
void finish(int r);
private:
I* m_image_ctx;
Context* m_on_finish;
};
} // namespace crypto
} // namespace librbd
extern template class librbd::crypto::ShutDownCryptoRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CRYPTO_SHUT_DOWN_CRYPTO_REQUEST_H
| 1,036 | 22.568182 | 78 | h |
null | ceph-main/src/librbd/crypto/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CRYPTO_TYPES_H
#define CEPH_LIBRBD_CRYPTO_TYPES_H
namespace librbd {
namespace crypto {
// Direction selector for a cipher operation.
enum CipherMode {
    CIPHER_MODE_ENC,  // encrypt (plaintext -> ciphertext)
    CIPHER_MODE_DEC,  // decrypt (ciphertext -> plaintext)
};
} // namespace crypto
} // namespace librbd
#endif // CEPH_LIBRBD_CRYPTO_DATA_CRYPTOR_H
| 362 | 18.105263 | 70 | h |
null | ceph-main/src/librbd/crypto/Utils.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "Utils.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/crypto/BlockCrypto.h"
#include "librbd/crypto/CryptoImageDispatch.h"
#include "librbd/crypto/CryptoInterface.h"
#include "librbd/crypto/CryptoObjectDispatch.h"
#include "librbd/crypto/EncryptionFormat.h"
#include "librbd/crypto/openssl/DataCryptor.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::crypto::util: " << __func__ << ": "
namespace librbd {
namespace crypto {
namespace util {
template <typename I>
void set_crypto(I *image_ctx,
decltype(I::encryption_format) encryption_format) {
std::unique_lock image_locker{image_ctx->image_lock};
ceph_assert(!image_ctx->encryption_format);
auto crypto = encryption_format->get_crypto();
auto object_dispatch = CryptoObjectDispatch<I>::create(image_ctx, crypto);
auto image_dispatch = CryptoImageDispatch::create(crypto->get_data_offset());
image_ctx->io_object_dispatcher->register_dispatch(object_dispatch);
image_ctx->io_image_dispatcher->register_dispatch(image_dispatch);
image_ctx->encryption_format = std::move(encryption_format);
}
// Build a BlockCrypto from a raw volume key. XTS keys are double length:
// a 32-byte key selects aes-128-xts, a 64-byte key selects aes-256-xts.
// On success *result_crypto owns the new crypto object; returns 0 or -errno.
int build_crypto(
        CephContext* cct, const unsigned char* key, uint32_t key_length,
        uint64_t block_size, uint64_t data_offset,
        std::unique_ptr<CryptoInterface>* result_crypto) {
  const char* cipher_suite = nullptr;
  switch (key_length) {
    case 32:
      cipher_suite = "aes-128-xts";
      break;
    case 64:
      cipher_suite = "aes-256-xts";
      break;
    default:
      lderr(cct) << "unsupported key length: " << key_length << dendl;
      return -ENOTSUP;
  }

  // hold the cryptor in a unique_ptr so early returns cannot leak it
  std::unique_ptr<openssl::DataCryptor> data_cryptor(
      new openssl::DataCryptor(cct));
  int r = data_cryptor->init(cipher_suite, key, key_length);
  if (r != 0) {
    lderr(cct) << "error initializing data cryptor: " << cpp_strerror(r)
               << dendl;
    return r;
  }

  // BlockCrypto takes ownership of the cryptor
  result_crypto->reset(BlockCrypto<EVP_CIPHER_CTX>::create(
      cct, data_cryptor.release(), block_size, data_offset));
  return 0;
}
} // namespace util
} // namespace crypto
} // namespace librbd
template void librbd::crypto::util::set_crypto(
librbd::ImageCtx *image_ctx,
std::unique_ptr<EncryptionFormat<librbd::ImageCtx>> encryption_format);
| 2,503 | 30.3 | 79 | cc |
null | ceph-main/src/librbd/crypto/Utils.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CRYPTO_UTILS_H
#define CEPH_LIBRBD_CRYPTO_UTILS_H
#include "include/Context.h"
namespace librbd {
struct ImageCtx;
namespace crypto {
class CryptoInterface;
template <typename> class EncryptionFormat;
namespace util {
// Attach a loaded encryption format to an image (registers the crypto
// dispatch layers); defined in Utils.cc.
template <typename ImageCtxT = librbd::ImageCtx>
void set_crypto(ImageCtxT *image_ctx,
                decltype(ImageCtxT::encryption_format) encryption_format);
// Build a CryptoInterface from a raw volume key (32 -> AES-128-XTS,
// 64 -> AES-256-XTS); returns 0 or a negative error code.
int build_crypto(
        CephContext* cct, const unsigned char* key, uint32_t key_length,
        uint64_t block_size, uint64_t data_offset,
        std::unique_ptr<CryptoInterface>* result_crypto);
} // namespace util
} // namespace crypto
} // namespace librbd
#endif // CEPH_LIBRBD_CRYPTO_UTILS_H
| 810 | 22.852941 | 74 | h |
null | ceph-main/src/librbd/crypto/luks/FlattenRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "FlattenRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/Utils.h"
#include "librbd/crypto/EncryptionFormat.h"
#include "librbd/crypto/Utils.h"
#include "librbd/crypto/luks/LoadRequest.h"
#include "librbd/crypto/luks/Magic.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/ReadResult.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::crypto::luks::FlattenRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace crypto {
namespace luks {
using librbd::util::create_context_callback;
template <typename I>
FlattenRequest<I>::FlattenRequest(
        I* image_ctx, Context* on_finish) : m_image_ctx(image_ctx),
                                            m_on_finish(on_finish) {
  // crypto flatten only makes sense when an encryption format is loaded
  ceph_assert(m_image_ctx->encryption_format.get() != nullptr);
}
template <typename I>
void FlattenRequest<I>::send() {
  // kick off the state machine: READ_HEADER -> WRITE_HEADER -> FLUSH
  read_header();
}
template <typename I>
void FlattenRequest<I>::read_header() {
  // read the whole crypto header area ([0, data_offset)) into m_bl
  auto ctx = create_context_callback<
          FlattenRequest<I>, &FlattenRequest<I>::handle_read_header>(this);
  auto aio_comp = io::AioCompletion::create_and_start(
          ctx, librbd::util::get_image_ctx(m_image_ctx), io::AIO_TYPE_READ);
  auto crypto = m_image_ctx->encryption_format->get_crypto();
  ZTracer::Trace trace;
  auto req = io::ImageDispatchSpec::create_read(
          *m_image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, aio_comp,
          {{0, crypto->get_data_offset()}}, io::ImageArea::CRYPTO_HEADER,
          io::ReadResult{&m_bl}, m_image_ctx->get_data_io_context(), 0, 0,
          trace);
  req->send();
}
template <typename I>
void FlattenRequest<I>::handle_read_header(int r) {
  ldout(m_image_ctx->cct, 20) << "r=" << r << dendl;
  if (r < 0) {
    lderr(m_image_ctx->cct) << "error reading from image: " << cpp_strerror(r)
                            << dendl;
    finish(r);
    return;
  }
  // is_rbd_clone(): >0 if the header carries the clone magic, 0 if not,
  // <0 on parse error
  r = Magic::is_rbd_clone(m_bl);
  if (r < 0) {
    lderr(m_image_ctx->cct) << "unable to determine encryption header magic: "
                            << cpp_strerror(r) << dendl;
    finish(r);
    return;
  } else if (r > 0) {
    // switch magic
    r = Magic::replace_magic(m_image_ctx->cct, m_bl);
    if (r < 0) {
      lderr(m_image_ctx->cct) << "unable to restore header magic: "
                              << cpp_strerror(r) << dendl;
      finish(r);
      return;
    }
  }
  // write the (possibly magic-restored) header back in place
  write_header();
}
template <typename I>
void FlattenRequest<I>::write_header() {
  // write header to offset 0 of the image (m_bl was filled by read_header)
  auto ctx = create_context_callback<
          FlattenRequest<I>, &FlattenRequest<I>::handle_write_header>(this);
  auto aio_comp = io::AioCompletion::create_and_start(
          ctx, librbd::util::get_image_ctx(m_image_ctx), io::AIO_TYPE_WRITE);
  ZTracer::Trace trace;
  auto req = io::ImageDispatchSpec::create_write(
          *m_image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, aio_comp,
          {{0, m_bl.length()}}, io::ImageArea::CRYPTO_HEADER,
          std::move(m_bl), 0, trace);
  req->send();
}
template <typename I>
void FlattenRequest<I>::handle_write_header(int r) {
  ldout(m_image_ctx->cct, 20) << "r=" << r << dendl;

  if (r >= 0) {
    // header rewritten; ensure it reaches stable storage
    flush();
    return;
  }

  lderr(m_image_ctx->cct) << "error writing header to image: "
                          << cpp_strerror(r) << dendl;
  finish(r);
}
template <typename I>
void FlattenRequest<I>::flush() {
  // persist the rewritten header before completing the request
  auto ctx = create_context_callback<
          FlattenRequest<I>, &FlattenRequest<I>::handle_flush>(this);
  auto aio_comp = io::AioCompletion::create_and_start(
          ctx, librbd::util::get_image_ctx(m_image_ctx), io::AIO_TYPE_FLUSH);
  auto req = io::ImageDispatchSpec::create_flush(
          *m_image_ctx, io::IMAGE_DISPATCH_LAYER_INTERNAL_START, aio_comp,
          io::FLUSH_SOURCE_INTERNAL, {});
  req->send();
}
template <typename I>
void FlattenRequest<I>::handle_flush(int r) {
  ldout(m_image_ctx->cct, 20) << "r=" << r << dendl;
  // a flush failure is reported via r; the state machine completes either way
  if (r < 0) {
    lderr(m_image_ctx->cct) << "unable to flush image: " << cpp_strerror(r)
                            << dendl;
  }
  finish(r);
}
template <typename I>
void FlattenRequest<I>::finish(int r) {
  ldout(m_image_ctx->cct, 20) << "r=" << r << dendl;
  m_on_finish->complete(r);
  // heap-allocated via create(); self-destructs on completion
  delete this;
}
} // namespace luks
} // namespace crypto
} // namespace librbd
template class librbd::crypto::luks::FlattenRequest<librbd::ImageCtx>;
| 4,572 | 28.503226 | 78 | cc |
null | ceph-main/src/librbd/crypto/luks/FlattenRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CRYPTO_LUKS_FLATTEN_REQUEST_H
#define CEPH_LIBRBD_CRYPTO_LUKS_FLATTEN_REQUEST_H
#include "librbd/ImageCtx.h"
namespace librbd {
namespace crypto {
namespace luks {
// Rewrites the on-disk encryption header after an image flatten, restoring
// the standard LUKS magic if the clone-specific magic was in place.
// Instances are heap-allocated via create() and delete themselves.
template <typename I>
class FlattenRequest {
public:
    using EncryptionFormat = decltype(I::encryption_format);
    static FlattenRequest* create(I* image_ctx, Context* on_finish) {
      return new FlattenRequest(image_ctx, on_finish);
    }
    FlattenRequest(I* image_ctx, Context* on_finish);
    void send();
private:
    /**
     * @verbatim
     *
     * <start>
     *    |
     *    v
     * READ_HEADER
     *    |
     *    v
     * WRITE_HEADER (replacing magic back from RBDL to LUKS if needed)
     *    |
     *    v
     * FLUSH
     *    |
     *    v
     * <finish>
     *
     * @endverbatim
     */
    I* m_image_ctx;
    Context* m_on_finish;
    ceph::bufferlist m_bl;  // header bytes carried between states
    void read_header();
    void handle_read_header(int r);
    void write_header();
    void handle_write_header(int r);
    void flush();
    void handle_flush(int r);
    void finish(int r);
};
} // namespace luks
} // namespace crypto
} // namespace librbd
extern template class librbd::crypto::luks::FlattenRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CRYPTO_LUKS_FLATTEN_REQUEST_H
| 1,342 | 19.348485 | 77 | h |
null | ceph-main/src/librbd/crypto/luks/FormatRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "FormatRequest.h"
#include <stdlib.h>
#include <openssl/rand.h>
#include "common/dout.h"
#include "common/errno.h"
#include "include/compat.h"
#include "librbd/Utils.h"
#include "librbd/crypto/Utils.h"
#include "librbd/crypto/luks/Header.h"
#include "librbd/crypto/luks/Magic.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::crypto::luks::FormatRequest: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace crypto {
namespace luks {
using librbd::util::create_context_callback;
template <typename I>
FormatRequest<I>::FormatRequest(
        I* image_ctx, encryption_format_t format, encryption_algorithm_t alg,
        std::string_view passphrase,
        std::unique_ptr<CryptoInterface>* result_crypto, Context* on_finish,
        bool insecure_fast_mode) : m_image_ctx(image_ctx), m_format(format),
                                   m_alg(alg),
                                   // NOTE: string_view — the caller must keep
                                   // the passphrase alive until completion
                                   m_passphrase(passphrase),
                                   m_result_crypto(result_crypto),
                                   m_on_finish(on_finish),
                                   m_insecure_fast_mode(insecure_fast_mode),
                                   m_header(image_ctx->cct) {
}
template <typename I>
void FormatRequest<I>::send() {
  // Format the image with a LUKS header: build the header via libcryptsetup
  // against an in-memory file, hand the wrapped volume key back through
  // m_result_crypto, then write the header to offset 0 of the image.
  const char* type;
  size_t sector_size;
  switch (m_format) {
    case RBD_ENCRYPTION_FORMAT_LUKS1:
      type = CRYPT_LUKS1;
      sector_size = 512;
      break;
    case RBD_ENCRYPTION_FORMAT_LUKS2:
      type = CRYPT_LUKS2;
      sector_size = 4096;
      break;
    default:
      lderr(m_image_ctx->cct) << "unsupported format type: " << m_format
                              << dendl;
      finish(-EINVAL);
      return;
  }

  // XTS keys are double length: 32 bytes -> AES-128, 64 bytes -> AES-256
  const char* cipher;
  size_t key_size;
  switch (m_alg) {
    case RBD_ENCRYPTION_ALGORITHM_AES128:
      cipher = "aes";
      key_size = 32;
      break;
    case RBD_ENCRYPTION_ALGORITHM_AES256:
      cipher = "aes";
      key_size = 64;
      break;
    default:
      lderr(m_image_ctx->cct) << "unsupported cipher algorithm: " << m_alg
                              << dendl;
      finish(-EINVAL);
      return;
  }

  // generate encryption key
  unsigned char* key = (unsigned char*)alloca(key_size);
  if (RAND_bytes((unsigned char *)key, key_size) != 1) {
    lderr(m_image_ctx->cct) << "cannot generate random encryption key"
                            << dendl;
    finish(-EAGAIN);
    return;
  }

  // setup interface with libcryptsetup
  auto r = m_header.init();
  if (r < 0) {
    // scrub the plaintext volume key from the stack on every exit path
    ceph_memzero_s(key, key_size, key_size);
    finish(r);
    return;
  }

  // format (create LUKS header)
  auto stripe_period = m_image_ctx->get_stripe_period();
  r = m_header.format(type, cipher, reinterpret_cast<char*>(key), key_size,
                      "xts-plain64", sector_size, stripe_period,
                      m_insecure_fast_mode);
  if (r != 0) {
    ceph_memzero_s(key, key_size, key_size);
    finish(r);
    return;
  }

  m_image_ctx->image_lock.lock_shared();
  uint64_t image_size = m_image_ctx->get_image_size(CEPH_NOSNAP);
  m_image_ctx->image_lock.unlock_shared();

  if (m_header.get_data_offset() > image_size) {
    lderr(m_image_ctx->cct) << "image is too small, format requires "
                            << m_header.get_data_offset() << " bytes" << dendl;
    ceph_memzero_s(key, key_size, key_size);
    finish(-ENOSPC);
    return;
  }

  // add keyslot (volume key encrypted with passphrase)
  r = m_header.add_keyslot(m_passphrase.data(), m_passphrase.size());
  if (r != 0) {
    ceph_memzero_s(key, key_size, key_size);
    finish(r);
    return;
  }

  r = util::build_crypto(m_image_ctx->cct, key, key_size,
                         m_header.get_sector_size(),
                         m_header.get_data_offset(), m_result_crypto);
  // the raw key is no longer needed past this point
  ceph_memzero_s(key, key_size, key_size);
  if (r != 0) {
    finish(r);
    return;
  }

  // read header from libcryptsetup interface
  ceph::bufferlist bl;
  r = m_header.read(&bl);
  if (r < 0) {
    finish(r);
    return;
  }

  if (m_image_ctx->parent != nullptr) {
    // parent is not encrypted with same key
    // change LUKS magic to prevent decryption by other LUKS implementations
    r = Magic::replace_magic(m_image_ctx->cct, bl);
    if (r < 0) {
      lderr(m_image_ctx->cct) << "error replacing LUKS magic: "
                              << cpp_strerror(r) << dendl;
      finish(r);
      return;
    }
  }

  // pad header to stripe period alignment to prevent copyup of parent data
  // when writing encryption header to the child image
  auto alignment = bl.length() % stripe_period;
  if (alignment > 0) {
    bl.append_zero(stripe_period - alignment);
  }

  // write header to offset 0 of the image
  auto ctx = create_context_callback<
          FormatRequest<I>, &FormatRequest<I>::handle_write_header>(this);
  auto aio_comp = io::AioCompletion::create_and_start(
          ctx, librbd::util::get_image_ctx(m_image_ctx), io::AIO_TYPE_WRITE);

  ZTracer::Trace trace;
  auto req = io::ImageDispatchSpec::create_write(
          *m_image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, aio_comp,
          {{0, bl.length()}}, io::ImageArea::DATA, std::move(bl), 0, trace);
  req->send();
}
template <typename I>
void FormatRequest<I>::handle_write_header(int r) {
  ldout(m_image_ctx->cct, 20) << "r=" << r << dendl;

  if (r >= 0) {
    // header landed on disk; the format is complete
    finish(0);
    return;
  }

  lderr(m_image_ctx->cct) << "error writing header to image: "
                          << cpp_strerror(r) << dendl;
  finish(r);
}
template <typename I>
void FormatRequest<I>::finish(int r) {
  ldout(m_image_ctx->cct, 20) << "r=" << r << dendl;
  m_on_finish->complete(r);
  // heap-allocated via create(); self-destructs on completion
  delete this;
}
} // namespace luks
} // namespace crypto
} // namespace librbd
template class librbd::crypto::luks::FormatRequest<librbd::ImageCtx>;
| 5,870 | 28.208955 | 79 | cc |
null | ceph-main/src/librbd/crypto/luks/FormatRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CRYPTO_LUKS_FORMAT_REQUEST_H
#define CEPH_LIBRBD_CRYPTO_LUKS_FORMAT_REQUEST_H
#include <string_view>
#include "include/rbd/librbd.hpp"
#include "librbd/ImageCtx.h"
#include "librbd/crypto/CryptoInterface.h"
#include "librbd/crypto/luks/Header.h"
namespace librbd {
class ImageCtx;
namespace crypto {
namespace luks {
// One-shot request that formats an RBD image with a LUKS1/LUKS2 header and
// returns the resulting crypto object through result_crypto.
// Instances are heap-allocated via create() and delete themselves.
template <typename I>
class FormatRequest {
public:
    static FormatRequest* create(
            I* image_ctx, encryption_format_t format,
            encryption_algorithm_t alg, std::string_view passphrase,
            std::unique_ptr<CryptoInterface>* result_crypto, Context* on_finish,
            bool insecure_fast_mode) {
      return new FormatRequest(image_ctx, format, alg, passphrase,
                               result_crypto, on_finish, insecure_fast_mode);
    }
    FormatRequest(I* image_ctx, encryption_format_t format,
                  encryption_algorithm_t alg, std::string_view passphrase,
                  std::unique_ptr<CryptoInterface>* result_crypto,
                  Context* on_finish, bool insecure_fast_mode);
    void send();
    void finish(int r);
private:
    I* m_image_ctx;
    encryption_format_t m_format;          // LUKS1 or LUKS2
    encryption_algorithm_t m_alg;          // AES-128 or AES-256
    std::string_view m_passphrase;         // view: caller keeps it alive
    std::unique_ptr<CryptoInterface>* m_result_crypto;
    Context* m_on_finish;
    bool m_insecure_fast_mode;             // weak PBKDF (testing only)
    Header m_header;                       // libcryptsetup interface
    void handle_write_header(int r);
};
} // namespace luks
} // namespace crypto
} // namespace librbd
extern template class librbd::crypto::luks::FormatRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CRYPTO_LUKS_FORMAT_REQUEST_H
| 1,732 | 27.883333 | 80 | h |
null | ceph-main/src/librbd/crypto/luks/Header.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "Header.h"
#include <errno.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include "common/dout.h"
#include "common/errno.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::crypto::luks::Header: " << this << " " \
<< __func__ << ": "
namespace librbd {
namespace crypto {
namespace luks {
// Starts with no backing file descriptor and no libcryptsetup handle;
// both are created lazily by init().
Header::Header(CephContext* cct) : m_cct(cct), m_fd(-1), m_cd(nullptr) {
}
// Releases the anonymous file and the libcryptsetup handle, if created.
Header::~Header() {
  if (m_fd != -1) {
    close(m_fd);
    m_fd = -1;
  }
  if (m_cd != nullptr) {
    crypt_free(m_cd);
    m_cd = nullptr;
  }
}
void Header::libcryptsetup_log_wrapper(int level, const char* msg, void* header) {
((Header*)header)->libcryptsetup_log(level, msg);
}
// Map libcryptsetup log levels onto Ceph debug levels
// (error -> lderr, normal -> 5, verbose -> 10, debug -> 20);
// unknown levels are dropped.
void Header::libcryptsetup_log(int level, const char* msg) {
  switch (level) {
  case CRYPT_LOG_NORMAL:
    ldout(m_cct, 5) << "[libcryptsetup] " << msg << dendl;
    break;
  case CRYPT_LOG_ERROR:
    lderr(m_cct) << "[libcryptsetup] " << msg << dendl;
    break;
  case CRYPT_LOG_VERBOSE:
    ldout(m_cct, 10) << "[libcryptsetup] " << msg << dendl;
    break;
  case CRYPT_LOG_DEBUG:
    ldout(m_cct, 20) << "[libcryptsetup] " << msg << dendl;
    break;
  }
}
int Header::init() {
if (m_fd != -1) {
return 0;
}
// create anonymous file
m_fd = syscall(SYS_memfd_create, "LibcryptsetupInterface", 0);
if (m_fd == -1) {
lderr(m_cct) << "error creating anonymous file: " << cpp_strerror(-errno)
<< dendl;
return -errno;
}
std::string path =
"/proc/" + std::to_string(getpid()) + "/fd/" + std::to_string(m_fd);
if (m_cct->_conf->subsys.should_gather<dout_subsys, 30>()) {
crypt_set_debug_level(CRYPT_DEBUG_ALL);
}
// init libcryptsetup handle
auto r = crypt_init(&m_cd, path.c_str());
if (r != 0) {
lderr(m_cct) << "crypt_init failed: " << cpp_strerror(r) << dendl;
return r;
}
// redirect logging
crypt_set_log_callback(m_cd, &libcryptsetup_log_wrapper, this);
return 0;
}
// Copy a raw header blob into the anonymous file backing the
// libcryptsetup device. Returns 0 or a negative error code.
int Header::write(const ceph::bufferlist& bl) {
  ceph_assert(m_fd != -1);
  auto r = bl.write_fd(m_fd);
  if (r != 0) {
    lderr(m_cct) << "error writing header: " << cpp_strerror(r) << dendl;
  }
  return r;
}
// Read the entire anonymous file (the produced header) into *bl.
// Returns the number of bytes read, or a negative error code.
ssize_t Header::read(ceph::bufferlist* bl) {
  ceph_assert(m_fd != -1);

  // get current header size
  struct stat st;
  ssize_t r = fstat(m_fd, &st);
  if (r < 0) {
    r = -errno;
    lderr(m_cct) << "failed to stat anonymous file: " << cpp_strerror(r)
                 << dendl;
    return r;
  }

  r = bl->read_fd(m_fd, st.st_size);
  if (r < 0) {
    lderr(m_cct) << "error reading header: " << cpp_strerror(r) << dendl;
    // return directly so the "read size" message below only reports
    // an actual size, never a negative error code
    return r;
  }

  ldout(m_cct, 20) << "read size = " << r << dendl;
  return r;
}
// Create a LUKS header of the given type/cipher in the anonymous file.
// data_alignment is in bytes (converted to 512-byte sectors for
// libcryptsetup); insecure_fast_mode selects a weak PBKDF (testing only).
// Returns 0 or a negative error code.
int Header::format(const char* type, const char* alg, const char* key,
                   size_t key_size, const char* cipher_mode,
                   uint32_t sector_size, uint32_t data_alignment,
                   bool insecure_fast_mode) {
  ceph_assert(m_cd != nullptr);

  ldout(m_cct, 20) << "sector size: " << sector_size << ", data alignment: "
                   << data_alignment << dendl;

  // required for passing libcryptsetup device size check
  if (ftruncate(m_fd, 4096) != 0) {
    // capture errno before logging; the stream operations may clobber it
    int r = -errno;
    lderr(m_cct) << "failed to truncate anonymous file: "
                 << cpp_strerror(r) << dendl;
    return r;
  }

  struct crypt_params_luks1 luks1params;
  struct crypt_params_luks2 luks2params;

  // libcryptsetup expects the alignment in 512-byte sectors
  const size_t converted_data_alignment = data_alignment / 512;

  void* params = nullptr;
  if (strcmp(type, CRYPT_LUKS1) == 0) {
    memset(&luks1params, 0, sizeof(luks1params));
    luks1params.data_alignment = converted_data_alignment;
    params = &luks1params;
  } else if (strcmp(type, CRYPT_LUKS2) == 0) {
    memset(&luks2params, 0, sizeof(luks2params));
    luks2params.data_alignment = converted_data_alignment;
    luks2params.sector_size = sector_size;
    params = &luks2params;
  }

  // this mode should be used for testing only
  if (insecure_fast_mode) {
    struct crypt_pbkdf_type pbkdf;
    memset(&pbkdf, 0, sizeof(pbkdf));
    pbkdf.type = CRYPT_KDF_PBKDF2;
    pbkdf.flags = CRYPT_PBKDF_NO_BENCHMARK;
    pbkdf.hash = "sha256";
    pbkdf.iterations = 1000;
    pbkdf.time_ms = 1;
    auto r = crypt_set_pbkdf_type(m_cd, &pbkdf);
    if (r != 0) {
      lderr(m_cct) << "crypt_set_pbkdf_type failed: " << cpp_strerror(r)
                   << dendl;
      return r;
    }
  }

  auto r = crypt_format(
          m_cd, type, alg, cipher_mode, NULL, key, key_size, params);
  if (r != 0) {
    lderr(m_cct) << "crypt_format failed: " << cpp_strerror(r) << dendl;
    return r;
  }

  return 0;
}
// Wrap the current volume key with the passphrase in a free keyslot.
// Returns 0 or a negative error code.
int Header::add_keyslot(const char* passphrase, size_t passphrase_size) {
  ceph_assert(m_cd != nullptr);
  auto r = crypt_keyslot_add_by_volume_key(
          m_cd, CRYPT_ANY_SLOT, NULL, 0, passphrase, passphrase_size);
  if (r < 0) {
    lderr(m_cct) << "crypt_keyslot_add_by_volume_key failed: "
                 << cpp_strerror(r) << dendl;
    return r;
  }
  return 0;
}
// Parse the header previously copied in via write(). type selects the
// expected LUKS variant (CRYPT_LUKS for "any"). Returns 0 or -errno.
int Header::load(const char* type) {
  ceph_assert(m_cd != nullptr);

  // libcryptsetup checks if device size matches the header and keyslots size
  // in LUKS2, 2 X 4MB header + 128MB keyslots
  if (ftruncate(m_fd, 136 * 1024 * 1024) != 0) {
    // capture errno before logging; the stream operations may clobber it
    int r = -errno;
    lderr(m_cct) << "failed to truncate anonymous file: "
                 << cpp_strerror(r) << dendl;
    return r;
  }

  auto r = crypt_load(m_cd, type, NULL);
  if (r != 0) {
    // expected while probing; the caller may retry with a larger header
    ldout(m_cct, 20) << "crypt_load failed: " << cpp_strerror(r) << dendl;
    return r;
  }

  ldout(m_cct, 20) << "sector size: " << get_sector_size() << ", data offset: "
                   << get_data_offset() << dendl;

  return 0;
}
// Unwrap the volume key using the passphrase (tries every keyslot).
// On success *volume_key_size holds the key length. Returns 0 or -errno;
// a failure here typically means a wrong passphrase, so it is logged at
// debug level only.
int Header::read_volume_key(const char* passphrase, size_t passphrase_size,
                            char* volume_key, size_t* volume_key_size) {
  ceph_assert(m_cd != nullptr);
  auto r = crypt_volume_key_get(
          m_cd, CRYPT_ANY_SLOT, volume_key, volume_key_size, passphrase,
          passphrase_size);
  if (r < 0) {
    ldout(m_cct, 20) << "crypt_volume_key_get failed: " << cpp_strerror(r)
                     << dendl;
    return r;
  }
  return 0;
}
// Encryption sector size recorded in the loaded/formatted header.
int Header::get_sector_size() {
  ceph_assert(m_cd != nullptr);
  return crypt_get_sector_size(m_cd);
}
// Payload offset in bytes; libcryptsetup reports it in 512-byte sectors,
// hence the << 9 conversion.
uint64_t Header::get_data_offset() {
  ceph_assert(m_cd != nullptr);
  return crypt_get_data_offset(m_cd) << 9;
}
// Cipher name from the loaded header (e.g. "aes").
const char* Header::get_cipher() {
  ceph_assert(m_cd != nullptr);
  return crypt_get_cipher(m_cd);
}
// Cipher mode from the loaded header (e.g. "xts-plain64").
const char* Header::get_cipher_mode() {
  ceph_assert(m_cd != nullptr);
  return crypt_get_cipher_mode(m_cd);
}
// Detected format type name (e.g. CRYPT_LUKS1 / CRYPT_LUKS2).
const char* Header::get_format_name() {
  ceph_assert(m_cd != nullptr);
  return crypt_get_type(m_cd);
}
} // namespace luks
} // namespace crypto
} // namespace librbd
| 6,924 | 25.431298 | 82 | cc |
null | ceph-main/src/librbd/crypto/luks/Header.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CRYPTO_LUKS_HEADER_H
#define CEPH_LIBRBD_CRYPTO_LUKS_HEADER_H
#include <libcryptsetup.h>
#include "common/ceph_context.h"
#include "include/buffer.h"
namespace librbd {
namespace crypto {
namespace luks {
// Thin wrapper around a libcryptsetup crypt_device backed by an anonymous
// in-memory file rather than a real block device, so LUKS headers can be
// produced from / parsed into RBD image data. Not thread-safe.
class Header {
public:
  Header(CephContext* cct);
  ~Header();
  // lazily create the anonymous file + crypt_device handle
  int init();
  // copy a raw on-disk header into the anonymous file for load()
  int write(const ceph::bufferlist& bl);
  // read the produced header back out; returns size or negative errno
  ssize_t read(ceph::bufferlist* bl);
  int format(const char* type, const char* alg, const char* key,
             size_t key_size, const char* cipher_mode, uint32_t sector_size,
             uint32_t data_alignment, bool insecure_fast_mode);
  int add_keyslot(const char* passphrase, size_t passphrase_size);
  int load(const char* type);
  int read_volume_key(const char* passphrase, size_t passphrase_size,
                      char* volume_key, size_t* volume_key_size);
  // accessors below require a successful format()/load()
  int get_sector_size();
  uint64_t get_data_offset();
  const char* get_cipher();
  const char* get_cipher_mode();
  const char* get_format_name();
private:
  void libcryptsetup_log(int level, const char* msg);
  static void libcryptsetup_log_wrapper(int level, const char* msg,
                                        void* header);
  CephContext* m_cct;
  int m_fd;                   // memfd backing the fake device (-1 if unset)
  struct crypt_device *m_cd;  // libcryptsetup handle (nullptr if unset)
};
} // namespace luks
} // namespace crypto
} // namespace librbd
#endif // CEPH_LIBRBD_CRYPTO_LUKS_HEADER_H
| 1,484 | 27.018868 | 78 | h |
null | ceph-main/src/librbd/crypto/luks/LUKSEncryptionFormat.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "LUKSEncryptionFormat.h"
#include "common/dout.h"
#include "common/errno.h"
#include "include/compat.h"
#include "librbd/crypto/luks/FlattenRequest.h"
#include "librbd/crypto/luks/FormatRequest.h"
#include "librbd/crypto/luks/LoadRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::crypto::luks::LUKSEncryptionFormat:: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace crypto {
namespace luks {
template <typename I>
void EncryptionFormat<I>::flatten(I* image_ctx, Context* on_finish) {
  // shared across all LUKS variants: fix up the on-disk header after a
  // flatten (see luks::FlattenRequest)
  auto req = luks::FlattenRequest<I>::create(image_ctx, on_finish);
  req->send();
}
template <typename I>
void LUKSEncryptionFormat<I>::format(I* image_ctx, Context* on_finish) {
  // the version-agnostic "luks" format can only load existing headers;
  // creating one requires an explicit LUKS1/LUKS2 choice
  lderr(image_ctx->cct) << "explicit LUKS version required for format" << dendl;
  on_finish->complete(-EINVAL);
}
template <typename I>
void LUKSEncryptionFormat<I>::load(I* image_ctx,
                                   std::string* detected_format_name,
                                   Context* on_finish) {
  // version-agnostic probe: accept whichever LUKS variant is on disk
  luks::LoadRequest<I>::create(image_ctx, RBD_ENCRYPTION_FORMAT_LUKS,
                               m_passphrase, &this->m_crypto,
                               detected_format_name, on_finish)->send();
}
template <typename I>
void LUKS1EncryptionFormat<I>::format(I* image_ctx, Context* on_finish) {
  // delegate to the LUKS1 format state machine (secure PBKDF settings)
  luks::FormatRequest<I>::create(
      image_ctx, RBD_ENCRYPTION_FORMAT_LUKS1, m_alg, m_passphrase,
      &this->m_crypto, on_finish, false)->send();
}
template <typename I>
void LUKS1EncryptionFormat<I>::load(I* image_ctx,
                                    std::string* detected_format_name,
                                    Context* on_finish) {
  // strict probe: only a LUKS1 header is accepted
  luks::LoadRequest<I>::create(
      image_ctx, RBD_ENCRYPTION_FORMAT_LUKS1, m_passphrase, &this->m_crypto,
      detected_format_name, on_finish)->send();
}
template <typename I>
void LUKS2EncryptionFormat<I>::format(I* image_ctx, Context* on_finish) {
  // delegate to the LUKS2 format state machine (secure PBKDF settings)
  luks::FormatRequest<I>::create(
      image_ctx, RBD_ENCRYPTION_FORMAT_LUKS2, m_alg, m_passphrase,
      &this->m_crypto, on_finish, false)->send();
}
template <typename I>
void LUKS2EncryptionFormat<I>::load(I* image_ctx,
                                    std::string* detected_format_name,
                                    Context* on_finish) {
  // strict probe: only a LUKS2 header is accepted
  luks::LoadRequest<I>::create(
      image_ctx, RBD_ENCRYPTION_FORMAT_LUKS2, m_passphrase, &this->m_crypto,
      detected_format_name, on_finish)->send();
}
} // namespace luks
} // namespace crypto
} // namespace librbd
template class librbd::crypto::luks::LUKSEncryptionFormat<librbd::ImageCtx>;
template class librbd::crypto::luks::LUKS1EncryptionFormat<librbd::ImageCtx>;
template class librbd::crypto::luks::LUKS2EncryptionFormat<librbd::ImageCtx>;
| 3,031 | 34.255814 | 80 | cc |
null | ceph-main/src/librbd/crypto/luks/LUKSEncryptionFormat.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CRYPTO_LUKS_ENCRYPTION_FORMAT_H
#define CEPH_LIBRBD_CRYPTO_LUKS_ENCRYPTION_FORMAT_H
#include <string_view>
#include "include/rbd/librbd.hpp"
#include "librbd/crypto/CryptoInterface.h"
#include "librbd/crypto/EncryptionFormat.h"
namespace librbd {
struct ImageCtx;
namespace crypto {
namespace luks {
// Common base for the LUKS encryption format variants: owns the crypto
// object produced by format()/load() and provides the shared flatten logic.
template <typename ImageCtxT>
class EncryptionFormat : public crypto::EncryptionFormat<ImageCtxT> {
public:
  void flatten(ImageCtxT* ictx, Context* on_finish) override;
  CryptoInterface* get_crypto() override {
    // only valid after a successful format() or load()
    ceph_assert(m_crypto);
    return m_crypto.get();
  }
protected:
  std::unique_ptr<CryptoInterface> m_crypto;
};
// Version-agnostic "luks" format: can load either LUKS1 or LUKS2 headers
// but refuses to format (an explicit version is required for that).
template <typename ImageCtxT>
class LUKSEncryptionFormat : public EncryptionFormat<ImageCtxT> {
public:
  LUKSEncryptionFormat(std::string_view passphrase)
      : m_passphrase(passphrase) {}
  std::unique_ptr<crypto::EncryptionFormat<ImageCtxT>> clone() const override {
    return std::make_unique<LUKSEncryptionFormat>(m_passphrase);
  }
  void format(ImageCtxT* ictx, Context* on_finish) override;
  void load(ImageCtxT* ictx, std::string* detected_format_name,
            Context* on_finish) override;
private:
  // view only -- the passphrase owner must outlive this object
  std::string_view m_passphrase;
};
// LUKS1-specific format: formats with and loads only LUKS1 headers.
template <typename ImageCtxT>
class LUKS1EncryptionFormat : public EncryptionFormat<ImageCtxT> {
public:
  LUKS1EncryptionFormat(encryption_algorithm_t alg, std::string_view passphrase)
      : m_alg(alg), m_passphrase(passphrase) {}
  std::unique_ptr<crypto::EncryptionFormat<ImageCtxT>> clone() const override {
    return std::make_unique<LUKS1EncryptionFormat>(m_alg, m_passphrase);
  }
  void format(ImageCtxT* ictx, Context* on_finish) override;
  void load(ImageCtxT* ictx, std::string* detected_format_name,
            Context* on_finish) override;
private:
  encryption_algorithm_t m_alg;
  // view only -- the passphrase owner must outlive this object
  std::string_view m_passphrase;
};
// LUKS2-specific format: formats with and loads only LUKS2 headers.
template <typename ImageCtxT>
class LUKS2EncryptionFormat : public EncryptionFormat<ImageCtxT> {
public:
  LUKS2EncryptionFormat(encryption_algorithm_t alg, std::string_view passphrase)
      : m_alg(alg), m_passphrase(passphrase) {}
  std::unique_ptr<crypto::EncryptionFormat<ImageCtxT>> clone() const override {
    return std::make_unique<LUKS2EncryptionFormat>(m_alg, m_passphrase);
  }
  void format(ImageCtxT* ictx, Context* on_finish) override;
  void load(ImageCtxT* ictx, std::string* detected_format_name,
            Context* on_finish) override;
private:
  encryption_algorithm_t m_alg;
  // view only -- the passphrase owner must outlive this object
  std::string_view m_passphrase;
};
} // namespace luks
} // namespace crypto
} // namespace librbd
extern template class librbd::crypto::luks::LUKSEncryptionFormat<
librbd::ImageCtx>;
extern template class librbd::crypto::luks::LUKS1EncryptionFormat<
librbd::ImageCtx>;
extern template class librbd::crypto::luks::LUKS2EncryptionFormat<
librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CRYPTO_LUKS_ENCRYPTION_FORMAT_H
| 2,993 | 28.643564 | 80 | h |
null | ceph-main/src/librbd/crypto/luks/LoadRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "LoadRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/Utils.h"
#include "librbd/crypto/Utils.h"
#include "librbd/crypto/LoadRequest.h"
#include "librbd/crypto/luks/Magic.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/ReadResult.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::crypto::luks::LoadRequest: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace crypto {
namespace luks {
using librbd::util::create_context_callback;
template <typename I>
LoadRequest<I>::LoadRequest(
        I* image_ctx, encryption_format_t format, std::string_view passphrase,
        std::unique_ptr<CryptoInterface>* result_crypto,
        std::string* detected_format_name,
        Context* on_finish) : m_image_ctx(image_ctx),
                              m_format(format),
                              // NOTE: string_view -- caller must keep the
                              // passphrase alive until completion
                              m_passphrase(passphrase),
                              m_on_finish(on_finish),
                              m_result_crypto(result_crypto),
                              m_detected_format_name(detected_format_name),
                              m_initial_read_size(DEFAULT_INITIAL_READ_SIZE),
                              m_header(image_ctx->cct), m_offset(0) {
}
template <typename I>
void LoadRequest<I>::set_initial_read_size(uint64_t read_size) {
  // override the size of the first header read (defaults to
  // DEFAULT_INITIAL_READ_SIZE); must be called before send()
  m_initial_read_size = read_size;
}
template <typename I>
void LoadRequest<I>::send() {
  // start with a small read; handle_read_header() re-reads a larger chunk
  // if the header turns out not to fit
  auto ctx = create_context_callback<
          LoadRequest<I>, &LoadRequest<I>::handle_read_header>(this);
  read(m_initial_read_size, ctx);
}
template <typename I>
void LoadRequest<I>::read(uint64_t end_offset, Context* on_finish) {
  // append image bytes [m_offset, end_offset) to m_bl
  auto length = end_offset - m_offset;
  auto aio_comp = io::AioCompletion::create_and_start(
          on_finish, librbd::util::get_image_ctx(m_image_ctx),
          io::AIO_TYPE_READ);
  ZTracer::Trace trace;
  auto req = io::ImageDispatchSpec::create_read(
          *m_image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, aio_comp,
          {{m_offset, length}}, io::ImageArea::DATA, io::ReadResult{&m_bl},
          m_image_ctx->get_data_io_context(), 0, 0, trace);
  req->send();
}
// Common post-read processing: validates the LUKS magic on the first
// chunk, then feeds the data to libcryptsetup. Returns true when the
// caller should continue parsing, false when the request was completed
// (or re-queued) internally.
template <typename I>
bool LoadRequest<I>::handle_read(int r) {
  ldout(m_image_ctx->cct, 20) << "r=" << r << dendl;
  if (r < 0) {
    lderr(m_image_ctx->cct) << "error reading from image: " << cpp_strerror(r)
                            << dendl;
    finish(r);
    return false;
  }
  // first, check LUKS magic at the beginning of the image
  // If no magic is detected, caller may assume image is actually plaintext
  if (m_offset == 0) {
    if (Magic::is_luks(m_bl) > 0 || Magic::is_rbd_clone(m_bl) > 0) {
      *m_detected_format_name = "LUKS";
    } else {
      *m_detected_format_name = crypto::LoadRequest<I>::UNKNOWN_FORMAT;
      finish(-EINVAL);
      return false;
    }
    if (m_image_ctx->parent != nullptr && Magic::is_rbd_clone(m_bl) > 0) {
      // cloned images carry a swapped magic; restore it before parsing
      r = Magic::replace_magic(m_image_ctx->cct, m_bl);
      if (r < 0) {
        m_image_ctx->image_lock.lock_shared();
        auto image_size = m_image_ctx->get_image_size(m_image_ctx->snap_id);
        m_image_ctx->image_lock.unlock_shared();
        auto max_header_size = std::min(MAXIMUM_HEADER_SIZE, image_size);
        // -EINVAL with a short buffer may just mean we read too little;
        // retry once with the maximum possible header size
        if (r == -EINVAL && m_bl.length() < max_header_size) {
          m_bl.clear();
          auto ctx = create_context_callback<
                  LoadRequest<I>, &LoadRequest<I>::handle_read_header>(this);
          read(max_header_size, ctx);
          return false;
        }
        lderr(m_image_ctx->cct) << "error replacing rbd clone magic: "
                                << cpp_strerror(r) << dendl;
        finish(r);
        return false;
      }
    }
  }
  // setup interface with libcryptsetup
  r = m_header.init();
  if (r < 0) {
    finish(r);
    return false;
  }
  m_offset += m_bl.length();
  // write header to libcryptsetup interface
  r = m_header.write(m_bl);
  if (r < 0) {
    finish(r);
    return false;
  }
  m_bl.clear();
  return true;
}
template <typename I>
// Completion of a header read: map the requested rbd encryption format to a
// libcryptsetup type string, parse the header, then validate the cipher,
// cipher mode, data offset and stripe alignment before moving on to volume
// key extraction.  Parse failures retry with a larger read until the
// maximum header size has been consumed.
void LoadRequest<I>::handle_read_header(int r) {
  ldout(m_image_ctx->cct, 20) << "r=" << r << dendl;
  if (!handle_read(r)) {
    return;
  }
  const char* type;
  switch (m_format) {
    case RBD_ENCRYPTION_FORMAT_LUKS:
      // auto-detect LUKS1 vs LUKS2
      type = CRYPT_LUKS;
      break;
    case RBD_ENCRYPTION_FORMAT_LUKS1:
      type = CRYPT_LUKS1;
      break;
    case RBD_ENCRYPTION_FORMAT_LUKS2:
      type = CRYPT_LUKS2;
      break;
    default:
      lderr(m_image_ctx->cct) << "unsupported format type: " << m_format
                              << dendl;
      finish(-EINVAL);
      return;
  }
  // parse header via libcryptsetup
  r = m_header.load(type);
  if (r != 0) {
    if (m_offset < MAXIMUM_HEADER_SIZE) {
      // perhaps we did not feed the entire header to libcryptsetup, retry
      auto ctx = create_context_callback<
              LoadRequest<I>, &LoadRequest<I>::handle_read_header>(this);
      read(MAXIMUM_HEADER_SIZE, ctx);
      return;
    }
    finish(r);
    return;
  }
  // gets actual LUKS version (only used for logging)
  ceph_assert(*m_detected_format_name == "LUKS");
  *m_detected_format_name = m_header.get_format_name();
  auto cipher = m_header.get_cipher();
  if (strcmp(cipher, "aes") != 0) {
    lderr(m_image_ctx->cct) << "unsupported cipher: " << cipher << dendl;
    finish(-ENOTSUP);
    return;
  }
  auto cipher_mode = m_header.get_cipher_mode();
  if (strcmp(cipher_mode, "xts-plain64") != 0) {
    lderr(m_image_ctx->cct) << "unsupported cipher mode: " << cipher_mode
                            << dendl;
    finish(-ENOTSUP);
    return;
  }
  m_image_ctx->image_lock.lock_shared();
  uint64_t image_size = m_image_ctx->get_image_size(CEPH_NOSNAP);
  m_image_ctx->image_lock.unlock_shared();
  if (m_header.get_data_offset() > image_size) {
    lderr(m_image_ctx->cct) << "image is too small, data offset "
                            << m_header.get_data_offset() << dendl;
    finish(-EINVAL);
    return;
  }
  uint64_t stripe_period = m_image_ctx->get_stripe_period();
  if (m_header.get_data_offset() % stripe_period != 0) {
    // data area must start on a stripe boundary for correct I/O remapping
    lderr(m_image_ctx->cct) << "incompatible stripe pattern, data offset "
                            << m_header.get_data_offset() << dendl;
    finish(-EINVAL);
    return;
  }
  read_volume_key();
  return;
}
template <typename I>
// Completion of the keyslot-area read: feed the data to libcryptsetup and,
// on success, resume volume key extraction.  handle_read() has already
// completed the request on any failure path.
void LoadRequest<I>::handle_read_keyslots(int r) {
  ldout(m_image_ctx->cct, 20) << "r=" << r << dendl;
  if (handle_read(r)) {
    read_volume_key();
  }
}
template <typename I>
// Attempt to unlock a keyslot with the user-supplied passphrase and recover
// the volume (master) key.  If unlocking fails and not all keyslot data has
// been fed to libcryptsetup yet, read up to the data offset and retry.  On
// success, the key is handed to util::build_crypto to construct the crypto
// interface, and the stack copy is securely wiped.
void LoadRequest<I>::read_volume_key() {
  char volume_key[64];
  size_t volume_key_size = sizeof(volume_key);
  // volume_key decays to char* -- no cast needed
  auto r = m_header.read_volume_key(
          m_passphrase.data(), m_passphrase.size(),
          volume_key, &volume_key_size);
  if (r != 0) {
    auto keyslots_end_offset = m_header.get_data_offset();
    if (m_offset < keyslots_end_offset) {
      // perhaps we did not feed the necessary keyslot, retry
      auto ctx = create_context_callback<
              LoadRequest<I>, &LoadRequest<I>::handle_read_keyslots>(this);
      read(keyslots_end_offset, ctx);
      return;
    }
    finish(r);
    return;
  }
  r = util::build_crypto(
          m_image_ctx->cct, reinterpret_cast<unsigned char*>(volume_key),
          volume_key_size, m_header.get_sector_size(),
          m_header.get_data_offset(), m_result_crypto);
  // wipe key material from the stack; use sizeof rather than a duplicated
  // magic constant so the buffer size has a single point of truth
  ceph_memzero_s(volume_key, sizeof(volume_key), sizeof(volume_key));
  finish(r);
}
template <typename I>
// Complete the request with result r and destroy this self-managed object.
void LoadRequest<I>::finish(int r) {
  ldout(m_image_ctx->cct, 20) << "r=" << r << dendl;
  m_on_finish->complete(r);
  delete this;
}
} // namespace luks
} // namespace crypto
} // namespace librbd
template class librbd::crypto::luks::LoadRequest<librbd::ImageCtx>;
| 7,928 | 28.043956 | 78 | cc |
null | ceph-main/src/librbd/crypto/luks/LoadRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CRYPTO_LUKS_LOAD_REQUEST_H
#define CEPH_LIBRBD_CRYPTO_LUKS_LOAD_REQUEST_H
#include <string_view>
#include "include/rbd/librbd.hpp"
#include "librbd/ImageCtx.h"
#include "librbd/crypto/CryptoInterface.h"
#include "librbd/crypto/luks/Header.h"
namespace librbd {
class ImageCtx;
namespace crypto {
namespace luks {
// max header size in LUKS1/2 (excl. keyslots) is 4MB
const uint64_t MAXIMUM_HEADER_SIZE = 4 * 1024 * 1024;
// default header size in LUKS2 2 X 16KB + 1 X 256KB keyslot
const uint64_t DEFAULT_INITIAL_READ_SIZE = 288 * 1024;
template <typename I>
// Async state machine that reads a LUKS header from an encrypted rbd image,
// parses it via libcryptsetup, unlocks a keyslot with the supplied
// passphrase, and produces a CryptoInterface for subsequent I/O.
// Self-deleting: completes on_finish and destroys itself in finish().
class LoadRequest {
public:
  static LoadRequest* create(
          I* image_ctx, encryption_format_t format,
          std::string_view passphrase,
          std::unique_ptr<CryptoInterface>* result_crypto,
          std::string* detected_format_name,
          Context* on_finish) {
    return new LoadRequest(image_ctx, format, passphrase, result_crypto,
                           detected_format_name, on_finish);
  }

  LoadRequest(I* image_ctx, encryption_format_t format,
              std::string_view passphrase,
              std::unique_ptr<CryptoInterface>* result_crypto,
              std::string* detected_format_name, Context* on_finish);
  void send();
  void finish(int r);
  // test hook: override the size of the initial header read
  void set_initial_read_size(uint64_t read_size);

private:
  I* m_image_ctx;
  encryption_format_t m_format;
  std::string_view m_passphrase;
  Context* m_on_finish;

  ceph::bufferlist m_bl;           // buffered header data for the current read
  std::unique_ptr<CryptoInterface>* m_result_crypto;
  std::string* m_detected_format_name;
  uint64_t m_initial_read_size;
  Header m_header;                 // libcryptsetup wrapper
  uint64_t m_offset;               // bytes already fed to libcryptsetup

  void read(uint64_t end_offset, Context* on_finish);
  bool handle_read(int r);
  void handle_read_header(int r);
  void handle_read_keyslots(int r);
  void read_volume_key();
};
} // namespace luks
} // namespace crypto
} // namespace librbd
extern template class librbd::crypto::luks::LoadRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CRYPTO_LUKS_LOAD_REQUEST_H
| 2,167 | 29.111111 | 74 | h |
null | ceph-main/src/librbd/crypto/luks/Magic.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "Magic.h"
#include "common/dout.h"
#include "common/errno.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::crypto::luks::Magic: " << __func__ \
<< ": "
namespace librbd {
namespace crypto {
namespace luks {
namespace {
constexpr uint64_t MAGIC_LENGTH = 6;
const std::string LUKS_MAGIC = "LUKS\xba\xbe";
const std::string RBD_CLONE_MAGIC = "RBDL\xba\xbe";
} // anonymous namespace
// Copy read_size bytes at offset bl_off out of bl into result.
// Returns 0 on success, -EINVAL if the requested range exceeds the buffer.
int Magic::read(ceph::bufferlist &bl, uint32_t bl_off,
                uint32_t read_size, char* result) {
  // widen before adding so bl_off + read_size cannot wrap around uint32_t
  // and incorrectly pass the bounds check
  if (static_cast<uint64_t>(bl_off) + read_size > bl.length()) {
    return -EINVAL;
  }

  memcpy(result, bl.c_str() + bl_off, read_size);
  return 0;
}
// Compare cmp_str against the bytes of bl starting at bl_off.
// Returns 1 on match, 0 on mismatch, -EINVAL if the range is out of bounds.
int Magic::cmp(ceph::bufferlist &bl, uint32_t bl_off,
               const std::string &cmp_str) {
  const auto cmp_length = cmp_str.length();
  if (bl_off + cmp_length > bl.length()) {
    return -EINVAL;
  }
  return memcmp(bl.c_str() + bl_off, cmp_str.c_str(), cmp_length) == 0 ? 1 : 0;
}
// Returns 1 if bl starts with the LUKS magic, 0 if not, -EINVAL if too short.
int Magic::is_luks(ceph::bufferlist& bl) {
  return cmp(bl, 0, LUKS_MAGIC);
}
// Returns 1 if bl starts with the rbd-clone magic ("RBDL\xba\xbe"),
// 0 if not, -EINVAL if too short.
int Magic::is_rbd_clone(ceph::bufferlist& bl) {
  return cmp(bl, 0, RBD_CLONE_MAGIC);
}
// Reverse the first four magic bytes in place (0<->3, 1<->2) to convert
// between the primary and secondary header magic byte orders.
void Magic::transform_secondary_header_magic(char* magic) {
  for (int idx = 0; idx < 2; ++idx) {
    std::swap(magic[idx], magic[3 - idx]);
  }
}
// Toggle the on-disk magic between LUKS and rbd-clone in both the primary
// header and (for LUKS2) the secondary header that follows it.  bl must
// contain at least the primary header plus, for LUKS2, hdr_size bytes plus
// the secondary magic.  Returns 0 on success, -EINVAL if more data is
// needed or the version is unknown, -EILSEQ on corrupt magic.
int Magic::replace_magic(CephContext* cct, ceph::bufferlist& bl) {
  const std::string *old_magic, *new_magic;
  if (is_luks(bl) > 0) {
    old_magic = &LUKS_MAGIC;
    new_magic = &RBD_CLONE_MAGIC;
  } else if (is_rbd_clone(bl) > 0) {
    old_magic = &RBD_CLONE_MAGIC;
    new_magic = &LUKS_MAGIC;
  } else {
    lderr(cct) << "invalid magic: " << dendl;
    return -EILSEQ;
  }

  // read luks version
  uint16_t version;
  auto r = read(bl, MAGIC_LENGTH, sizeof(version), (char*)&version);
  if (r < 0) {
    lderr(cct) << "cannot read header version: " << cpp_strerror(r) << dendl;
    return r;
  }
  boost::endian::big_to_native_inplace(version);

  switch (version) {
    case 1: {
      // LUKS1, no secondary header
      break;
    }
    case 2: {
      // LUKS2, secondary header follows primary header
      // read header size
      uint64_t hdr_size;
      r = read(bl, MAGIC_LENGTH + sizeof(version), sizeof(hdr_size),
               (char*)&hdr_size);
      if (r < 0) {
        lderr(cct) << "cannot read header size: " << cpp_strerror(r) << dendl;
        return r;
      }
      boost::endian::big_to_native_inplace(hdr_size);

      // NOTE: hdr_size is untrusted on-disk data; compare in full 64-bit
      // width (a former uint32_t cast truncated it, letting an oversized
      // value pass the check and index past the end of the buffer)
      if (hdr_size + MAGIC_LENGTH > bl.length()) {
        ldout(cct, 20) << "cannot replace secondary header magic" << dendl;
        return -EINVAL;
      }

      // check secondary header magic
      auto secondary_header_magic = bl.c_str() + hdr_size;
      transform_secondary_header_magic(secondary_header_magic);
      auto is_secondary_header_magic_valid =
              !memcmp(secondary_header_magic, old_magic->c_str(), MAGIC_LENGTH);
      if (!is_secondary_header_magic_valid) {
        // restore original byte order before bailing out
        transform_secondary_header_magic(secondary_header_magic);
        lderr(cct) << "invalid secondary header magic" << dendl;
        return -EILSEQ;
      }

      // replace secondary header magic
      memcpy(secondary_header_magic, new_magic->c_str(), MAGIC_LENGTH);
      transform_secondary_header_magic(secondary_header_magic);
      break;
    }
    default: {
      lderr(cct) << "bad header version: " << version << dendl;
      return -EINVAL;
    }
  }

  // switch primary header magic
  memcpy(bl.c_str(), new_magic->c_str(), MAGIC_LENGTH);
  return 0;
}
} // namespace luks
} // namespace crypto
} // namespace librbd
| 3,708 | 25.492857 | 80 | cc |
null | ceph-main/src/librbd/crypto/luks/Magic.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CRYPTO_LUKS_MAGIC_H
#define CEPH_LIBRBD_CRYPTO_LUKS_MAGIC_H
#include "common/ceph_context.h"
#include "include/buffer.h"
namespace librbd {
namespace crypto {
namespace luks {
// Static helpers for detecting and rewriting the LUKS / rbd-clone magic
// bytes at the start of an encrypted image header.
class Magic {
public:
  // each returns 1 on match, 0 on mismatch, negative errno on error
  static int is_luks(ceph::bufferlist& bl);
  static int is_rbd_clone(ceph::bufferlist& bl);
  // swaps LUKS magic <-> rbd-clone magic in primary (and LUKS2 secondary)
  // headers contained in bl
  static int replace_magic(CephContext* cct, ceph::bufferlist& bl);

private:
  // bounds-checked copy out of bl
  static int read(ceph::bufferlist& bl, uint32_t bl_off,
                  uint32_t read_size, char* result);
  // bounds-checked compare against bl
  static int cmp(ceph::bufferlist& bl, uint32_t bl_off,
                 const std::string& cmp_str);
  // reverses the first 4 magic bytes in place
  static void transform_secondary_header_magic(char* magic);
};
} // namespace luks
} // namespace crypto
} // namespace librbd
#endif // CEPH_LIBRBD_CRYPTO_LUKS_MAGIC_H
| 875 | 25.545455 | 70 | h |
null | ceph-main/src/librbd/crypto/openssl/DataCryptor.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/crypto/openssl/DataCryptor.h"
#include <openssl/err.h>
#include <string.h>
#include "include/ceph_assert.h"
#include "include/compat.h"
namespace librbd {
namespace crypto {
namespace openssl {
// Initialize the cryptor with the named OpenSSL cipher and key.
// Any previously stored key is wiped and freed first, so init() may be
// called more than once.  The key is copied; the caller retains ownership
// of its buffer.  Returns 0 on success, -EINVAL on bad arguments, unknown
// cipher, or key length mismatch.
int DataCryptor::init(const char* cipher_name, const unsigned char* key,
                      uint16_t key_length) {
  if (m_key != nullptr) {
    // securely wipe the old key before releasing it
    ceph_memzero_s(m_key, m_key_size, m_key_size);
    delete [] m_key;
    m_key = nullptr;
    m_key_size = 0;
  }
  if (cipher_name == nullptr) {
    lderr(m_cct) << "missing cipher name" << dendl;
    return -EINVAL;
  }
  if (key == nullptr) {
    lderr(m_cct) << "missing key" << dendl;
    return -EINVAL;
  }

  m_cipher = EVP_get_cipherbyname(cipher_name);
  if (m_cipher == nullptr) {
    lderr(m_cct) << "EVP_get_cipherbyname failed. Cipher name: " << cipher_name
                 << dendl;
    log_errors();
    return -EINVAL;
  }

  // reject keys that do not match the cipher's required key size
  auto expected_key_length = EVP_CIPHER_key_length(m_cipher);
  if (expected_key_length != key_length) {
    lderr(m_cct) << "cipher " << cipher_name << " expects key of "
                 << expected_key_length << " bytes. got: " << key_length
                 << dendl;
    return -EINVAL;
  }

  m_key_size = key_length;
  m_key = new unsigned char[key_length];
  memcpy(m_key, key, key_length);
  m_iv_size = static_cast<uint32_t>(EVP_CIPHER_iv_length(m_cipher));
  return 0;
}
// Destructor: securely wipe the key material before freeing it.
DataCryptor::~DataCryptor() {
  if (m_key != nullptr) {
    ceph_memzero_s(m_key, m_key_size, m_key_size);
    delete [] m_key;
    m_key = nullptr;
  }
}
// Cipher block size in bytes, as reported by OpenSSL for the loaded cipher.
uint32_t DataCryptor::get_block_size() const {
  return EVP_CIPHER_block_size(m_cipher);
}
// IV size in bytes, cached from EVP_CIPHER_iv_length() during init().
uint32_t DataCryptor::get_iv_size() const {
  return m_iv_size;
}
// Raw key bytes (owned by this object); nullptr until init() succeeds.
const unsigned char* DataCryptor::get_key() const {
  return m_key;
}
// Key length required by the loaded cipher, per OpenSSL.
int DataCryptor::get_key_length() const {
  return EVP_CIPHER_key_length(m_cipher);
}
// Allocate and initialize an OpenSSL cipher context for the given mode
// (encrypt or decrypt) using the stored cipher and key.  Returns nullptr on
// failure.  The caller releases the context via return_context().
EVP_CIPHER_CTX* DataCryptor::get_context(CipherMode mode) {
  int enc;
  switch(mode) {
    case CIPHER_MODE_ENC:
      enc = 1;
      break;
    case CIPHER_MODE_DEC:
      enc = 0;
      break;
    default:
      lderr(m_cct) << "Invalid CipherMode:" << mode << dendl;
      return nullptr;
  }

  auto ctx = EVP_CIPHER_CTX_new();
  if (ctx == nullptr) {
    lderr(m_cct) << "EVP_CIPHER_CTX_new failed" << dendl;
    log_errors();
    return nullptr;
  }

  if (1 != EVP_CipherInit_ex(ctx, m_cipher, nullptr, m_key, nullptr, enc)) {
    lderr(m_cct) << "EVP_CipherInit_ex failed" << dendl;
    log_errors();
    // free the freshly-allocated context before bailing out (previously
    // leaked on this path)
    EVP_CIPHER_CTX_free(ctx);
    return nullptr;
  }
  return ctx;
}
// Release a context obtained from get_context(); safe to call with nullptr.
// The mode parameter is unused here but part of the DataCryptor interface.
void DataCryptor::return_context(EVP_CIPHER_CTX* ctx, CipherMode mode) {
  if (ctx != nullptr) {
    EVP_CIPHER_CTX_free(ctx);
  }
}
// Re-key the context with a new IV for the next operation; cipher, key and
// direction are left unchanged (nullptr / -1 arguments to EVP_CipherInit_ex).
// Returns 0 on success, -EINVAL on IV size mismatch, -EIO on OpenSSL error.
int DataCryptor::init_context(EVP_CIPHER_CTX* ctx, const unsigned char* iv,
                              uint32_t iv_length) const {
  if (iv_length != m_iv_size) {
    lderr(m_cct) << "cipher expects IV of " << m_iv_size << " bytes. got: "
                 << iv_length << dendl;
    return -EINVAL;
  }
  if (1 != EVP_CipherInit_ex(ctx, nullptr, nullptr, nullptr, iv, -1)) {
    lderr(m_cct) << "EVP_CipherInit_ex failed" << dendl;
    log_errors();
    return -EIO;
  }
  return 0;
}
// Encrypt/decrypt len bytes from in to out using the prepared context.
// Returns the number of bytes written on success, -EIO on OpenSSL error.
int DataCryptor::update_context(EVP_CIPHER_CTX* ctx, const unsigned char* in,
                                unsigned char* out, uint32_t len) const {
  int out_length;
  if (1 != EVP_CipherUpdate(ctx, out, &out_length, in, len)) {
    lderr(m_cct) << "EVP_CipherUpdate failed. len=" << len << dendl;
    log_errors();
    return -EIO;
  }
  return out_length;
}
void DataCryptor::log_errors() const {
while (true) {
auto error = ERR_get_error();
if (error == 0) {
break;
}
lderr(m_cct) << "OpenSSL error: " << ERR_error_string(error, nullptr)
<< dendl;
}
}
} // namespace openssl
} // namespace crypto
} // namespace librbd
| 3,903 | 24.350649 | 79 | cc |
null | ceph-main/src/librbd/crypto/openssl/DataCryptor.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CRYPTO_OPENSSL_DATA_CRYPTOR_H
#define CEPH_LIBRBD_CRYPTO_OPENSSL_DATA_CRYPTOR_H
#include "librbd/crypto/DataCryptor.h"
#include "include/Context.h"
#include <openssl/evp.h>
namespace librbd {
namespace crypto {
namespace openssl {
// OpenSSL EVP-based implementation of the generic DataCryptor interface,
// parameterized on EVP_CIPHER_CTX as the context type.  Owns a copy of the
// key (wiped on re-init and destruction).
class DataCryptor : public crypto::DataCryptor<EVP_CIPHER_CTX> {
public:
  DataCryptor(CephContext* cct) : m_cct(cct) {};
  ~DataCryptor();

  // look up the cipher by name and store a copy of the key; may be re-called
  int init(const char* cipher_name, const unsigned char* key,
           uint16_t key_length);
  uint32_t get_block_size() const override;
  uint32_t get_iv_size() const override;
  const unsigned char* get_key() const override;
  int get_key_length() const override;

  // context lifecycle: get -> (init_context/update_context)* -> return
  EVP_CIPHER_CTX* get_context(CipherMode mode) override;
  void return_context(EVP_CIPHER_CTX* ctx, CipherMode mode) override;

  int init_context(EVP_CIPHER_CTX* ctx, const unsigned char* iv,
                   uint32_t iv_length) const override;
  int update_context(EVP_CIPHER_CTX* ctx, const unsigned char* in,
                     unsigned char* out, uint32_t len) const override;

private:
  CephContext* m_cct;
  unsigned char* m_key = nullptr;   // owned copy of the key material
  uint16_t m_key_size = 0;
  const EVP_CIPHER* m_cipher;       // set by init()
  uint32_t m_iv_size;               // cached IV length for the cipher

  // dump the OpenSSL error queue to the log
  void log_errors() const;
};
} // namespace openssl
} // namespace crypto
} // namespace librbd
#endif // CEPH_LIBRBD_CRYPTO_OPENSSL_DATA_CRYPTOR_H
| 1,486 | 28.74 | 72 | h |
null | ceph-main/src/librbd/deep_copy/Handler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_DEEP_COPY_HANDLER_H
#define CEPH_LIBRBD_DEEP_COPY_HANDLER_H
#include "include/int_types.h"
#include "include/rbd/librbd.hpp"
namespace librbd {
namespace deep_copy {
// Callback interface for deep-copy progress: per-read byte accounting and
// per-object progress updates.
struct Handler {
  virtual ~Handler() {}

  // invoked for each completed read with the number of bytes read
  virtual void handle_read(uint64_t bytes_read) = 0;

  // invoked as objects complete; return value is propagated by implementors
  virtual int update_progress(uint64_t object_number,
                              uint64_t object_count) = 0;
};
// Handler that ignores all notifications (update_progress always returns 0).
struct NoOpHandler : public Handler {
  void handle_read(uint64_t bytes_read) override {
  }

  int update_progress(uint64_t object_number,
                      uint64_t object_count) override {
    return 0;
  }
};
// Handler that forwards per-object progress to a librbd ProgressContext;
// read-byte notifications are inherited as no-ops from NoOpHandler.
class ProgressHandler : public NoOpHandler {
public:
  ProgressHandler(ProgressContext* progress_ctx)
    : m_progress_ctx(progress_ctx) {
  }

  int update_progress(uint64_t object_number,
                      uint64_t object_count) override {
    return m_progress_ctx->update_progress(object_number, object_count);
  }

private:
  librbd::ProgressContext* m_progress_ctx;
};
} // namespace deep_copy
} // namespace librbd
#endif // CEPH_LIBRBD_DEEP_COPY_HANDLER_H
| 1,188 | 22.313725 | 72 | h |
null | ceph-main/src/librbd/deep_copy/ImageCopyRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "ImageCopyRequest.h"
#include "ObjectCopyRequest.h"
#include "common/errno.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/deep_copy/Handler.h"
#include "librbd/deep_copy/Utils.h"
#include "librbd/object_map/DiffRequest.h"
#include "osdc/Striper.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::deep_copy::ImageCopyRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace deep_copy {
using librbd::util::create_async_context_callback;
using librbd::util::create_context_callback;
using librbd::util::unique_lock_name;
template <typename I>
// Construct a ref-counted image deep-copy request spanning the source snap
// range [src_snap_id_start, src_snap_id_end] on top of dst_snap_id_start.
// object_number, if set, resumes the copy after that object.
ImageCopyRequest<I>::ImageCopyRequest(I *src_image_ctx, I *dst_image_ctx,
                                      librados::snap_t src_snap_id_start,
                                      librados::snap_t src_snap_id_end,
                                      librados::snap_t dst_snap_id_start,
                                      bool flatten,
                                      const ObjectNumber &object_number,
                                      const SnapSeqs &snap_seqs,
                                      Handler *handler,
                                      Context *on_finish)
  : RefCountedObject(dst_image_ctx->cct), m_src_image_ctx(src_image_ctx),
    m_dst_image_ctx(dst_image_ctx), m_src_snap_id_start(src_snap_id_start),
    m_src_snap_id_end(src_snap_id_end), m_dst_snap_id_start(dst_snap_id_start),
    m_flatten(flatten), m_object_number(object_number), m_snap_seqs(snap_seqs),
    m_handler(handler), m_on_finish(on_finish), m_cct(dst_image_ctx->cct),
    m_lock(ceph::make_mutex(unique_lock_name("ImageCopyRequest::m_lock", this))) {
}
template <typename I>
// Entry point: build the src->dst snapshot map for the requested boundary
// and start the copy; fails with -EINVAL if no snapshots map.
void ImageCopyRequest<I>::send() {
  m_dst_image_ctx->image_lock.lock_shared();
  util::compute_snap_map(m_dst_image_ctx->cct, m_src_snap_id_start,
                         m_src_snap_id_end, m_dst_image_ctx->snaps, m_snap_seqs,
                         &m_snap_map);
  m_dst_image_ctx->image_lock.unlock_shared();

  if (m_snap_map.empty()) {
    lderr(m_cct) << "failed to map snapshots within boundary" << dendl;
    finish(-EINVAL);
    return;
  }

  compute_diff();
}
template <typename I>
// Request cancellation; in-flight object copies drain and the request
// completes with -ECANCELED (see send_next_object_copy).
void ImageCopyRequest<I>::cancel() {
  std::lock_guard locker{m_lock};

  ldout(m_cct, 20) << dendl;
  m_canceled = true;
}
template <typename I>
// Compute the set of source object numbers that back a destination object,
// by mapping the dst object through the dst striping layout to image
// extents and back through the src striping layout.  The sets differ when
// src and dst use different stripe parameters.
void ImageCopyRequest<I>::map_src_objects(uint64_t dst_object,
                                          std::set<uint64_t> *src_objects) {
  std::vector<std::pair<uint64_t, uint64_t>> image_extents;
  Striper::extent_to_file(m_cct, &m_dst_image_ctx->layout, dst_object, 0,
                          m_dst_image_ctx->layout.object_size, image_extents);

  for (auto &e : image_extents) {
    std::map<object_t, std::vector<ObjectExtent>> src_object_extents;
    Striper::file_to_extents(m_cct, m_src_image_ctx->format_string,
                             &m_src_image_ctx->layout, e.first, e.second, 0,
                             src_object_extents);
    for (auto &p : src_object_extents) {
      for (auto &s : p.second) {
        src_objects->insert(s.objectno);
      }
    }
  }

  ceph_assert(!src_objects->empty());

  ldout(m_cct, 20) << dst_object << " -> " << *src_objects << dendl;
}
template <typename I>
// Compute the fast-diff object state between the source snap boundaries so
// unchanged/non-existent objects can be skipped; skipped entirely when
// flattening (every object must be copied to break the parent link).
void ImageCopyRequest<I>::compute_diff() {
  if (m_flatten) {
    send_object_copies();
    return;
  }

  ldout(m_cct, 10) << dendl;

  auto ctx = create_context_callback<
    ImageCopyRequest<I>, &ImageCopyRequest<I>::handle_compute_diff>(this);
  auto req = object_map::DiffRequest<I>::create(m_src_image_ctx, m_src_snap_id_start,
                                                m_src_snap_id_end, &m_object_diff_state,
                                                ctx);
  req->send();
}
template <typename I>
// Diff computation result: on error, drop the diff state and proceed
// without the fast-diff optimization (copy every object) rather than fail.
void ImageCopyRequest<I>::handle_compute_diff(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  if (r < 0) {
    ldout(m_cct, 10) << "fast-diff optimization disabled" << dendl;
    m_object_diff_state.resize(0);
  }

  send_object_copies();
}
template <typename I>
// Determine the object range to copy (resuming after m_object_number if
// set, through the max image size across all snapshots) and launch up to
// rbd_concurrent_management_ops object copies in parallel.  If everything
// was skipped synchronously, finish immediately.
void ImageCopyRequest<I>::send_object_copies() {
  m_object_no = 0;
  if (m_object_number) {
    m_object_no = *m_object_number + 1;
  }

  uint64_t size;
  {
    std::shared_lock image_locker{m_src_image_ctx->image_lock};
    size = m_src_image_ctx->get_image_size(CEPH_NOSNAP);
    for (auto snap_id : m_src_image_ctx->snaps) {
      size = std::max(size, m_src_image_ctx->get_image_size(snap_id));
    }
  }
  m_end_object_no = Striper::get_num_objects(m_dst_image_ctx->layout, size);

  ldout(m_cct, 20) << "start_object=" << m_object_no << ", "
                   << "end_object=" << m_end_object_no << dendl;

  bool complete;
  {
    std::lock_guard locker{m_lock};
    auto max_ops = m_src_image_ctx->config.template get_val<uint64_t>(
      "rbd_concurrent_management_ops");

    // attempt to schedule at least 'max_ops' initial requests where
    // some objects might be skipped if fast-diff notes no change
    for (uint64_t i = 0; i < max_ops; i++) {
      send_next_object_copy();
    }

    complete = (m_current_ops == 0) && !m_updating_progress;
  }

  if (complete) {
    finish(m_ret_val);
  }
}
template <typename I>
// Schedule the next object copy (m_lock must be held).  Honors pending
// cancellation/error, merges the fast-diff states of all source objects
// backing the destination object, skips objects that never existed, and
// passes EXISTS_CLEAN/FLATTEN hints to the per-object request.
void ImageCopyRequest<I>::send_next_object_copy() {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  if (m_canceled && m_ret_val == 0) {
    ldout(m_cct, 10) << "image copy canceled" << dendl;
    m_ret_val = -ECANCELED;
  }

  if (m_ret_val < 0 || m_object_no >= m_end_object_no) {
    return;
  }

  uint64_t ono = m_object_no++;

  Context *ctx = new LambdaContext(
    [this, ono](int r) {
      handle_object_copy(ono, r);
    });

  ldout(m_cct, 20) << "object_num=" << ono << dendl;
  ++m_current_ops;

  uint8_t object_diff_state = object_map::DIFF_STATE_HOLE;
  if (m_object_diff_state.size() > 0) {
    std::set<uint64_t> src_objects;
    map_src_objects(ono, &src_objects);

    // fold the per-source-object states into a single state, preferring
    // DATA_UPDATED > HOLE_UPDATED/DATA > HOLE
    for (auto src_ono : src_objects) {
      if (src_ono >= m_object_diff_state.size()) {
        object_diff_state = object_map::DIFF_STATE_DATA_UPDATED;
      } else {
        auto state = m_object_diff_state[src_ono];
        if ((state == object_map::DIFF_STATE_HOLE_UPDATED &&
             object_diff_state != object_map::DIFF_STATE_DATA_UPDATED) ||
            (state == object_map::DIFF_STATE_DATA &&
             object_diff_state == object_map::DIFF_STATE_HOLE) ||
            (state == object_map::DIFF_STATE_DATA_UPDATED)) {
          object_diff_state = state;
        }
      }
    }

    if (object_diff_state == object_map::DIFF_STATE_HOLE) {
      ldout(m_cct, 20) << "skipping non-existent object " << ono << dendl;
      // complete asynchronously to avoid deep recursion into
      // handle_object_copy while m_lock is held
      create_async_context_callback(*m_src_image_ctx, ctx)->complete(0);
      return;
    }
  }

  uint32_t flags = 0;
  if (m_flatten) {
    flags |= OBJECT_COPY_REQUEST_FLAG_FLATTEN;
  }
  if (object_diff_state == object_map::DIFF_STATE_DATA) {
    // no source objects have been updated and at least one has clean data
    flags |= OBJECT_COPY_REQUEST_FLAG_EXISTS_CLEAN;
  }

  auto req = ObjectCopyRequest<I>::create(
    m_src_image_ctx, m_dst_image_ctx, m_src_snap_id_start, m_dst_snap_id_start,
    m_snap_map, ono, flags, m_handler, ctx);
  req->send();
}
template <typename I>
// Per-object completion: record errors (-ENOENT is not an error), report
// progress for contiguous runs of completed objects in order (dropping
// m_lock around the handler callback, guarded by m_updating_progress),
// then schedule the next copy.  Finishes once all ops have drained.
void ImageCopyRequest<I>::handle_object_copy(uint64_t object_no, int r) {
  ldout(m_cct, 20) << "object_no=" << object_no << ", r=" << r << dendl;

  bool complete;
  {
    std::lock_guard locker{m_lock};
    ceph_assert(m_current_ops > 0);
    --m_current_ops;

    if (r < 0 && r != -ENOENT) {
      lderr(m_cct) << "object copy failed: " << cpp_strerror(r) << dendl;
      if (m_ret_val == 0) {
        m_ret_val = r;
      }
    } else {
      // objects can complete out of order; only report progress for the
      // longest in-order prefix (min-heap ordered by object number)
      m_copied_objects.push(object_no);
      while (!m_updating_progress && !m_copied_objects.empty() &&
             m_copied_objects.top() ==
               (m_object_number ? *m_object_number + 1 : 0)) {
        m_object_number = m_copied_objects.top();
        m_copied_objects.pop();
        uint64_t progress_object_no = *m_object_number + 1;
        m_updating_progress = true;
        m_lock.unlock();
        m_handler->update_progress(progress_object_no, m_end_object_no);
        m_lock.lock();
        ceph_assert(m_updating_progress);
        m_updating_progress = false;
      }
    }

    send_next_object_copy();
    complete = (m_current_ops == 0) && !m_updating_progress;
  }

  if (complete) {
    finish(m_ret_val);
  }
}
template <typename I>
// Complete the request and drop the reference taken for the operation.
void ImageCopyRequest<I>::finish(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  m_on_finish->complete(r);
  put();
}
} // namespace deep_copy
} // namespace librbd
template class librbd::deep_copy::ImageCopyRequest<librbd::ImageCtx>;
| 8,819 | 30.612903 | 88 | cc |
null | ceph-main/src/librbd/deep_copy/ImageCopyRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_DEEP_COPY_IMAGE_DEEP_COPY_REQUEST_H
#define CEPH_LIBRBD_DEEP_COPY_IMAGE_DEEP_COPY_REQUEST_H
#include "include/int_types.h"
#include "include/rados/librados.hpp"
#include "common/bit_vector.hpp"
#include "common/ceph_mutex.h"
#include "common/RefCountedObj.h"
#include "librbd/Types.h"
#include "librbd/deep_copy/Types.h"
#include <functional>
#include <map>
#include <queue>
#include <set>
#include <vector>
#include <boost/optional.hpp>
class Context;
namespace librbd {
class ImageCtx;
namespace deep_copy {
class Handler;
template <typename ImageCtxT = ImageCtx>
// Ref-counted async request that deep-copies all objects of an image from
// src to dst across a snapshot boundary, running multiple ObjectCopyRequests
// in parallel and reporting in-order progress through a Handler.
class ImageCopyRequest : public RefCountedObject {
public:
  static ImageCopyRequest* create(ImageCtxT *src_image_ctx,
                                  ImageCtxT *dst_image_ctx,
                                  librados::snap_t src_snap_id_start,
                                  librados::snap_t src_snap_id_end,
                                  librados::snap_t dst_snap_id_start,
                                  bool flatten,
                                  const ObjectNumber &object_number,
                                  const SnapSeqs &snap_seqs,
                                  Handler *handler,
                                  Context *on_finish) {
    return new ImageCopyRequest(src_image_ctx, dst_image_ctx, src_snap_id_start,
                                src_snap_id_end, dst_snap_id_start, flatten,
                                object_number, snap_seqs, handler, on_finish);
  }

  ImageCopyRequest(ImageCtxT *src_image_ctx, ImageCtxT *dst_image_ctx,
                   librados::snap_t src_snap_id_start,
                   librados::snap_t src_snap_id_end,
                   librados::snap_t dst_snap_id_start,
                   bool flatten, const ObjectNumber &object_number,
                   const SnapSeqs &snap_seqs, Handler *handler,
                   Context *on_finish);

  void send();
  void cancel();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * COMPUTE_DIFF
   *    |
   *    |      . . . . .
   *    |      .       .  (parallel execution of
   *    v      v       .   multiple objects at once)
   * COPY_OBJECT . . . .
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  ImageCtxT *m_src_image_ctx;
  ImageCtxT *m_dst_image_ctx;
  librados::snap_t m_src_snap_id_start;
  librados::snap_t m_src_snap_id_end;
  librados::snap_t m_dst_snap_id_start;
  bool m_flatten;
  ObjectNumber m_object_number;     // last fully-reported object (resume point)
  SnapSeqs m_snap_seqs;
  Handler *m_handler;
  Context *m_on_finish;

  CephContext *m_cct;
  ceph::mutex m_lock;               // guards all mutable state below
  bool m_canceled = false;

  uint64_t m_object_no = 0;         // next object to schedule
  uint64_t m_end_object_no = 0;
  uint64_t m_current_ops = 0;       // in-flight object copies
  // min-heap of completed objects awaiting in-order progress reporting
  std::priority_queue<
    uint64_t, std::vector<uint64_t>, std::greater<uint64_t>> m_copied_objects;
  bool m_updating_progress = false; // true while handler runs without m_lock
  SnapMap m_snap_map;
  int m_ret_val = 0;

  BitVector<2> m_object_diff_state; // fast-diff result (empty = disabled)

  void map_src_objects(uint64_t dst_object, std::set<uint64_t> *src_objects);

  void compute_diff();
  void handle_compute_diff(int r);

  void send_object_copies();
  void send_next_object_copy();
  void handle_object_copy(uint64_t object_no, int r);

  void finish(int r);
};
} // namespace deep_copy
} // namespace librbd
extern template class librbd::deep_copy::ImageCopyRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_DEEP_COPY_IMAGE_DEEP_COPY_REQUEST_H
| 3,503 | 27.258065 | 80 | h |
null | ceph-main/src/librbd/deep_copy/MetadataCopyRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "MetadataCopyRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/Utils.h"
#include "librbd/image/GetMetadataRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::deep_copy::MetadataCopyRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace deep_copy {
namespace {
const uint64_t MAX_METADATA_ITEMS = 128;
} // anonymous namespace
using librbd::util::create_context_callback;
using librbd::util::create_rados_callback;
template <typename I>
// Construct a request that copies all image metadata key/value pairs from
// src to dst; completes on_finish and self-deletes in finish().
MetadataCopyRequest<I>::MetadataCopyRequest(I *src_image_ctx, I *dst_image_ctx,
                                            Context *on_finish)
  : m_src_image_ctx(src_image_ctx), m_dst_image_ctx(dst_image_ctx),
    m_on_finish(on_finish), m_cct(dst_image_ctx->cct) {
}
template <typename I>
// Entry point: start listing the first batch of source metadata.
void MetadataCopyRequest<I>::send() {
  list_src_metadata();
}
template <typename I>
// Fetch the next batch (up to MAX_METADATA_ITEMS) of metadata key/value
// pairs from the source image header, starting after m_last_metadata_key.
void MetadataCopyRequest<I>::list_src_metadata() {
  ldout(m_cct, 20) << "start_key=" << m_last_metadata_key << dendl;

  m_metadata.clear();
  auto ctx = create_context_callback<
    MetadataCopyRequest<I>,
    &MetadataCopyRequest<I>::handle_list_src_metadata>(this);
  auto req = image::GetMetadataRequest<I>::create(
    m_src_image_ctx->md_ctx, m_src_image_ctx->header_oid, true, "",
    m_last_metadata_key, MAX_METADATA_ITEMS, &m_metadata, ctx);
  req->send();
}
template <typename I>
// Listing result: finish when exhausted, otherwise remember the pagination
// cursor (last key seen, full batch implies more may follow) and write the
// batch to the destination.
void MetadataCopyRequest<I>::handle_list_src_metadata(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_cct) << "failed to retrieve metadata: " << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }

  if (m_metadata.empty()) {
    finish(0);
    return;
  }

  m_last_metadata_key = m_metadata.rbegin()->first;
  m_more_metadata = (m_metadata.size() >= MAX_METADATA_ITEMS);
  set_dst_metadata();
}
template <typename I>
// Write the current metadata batch to the destination image header via the
// cls_rbd metadata_set operation.
void MetadataCopyRequest<I>::set_dst_metadata() {
  ldout(m_cct, 20) << "count=" << m_metadata.size() << dendl;

  librados::ObjectWriteOperation op;
  librbd::cls_client::metadata_set(&op, m_metadata);

  librados::AioCompletion *aio_comp = create_rados_callback<
    MetadataCopyRequest<I>,
    &MetadataCopyRequest<I>::handle_set_dst_metadata>(this);
  m_dst_image_ctx->md_ctx.aio_operate(m_dst_image_ctx->header_oid, aio_comp,
                                      &op);
  aio_comp->release();
}
template <typename I>
// Write result: loop back for the next batch if the previous listing was
// full, otherwise the copy is complete.
void MetadataCopyRequest<I>::handle_set_dst_metadata(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_cct) << "failed to set metadata: " << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }

  if (m_more_metadata) {
    list_src_metadata();
    return;
  }

  finish(0);
}
template <typename I>
// Complete the request with result r and destroy this self-managed object.
void MetadataCopyRequest<I>::finish(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  m_on_finish->complete(r);
  delete this;
}
} // namespace deep_copy
} // namespace librbd
template class librbd::deep_copy::MetadataCopyRequest<librbd::ImageCtx>;
| 3,156 | 25.754237 | 80 | cc |
null | ceph-main/src/librbd/deep_copy/MetadataCopyRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_DEEP_COPY_METADATA_COPY_REQUEST_H
#define CEPH_LIBRBD_DEEP_COPY_METADATA_COPY_REQUEST_H
#include "include/int_types.h"
#include "include/buffer.h"
#include "include/rados/librados.hpp"
#include "librbd/ImageCtx.h"
#include <map>
#include <string>
class Context;
namespace librbd {
namespace deep_copy {
template <typename ImageCtxT = librbd::ImageCtx>
// Async request that pages through all metadata key/value pairs on the
// source image header and writes them to the destination image header.
// Self-deleting on completion.
class MetadataCopyRequest {
public:
  static MetadataCopyRequest* create(ImageCtxT *src_image_ctx,
                                     ImageCtxT *dst_image_ctx,
                                     Context *on_finish) {
    return new MetadataCopyRequest(src_image_ctx, dst_image_ctx, on_finish);
  }

  MetadataCopyRequest(ImageCtxT *src_image_ctx, ImageCtxT *dst_image_ctx,
                      Context *on_finish);

  void send();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * LIST_SRC_METADATA <------\
   *    |                     | (repeat if additional
   *    v                     |  metadata)
   * SET_DST_METADATA --------/
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */
  typedef std::map<std::string, bufferlist> Metadata;

  ImageCtxT *m_src_image_ctx;
  ImageCtxT *m_dst_image_ctx;
  Context *m_on_finish;

  CephContext *m_cct;
  bufferlist m_out_bl;

  std::map<std::string, bufferlist> m_metadata;  // current batch
  std::string m_last_metadata_key;               // pagination cursor
  bool m_more_metadata = false;                  // last batch was full

  void list_src_metadata();
  void handle_list_src_metadata(int r);

  void set_dst_metadata();
  void handle_set_dst_metadata(int r);

  void finish(int r);
};
} // namespace deep_copy
} // namespace librbd
extern template class librbd::deep_copy::MetadataCopyRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_DEEP_COPY_METADATA_COPY_REQUEST_H
| 1,854 | 22.481013 | 79 | h |
null | ceph-main/src/librbd/deep_copy/ObjectCopyRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "ObjectCopyRequest.h"
#include "include/neorados/RADOS.hpp"
#include "common/errno.h"
#include "librados/snap_set_diff.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/deep_copy/Handler.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/AsyncOperation.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Utils.h"
#include "osdc/Striper.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::deep_copy::ObjectCopyRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace deep_copy {
using librbd::util::create_async_context_callback;
using librbd::util::create_context_callback;
using librbd::util::create_rados_callback;
using librbd::util::get_image_ctx;
// Construct a request to deep-copy a single backing object. Registers an
// in-flight AsyncOperation on the source image and duplicates both data
// IoCtxs so this request holds private references for its lifetime.
template <typename I>
ObjectCopyRequest<I>::ObjectCopyRequest(I *src_image_ctx,
                                        I *dst_image_ctx,
                                        librados::snap_t src_snap_id_start,
                                        librados::snap_t dst_snap_id_start,
                                        const SnapMap &snap_map,
                                        uint64_t dst_object_number,
                                        uint32_t flags, Handler* handler,
                                        Context *on_finish)
  : m_src_image_ctx(src_image_ctx),
    m_dst_image_ctx(dst_image_ctx), m_cct(dst_image_ctx->cct),
    m_src_snap_id_start(src_snap_id_start),
    m_dst_snap_id_start(dst_snap_id_start), m_snap_map(snap_map),
    m_dst_object_number(dst_object_number), m_flags(flags),
    m_handler(handler), m_on_finish(on_finish) {
  // both images must have open data pools and at least one snapshot mapping
  ceph_assert(src_image_ctx->data_ctx.is_valid());
  ceph_assert(dst_image_ctx->data_ctx.is_valid());
  ceph_assert(!m_snap_map.empty());
  m_src_async_op = new io::AsyncOperation();
  m_src_async_op->start_op(*get_image_ctx(m_src_image_ctx));
  m_src_io_ctx.dup(m_src_image_ctx->data_ctx);
  m_dst_io_ctx.dup(m_dst_image_ctx->data_ctx);
  m_dst_oid = m_dst_image_ctx->get_object_name(dst_object_number);
  ldout(m_cct, 20) << "dst_oid=" << m_dst_oid << ", "
                   << "src_snap_id_start=" << m_src_snap_id_start << ", "
                   << "dst_snap_id_start=" << m_dst_snap_id_start << ", "
                   << "snap_map=" << m_snap_map << dendl;
}
// Entry point: kicks off the state machine by listing the source snapshots.
template <typename I>
void ObjectCopyRequest<I>::send() {
  send_list_snaps();
}
// Computes the image extents backing the destination object, then issues a
// list-snaps dispatch against the source to learn the per-snapshot deltas
// (data/zeroed/DNE extents) for those extents.
template <typename I>
void ObjectCopyRequest<I>::send_list_snaps() {
  // image extents are consistent across src and dst so compute once
  std::tie(m_image_extents, m_image_area) = io::util::object_to_area_extents(
    m_dst_image_ctx, m_dst_object_number,
    {{0, m_dst_image_ctx->layout.object_size}});
  ldout(m_cct, 20) << "image_extents=" << m_image_extents
                   << " area=" << m_image_area << dendl;
  auto ctx = create_async_context_callback(
    *m_src_image_ctx, create_context_callback<
      ObjectCopyRequest, &ObjectCopyRequest<I>::handle_list_snaps>(this));
  if ((m_flags & OBJECT_COPY_REQUEST_FLAG_EXISTS_CLEAN) != 0) {
    // skip listing the snaps if we know the destination exists and is clean,
    // but we do need to update the object-map
    ctx->complete(0);
    return;
  }
  // snap ids to diff across: the start snapshot plus every mapped source
  // snapshot newer than it
  io::SnapIds snap_ids;
  snap_ids.reserve(1 + m_snap_map.size());
  snap_ids.push_back(m_src_snap_id_start);
  for (auto& [src_snap_id, _] : m_snap_map) {
    if (m_src_snap_id_start < src_snap_id) {
      snap_ids.push_back(src_snap_id);
    }
  }
  auto list_snaps_flags = io::LIST_SNAPS_FLAG_DISABLE_LIST_FROM_PARENT;
  m_snapshot_delta.clear();
  auto aio_comp = io::AioCompletion::create_and_start(
    ctx, get_image_ctx(m_src_image_ctx), io::AIO_TYPE_GENERIC);
  auto req = io::ImageDispatchSpec::create_list_snaps(
    *m_src_image_ctx, io::IMAGE_DISPATCH_LAYER_NONE, aio_comp,
    io::Extents{m_image_extents}, m_image_area, std::move(snap_ids),
    list_snaps_flags, &m_snapshot_delta, {});
  req->send();
}
// Completion for the list-snaps dispatch: derives which destination
// snapshots can contain this object, builds the per-snapshot read ops from
// the delta, and starts the read loop.
template <typename I>
void ObjectCopyRequest<I>::handle_list_snaps(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;
  if (r < 0) {
    lderr(m_cct) << "failed to list snaps: " << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }
  ldout(m_cct, 20) << "snapshot_delta=" << m_snapshot_delta << dendl;
  compute_dst_object_may_exist();
  compute_read_ops();
  send_read();
}
// Issues the read for the next pending snapshot in m_read_snaps; once the
// queue drains, transitions to computing write/zero ops and updating the
// destination object map.
template <typename I>
void ObjectCopyRequest<I>::send_read() {
  if (m_read_snaps.empty()) {
    // all snapshots have been read
    merge_write_ops();
    compute_zero_ops();
    send_update_object_map();
    return;
  }
  auto index = *m_read_snaps.begin();
  auto& read_op = m_read_ops[index];
  if (read_op.image_interval.empty()) {
    // nothing written to this object for this snapshot (must be trunc/remove)
    handle_read(0);
    return;
  }
  // read from the "read" snap id of the (write, read) pair
  auto io_context = m_src_image_ctx->duplicate_data_io_context();
  io_context->read_snap(index.second);
  io::Extents image_extents{read_op.image_interval.begin(),
                            read_op.image_interval.end()};
  io::ReadResult read_result{&read_op.image_extent_map,
                             &read_op.out_bl};
  ldout(m_cct, 20) << "read: src_snap_seq=" << index.second << ", "
                   << "image_extents=" << image_extents << dendl;
  int op_flags = (LIBRADOS_OP_FLAG_FADVISE_SEQUENTIAL |
                  LIBRADOS_OP_FLAG_FADVISE_NOCACHE);
  int read_flags = 0;
  if (index.second != m_src_image_ctx->snap_id) {
    // reading a snapshot other than the image's current snap context:
    // don't clip to the (possibly smaller) current image size
    read_flags |= io::READ_FLAG_DISABLE_CLIPPING;
  }
  auto ctx = create_context_callback<
    ObjectCopyRequest<I>, &ObjectCopyRequest<I>::handle_read>(this);
  auto aio_comp = io::AioCompletion::create_and_start(
    ctx, get_image_ctx(m_src_image_ctx), io::AIO_TYPE_READ);
  auto req = io::ImageDispatchSpec::create_read(
    *m_src_image_ctx, io::IMAGE_DISPATCH_LAYER_INTERNAL_START, aio_comp,
    std::move(image_extents), m_image_area, std::move(read_result),
    io_context, op_flags, read_flags, {});
  req->send();
}
// Completion for a single snapshot read: reports the bytes read to the
// optional progress handler, consumes the snapshot from the queue, and
// loops back into send_read() for the next one.
template <typename I>
void ObjectCopyRequest<I>::handle_read(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;
  if (r < 0) {
    lderr(m_cct) << "failed to read from source object: " << cpp_strerror(r)
                 << dendl;
    finish(r);
    return;
  }
  if (m_handler != nullptr) {
    auto index = *m_read_snaps.begin();
    auto& read_op = m_read_ops[index];
    m_handler->handle_read(read_op.out_bl.length());
  }
  ceph_assert(!m_read_snaps.empty());
  m_read_snaps.erase(m_read_snaps.begin());
  send_read();
}
// Applies the next pending per-snapshot object-map state (from
// m_dst_object_state) to the destination image's object map, guarded by the
// exclusive lock. Skipped entirely when the object-map feature is disabled.
// NOTE: manual shared-lock acquisition/release ordering below is deliberate
// (owner_lock before image_lock, released in reverse) — do not reorder.
template <typename I>
void ObjectCopyRequest<I>::send_update_object_map() {
  if (!m_dst_image_ctx->test_features(RBD_FEATURE_OBJECT_MAP) ||
      m_dst_object_state.empty()) {
    process_copyup();
    return;
  }
  m_dst_image_ctx->owner_lock.lock_shared();
  m_dst_image_ctx->image_lock.lock_shared();
  if (m_dst_image_ctx->object_map == nullptr) {
    // possible that exclusive lock was lost in background
    lderr(m_cct) << "object map is not initialized" << dendl;
    m_dst_image_ctx->image_lock.unlock_shared();
    m_dst_image_ctx->owner_lock.unlock_shared();
    finish(-EINVAL);
    return;
  }
  // pop the oldest pending (src snap seq -> object state) entry and map it
  // to the corresponding destination snapshot id
  auto &dst_object_state = *m_dst_object_state.begin();
  auto it = m_snap_map.find(dst_object_state.first);
  ceph_assert(it != m_snap_map.end());
  auto dst_snap_id = it->second.front();
  auto object_state = dst_object_state.second;
  m_dst_object_state.erase(m_dst_object_state.begin());
  ldout(m_cct, 20) << "dst_snap_id=" << dst_snap_id << ", object_state="
                   << static_cast<uint32_t>(object_state) << dendl;
  int r;
  auto finish_op_ctx = start_lock_op(m_dst_image_ctx->owner_lock, &r);
  if (finish_op_ctx == nullptr) {
    lderr(m_cct) << "lost exclusive lock" << dendl;
    m_dst_image_ctx->image_lock.unlock_shared();
    m_dst_image_ctx->owner_lock.unlock_shared();
    finish(r);
    return;
  }
  auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
      handle_update_object_map(r);
      finish_op_ctx->complete(0);
    });
  auto dst_image_ctx = m_dst_image_ctx;
  bool sent = dst_image_ctx->object_map->template aio_update<
    Context, &Context::complete>(dst_snap_id, m_dst_object_number, object_state,
                                 {}, {}, false, ctx);
  // NOTE: state machine might complete before we reach here
  dst_image_ctx->image_lock.unlock_shared();
  dst_image_ctx->owner_lock.unlock_shared();
  if (!sent) {
    // no-op update (only possible against the HEAD revision); complete inline
    ceph_assert(dst_snap_id == CEPH_NOSNAP);
    ctx->complete(0);
  }
}
// Completion for a single object-map update: either loops back to apply the
// next pending snapshot state or, once all states are applied, advances to
// copyup processing.
template <typename I>
void ObjectCopyRequest<I>::handle_update_object_map(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;
  if (r < 0) {
    lderr(m_cct) << "failed to update object map: " << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }
  if (m_dst_object_state.empty()) {
    // every snapshot's object-map state has been recorded
    process_copyup();
  } else {
    // more per-snapshot states remain to be applied
    send_update_object_map();
  }
}
// Gives the destination object-dispatch layers a chance to transform the
// pending write data (e.g. for encryption or migration layers) before the
// per-snapshot writes are issued. With nothing to write, completes
// immediately (-ENOENT signals a nonexistent object to the copyup caller).
template <typename I>
void ObjectCopyRequest<I>::process_copyup() {
  if (m_snapshot_sparse_bufferlist.empty()) {
    // no data to copy or truncate/zero. only the copyup state machine cares
    // about whether the object exists or not, and it always copies from
    // snap id 0.
    finish(m_src_snap_id_start > 0 ? 0 : -ENOENT);
    return;
  }
  ldout(m_cct, 20) << dendl;
  // let dispatch layers have a chance to process the data but
  // assume that the dispatch layer will only touch the sparse bufferlist
  auto r = m_dst_image_ctx->io_object_dispatcher->prepare_copyup(
    m_dst_object_number, &m_snapshot_sparse_bufferlist);
  if (r < 0) {
    lderr(m_cct) << "failed to prepare copyup data: " << cpp_strerror(r)
                 << dendl;
    finish(r);
    return;
  }
  send_write_object();
}
// Assembles and issues the destination write op for the oldest pending
// snapshot in m_snapshot_sparse_bufferlist: data extents become writes,
// zeroed extents become zero/truncate/remove (or create+truncate when the
// parent must stay hidden), all executed under the destination snapshot
// context derived from the snap map.
template <typename I>
void ObjectCopyRequest<I>::send_write_object() {
  ceph_assert(!m_snapshot_sparse_bufferlist.empty());
  auto& sparse_bufferlist = m_snapshot_sparse_bufferlist.begin()->second;
  m_src_image_ctx->image_lock.lock_shared();
  bool hide_parent = (m_src_snap_id_start == 0 &&
                      m_src_image_ctx->parent != nullptr);
  m_src_image_ctx->image_lock.unlock_shared();
  // retrieve the destination snap context for the op
  SnapIds dst_snap_ids;
  librados::snap_t dst_snap_seq = 0;
  librados::snap_t src_snap_seq = m_snapshot_sparse_bufferlist.begin()->first;
  if (src_snap_seq != 0) {
    auto snap_map_it = m_snap_map.find(src_snap_seq);
    ceph_assert(snap_map_it != m_snap_map.end());
    auto dst_snap_id = snap_map_it->second.front();
    auto dst_may_exist_it = m_dst_object_may_exist.find(dst_snap_id);
    ceph_assert(dst_may_exist_it != m_dst_object_may_exist.end());
    if (!dst_may_exist_it->second && !sparse_bufferlist.empty()) {
      // if the object cannot exist, the only valid op is to remove it
      ldout(m_cct, 20) << "object DNE: src_snap_seq=" << src_snap_seq << dendl;
      ceph_assert(sparse_bufferlist.ext_count() == 1U);
      ceph_assert(sparse_bufferlist.begin().get_val().state ==
                    io::SPARSE_EXTENT_STATE_ZEROED &&
                  sparse_bufferlist.begin().get_off() == 0 &&
                  sparse_bufferlist.begin().get_len() ==
                    m_dst_image_ctx->layout.object_size);
    }
    // write snapshot context should be before actual snapshot
    ceph_assert(!snap_map_it->second.empty());
    auto dst_snap_ids_it = snap_map_it->second.begin();
    ++dst_snap_ids_it;
    dst_snap_ids = SnapIds{dst_snap_ids_it, snap_map_it->second.end()};
    if (!dst_snap_ids.empty()) {
      dst_snap_seq = dst_snap_ids.front();
    }
    ceph_assert(dst_snap_seq != CEPH_NOSNAP);
  }
  ldout(m_cct, 20) << "src_snap_seq=" << src_snap_seq << ", "
                   << "dst_snap_seq=" << dst_snap_seq << ", "
                   << "dst_snaps=" << dst_snap_ids << dendl;
  librados::ObjectWriteOperation op;
  bool migration = ((m_flags & OBJECT_COPY_REQUEST_FLAG_MIGRATION) != 0);
  if (migration) {
    // guard against a concurrent deep-copy having already advanced the
    // destination snapshot context (fails the op with -ERANGE)
    ldout(m_cct, 20) << "assert_snapc_seq=" << dst_snap_seq << dendl;
    cls_client::assert_snapc_seq(&op, dst_snap_seq,
                                 cls::rbd::ASSERT_SNAPC_SEQ_GT_SNAPSET_SEQ);
  }
  for (auto& sbe : sparse_bufferlist) {
    switch (sbe.get_val().state) {
    case io::SPARSE_EXTENT_STATE_DATA:
      ldout(m_cct, 20) << "write op: " << sbe.get_off() << "~"
                       << sbe.get_len() << dendl;
      op.write(sbe.get_off(), std::move(sbe.get_val().bl));
      op.set_op_flags2(LIBRADOS_OP_FLAG_FADVISE_SEQUENTIAL |
                       LIBRADOS_OP_FLAG_FADVISE_NOCACHE);
      break;
    case io::SPARSE_EXTENT_STATE_ZEROED:
      if (sbe.get_off() + sbe.get_len() ==
            m_dst_image_ctx->layout.object_size) {
        if (sbe.get_off() == 0) {
          if (hide_parent) {
            // keep an empty object so the parent data stays whited out
            ldout(m_cct, 20) << "create+truncate op" << dendl;
            op.create(false);
            op.truncate(0);
          } else {
            ldout(m_cct, 20) << "remove op" << dendl;
            op.remove();
          }
        } else {
          ldout(m_cct, 20) << "trunc op: " << sbe.get_off() << dendl;
          op.truncate(sbe.get_off());
        }
      } else {
        ldout(m_cct, 20) << "zero op: " << sbe.get_off() << "~"
                         << sbe.get_len() << dendl;
        op.zero(sbe.get_off(), sbe.get_len());
      }
      break;
    default:
      ceph_abort();
    }
  }
  if (op.size() == (migration ? 1 : 0)) {
    // nothing besides the optional guard was queued; treat as a no-op write
    handle_write_object(0);
    return;
  }
  int r;
  Context *finish_op_ctx;
  {
    std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
    finish_op_ctx = start_lock_op(m_dst_image_ctx->owner_lock, &r);
  }
  if (finish_op_ctx == nullptr) {
    lderr(m_cct) << "lost exclusive lock" << dendl;
    finish(r);
    return;
  }
  auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
      handle_write_object(r);
      finish_op_ctx->complete(0);
    });
  librados::AioCompletion *comp = create_rados_callback(ctx);
  r = m_dst_io_ctx.aio_operate(m_dst_oid, comp, &op, dst_snap_seq, dst_snap_ids,
                               nullptr);
  ceph_assert(r == 0);
  comp->release();
}
// Completion for one snapshot's destination write. -ENOENT and -ERANGE are
// normalized to success (the latter logged as a concurrent deep copy); any
// other failure aborts the request. On success the processed snapshot entry
// is consumed and the next pending snapshot (if any) is written.
template <typename I>
void ObjectCopyRequest<I>::handle_write_object(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;
  switch (r) {
  case -ENOENT:
    r = 0;
    break;
  case -ERANGE:
    ldout(m_cct, 10) << "concurrent deep copy" << dendl;
    r = 0;
    break;
  default:
    break;
  }
  if (r < 0) {
    lderr(m_cct) << "failed to write to destination object: " << cpp_strerror(r)
                 << dendl;
    finish(r);
    return;
  }
  m_snapshot_sparse_bufferlist.erase(m_snapshot_sparse_bufferlist.begin());
  if (m_snapshot_sparse_bufferlist.empty()) {
    finish(0);
  } else {
    send_write_object();
  }
}
// Opens a tracked operation against the destination's exclusive lock so the
// lock cannot be released mid-op; returns a Context to complete when done.
// Caller must already hold m_dst_image_ctx->owner_lock (asserted). Returns
// nullptr (with *r set) if the exclusive lock was lost; returns a dummy
// context when the exclusive-lock feature is not in use.
template <typename I>
Context *ObjectCopyRequest<I>::start_lock_op(ceph::shared_mutex &owner_lock,
                                             int* r) {
  ceph_assert(ceph_mutex_is_locked(m_dst_image_ctx->owner_lock));
  if (m_dst_image_ctx->exclusive_lock == nullptr) {
    return new LambdaContext([](int r) {});
  }
  return m_dst_image_ctx->exclusive_lock->start_op(r);
}
// Translates the listed snapshot delta into per-(write,read)-snap read ops:
// DATA extents are read directly; initial DNE extents are read from the
// parent image when flattening from snap id 0; extents belonging to
// snapshots where the destination object cannot exist are skipped.
template <typename I>
void ObjectCopyRequest<I>::compute_read_ops() {
  ldout(m_cct, 20) << dendl;
  m_src_image_ctx->image_lock.lock_shared();
  bool read_from_parent = (m_src_snap_id_start == 0 &&
                           m_src_image_ctx->parent != nullptr);
  m_src_image_ctx->image_lock.unlock_shared();
  bool only_dne_extents = true;
  interval_set<uint64_t> dne_image_interval;
  // compute read ops for any data sections or for any extents that we need to
  // read from our parent
  for (auto& [key, image_intervals] : m_snapshot_delta) {
    io::WriteReadSnapIds write_read_snap_ids{key};
    // advance the src write snap id to the first valid snap id
    if (write_read_snap_ids.first > m_src_snap_id_start) {
      // don't attempt to read from snapshots that shouldn't exist in
      // case the OSD fails to give a correct snap list
      auto snap_map_it = m_snap_map.find(write_read_snap_ids.first);
      ceph_assert(snap_map_it != m_snap_map.end());
      auto dst_snap_seq = snap_map_it->second.front();
      auto dst_may_exist_it = m_dst_object_may_exist.find(dst_snap_seq);
      ceph_assert(dst_may_exist_it != m_dst_object_may_exist.end());
      if (!dst_may_exist_it->second) {
        ldout(m_cct, 20) << "DNE snapshot: " << write_read_snap_ids.first
                         << dendl;
        continue;
      }
    }
    for (auto& image_interval : image_intervals) {
      auto state = image_interval.get_val().state;
      switch (state) {
      case io::SPARSE_EXTENT_STATE_DNE:
        if (write_read_snap_ids == io::INITIAL_WRITE_READ_SNAP_IDS &&
            read_from_parent) {
          // special-case for DNE initial object-extents since when flattening
          // we need to read data from the parent images extents
          ldout(m_cct, 20) << "DNE extent: "
                           << image_interval.get_off() << "~"
                           << image_interval.get_len() << dendl;
          dne_image_interval.insert(
            image_interval.get_off(), image_interval.get_len());
        }
        break;
      case io::SPARSE_EXTENT_STATE_ZEROED:
        only_dne_extents = false;
        break;
      case io::SPARSE_EXTENT_STATE_DATA:
        ldout(m_cct, 20) << "read op: "
                         << "snap_ids=" << write_read_snap_ids << " "
                         << image_interval.get_off() << "~"
                         << image_interval.get_len() << dendl;
        m_read_ops[write_read_snap_ids].image_interval.union_insert(
          image_interval.get_off(), image_interval.get_len());
        only_dne_extents = false;
        break;
      default:
        ceph_abort();
        break;
      }
    }
  }
  bool flatten = ((m_flags & OBJECT_COPY_REQUEST_FLAG_FLATTEN) != 0);
  if (!dne_image_interval.empty() && (!only_dne_extents || flatten)) {
    // schedule parent reads for the DNE extents against the first mapped
    // source snapshot, clipped to the parent overlap
    auto snap_map_it = m_snap_map.begin();
    ceph_assert(snap_map_it != m_snap_map.end());
    auto src_snap_seq = snap_map_it->first;
    WriteReadSnapIds write_read_snap_ids{src_snap_seq, src_snap_seq};
    // prepare to prune the extents to the maximum parent overlap
    std::shared_lock image_locker(m_src_image_ctx->image_lock);
    uint64_t raw_overlap = 0;
    int r = m_src_image_ctx->get_parent_overlap(src_snap_seq, &raw_overlap);
    if (r < 0) {
      ldout(m_cct, 5) << "failed getting parent overlap for snap_id: "
                      << src_snap_seq << ": " << cpp_strerror(r) << dendl;
    } else if (raw_overlap > 0) {
      ldout(m_cct, 20) << "raw_overlap=" << raw_overlap << dendl;
      io::Extents parent_extents;
      for (auto [image_offset, image_length] : dne_image_interval) {
        parent_extents.emplace_back(image_offset, image_length);
      }
      m_src_image_ctx->prune_parent_extents(parent_extents, m_image_area,
                                            raw_overlap, false);
      for (auto [image_offset, image_length] : parent_extents) {
        ldout(m_cct, 20) << "parent read op: "
                         << "snap_ids=" << write_read_snap_ids << " "
                         << image_offset << "~" << image_length << dendl;
        m_read_ops[write_read_snap_ids].image_interval.union_insert(
          image_offset, image_length);
      }
    }
  }
  // queue every snapshot that ended up with a read op
  for (auto& [write_read_snap_ids, _] : m_read_ops) {
    m_read_snaps.push_back(write_read_snap_ids);
  }
}
// Converts each snapshot's sparse read result into destination write state:
// extents returned with data become SPARSE_EXTENT_STATE_DATA entries in
// m_snapshot_sparse_bufferlist, while requested-but-not-returned extents
// (holes in the sparse read) are recorded as zero intervals.
template <typename I>
void ObjectCopyRequest<I>::merge_write_ops() {
  ldout(m_cct, 20) << dendl;
  for (auto& [write_read_snap_ids, read_op] : m_read_ops) {
    auto src_snap_seq = write_read_snap_ids.first;
    // convert the resulting sparse image extent map to an interval ...
    auto& image_data_interval = m_dst_data_interval[src_snap_seq];
    for (auto [image_offset, image_length] : read_op.image_extent_map) {
      image_data_interval.union_insert(image_offset, image_length);
    }
    // ... and compute the difference between it and the image extents since
    // that indicates zeroed extents
    interval_set<uint64_t> intersection;
    intersection.intersection_of(read_op.image_interval, image_data_interval);
    read_op.image_interval.subtract(intersection);
    for (auto& [image_offset, image_length] : read_op.image_interval) {
      ldout(m_cct, 20) << "src_snap_seq=" << src_snap_seq << ", "
                       << "inserting sparse-read zero " << image_offset << "~"
                       << image_length << dendl;
      m_dst_zero_interval[src_snap_seq].union_insert(
        image_offset, image_length);
    }
    // buffer_offset tracks the position in out_bl corresponding to the
    // current image extent; object extents are carved from it in order
    uint64_t buffer_offset = 0;
    for (auto [image_offset, image_length] : read_op.image_extent_map) {
      // convert image extents back to object extents for the write op
      striper::LightweightObjectExtents object_extents;
      io::util::area_to_object_extents(m_dst_image_ctx, image_offset,
                                       image_length, m_image_area,
                                       buffer_offset, &object_extents);
      for (auto& object_extent : object_extents) {
        ldout(m_cct, 20) << "src_snap_seq=" << src_snap_seq << ", "
                         << "object_offset=" << object_extent.offset << ", "
                         << "object_length=" << object_extent.length << dendl;
        bufferlist sub_bl;
        sub_bl.substr_of(read_op.out_bl, buffer_offset, object_extent.length);
        m_snapshot_sparse_bufferlist[src_snap_seq].insert(
          object_extent.offset, object_extent.length,
          {io::SPARSE_EXTENT_STATE_DATA, object_extent.length,\
           std::move(sub_bl)});
        buffer_offset += object_extent.length;
      }
    }
  }
}
// Converts the accumulated per-snapshot zero intervals into concrete zero /
// truncate entries in m_snapshot_sparse_bufferlist, tracks the resulting
// object end size across snapshots, and records the per-snapshot object-map
// state (EXISTS / EXISTS_CLEAN / nonexistent) for the destination.
template <typename I>
void ObjectCopyRequest<I>::compute_zero_ops() {
  ldout(m_cct, 20) << dendl;
  m_src_image_ctx->image_lock.lock_shared();
  bool hide_parent = (m_src_snap_id_start == 0 &&
                      m_src_image_ctx->parent != nullptr);
  m_src_image_ctx->image_lock.unlock_shared();
  // ensure we have a zeroed interval for each snapshot
  for (auto& [src_snap_seq, _] : m_snap_map) {
    if (m_src_snap_id_start < src_snap_seq) {
      m_dst_zero_interval[src_snap_seq];
    }
  }
  // exists if copying from an arbitrary snapshot w/o any deltas in the
  // start snapshot slot (i.e. DNE)
  bool object_exists = (
    m_src_snap_id_start > 0 &&
    m_snapshot_delta.count({m_src_snap_id_start, m_src_snap_id_start}) == 0);
  bool fast_diff = m_dst_image_ctx->test_features(RBD_FEATURE_FAST_DIFF);
  uint64_t prev_end_size = 0;
  // compute zero ops from the zeroed intervals
  for (auto &it : m_dst_zero_interval) {
    auto src_snap_seq = it.first;
    auto &zero_interval = it.second;
    auto snap_map_it = m_snap_map.find(src_snap_seq);
    ceph_assert(snap_map_it != m_snap_map.end());
    auto dst_snap_seq = snap_map_it->second.front();
    auto dst_may_exist_it = m_dst_object_may_exist.find(dst_snap_seq);
    ceph_assert(dst_may_exist_it != m_dst_object_may_exist.end());
    if (!dst_may_exist_it->second && object_exists) {
      // the object is beyond the image size at this snapshot: whole-object
      // zero (which send_write_object turns into a remove/truncate)
      ldout(m_cct, 5) << "object DNE for snap_id: " << dst_snap_seq << dendl;
      m_snapshot_sparse_bufferlist[src_snap_seq].insert(
        0, m_dst_image_ctx->layout.object_size,
        {io::SPARSE_EXTENT_STATE_ZEROED, m_dst_image_ctx->layout.object_size});
      object_exists = false;
      prev_end_size = 0;
      continue;
    }
    if (hide_parent) {
      // stop hiding the parent once this object no longer overlaps it
      std::shared_lock image_locker{m_dst_image_ctx->image_lock};
      uint64_t raw_overlap = 0;
      uint64_t object_overlap = 0;
      int r = m_dst_image_ctx->get_parent_overlap(dst_snap_seq, &raw_overlap);
      if (r < 0) {
        ldout(m_cct, 5) << "failed getting parent overlap for snap_id: "
                        << dst_snap_seq << ": " << cpp_strerror(r) << dendl;
      } else if (raw_overlap > 0) {
        auto parent_extents = m_image_extents;
        object_overlap = m_dst_image_ctx->prune_parent_extents(
          parent_extents, m_image_area, raw_overlap, false);
      }
      if (object_overlap == 0) {
        ldout(m_cct, 20) << "no parent overlap" << dendl;
        hide_parent = false;
      }
    }
    // collect known zeroed extents from the snapshot delta for the current
    // src snapshot. If this is the first snapshot, we might need to handle
    // the whiteout case if it overlaps with the parent
    auto first_src_snap_id = m_snap_map.begin()->first;
    auto snapshot_delta_it = m_snapshot_delta.lower_bound(
      {(hide_parent && src_snap_seq == first_src_snap_id ?
         0 : src_snap_seq), 0});
    for (; snapshot_delta_it != m_snapshot_delta.end() &&
           snapshot_delta_it->first.first <= src_snap_seq;
         ++snapshot_delta_it) {
      auto& write_read_snap_ids = snapshot_delta_it->first;
      auto& image_intervals = snapshot_delta_it->second;
      for (auto& image_interval : image_intervals) {
        auto state = image_interval.get_val().state;
        switch (state) {
        case io::SPARSE_EXTENT_STATE_ZEROED:
          if (write_read_snap_ids != io::INITIAL_WRITE_READ_SNAP_IDS) {
            ldout(m_cct, 20) << "zeroed extent: "
                             << "src_snap_seq=" << src_snap_seq << " "
                             << image_interval.get_off() << "~"
                             << image_interval.get_len() << dendl;
            zero_interval.union_insert(
              image_interval.get_off(), image_interval.get_len());
          } else if (hide_parent &&
                     write_read_snap_ids == io::INITIAL_WRITE_READ_SNAP_IDS) {
            ldout(m_cct, 20) << "zeroed (hide parent) extent: "
                             << "src_snap_seq=" << src_snap_seq << " "
                             << image_interval.get_off() << "~"
                             << image_interval.get_len() << dendl;
            zero_interval.union_insert(
              image_interval.get_off(), image_interval.get_len());
          }
          break;
        case io::SPARSE_EXTENT_STATE_DNE:
        case io::SPARSE_EXTENT_STATE_DATA:
          break;
        default:
          ceph_abort();
          break;
        }
      }
    }
    // subtract any data intervals from our zero intervals
    auto& data_interval = m_dst_data_interval[src_snap_seq];
    interval_set<uint64_t> intersection;
    intersection.intersection_of(zero_interval, data_interval);
    zero_interval.subtract(intersection);
    // update end_size if there are writes into higher offsets
    uint64_t end_size = prev_end_size;
    auto iter = m_snapshot_sparse_bufferlist.find(src_snap_seq);
    if (iter != m_snapshot_sparse_bufferlist.end()) {
      for (auto &sparse_bufferlist : iter->second) {
        object_exists = true;
        end_size = std::max(
          end_size, sparse_bufferlist.get_off() + sparse_bufferlist.get_len());
      }
    }
    ldout(m_cct, 20) << "src_snap_seq=" << src_snap_seq << ", "
                     << "dst_snap_seq=" << dst_snap_seq << ", "
                     << "zero_interval=" << zero_interval << ", "
                     << "end_size=" << end_size << dendl;
    for (auto z = zero_interval.begin(); z != zero_interval.end(); ++z) {
      // convert image extents back to object extents for the write op
      striper::LightweightObjectExtents object_extents;
      io::util::area_to_object_extents(m_dst_image_ctx, z.get_start(),
                                       z.get_len(), m_image_area, 0,
                                       &object_extents);
      for (auto& object_extent : object_extents) {
        ceph_assert(object_extent.offset + object_extent.length <=
                      m_dst_image_ctx->layout.object_size);
        if (object_extent.offset + object_extent.length >= end_size) {
          // zero interval at the object end
          if ((object_extent.offset == 0 && hide_parent) ||
              (object_extent.offset < prev_end_size)) {
            ldout(m_cct, 20) << "truncate " << object_extent.offset
                             << dendl;
            auto length =
              m_dst_image_ctx->layout.object_size - object_extent.offset;
            m_snapshot_sparse_bufferlist[src_snap_seq].insert(
              object_extent.offset, length,
              {io::SPARSE_EXTENT_STATE_ZEROED, length});
          }
          object_exists = (object_extent.offset > 0 || hide_parent);
          end_size = std::min(end_size, object_extent.offset);
        } else {
          // zero interval inside the object
          ldout(m_cct, 20) << "zero "
                           << object_extent.offset << "~"
                           << object_extent.length << dendl;
          m_snapshot_sparse_bufferlist[src_snap_seq].insert(
            object_extent.offset, object_extent.length,
            {io::SPARSE_EXTENT_STATE_ZEROED, object_extent.length});
          object_exists = true;
        }
      }
    }
    uint8_t dst_object_map_state = OBJECT_NONEXISTENT;
    if (object_exists) {
      dst_object_map_state = OBJECT_EXISTS;
      if (fast_diff && m_snapshot_sparse_bufferlist.count(src_snap_seq) == 0) {
        // no writes at this snapshot -> contents unchanged since previous one
        dst_object_map_state = OBJECT_EXISTS_CLEAN;
      }
      m_dst_object_state[src_snap_seq] = dst_object_map_state;
    }
    ldout(m_cct, 20) << "dst_snap_seq=" << dst_snap_seq << ", "
                     << "end_size=" << end_size << ", "
                     << "dst_object_map_state="
                     << static_cast<uint32_t>(dst_object_map_state) << dendl;
    prev_end_size = end_size;
  }
}
// Completes the request: releases the in-flight source operation and deletes
// this request (closing its duplicated IoCtxs) before firing the stored
// user callback with the final result.
template <typename I>
void ObjectCopyRequest<I>::finish(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;
  // ensure IoCtxs are closed prior to proceeding
  auto* completion_ctx = m_on_finish;
  m_src_async_op->finish_op();
  delete m_src_async_op;
  delete this;
  completion_ctx->complete(r);
}
// For every destination snapshot (plus HEAD), records whether this object
// number is within the image's object count at that snapshot -- i.e. whether
// the destination object can possibly exist there.
template <typename I>
void ObjectCopyRequest<I>::compute_dst_object_may_exist() {
  std::shared_lock image_locker{m_dst_image_ctx->image_lock};
  auto record = [this](librados::snap_t snap_id) {
      m_dst_object_may_exist[snap_id] =
        (m_dst_object_number < m_dst_image_ctx->get_object_count(snap_id));
    };
  for (auto snap_id : m_dst_image_ctx->snaps) {
    record(snap_id);
  }
  record(CEPH_NOSNAP);
  ldout(m_cct, 20) << "dst_object_may_exist=" << m_dst_object_may_exist
                   << dendl;
}
} // namespace deep_copy
} // namespace librbd
template class librbd::deep_copy::ObjectCopyRequest<librbd::ImageCtx>;
| 30,508 | 35.320238 | 80 | cc |
null | ceph-main/src/librbd/deep_copy/ObjectCopyRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_DEEP_COPY_OBJECT_COPY_REQUEST_H
#define CEPH_LIBRBD_DEEP_COPY_OBJECT_COPY_REQUEST_H
#include "include/int_types.h"
#include "include/interval_set.h"
#include "include/rados/librados.hpp"
#include "common/snap_types.h"
#include "librbd/ImageCtx.h"
#include "librbd/deep_copy/Types.h"
#include "librbd/io/Types.h"
#include <list>
#include <map>
#include <string>
class Context;
class RWLock;
namespace librbd {
namespace io { class AsyncOperation; }
namespace deep_copy {
struct Handler;
// Deep-copies one backing object (all requested snapshot revisions) from a
// source image to a destination image. The request deletes itself upon
// completion.
template <typename ImageCtxT = librbd::ImageCtx>
class ObjectCopyRequest {
public:
  // Factory helper matching the librbd request convention.
  static ObjectCopyRequest* create(ImageCtxT *src_image_ctx,
                                   ImageCtxT *dst_image_ctx,
                                   librados::snap_t src_snap_id_start,
                                   librados::snap_t dst_snap_id_start,
                                   const SnapMap &snap_map,
                                   uint64_t object_number, uint32_t flags,
                                   Handler* handler, Context *on_finish) {
    return new ObjectCopyRequest(src_image_ctx, dst_image_ctx,
                                 src_snap_id_start, dst_snap_id_start, snap_map,
                                 object_number, flags, handler, on_finish);
  }
  ObjectCopyRequest(ImageCtxT *src_image_ctx, ImageCtxT *dst_image_ctx,
                    librados::snap_t src_snap_id_start,
                    librados::snap_t dst_snap_id_start, const SnapMap &snap_map,
                    uint64_t object_number, uint32_t flags, Handler* handler,
                    Context *on_finish);
  // Starts the state machine; on_finish is completed with the final result.
  void send();
  // testing support
  inline librados::IoCtx &get_src_io_ctx() {
    return m_src_io_ctx;
  }
  inline librados::IoCtx &get_dst_io_ctx() {
    return m_dst_io_ctx;
  }
private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * LIST_SNAPS
   *    |
   *    |/---------\
   *    |          | (repeat for each snapshot)
   *    v          |
   * READ ---------/
   *    |
   *    |     /-----------\
   *    |     |           | (repeat for each snapshot)
   *    v     v           |
   * UPDATE_OBJECT_MAP ---/ (skip if object
   *    |                    map disabled)
   *    |     /-----------\
   *    |     |           | (repeat for each snapshot)
   *    v     v           |
   * WRITE_OBJECT --------/
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */
  // Per-snapshot source read state.
  struct ReadOp {
    interval_set<uint64_t> image_interval;  // image extents to read
    io::Extents image_extent_map;           // sparse extents actually returned
    bufferlist out_bl;                      // returned data
  };
  // (write snap id, read snap id) pair identifying a snapshot delta slot
  typedef std::pair<librados::snap_t, librados::snap_t> WriteReadSnapIds;
  ImageCtxT *m_src_image_ctx;
  ImageCtxT *m_dst_image_ctx;
  CephContext *m_cct;
  librados::snap_t m_src_snap_id_start;
  librados::snap_t m_dst_snap_id_start;
  SnapMap m_snap_map;   // src snap id -> ordered list of dst snap ids
  uint64_t m_dst_object_number;
  uint32_t m_flags;     // OBJECT_COPY_REQUEST_FLAG_* bitmask
  Handler* m_handler;   // optional progress handler (may be nullptr)
  Context *m_on_finish;
  // private duplicates of the images' data-pool contexts
  decltype(m_src_image_ctx->data_ctx) m_src_io_ctx;
  decltype(m_dst_image_ctx->data_ctx) m_dst_io_ctx;
  std::string m_dst_oid;
  io::Extents m_image_extents;
  io::ImageArea m_image_area = io::ImageArea::DATA;
  io::SnapshotDelta m_snapshot_delta;
  std::map<WriteReadSnapIds, ReadOp> m_read_ops;
  std::list<WriteReadSnapIds> m_read_snaps;  // pending read queue
  io::SnapshotSparseBufferlist m_snapshot_sparse_bufferlist;
  std::map<librados::snap_t, interval_set<uint64_t>> m_dst_data_interval;
  std::map<librados::snap_t, interval_set<uint64_t>> m_dst_zero_interval;
  std::map<librados::snap_t, uint8_t> m_dst_object_state;
  std::map<librados::snap_t, bool> m_dst_object_may_exist;
  io::AsyncOperation* m_src_async_op = nullptr;  // pins the source image
  void send_list_snaps();
  void handle_list_snaps(int r);
  void send_read();
  void handle_read(int r);
  void send_update_object_map();
  void handle_update_object_map(int r);
  void process_copyup();
  void send_write_object();
  void handle_write_object(int r);
  Context *start_lock_op(ceph::shared_mutex &owner_lock, int* r);
  void compute_read_ops();
  void merge_write_ops();
  void compute_zero_ops();
  void compute_dst_object_may_exist();
  void finish(int r);
};
} // namespace deep_copy
} // namespace librbd
extern template class librbd::deep_copy::ObjectCopyRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_DEEP_COPY_OBJECT_COPY_REQUEST_H
| 4,396 | 26.654088 | 80 | h |
null | ceph-main/src/librbd/deep_copy/SetHeadRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "SetHeadRequest.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/Utils.h"
#include "librbd/image/AttachParentRequest.h"
#include "librbd/image/DetachParentRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::deep_copy::SetHeadRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace deep_copy {
using librbd::util::create_context_callback;
using librbd::util::create_rados_callback;
// Construct a request that forces the image HEAD revision to the given size
// and parent linkage. The parent overlap can never exceed the image size.
template <typename I>
SetHeadRequest<I>::SetHeadRequest(I *image_ctx, uint64_t size,
                                  const cls::rbd::ParentImageSpec &spec,
                                  uint64_t parent_overlap,
                                  Context *on_finish)
  : m_image_ctx(image_ctx), m_size(size), m_parent_spec(spec),
    m_parent_overlap(parent_overlap), m_on_finish(on_finish),
    m_cct(image_ctx->cct) {
  ceph_assert(m_parent_overlap <= m_size);
}
// Entry point: size update -> detach stale parent -> attach desired parent.
template <typename I>
void SetHeadRequest<I>::send() {
  send_set_size();
}
// Persists the target image size to the header object (under the exclusive
// lock) unless the in-memory size already matches, in which case the step
// is skipped.
template <typename I>
void SetHeadRequest<I>::send_set_size() {
  m_image_ctx->image_lock.lock_shared();
  if (m_image_ctx->size == m_size) {
    m_image_ctx->image_lock.unlock_shared();
    send_detach_parent();
    return;
  }
  m_image_ctx->image_lock.unlock_shared();
  ldout(m_cct, 20) << dendl;
  // Change the image size on disk so that the snapshot picks up
  // the expected size.  We can do this because the last snapshot
  // we process is the sync snapshot which was created to match the
  // image size. We also don't need to worry about trimming because
  // we track the highest possible object number within the sync record
  librados::ObjectWriteOperation op;
  librbd::cls_client::set_size(&op, m_size);
  int r;
  auto finish_op_ctx = start_lock_op(&r);
  if (finish_op_ctx == nullptr) {
    lderr(m_cct) << "lost exclusive lock" << dendl;
    finish(r);
    return;
  }
  auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
      handle_set_size(r);
      finish_op_ctx->complete(0);
    });
  librados::AioCompletion *comp = create_rados_callback(ctx);
  r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}
// Completion for the on-disk size update: mirrors the new size into the
// in-memory image context (shrinking the cached parent overlap if needed)
// and proceeds to the parent-detach step.
template <typename I>
void SetHeadRequest<I>::handle_set_size(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;
  if (r < 0) {
    lderr(m_cct) << "failed to update image size: " << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }
  {
    // adjust in-memory image size now that it's updated on disk
    std::unique_lock image_locker{m_image_ctx->image_lock};
    if (m_image_ctx->size > m_size) {
      // shrinking: clamp the cached parent overlap to the new size
      if (m_image_ctx->parent_md.spec.pool_id != -1 &&
          m_image_ctx->parent_md.overlap > m_size) {
        m_image_ctx->parent_md.overlap = m_size;
      }
    }
    m_image_ctx->size = m_size;
  }
  send_detach_parent();
}
// Detaches the current parent if it differs from the desired parent spec /
// overlap; skipped when there is no parent or it already matches.
template <typename I>
void SetHeadRequest<I>::send_detach_parent() {
  m_image_ctx->image_lock.lock_shared();
  if (m_image_ctx->parent_md.spec.pool_id == -1 ||
      (m_image_ctx->parent_md.spec == m_parent_spec &&
       m_image_ctx->parent_md.overlap == m_parent_overlap)) {
    m_image_ctx->image_lock.unlock_shared();
    send_attach_parent();
    return;
  }
  m_image_ctx->image_lock.unlock_shared();
  ldout(m_cct, 20) << dendl;
  int r;
  auto finish_op_ctx = start_lock_op(&r);
  if (finish_op_ctx == nullptr) {
    lderr(m_cct) << "lost exclusive lock" << dendl;
    finish(r);
    return;
  }
  auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
      handle_detach_parent(r);
      finish_op_ctx->complete(0);
    });
  auto req = image::DetachParentRequest<I>::create(*m_image_ctx, ctx);
  req->send();
}
// Completion handler for the parent detach: clears the cached parent metadata
// and proceeds to the attach step.
template <typename I>
void SetHeadRequest<I>::handle_detach_parent(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_cct) << "failed to remove parent: " << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }

  {
    // adjust in-memory parent now that it's updated on disk
    std::unique_lock image_locker{m_image_ctx->image_lock};
    m_image_ctx->parent_md.spec = {};
    m_image_ctx->parent_md.overlap = 0;
  }

  send_attach_parent();
}
// Step 3: attach the requested parent spec/overlap (skipped when the image
// already carries exactly that parent -- including the "no parent" case,
// since detach left spec/overlap cleared).
template <typename I>
void SetHeadRequest<I>::send_attach_parent() {
  m_image_ctx->image_lock.lock_shared();
  if (m_image_ctx->parent_md.spec == m_parent_spec &&
      m_image_ctx->parent_md.overlap == m_parent_overlap) {
    m_image_ctx->image_lock.unlock_shared();
    finish(0);
    return;
  }
  m_image_ctx->image_lock.unlock_shared();

  ldout(m_cct, 20) << dendl;

  int r;
  auto finish_op_ctx = start_lock_op(&r);
  if (finish_op_ctx == nullptr) {
    lderr(m_cct) << "lost exclusive lock" << dendl;
    finish(r);
    return;
  }

  auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
      handle_attach_parent(r);
      finish_op_ctx->complete(0);
    });
  auto req = image::AttachParentRequest<I>::create(
    *m_image_ctx, m_parent_spec, m_parent_overlap, false, ctx);
  req->send();
}
// Completion handler for the parent attach: mirrors the new parent into the
// in-memory state and completes the request.
template <typename I>
void SetHeadRequest<I>::handle_attach_parent(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_cct) << "failed to attach parent: " << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }

  {
    // adjust in-memory parent now that it's updated on disk
    std::unique_lock image_locker{m_image_ctx->image_lock};
    m_image_ctx->parent_md.spec = m_parent_spec;
    m_image_ctx->parent_md.overlap = m_parent_overlap;
  }

  finish(0);
}
// Opens an exclusive-lock scoped op; when the image has no exclusive lock a
// no-op completion context is handed back instead.  A nullptr result (with *r
// set) means the lock was lost.
template <typename I>
Context *SetHeadRequest<I>::start_lock_op(int* r) {
  std::shared_lock owner_locker{m_image_ctx->owner_lock};
  auto exclusive_lock = m_image_ctx->exclusive_lock;
  if (exclusive_lock != nullptr) {
    return exclusive_lock->start_op(r);
  }
  // exclusive lock not in use -- nothing to scope the op to
  return new LambdaContext([](int) {});
}
// Completes the user callback with the final result and destroys the request
// (the request is self-owned).
template <typename I>
void SetHeadRequest<I>::finish(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  m_on_finish->complete(r);
  delete this;
}
} // namespace deep_copy
} // namespace librbd
template class librbd::deep_copy::SetHeadRequest<librbd::ImageCtx>;
| 6,257 | 26.9375 | 80 | cc |
null | ceph-main/src/librbd/deep_copy/SetHeadRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_DEEP_COPY_SET_HEAD_REQUEST_H
#define CEPH_LIBRBD_DEEP_COPY_SET_HEAD_REQUEST_H
#include "include/int_types.h"
#include "include/rados/librados.hpp"
#include "common/snap_types.h"
#include "librbd/ImageCtx.h"
#include "librbd/Types.h"
#include <map>
#include <set>
#include <string>
#include <tuple>
class Context;
namespace librbd {
namespace deep_copy {
// Async state machine that forces an image's HEAD revision to a given size
// and parent spec/overlap, used by deep-copy to make the destination match
// the source before snapshotting.  Self-deleting; completes on_finish when
// done.
template <typename ImageCtxT = librbd::ImageCtx>
class SetHeadRequest {
public:
  static SetHeadRequest* create(ImageCtxT *image_ctx, uint64_t size,
                                const cls::rbd::ParentImageSpec &parent_spec,
                                uint64_t parent_overlap,
                                Context *on_finish) {
    return new SetHeadRequest(image_ctx, size, parent_spec, parent_overlap,
                              on_finish);
  }

  SetHeadRequest(ImageCtxT *image_ctx, uint64_t size,
                 const cls::rbd::ParentImageSpec &parent_spec,
                 uint64_t parent_overlap, Context *on_finish);

  void send();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v (skip if not needed)
   * SET_SIZE
   *    |
   *    v (skip if not needed)
   * DETACH_PARENT
   *    |
   *    v (skip if not needed)
   * ATTACH_PARENT
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  ImageCtxT *m_image_ctx;
  uint64_t m_size;               // desired HEAD image size
  cls::rbd::ParentImageSpec m_parent_spec;  // desired parent (pool_id == -1 => none)
  uint64_t m_parent_overlap;     // desired parent overlap
  Context *m_on_finish;

  CephContext *m_cct;

  void send_set_size();
  void handle_set_size(int r);

  void send_detach_parent();
  void handle_detach_parent(int r);

  void send_attach_parent();
  void handle_attach_parent(int r);

  // returns nullptr (and sets *r) if the exclusive lock was lost
  Context *start_lock_op(int* r);

  void finish(int r);
};
} // namespace deep_copy
} // namespace librbd
extern template class librbd::deep_copy::SetHeadRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_DEEP_COPY_SET_HEAD_REQUEST_H
| 2,011 | 21.863636 | 77 | h |
null | ceph-main/src/librbd/deep_copy/SnapshotCopyRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "SnapshotCopyRequest.h"
#include "SetHeadRequest.h"
#include "SnapshotCreateRequest.h"
#include "common/errno.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ObjectMap.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "osdc/Striper.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::deep_copy::SnapshotCopyRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace deep_copy {
namespace {
template <typename I>
const std::string &get_snapshot_name(I *image_ctx, librados::snap_t snap_id) {
auto snap_it = std::find_if(image_ctx->snap_ids.begin(),
image_ctx->snap_ids.end(),
[snap_id](
const std::pair<
std::pair<cls::rbd::SnapshotNamespace,
std::string>,
librados::snap_t> &pair) {
return pair.second == snap_id;
});
ceph_assert(snap_it != image_ctx->snap_ids.end());
return snap_it->first.second;
}
} // anonymous namespace
using librbd::util::create_context_callback;
using librbd::util::unique_lock_name;
// Captures a stable view of both images' snapshot id sets and trims source
// snapshots above the requested end id.  Either both start ids are 0 (full
// copy) or both are non-zero (incremental).
template <typename I>
SnapshotCopyRequest<I>::SnapshotCopyRequest(I *src_image_ctx,
                                            I *dst_image_ctx,
                                            librados::snap_t src_snap_id_start,
                                            librados::snap_t src_snap_id_end,
                                            librados::snap_t dst_snap_id_start,
                                            bool flatten,
                                            asio::ContextWQ *work_queue,
                                            SnapSeqs *snap_seqs,
                                            Context *on_finish)
  : RefCountedObject(dst_image_ctx->cct), m_src_image_ctx(src_image_ctx),
    m_dst_image_ctx(dst_image_ctx), m_src_snap_id_start(src_snap_id_start),
    m_src_snap_id_end(src_snap_id_end), m_dst_snap_id_start(dst_snap_id_start),
    m_flatten(flatten), m_work_queue(work_queue), m_snap_seqs_result(snap_seqs),
    m_snap_seqs(*snap_seqs), m_on_finish(on_finish), m_cct(dst_image_ctx->cct),
    m_lock(ceph::make_mutex(unique_lock_name("SnapshotCopyRequest::m_lock", this))) {
  ceph_assert((m_src_snap_id_start == 0 && m_dst_snap_id_start == 0) ||
              (m_src_snap_id_start > 0 && m_dst_snap_id_start > 0));

  // snap ids ordered from oldest to newest
  m_src_image_ctx->image_lock.lock_shared();
  m_src_snap_ids.insert(src_image_ctx->snaps.begin(),
                        src_image_ctx->snaps.end());
  m_src_image_ctx->image_lock.unlock_shared();

  m_dst_image_ctx->image_lock.lock_shared();
  m_dst_snap_ids.insert(dst_image_ctx->snaps.begin(),
                        dst_image_ctx->snaps.end());
  m_dst_image_ctx->image_lock.unlock_shared();

  if (m_src_snap_id_end != CEPH_NOSNAP) {
    // ignore source snapshots newer than the requested end point
    m_src_snap_ids.erase(m_src_snap_ids.upper_bound(m_src_snap_id_end),
                         m_src_snap_ids.end());
  }
}
// Entry point: verify that each image uses one consistent parent spec across
// all of its snapshots, then begin the unprotect phase of the state machine.
template <typename I>
void SnapshotCopyRequest<I>::send() {
  cls::rbd::ParentImageSpec src_parent_spec;
  if (int r = validate_parent(m_src_image_ctx, &src_parent_spec); r < 0) {
    lderr(m_cct) << "source image parent spec mismatch" << dendl;
    error(r);
    return;
  }

  if (int r = validate_parent(m_dst_image_ctx, &m_dst_parent_spec); r < 0) {
    lderr(m_cct) << "destination image parent spec mismatch" << dendl;
    error(r);
    return;
  }

  send_snap_unprotect();
}
// Requests cancellation; the flag is observed between steps via
// handle_cancellation(), so in-flight operations finish first.
template <typename I>
void SnapshotCopyRequest<I>::cancel() {
  std::lock_guard locker{m_lock};

  ldout(m_cct, 20) << dendl;
  m_canceled = true;
}
// Phase 1: scan destination snapshots (resuming after m_prev_snap_id) and
// unprotect the first one that is protected but either unmapped or whose
// mapped source snapshot is no longer protected.  Repeats via its handler
// until no candidates remain, then moves on to the remove phase.
template <typename I>
void SnapshotCopyRequest<I>::send_snap_unprotect() {
  SnapIdSet::iterator snap_id_it = m_dst_snap_ids.begin();
  if (m_prev_snap_id != CEPH_NOSNAP) {
    snap_id_it = m_dst_snap_ids.upper_bound(m_prev_snap_id);
  } else if (m_dst_snap_id_start > 0) {
    snap_id_it = m_dst_snap_ids.upper_bound(m_dst_snap_id_start);
  }

  for (; snap_id_it != m_dst_snap_ids.end(); ++snap_id_it) {
    librados::snap_t dst_snap_id = *snap_id_it;

    m_dst_image_ctx->image_lock.lock_shared();
    bool dst_unprotected;
    int r = m_dst_image_ctx->is_snap_unprotected(dst_snap_id, &dst_unprotected);
    if (r < 0) {
      lderr(m_cct) << "failed to retrieve destination snap unprotect status: "
                   << cpp_strerror(r) << dendl;
      m_dst_image_ctx->image_lock.unlock_shared();
      finish(r);
      return;
    }
    m_dst_image_ctx->image_lock.unlock_shared();

    if (dst_unprotected) {
      // snap is already unprotected -- check next snap
      continue;
    }

    // if destination snapshot is protected and (1) it isn't in our mapping
    // table, or (2) the source snapshot isn't protected, unprotect it
    auto snap_seq_it = std::find_if(
      m_snap_seqs.begin(), m_snap_seqs.end(),
      [dst_snap_id](const SnapSeqs::value_type& pair) {
        return pair.second == dst_snap_id;
      });

    if (snap_seq_it != m_snap_seqs.end()) {
      m_src_image_ctx->image_lock.lock_shared();
      bool src_unprotected;
      r = m_src_image_ctx->is_snap_unprotected(snap_seq_it->first,
                                               &src_unprotected);
      ldout(m_cct, 20) << "m_src_image_ctx->is_snap_unprotected("
                       << snap_seq_it->first << "): r=" << r
                       << ", src_unprotected=" << src_unprotected << dendl;
      if (r == -ENOENT) {
        // source snapshot has since been deleted -- treat as unprotected
        src_unprotected = true;
        r = 0;
      }
      if (r < 0) {
        lderr(m_cct) << "failed to retrieve source snap unprotect status: "
                     << cpp_strerror(r) << dendl;
        m_src_image_ctx->image_lock.unlock_shared();
        finish(r);
        return;
      }
      m_src_image_ctx->image_lock.unlock_shared();

      if (src_unprotected) {
        // source is unprotected -- unprotect destination snap
        break;
      }
    } else {
      // source snapshot doesn't exist -- unprotect destination snap
      break;
    }
  }

  if (snap_id_it == m_dst_snap_ids.end()) {
    // no destination snapshots to unprotect
    m_prev_snap_id = CEPH_NOSNAP;
    send_snap_remove();
    return;
  }

  m_prev_snap_id = *snap_id_it;
  m_snap_name = get_snapshot_name(m_dst_image_ctx, m_prev_snap_id);

  ldout(m_cct, 20) << "snap_name=" << m_snap_name << ", "
                   << "snap_id=" << m_prev_snap_id << dendl;

  int r;
  auto finish_op_ctx = start_lock_op(&r);
  if (finish_op_ctx == nullptr) {
    lderr(m_cct) << "lost exclusive lock" << dendl;
    finish(r);
    return;
  }

  auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
      handle_snap_unprotect(r);
      finish_op_ctx->complete(0);
    });
  std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
  m_dst_image_ctx->operations->execute_snap_unprotect(
    cls::rbd::UserSnapshotNamespace(), m_snap_name.c_str(), ctx);
}
// Completion handler for a single snap-unprotect: updates the cached
// protection status and loops back to look for more candidates.
template <typename I>
void SnapshotCopyRequest<I>::handle_snap_unprotect(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_cct) << "failed to unprotect snapshot '" << m_snap_name << "': "
                 << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }

  {
    // avoid the need to refresh to delete the newly unprotected snapshot
    // NOTE(review): mutates snap_info under a shared image_lock -- presumably
    // safe w.r.t. concurrent readers of protection_status; confirm
    std::shared_lock image_locker{m_dst_image_ctx->image_lock};
    auto snap_info_it = m_dst_image_ctx->snap_info.find(m_prev_snap_id);
    if (snap_info_it != m_dst_image_ctx->snap_info.end()) {
      snap_info_it->second.protection_status =
        RBD_PROTECTION_STATUS_UNPROTECTED;
    }
  }

  if (handle_cancellation()) {
    return;
  }
  send_snap_unprotect();
}
// Phase 2: scan destination user snapshots (resuming after m_prev_snap_id)
// and remove the first one that has no entry in the src->dst mapping table.
// Repeats via its handler; when no candidates remain, moves to the create
// phase.
template <typename I>
void SnapshotCopyRequest<I>::send_snap_remove() {
  SnapIdSet::iterator snap_id_it = m_dst_snap_ids.begin();
  if (m_prev_snap_id != CEPH_NOSNAP) {
    snap_id_it = m_dst_snap_ids.upper_bound(m_prev_snap_id);
  } else if (m_dst_snap_id_start > 0) {
    snap_id_it = m_dst_snap_ids.upper_bound(m_dst_snap_id_start);
  }

  for (; snap_id_it != m_dst_snap_ids.end(); ++snap_id_it) {
    librados::snap_t dst_snap_id = *snap_id_it;

    cls::rbd::SnapshotNamespace snap_namespace;
    m_dst_image_ctx->image_lock.lock_shared();
    int r = m_dst_image_ctx->get_snap_namespace(dst_snap_id, &snap_namespace);
    m_dst_image_ctx->image_lock.unlock_shared();
    if (r < 0) {
      lderr(m_cct) << "failed to retrieve destination snap namespace: "
                   << m_snap_name << dendl;
      finish(r);
      return;
    }

    // only user snapshots are eligible for removal
    if (!std::holds_alternative<cls::rbd::UserSnapshotNamespace>(snap_namespace)) {
      continue;
    }

    // if the destination snapshot isn't in our mapping table, remove it
    auto snap_seq_it = std::find_if(
      m_snap_seqs.begin(), m_snap_seqs.end(),
      [dst_snap_id](const SnapSeqs::value_type& pair) {
        return pair.second == dst_snap_id;
      });
    if (snap_seq_it == m_snap_seqs.end()) {
      break;
    }
  }

  if (snap_id_it == m_dst_snap_ids.end()) {
    // no destination snapshots to delete
    m_prev_snap_id = CEPH_NOSNAP;
    send_snap_create();
    return;
  }

  m_prev_snap_id = *snap_id_it;
  m_snap_name = get_snapshot_name(m_dst_image_ctx, m_prev_snap_id);

  ldout(m_cct, 20) << ""
                   << "snap_name=" << m_snap_name << ", "
                   << "snap_id=" << m_prev_snap_id << dendl;

  int r;
  auto finish_op_ctx = start_lock_op(&r);
  if (finish_op_ctx == nullptr) {
    lderr(m_cct) << "lost exclusive lock" << dendl;
    finish(r);
    return;
  }

  auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
      handle_snap_remove(r);
      finish_op_ctx->complete(0);
    });
  std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
  m_dst_image_ctx->operations->execute_snap_remove(
    cls::rbd::UserSnapshotNamespace(), m_snap_name.c_str(), ctx);
}
// Completion handler for a single snap-remove: on success (and absent a
// cancellation request) loops back to look for more removable snapshots.
template <typename I>
void SnapshotCopyRequest<I>::handle_snap_remove(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r >= 0) {
    if (!handle_cancellation()) {
      send_snap_remove();
    }
    return;
  }

  lderr(m_cct) << "failed to remove snapshot '" << m_snap_name << "': "
               << cpp_strerror(r) << dendl;
  finish(r);
}
// Phase 3: scan source snapshots (resuming after m_prev_snap_id) and create
// on the destination the first unmapped user snapshot found.  Unmapped
// non-user end snapshots are mapped to the destination HEAD instead of being
// created.  Repeats via its handler; when done, moves to the protect phase.
template <typename I>
void SnapshotCopyRequest<I>::send_snap_create() {
  SnapIdSet::iterator snap_id_it = m_src_snap_ids.begin();
  if (m_prev_snap_id != CEPH_NOSNAP) {
    snap_id_it = m_src_snap_ids.upper_bound(m_prev_snap_id);
  } else if (m_src_snap_id_start > 0) {
    snap_id_it = m_src_snap_ids.upper_bound(m_src_snap_id_start);
  }

  for (; snap_id_it != m_src_snap_ids.end(); ++snap_id_it) {
    librados::snap_t src_snap_id = *snap_id_it;

    cls::rbd::SnapshotNamespace snap_namespace;
    m_src_image_ctx->image_lock.lock_shared();
    int r = m_src_image_ctx->get_snap_namespace(src_snap_id, &snap_namespace);
    m_src_image_ctx->image_lock.unlock_shared();
    if (r < 0) {
      lderr(m_cct) << "failed to retrieve source snap namespace: "
                   << m_snap_name << dendl;
      finish(r);
      return;
    }

    if (m_snap_seqs.find(src_snap_id) == m_snap_seqs.end()) {
      // the source snapshot is not in our mapping table, ...
      if (std::holds_alternative<cls::rbd::UserSnapshotNamespace>(snap_namespace)) {
        // ... create it since it's a user snapshot
        break;
      } else if (src_snap_id == m_src_snap_id_end) {
        // ... map it to destination HEAD since it's not a user snapshot that we
        // will create (e.g. MirrorSnapshotNamespace)
        m_snap_seqs[src_snap_id] = CEPH_NOSNAP;
      }
    }
  }

  if (snap_id_it == m_src_snap_ids.end()) {
    // no source snapshots to create
    m_prev_snap_id = CEPH_NOSNAP;
    send_snap_protect();
    return;
  }

  m_prev_snap_id = *snap_id_it;
  m_snap_name = get_snapshot_name(m_src_image_ctx, m_prev_snap_id);

  m_src_image_ctx->image_lock.lock_shared();
  auto snap_info_it = m_src_image_ctx->snap_info.find(m_prev_snap_id);
  if (snap_info_it == m_src_image_ctx->snap_info.end()) {
    m_src_image_ctx->image_lock.unlock_shared();
    lderr(m_cct) << "failed to retrieve source snap info: " << m_snap_name
                 << dendl;
    finish(-ENOENT);
    return;
  }

  uint64_t size = snap_info_it->second.size;
  m_snap_namespace = snap_info_it->second.snap_namespace;
  cls::rbd::ParentImageSpec parent_spec;
  uint64_t parent_overlap = 0;
  if (!m_flatten && snap_info_it->second.parent.spec.pool_id != -1) {
    // use the dst image's parent spec (already validated to be consistent)
    // with the source snapshot's overlap
    parent_spec = m_dst_parent_spec;
    parent_overlap = snap_info_it->second.parent.overlap;
  }
  m_src_image_ctx->image_lock.unlock_shared();

  ldout(m_cct, 20) << "snap_name=" << m_snap_name << ", "
                   << "snap_id=" << m_prev_snap_id << ", "
                   << "size=" << size << ", "
                   << "parent_info=["
                   << "pool_id=" << parent_spec.pool_id << ", "
                   << "image_id=" << parent_spec.image_id << ", "
                   << "snap_id=" << parent_spec.snap_id << ", "
                   << "overlap=" << parent_overlap << "]" << dendl;

  int r;
  Context *finish_op_ctx = start_lock_op(&r);
  if (finish_op_ctx == nullptr) {
    lderr(m_cct) << "lost exclusive lock" << dendl;
    finish(r);
    return;
  }

  auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
      handle_snap_create(r);
      finish_op_ctx->complete(0);
    });
  SnapshotCreateRequest<I> *req = SnapshotCreateRequest<I>::create(
    m_dst_image_ctx, m_snap_name, m_snap_namespace, size, parent_spec,
    parent_overlap, ctx);
  req->send();
}
// Completion handler for a single snap-create: records the newly created
// destination snapshot id in the mapping table and loops back for more.
template <typename I>
void SnapshotCopyRequest<I>::handle_snap_create(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_cct) << "failed to create snapshot '" << m_snap_name << "': "
                 << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }

  if (handle_cancellation()) {
    return;
  }

  ceph_assert(m_prev_snap_id != CEPH_NOSNAP);

  // NOTE(review): snap_ids is read without image_lock here -- presumably safe
  // because this request is the only writer of dst snapshots; confirm
  auto snap_it = m_dst_image_ctx->snap_ids.find(
    {cls::rbd::UserSnapshotNamespace(), m_snap_name});
  ceph_assert(snap_it != m_dst_image_ctx->snap_ids.end());
  librados::snap_t dst_snap_id = snap_it->second;

  ldout(m_cct, 20) << "mapping source snap id " << m_prev_snap_id << " to "
                   << dst_snap_id << dendl;
  m_snap_seqs[m_prev_snap_id] = dst_snap_id;

  send_snap_create();
}
// Phase 4: scan source snapshots (resuming after m_prev_snap_id) and protect
// the mapped destination snapshot of the first protected source snapshot
// whose destination counterpart isn't protected yet.  Repeats via its
// handler; when done, moves to the set-head step.
template <typename I>
void SnapshotCopyRequest<I>::send_snap_protect() {
  SnapIdSet::iterator snap_id_it = m_src_snap_ids.begin();
  if (m_prev_snap_id != CEPH_NOSNAP) {
    snap_id_it = m_src_snap_ids.upper_bound(m_prev_snap_id);
  } else if (m_src_snap_id_start > 0) {
    snap_id_it = m_src_snap_ids.upper_bound(m_src_snap_id_start);
  }

  for (; snap_id_it != m_src_snap_ids.end(); ++snap_id_it) {
    librados::snap_t src_snap_id = *snap_id_it;

    m_src_image_ctx->image_lock.lock_shared();
    bool src_protected;
    int r = m_src_image_ctx->is_snap_protected(src_snap_id, &src_protected);
    if (r < 0) {
      lderr(m_cct) << "failed to retrieve source snap protect status: "
                   << cpp_strerror(r) << dendl;
      m_src_image_ctx->image_lock.unlock_shared();
      finish(r);
      return;
    }
    m_src_image_ctx->image_lock.unlock_shared();

    if (!src_protected) {
      // snap is not protected -- check next snap
      continue;
    }

    // if destination snapshot is not protected, protect it
    auto snap_seq_it = m_snap_seqs.find(src_snap_id);
    ceph_assert(snap_seq_it != m_snap_seqs.end());
    if (snap_seq_it->second == CEPH_NOSNAP) {
      // implies src end snapshot is mapped to a non-copyable snapshot
      ceph_assert(src_snap_id == m_src_snap_id_end);
      break;
    }

    m_dst_image_ctx->image_lock.lock_shared();
    bool dst_protected;
    r = m_dst_image_ctx->is_snap_protected(snap_seq_it->second, &dst_protected);
    if (r < 0) {
      lderr(m_cct) << "failed to retrieve destination snap protect status: "
                   << cpp_strerror(r) << dendl;
      m_dst_image_ctx->image_lock.unlock_shared();
      finish(r);
      return;
    }
    m_dst_image_ctx->image_lock.unlock_shared();

    if (!dst_protected) {
      break;
    }
  }

  if (snap_id_it == m_src_snap_ids.end()) {
    // no destination snapshots to protect
    m_prev_snap_id = CEPH_NOSNAP;
    send_set_head();
    return;
  }

  m_prev_snap_id = *snap_id_it;
  m_snap_name = get_snapshot_name(m_src_image_ctx, m_prev_snap_id);

  ldout(m_cct, 20) << "snap_name=" << m_snap_name << ", "
                   << "snap_id=" << m_prev_snap_id << dendl;

  int r;
  auto finish_op_ctx = start_lock_op(&r);
  if (finish_op_ctx == nullptr) {
    lderr(m_cct) << "lost exclusive lock" << dendl;
    finish(r);
    return;
  }

  auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
      handle_snap_protect(r);
      finish_op_ctx->complete(0);
    });
  std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
  m_dst_image_ctx->operations->execute_snap_protect(
    cls::rbd::UserSnapshotNamespace(), m_snap_name.c_str(), ctx);
}
// Completion handler for a single snap-protect: on success (and absent a
// cancellation request) loops back to look for more snapshots to protect.
template <typename I>
void SnapshotCopyRequest<I>::handle_snap_protect(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r >= 0) {
    if (!handle_cancellation()) {
      send_snap_protect();
    }
    return;
  }

  lderr(m_cct) << "failed to protect snapshot '" << m_snap_name << "': "
               << cpp_strerror(r) << dendl;
  finish(r);
}
// Phase 5: when the copy extends to the HEAD revision (either src end is
// CEPH_NOSNAP or the end snapshot maps to destination HEAD), bring the
// destination HEAD size/parent in line with the source; otherwise complete.
template <typename I>
void SnapshotCopyRequest<I>::send_set_head() {
  auto snap_seq_it = m_snap_seqs.find(m_src_snap_id_end);
  if (m_src_snap_id_end != CEPH_NOSNAP &&
      (snap_seq_it == m_snap_seqs.end() ||
       snap_seq_it->second != CEPH_NOSNAP)) {
    // not copying to src nor dst HEAD revision
    finish(0);
    return;
  }

  ldout(m_cct, 20) << dendl;

  uint64_t size;
  cls::rbd::ParentImageSpec parent_spec;
  uint64_t parent_overlap = 0;
  {
    std::shared_lock src_locker{m_src_image_ctx->image_lock};
    auto snap_info_it = m_src_image_ctx->snap_info.find(m_src_snap_id_end);
    if (snap_info_it != m_src_image_ctx->snap_info.end()) {
      // end point is a source snapshot -- use its size/overlap with the
      // (validated) destination parent spec
      auto& snap_info = snap_info_it->second;
      size = snap_info.size;
      if (!m_flatten && snap_info.parent.spec.pool_id != -1) {
        parent_spec = m_dst_parent_spec;
        parent_overlap = snap_info.parent.overlap;
      }
    } else {
      // end point is the source HEAD
      size = m_src_image_ctx->size;
      if (!m_flatten) {
        parent_spec = m_dst_image_ctx->parent_md.spec;
        parent_overlap = m_src_image_ctx->parent_md.overlap;
      }
    }
  }

  auto ctx = create_context_callback<
    SnapshotCopyRequest<I>, &SnapshotCopyRequest<I>::handle_set_head>(this);
  auto req = SetHeadRequest<I>::create(m_dst_image_ctx, size, parent_spec,
                                       parent_overlap, ctx);
  req->send();
}
// Completion handler for the set-head sub-request: on success (and absent a
// cancellation request) proceeds to the object-map resize step.
template <typename I>
void SnapshotCopyRequest<I>::handle_set_head(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r >= 0) {
    if (!handle_cancellation()) {
      send_resize_object_map();
    }
    return;
  }

  lderr(m_cct) << "failed to set head: " << cpp_strerror(r) << dendl;
  finish(r);
}
// Final step: when the object-map feature is enabled and the in-memory
// object map no longer matches the (possibly resized) image, resize it;
// otherwise complete immediately.
template <typename I>
void SnapshotCopyRequest<I>::send_resize_object_map() {
  int r = 0;

  if (m_dst_image_ctx->test_features(RBD_FEATURE_OBJECT_MAP)) {
    std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
    std::shared_lock image_locker{m_dst_image_ctx->image_lock};

    if (m_dst_image_ctx->object_map != nullptr &&
        Striper::get_num_objects(m_dst_image_ctx->layout,
                                 m_dst_image_ctx->size) !=
          m_dst_image_ctx->object_map->size()) {
      ldout(m_cct, 20) << dendl;

      // owner_lock is already held -- use the pre-locked start_lock_op variant
      auto finish_op_ctx = start_lock_op(m_dst_image_ctx->owner_lock, &r);
      if (finish_op_ctx != nullptr) {
        auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
            handle_resize_object_map(r);
            finish_op_ctx->complete(0);
          });

        m_dst_image_ctx->object_map->aio_resize(m_dst_image_ctx->size,
                                                OBJECT_NONEXISTENT, ctx);
        return;
      }

      lderr(m_cct) << "lost exclusive lock" << dendl;
      // fall through and finish with the error code set by start_lock_op
    }
  }

  finish(r);
}
// Completion handler for the object-map resize; last step of the request.
template <typename I>
void SnapshotCopyRequest<I>::handle_resize_object_map(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_cct) << "failed to resize object map: " << cpp_strerror(r)
                 << dendl;
  }

  // an error propagates as-is; success completes with 0
  finish(r < 0 ? r : 0);
}
// Checks whether cancel() has been requested; if so, completes the request
// with -ECANCELED and returns true so the caller stops the state machine.
template <typename I>
bool SnapshotCopyRequest<I>::handle_cancellation() {
  bool canceled;
  {
    std::lock_guard locker{m_lock};
    canceled = m_canceled;
  }

  if (!canceled) {
    return false;
  }

  ldout(m_cct, 10) << "snapshot copy canceled" << dendl;
  finish(-ECANCELED);
  return true;
}
// Reports an early (pre-async) failure by deferring finish() to the work
// queue so the completion never runs inline within send().  The queued
// context's own result code (r1) is intentionally ignored.
template <typename I>
void SnapshotCopyRequest<I>::error(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  m_work_queue->queue(new LambdaContext([this, r](int r1) { finish(r); }));
}
// Verifies that the image's HEAD and all of its snapshots agree on a single
// parent spec; stores that spec in *spec.  Returns -EINVAL on a mismatch.
template <typename I>
int SnapshotCopyRequest<I>::validate_parent(I *image_ctx,
                                            cls::rbd::ParentImageSpec *spec) {
  std::shared_lock owner_locker{image_ctx->owner_lock};
  std::shared_lock image_locker{image_ctx->image_lock};

  // start from the HEAD parent; snapshots may fill it in if HEAD has none
  *spec = image_ctx->parent_md.spec;

  for (auto& snap_info_pair : image_ctx->snap_info) {
    auto& parent_spec = snap_info_pair.second.parent.spec;
    if (parent_spec.pool_id == -1) {
      // snapshot has no parent -- nothing to compare
      continue;
    }

    if (spec->pool_id == -1) {
      // first parent seen becomes the reference spec
      *spec = parent_spec;
    } else if (parent_spec != *spec) {
      return -EINVAL;
    }
  }
  return 0;
}
// Convenience overload: acquires the owner lock and delegates to the
// pre-locked variant below.
template <typename I>
Context *SnapshotCopyRequest<I>::start_lock_op(int* r) {
  std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
  return start_lock_op(m_dst_image_ctx->owner_lock, r);
}
// Pre-locked variant: the caller already holds the dst image's owner lock
// (the parameter only documents which lock; the assertion checks the member
// lock directly).  Returns a no-op context when no exclusive lock is in use,
// or nullptr (with *r set) when the lock was lost.
template <typename I>
Context *SnapshotCopyRequest<I>::start_lock_op(ceph::shared_mutex &owner_lock, int* r) {
  ceph_assert(ceph_mutex_is_locked(m_dst_image_ctx->owner_lock));
  if (m_dst_image_ctx->exclusive_lock == nullptr) {
    return new LambdaContext([](int r) {});
  }
  return m_dst_image_ctx->exclusive_lock->start_op(r);
}
// Completes the request: publishes the snap-seq mapping to the caller's
// output only on success, then fires the callback and drops the refcount.
template <typename I>
void SnapshotCopyRequest<I>::finish(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r == 0) {
    *m_snap_seqs_result = m_snap_seqs;
  }
  m_on_finish->complete(r);
  put();
}
} // namespace deep_copy
} // namespace librbd
template class librbd::deep_copy::SnapshotCopyRequest<librbd::ImageCtx>;
| 22,552 | 29.894521 | 88 | cc |
null | ceph-main/src/librbd/deep_copy/SnapshotCopyRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_DEEP_COPY_SNAPSHOT_COPY_REQUEST_H
#define CEPH_LIBRBD_DEEP_COPY_SNAPSHOT_COPY_REQUEST_H
#include "include/int_types.h"
#include "include/rados/librados.hpp"
#include "common/RefCountedObj.h"
#include "common/snap_types.h"
#include "librbd/ImageCtx.h"
#include "librbd/Types.h"
#include <map>
#include <set>
#include <string>
#include <tuple>
class Context;
namespace librbd {
namespace asio { struct ContextWQ; }
namespace deep_copy {
// Async state machine that reconciles the destination image's snapshot set
// with the source's (unprotect/remove stale snapshots, create/protect missing
// ones, optionally update HEAD) and emits the src->dst snap-id mapping.
// Ref-counted; completes on_finish when done or canceled.
template <typename ImageCtxT = librbd::ImageCtx>
class SnapshotCopyRequest : public RefCountedObject {
public:
  static SnapshotCopyRequest* create(ImageCtxT *src_image_ctx,
                                     ImageCtxT *dst_image_ctx,
                                     librados::snap_t src_snap_id_start,
                                     librados::snap_t src_snap_id_end,
                                     librados::snap_t dst_snap_id_start,
                                     bool flatten, asio::ContextWQ *work_queue,
                                     SnapSeqs *snap_seqs, Context *on_finish) {
    return new SnapshotCopyRequest(src_image_ctx, dst_image_ctx,
                                   src_snap_id_start, src_snap_id_end,
                                   dst_snap_id_start, flatten, work_queue,
                                   snap_seqs, on_finish);
  }

  SnapshotCopyRequest(ImageCtxT *src_image_ctx, ImageCtxT *dst_image_ctx,
                      librados::snap_t src_snap_id_start,
                      librados::snap_t src_snap_id_end,
                      librados::snap_t dst_snap_id_start,
                      bool flatten, asio::ContextWQ *work_queue,
                      SnapSeqs *snap_seqs, Context *on_finish);

  void send();
  // requests cancellation; observed between steps of the state machine
  void cancel();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    |    /-----------\
   *    |    |           |
   *    v    v           | (repeat as needed)
   * UNPROTECT_SNAP ----/
   *    |
   *    |    /-----------\
   *    |    |           |
   *    v    v           | (repeat as needed)
   * REMOVE_SNAP -------/
   *    |
   *    |    /-----------\
   *    |    |           |
   *    v    v           | (repeat as needed)
   * CREATE_SNAP -------/
   *    |
   *    |    /-----------\
   *    |    |           |
   *    v    v           | (repeat as needed)
   * PROTECT_SNAP ------/
   *    |
   *    v
   * SET_HEAD (skip if not needed)
   *    |
   *    v
   * RESIZE_OBJECT_MAP (skip if not needed)
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  typedef std::set<librados::snap_t> SnapIdSet;

  ImageCtxT *m_src_image_ctx;
  ImageCtxT *m_dst_image_ctx;
  librados::snap_t m_src_snap_id_start;
  librados::snap_t m_src_snap_id_end;
  librados::snap_t m_dst_snap_id_start;
  bool m_flatten;
  asio::ContextWQ *m_work_queue;
  SnapSeqs *m_snap_seqs_result;  // caller's output; written only on success
  SnapSeqs m_snap_seqs;          // working src->dst snap-id mapping
  Context *m_on_finish;

  CephContext *m_cct;
  SnapIdSet m_src_snap_ids;
  SnapIdSet m_dst_snap_ids;
  librados::snap_t m_prev_snap_id = CEPH_NOSNAP;  // iteration resume point

  std::string m_snap_name;
  cls::rbd::SnapshotNamespace m_snap_namespace;

  cls::rbd::ParentImageSpec m_dst_parent_spec;

  ceph::mutex m_lock;   // protects m_canceled
  bool m_canceled = false;

  void send_snap_unprotect();
  void handle_snap_unprotect(int r);

  void send_snap_remove();
  void handle_snap_remove(int r);

  void send_snap_create();
  void handle_snap_create(int r);

  void send_snap_protect();
  void handle_snap_protect(int r);

  void send_set_head();
  void handle_set_head(int r);

  void send_resize_object_map();
  void handle_resize_object_map(int r);

  bool handle_cancellation();

  void error(int r);

  int validate_parent(ImageCtxT *image_ctx, cls::rbd::ParentImageSpec *spec);

  Context *start_lock_op(int* r);
  // fixed parameter-name typo ("owner_locki") to match the definition
  Context *start_lock_op(ceph::shared_mutex &owner_lock, int* r);

  void finish(int r);
};
} // namespace deep_copy
} // namespace librbd
extern template class librbd::deep_copy::SnapshotCopyRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_DEEP_COPY_SNAPSHOT_COPY_REQUEST_H
| 4,112 | 26.059211 | 79 | h |
null | ceph-main/src/librbd/deep_copy/SnapshotCreateRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "SetHeadRequest.h"
#include "SnapshotCreateRequest.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ObjectMap.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#include "osdc/Striper.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::deep_copy::SnapshotCreateRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace deep_copy {
using librbd::util::create_context_callback;
using librbd::util::create_rados_callback;
template <typename I>
SnapshotCreateRequest<I>::SnapshotCreateRequest(
I *dst_image_ctx, const std::string &snap_name,
const cls::rbd::SnapshotNamespace &snap_namespace,
uint64_t size, const cls::rbd::ParentImageSpec &spec,
uint64_t parent_overlap, Context *on_finish)
: m_dst_image_ctx(dst_image_ctx), m_snap_name(snap_name),
m_snap_namespace(snap_namespace), m_size(size),
m_parent_spec(spec), m_parent_overlap(parent_overlap),
m_on_finish(on_finish), m_cct(dst_image_ctx->cct) {
}
// Entry point: set HEAD -> create snap -> create object map.
template <typename I>
void SnapshotCreateRequest<I>::send() {
  send_set_head();
}
// Step 1: bring the destination HEAD to the requested size and parent so the
// snapshot captures the expected geometry.
template <typename I>
void SnapshotCreateRequest<I>::send_set_head() {
  ldout(m_cct, 20) << dendl;

  auto ctx = create_context_callback<
    SnapshotCreateRequest<I>, &SnapshotCreateRequest<I>::handle_set_head>(this);
  auto req = SetHeadRequest<I>::create(m_dst_image_ctx, m_size, m_parent_spec,
                                       m_parent_overlap, ctx);
  req->send();
}
// Completion handler for the set-head sub-request: on success proceeds to
// creating the snapshot itself.
template <typename I>
void SnapshotCreateRequest<I>::handle_set_head(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r >= 0) {
    send_create_snap();
    return;
  }

  lderr(m_cct) << "failed to set head: " << cpp_strerror(r) << dendl;
  finish(r);
}
// Step 2: create the snapshot under an exclusive-lock op.  The object map is
// skipped here (it is built explicitly in the next step) and quiesce
// notifications are suppressed.
template <typename I>
void SnapshotCreateRequest<I>::send_create_snap() {
  ldout(m_cct, 20) << "snap_name=" << m_snap_name << dendl;

  int r;
  auto finish_op_ctx = start_lock_op(&r);
  if (finish_op_ctx == nullptr) {
    lderr(m_cct) << "lost exclusive lock" << dendl;
    finish(r);
    return;
  }

  auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
      handle_create_snap(r);
      finish_op_ctx->complete(0);
    });
  uint64_t flags = SNAP_CREATE_FLAG_SKIP_OBJECT_MAP |
                   SNAP_CREATE_FLAG_SKIP_NOTIFY_QUIESCE;
  std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
  m_dst_image_ctx->operations->execute_snap_create(
    m_snap_namespace, m_snap_name.c_str(), ctx, 0U, flags, m_prog_ctx);
}
// Completion handler for the snap-create: on success proceeds to building
// the snapshot's object map.
template <typename I>
void SnapshotCreateRequest<I>::handle_create_snap(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r >= 0) {
    send_create_object_map();
    return;
  }

  lderr(m_cct) << "failed to create snapshot '" << m_snap_name << "': "
               << cpp_strerror(r) << dendl;
  finish(r);
}
// Step 3: when the object-map feature is enabled, write an empty object map
// of the right size for the new snapshot (skipped otherwise).
template <typename I>
void SnapshotCreateRequest<I>::send_create_object_map() {

  if (!m_dst_image_ctx->test_features(RBD_FEATURE_OBJECT_MAP)) {
    finish(0);
    return;
  }

  // look up the id of the snapshot just created (by namespace + name)
  m_dst_image_ctx->image_lock.lock_shared();
  auto snap_it = m_dst_image_ctx->snap_ids.find(
    {cls::rbd::UserSnapshotNamespace(), m_snap_name});
  if (snap_it == m_dst_image_ctx->snap_ids.end()) {
    lderr(m_cct) << "failed to locate snap: " << m_snap_name << dendl;
    m_dst_image_ctx->image_lock.unlock_shared();
    finish(-ENOENT);
    return;
  }
  librados::snap_t local_snap_id = snap_it->second;
  m_dst_image_ctx->image_lock.unlock_shared();

  std::string object_map_oid(librbd::ObjectMap<>::object_map_name(
    m_dst_image_ctx->id, local_snap_id));
  uint64_t object_count = Striper::get_num_objects(m_dst_image_ctx->layout,
                                                   m_size);
  ldout(m_cct, 20) << "object_map_oid=" << object_map_oid << ", "
                   << "object_count=" << object_count << dendl;

  // initialize an empty object map of the correct size (object sync
  // will populate the object map)
  librados::ObjectWriteOperation op;
  librbd::cls_client::object_map_resize(&op, object_count, OBJECT_NONEXISTENT);

  int r;
  auto finish_op_ctx = start_lock_op(&r);
  if (finish_op_ctx == nullptr) {
    lderr(m_cct) << "lost exclusive lock" << dendl;
    finish(r);
    return;
  }

  auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
      handle_create_object_map(r);
      finish_op_ctx->complete(0);
    });
  librados::AioCompletion *comp = create_rados_callback(ctx);
  r = m_dst_image_ctx->md_ctx.aio_operate(object_map_oid, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}
// Completion handler for the object-map creation; last step of the request.
template <typename I>
void SnapshotCreateRequest<I>::handle_create_object_map(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_cct) << "failed to create object map: " << cpp_strerror(r)
                 << dendl;
  }

  // an error propagates as-is; success completes with 0
  finish(r < 0 ? r : 0);
}
// Registers an in-flight op with the exclusive lock (when the feature is
// enabled) and returns a context that must be completed once the op is
// done.  Returns nullptr (with *r set by start_op) if the lock was lost.
template <typename I>
Context *SnapshotCreateRequest<I>::start_lock_op(int* r) {
  std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
  if (m_dst_image_ctx->exclusive_lock == nullptr) {
    // exclusive lock disabled -- nothing to track, return a no-op context
    return new LambdaContext([](int r) {});
  }
  return m_dst_image_ctx->exclusive_lock->start_op(r);
}
// Completes the request: fires the user-supplied callback with the final
// result and destroys this (heap-allocated) request object.
template <typename I>
void SnapshotCreateRequest<I>::finish(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  m_on_finish->complete(r);
  delete this;
}
} // namespace deep_copy
} // namespace librbd
template class librbd::deep_copy::SnapshotCreateRequest<librbd::ImageCtx>;
| 5,629 | 28.946809 | 80 | cc |
null | ceph-main/src/librbd/deep_copy/SnapshotCreateRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_DEEP_COPY_SNAPSHOT_CREATE_REQUEST_H
#define CEPH_LIBRBD_DEEP_COPY_SNAPSHOT_CREATE_REQUEST_H
#include "include/int_types.h"
#include "include/rados/librados.hpp"
#include "common/snap_types.h"
#include "librbd/ImageCtx.h"
#include "librbd/Types.h"
#include "librbd/internal.h"
#include <map>
#include <set>
#include <string>
#include <tuple>
class Context;
namespace librbd {
namespace deep_copy {
// State machine that creates a snapshot on the deep-copy destination
// image (see the state diagram below).  The request is heap allocated
// and deletes itself once on_finish has been invoked.
template <typename ImageCtxT = librbd::ImageCtx>
class SnapshotCreateRequest {
public:
  // Factory helper mirroring the constructor.
  static SnapshotCreateRequest* create(ImageCtxT *dst_image_ctx,
                                       const std::string &snap_name,
                                       const cls::rbd::SnapshotNamespace &snap_namespace,
                                       uint64_t size,
                                       const cls::rbd::ParentImageSpec &parent_spec,
                                       uint64_t parent_overlap,
                                       Context *on_finish) {
    return new SnapshotCreateRequest(dst_image_ctx, snap_name, snap_namespace, size,
                                     parent_spec, parent_overlap, on_finish);
  }

  SnapshotCreateRequest(ImageCtxT *dst_image_ctx,
                        const std::string &snap_name,
                        const cls::rbd::SnapshotNamespace &snap_namespace,
                        uint64_t size,
                        const cls::rbd::ParentImageSpec &parent_spec,
                        uint64_t parent_overlap, Context *on_finish);

  // Kicks off the state machine.
  void send();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * SET_HEAD
   *    |
   *    v
   * CREATE_SNAP
   *    |
   *    v (skip if not needed)
   * CREATE_OBJECT_MAP
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  ImageCtxT *m_dst_image_ctx;                 // destination image (not owned)
  std::string m_snap_name;                    // snapshot to create
  cls::rbd::SnapshotNamespace m_snap_namespace;
  uint64_t m_size;                            // image size for the snapshot
  cls::rbd::ParentImageSpec m_parent_spec;    // parent linkage for the head
  uint64_t m_parent_overlap;
  Context *m_on_finish;                       // completion callback (r = result)

  CephContext *m_cct;
  NoOpProgressContext m_prog_ctx;

  void send_set_head();
  void handle_set_head(int r);

  void send_create_snap();
  void handle_create_snap(int r);

  void send_create_object_map();
  void handle_create_object_map(int r);

  // see SnapshotCreateRequest.cc -- returns nullptr if the lock was lost
  Context *start_lock_op(int* r);

  void finish(int r);
};
} // namespace deep_copy
} // namespace librbd
extern template class librbd::deep_copy::SnapshotCreateRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_DEEP_COPY_SNAPSHOT_CREATE_REQUEST_H
| 2,564 | 24.909091 | 89 | h |
null | ceph-main/src/librbd/deep_copy/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_DEEP_COPY_TYPES_H
#define CEPH_LIBRBD_DEEP_COPY_TYPES_H
#include "include/int_types.h"
#include "include/rados/librados.hpp"
#include <boost/optional.hpp>
namespace librbd {
namespace deep_copy {
// Bit flags modifying the behavior of an object copy request.
enum {
  OBJECT_COPY_REQUEST_FLAG_FLATTEN       = 1U << 0,
  OBJECT_COPY_REQUEST_FLAG_MIGRATION     = 1U << 1,
  OBJECT_COPY_REQUEST_FLAG_EXISTS_CLEAN  = 1U << 2,
};

// Ordered list of snapshot ids.
typedef std::vector<librados::snap_t> SnapIds;
// Maps a snapshot id to the list of snapshot ids associated with it
// (see deep_copy::util::compute_snap_map).
typedef std::map<librados::snap_t, SnapIds> SnapMap;

// Optional object number, e.g. when no object is specified.
typedef boost::optional<uint64_t> ObjectNumber;
} // namespace deep_copy
} // namespace librbd
#endif // CEPH_LIBRBD_DEEP_COPY_TYPES_H
| 720 | 23.862069 | 70 | h |
null | ceph-main/src/librbd/deep_copy/Utils.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/debug.h"
#include "Utils.h"
#include <set>
namespace librbd {
namespace deep_copy {
namespace util {
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::deep_copy::util::" << __func__ << ": "
// Builds *snap_map: for each src snapshot id in snap_seqs that falls in
// [src_snap_id_start, src_snap_id_end], the list of dst snapshot ids
// accumulated so far.  Ids are prepended as snap_seqs is walked, so the
// most recently mapped dst id is at the front of each list.  dst snap
// ids not produced by snap_seqs (non-user snapshots skipped by deep
// copy) are interleaved into the lists in id order.
void compute_snap_map(CephContext* cct,
                      librados::snap_t src_snap_id_start,
                      librados::snap_t src_snap_id_end,
                      const SnapIds& dst_snap_ids,
                      const SnapSeqs &snap_seqs,
                      SnapMap *snap_map) {
  // iterate dst snap ids in ascending order regardless of input order
  std::set<librados::snap_t> ordered_dst_snap_ids{
    dst_snap_ids.begin(), dst_snap_ids.end()};
  auto dst_snap_id_it = ordered_dst_snap_ids.begin();

  SnapIds snap_ids;
  for (auto &it : snap_seqs) {
    // ensure all dst snap ids are included in the mapping table since
    // deep copy will skip non-user snapshots
    while (dst_snap_id_it != ordered_dst_snap_ids.end()) {
      if (*dst_snap_id_it < it.second) {
        snap_ids.insert(snap_ids.begin(), *dst_snap_id_it);
      } else if (*dst_snap_id_it > it.second) {
        break;
      }
      ++dst_snap_id_it;
    }

    // we should only have the HEAD revision in the last snap seq
    ceph_assert(snap_ids.empty() || snap_ids[0] != CEPH_NOSNAP);
    snap_ids.insert(snap_ids.begin(), it.second);

    // only record entries within the requested src snap id window
    if (it.first < src_snap_id_start) {
      continue;
    } else if (it.first > src_snap_id_end) {
      break;
    }

    (*snap_map)[it.first] = snap_ids;
  }

  ldout(cct, 10) << "src_snap_id_start=" << src_snap_id_start << ", "
                 << "src_snap_id_end=" << src_snap_id_end << ", "
                 << "dst_snap_ids=" << dst_snap_ids << ", "
                 << "snap_seqs=" << snap_seqs << ", "
                 << "snap_map=" << *snap_map << dendl;
}
} // namespace util
} // namespace deep_copy
} // namespace librbd
| 1,954 | 30.532258 | 77 | cc |
null | ceph-main/src/librbd/deep_copy/Utils.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_DEEP_COPY_UTILS_H
#define CEPH_LIBRBD_DEEP_COPY_UTILS_H
#include "include/common_fwd.h"
#include "include/rados/librados.hpp"
#include "librbd/Types.h"
#include "librbd/deep_copy/Types.h"
#include <boost/optional.hpp>
namespace librbd {
namespace deep_copy {
namespace util {
// Builds *snap_map from the src->dst snapshot sequence mapping for src
// snapshot ids within [src_snap_id_start, src_snap_id_end]; dst snap
// ids missing from snap_seqs are interleaved into the resulting lists.
void compute_snap_map(CephContext* cct,
                      librados::snap_t src_snap_id_start,
                      librados::snap_t src_snap_id_end,
                      const SnapIds& dst_snap_ids,
                      const SnapSeqs &snap_seqs,
                      SnapMap *snap_map);
} // namespace util
} // namespace deep_copy
} // namespace librbd
#endif // CEPH_LIBRBD_DEEP_COPY_UTILS_H
| 804 | 25.833333 | 70 | h |
null | ceph-main/src/librbd/exclusive_lock/AutomaticPolicy.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/exclusive_lock/AutomaticPolicy.h"
#include "librbd/ImageCtx.h"
#include "librbd/ExclusiveLock.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ExclusiveLock::AutomaticPolicy "
namespace librbd {
namespace exclusive_lock {
// Handles a peer's request for the exclusive lock by automatically
// releasing it; forced requests receive no special treatment.
// Caller must hold the image's owner_lock.
int AutomaticPolicy::lock_requested(bool force) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx->owner_lock));
  ceph_assert(m_image_ctx->exclusive_lock != nullptr);

  ldout(m_image_ctx->cct, 20) << this << " " << __func__ << ": force=" << force
                              << dendl;

  // release the lock upon request (ignore forced requests)
  m_image_ctx->exclusive_lock->release_lock(nullptr);
  return 0;
}
} // namespace exclusive_lock
} // namespace librbd
| 839 | 27 | 79 | cc |
null | ceph-main/src/librbd/exclusive_lock/AutomaticPolicy.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_AUTOMATIC_POLICY_H
#define CEPH_LIBRBD_EXCLUSIVE_LOCK_AUTOMATIC_POLICY_H
#include "librbd/exclusive_lock/Policy.h"
namespace librbd {
struct ImageCtx;
namespace exclusive_lock {
// Exclusive-lock policy that transparently hands the lock over: the
// lock may be auto-acquired for IO/ops and is released whenever a peer
// requests it.
class AutomaticPolicy : public Policy {
public:
  AutomaticPolicy(ImageCtx *image_ctx) : m_image_ctx(image_ctx) {
  }

  // lock may be acquired automatically on behalf of IO/operations
  bool may_auto_request_lock() override {
    return true;
  }

  // releases the lock when a peer asks for it (see AutomaticPolicy.cc)
  int lock_requested(bool force) override;

private:
  ImageCtx *m_image_ctx;   // not owned
};
} // namespace exclusive_lock
} // namespace librbd
#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_AUTOMATIC_POLICY_H
| 692 | 18.8 | 70 | h |
null | ceph-main/src/librbd/exclusive_lock/ImageDispatch.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/exclusive_lock/ImageDispatch.h"
#include "include/Context.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/exclusive_lock/Policy.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/ImageDispatcherInterface.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::exclusive_lock::ImageDispatch: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace exclusive_lock {
using util::create_context_callback;
using util::create_async_context_callback;
// Constructs the exclusive-lock image dispatch layer.  m_lock guards the
// require-lock flags and the queue of dispatch contexts blocked behind
// lock acquisition.
template <typename I>
ImageDispatch<I>::ImageDispatch(I* image_ctx)
  : m_image_ctx(image_ctx),
    m_lock(ceph::make_shared_mutex(
      util::unique_lock_name("librbd::exclusive_lock::ImageDispatch::m_lock",
                             this))) {
}
// Shuts the dispatch layer down: any IO contexts parked while waiting
// on the exclusive lock are drained and restarted, then on_finish fires.
template <typename I>
void ImageDispatch<I>::shut_down(Context* on_finish) {
  // release any IO waiting on exclusive lock
  Contexts waiting_ios;
  {
    std::unique_lock locker{m_lock};
    waiting_ios.swap(m_on_dispatches);
  }

  for (auto* waiting_ctx : waiting_ios) {
    waiting_ctx->complete(0);
  }

  on_finish->complete(0);
}
// Blocks future IO in the given direction(s) at this layer and, unless
// the direction is read-only, flushes in-flight writes at lower layers.
// on_finish fires once the flush (if issued) completes.
template <typename I>
void ImageDispatch<I>::set_require_lock(bool init_shutdown,
                                        io::Direction direction,
                                        Context* on_finish) {
  // pause any matching IO from proceeding past this layer
  set_require_lock(direction, true);

  if (direction == io::DIRECTION_READ) {
    on_finish->complete(0);
    return;
  }

  // push through a flush for any in-flight writes at lower levels
  auto aio_comp = io::AioCompletion::create_and_start(
    on_finish, util::get_image_ctx(m_image_ctx), io::AIO_TYPE_FLUSH);
  auto req = io::ImageDispatchSpec::create_flush(
    *m_image_ctx, io::IMAGE_DISPATCH_LAYER_EXCLUSIVE_LOCK, aio_comp,
    (init_shutdown ?
      io::FLUSH_SOURCE_EXCLUSIVE_LOCK_SKIP_REFRESH :
      io::FLUSH_SOURCE_EXCLUSIVE_LOCK), {});
  req->send();
}
// Re-allows IO in the given direction(s) to pass this layer.
template <typename I>
void ImageDispatch<I>::unset_require_lock(io::Direction direction) {
  set_require_lock(direction, false);
}
// Updates the per-direction require-lock flags under m_lock.  Returns
// true only when the overall "lock required" state actually toggled in
// the requested direction (off->on when enabling, on->off when
// disabling).
template <typename I>
bool ImageDispatch<I>::set_require_lock(io::Direction direction, bool enabled) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "direction=" << direction << ", enabled=" << enabled
                 << dendl;

  std::unique_lock locker{m_lock};
  bool was_required = (m_require_lock_on_read || m_require_lock_on_write);

  if (direction == io::DIRECTION_READ || direction == io::DIRECTION_BOTH) {
    m_require_lock_on_read = enabled;
  }
  if (direction == io::DIRECTION_WRITE || direction == io::DIRECTION_BOTH) {
    m_require_lock_on_write = enabled;
  }

  bool now_required = (m_require_lock_on_read || m_require_lock_on_write);
  if (enabled) {
    return !was_required && now_required;
  }
  return was_required && !now_required;
}
// Read entry point: when reads currently require the exclusive lock the
// IO is delayed behind acquisition (return true = dispatch owned by
// this layer); otherwise it continues to the next layer.
template <typename I>
bool ImageDispatch<I>::read(
    io::AioCompletion* aio_comp, io::Extents &&image_extents,
    io::ReadResult &&read_result, IOContext io_context, int op_flags,
    int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
    std::atomic<uint32_t>* image_dispatch_flags,
    io::DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "image_extents=" << image_extents << dendl;

  if (needs_exclusive_lock(true, tid, dispatch_result, on_dispatched)) {
    return true;
  }
  return false;
}
// Write entry point: delayed behind exclusive-lock acquisition when
// writes currently require the lock; otherwise passes through.
template <typename I>
bool ImageDispatch<I>::write(
    io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl,
    int op_flags, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    io::DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
                 << dendl;

  if (needs_exclusive_lock(false, tid, dispatch_result, on_dispatched)) {
    return true;
  }
  return false;
}
// Discard entry point: treated as a write op for lock purposes.
template <typename I>
bool ImageDispatch<I>::discard(
    io::AioCompletion* aio_comp, io::Extents &&image_extents,
    uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    io::DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
                 << dendl;

  if (needs_exclusive_lock(false, tid, dispatch_result, on_dispatched)) {
    return true;
  }
  return false;
}
// Write-same entry point: treated as a write op for lock purposes.
template <typename I>
bool ImageDispatch<I>::write_same(
    io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl,
    int op_flags, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    io::DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
                 << dendl;

  if (needs_exclusive_lock(false, tid, dispatch_result, on_dispatched)) {
    return true;
  }
  return false;
}
// Compare-and-write entry point: treated as a write op for lock
// purposes.
template <typename I>
bool ImageDispatch<I>::compare_and_write(
    io::AioCompletion* aio_comp, io::Extents &&image_extents,
    bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
    int op_flags, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    io::DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
                 << dendl;

  if (needs_exclusive_lock(false, tid, dispatch_result, on_dispatched)) {
    return true;
  }
  return false;
}
// Flush entry point: only user-initiated flushes may trigger lock
// acquisition; internal flushes always pass straight through.
template <typename I>
bool ImageDispatch<I>::flush(
    io::AioCompletion* aio_comp, io::FlushSource flush_source,
    const ZTracer::Trace &parent_trace, uint64_t tid,
    std::atomic<uint32_t>* image_dispatch_flags,
    io::DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << dendl;

  // don't attempt to grab the exclusive lock if were are just internally
  // clearing out our in-flight IO queue
  if (flush_source != io::FLUSH_SOURCE_USER) {
    return false;
  }

  if (needs_exclusive_lock(false, tid, dispatch_result, on_dispatched)) {
    return true;
  }
  return false;
}
// Returns whether the given op class (read vs write) currently requires
// the exclusive lock.  Caller must hold m_lock.
template <typename I>
bool ImageDispatch<I>::is_lock_required(bool read_op) const {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  return read_op ? m_require_lock_on_read : m_require_lock_on_write;
}
// Decides whether the IO must wait for exclusive-lock acquisition.
// Returns true when this layer has taken ownership of the dispatch --
// either by queueing it behind a pending acquisition (DISPATCH_RESULT_
// RESTART) or by failing it outright (policy forbids auto-acquire).
// Returns false when the IO may proceed to the next layer.
template <typename I>
bool ImageDispatch<I>::needs_exclusive_lock(bool read_op, uint64_t tid,
                                            io::DispatchResult* dispatch_result,
                                            Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  bool lock_required = false;
  {
    std::shared_lock locker{m_lock};
    lock_required = is_lock_required(read_op);
  }

  if (lock_required) {
    std::shared_lock owner_locker{m_image_ctx->owner_lock};
    if (m_image_ctx->exclusive_lock == nullptr) {
      // raced with the exclusive lock being disabled
      return false;
    }

    ldout(cct, 5) << "exclusive lock required: delaying IO" << dendl;
    if (!m_image_ctx->get_exclusive_lock_policy()->may_auto_request_lock()) {
      // policy forbids auto-acquisition: fail the op immediately
      lderr(cct) << "op requires exclusive lock" << dendl;
      *dispatch_result = io::DISPATCH_RESULT_CONTINUE;
      on_dispatched->complete(
        m_image_ctx->exclusive_lock->get_unlocked_op_error());
      return true;
    }

    // block potential races with other incoming IOs
    std::unique_lock locker{m_lock};
    // front-of-queue context re-entering after a restart dispatch
    bool retesting_lock = (
      !m_on_dispatches.empty() && m_on_dispatches.front() == on_dispatched);
    if (!m_on_dispatches.empty() && !retesting_lock) {
      // an acquisition attempt is already in flight -- queue behind it
      *dispatch_result = io::DISPATCH_RESULT_RESTART;
      m_on_dispatches.push_back(on_dispatched);
      return true;
    }

    if (!is_lock_required(read_op)) {
      // requirement cleared while we waited for m_lock
      return false;
    }

    ceph_assert(m_on_dispatches.empty() || retesting_lock);
    m_on_dispatches.push_back(on_dispatched);
    locker.unlock();

    *dispatch_result = io::DISPATCH_RESULT_RESTART;
    auto ctx = create_async_context_callback(
      *m_image_ctx, create_context_callback<
        ImageDispatch<I>, &ImageDispatch<I>::handle_acquire_lock>(this));
    m_image_ctx->exclusive_lock->acquire_lock(ctx);
    return true;
  }
  return false;
}
// Completion handler for the exclusive-lock acquisition started in
// needs_exclusive_lock().  On hard failure only the front (initiating)
// context is failed; every other queued context is restarted so it can
// re-test whether the lock is still required.
template <typename I>
void ImageDispatch<I>::handle_acquire_lock(int r) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 5) << "r=" << r << dendl;

  std::unique_lock locker{m_lock};
  ceph_assert(!m_on_dispatches.empty());

  Context* failed_dispatch = nullptr;
  Contexts on_dispatches;
  if (r == -ERESTART) {
    // shutdown is not an error for the queued IOs; they will re-test
    ldout(cct, 5) << "IO raced with exclusive lock shutdown" << dendl;
  } else if (r < 0) {
    lderr(cct) << "failed to acquire exclusive lock: " << cpp_strerror(r)
               << dendl;
    failed_dispatch = m_on_dispatches.front();
    m_on_dispatches.pop_front();
  }

  // re-test if lock is still required (i.e. it wasn't acquired/lost) via a
  // restart dispatch
  std::swap(on_dispatches, m_on_dispatches);
  locker.unlock();

  if (failed_dispatch != nullptr) {
    failed_dispatch->complete(r);
  }
  for (auto ctx : on_dispatches) {
    ctx->complete(0);
  }
}
} // namespace exclusive_lock
} // namespace librbd
template class librbd::exclusive_lock::ImageDispatch<librbd::ImageCtx>;
| 10,081 | 30.4081 | 80 | cc |
null | ceph-main/src/librbd/exclusive_lock/ImageDispatch.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_IMAGE_DISPATCH_H
#define CEPH_LIBRBD_EXCLUSIVE_LOCK_IMAGE_DISPATCH_H
#include "librbd/io/ImageDispatchInterface.h"
#include "include/int_types.h"
#include "include/buffer.h"
#include "common/ceph_mutex.h"
#include "common/zipkin_trace.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Types.h"
#include <atomic>
#include <list>
#include <unordered_set>
struct Context;
namespace librbd {
struct ImageCtx;
namespace io {
struct AioCompletion;
}
namespace exclusive_lock {
// Image dispatch layer that parks IO while the exclusive lock is
// required but not held, transparently triggering lock acquisition
// (implementation in ImageDispatch.cc).
template <typename ImageCtxT>
class ImageDispatch : public io::ImageDispatchInterface {
public:
  static ImageDispatch* create(ImageCtxT* image_ctx) {
    return new ImageDispatch(image_ctx);
  }
  void destroy() {
    delete this;
  }

  ImageDispatch(ImageCtxT* image_ctx);

  io::ImageDispatchLayer get_dispatch_layer() const override {
    return io::IMAGE_DISPATCH_LAYER_EXCLUSIVE_LOCK;
  }

  // block IO in the given direction(s); flushes lower layers for writes
  void set_require_lock(bool init_shutdown,
                        io::Direction direction, Context* on_finish);
  // re-allow IO in the given direction(s)
  void unset_require_lock(io::Direction direction);

  void shut_down(Context* on_finish) override;

  bool read(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      io::ReadResult &&read_result, IOContext io_context, int op_flags,
      int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool write(
      io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool discard(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool write_same(
      io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool compare_and_write(
      io::AioCompletion* aio_comp, io::Extents &&image_extents,
      bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
      int op_flags, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;
  bool flush(
      io::AioCompletion* aio_comp, io::FlushSource flush_source,
      const ZTracer::Trace &parent_trace, uint64_t tid,
      std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override;

  // snapshot listing never requires the lock
  bool list_snaps(
      io::AioCompletion* aio_comp, io::Extents&& image_extents,
      io::SnapIds&& snap_ids, int list_snaps_flags,
      io::SnapshotDelta* snapshot_delta, const ZTracer::Trace &parent_trace,
      uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
      io::DispatchResult* dispatch_result, Context** on_finish,
      Context* on_dispatched) override {
    return false;
  }

  bool invalidate_cache(Context* on_finish) override {
    return false;
  }

private:
  typedef std::list<Context*> Contexts;
  typedef std::unordered_set<uint64_t> Tids;

  ImageCtxT* m_image_ctx;          // not owned

  // guards the flags and queue below
  mutable ceph::shared_mutex m_lock;
  bool m_require_lock_on_read = false;
  bool m_require_lock_on_write = false;

  // dispatch contexts parked while waiting for lock acquisition
  Contexts m_on_dispatches;

  bool set_require_lock(io::Direction direction, bool enabled);

  bool is_lock_required(bool read_op) const;

  bool needs_exclusive_lock(bool read_op, uint64_t tid,
                            io::DispatchResult* dispatch_result,
                            Context* on_dispatched);

  void handle_acquire_lock(int r);
};
} // namespace exclusiv_lock
} // namespace librbd
extern template class librbd::exclusive_lock::ImageDispatch<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_IMAGE_DISPATCH_H
| 4,518 | 32.723881 | 80 | h |
null | ceph-main/src/librbd/exclusive_lock/Policy.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_POLICY_H
#define CEPH_LIBRBD_EXCLUSIVE_LOCK_POLICY_H
namespace librbd {
namespace exclusive_lock {
// Request categories passed to Policy::accept_blocked_request().
enum OperationRequestType {
  OPERATION_REQUEST_TYPE_GENERAL = 0,
  OPERATION_REQUEST_TYPE_TRASH_SNAP_REMOVE = 1,
  OPERATION_REQUEST_TYPE_FORCE_PROMOTION = 2,
};

// Pluggable policy controlling how an image responds to exclusive-lock
// requests from peers.
struct Policy {
  virtual ~Policy() {
  }

  // whether the lock may be acquired automatically on behalf of IO/ops
  virtual bool may_auto_request_lock() = 0;
  // invoked when a peer requests the lock; force marks a forced request
  virtual int lock_requested(bool force) = 0;

  // whether a blocked request of the given type should be accepted
  // (default: reject all)
  virtual bool accept_blocked_request(OperationRequestType) {
    return false;
  }
};
} // namespace exclusive_lock
} // namespace librbd
#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_POLICY_H
| 732 | 21.90625 | 70 | h |
null | ceph-main/src/librbd/exclusive_lock/PostAcquireRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/exclusive_lock/PostAcquireRequest.h"
#include "cls/lock/cls_lock_client.h"
#include "cls/lock/cls_lock_types.h"
#include "common/dout.h"
#include "common/errno.h"
#include "include/stringify.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/ImageWatcher.h"
#include "librbd/Journal.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "librbd/image/RefreshRequest.h"
#include "librbd/journal/Policy.h"
#include "librbd/PluginRegistry.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::exclusive_lock::PostAcquireRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace exclusive_lock {
using util::create_async_context_callback;
using util::create_context_callback;
using util::create_rados_callback;
// Factory helper mirroring the constructor.
template <typename I>
PostAcquireRequest<I>* PostAcquireRequest<I>::create(I &image_ctx,
                                                     Context *on_acquire,
                                                     Context *on_finish) {
  return new PostAcquireRequest(image_ctx, on_acquire, on_finish);
}
// on_finish is wrapped in an async callback so completion is deferred to
// the image's work queue rather than running inline.
template <typename I>
PostAcquireRequest<I>::PostAcquireRequest(I &image_ctx, Context *on_acquire,
                                          Context *on_finish)
  : m_image_ctx(image_ctx),
    m_on_acquire(on_acquire),
    m_on_finish(create_async_context_callback(image_ctx, on_finish)),
    m_object_map(nullptr), m_journal(nullptr), m_error_result(0) {
}
// If apply() never ran, notify ImageState that the prepare-lock phase is
// over; also frees an unconsumed on_acquire callback.
template <typename I>
PostAcquireRequest<I>::~PostAcquireRequest() {
  if (!m_prepare_lock_completed) {
    m_image_ctx.state->handle_prepare_lock_complete();
  }
  delete m_on_acquire;
}
// Entry point: the state machine begins with an (optional) image refresh.
template <typename I>
void PostAcquireRequest<I>::send() {
  send_refresh();
}
// Refreshes the image if a refresh is pending; otherwise skips straight
// to opening the object map.
template <typename I>
void PostAcquireRequest<I>::send_refresh() {
  if (!m_image_ctx.state->is_refresh_required()) {
    send_open_object_map();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PostAcquireRequest<I>;
  Context *ctx = create_async_context_callback(
    m_image_ctx, create_context_callback<klass, &klass::handle_refresh>(this));

  // ImageState is blocked waiting for lock to complete -- safe to directly
  // refresh
  image::RefreshRequest<I> *req = image::RefreshRequest<I>::create(
    m_image_ctx, true, false, ctx);
  req->send();
}
// Refresh completion: -ERESTART (exclusive lock dynamically disabled) is
// treated as success; any other error aborts the request.
template <typename I>
void PostAcquireRequest<I>::handle_refresh(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  if (r == -ERESTART) {
    // next issued IO or op will (re)-refresh the image and shut down lock
    ldout(cct, 5) << "exclusive lock dynamically disabled" << dendl;
    r = 0;
  } else if (r < 0) {
    lderr(cct) << "failed to refresh image: " << cpp_strerror(r) << dendl;
    save_result(r);
    revert();
    finish();
    return;
  }

  send_open_object_map();
}
// Notifies the caller that the lock is owned, then opens the journal if
// journaling is enabled and not disabled by policy; otherwise proceeds
// directly to the plugin-acquire step.
template <typename I>
void PostAcquireRequest<I>::send_open_journal() {
  // alert caller that we now own the exclusive lock
  m_on_acquire->complete(0);
  m_on_acquire = nullptr;

  bool journal_enabled;
  {
    std::shared_lock image_locker{m_image_ctx.image_lock};
    journal_enabled = (m_image_ctx.test_features(RBD_FEATURE_JOURNALING,
                                                 m_image_ctx.image_lock) &&
                       !m_image_ctx.get_journal_policy()->journal_disabled());
  }
  if (!journal_enabled) {
    apply();
    send_process_plugin_acquire_lock();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PostAcquireRequest<I>;
  Context *ctx = create_context_callback<klass, &klass::handle_open_journal>(
    this);
  m_journal = m_image_ctx.create_journal();

  // journal playback requires object map (if enabled) and itself
  apply();

  m_journal->open(ctx);
}
// Journal open completion: record the result, then either allocate the
// journal tag (success) or unwind by closing the journal (failure).
template <typename I>
void PostAcquireRequest<I>::handle_open_journal(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  save_result(r);
  if (r >= 0) {
    send_allocate_journal_tag();
    return;
  }

  lderr(cct) << "failed to open journal: " << cpp_strerror(r) << dendl;
  send_close_journal();
}
// Delegates journal-tag allocation to the configured journal policy.
template <typename I>
void PostAcquireRequest<I>::send_allocate_journal_tag() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  std::shared_lock image_locker{m_image_ctx.image_lock};
  using klass = PostAcquireRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_allocate_journal_tag>(this, m_journal);
  m_image_ctx.get_journal_policy()->allocate_tag_on_lock(ctx);
}
// Tag allocation completion: on failure unwind via journal close,
// otherwise continue with the plugin-acquire step.
template <typename I>
void PostAcquireRequest<I>::handle_allocate_journal_tag(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  save_result(r);
  if (r < 0) {
    lderr(cct) << "failed to allocate journal tag: " << cpp_strerror(r)
               << dendl;
    send_close_journal();
    return;
  }

  send_process_plugin_acquire_lock();
}
// Notifies registered plugins that the exclusive lock has been acquired.
template <typename I>
void PostAcquireRequest<I>::send_process_plugin_acquire_lock() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PostAcquireRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_process_plugin_acquire_lock>(this);
  m_image_ctx.plugin_registry->acquired_exclusive_lock(ctx);
}
// Plugin-acquire completion: success finishes the request; failure
// unwinds through the plugin pre-release step.
template <typename I>
void PostAcquireRequest<I>::handle_process_plugin_acquire_lock(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  save_result(r);
  if (r >= 0) {
    finish();
    return;
  }

  lderr(cct) << "failed to process plugins: " << cpp_strerror(r)
             << dendl;
  send_process_plugin_release_lock();
}
// Unwind step: notifies plugins that the lock is about to be released.
template <typename I>
void PostAcquireRequest<I>::send_process_plugin_release_lock() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PostAcquireRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_process_plugin_release_lock>(this);
  m_image_ctx.plugin_registry->prerelease_exclusive_lock(ctx);
}
// Plugin pre-release completion: errors are recorded but unwinding
// continues with the journal close regardless.
template <typename I>
void PostAcquireRequest<I>::handle_process_plugin_release_lock(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  save_result(r);
  if (r < 0) {
    lderr(cct) << "failed to release plugins: " << cpp_strerror(r)
               << dendl;
  }
  send_close_journal();
}
// Unwind step: closes the journal if one was opened, otherwise skips to
// closing the object map.
template <typename I>
void PostAcquireRequest<I>::send_close_journal() {
  if (m_journal == nullptr) {
    send_close_object_map();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PostAcquireRequest<I>;
  Context *ctx = create_context_callback<klass, &klass::handle_close_journal>(
    this);
  m_journal->close(ctx);
}
// Journal close completion: errors are recorded; unwinding continues
// with the object-map close.
template <typename I>
void PostAcquireRequest<I>::handle_close_journal(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  save_result(r);
  if (r < 0) {
    lderr(cct) << "failed to close journal: " << cpp_strerror(r) << dendl;
  }

  send_close_object_map();
}
// Opens the HEAD object map when the feature is enabled; otherwise
// proceeds directly to opening the journal.
template <typename I>
void PostAcquireRequest<I>::send_open_object_map() {
  if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP)) {
    send_open_journal();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PostAcquireRequest<I>;
  Context *ctx = create_context_callback<klass, &klass::handle_open_object_map>(
    this);

  m_object_map = m_image_ctx.create_object_map(CEPH_NOSNAP);
  m_object_map->open(ctx);
}
// Object-map open completion.  On failure the map is dropped; -EFBIG is
// tolerated (continue without an object map), any other error aborts
// the request.
template <typename I>
void PostAcquireRequest<I>::handle_open_object_map(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  if (r < 0) {
    lderr(cct) << "failed to open object map: " << cpp_strerror(r) << dendl;
    m_object_map->put();
    m_object_map = nullptr;

    if (r != -EFBIG) {
      save_result(r);
      revert();
      finish();
      return;
    }
  }

  send_open_journal();
}
// Unwind step: closes the object map if one was opened; otherwise the
// unwind completes immediately via revert()/finish().
template <typename I>
void PostAcquireRequest<I>::send_close_object_map() {
  if (m_object_map == nullptr) {
    revert();
    finish();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PostAcquireRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_close_object_map>(this);
  m_object_map->close(ctx);
}
// Object-map close completion: errors are only logged; the unwind then
// reverts published state and finishes with the saved error.
template <typename I>
void PostAcquireRequest<I>::handle_close_object_map(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  if (r < 0) {
    lderr(cct) << "failed to close object map: " << cpp_strerror(r) << dendl;
  }

  revert();
  finish();
}
// Publishes the freshly opened object map and journal on the image
// context and marks the prepare-lock phase as complete.
template <typename I>
void PostAcquireRequest<I>::apply() {
  {
    std::unique_lock image_locker{m_image_ctx.image_lock};
    ceph_assert(m_image_ctx.object_map == nullptr);
    m_image_ctx.object_map = m_object_map;

    ceph_assert(m_image_ctx.journal == nullptr);
    m_image_ctx.journal = m_journal;
  }

  m_prepare_lock_completed = true;
  m_image_ctx.state->handle_prepare_lock_complete();
}
// Unpublishes the object map and journal from the image context and
// drops this request's references.  Only reached on the error path, so
// an error must already have been recorded.
template <typename I>
void PostAcquireRequest<I>::revert() {
  std::unique_lock image_locker{m_image_ctx.image_lock};
  m_image_ctx.object_map = nullptr;
  m_image_ctx.journal = nullptr;

  if (m_object_map) {
    m_object_map->put();
  }
  if (m_journal) {
    m_journal->put();
  }

  ceph_assert(m_error_result < 0);
}
// Completes the request with the first recorded error (or 0) and
// destroys this (heap-allocated) request object.
template <typename I>
void PostAcquireRequest<I>::finish() {
  m_on_finish->complete(m_error_result);
  delete this;
}
} // namespace exclusive_lock
} // namespace librbd
template class librbd::exclusive_lock::PostAcquireRequest<librbd::ImageCtx>;
| 9,778 | 25.501355 | 80 | cc |
null | ceph-main/src/librbd/exclusive_lock/PostAcquireRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_POST_ACQUIRE_REQUEST_H
#define CEPH_LIBRBD_EXCLUSIVE_LOCK_POST_ACQUIRE_REQUEST_H
#include "include/int_types.h"
#include "include/buffer.h"
#include "librbd/ImageCtx.h"
#include "msg/msg_types.h"
#include <string>
class Context;
namespace librbd {
namespace exclusive_lock {
// State machine executed after the exclusive lock has been acquired:
// refreshes the image, opens the object map and journal, allocates a
// journal tag and notifies plugins, unwinding cleanly on any failure
// (see the state diagram below).  Self-deletes when finished.
template <typename ImageCtxT = ImageCtx>
class PostAcquireRequest {
public:
  static PostAcquireRequest* create(ImageCtxT &image_ctx, Context *on_acquire,
                                    Context *on_finish);

  ~PostAcquireRequest();
  void send();

private:

  /**
   * @verbatim
   *
   * <start>
   *    |
   *    |
   *    v
   * REFRESH (skip if not
   *    |      needed)
   *    v
   * OPEN_OBJECT_MAP (skip if
   *    |             disabled)
   *    v
   * OPEN_JOURNAL (skip if
   *    |   *      disabled)
   *    |   *
   *    |   * * * * * * * *
   *    v                 *
   * ALLOCATE_JOURNAL_TAG *
   *    |            *    *
   *    |            *    *
   *    v            *    *
   * PROCESS_PLUGIN_ACQUIRE*
   *    |            *    *
   *    |            *    *
   *    |            v    v
   *    |        PROCESS_PLUGIN_RELEASE
   *    |            |
   *    |            v
   *    |        CLOSE_JOURNAL
   *    |            |
   *    |            v
   *    |        CLOSE_OBJECT_MAP
   *    |            |
   *    v            |
   * <finish> <------/
   *
   * @endverbatim
   */

  PostAcquireRequest(ImageCtxT &image_ctx, Context *on_acquire,
                     Context *on_finish);

  ImageCtxT &m_image_ctx;
  Context *m_on_acquire;   // fired as soon as the lock is usable
  Context *m_on_finish;    // fired with the final result

  decltype(m_image_ctx.object_map) m_object_map;  // opened HEAD object map
  decltype(m_image_ctx.journal) m_journal;        // opened journal

  // whether ImageState has already been told prepare-lock is done
  bool m_prepare_lock_completed = false;
  int m_error_result;      // first error encountered (0 on success)

  void send_refresh();
  void handle_refresh(int r);

  void send_open_journal();
  void handle_open_journal(int r);

  void send_allocate_journal_tag();
  void handle_allocate_journal_tag(int r);

  void send_open_object_map();
  void handle_open_object_map(int r);

  void send_close_journal();
  void handle_close_journal(int r);

  void send_close_object_map();
  void handle_close_object_map(int r);

  void send_process_plugin_acquire_lock();
  void handle_process_plugin_acquire_lock(int r);

  void send_process_plugin_release_lock();
  void handle_process_plugin_release_lock(int r);

  void apply();
  void revert();

  void finish();

  // record only the first error
  void save_result(int result) {
    if (m_error_result == 0 && result < 0) {
      m_error_result = result;
    }
  }
};
} // namespace exclusive_lock
} // namespace librbd
extern template class librbd::exclusive_lock::PostAcquireRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_POST_ACQUIRE_REQUEST_H
| 2,910 | 22.288 | 83 | h |
null | ceph-main/src/librbd/exclusive_lock/PreAcquireRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/exclusive_lock/PreAcquireRequest.h"
#include "librbd/Utils.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/ImageState.h"
#include "librbd/asio/ContextWQ.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::exclusive_lock::PreAcquireRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace exclusive_lock {

using util::create_async_context_callback;
using util::create_context_callback;
using util::create_rados_callback;

// Factory helper; the request destroys itself after completing on_finish.
template <typename I>
PreAcquireRequest<I>* PreAcquireRequest<I>::create(I &image_ctx,
                                                   Context *on_finish) {
  return new PreAcquireRequest(image_ctx, on_finish);
}

template <typename I>
PreAcquireRequest<I>::PreAcquireRequest(I &image_ctx, Context *on_finish)
  // on_finish is wrapped so completion is deferred to the work queue
  : m_image_ctx(image_ctx),
    m_on_finish(create_async_context_callback(image_ctx, on_finish)),
    m_error_result(0) {
}

template <typename I>
PreAcquireRequest<I>::~PreAcquireRequest() {
}

// Entry point: PREPARE_LOCK -> FLUSH_NOTIFIES -> <finish>
template <typename I>
void PreAcquireRequest<I>::send() {
  send_prepare_lock();
}

// Ask the image state machine for permission to proceed with the lock
// transition.
template <typename I>
void PreAcquireRequest<I>::send_prepare_lock() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  // acquire the lock if the image is not busy performing other actions
  Context *ctx = create_context_callback<
    PreAcquireRequest<I>, &PreAcquireRequest<I>::handle_prepare_lock>(this);
  m_image_ctx.state->prepare_lock(ctx);
}

template <typename I>
void PreAcquireRequest<I>::handle_prepare_lock(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  // r is logged but otherwise ignored: the state machine proceeds regardless
  send_flush_notifies();
}

// Flush in-flight watch/notify callbacks before the lock state changes.
template <typename I>
void PreAcquireRequest<I>::send_flush_notifies() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PreAcquireRequest<I>;
  Context *ctx = create_context_callback<klass, &klass::handle_flush_notifies>(
    this);
  m_image_ctx.image_watcher->flush(ctx);
}

template <typename I>
void PreAcquireRequest<I>::handle_flush_notifies(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  // the flush is expected to always succeed
  ceph_assert(r == 0);
  finish();
}

// Complete the caller's context and self-destruct.
template <typename I>
void PreAcquireRequest<I>::finish() {
  m_on_finish->complete(m_error_result);
  delete this;
}

} // namespace exclusive_lock
} // namespace librbd
template class librbd::exclusive_lock::PreAcquireRequest<librbd::ImageCtx>;
| 2,609 | 26.1875 | 79 | cc |
null | ceph-main/src/librbd/exclusive_lock/PreAcquireRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_ACQUIRE_REQUEST_H
#define CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_ACQUIRE_REQUEST_H
#include "include/int_types.h"
#include "include/buffer.h"
#include "librbd/ImageCtx.h"
#include "msg/msg_types.h"
#include <string>
class Context;
namespace librbd {
namespace exclusive_lock {
/**
 * State machine executed before attempting to acquire the exclusive lock:
 * coordinates with the image state machine and flushes pending watch
 * notifications. Self-deletes after completing @c on_finish.
 */
template <typename ImageCtxT = ImageCtx>
class PreAcquireRequest {
public:
  /// Allocate a new request; it destroys itself once finished.
  static PreAcquireRequest* create(ImageCtxT &image_ctx, Context *on_finish);

  ~PreAcquireRequest();

  /// Kick off the state machine.
  void send();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * PREPARE_LOCK
   *    |
   *    v
   * FLUSH_NOTIFIES
   *    |
   *    |
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */
  PreAcquireRequest(ImageCtxT &image_ctx, Context *on_finish);

  ImageCtxT &m_image_ctx;
  Context *m_on_finish;   // completed with m_error_result

  int m_error_result;     // first recorded failure (0 on success)

  void send_prepare_lock();
  void handle_prepare_lock(int r);

  void send_flush_notifies();
  void handle_flush_notifies(int r);

  void finish();

  // Record the first failure; later errors do not overwrite it.
  void save_result(int result) {
    if (m_error_result == 0 && result < 0) {
      m_error_result = result;
    }
  }
};
} // namespace exclusive_lock
} // namespace librbd
extern template class librbd::exclusive_lock::PreAcquireRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_ACQUIRE_REQUEST_H
| 1,420 | 17.697368 | 82 | h |
null | ceph-main/src/librbd/exclusive_lock/PreReleaseRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/exclusive_lock/PreReleaseRequest.h"
#include "common/AsyncOpTracker.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageState.h"
#include "librbd/ImageWatcher.h"
#include "librbd/Journal.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "librbd/exclusive_lock/ImageDispatch.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/io/Types.h"
#include "librbd/PluginRegistry.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::exclusive_lock::PreReleaseRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace exclusive_lock {

using util::create_async_context_callback;
using util::create_context_callback;

// Factory helper; the request destroys itself after completing on_finish.
template <typename I>
PreReleaseRequest<I>* PreReleaseRequest<I>::create(
    I &image_ctx, ImageDispatch<I>* image_dispatch, bool shutting_down,
    AsyncOpTracker &async_op_tracker, Context *on_finish) {
  return new PreReleaseRequest(image_ctx, image_dispatch, shutting_down,
                               async_op_tracker, on_finish);
}

template <typename I>
PreReleaseRequest<I>::PreReleaseRequest(I &image_ctx,
                                        ImageDispatch<I>* image_dispatch,
                                        bool shutting_down,
                                        AsyncOpTracker &async_op_tracker,
                                        Context *on_finish)
  // on_finish is wrapped so completion is deferred to the work queue
  : m_image_ctx(image_ctx), m_image_dispatch(image_dispatch),
    m_shutting_down(shutting_down), m_async_op_tracker(async_op_tracker),
    m_on_finish(create_async_context_callback(image_ctx, on_finish)) {
}

template <typename I>
PreReleaseRequest<I>::~PreReleaseRequest() {
  // balance the prepare_lock() issued by send_prepare_lock(); skipped when
  // shutting down since prepare_lock() was never invoked on that path
  if (!m_shutting_down) {
    m_image_ctx.state->handle_prepare_lock_complete();
  }
}

// Entry point: see the state diagram in PreReleaseRequest.h.
template <typename I>
void PreReleaseRequest<I>::send() {
  send_cancel_op_requests();
}

// Cancel any in-flight async maintenance requests before giving up the lock.
template <typename I>
void PreReleaseRequest<I>::send_cancel_op_requests() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PreReleaseRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_cancel_op_requests>(this);
  m_image_ctx.cancel_async_requests(ctx);
}

template <typename I>
void PreReleaseRequest<I>::handle_cancel_op_requests(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  // cancellation is expected to always succeed
  ceph_assert(r == 0);

  send_set_require_lock();
}

// Mark the IO dispatch layer as requiring the lock so new IO is queued
// (and will re-request the lock) while it is being released.
template <typename I>
void PreReleaseRequest<I>::send_set_require_lock() {
  if (!m_image_ctx.test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) {
    // exclusive-lock was disabled, no need to block IOs
    send_wait_for_ops();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PreReleaseRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_set_require_lock>(this);

  // setting the lock as required will automatically cause the IO
  // queue to re-request the lock if any IO is queued
  if (m_image_ctx.clone_copy_on_read ||
      m_image_ctx.test_features(RBD_FEATURE_JOURNALING) ||
      m_image_ctx.test_features(RBD_FEATURE_DIRTY_CACHE)) {
    // these features can mutate the image on reads as well, so block
    // both directions
    m_image_dispatch->set_require_lock(m_shutting_down,
                                       io::DIRECTION_BOTH, ctx);
  } else {
    m_image_dispatch->set_require_lock(m_shutting_down,
                                       io::DIRECTION_WRITE, ctx);
  }
}

template <typename I>
void PreReleaseRequest<I>::handle_set_require_lock(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  if (r < 0) {
    // IOs are still flushed regardless of the error
    lderr(cct) << "failed to set lock: " << cpp_strerror(r) << dendl;
  }

  send_wait_for_ops();
}

// Wait for in-flight tracked operations to drain.
template <typename I>
void PreReleaseRequest<I>::send_wait_for_ops() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  Context *ctx = create_context_callback<
    PreReleaseRequest<I>, &PreReleaseRequest<I>::handle_wait_for_ops>(this);
  m_async_op_tracker.wait_for_ops(ctx);
}

template <typename I>
void PreReleaseRequest<I>::handle_wait_for_ops(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  send_prepare_lock();
}

// Coordinate the lock transition with the image state machine (skipped on
// shutdown, where the destructor also skips the matching completion call).
template <typename I>
void PreReleaseRequest<I>::send_prepare_lock() {
  if (m_shutting_down) {
    send_process_plugin_release_lock();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  // release the lock if the image is not busy performing other actions
  Context *ctx = create_context_callback<
    PreReleaseRequest<I>, &PreReleaseRequest<I>::handle_prepare_lock>(this);
  m_image_ctx.state->prepare_lock(ctx);
}

template <typename I>
void PreReleaseRequest<I>::handle_prepare_lock(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  // r is logged but otherwise ignored
  send_process_plugin_release_lock();
}

// Give registered plugins a chance to run their pre-release hooks.
template <typename I>
void PreReleaseRequest<I>::send_process_plugin_release_lock() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  std::shared_lock owner_lock{m_image_ctx.owner_lock};
  Context *ctx = create_async_context_callback(m_image_ctx, create_context_callback<
      PreReleaseRequest<I>,
      &PreReleaseRequest<I>::handle_process_plugin_release_lock>(this));
  m_image_ctx.plugin_registry->prerelease_exclusive_lock(ctx);
}

template <typename I>
void PreReleaseRequest<I>::handle_process_plugin_release_lock(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  if (r < 0) {
    lderr(cct) << "failed to handle plugins before releasing lock: "
               << cpp_strerror(r) << dendl;
    // abort the release: unblock IO again and report the failure
    m_image_dispatch->unset_require_lock(io::DIRECTION_BOTH);
    save_result(r);
    finish();
    return;
  }

  send_invalidate_cache();
}

// Drop any cached data; the next lock owner must read fresh state.
template <typename I>
void PreReleaseRequest<I>::send_invalidate_cache() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  Context *ctx = create_context_callback<
      PreReleaseRequest<I>,
      &PreReleaseRequest<I>::handle_invalidate_cache>(this);
  m_image_ctx.io_image_dispatcher->invalidate_cache(ctx);
}

template <typename I>
void PreReleaseRequest<I>::handle_invalidate_cache(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  // -EBLOCKLISTED / -EBUSY are tolerated: the release must still proceed
  if (r < 0 && r != -EBLOCKLISTED && r != -EBUSY) {
    lderr(cct) << "failed to invalidate cache: " << cpp_strerror(r)
               << dendl;
    m_image_dispatch->unset_require_lock(io::DIRECTION_BOTH);
    save_result(r);
    finish();
    return;
  }

  send_flush_io();
}

template <typename I>
void PreReleaseRequest<I>::send_flush_io() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  // ensure that all in-flight IO is flushed -- skipping the refresh layer
  // since it should have been flushed when the lock was required and now
  // refreshes are disabled / interlocked w/ this state machine.
  auto ctx = create_context_callback<
    PreReleaseRequest<I>, &PreReleaseRequest<I>::handle_flush_io>(this);
  auto aio_comp = io::AioCompletion::create_and_start(
    ctx, util::get_image_ctx(&m_image_ctx), librbd::io::AIO_TYPE_FLUSH);
  auto req = io::ImageDispatchSpec::create_flush(
    m_image_ctx, io::IMAGE_DISPATCH_LAYER_EXCLUSIVE_LOCK, aio_comp,
    io::FLUSH_SOURCE_EXCLUSIVE_LOCK_SKIP_REFRESH, {});
  req->send();
}

template <typename I>
void PreReleaseRequest<I>::handle_flush_io(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  if (r < 0) {
    // flush failure is logged but does not abort the release
    lderr(cct) << "failed to flush IO: " << cpp_strerror(r) << dendl;
  }

  send_flush_notifies();
}

// Flush in-flight watch/notify callbacks before tearing down journal /
// object map.
template <typename I>
void PreReleaseRequest<I>::send_flush_notifies() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PreReleaseRequest<I>;
  Context *ctx =
    create_context_callback<klass, &klass::handle_flush_notifies>(this);
  m_image_ctx.image_watcher->flush(ctx);
}

template <typename I>
void PreReleaseRequest<I>::handle_flush_notifies(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  ceph_assert(r == 0);
  send_close_journal();
}

// Detach the journal from the image context (under image_lock) and close it.
template <typename I>
void PreReleaseRequest<I>::send_close_journal() {
  {
    std::unique_lock image_locker{m_image_ctx.image_lock};
    std::swap(m_journal, m_image_ctx.journal);
  }

  if (m_journal == nullptr) {
    // journaling disabled / not open
    send_close_object_map();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PreReleaseRequest<I>;
  Context *ctx = create_context_callback<klass, &klass::handle_close_journal>(
    this);
  m_journal->close(ctx);
}

template <typename I>
void PreReleaseRequest<I>::handle_close_journal(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  if (r < 0) {
    // error implies some journal events were not flushed -- continue
    lderr(cct) << "failed to close journal: " << cpp_strerror(r) << dendl;
  }

  m_journal->put();
  m_journal = nullptr;

  send_close_object_map();
}

// Detach the object map from the image context (under image_lock) and
// close it.
template <typename I>
void PreReleaseRequest<I>::send_close_object_map() {
  {
    std::unique_lock image_locker{m_image_ctx.image_lock};
    std::swap(m_object_map, m_image_ctx.object_map);
  }

  if (m_object_map == nullptr) {
    // object map disabled / not open
    send_unlock();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  using klass = PreReleaseRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_close_object_map>(this, m_object_map);
  m_object_map->close(ctx);
}

template <typename I>
void PreReleaseRequest<I>::handle_close_object_map(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  if (r < 0) {
    // logged only; the release continues regardless
    lderr(cct) << "failed to close object map: " << cpp_strerror(r) << dendl;
  }
  m_object_map->put();

  send_unlock();
}

// The actual unlock is performed by the caller; this step only logs.
template <typename I>
void PreReleaseRequest<I>::send_unlock() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  finish();
}

// Complete the caller's context with the recorded result and self-destruct.
template <typename I>
void PreReleaseRequest<I>::finish() {
  m_on_finish->complete(m_error_result);
  delete this;
}

} // namespace exclusive_lock
} // namespace librbd
template class librbd::exclusive_lock::PreReleaseRequest<librbd::ImageCtx>;
| 10,457 | 27.730769 | 84 | cc |
null | ceph-main/src/librbd/exclusive_lock/PreReleaseRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_RELEASE_REQUEST_H
#define CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_RELEASE_REQUEST_H
#include "librbd/ImageCtx.h"
#include <string>
class AsyncOpTracker;
class Context;
namespace librbd {
struct ImageCtx;
namespace exclusive_lock {
template <typename> struct ImageDispatch;
/**
 * State machine executed before the exclusive lock is released: quiesces and
 * blocks IO, flushes caches and notifications, and closes the journal and
 * object map so the next lock owner starts from clean state. Self-deletes
 * after completing @c on_finish.
 */
template <typename ImageCtxT = ImageCtx>
class PreReleaseRequest {
public:
  /// Allocate a new request; it destroys itself once finished.
  static PreReleaseRequest* create(ImageCtxT &image_ctx,
                                   ImageDispatch<ImageCtxT>* image_dispatch,
                                   bool shutting_down,
                                   AsyncOpTracker &async_op_tracker,
                                   Context *on_finish);

  ~PreReleaseRequest();

  /// Kick off the state machine.
  void send();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * CANCEL_OP_REQUESTS
   *    |
   *    v
   * SET_REQUIRE_LOCK
   *    |
   *    v
   * WAIT_FOR_OPS
   *    |
   *    v
   * PREPARE_LOCK
   *    |
   *    v
   * PROCESS_PLUGIN_RELEASE
   *    |
   *    v
   * SHUT_DOWN_IMAGE_CACHE
   *    |
   *    v
   * INVALIDATE_CACHE
   *    |
   *    v
   * FLUSH_IO
   *    |
   *    v
   * FLUSH_NOTIFIES . . . . . . . . . . . . . .
   *    |                                     .
   *    v                                     .
   * CLOSE_JOURNAL                            .
   *    |               (journal disabled,    .
   *    v                object map enabled)  .
   * CLOSE_OBJECT_MAP < . . . . . . . . . . . .
   *    |                                     .
   *    v         (object map disabled)       .
   * <finish> < . . . . . . . . . . . . . . . .
   *
   * @endverbatim
   */
  PreReleaseRequest(ImageCtxT &image_ctx,
                    ImageDispatch<ImageCtxT>* image_dispatch,
                    bool shutting_down, AsyncOpTracker &async_op_tracker,
                    Context *on_finish);

  ImageCtxT &m_image_ctx;
  ImageDispatch<ImageCtxT>* m_image_dispatch;
  bool m_shutting_down;               // skips prepare_lock coordination
  AsyncOpTracker &m_async_op_tracker;
  Context *m_on_finish;               // completed with m_error_result

  int m_error_result = 0;             // first recorded failure

  // journal / object map detached from the image context during the release
  decltype(m_image_ctx.object_map) m_object_map = nullptr;
  decltype(m_image_ctx.journal) m_journal = nullptr;

  void send_cancel_op_requests();
  void handle_cancel_op_requests(int r);

  void send_set_require_lock();
  void handle_set_require_lock(int r);

  void send_wait_for_ops();
  void handle_wait_for_ops(int r);

  void send_prepare_lock();
  void handle_prepare_lock(int r);

  void send_process_plugin_release_lock();
  void handle_process_plugin_release_lock(int r);

  void send_invalidate_cache();
  void handle_invalidate_cache(int r);

  void send_flush_io();
  void handle_flush_io(int r);

  void send_flush_notifies();
  void handle_flush_notifies(int r);

  void send_close_journal();
  void handle_close_journal(int r);

  void send_close_object_map();
  void handle_close_object_map(int r);

  void send_unlock();

  void finish();

  // Record the first failure; later errors do not overwrite it.
  void save_result(int result) {
    if (m_error_result == 0 && result < 0) {
      m_error_result = result;
    }
  }
};
} // namespace exclusive_lock
} // namespace librbd
#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_RELEASE_REQUEST_H
| 3,252 | 22.235714 | 76 | h |
null | ceph-main/src/librbd/exclusive_lock/StandardPolicy.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/exclusive_lock/StandardPolicy.h"
#include "librbd/ImageCtx.h"
#include "librbd/ExclusiveLock.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ExclusiveLock::StandardPolicy "
namespace librbd {
namespace exclusive_lock {

// The standard policy never voluntarily surrenders the exclusive lock:
// any remote request is refused with -EROFS.
template <typename I>
int StandardPolicy<I>::lock_requested(bool force) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx->owner_lock));
  ceph_assert(m_image_ctx->exclusive_lock != nullptr);

  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << this << " " << __func__ << ": force=" << force << dendl;

  return -EROFS;
}

} // namespace exclusive_lock
} // namespace librbd
template class librbd::exclusive_lock::StandardPolicy<librbd::ImageCtx>;
| 825 | 26.533333 | 79 | cc |
null | ceph-main/src/librbd/exclusive_lock/StandardPolicy.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_STANDARD_POLICY_H
#define CEPH_LIBRBD_EXCLUSIVE_LOCK_STANDARD_POLICY_H
#include "librbd/exclusive_lock/Policy.h"
namespace librbd {
struct ImageCtx;
namespace exclusive_lock {
/**
 * Default exclusive-lock policy: never auto-requests the lock and refuses
 * remote lock requests (lock_requested returns -EROFS).
 */
template <typename ImageCtxT = ImageCtx>
class StandardPolicy : public Policy {
public:
  // NOTE(review): single-argument ctor is implicitly convertible; consider
  // marking it explicit -- confirm no caller relies on the conversion.
  StandardPolicy(ImageCtxT* image_ctx) : m_image_ctx(image_ctx) {
  }

  // Never proactively request the lock on behalf of queued operations.
  bool may_auto_request_lock() override {
    return false;
  }

  // Always refuses; see StandardPolicy.cc.
  int lock_requested(bool force) override;

private:
  ImageCtxT* m_image_ctx;
};
} // namespace exclusive_lock
} // namespace librbd
extern template class librbd::exclusive_lock::StandardPolicy<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_STANDARD_POLICY_H
| 812 | 20.394737 | 79 | h |
null | ceph-main/src/librbd/image/AttachChildRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/image/AttachChildRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/image/RefreshRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::AttachChildRequest: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace image {

using util::create_context_callback;
using util::create_rados_callback;

template <typename I>
AttachChildRequest<I>::AttachChildRequest(
    I *image_ctx, I *parent_image_ctx, const librados::snap_t &parent_snap_id,
    I *old_parent_image_ctx, const librados::snap_t &old_parent_snap_id,
    uint32_t clone_format, Context* on_finish)
  : m_image_ctx(image_ctx), m_parent_image_ctx(parent_image_ctx),
    m_parent_snap_id(parent_snap_id),
    m_old_parent_image_ctx(old_parent_image_ctx),
    m_old_parent_snap_id(old_parent_snap_id), m_clone_format(clone_format),
    m_on_finish(on_finish), m_cct(m_image_ctx->cct) {
}

// Dispatch to the v1 or v2 clone attach protocol based on the clone format.
template <typename I>
void AttachChildRequest<I>::send() {
  if (m_clone_format == 1) {
    v1_add_child();
  } else {
    v2_set_op_feature();
  }
}

// v1: register the child in the pool-wide RBD_CHILDREN object.
template <typename I>
void AttachChildRequest<I>::v1_add_child() {
  ldout(m_cct, 15) << dendl;

  librados::ObjectWriteOperation op;
  // note: v1 parent spec carries an empty pool namespace
  cls_client::add_child(&op, {m_parent_image_ctx->md_ctx.get_id(), "",
                              m_parent_image_ctx->id,
                              m_parent_snap_id}, m_image_ctx->id);

  using klass = AttachChildRequest<I>;
  librados::AioCompletion *comp =
    create_rados_callback<klass, &klass::handle_v1_add_child>(this);
  int r = m_image_ctx->md_ctx.aio_operate(RBD_CHILDREN, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}

template <typename I>
void AttachChildRequest<I>::handle_v1_add_child(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r < 0) {
    // -EEXIST is tolerated when re-attaching from an old parent
    if (r == -EEXIST && m_old_parent_image_ctx != nullptr) {
      ldout(m_cct, 5) << "child already exists" << dendl;
    } else {
      lderr(m_cct) << "couldn't add child: " << cpp_strerror(r) << dendl;
      finish(r);
      return;
    }
  }

  v1_refresh();
}

// v1: refresh the parent image so its snapshot protection state is current.
template <typename I>
void AttachChildRequest<I>::v1_refresh() {
  ldout(m_cct, 15) << dendl;

  using klass = AttachChildRequest<I>;
  RefreshRequest<I> *req = RefreshRequest<I>::create(
    *m_parent_image_ctx, false, false,
    create_context_callback<klass, &klass::handle_v1_refresh>(this));
  req->send();
}

template <typename I>
void AttachChildRequest<I>::handle_v1_refresh(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  // v1 clones require the parent snapshot to be protected
  bool snap_protected = false;
  if (r == 0) {
    std::shared_lock image_locker{m_parent_image_ctx->image_lock};
    r = m_parent_image_ctx->is_snap_protected(m_parent_snap_id,
                                              &snap_protected);
  }

  if (r < 0 || !snap_protected) {
    lderr(m_cct) << "validate protected failed" << dendl;
    finish(-EINVAL);
    return;
  }

  v1_remove_child_from_old_parent();
}

// v1: drop the registration against the previous parent, if any.
template <typename I>
void AttachChildRequest<I>::v1_remove_child_from_old_parent() {
  if (m_old_parent_image_ctx == nullptr) {
    finish(0);
    return;
  }

  ldout(m_cct, 15) << dendl;

  librados::ObjectWriteOperation op;
  cls_client::remove_child(&op, {m_old_parent_image_ctx->md_ctx.get_id(),
                                 m_old_parent_image_ctx->md_ctx.get_namespace(),
                                 m_old_parent_image_ctx->id,
                                 m_old_parent_snap_id}, m_image_ctx->id);

  using klass = AttachChildRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v1_remove_child_from_old_parent>(this);
  int r = m_image_ctx->md_ctx.aio_operate(RBD_CHILDREN, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}

template <typename I>
void AttachChildRequest<I>::handle_v1_remove_child_from_old_parent(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  // -ENOENT means the old registration was already gone
  if (r < 0 && r != -ENOENT) {
    lderr(m_cct) << "couldn't remove child: " << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }

  finish(0);
}

// v2: record the clone-child op feature in the child image header.
template <typename I>
void AttachChildRequest<I>::v2_set_op_feature() {
  ldout(m_cct, 15) << dendl;

  librados::ObjectWriteOperation op;
  cls_client::op_features_set(&op, RBD_OPERATION_FEATURE_CLONE_CHILD,
                              RBD_OPERATION_FEATURE_CLONE_CHILD);

  using klass = AttachChildRequest<I>;
  auto aio_comp = create_rados_callback<
    klass, &klass::handle_v2_set_op_feature>(this);
  int r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, aio_comp,
                                          &op);
  ceph_assert(r == 0);
  aio_comp->release();
}

template <typename I>
void AttachChildRequest<I>::handle_v2_set_op_feature(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_cct) << "failed to enable clone v2: " << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }

  v2_child_attach();
}

// v2: attach the child to the parent snapshot in the parent image header.
template <typename I>
void AttachChildRequest<I>::v2_child_attach() {
  ldout(m_cct, 15) << dendl;

  librados::ObjectWriteOperation op;
  cls_client::child_attach(&op, m_parent_snap_id,
                           {m_image_ctx->md_ctx.get_id(),
                            m_image_ctx->md_ctx.get_namespace(),
                            m_image_ctx->id});

  using klass = AttachChildRequest<I>;
  auto aio_comp = create_rados_callback<
    klass, &klass::handle_v2_child_attach>(this);
  int r = m_parent_image_ctx->md_ctx.aio_operate(m_parent_image_ctx->header_oid,
                                                 aio_comp, &op);
  ceph_assert(r == 0);
  aio_comp->release();
}

template <typename I>
void AttachChildRequest<I>::handle_v2_child_attach(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r < 0) {
    // -EEXIST is tolerated when re-attaching from an old parent
    if (r == -EEXIST && m_old_parent_image_ctx != nullptr) {
      ldout(m_cct, 5) << "child already exists" << dendl;
    } else {
      lderr(m_cct) << "failed to attach child image: " << cpp_strerror(r)
                   << dendl;
      finish(r);
      return;
    }
  }

  v2_child_detach_from_old_parent();
}

// v2: detach the child from the previous parent snapshot, if any.
template <typename I>
void AttachChildRequest<I>::v2_child_detach_from_old_parent() {
  if (m_old_parent_image_ctx == nullptr) {
    finish(0);
    return;
  }

  ldout(m_cct, 15) << dendl;

  librados::ObjectWriteOperation op;
  cls_client::child_detach(&op, m_old_parent_snap_id,
                           {m_image_ctx->md_ctx.get_id(),
                            m_image_ctx->md_ctx.get_namespace(),
                            m_image_ctx->id});

  using klass = AttachChildRequest<I>;
  auto aio_comp = create_rados_callback<
    klass, &klass::handle_v2_child_detach_from_old_parent>(this);
  int r = m_old_parent_image_ctx->md_ctx.aio_operate(
    m_old_parent_image_ctx->header_oid, aio_comp, &op);
  ceph_assert(r == 0);
  aio_comp->release();
}

template <typename I>
void AttachChildRequest<I>::handle_v2_child_detach_from_old_parent(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  // -ENOENT means the old attachment was already gone
  if (r < 0 && r != -ENOENT) {
    lderr(m_cct) << "failed to detach child image: " << cpp_strerror(r)
                 << dendl;
    finish(r);
    return;
  }

  finish(0);
}

// Complete the caller's context and self-destruct.
template <typename I>
void AttachChildRequest<I>::finish(int r) {
  ldout(m_cct, 5) << "r=" << r << dendl;

  m_on_finish->complete(r);
  delete this;
}

} // namespace image
} // namespace librbd
template class librbd::image::AttachChildRequest<librbd::ImageCtx>;
| 7,622 | 28.09542 | 80 | cc |
null | ceph-main/src/librbd/image/AttachChildRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGE_ATTACH_CHILD_REQUEST_H
#define CEPH_LIBRBD_IMAGE_ATTACH_CHILD_REQUEST_H
#include "include/common_fwd.h"
#include "include/int_types.h"
#include "include/rados/librados.hpp"
class Context;
namespace librbd {
class ImageCtx;
namespace image {
/**
 * Registers a cloned child image against its parent snapshot, using the v1
 * (RBD_CHILDREN object) or v2 (parent header child_attach) protocol, and
 * removes any registration against a previous parent. Self-deletes after
 * completing @c on_finish.
 */
template <typename ImageCtxT = ImageCtx>
class AttachChildRequest {
public:
  /// Allocate a new request; it destroys itself once finished.
  static AttachChildRequest* create(ImageCtxT *image_ctx,
                                    ImageCtxT *parent_image_ctx,
                                    const librados::snap_t &parent_snap_id,
                                    ImageCtxT *old_parent_image_ctx,
                                    const librados::snap_t &old_parent_snap_id,
                                    uint32_t clone_format,
                                    Context* on_finish) {
    return new AttachChildRequest(image_ctx, parent_image_ctx, parent_snap_id,
                                  old_parent_image_ctx, old_parent_snap_id,
                                  clone_format, on_finish);
  }

  AttachChildRequest(ImageCtxT *image_ctx,
                     ImageCtxT *parent_image_ctx,
                     const librados::snap_t &parent_snap_id,
                     ImageCtxT *old_parent_image_ctx,
                     const librados::snap_t &old_parent_snap_id,
                     uint32_t clone_format,
                     Context* on_finish);

  /// Kick off the state machine.
  void send();

private:
  /**
   * @verbatim
   *
   *                      <start>
   *    (clone v1)           |            (clone v2)
   *    /----------------/       \---------------\
   *    |                                        |
   *    v                                        v
   * V1 ADD CHILD                          V2 SET CLONE
   *    |                                        |
   *    v                                        v
   * V1 VALIDATE PROTECTED                 V2 ATTACH CHILD
   *    |                                        |
   *    |                                        v
   * V1 REMOVE CHILD FROM OLD PARENT       V2 DETACH CHILD FROM OLD PARENT
   *    |                                        |
   *    \----------------\       /---------------/
   *                         |
   *                         v
   *                      <finish>
   *
   * @endverbatim
   */
  ImageCtxT *m_image_ctx;                  // child image
  ImageCtxT *m_parent_image_ctx;           // new parent image
  librados::snap_t m_parent_snap_id;
  ImageCtxT *m_old_parent_image_ctx;       // previous parent (may be null)
  librados::snap_t m_old_parent_snap_id;
  uint32_t m_clone_format;                 // 1 or 2; selects the protocol
  Context* m_on_finish;

  CephContext *m_cct;

  void v1_add_child();
  void handle_v1_add_child(int r);

  void v1_refresh();
  void handle_v1_refresh(int r);

  void v1_remove_child_from_old_parent();
  void handle_v1_remove_child_from_old_parent(int r);

  void v2_set_op_feature();
  void handle_v2_set_op_feature(int r);

  void v2_child_attach();
  void handle_v2_child_attach(int r);

  void v2_child_detach_from_old_parent();
  void handle_v2_child_detach_from_old_parent(int r);

  void finish(int r);
};
} // namespace image
} // namespace librbd
extern template class librbd::image::AttachChildRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IMAGE_ATTACH_CHILD_REQUEST_H
| 3,250 | 29.669811 | 80 | h |
null | ceph-main/src/librbd/image/AttachParentRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/image/AttachParentRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::AttachParentRequest: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace image {

using util::create_rados_callback;

// Entry point: see the state diagram in AttachParentRequest.h.
template <typename I>
void AttachParentRequest<I>::send() {
  attach_parent();
}

// Record the parent spec in the image header, preferring the modern
// parent_attach call and falling back to the legacy set_parent call when
// the OSD does not support it.
template <typename I>
void AttachParentRequest<I>::attach_parent() {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << "parent_image_spec=" << m_parent_image_spec << dendl;

  librados::ObjectWriteOperation op;
  if (!m_legacy_parent) {
    librbd::cls_client::parent_attach(&op, m_parent_image_spec,
                                      m_parent_overlap, m_reattach);
  } else {
    librbd::cls_client::set_parent(&op, m_parent_image_spec, m_parent_overlap);
  }

  auto aio_comp = create_rados_callback<
    AttachParentRequest<I>,
    &AttachParentRequest<I>::handle_attach_parent>(this);
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, aio_comp, &op);
  ceph_assert(r == 0);
  aio_comp->release();
}

template <typename I>
void AttachParentRequest<I>::handle_attach_parent(int r) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << dendl;

  // -EOPNOTSUPP: the OSD lacks parent_attach; retry with the legacy call
  // if the parent spec can be expressed without a pool namespace
  if (!m_legacy_parent && r == -EOPNOTSUPP && !m_reattach) {
    if (m_parent_image_spec.pool_namespace ==
          m_image_ctx.md_ctx.get_namespace()) {
      // a same-namespace parent can be stored without the namespace field
      m_parent_image_spec.pool_namespace = "";
    }
    if (m_parent_image_spec.pool_namespace.empty()) {
      ldout(cct, 10) << "retrying using legacy parent method" << dendl;
      m_legacy_parent = true;
      attach_parent();
      return;
    }

    // namespaces require newer OSDs
    r = -EXDEV;
  }

  if (r < 0) {
    lderr(cct) << "attach parent encountered an error: " << cpp_strerror(r)
               << dendl;
    finish(r);
    return;
  }

  finish(0);
}

// Complete the caller's context and self-destruct.
template <typename I>
void AttachParentRequest<I>::finish(int r) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;

  m_on_finish->complete(r);
  delete this;
}

} // namespace image
} // namespace librbd
template class librbd::image::AttachParentRequest<librbd::ImageCtx>;
| 2,419 | 25.593407 | 80 | cc |
null | ceph-main/src/librbd/image/AttachParentRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGE_ATTACH_PARENT_REQUEST_H
#define CEPH_LIBRBD_IMAGE_ATTACH_PARENT_REQUEST_H
#include "include/int_types.h"
#include "include/buffer.h"
#include "include/rados/librados.hpp"
#include "librbd/Types.h"
class Context;
namespace librbd {
class ImageCtx;
namespace image {
/**
 * Writes a parent image spec (with overlap) into an image header,
 * transparently retrying with the legacy set_parent call when the OSD
 * does not support parent_attach. Self-deletes after completing
 * @c on_finish.
 */
template <typename ImageCtxT = ImageCtx>
class AttachParentRequest {
public:
  /// Allocate a new request; it destroys itself once finished.
  static AttachParentRequest* create(ImageCtxT& image_ctx,
                                     const cls::rbd::ParentImageSpec& pspec,
                                     uint64_t parent_overlap,
                                     bool reattach,
                                     Context* on_finish) {
    return new AttachParentRequest(image_ctx, pspec, parent_overlap, reattach,
                                   on_finish);
  }

  AttachParentRequest(ImageCtxT& image_ctx,
                      const cls::rbd::ParentImageSpec& pspec,
                      uint64_t parent_overlap, bool reattach,
                      Context* on_finish)
    : m_image_ctx(image_ctx), m_parent_image_spec(pspec),
      m_parent_overlap(parent_overlap), m_reattach(reattach),
      m_on_finish(on_finish) {
  }

  /// Kick off the state machine.
  void send();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |                  * * * * * *
   *    |                  *         * -EOPNOTSUPP
   *    v                  v         *
   * ATTACH_PARENT * * * * *
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */
  ImageCtxT& m_image_ctx;
  cls::rbd::ParentImageSpec m_parent_image_spec;
  uint64_t m_parent_overlap;
  bool m_reattach;
  Context* m_on_finish;

  // set after an -EOPNOTSUPP response to fall back to cls set_parent
  bool m_legacy_parent = false;

  void attach_parent();
  void handle_attach_parent(int r);

  void finish(int r);
};
} // namespace image
} // namespace librbd
extern template class librbd::image::AttachParentRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IMAGE_ATTACH_PARENT_REQUEST_H
| 1,961 | 23.525 | 78 | h |
null | ceph-main/src/librbd/image/CloneRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "cls/rbd/cls_rbd_client.h"
#include "cls/rbd/cls_rbd_types.h"
#include "common/dout.h"
#include "common/errno.h"
#include "include/ceph_assert.h"
#include "librbd/ImageState.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/deep_copy/MetadataCopyRequest.h"
#include "librbd/image/AttachChildRequest.h"
#include "librbd/image/AttachParentRequest.h"
#include "librbd/image/CloneRequest.h"
#include "librbd/image/CreateRequest.h"
#include "librbd/image/RemoveRequest.h"
#include "librbd/image/Types.h"
#include "librbd/mirror/EnableRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::CloneRequest: " << this << " " \
<< __func__ << ": "
#define MAX_KEYS 64
namespace librbd {
namespace image {
using util::create_rados_callback;
using util::create_context_callback;
using util::create_async_context_callback;
// Captures all clone source/destination parameters.  If the caller did
// not explicitly set an image format option, format 2 is forced (the
// state machine later rejects format < 2 outright).
template <typename I>
CloneRequest<I>::CloneRequest(
    ConfigProxy& config,
    IoCtx& parent_io_ctx,
    const std::string& parent_image_id,
    const std::string& parent_snap_name,
    const cls::rbd::SnapshotNamespace& parent_snap_namespace,
    uint64_t parent_snap_id,
    IoCtx &c_ioctx,
    const std::string &c_name,
    const std::string &c_id,
    ImageOptions c_options,
    cls::rbd::MirrorImageMode mirror_image_mode,
    const std::string &non_primary_global_image_id,
    const std::string &primary_mirror_uuid,
    asio::ContextWQ *op_work_queue, Context *on_finish)
  : m_config(config), m_parent_io_ctx(parent_io_ctx),
    m_parent_image_id(parent_image_id), m_parent_snap_name(parent_snap_name),
    m_parent_snap_namespace(parent_snap_namespace),
    m_parent_snap_id(parent_snap_id), m_ioctx(c_ioctx), m_name(c_name),
    m_id(c_id), m_opts(c_options), m_mirror_image_mode(mirror_image_mode),
    m_non_primary_global_image_id(non_primary_global_image_id),
    m_primary_mirror_uuid(primary_mirror_uuid),
    m_op_work_queue(op_work_queue), m_on_finish(on_finish),
    m_use_p_features(true) {  // inherit parent features unless overridden

  m_cct = reinterpret_cast<CephContext *>(m_ioctx.cct());

  // default to format 2 when the caller left the option unset
  bool default_format_set;
  m_opts.is_set(RBD_IMAGE_OPTION_FORMAT, &default_format_set);
  if (!default_format_set) {
    m_opts.set(RBD_IMAGE_OPTION_FORMAT, static_cast<uint64_t>(2));
  }

  ldout(m_cct, 20) << "parent_pool_id=" << parent_io_ctx.get_id() << ", "
                   << "parent_image_id=" << parent_image_id << ", "
                   << "parent_snap=" << parent_snap_name << "/"
                   << parent_snap_id << " clone to "
                   << "pool_id=" << m_ioctx.get_id() << ", "
                   << "name=" << m_name << ", "
                   << "opts=" << m_opts << dendl;
}
// Entry point: starts the state machine with local option validation
// (no cluster I/O is issued until open_parent()).
template <typename I>
void CloneRequest<I>::send() {
  ldout(m_cct, 20) << dendl;
  validate_options();
}
// Validates image options before any image is opened: clones require
// format 2 and a supportable feature set.  Also resolves the clone
// format (v1 vs v2) from the options, from rbd_default_clone_format, or
// -- for "auto" -- from the cluster's minimum compatible client release.
template <typename I>
void CloneRequest<I>::validate_options() {
  ldout(m_cct, 20) << dendl;

  uint64_t format = 0;
  m_opts.get(RBD_IMAGE_OPTION_FORMAT, &format);
  if (format < 2) {
    lderr(m_cct) << "format 2 or later required for clone" << dendl;
    complete(-EINVAL);
    return;
  }

  if (m_opts.get(RBD_IMAGE_OPTION_FEATURES, &m_features) == 0) {
    if (m_features & ~RBD_FEATURES_ALL) {
      lderr(m_cct) << "librbd does not support requested features" << dendl;
      complete(-ENOSYS);
      return;
    }
    // caller supplied an explicit feature set -- do not inherit the
    // parent's features later in validate_parent()
    m_use_p_features = false;
  }

  if (m_opts.get(RBD_IMAGE_OPTION_CLONE_FORMAT, &m_clone_format) < 0) {
    std::string default_clone_format = m_config.get_val<std::string>(
      "rbd_default_clone_format");
    if (default_clone_format == "1") {
      m_clone_format = 1;
    } else if (default_clone_format == "auto") {
      librados::Rados rados(m_ioctx);
      int8_t min_compat_client;
      int8_t require_min_compat_client;
      int r = rados.get_min_compatible_client(&min_compat_client,
                                              &require_min_compat_client);
      if (r < 0) {
        complete(r);
        return;
      }
      // clone v2 needs mimic-or-later clients; fall back to v1 when
      // older clients must still be supported
      if (std::max(min_compat_client, require_min_compat_client) <
            CEPH_RELEASE_MIMIC) {
        m_clone_format = 1;
      }
    }
  }

  if (m_clone_format == 1 &&
      m_parent_io_ctx.get_namespace() != m_ioctx.get_namespace()) {
    ldout(m_cct, 1) << "clone v2 required for cross-namespace clones" << dendl;
    complete(-EXDEV);
    return;
  }

  open_parent();
}
// Opens the parent image at the requested snapshot.  Exactly one of
// snapshot name or snapshot id must have been supplied by the caller
// (asserted below).
template <typename I>
void CloneRequest<I>::open_parent() {
  ldout(m_cct, 20) << dendl;
  ceph_assert(m_parent_snap_name.empty() ^ (m_parent_snap_id == CEPH_NOSNAP));

  if (m_parent_snap_id != CEPH_NOSNAP) {
    // snap id already resolved
    m_parent_image_ctx = I::create("", m_parent_image_id, m_parent_snap_id,
                                   m_parent_io_ctx, true);
  } else {
    // open by snap name; the namespace disambiguates identically-named
    // snapshots
    m_parent_image_ctx = I::create("", m_parent_image_id,
                                   m_parent_snap_name.c_str(),
                                   m_parent_io_ctx,
                                   true);
    m_parent_image_ctx->snap_namespace = m_parent_snap_namespace;
  }

  Context *ctx = create_context_callback<
    CloneRequest<I>, &CloneRequest<I>::handle_open_parent>(this);
  m_parent_image_ctx->state->open(OPEN_FLAG_SKIP_OPEN_PARENT, ctx);
}
// Parent open completed: capture the resolved snapshot id and build the
// fully-qualified parent spec used for the attach steps.
template <typename I>
void CloneRequest<I>::handle_open_parent(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r >= 0) {
    m_parent_snap_id = m_parent_image_ctx->snap_id;
    m_pspec = {m_parent_io_ctx.get_id(), m_parent_io_ctx.get_namespace(),
               m_parent_image_id, m_parent_snap_id};
    validate_parent();
    return;
  }

  // open failed -- drop our reference and surface the error
  m_parent_image_ctx = nullptr;
  lderr(m_cct) << "failed to open parent image: " << cpp_strerror(r) << dendl;
  complete(r);
}
// Sanity-checks the now-open parent: operations must be enabled, the
// source must be a new-format snapshot with layering, and -- for v1
// clones -- the snapshot must be protected.  The parent's feature set is
// inherited unless the caller supplied one.
template <typename I>
void CloneRequest<I>::validate_parent() {
  ldout(m_cct, 20) << dendl;

  if (m_parent_image_ctx->operations_disabled) {
    lderr(m_cct) << "image operations disabled due to unsupported op features"
                 << dendl;
    m_r_saved = -EROFS;
    close_parent();
    return;
  }

  if (m_parent_image_ctx->snap_id == CEPH_NOSNAP) {
    lderr(m_cct) << "image to be cloned must be a snapshot" << dendl;
    m_r_saved = -EINVAL;
    close_parent();
    return;
  }

  if (m_parent_image_ctx->old_format) {
    lderr(m_cct) << "parent image must be in new format" << dendl;
    m_r_saved = -EINVAL;
    close_parent();
    return;
  }

  // capture all parent state needed below under a single lock hold
  m_parent_image_ctx->image_lock.lock_shared();
  uint64_t p_features = m_parent_image_ctx->features;
  m_size = m_parent_image_ctx->get_image_size(m_parent_image_ctx->snap_id);

  bool snap_protected;
  int r = m_parent_image_ctx->is_snap_protected(m_parent_image_ctx->snap_id, &snap_protected);
  m_parent_image_ctx->image_lock.unlock_shared();

  if ((p_features & RBD_FEATURE_LAYERING) != RBD_FEATURE_LAYERING) {
    lderr(m_cct) << "parent image must support layering" << dendl;
    m_r_saved = -ENOSYS;
    close_parent();
    return;
  }
  if (m_use_p_features) {
    // no explicit feature set requested -- inherit the parent's
    m_features = p_features;
  }

  // NOTE: snap_protected is only valid when r >= 0
  if (r < 0) {
    lderr(m_cct) << "unable to locate parent's snapshot" << dendl;
    m_r_saved = r;
    close_parent();
    return;
  }

  if (m_clone_format == 1 && !snap_protected) {
    lderr(m_cct) << "parent snapshot must be protected" << dendl;
    m_r_saved = -EINVAL;
    close_parent();
    return;
  }

  validate_child();
}
// Verifies the clone's own feature set and probes the legacy (v1) header
// object for a destination name collision.
template <typename I>
void CloneRequest<I>::validate_child() {
  ldout(m_cct, 15) << dendl;

  // the child must itself support layering (single feature bit, so a
  // plain zero-test is equivalent)
  if ((m_features & RBD_FEATURE_LAYERING) == 0) {
    lderr(m_cct) << "cloning image must support layering" << dendl;
    m_r_saved = -ENOSYS;
    close_parent();
    return;
  }

  // stat the v1 header object: -ENOENT is the only acceptable answer
  librados::ObjectReadOperation op;
  op.stat(NULL, NULL, NULL);

  auto comp = create_rados_callback<
    CloneRequest<I>, &CloneRequest<I>::handle_validate_child>(this);
  int r = m_ioctx.aio_operate(util::old_header_name(m_name), comp, &op,
                              &m_out_bl);
  ceph_assert(r == 0);
  comp->release();
}
// Result of the v1-header stat probe.  Only -ENOENT means the name is
// free; any other outcome aborts the clone.
//
// Fix: a successful stat (r == 0) means an image with this name already
// exists, but the old code saved m_r_saved = 0 and the request would
// ultimately complete *successfully* without creating anything.  Map the
// collision to -EEXIST so the caller sees the failure.
template <typename I>
void CloneRequest<I>::handle_validate_child(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r != -ENOENT) {
    lderr(m_cct) << "rbd image " << m_name << " already exists" << dendl;
    m_r_saved = (r == 0 ? -EEXIST : r);
    close_parent();
    return;
  }

  create_child();
}
// Creates the child image.  Order and striping options default to the
// parent's values when the caller did not set them.  Mirror enablement
// is explicitly skipped here (CREATE_FLAG_SKIP_MIRROR_ENABLE) and is
// handled later by the state machine.
template <typename I>
void CloneRequest<I>::create_child() {
  ldout(m_cct, 15) << dendl;

  uint64_t order = m_parent_image_ctx->order;
  if (m_opts.get(RBD_IMAGE_OPTION_ORDER, &order) != 0) {
    m_opts.set(RBD_IMAGE_OPTION_ORDER, order);
  }
  m_opts.set(RBD_IMAGE_OPTION_FEATURES, m_features);

  uint64_t stripe_unit = m_parent_image_ctx->stripe_unit;
  if (m_opts.get(RBD_IMAGE_OPTION_STRIPE_UNIT, &stripe_unit) != 0) {
    m_opts.set(RBD_IMAGE_OPTION_STRIPE_UNIT, stripe_unit);
  }

  uint64_t stripe_count = m_parent_image_ctx->stripe_count;
  if (m_opts.get(RBD_IMAGE_OPTION_STRIPE_COUNT, &stripe_count) != 0) {
    m_opts.set(RBD_IMAGE_OPTION_STRIPE_COUNT, stripe_count);
  }

  using klass = CloneRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_create_child>(this);

  auto req = CreateRequest<I>::create(
    m_config, m_ioctx, m_name, m_id, m_size, m_opts,
    image::CREATE_FLAG_SKIP_MIRROR_ENABLE,
    cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, m_non_primary_global_image_id,
    m_primary_mirror_uuid, m_op_work_queue, ctx);
  req->send();
}
// Child creation completed.  -EBADF (image id in use) is surfaced to the
// caller directly; other errors trigger cleanup via close_parent().
template <typename I>
void CloneRequest<I>::handle_create_child(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r >= 0) {
    open_child();
    return;
  }

  if (r == -EBADF) {
    ldout(m_cct, 5) << "image id already in-use" << dendl;
    complete(r);
  } else {
    lderr(m_cct) << "error creating child: " << cpp_strerror(r) << dendl;
    m_r_saved = r;
    close_parent();
  }
}
// Opens the freshly-created child image so the parent can be attached.
template <typename I>
void CloneRequest<I>::open_child() {
  ldout(m_cct, 15) << dendl;

  m_imctx = I::create(m_name, "", nullptr, m_ioctx, false);

  // the parent is not linked yet, so skip opening it; tolerate the
  // MIGRATING feature bit when it was requested
  uint64_t open_flags = OPEN_FLAG_SKIP_OPEN_PARENT;
  if ((m_features & RBD_FEATURE_MIGRATING) != 0) {
    open_flags |= OPEN_FLAG_IGNORE_MIGRATING;
  }

  m_imctx->state->open(open_flags, create_context_callback<
    CloneRequest<I>, &CloneRequest<I>::handle_open_child>(this));
}
// Child open completed; on failure the on-disk child must be removed.
template <typename I>
void CloneRequest<I>::handle_open_child(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r >= 0) {
    attach_parent();
    return;
  }

  m_imctx = nullptr;
  lderr(m_cct) << "Error opening new image: " << cpp_strerror(r) << dendl;
  m_r_saved = r;
  remove_child();
}
// Records the parent spec and overlap in the child's metadata.
template <typename I>
void CloneRequest<I>::attach_parent() {
  ldout(m_cct, 15) << dendl;

  using klass = CloneRequest<I>;
  Context *on_attached = create_context_callback<
    klass, &klass::handle_attach_parent>(this);
  AttachParentRequest<I>::create(*m_imctx, m_pspec, m_size, false,
                                 on_attached)->send();
}
// Parent attach completed; failure tears down via close_child().
template <typename I>
void CloneRequest<I>::handle_attach_parent(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r >= 0) {
    attach_child();
    return;
  }

  lderr(m_cct) << "failed to attach parent: " << cpp_strerror(r) << dendl;
  m_r_saved = r;
  close_child();
}
// Registers the child against the parent snapshot, passing the requested
// clone format so the appropriate bookkeeping is performed.
template <typename I>
void CloneRequest<I>::attach_child() {
  ldout(m_cct, 15) << dendl;

  using klass = CloneRequest<I>;
  Context *on_attached = create_context_callback<
    klass, &klass::handle_attach_child>(this);
  AttachChildRequest<I>::create(m_imctx, m_parent_image_ctx,
                                m_parent_image_ctx->snap_id, nullptr, 0,
                                m_clone_format, on_attached)->send();
}
// Child attach completed; failure tears down via close_child().
//
// Fix: the error message previously said "failed to attach parent" --
// an obvious copy-paste from handle_attach_parent(); this handler runs
// after AttachChildRequest, so report the correct step.
template <typename I>
void CloneRequest<I>::handle_attach_child(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_cct) << "failed to attach child: " << cpp_strerror(r) << dendl;
    m_r_saved = r;
    close_child();
    return;
  }

  copy_metadata();
}
// Replicates the parent image's metadata key/value pairs onto the child.
template <typename I>
void CloneRequest<I>::copy_metadata() {
  ldout(m_cct, 15) << dendl;

  using klass = CloneRequest<I>;
  Context *on_copied = create_context_callback<
    klass, &klass::handle_copy_metadata>(this);
  deep_copy::MetadataCopyRequest<I>::create(m_parent_image_ctx, m_imctx,
                                            on_copied)->send();
}
// Metadata copy completed; failure tears down via close_child().
template <typename I>
void CloneRequest<I>::handle_copy_metadata(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r >= 0) {
    get_mirror_mode();
    return;
  }

  lderr(m_cct) << "failed to copy metadata: " << cpp_strerror(r) << dendl;
  m_r_saved = r;
  close_child();
}
// Decides whether the new clone needs mirroring enabled: always for
// rbd-mirror initiated clones (non-primary global id set), when
// explicitly requested via image options, or -- when journaling is
// enabled on the child -- depending on the pool-level mirror mode,
// which is queried asynchronously below.
template <typename I>
void CloneRequest<I>::get_mirror_mode() {
  ldout(m_cct, 15) << dendl;

  uint64_t mirror_image_mode;
  if (!m_non_primary_global_image_id.empty()) {
    // replicated clone -- mirroring is mandatory
    enable_mirror();
    return;
  } else if (m_opts.get(RBD_IMAGE_OPTION_MIRROR_IMAGE_MODE,
                        &mirror_image_mode) == 0) {
    // caller explicitly requested a mirror image mode
    m_mirror_image_mode = static_cast<cls::rbd::MirrorImageMode>(
      mirror_image_mode);
    enable_mirror();
    return;
  } else if (!m_imctx->test_features(RBD_FEATURE_JOURNALING)) {
    // no journaling -- skip the pool mirror-mode query entirely
    close_child();
    return;
  }

  librados::ObjectReadOperation op;
  cls_client::mirror_mode_get_start(&op);

  using klass = CloneRequest<I>;
  librados::AioCompletion *comp =
    create_rados_callback<klass, &klass::handle_get_mirror_mode>(this);
  m_out_bl.clear();
  m_imctx->md_ctx.aio_operate(RBD_MIRRORING,
                              comp, &op, &m_out_bl);
  comp->release();
}
// Pool mirror-mode query completed.  -ENOENT (mirroring object absent)
// is not treated as an error.  Only MIRROR_MODE_POOL triggers implicit
// (journal-based) mirror enablement; every other outcome falls through
// to closing the child.
template <typename I>
void CloneRequest<I>::handle_get_mirror_mode(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r == 0) {
    auto it = m_out_bl.cbegin();
    r = cls_client::mirror_mode_get_finish(&it, &m_mirror_mode);
  }

  if (r < 0 && r != -ENOENT) {
    lderr(m_cct) << "failed to retrieve mirror mode: " << cpp_strerror(r)
                 << dendl;
    m_r_saved = r;
  } else if (m_mirror_mode == cls::rbd::MIRROR_MODE_POOL) {
    m_mirror_image_mode = cls::rbd::MIRROR_IMAGE_MODE_JOURNAL;
    enable_mirror();
    return;
  }

  close_child();
}
// Enables mirroring on the new clone using the resolved mirror mode.
template <typename I>
void CloneRequest<I>::enable_mirror() {
  ldout(m_cct, 15) << dendl;

  auto ctx = create_context_callback<
    CloneRequest<I>, &CloneRequest<I>::handle_enable_mirror>(this);
  mirror::EnableRequest<I>::create(
    m_imctx, m_mirror_image_mode, m_non_primary_global_image_id, true,
    ctx)->send();
}
// Mirror enablement completed.  A failure is recorded in m_r_saved, which
// causes the close path to remove the new clone during teardown.
template <typename I>
void CloneRequest<I>::handle_enable_mirror(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_cct) << "failed to enable mirroring: " << cpp_strerror(r)
                 << dendl;
    m_r_saved = r;
  }
  close_child();
}
// Closes the child image context (success and failure paths converge
// here; handle_close_child decides whether cleanup is needed).
template <typename I>
void CloneRequest<I>::close_child() {
  ldout(m_cct, 15) << dendl;
  ceph_assert(m_imctx != nullptr);

  using klass = CloneRequest<I>;
  m_imctx->state->close(create_context_callback<
    klass, &klass::handle_close_child>(this));
}
// Child close completed.  If any earlier step failed (m_r_saved < 0) the
// partially-created clone is removed before closing the parent.
//
// Fix: the debug log previously printed only the function name; every
// sibling completion handler logs the result code, so include "r=" here
// for consistency and easier debugging.
template <typename I>
void CloneRequest<I>::handle_close_child(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  m_imctx = nullptr;

  if (r < 0) {
    lderr(m_cct) << "couldn't close image: " << cpp_strerror(r) << dendl;
    if (m_r_saved == 0) {
      m_r_saved = r;
    }
  }

  if (m_r_saved < 0) {
    remove_child();
    return;
  }
  close_parent();
}
// Best-effort rollback: removes the partially-created clone image.
template <typename I>
void CloneRequest<I>::remove_child() {
  ldout(m_cct, 15) << dendl;

  auto ctx = create_context_callback<
    CloneRequest<I>, &CloneRequest<I>::handle_remove_child>(this);
  librbd::image::RemoveRequest<I>::create(
    m_ioctx, m_name, m_id, false, false, m_no_op, m_op_work_queue,
    ctx)->send();
}
// Removal is best-effort: its result is logged but the original failure
// recorded in m_r_saved is what gets reported to the caller.
template <typename I>
void CloneRequest<I>::handle_remove_child(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_cct) << "Error removing failed clone: "
                 << cpp_strerror(r) << dendl;
  }

  close_parent();
}
// Closes the parent image context; all exit paths of the state machine
// funnel through here before completing.
template <typename I>
void CloneRequest<I>::close_parent() {
  ldout(m_cct, 20) << dendl;
  ceph_assert(m_parent_image_ctx != nullptr);

  using klass = CloneRequest<I>;
  m_parent_image_ctx->state->close(create_context_callback<
    klass, &klass::handle_close_parent>(this));
}
// Final async step: a close failure is only reported when no earlier
// error was recorded, then the saved result is surfaced to the caller.
template <typename I>
void CloneRequest<I>::handle_close_parent(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  m_parent_image_ctx = nullptr;

  if (r < 0) {
    lderr(m_cct) << "failed to close parent image: "
                 << cpp_strerror(r) << dendl;
    if (m_r_saved == 0) {
      m_r_saved = r;
    }
  }

  complete(m_r_saved);
}
// Invokes the user completion and destroys this request; no member may
// be touched after the delete.
template <typename I>
void CloneRequest<I>::complete(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  m_on_finish->complete(r);
  delete this;
}
} //namespace image
} //namespace librbd
template class librbd::image::CloneRequest<librbd::ImageCtx>;
| 16,558 | 26.235197 | 94 | cc |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGE_CLONE_REQUEST_H
#define CEPH_LIBRBD_IMAGE_CLONE_REQUEST_H
#include "cls/rbd/cls_rbd_types.h"
#include "common/config_fwd.h"
#include "librbd/internal.h"
#include "include/rbd/librbd.hpp"
class Context;
using librados::IoCtx;
namespace librbd {
namespace asio { struct ContextWQ; }
namespace image {
// Async state machine that creates a clone of a parent snapshot: it
// opens/validates the parent, creates and opens the child, links the
// two (parent attach + child attach), copies metadata, optionally
// enables mirroring, and cleans up the partially-created child on any
// failure.  The request destroys itself after invoking on_finish.
template <typename ImageCtxT = ImageCtx>
class CloneRequest {
public:
  // Allocates a request; the request deletes itself on completion.
  static CloneRequest *create(
      ConfigProxy& config, IoCtx& parent_io_ctx,
      const std::string& parent_image_id,
      const std::string& parent_snap_name,
      const cls::rbd::SnapshotNamespace& parent_snap_namespace,
      uint64_t parent_snap_id,
      IoCtx &c_ioctx, const std::string &c_name,
      const std::string &c_id, ImageOptions c_options,
      cls::rbd::MirrorImageMode mirror_image_mode,
      const std::string &non_primary_global_image_id,
      const std::string &primary_mirror_uuid,
      asio::ContextWQ *op_work_queue, Context *on_finish) {
    return new CloneRequest(config, parent_io_ctx, parent_image_id,
                            parent_snap_name, parent_snap_namespace,
                            parent_snap_id, c_ioctx, c_name, c_id, c_options,
                            mirror_image_mode, non_primary_global_image_id,
                            primary_mirror_uuid, op_work_queue, on_finish);
  }

  CloneRequest(ConfigProxy& config, IoCtx& parent_io_ctx,
               const std::string& parent_image_id,
               const std::string& parent_snap_name,
               const cls::rbd::SnapshotNamespace& parent_snap_namespace,
               uint64_t parent_snap_id,
               IoCtx &c_ioctx, const std::string &c_name,
               const std::string &c_id, ImageOptions c_options,
               cls::rbd::MirrorImageMode mirror_image_mode,
               const std::string &non_primary_global_image_id,
               const std::string &primary_mirror_uuid,
               asio::ContextWQ *op_work_queue, Context *on_finish);

  void send();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * OPEN PARENT
   *    |
   *    v
   * VALIDATE CHILD                    <finish>
   *    |                                 ^
   *    v                                 |
   * CREATE CHILD * * * * * * * * * > CLOSE PARENT
   *    |                                 ^
   *    v                                 |
   * OPEN CHILD * * * * * * * * * * > REMOVE CHILD
   *    |                                 ^
   *    v                                 |
   * ATTACH PARENT * * * * * * * * > CLOSE CHILD
   *    |                                 ^
   *    v                                 *
   * ATTACH CHILD * * * * * * * * * * * *
   *    |                                 *
   *    v                                 *
   * COPY META DATA * * * * * * * * * * ^
   *    |                                 *
   *    v (skip if not needed)            *
   * GET MIRROR MODE * * * * * * * * *  ^
   *    |                                 *
   *    v (skip if not needed)            *
   * SET MIRROR ENABLED * * * * * * * * *
   *    |
   *    v
   * CLOSE CHILD
   *    |
   *    v
   * CLOSE PARENT
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  // clone source (parent)
  ConfigProxy& m_config;
  IoCtx &m_parent_io_ctx;
  std::string m_parent_image_id;
  std::string m_parent_snap_name;
  cls::rbd::SnapshotNamespace m_parent_snap_namespace;
  uint64_t m_parent_snap_id;
  ImageCtxT *m_parent_image_ctx;

  // clone destination (child)
  IoCtx &m_ioctx;
  std::string m_name;
  std::string m_id;
  ImageOptions m_opts;
  cls::rbd::ParentImageSpec m_pspec;
  ImageCtxT *m_imctx;
  cls::rbd::MirrorMode m_mirror_mode = cls::rbd::MIRROR_MODE_DISABLED;
  cls::rbd::MirrorImageMode m_mirror_image_mode;
  const std::string m_non_primary_global_image_id;
  const std::string m_primary_mirror_uuid;
  NoOpProgressContext m_no_op;
  asio::ContextWQ *m_op_work_queue;
  Context *m_on_finish;

  CephContext *m_cct;
  uint64_t m_clone_format = 2;
  // true until the caller supplies an explicit feature set (then the
  // parent's features are not inherited)
  bool m_use_p_features;
  uint64_t m_features;
  bufferlist m_out_bl;
  uint64_t m_size;
  // first error encountered; reported to the caller after cleanup
  int m_r_saved = 0;

  void validate_options();

  void open_parent();
  void handle_open_parent(int r);

  void validate_parent();

  void validate_child();
  void handle_validate_child(int r);

  void create_child();
  void handle_create_child(int r);

  void open_child();
  void handle_open_child(int r);

  void attach_parent();
  void handle_attach_parent(int r);

  void attach_child();
  void handle_attach_child(int r);

  void copy_metadata();
  void handle_copy_metadata(int r);

  void get_mirror_mode();
  void handle_get_mirror_mode(int r);

  void enable_mirror();
  void handle_enable_mirror(int r);

  void close_child();
  void handle_close_child(int r);

  void remove_child();
  void handle_remove_child(int r);

  void close_parent();
  void handle_close_parent(int r);

  void complete(int r);
};
} //namespace image
} //namespace librbd
extern template class librbd::image::CloneRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IMAGE_CLONE_REQUEST_H
| 5,123 | 27.153846 | 77 | h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/image/CloseRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ConfigWatcher.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/ImageWatcher.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatcher.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::CloseRequest: "
namespace librbd {
namespace image {
using util::create_async_context_callback;
using util::create_context_callback;
// Stores the image context and user callback; all teardown work is
// driven from send().
template <typename I>
CloseRequest<I>::CloseRequest(I *image_ctx, Context *on_finish)
  : m_image_ctx(image_ctx), m_on_finish(on_finish), m_error_result(0),
    m_exclusive_lock(nullptr) {
  ceph_assert(image_ctx != nullptr);
}
// Entry point: synchronously shuts down and frees the config watcher
// (if any), then starts the async close sequence.
template <typename I>
void CloseRequest<I>::send() {
  if (m_image_ctx->config_watcher != nullptr) {
    m_image_ctx->config_watcher->shut_down();

    delete m_image_ctx->config_watcher;
    m_image_ctx->config_watcher = nullptr;
  }

  send_block_image_watcher();
}
// Blocks incoming watch notifications, or skips straight ahead when no
// image watcher was ever registered.
template <typename I>
void CloseRequest<I>::send_block_image_watcher() {
  if (m_image_ctx->image_watcher == nullptr) {
    send_shut_down_update_watchers();
    return;
  }

  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  // prevent incoming requests from our peers
  auto ctx = create_context_callback<
    CloseRequest<I>, &CloseRequest<I>::handle_block_image_watcher>(this);
  m_image_ctx->image_watcher->block_notifies(ctx);
}
// Notifications are now blocked; proceed to the update watchers.
template <typename I>
void CloseRequest<I>::handle_block_image_watcher(int r) {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;

  send_shut_down_update_watchers();
}
// Shuts down the image-state update watchers; the completion is bounced
// through the async callback helper.
template <typename I>
void CloseRequest<I>::send_shut_down_update_watchers() {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  auto ctx = create_context_callback<
    CloseRequest<I>, &CloseRequest<I>::handle_shut_down_update_watchers>(this);
  m_image_ctx->state->shut_down_update_watchers(
    create_async_context_callback(*m_image_ctx, ctx));
}
// Failure here is recorded but does not abort the close sequence.
template <typename I>
void CloseRequest<I>::handle_shut_down_update_watchers(int r) {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;

  save_result(r);
  if (r < 0) {
    lderr(cct) << "failed to shut down update watchers: " << cpp_strerror(r)
               << dendl;
  }

  send_flush();
}
// Flushes all in-flight I/O before the exclusive lock is torn down; the
// flush is dispatched while holding the owner lock (shared).
template <typename I>
void CloseRequest<I>::send_flush() {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  std::shared_lock owner_locker{m_image_ctx->owner_lock};
  auto ctx = create_context_callback<
    CloseRequest<I>, &CloseRequest<I>::handle_flush>(this);
  auto aio_comp = io::AioCompletion::create_and_start(ctx, m_image_ctx,
                                                      io::AIO_TYPE_FLUSH);
  auto req = io::ImageDispatchSpec::create_flush(
    *m_image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, aio_comp,
    io::FLUSH_SOURCE_SHUTDOWN, {});
  req->send();
}
// NOTE: unlike the other callbacks, a flush error is logged but NOT
// folded into m_error_result.
template <typename I>
void CloseRequest<I>::handle_flush(int r) {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;

  if (r < 0) {
    lderr(cct) << "failed to flush IO: " << cpp_strerror(r) << dendl;
  }
  send_shut_down_exclusive_lock();
}
// Shuts down the exclusive lock (which flushes/cancels in-flight work
// first).  When no exclusive lock exists, any open object map (possible
// when reading a snapshot) is released inline and the step is skipped.
template <typename I>
void CloseRequest<I>::send_shut_down_exclusive_lock() {
  {
    std::unique_lock owner_locker{m_image_ctx->owner_lock};
    m_exclusive_lock = m_image_ctx->exclusive_lock;

    // if reading a snapshot -- possible object map is open
    std::unique_lock image_locker{m_image_ctx->image_lock};
    if (m_exclusive_lock == nullptr && m_image_ctx->object_map) {
      m_image_ctx->object_map->put();
      m_image_ctx->object_map = nullptr;
    }
  }

  if (m_exclusive_lock == nullptr) {
    send_unregister_image_watcher();
    return;
  }

  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  // in-flight IO will be flushed and in-flight requests will be canceled
  // before releasing lock
  m_exclusive_lock->shut_down(create_context_callback<
    CloseRequest<I>, &CloseRequest<I>::handle_shut_down_exclusive_lock>(this));
}
// Exclusive lock shutdown completed: asserts that the lock, journal and
// object map were all released, then drops our reference to the lock.
template <typename I>
void CloseRequest<I>::handle_shut_down_exclusive_lock(int r) {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;

  {
    std::shared_lock owner_locker{m_image_ctx->owner_lock};
    ceph_assert(m_image_ctx->exclusive_lock == nullptr);

    // object map and journal closed during exclusive lock shutdown
    std::shared_lock image_locker{m_image_ctx->image_lock};
    ceph_assert(m_image_ctx->journal == nullptr);
    ceph_assert(m_image_ctx->object_map == nullptr);
  }

  m_exclusive_lock->put();
  m_exclusive_lock = nullptr;

  save_result(r);
  if (r < 0) {
    lderr(cct) << "failed to shut down exclusive lock: " << cpp_strerror(r)
               << dendl;
  }

  send_unregister_image_watcher();
}
// Unregisters the image watch, or skips ahead when none was registered.
template <typename I>
void CloseRequest<I>::send_unregister_image_watcher() {
  if (m_image_ctx->image_watcher == nullptr) {
    send_flush_readahead();
    return;
  }

  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  auto ctx = create_context_callback<
    CloseRequest<I>, &CloseRequest<I>::handle_unregister_image_watcher>(this);
  m_image_ctx->image_watcher->unregister_watch(ctx);
}
// Failure here is recorded but does not abort the close sequence.
template <typename I>
void CloseRequest<I>::handle_unregister_image_watcher(int r) {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;

  save_result(r);
  if (r < 0) {
    lderr(cct) << "failed to unregister image watcher: " << cpp_strerror(r)
               << dendl;
  }

  send_flush_readahead();
}
// Waits for any pending readahead requests to drain.
template <typename I>
void CloseRequest<I>::send_flush_readahead() {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  auto ctx = create_context_callback<
    CloseRequest<I>, &CloseRequest<I>::handle_flush_readahead>(this);
  m_image_ctx->readahead.wait_for_pending(
    create_async_context_callback(*m_image_ctx, ctx));
}
// Readahead drained; continue with the image dispatcher.
template <typename I>
void CloseRequest<I>::handle_flush_readahead(int r) {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;

  send_shut_down_image_dispatcher();
}
// Shuts down the image-level I/O dispatcher.
template <typename I>
void CloseRequest<I>::send_shut_down_image_dispatcher() {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  auto ctx = create_context_callback<
    CloseRequest<I>,
    &CloseRequest<I>::handle_shut_down_image_dispatcher>(this);
  m_image_ctx->io_image_dispatcher->shut_down(ctx);
}
// Failure here is recorded but does not abort the close sequence.
template <typename I>
void CloseRequest<I>::handle_shut_down_image_dispatcher(int r) {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;

  save_result(r);
  if (r < 0) {
    lderr(cct) << "failed to shut down image dispatcher: "
               << cpp_strerror(r) << dendl;
  }

  send_shut_down_object_dispatcher();
}
// Shuts down the object-level I/O dispatcher.
template <typename I>
void CloseRequest<I>::send_shut_down_object_dispatcher() {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  auto ctx = create_context_callback<
    CloseRequest<I>,
    &CloseRequest<I>::handle_shut_down_object_dispatcher>(this);
  m_image_ctx->io_object_dispatcher->shut_down(ctx);
}
// Failure here is recorded but does not abort the close sequence.
template <typename I>
void CloseRequest<I>::handle_shut_down_object_dispatcher(int r) {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;

  save_result(r);
  if (r < 0) {
    lderr(cct) << "failed to shut down object dispatcher: "
               << cpp_strerror(r) << dendl;
  }

  send_flush_op_work_queue();
}
// Queues a completion behind any already-queued ops so the op work
// queue drains before the parent is closed.
template <typename I>
void CloseRequest<I>::send_flush_op_work_queue() {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  auto ctx = create_context_callback<
    CloseRequest<I>, &CloseRequest<I>::handle_flush_op_work_queue>(this);
  m_image_ctx->op_work_queue->queue(ctx, 0);
}
// Op work queue drained; continue with the parent image.
template <typename I>
void CloseRequest<I>::handle_flush_op_work_queue(int r) {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
  send_close_parent();
}
// Recursively closes the parent image context (skipped when the image
// has no parent).
template <typename I>
void CloseRequest<I>::send_close_parent() {
  if (m_image_ctx->parent == nullptr) {
    send_flush_image_watcher();
    return;
  }

  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  auto ctx = create_context_callback<
    CloseRequest<I>, &CloseRequest<I>::handle_close_parent>(this);
  m_image_ctx->parent->state->close(
    create_async_context_callback(*m_image_ctx, ctx));
}
// Parent closed; any error is recorded but the close continues.
template <typename I>
void CloseRequest<I>::handle_close_parent(int r) {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;

  m_image_ctx->parent = nullptr;
  save_result(r);
  if (r < 0) {
    lderr(cct) << "error closing parent image: " << cpp_strerror(r) << dendl;
  }
  send_flush_image_watcher();
}
// Flushes the image watcher, or finishes directly when none exists.
template <typename I>
void CloseRequest<I>::send_flush_image_watcher() {
  if (m_image_ctx->image_watcher == nullptr) {
    finish();
    return;
  }

  auto ctx = create_context_callback<
    CloseRequest<I>, &CloseRequest<I>::handle_flush_image_watcher>(this);
  m_image_ctx->image_watcher->flush(ctx);
}
// Watcher flushed; any error is recorded before finishing.
template <typename I>
void CloseRequest<I>::handle_flush_image_watcher(int r) {
  CephContext *cct = m_image_ctx->cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;

  if (r < 0) {
    lderr(cct) << "error flushing image watcher: " << cpp_strerror(r) << dendl;
  }
  save_result(r);
  finish();
}
// Final step: releases remaining image resources, reports the first
// error recorded during shutdown (0 when all steps succeeded), and
// destroys this request.
template <typename I>
void CloseRequest<I>::finish() {
  m_image_ctx->shutdown();
  m_on_finish->complete(m_error_result);
  delete this;
}
} // namespace image
} // namespace librbd
template class librbd::image::CloseRequest<librbd::ImageCtx>;
| 10,339 | 28.458689 | 83 | cc |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGE_CLOSE_REQUEST_H
#define CEPH_LIBRBD_IMAGE_CLOSE_REQUEST_H
#include "librbd/ImageCtx.h"
class Context;
namespace librbd {
class ImageCtx;
namespace image {
// Async state machine that closes an open image: blocks/unregisters
// watchers, flushes I/O, shuts down the exclusive lock and dispatchers,
// closes the parent, and finally reports the first error encountered.
// The request destroys itself after invoking on_finish.
template <typename ImageCtxT = ImageCtx>
class CloseRequest {
public:
  // Allocates a request; the request deletes itself on completion.
  static CloseRequest *create(ImageCtxT *image_ctx, Context *on_finish) {
    return new CloseRequest(image_ctx, on_finish);
  }

  void send();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * BLOCK_IMAGE_WATCHER (skip if R/O)
   *    |
   *    v
   * SHUT_DOWN_UPDATE_WATCHERS
   *    |
   *    v
   * FLUSH
   *    |
   *    v (skip if disabled)
   * SHUT_DOWN_EXCLUSIVE_LOCK
   *    |
   *    v
   * UNREGISTER_IMAGE_WATCHER (skip if R/O)
   *    |
   *    v
   * FLUSH_READAHEAD
   *    |
   *    v
   * SHUT_DOWN_IMAGE_DISPATCHER
   *    |
   *    v
   * SHUT_DOWN_OBJECT_DISPATCHER
   *    |
   *    v
   * FLUSH_OP_WORK_QUEUE
   *    |
   *    v (skip if no parent)
   * CLOSE_PARENT
   *    |
   *    v
   * FLUSH_IMAGE_WATCHER
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */

  CloseRequest(ImageCtxT *image_ctx, Context *on_finish);

  ImageCtxT *m_image_ctx;
  Context *m_on_finish;

  // first error recorded during the shutdown sequence (see save_result)
  int m_error_result;

  decltype(m_image_ctx->exclusive_lock) m_exclusive_lock;

  void send_block_image_watcher();
  void handle_block_image_watcher(int r);

  void send_shut_down_update_watchers();
  void handle_shut_down_update_watchers(int r);

  void send_flush();
  void handle_flush(int r);

  void send_shut_down_exclusive_lock();
  void handle_shut_down_exclusive_lock(int r);

  void send_unregister_image_watcher();
  void handle_unregister_image_watcher(int r);

  void send_flush_readahead();
  void handle_flush_readahead(int r);

  void send_shut_down_image_dispatcher();
  void handle_shut_down_image_dispatcher(int r);

  void send_shut_down_object_dispatcher();
  void handle_shut_down_object_dispatcher(int r);

  void send_flush_op_work_queue();
  void handle_flush_op_work_queue(int r);

  void send_close_parent();
  void handle_close_parent(int r);

  void send_flush_image_watcher();
  void handle_flush_image_watcher(int r);

  void finish();

  // keep only the first error; later failures are logged but ignored
  void save_result(int result) {
    if (m_error_result == 0 && result < 0) {
      m_error_result = result;
    }
  }
};
} // namespace image
} // namespace librbd
extern template class librbd::image::CloseRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IMAGE_CLOSE_REQUEST_H
| 2,543 | 18.875 | 73 | h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/image/CreateRequest.h"
#include "include/ceph_assert.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/ceph_context.h"
#include "cls/rbd/cls_rbd_client.h"
#include "osdc/Striper.h"
#include "librbd/Features.h"
#include "librbd/Journal.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/image/Types.h"
#include "librbd/image/ValidatePoolRequest.h"
#include "librbd/journal/CreateRequest.h"
#include "librbd/journal/RemoveRequest.h"
#include "librbd/journal/TypeTraits.h"
#include "librbd/mirror/EnableRequest.h"
#include "journal/Journaler.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::CreateRequest: " << __func__ \
<< ": "
namespace librbd {
namespace image {
using util::create_rados_callback;
using util::create_context_callback;
namespace {
// Verify that a requested feature set is supported and internally
// consistent (fast-diff needs object-map; object-map and journaling both
// need exclusive-lock).  Returns 0 on success, -ENOSYS for unknown bits,
// -EINVAL for invalid combinations; the reason is logged.
int validate_features(CephContext *cct, uint64_t features) {
  if ((features & ~RBD_FEATURES_ALL) != 0) {
    lderr(cct) << "librbd does not support requested features." << dendl;
    return -ENOSYS;
  }
  if ((features & RBD_FEATURES_INTERNAL) != 0) {
    lderr(cct) << "cannot use internally controlled features" << dendl;
    return -EINVAL;
  }

  const bool has_object_map = (features & RBD_FEATURE_OBJECT_MAP) != 0;
  const bool has_exclusive_lock = (features & RBD_FEATURE_EXCLUSIVE_LOCK) != 0;
  if ((features & RBD_FEATURE_FAST_DIFF) != 0 && !has_object_map) {
    lderr(cct) << "cannot use fast diff without object map" << dendl;
    return -EINVAL;
  }
  if (has_object_map && !has_exclusive_lock) {
    lderr(cct) << "cannot use object map without exclusive lock" << dendl;
    return -EINVAL;
  }
  if ((features & RBD_FEATURE_JOURNALING) != 0 && !has_exclusive_lock) {
    lderr(cct) << "cannot use journaling without exclusive lock" << dendl;
    return -EINVAL;
  }

  return 0;
}
// Validate the stripe-unit/stripe-count pair against the object size
// implied by `order`.  Both must be given together (or both omitted); the
// unit must evenly divide the object size and be at least 512 bytes.
int validate_striping(CephContext *cct, uint8_t order, uint64_t stripe_unit,
                      uint64_t stripe_count) {
  const uint64_t object_size = 1ull << order;
  const bool has_unit = (stripe_unit != 0);
  const bool has_count = (stripe_count != 0);

  if (has_unit != has_count) {
    lderr(cct) << "must specify both (or neither) of stripe-unit and "
               << "stripe-count" << dendl;
    return -EINVAL;
  }
  if (has_unit) {
    if ((object_size % stripe_unit) != 0 || stripe_unit > object_size) {
      lderr(cct) << "stripe unit is not a factor of the object size" << dendl;
      return -EINVAL;
    }
    if (stripe_unit < 512) {
      lderr(cct) << "stripe unit must be at least 512 bytes" << dendl;
      return -EINVAL;
    }
  }
  return 0;
}
// Check whether an object map can represent an image of `size` bytes with
// the given layout; logs and returns false when it cannot.
bool validate_layout(CephContext *cct, uint64_t size, file_layout_t &layout) {
  const bool compatible = librbd::ObjectMap<>::is_compatible(layout, size);
  if (!compatible) {
    lderr(cct) << "image size not compatible with object map" << dendl;
  }
  return compatible;
}
// Fetch a single image option as a uint8_t.  Options are stored internally
// as 64-bit values, so the result is narrowed; callers only use this for
// options whose valid range fits in one byte (order, journal order, ...).
int get_image_option(const ImageOptions &image_options, int option,
                     uint8_t *value) {
  uint64_t large_value;
  int r = image_options.get(option, &large_value);
  if (r < 0) {
    return r;
  }
  *value = static_cast<uint8_t>(large_value);
  return 0;
}
} // anonymous namespace
template<typename I>
int CreateRequest<I>::validate_order(CephContext *cct, uint8_t order) {
  // Object sizes are restricted to 2^12 (4 KiB) .. 2^25 (32 MiB).
  if (order < 12 || order > 25) {
    lderr(cct) << "order must be in the range [12, 25]" << dendl;
    return -EDOM;
  }
  return 0;
}
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::CreateRequest: " << this << " " \
<< __func__ << ": "
template<typename I>
CreateRequest<I>::CreateRequest(const ConfigProxy& config, IoCtx &ioctx,
                                const std::string &image_name,
                                const std::string &image_id, uint64_t size,
                                const ImageOptions &image_options,
                                uint32_t create_flags,
                                cls::rbd::MirrorImageMode mirror_image_mode,
                                const std::string &non_primary_global_image_id,
                                const std::string &primary_mirror_uuid,
                                asio::ContextWQ *op_work_queue,
                                Context *on_finish)
  : m_config(config), m_image_name(image_name), m_image_id(image_id),
    m_size(size), m_create_flags(create_flags),
    m_mirror_image_mode(mirror_image_mode),
    m_non_primary_global_image_id(non_primary_global_image_id),
    m_primary_mirror_uuid(primary_mirror_uuid),
    m_op_work_queue(op_work_queue), m_on_finish(on_finish) {
  m_io_ctx.dup(ioctx);
  m_cct = reinterpret_cast<CephContext *>(m_io_ctx.cct());

  // precompute the object names touched by the state machine
  m_id_obj = util::id_obj_name(m_image_name);
  m_header_obj = util::header_name(m_image_id);
  m_objmap_name = ObjectMap<>::object_map_name(m_image_id, CEPH_NOSNAP);

  // creating a secondary mirror image implies mirroring must be enabled
  // unless the caller explicitly chose a mirror-enable behavior
  if (!non_primary_global_image_id.empty() &&
      (m_create_flags & CREATE_FLAG_MIRROR_ENABLE_MASK) == 0) {
    m_create_flags |= CREATE_FLAG_FORCE_MIRROR_ENABLE;
  }

  // no explicit feature set requested -- fall back to the configured
  // defaults and later clamp them to what the OSDs support
  if (image_options.get(RBD_IMAGE_OPTION_FEATURES, &m_features) != 0) {
    m_features = librbd::rbd_features_from_string(
      m_config.get_val<std::string>("rbd_default_features"), nullptr);
    m_negotiate_features = true;
  }

  uint64_t features_clear = 0;
  uint64_t features_set = 0;
  image_options.get(RBD_IMAGE_OPTION_FEATURES_CLEAR, &features_clear);
  image_options.get(RBD_IMAGE_OPTION_FEATURES_SET, &features_set);

  // a bit requested both set and cleared is dropped from both masks
  uint64_t features_conflict = features_clear & features_set;
  features_clear &= ~features_conflict;
  features_set &= ~features_conflict;
  m_features |= features_set;
  m_features &= ~features_clear;

  // implicitly enabled features are derived below, never taken from input
  m_features &= ~RBD_FEATURES_IMPLICIT_ENABLE;
  if ((m_features & RBD_FEATURE_OBJECT_MAP) == RBD_FEATURE_OBJECT_MAP) {
    m_features |= RBD_FEATURE_FAST_DIFF;
  }

  if (image_options.get(RBD_IMAGE_OPTION_STRIPE_UNIT, &m_stripe_unit) != 0 ||
      m_stripe_unit == 0) {
    m_stripe_unit = m_config.get_val<Option::size_t>("rbd_default_stripe_unit");
  }
  if (image_options.get(RBD_IMAGE_OPTION_STRIPE_COUNT, &m_stripe_count) != 0 ||
      m_stripe_count == 0) {
    m_stripe_count = m_config.get_val<uint64_t>("rbd_default_stripe_count");
  }
  if (get_image_option(image_options, RBD_IMAGE_OPTION_ORDER, &m_order) != 0 ||
      m_order == 0) {
    // consistency: use the m_config member like every other lookup here
    m_order = m_config.get_val<uint64_t>("rbd_default_order");
  }
  if (get_image_option(image_options, RBD_IMAGE_OPTION_JOURNAL_ORDER,
                       &m_journal_order) != 0) {
    m_journal_order = m_config.get_val<uint64_t>("rbd_journal_order");
  }
  if (get_image_option(image_options, RBD_IMAGE_OPTION_JOURNAL_SPLAY_WIDTH,
                       &m_journal_splay_width) != 0) {
    m_journal_splay_width = m_config.get_val<uint64_t>(
      "rbd_journal_splay_width");
  }
  if (image_options.get(RBD_IMAGE_OPTION_JOURNAL_POOL, &m_journal_pool) != 0) {
    m_journal_pool = m_config.get_val<std::string>("rbd_journal_pool");
  }
  if (image_options.get(RBD_IMAGE_OPTION_DATA_POOL, &m_data_pool) != 0) {
    m_data_pool = m_config.get_val<std::string>("rbd_default_data_pool");
  }

  m_layout.object_size = 1ull << m_order;
  if (m_stripe_unit == 0 || m_stripe_count == 0) {
    m_layout.stripe_unit = m_layout.object_size;
    m_layout.stripe_count = 1;
  } else {
    m_layout.stripe_unit = m_stripe_unit;
    m_layout.stripe_count = m_stripe_count;
  }

  // a data pool identical to the metadata pool is treated as unset
  if (!m_data_pool.empty() && m_data_pool != ioctx.get_pool_name()) {
    m_features |= RBD_FEATURE_DATA_POOL;
  } else {
    m_data_pool.clear();
  }

  // non-trivial striping requires the striping v2 feature bit
  if ((m_stripe_unit != 0 && m_stripe_unit != (1ULL << m_order)) ||
      (m_stripe_count != 0 && m_stripe_count != 1)) {
    m_features |= RBD_FEATURE_STRIPINGV2;
  }

  ldout(m_cct, 10) << "name=" << m_image_name << ", "
                   << "id=" << m_image_id << ", "
                   << "size=" << m_size << ", "
                   << "features=" << m_features << ", "
                   << "order=" << (uint64_t)m_order << ", "
                   << "stripe_unit=" << m_stripe_unit << ", "
                   << "stripe_count=" << m_stripe_count << ", "
                   << "journal_order=" << (uint64_t)m_journal_order << ", "
                   << "journal_splay_width="
                   << (uint64_t)m_journal_splay_width << ", "
                   << "journal_pool=" << m_journal_pool << ", "
                   << "data_pool=" << m_data_pool << dendl;
}
// Entry point: validate all creation parameters locally before issuing any
// cluster operations, then start the async state machine.
template<typename I>
void CreateRequest<I>::send() {
  ldout(m_cct, 20) << dendl;

  int r = validate_features(m_cct, m_features);
  if (r == 0) {
    r = validate_order(m_cct, m_order);
  }
  if (r == 0) {
    r = validate_striping(m_cct, m_order, m_stripe_unit, m_stripe_count);
  }
  if (r == 0 && ((m_features & RBD_FEATURE_OBJECT_MAP) != 0) &&
      !validate_layout(m_cct, m_size, m_layout)) {
    r = -EINVAL;
  }

  if (r < 0) {
    complete(r);
    return;
  }

  validate_data_pool();
}
// Resolve the separate data pool (if requested) and optionally verify that
// the pool may hold RBD images before proceeding to directory registration.
template <typename I>
void CreateRequest<I>::validate_data_pool() {
  m_data_io_ctx = m_io_ctx;
  if ((m_features & RBD_FEATURE_DATA_POOL) != 0) {
    librados::Rados rados(m_io_ctx);
    int r = rados.ioctx_create(m_data_pool.c_str(), m_data_io_ctx);
    if (r < 0) {
      lderr(m_cct) << "data pool " << m_data_pool << " does not exist" << dendl;
      complete(r);
      return;
    }
    m_data_pool_id = m_data_io_ctx.get_id();
    // data objects live in the same namespace as the metadata objects
    m_data_io_ctx.set_namespace(m_io_ctx.get_namespace());
  }

  // pool validation can be disabled by configuration
  if (!m_config.get_val<bool>("rbd_validate_pool")) {
    add_image_to_directory();
    return;
  }

  ldout(m_cct, 15) << dendl;

  auto ctx = create_context_callback<
    CreateRequest<I>, &CreateRequest<I>::handle_validate_data_pool>(this);
  auto req = ValidatePoolRequest<I>::create(m_data_io_ctx, ctx);
  req->send();
}
// Completion of the pool validation step; -EINVAL marks a pool that cannot
// hold RBD images, any other negative value is a generic failure.
template <typename I>
void CreateRequest<I>::handle_validate_data_pool(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r < 0) {
    if (r == -EINVAL) {
      lderr(m_cct) << "pool does not support RBD images" << dendl;
    } else {
      lderr(m_cct) << "failed to validate pool: " << cpp_strerror(r) << dendl;
    }
    complete(r);
    return;
  }

  add_image_to_directory();
}
// Register the name -> id mapping in the pool's rbd_directory object.  For
// namespaced pools, the op also asserts the directory is in READY state.
template<typename I>
void CreateRequest<I>::add_image_to_directory() {
  ldout(m_cct, 15) << dendl;

  librados::ObjectWriteOperation op;
  if (!m_io_ctx.get_namespace().empty()) {
    cls_client::dir_state_assert(&op, cls::rbd::DIRECTORY_STATE_READY);
  }
  cls_client::dir_add_image(&op, m_image_name, m_image_id);

  using klass = CreateRequest<I>;
  librados::AioCompletion *comp =
    create_rados_callback<klass, &klass::handle_add_image_to_directory>(this);
  int r = m_io_ctx.aio_operate(RBD_DIRECTORY, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}
// Completion of the directory registration.  Nothing exists on disk yet,
// so any failure completes the request without cleanup.
template<typename I>
void CreateRequest<I>::handle_add_image_to_directory(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r == -EEXIST) {
    ldout(m_cct, 5) << "directory entry for image " << m_image_name
                    << " already exists" << dendl;
  } else if (r == -ENOENT && !m_io_ctx.get_namespace().empty()) {
    // namespaced pools assert directory state; ENOENT means no namespace
    ldout(m_cct, 5) << "namespace " << m_io_ctx.get_namespace()
                    << " does not exist" << dendl;
  } else if (r < 0) {
    lderr(m_cct) << "error adding image to directory: " << cpp_strerror(r)
                 << dendl;
  }

  if (r < 0) {
    complete(r);
    return;
  }

  create_id_object();
}
// Create the per-image id object (exclusive create) holding the image id.
template<typename I>
void CreateRequest<I>::create_id_object() {
  ldout(m_cct, 15) << dendl;

  librados::ObjectWriteOperation op;
  op.create(true);  // exclusive: fail with -EEXIST if it already exists
  cls_client::set_id(&op, m_image_id);

  using klass = CreateRequest<I>;
  librados::AioCompletion *comp =
    create_rados_callback<klass, &klass::handle_create_id_object>(this);
  int r = m_io_ctx.aio_operate(m_id_obj, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}
// Completion of the id-object creation; on failure unwind the directory
// entry added in the previous step.
template<typename I>
void CreateRequest<I>::handle_create_id_object(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r == 0) {
    negotiate_features();
    return;
  }

  if (r == -EEXIST) {
    ldout(m_cct, 5) << "id object for " << m_image_name << " already exists"
                    << dendl;
  } else {
    lderr(m_cct) << "error creating RBD id object: " << cpp_strerror(r)
                 << dendl;
  }

  m_r_saved = r;
  remove_from_dir();
}
// When the feature set came from configured defaults (not an explicit
// request), query the cluster for its supported feature mask so defaults
// can be clamped; skipped for explicitly requested features.
template<typename I>
void CreateRequest<I>::negotiate_features() {
  if (!m_negotiate_features) {
    create_image();
    return;
  }

  ldout(m_cct, 15) << dendl;

  librados::ObjectReadOperation op;
  cls_client::get_all_features_start(&op);

  using klass = CreateRequest<I>;
  librados::AioCompletion *comp =
    create_rados_callback<klass, &klass::handle_negotiate_features>(this);

  m_outbl.clear();
  int r = m_io_ctx.aio_operate(RBD_DIRECTORY, comp, &op, &m_outbl);
  ceph_assert(r == 0);
  comp->release();
}
// Completion of the feature negotiation.  Failures are non-fatal: the
// request proceeds with the unclamped default feature set.
template<typename I>
void CreateRequest<I>::handle_negotiate_features(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  uint64_t all_features;
  if (r >= 0) {
    auto it = m_outbl.cbegin();
    r = cls_client::get_all_features_finish(&it, &all_features);
  }

  if (r < 0) {
    ldout(m_cct, 10) << "error retrieving server supported features set: "
                     << cpp_strerror(r) << dendl;
  } else if ((m_features & all_features) != m_features) {
    // drop default features the server does not support
    m_features &= all_features;
    ldout(m_cct, 10) << "limiting default features set to server supported: "
                     << m_features << dendl;
  }

  create_image();
}
// Create the image header object with size/order/features and the data
// object prefix.  When a separate data pool is used, the prefix embeds the
// metadata pool id before the image id.
template<typename I>
void CreateRequest<I>::create_image() {
  ldout(m_cct, 15) << dendl;
  ceph_assert(m_data_pool.empty() || m_data_pool_id != -1);

  std::ostringstream oss;
  oss << RBD_DATA_PREFIX;
  if (m_data_pool_id != -1) {
    oss << stringify(m_io_ctx.get_id()) << ".";
  }
  oss << m_image_id;
  if (oss.str().length() > RBD_MAX_BLOCK_NAME_PREFIX_LENGTH) {
    lderr(m_cct) << "object prefix '" << oss.str() << "' too large" << dendl;
    m_r_saved = -EINVAL;
    remove_id_object();
    return;
  }

  librados::ObjectWriteOperation op;
  op.create(true);  // exclusive: a pre-existing header is an error
  cls_client::create_image(&op, m_size, m_order, m_features, oss.str(),
                           m_data_pool_id);

  using klass = CreateRequest<I>;
  librados::AioCompletion *comp =
    create_rados_callback<klass, &klass::handle_create_image>(this);
  int r = m_io_ctx.aio_operate(m_header_obj, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}
// Completion of the header-object creation; on failure unwind the id
// object.  A pre-existing header means the image id is already in use.
template<typename I>
void CreateRequest<I>::handle_create_image(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r == 0) {
    set_stripe_unit_count();
    return;
  }

  if (r == -EEXIST) {
    ldout(m_cct, 5) << "image id already in-use" << dendl;
    complete(-EBADF);
    return;
  }

  lderr(m_cct) << "error writing header: " << cpp_strerror(r) << dendl;
  m_r_saved = r;
  remove_id_object();
}
// Persist non-default striping parameters to the header; skipped entirely
// when striping is unset or equals the trivial layout (count 1, unit ==
// object size).
template<typename I>
void CreateRequest<I>::set_stripe_unit_count() {
  if ((!m_stripe_unit && !m_stripe_count) ||
      ((m_stripe_count == 1) && (m_stripe_unit == (1ull << m_order)))) {
    object_map_resize();
    return;
  }

  ldout(m_cct, 15) << dendl;

  librados::ObjectWriteOperation op;
  cls_client::set_stripe_unit_count(&op, m_stripe_unit, m_stripe_count);

  using klass = CreateRequest<I>;
  librados::AioCompletion *comp =
    create_rados_callback<klass, &klass::handle_set_stripe_unit_count>(this);
  int r = m_io_ctx.aio_operate(m_header_obj, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}
// Completion of the striping update; on failure unwind the header object.
template<typename I>
void CreateRequest<I>::handle_set_stripe_unit_count(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r == 0) {
    object_map_resize();
    return;
  }

  lderr(m_cct) << "error setting stripe unit/count: "
               << cpp_strerror(r) << dendl;
  m_r_saved = r;
  remove_header_object();
}
// Initialize the object map sized for the image, with every object marked
// nonexistent; skipped when the object-map feature is disabled.
template<typename I>
void CreateRequest<I>::object_map_resize() {
  if ((m_features & RBD_FEATURE_OBJECT_MAP) == 0) {
    fetch_mirror_mode();
    return;
  }

  ldout(m_cct, 15) << dendl;

  librados::ObjectWriteOperation op;
  cls_client::object_map_resize(&op, Striper::get_num_objects(m_layout, m_size),
                                OBJECT_NONEXISTENT);

  using klass = CreateRequest<I>;
  librados::AioCompletion *comp =
    create_rados_callback<klass, &klass::handle_object_map_resize>(this);
  int r = m_io_ctx.aio_operate(m_objmap_name, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}
// Completion of the object-map initialization; on failure unwind the
// header object.
template<typename I>
void CreateRequest<I>::handle_object_map_resize(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r == 0) {
    fetch_mirror_mode();
    return;
  }

  lderr(m_cct) << "error creating initial object map: "
               << cpp_strerror(r) << dendl;
  m_r_saved = r;
  remove_header_object();
}
// Read the pool's mirror mode; only needed when journaling is enabled,
// otherwise skip straight to the mirroring-enable decision.
template<typename I>
void CreateRequest<I>::fetch_mirror_mode() {
  if ((m_features & RBD_FEATURE_JOURNALING) == 0) {
    mirror_image_enable();
    return;
  }

  ldout(m_cct, 15) << dendl;

  librados::ObjectReadOperation op;
  cls_client::mirror_mode_get_start(&op);

  using klass = CreateRequest<I>;
  librados::AioCompletion *comp =
    create_rados_callback<klass, &klass::handle_fetch_mirror_mode>(this);
  m_outbl.clear();
  int r = m_io_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_outbl);
  ceph_assert(r == 0);
  comp->release();
}
// Completion of the mirror-mode fetch.  A missing mirroring object
// (-ENOENT) means mirroring was never configured and is treated as
// disabled; other failures unwind the object map.
template<typename I>
void CreateRequest<I>::handle_fetch_mirror_mode(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if ((r < 0) && (r != -ENOENT)) {
    lderr(m_cct) << "failed to retrieve mirror mode: " << cpp_strerror(r)
                 << dendl;

    m_r_saved = r;
    remove_object_map();
    return;
  }

  m_mirror_mode = cls::rbd::MIRROR_MODE_DISABLED;
  if (r == 0) {
    auto it = m_outbl.cbegin();
    r = cls_client::mirror_mode_get_finish(&it, &m_mirror_mode);
    if (r < 0) {
      lderr(m_cct) << "Failed to retrieve mirror mode" << dendl;

      m_r_saved = r;
      remove_object_map();
      return;
    }
  }

  journal_create();
}
// Create the image journal metadata (journaling feature is enabled when we
// reach this state).
template<typename I>
void CreateRequest<I>::journal_create() {
  ldout(m_cct, 15) << dendl;

  using klass = CreateRequest<I>;
  Context *ctx = create_context_callback<klass, &klass::handle_journal_create>(
    this);

  // only link to remote primary mirror uuid if in journal-based
  // mirroring mode
  bool use_primary_mirror_uuid = (
    !m_non_primary_global_image_id.empty() &&
    m_mirror_image_mode == cls::rbd::MIRROR_IMAGE_MODE_JOURNAL);

  librbd::journal::TagData tag_data;
  tag_data.mirror_uuid = (use_primary_mirror_uuid ? m_primary_mirror_uuid :
                          librbd::Journal<I>::LOCAL_MIRROR_UUID);

  typename journal::TypeTraits<I>::ContextWQ* context_wq;
  Journal<>::get_work_queue(m_cct, &context_wq);

  auto req = librbd::journal::CreateRequest<I>::create(
    m_io_ctx, m_image_id, m_journal_order, m_journal_splay_width,
    m_journal_pool, cls::journal::Tag::TAG_CLASS_NEW, tag_data,
    librbd::Journal<I>::IMAGE_CLIENT_ID, context_wq, ctx);
  req->send();
}
// Completion of the journal creation; on failure unwind the object map.
template<typename I>
void CreateRequest<I>::handle_journal_create(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r == 0) {
    mirror_image_enable();
    return;
  }

  lderr(m_cct) << "error creating journal: " << cpp_strerror(r)
               << dendl;
  m_r_saved = r;
  remove_object_map();
}
// Enable mirroring for the new image when the pool is in pool-level mirror
// mode or the caller forced it, unless the caller explicitly opted out.
template<typename I>
void CreateRequest<I>::mirror_image_enable() {
  auto mirror_enable_flag = (m_create_flags & CREATE_FLAG_MIRROR_ENABLE_MASK);

  if ((m_mirror_mode != cls::rbd::MIRROR_MODE_POOL &&
       mirror_enable_flag != CREATE_FLAG_FORCE_MIRROR_ENABLE) ||
      (mirror_enable_flag == CREATE_FLAG_SKIP_MIRROR_ENABLE)) {
    complete(0);
    return;
  }

  ldout(m_cct, 15) << dendl;
  auto ctx = create_context_callback<
    CreateRequest<I>, &CreateRequest<I>::handle_mirror_image_enable>(this);
  auto req = mirror::EnableRequest<I>::create(
    m_io_ctx, m_image_id, m_mirror_image_mode,
    m_non_primary_global_image_id, true, m_op_work_queue, ctx);
  req->send();
}
// Completion of the mirroring enable; on failure unwind starting with the
// journal.
template<typename I>
void CreateRequest<I>::handle_mirror_image_enable(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r == 0) {
    complete(0);
    return;
  }

  lderr(m_cct) << "cannot enable mirroring: " << cpp_strerror(r)
               << dendl;
  m_r_saved = r;
  journal_remove();
}
// Terminate the state machine: close the data IoCtx, destroy the request
// object, then invoke the user callback (self-delete happens first so the
// callback may safely release anything referencing this request).
template<typename I>
void CreateRequest<I>::complete(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  m_data_io_ctx.close();
  auto on_finish = m_on_finish;
  delete this;
  on_finish->complete(r);
}
// cleanup
// Cleanup path: remove the journal created earlier (no-op when journaling
// was never enabled), then continue unwinding with the object map.
template<typename I>
void CreateRequest<I>::journal_remove() {
  if ((m_features & RBD_FEATURE_JOURNALING) == 0) {
    remove_object_map();
    return;
  }

  ldout(m_cct, 15) << dendl;

  using klass = CreateRequest<I>;
  Context *ctx = create_context_callback<klass, &klass::handle_journal_remove>(
    this);

  typename journal::TypeTraits<I>::ContextWQ* context_wq;
  Journal<>::get_work_queue(m_cct, &context_wq);

  librbd::journal::RemoveRequest<I> *req =
    librbd::journal::RemoveRequest<I>::create(
      m_io_ctx, m_image_id, librbd::Journal<I>::IMAGE_CLIENT_ID, context_wq,
      ctx);
  req->send();
}
// Cleanup-path completion: log but otherwise ignore failures so the
// remaining unwind steps still run.
template<typename I>
void CreateRequest<I>::handle_journal_remove(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_cct) << "error cleaning up journal after creation failed: "
                 << cpp_strerror(r) << dendl;
  }

  remove_object_map();
}
// Cleanup path: remove the object map object (no-op when the feature was
// never enabled), then continue unwinding with the header object.
template<typename I>
void CreateRequest<I>::remove_object_map() {
  if ((m_features & RBD_FEATURE_OBJECT_MAP) == 0) {
    remove_header_object();
    return;
  }

  ldout(m_cct, 15) << dendl;

  using klass = CreateRequest<I>;
  librados::AioCompletion *comp =
    create_rados_callback<klass, &klass::handle_remove_object_map>(this);
  int r = m_io_ctx.aio_remove(m_objmap_name, comp);
  ceph_assert(r == 0);
  comp->release();
}
// Cleanup-path completion: failures are logged but do not stop the unwind.
template<typename I>
void CreateRequest<I>::handle_remove_object_map(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_cct) << "error cleaning up object map after creation failed: "
                 << cpp_strerror(r) << dendl;
  }

  remove_header_object();
}
// Cleanup path: remove the image header object.
template<typename I>
void CreateRequest<I>::remove_header_object() {
  ldout(m_cct, 15) << dendl;

  using klass = CreateRequest<I>;
  librados::AioCompletion *comp =
    create_rados_callback<klass, &klass::handle_remove_header_object>(this);
  int r = m_io_ctx.aio_remove(m_header_obj, comp);
  ceph_assert(r == 0);
  comp->release();
}
// Cleanup-path completion: failures are logged but do not stop the unwind.
template<typename I>
void CreateRequest<I>::handle_remove_header_object(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_cct) << "error cleaning up image header after creation failed: "
                 << cpp_strerror(r) << dendl;
  }

  remove_id_object();
}
// Cleanup path: remove the per-image id object.
template<typename I>
void CreateRequest<I>::remove_id_object() {
  ldout(m_cct, 15) << dendl;

  using klass = CreateRequest<I>;
  librados::AioCompletion *comp =
    create_rados_callback<klass, &klass::handle_remove_id_object>(this);
  int r = m_io_ctx.aio_remove(m_id_obj, comp);
  ceph_assert(r == 0);
  comp->release();
}
// Cleanup-path completion: failures are logged but do not stop the unwind.
template<typename I>
void CreateRequest<I>::handle_remove_id_object(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_cct) << "error cleaning up id object after creation failed: "
                 << cpp_strerror(r) << dendl;
  }

  remove_from_dir();
}
// Cleanup path: drop the name -> id mapping from the rbd_directory object;
// this is the final unwind step.
template<typename I>
void CreateRequest<I>::remove_from_dir() {
  ldout(m_cct, 15) << dendl;

  librados::ObjectWriteOperation op;
  cls_client::dir_remove_image(&op, m_image_name, m_image_id);

  using klass = CreateRequest<I>;
  librados::AioCompletion *comp =
    create_rados_callback<klass, &klass::handle_remove_from_dir>(this);
  int r = m_io_ctx.aio_operate(RBD_DIRECTORY, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}
// Final cleanup completion: report the original failure (m_r_saved), not
// any error encountered while unwinding.
template<typename I>
void CreateRequest<I>::handle_remove_from_dir(int r) {
  ldout(m_cct, 15) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_cct) << "error cleaning up image from rbd_directory object "
                 << "after creation failed: " << cpp_strerror(r) << dendl;
  }

  complete(m_r_saved);
}
} //namespace image
} //namespace librbd
template class librbd::image::CreateRequest<librbd::ImageCtx>;
| 24,258 | 28.017943 | 95 | cc |
null | ceph-main/src/librbd/image/CreateRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGE_CREATE_REQUEST_H
#define CEPH_LIBRBD_IMAGE_CREATE_REQUEST_H
#include "common/config_fwd.h"
#include "include/int_types.h"
#include "include/buffer.h"
#include "include/rados/librados.hpp"
#include "include/rbd/librbd.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/ImageCtx.h"
class Context;
using librados::IoCtx;
namespace journal { class Journaler; }
namespace librbd {
namespace asio { struct ContextWQ; }
namespace image {
// Asynchronous state machine that creates an RBD image.  On any failure,
// previously created on-disk artifacts are removed in reverse order (see
// the state diagram) before the completion callback fires with the
// original error.
template <typename ImageCtxT = ImageCtx>
class CreateRequest {
public:
  static CreateRequest *create(const ConfigProxy& config, IoCtx &ioctx,
                               const std::string &image_name,
                               const std::string &image_id, uint64_t size,
                               const ImageOptions &image_options,
                               uint32_t create_flags,
                               cls::rbd::MirrorImageMode mirror_image_mode,
                               const std::string &non_primary_global_image_id,
                               const std::string &primary_mirror_uuid,
                               asio::ContextWQ *op_work_queue,
                               Context *on_finish) {
    return new CreateRequest(config, ioctx, image_name, image_id, size,
                             image_options, create_flags,
                             mirror_image_mode, non_primary_global_image_id,
                             primary_mirror_uuid, op_work_queue, on_finish);
  }

  // Validate that the object-size order lies within the supported range.
  static int validate_order(CephContext *cct, uint8_t order);

  void send();

private:
  /**
   * @verbatim
   *
   * <start> . . . . > . . . . .
   *    |                      .
   *    v                      .
   * VALIDATE DATA POOL        v (pool validation
   *    |                      .  disabled)
   *    v                      .
   * (error: bottom up)        ADD IMAGE TO DIRECTORY < . . . .
   *  _______<_______                   |
   * |               |                  v
   * |               |           CREATE ID OBJECT
   * |               |                / |
   * |     REMOVE FROM DIR <---------/  v
   * |               |           NEGOTIATE FEATURES (when using default features)
   * |               |                  |
   * |               |                  v         (stripingv2 disabled)
   * |               |           CREATE IMAGE. . . . > . . . .
   * v               |                / |                     .
   * |      REMOVE ID OBJ <----------/  v                     .
   * |               |          SET STRIPE UNIT COUNT         .
   * |               |                / |          \ . . . . . > . . . .
   * |   REMOVE HEADER OBJ<----------/  v                     /         .
   * |               |\           OBJECT MAP RESIZE . . < . . *         v  (object-map
   * |               | \              / |          \ . . . . . > . . . .   disabled)
   * |               |  *<-----------/  v                     /         .
   * |               |           FETCH MIRROR MODE. . < . . .*          v  (journaling
   * |               |                / |                               .   disabled)
   * |     REMOVE OBJECT MAP<--------/  v                               .
   * |               |\            JOURNAL CREATE                       .
   * |               | \              / |                               .
   * v               |  *<-----------/  v                               .
   * |               |           MIRROR IMAGE ENABLE                    .
   * |               |                / |                               .
   * |       JOURNAL REMOVE*<-------/   |                               .
   * |                                  v                               .
   * |_____________>___________________<finish> . . . . < . . . . . . . .
   *
   * @endverbatim
   */
  CreateRequest(const ConfigProxy& config, IoCtx &ioctx,
                const std::string &image_name,
                const std::string &image_id, uint64_t size,
                const ImageOptions &image_options,
                uint32_t create_flags,
                cls::rbd::MirrorImageMode mirror_image_mode,
                const std::string &non_primary_global_image_id,
                const std::string &primary_mirror_uuid,
                asio::ContextWQ *op_work_queue, Context *on_finish);

  const ConfigProxy& m_config;
  IoCtx m_io_ctx;
  IoCtx m_data_io_ctx;
  std::string m_image_name;
  std::string m_image_id;
  uint64_t m_size;
  uint8_t m_order = 0;
  uint64_t m_features = 0;
  uint64_t m_stripe_unit = 0;
  uint64_t m_stripe_count = 0;
  uint8_t m_journal_order = 0;
  uint8_t m_journal_splay_width = 0;
  std::string m_journal_pool;
  std::string m_data_pool;
  int64_t m_data_pool_id = -1;
  uint32_t m_create_flags;
  cls::rbd::MirrorImageMode m_mirror_image_mode;
  const std::string m_non_primary_global_image_id;
  const std::string m_primary_mirror_uuid;
  bool m_negotiate_features = false;

  asio::ContextWQ *m_op_work_queue;
  Context *m_on_finish;

  CephContext *m_cct;
  int m_r_saved = 0;  // used to return actual error after cleanup
  file_layout_t m_layout;
  std::string m_id_obj, m_header_obj, m_objmap_name;
  bufferlist m_outbl;
  cls::rbd::MirrorMode m_mirror_mode = cls::rbd::MIRROR_MODE_DISABLED;
  cls::rbd::MirrorImage m_mirror_image_internal;

  void validate_data_pool();
  void handle_validate_data_pool(int r);

  void add_image_to_directory();
  void handle_add_image_to_directory(int r);

  void create_id_object();
  void handle_create_id_object(int r);

  void negotiate_features();
  void handle_negotiate_features(int r);

  void create_image();
  void handle_create_image(int r);

  void set_stripe_unit_count();
  void handle_set_stripe_unit_count(int r);

  void object_map_resize();
  void handle_object_map_resize(int r);

  void fetch_mirror_mode();
  void handle_fetch_mirror_mode(int r);

  void journal_create();
  void handle_journal_create(int r);

  void mirror_image_enable();
  void handle_mirror_image_enable(int r);

  void complete(int r);

  // cleanup
  void journal_remove();
  void handle_journal_remove(int r);

  void remove_object_map();
  void handle_remove_object_map(int r);

  void remove_header_object();
  void handle_remove_header_object(int r);

  void remove_id_object();
  void handle_remove_id_object(int r);

  void remove_from_dir();
  void handle_remove_from_dir(int r);
};
} //namespace image
} //namespace librbd
extern template class librbd::image::CreateRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IMAGE_CREATE_REQUEST_H
| 6,757 | 34.197917 | 82 | h |
null | ceph-main/src/librbd/image/DetachChildRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/image/DetachChildRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/journal/DisabledPolicy.h"
#include "librbd/trash/RemoveRequest.h"
#include <string>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::DetachChildRequest: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace image {
using util::create_context_callback;
using util::create_rados_callback;
// The parent image must already have been closed/released (or never
// opened) by the time the request is destroyed.
template <typename I>
DetachChildRequest<I>::~DetachChildRequest() {
  ceph_assert(m_parent_image_ctx == nullptr);
}
// Entry point: determine the parent spec, then dispatch to the v1 or v2
// clone detach path (or finish immediately when there is no parent).
template <typename I>
void DetachChildRequest<I>::send() {
  {
    std::shared_lock image_locker{m_image_ctx.image_lock};

    // use oldest snapshot or HEAD for parent spec
    if (!m_image_ctx.snap_info.empty()) {
      m_parent_spec = m_image_ctx.snap_info.begin()->second.parent.spec;
    } else {
      m_parent_spec = m_image_ctx.parent_md.spec;
    }
  }

  if (m_parent_spec.pool_id == -1) {
    // ignore potential race with parent disappearing
    m_image_ctx.op_work_queue->queue(create_context_callback<
      DetachChildRequest<I>,
      &DetachChildRequest<I>::finish>(this), 0);
    return;
  } else if (!m_image_ctx.test_op_features(RBD_OPERATION_FEATURE_CLONE_CHILD)) {
    clone_v1_remove_child();
    return;
  }

  clone_v2_child_detach();
}
// Remove this image from the parent snapshot's child list (clone v2).
template <typename I>
void DetachChildRequest<I>::clone_v2_child_detach() {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << dendl;

  librados::ObjectWriteOperation op;
  cls_client::child_detach(&op, m_parent_spec.snap_id,
                           {m_image_ctx.md_ctx.get_id(),
                            m_image_ctx.md_ctx.get_namespace(),
                            m_image_ctx.id});

  int r = util::create_ioctx(m_image_ctx.md_ctx, "parent image",
                             m_parent_spec.pool_id,
                             m_parent_spec.pool_namespace, &m_parent_io_ctx);
  if (r < 0) {
    // a vanished parent pool means there is nothing to detach from
    if (r == -ENOENT) {
      r = 0;
    }
    finish(r);
    return;
  }

  m_parent_header_name = util::header_name(m_parent_spec.image_id);

  auto aio_comp = create_rados_callback<
    DetachChildRequest<I>,
    &DetachChildRequest<I>::handle_clone_v2_child_detach>(this);
  r = m_parent_io_ctx.aio_operate(m_parent_header_name, aio_comp, &op);
  ceph_assert(r == 0);
  aio_comp->release();
}
// Completion of the child-detach op; a missing parent header (-ENOENT) is
// tolerated since there is then nothing to detach from.
template <typename I>
void DetachChildRequest<I>::handle_clone_v2_child_detach(int r) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;

  if (r == -ENOENT) {
    r = 0;
  }
  if (r < 0) {
    lderr(cct) << "error detaching child from parent: " << cpp_strerror(r)
               << dendl;
    finish(r);
    return;
  }

  clone_v2_get_snapshot();
}
// Look up the parent snapshot this image was cloned from so we can decide
// whether it is a trashed snapshot that should now be deleted.
template <typename I>
void DetachChildRequest<I>::clone_v2_get_snapshot() {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << dendl;

  librados::ObjectReadOperation op;
  cls_client::snapshot_get_start(&op, m_parent_spec.snap_id);

  m_out_bl.clear();
  auto aio_comp = create_rados_callback<
    DetachChildRequest<I>,
    &DetachChildRequest<I>::handle_clone_v2_get_snapshot>(this);
  int r = m_parent_io_ctx.aio_operate(m_parent_header_name, aio_comp, &op,
                                      &m_out_bl);
  ceph_assert(r == 0);
  aio_comp->release();
}
// Completion of the snapshot lookup.  Only a trashed snapshot with no
// remaining children is removed; all other cases (including lookup
// failures, which are non-fatal) finish the request successfully.
template <typename I>
void DetachChildRequest<I>::handle_clone_v2_get_snapshot(int r) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;

  bool remove_snapshot = false;
  if (r == 0) {
    cls::rbd::SnapshotInfo snap_info;
    auto it = m_out_bl.cbegin();
    r = cls_client::snapshot_get_finish(&it, &snap_info);
    if (r == 0) {
      m_parent_snap_namespace = snap_info.snapshot_namespace;
      m_parent_snap_name = snap_info.name;

      if (cls::rbd::get_snap_namespace_type(m_parent_snap_namespace) ==
            cls::rbd::SNAPSHOT_NAMESPACE_TYPE_TRASH &&
          snap_info.child_count == 0) {
        // snapshot is in trash w/ zero children, so remove it
        remove_snapshot = true;
      }
    }
  }

  if (r < 0 && r != -ENOENT) {
    ldout(cct, 5) << "failed to retrieve snapshot: " << cpp_strerror(r)
                  << dendl;
  }

  if (!remove_snapshot) {
    finish(0);
    return;
  }

  clone_v2_open_parent();
}
// Open the parent image read/write so its trashed snapshot can be removed.
template<typename I>
void DetachChildRequest<I>::clone_v2_open_parent() {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << dendl;

  m_parent_image_ctx = I::create("", m_parent_spec.image_id, nullptr,
                                 m_parent_io_ctx, false);

  // ensure non-primary images can be modified
  m_parent_image_ctx->read_only_mask &= ~IMAGE_READ_ONLY_FLAG_NON_PRIMARY;

  auto ctx = create_context_callback<
    DetachChildRequest<I>,
    &DetachChildRequest<I>::handle_clone_v2_open_parent>(this);
  m_parent_image_ctx->state->open(OPEN_FLAG_SKIP_OPEN_PARENT, ctx);
}
// Completion of the parent open.  Failure to open is non-fatal (the trash
// snapshot is simply left behind); on success, journaling and proxied
// maintenance ops are disabled before removing the snapshot.
template<typename I>
void DetachChildRequest<I>::handle_clone_v2_open_parent(int r) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;

  if (r < 0) {
    ldout(cct, 5) << "failed to open parent for read/write: "
                  << cpp_strerror(r) << dendl;
    m_parent_image_ctx = nullptr;
    finish(0);
    return;
  }

  // do not attempt to open the parent journal when removing the trash
  // snapshot, because the parent may be not promoted
  if (m_parent_image_ctx->test_features(RBD_FEATURE_JOURNALING)) {
    std::unique_lock image_locker{m_parent_image_ctx->image_lock};
    m_parent_image_ctx->set_journal_policy(new journal::DisabledPolicy());
  }

  // disallow any proxied maintenance operations
  {
    std::shared_lock owner_locker{m_parent_image_ctx->owner_lock};
    if (m_parent_image_ctx->exclusive_lock != nullptr) {
      m_parent_image_ctx->exclusive_lock->block_requests(0);
    }
  }

  clone_v2_remove_snapshot();
}
// Remove the now-childless trashed snapshot from the parent image.
template<typename I>
void DetachChildRequest<I>::clone_v2_remove_snapshot() {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << dendl;

  auto ctx = create_context_callback<
    DetachChildRequest<I>,
    &DetachChildRequest<I>::handle_clone_v2_remove_snapshot>(this);
  m_parent_image_ctx->operations->snap_remove(m_parent_snap_namespace,
                                              m_parent_snap_name, ctx);
}
// Completion of the snapshot removal (failure is non-fatal).  If the
// parent now has no snapshots at all, check whether the parent itself is
// pending deletion from the trash.
template<typename I>
void DetachChildRequest<I>::handle_clone_v2_remove_snapshot(int r) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;

  if (r < 0 && r != -ENOENT) {
    ldout(cct, 5) << "failed to remove trashed clone snapshot: "
                  << cpp_strerror(r) << dendl;
    clone_v2_close_parent();
    return;
  }

  if (!m_parent_image_ctx->snaps.empty()) {
    clone_v2_close_parent();
  } else {
    clone_v2_get_parent_trash_entry();
  }
}
// Look up the parent's entry in the pool trash to see whether it should
// now be permanently removed.
template<typename I>
void DetachChildRequest<I>::clone_v2_get_parent_trash_entry() {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << dendl;

  librados::ObjectReadOperation op;
  cls_client::trash_get_start(&op, m_parent_image_ctx->id);

  m_out_bl.clear();
  auto aio_comp = create_rados_callback<
    DetachChildRequest<I>,
    &DetachChildRequest<I>::handle_clone_v2_get_parent_trash_entry>(this);
  int r = m_parent_io_ctx.aio_operate(RBD_TRASH, aio_comp, &op, &m_out_bl);
  ceph_assert(r == 0);
  aio_comp->release();
}
template<typename I>
void DetachChildRequest<I>::handle_clone_v2_get_parent_trash_entry(int r) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;

  if (r < 0 && r != -ENOENT) {
    ldout(cct, 5) << "failed to get parent trash entry: " << cpp_strerror(r)
                  << dendl;
    clone_v2_close_parent();
    return;
  }

  // the parent is only removed here when its trash entry shows it was
  // trashed on behalf of a child (USER_PARENT), is in NORMAL state, and
  // its deferment window has already elapsed
  bool eligible_for_removal = false;
  if (r == 0) {
    cls::rbd::TrashImageSpec trash_spec;
    auto iter = m_out_bl.cbegin();
    if (cls_client::trash_get_finish(&iter, &trash_spec) == 0 &&
        trash_spec.source == cls::rbd::TRASH_IMAGE_SOURCE_USER_PARENT &&
        trash_spec.state == cls::rbd::TRASH_IMAGE_STATE_NORMAL &&
        trash_spec.deferment_end_time <= ceph_clock_now()) {
      eligible_for_removal = true;
    }
  }

  if (eligible_for_removal) {
    clone_v2_remove_parent_from_trash();
  } else {
    clone_v2_close_parent();
  }
}
// Permanently delete the trashed parent image via the trash RemoveRequest.
template<typename I>
void DetachChildRequest<I>::clone_v2_remove_parent_from_trash() {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << dendl;

  auto on_finish = create_context_callback<
    DetachChildRequest<I>,
    &DetachChildRequest<I>::handle_clone_v2_remove_parent_from_trash>(this);
  auto request = librbd::trash::RemoveRequest<I>::create(
    m_parent_io_ctx, m_parent_image_ctx, m_image_ctx.op_work_queue, false,
    m_no_op, on_finish);
  request->send();
}
template<typename I>
void DetachChildRequest<I>::handle_clone_v2_remove_parent_from_trash(int r) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;

  if (r < 0) {
    // best-effort cleanup: log and continue -- the child detach itself
    // already succeeded. Message now uses the ": " separator consistent
    // with the other error messages in this file.
    ldout(cct, 5) << "failed to remove parent image: " << cpp_strerror(r)
                  << dendl;
  }

  // NOTE(review): assumes the trash RemoveRequest closed/released the
  // parent image ctx -- no explicit close happens here; confirm
  m_parent_image_ctx = nullptr;
  finish(0);
}
// Close the parent image that was opened earlier in the state machine.
template<typename I>
void DetachChildRequest<I>::clone_v2_close_parent() {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << dendl;

  auto on_close = create_context_callback<
    DetachChildRequest<I>,
    &DetachChildRequest<I>::handle_clone_v2_close_parent>(this);
  m_parent_image_ctx->state->close(on_close);
}
template<typename I>
void DetachChildRequest<I>::handle_clone_v2_close_parent(int r) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;

  if (r < 0) {
    // a failed close is tolerated (child detach already succeeded).
    // Message now uses the ": " separator consistent with the other
    // error messages in this file.
    ldout(cct, 5) << "failed to close parent image: " << cpp_strerror(r)
                  << dendl;
  }

  m_parent_image_ctx = nullptr;
  finish(0);
}
// v1 clones: remove this image's record from the pool's RBD_CHILDREN object.
template<typename I>
void DetachChildRequest<I>::clone_v1_remove_child() {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << dendl;

  // the v1 child record is keyed without a pool namespace, so clear it
  // before building the removal op
  m_parent_spec.pool_namespace = "";

  librados::ObjectWriteOperation write_op;
  librbd::cls_client::remove_child(&write_op, m_parent_spec, m_image_ctx.id);

  auto rados_completion = create_rados_callback<
    DetachChildRequest<I>,
    &DetachChildRequest<I>::handle_clone_v1_remove_child>(this);
  int r = m_image_ctx.md_ctx.aio_operate(RBD_CHILDREN, rados_completion,
                                         &write_op);
  ceph_assert(r == 0);
  rados_completion->release();
}
template<typename I>
void DetachChildRequest<I>::handle_clone_v1_remove_child(int r) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;

  // -ENOENT (record already gone) counts as success; the old `r = 0`
  // assignment for that case was dead code since finish(0) runs anyway.
  // This now matches the guard idiom used by handle_detach_parent().
  if (r < 0 && r != -ENOENT) {
    lderr(cct) << "failed to remove child from children list: "
               << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }
  finish(0);
}
// Complete the request: notify the caller, then destroy this state machine.
template <typename I>
void DetachChildRequest<I>::finish(int r) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;

  auto* on_finish = m_on_finish;
  on_finish->complete(r);
  delete this;  // self-owned: lifetime ends with the request
}
} // namespace image
} // namespace librbd
template class librbd::image::DetachChildRequest<librbd::ImageCtx>;
| 11,042 | 27.099237 | 80 | cc |
null | ceph-main/src/librbd/image/DetachChildRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGE_DETACH_CHILD_REQUEST_H
#define CEPH_LIBRBD_IMAGE_DETACH_CHILD_REQUEST_H
#include "include/int_types.h"
#include "include/buffer.h"
#include "include/rados/librados.hpp"
#include "librbd/Types.h"
#include "librbd/internal.h"
class Context;
namespace librbd {
class ImageCtx;
namespace image {
// Async state machine that severs the child->parent link for a cloned
// image, dispatching on clone format (v1 vs v2) and, for v2, optionally
// auto-removing a parent that was left in the trash for this purpose.
template <typename ImageCtxT = ImageCtx>
class DetachChildRequest {
public:
  // Factory helper; the request deletes itself once on_finish completes.
  static DetachChildRequest* create(ImageCtxT& image_ctx, Context* on_finish) {
    return new DetachChildRequest(image_ctx, on_finish);
  }
  DetachChildRequest(ImageCtxT& image_ctx, Context* on_finish)
    : m_image_ctx(image_ctx), m_on_finish(on_finish) {
  }
  ~DetachChildRequest();
  // Entry point: starts the state machine shown below.
  void send();
private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    | (v1) |  (v2)
   *    /--------------/ \--------------\
   *    |      |
   *    v      v
   * REMOVE_CHILD     CHILD_DETACH
   *    |      |
   *    |      v
   *    |   GET_SNAPSHOT
   *    | (snapshot in-use) .   |
   *    |/. . . . . . . . . . . . . . . |
   *    |      v
   *    |   OPEN_PARENT
   *    |      |
   *    |      v (has more children)
   *    |   REMOVE_SNAPSHOT ---------------\
   *    |      |           |
   *    |      v (noent)         |
   *    |         (auto-delete when    GET_PARENT_TRASH_ENTRY . . . .\|
   *    |    last child detached)    |           |
   *    |      v           v
   *    |     REMOVE_PARENT_FROM_TRASH     CLOSE_PARENT
   *    |      |           |
   *    |/------------------------------/--------------------------/
   *    |
   *    v
   * <finish>
   *
   * @endverbatim
   */
  // Child image whose parent link is being detached.
  ImageCtxT& m_image_ctx;
  // Caller's completion; fired exactly once by finish().
  Context* m_on_finish;
  // IoCtx targeting the parent image's pool.
  librados::IoCtx m_parent_io_ctx;
  // Parent pool/image/snap identifiers read from the child.
  cls::rbd::ParentImageSpec m_parent_spec;
  std::string m_parent_header_name;
  // Snapshot (on the parent) that backs this clone.
  cls::rbd::SnapshotNamespace m_parent_snap_namespace;
  std::string m_parent_snap_name;
  // Parent image, opened only on the v2 path; reset when closed/removed.
  ImageCtxT* m_parent_image_ctx = nullptr;
  // Scratch buffer for rados read replies (e.g. trash_get).
  ceph::bufferlist m_out_bl;
  // Progress sink for the trash RemoveRequest (no progress reporting).
  NoOpProgressContext m_no_op;
  void clone_v2_child_detach();
  void handle_clone_v2_child_detach(int r);
  void clone_v2_get_snapshot();
  void handle_clone_v2_get_snapshot(int r);
  void clone_v2_open_parent();
  void handle_clone_v2_open_parent(int r);
  void clone_v2_remove_snapshot();
  void handle_clone_v2_remove_snapshot(int r);
  void clone_v2_get_parent_trash_entry();
  void handle_clone_v2_get_parent_trash_entry(int r);
  void clone_v2_remove_parent_from_trash();
  void handle_clone_v2_remove_parent_from_trash(int r);
  void clone_v2_close_parent();
  void handle_clone_v2_close_parent(int r);
  void clone_v1_remove_child();
  void handle_clone_v1_remove_child(int r);
  void finish(int r);
};
} // namespace image
} // namespace librbd
extern template class librbd::image::DetachChildRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IMAGE_DETACH_CHILD_REQUEST_H
| 3,437 | 27.65 | 79 | h |
null | ceph-main/src/librbd/image/DetachParentRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/image/DetachParentRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::DetachParentRequest: " << this \
<< " " << __func__ << ": "
namespace librbd {
namespace image {
using util::create_context_callback;
using util::create_rados_callback;
template <typename I>
void DetachParentRequest<I>::send() {
  // entry point: issue the (possibly retried) parent-detach header update
  detach_parent();
}
// Write the header-object update that removes the parent linkage, using
// the legacy op once the handler has flagged m_legacy_parent.
template <typename I>
void DetachParentRequest<I>::detach_parent() {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << dendl;

  librados::ObjectWriteOperation write_op;
  if (m_legacy_parent) {
    librbd::cls_client::remove_parent(&write_op);
  } else {
    librbd::cls_client::parent_detach(&write_op);
  }

  auto rados_completion = create_rados_callback<
    DetachParentRequest<I>,
    &DetachParentRequest<I>::handle_detach_parent>(this);
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid,
                                         rados_completion, &write_op);
  ceph_assert(r == 0);
  rados_completion->release();
}
template <typename I>
void DetachParentRequest<I>::handle_detach_parent(int r) {
  auto cct = m_image_ctx.cct;
  // log the result code -- previously only `dendl` was printed, unlike
  // every other completion handler in these request classes
  ldout(cct, 5) << "r=" << r << dendl;

  if (!m_legacy_parent && r == -EOPNOTSUPP) {
    // -EOPNOTSUPP: parent_detach op not supported; fall back to the
    // legacy remove_parent op and retry
    ldout(cct, 10) << "retrying using legacy parent method" << dendl;
    m_legacy_parent = true;
    detach_parent();
    return;
  }

  // -ENOENT (no parent recorded) is treated as success
  if (r < 0 && r != -ENOENT) {
    lderr(cct) << "detach parent encountered an error: " << cpp_strerror(r)
               << dendl;
    finish(r);
    return;
  }
  finish(0);
}
// Complete the request: notify the caller, then destroy this state machine.
template <typename I>
void DetachParentRequest<I>::finish(int r) {
  auto cct = m_image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;

  auto* on_finish = m_on_finish;
  on_finish->complete(r);
  delete this;  // self-owned: lifetime ends with the request
}
} // namespace image
} // namespace librbd
template class librbd::image::DetachParentRequest<librbd::ImageCtx>;
| 2,017 | 23.609756 | 80 | cc |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.