repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
null | ceph-main/src/crimson/os/seastore/collection_manager/collection_flat_node.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/transaction_manager.h"
#include "crimson/os/seastore/collection_manager.h"
namespace crimson::os::seastore::collection_manager {
struct coll_context_t {
TransactionManager &tm;
Transaction &t;
};
using base_coll_map_t = std::map<denc_coll_t, uint32_t>;
struct coll_map_t : base_coll_map_t {
auto insert(coll_t coll, unsigned bits) {
return emplace(
std::make_pair(denc_coll_t{coll}, bits)
);
}
void update(coll_t coll, unsigned bits) {
(*this)[denc_coll_t{coll}] = bits;
}
void remove(coll_t coll) {
erase(denc_coll_t{coll});
}
};
struct delta_t {
enum class op_t : uint_fast8_t {
INSERT,
UPDATE,
REMOVE,
INVALID
} op = op_t::INVALID;
denc_coll_t coll;
uint32_t bits = 0;
DENC(delta_t, v, p) {
DENC_START(1, 1, p);
denc(v.op, p);
denc(v.coll, p);
denc(v.bits, p);
DENC_FINISH(p);
}
void replay(coll_map_t &l) const;
};
}
WRITE_CLASS_DENC(crimson::os::seastore::collection_manager::delta_t)
namespace crimson::os::seastore::collection_manager {
class delta_buffer_t {
std::vector<delta_t> buffer;
public:
bool empty() const {
return buffer.empty();
}
void insert(coll_t coll, uint32_t bits) {
buffer.push_back(delta_t{delta_t::op_t::INSERT, denc_coll_t(coll), bits});
}
void update(coll_t coll, uint32_t bits) {
buffer.push_back(delta_t{delta_t::op_t::UPDATE, denc_coll_t(coll), bits});
}
void remove(coll_t coll) {
buffer.push_back(delta_t{delta_t::op_t::REMOVE, denc_coll_t(coll), 0});
}
void replay(coll_map_t &l) {
for (auto &i: buffer) {
i.replay(l);
}
}
void clear() { buffer.clear(); }
DENC(delta_buffer_t, v, p) {
DENC_START(1, 1, p);
denc(v.buffer, p);
DENC_FINISH(p);
}
};
}
WRITE_CLASS_DENC(crimson::os::seastore::collection_manager::delta_buffer_t)
namespace crimson::os::seastore::collection_manager {
struct CollectionNode
: LogicalCachedExtent {
using CollectionNodeRef = TCachedExtentRef<CollectionNode>;
bool loaded = false;
template <typename... T>
CollectionNode(T&&... t)
: LogicalCachedExtent(std::forward<T>(t)...) {}
static constexpr extent_types_t type = extent_types_t::COLL_BLOCK;
coll_map_t decoded;
delta_buffer_t delta_buffer;
CachedExtentRef duplicate_for_write(Transaction&) final {
assert(delta_buffer.empty());
return CachedExtentRef(new CollectionNode(*this));
}
delta_buffer_t *maybe_get_delta_buffer() {
return is_mutation_pending() ? &delta_buffer : nullptr;
}
using list_iertr = CollectionManager::list_iertr;
using list_ret = CollectionManager::list_ret;
list_ret list();
enum class create_result_t : uint8_t {
SUCCESS,
OVERFLOW
};
using create_iertr = CollectionManager::create_iertr;
using create_ret = create_iertr::future<create_result_t>;
create_ret create(coll_context_t cc, coll_t coll, unsigned bits);
using remove_iertr = CollectionManager::remove_iertr;
using remove_ret = CollectionManager::remove_ret;
remove_ret remove(coll_context_t cc, coll_t coll);
using update_iertr = CollectionManager::update_iertr;
using update_ret = CollectionManager::update_ret;
update_ret update(coll_context_t cc, coll_t coll, unsigned bits);
void read_to_local() {
if (loaded) return;
bufferlist bl;
bl.append(get_bptr());
auto iter = bl.cbegin();
decode((base_coll_map_t&)decoded, iter);
loaded = true;
}
void copy_to_node() {
bufferlist bl;
encode((base_coll_map_t&)decoded, bl);
auto iter = bl.begin();
auto size = encoded_sizeof((base_coll_map_t&)decoded);
assert(size <= get_bptr().length());
get_bptr().zero();
iter.copy(size, get_bptr().c_str());
}
ceph::bufferlist get_delta() final {
assert(!delta_buffer.empty());
ceph::bufferlist bl;
encode(delta_buffer, bl);
delta_buffer.clear();
return bl;
}
void apply_delta(const ceph::bufferlist &bl) final {
assert(bl.length());
delta_buffer_t buffer;
auto bptr = bl.begin();
decode(buffer, bptr);
buffer.replay(decoded);
copy_to_node();
}
static constexpr extent_types_t TYPE = extent_types_t::COLL_BLOCK;
extent_types_t get_type() const final {
return TYPE;
}
std::ostream &print_detail_l(std::ostream &out) const final;
};
using CollectionNodeRef = CollectionNode::CollectionNodeRef;
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::collection_manager::CollectionNode> : fmt::ostream_formatter {};
#endif
| 4,706 | 24.171123 | 121 | h |
null | ceph-main/src/crimson/os/seastore/collection_manager/flat_collection_manager.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <string.h>
#include "crimson/common/log.h"
#include "include/buffer.h"
#include "include/stringify.h"
#include "crimson/os/seastore/collection_manager/flat_collection_manager.h"
#include "crimson/os/seastore/collection_manager/collection_flat_node.h"
namespace {
seastar::logger& logger() {
return crimson::get_logger(ceph_subsys_seastore);
}
}
namespace crimson::os::seastore::collection_manager {
constexpr static extent_len_t MIN_FLAT_BLOCK_SIZE = 4<<10;
[[maybe_unused]] constexpr static extent_len_t MAX_FLAT_BLOCK_SIZE = 4<<20;
FlatCollectionManager::FlatCollectionManager(
TransactionManager &tm)
: tm(tm) {}
FlatCollectionManager::mkfs_ret
FlatCollectionManager::mkfs(Transaction &t)
{
logger().debug("FlatCollectionManager: {}", __func__);
return tm.alloc_extent<CollectionNode>(
t, L_ADDR_MIN, MIN_FLAT_BLOCK_SIZE
).si_then([](auto&& root_extent) {
coll_root_t coll_root = coll_root_t(
root_extent->get_laddr(),
MIN_FLAT_BLOCK_SIZE
);
return mkfs_iertr::make_ready_future<coll_root_t>(coll_root);
});
}
FlatCollectionManager::get_root_ret
FlatCollectionManager::get_coll_root(const coll_root_t &coll_root, Transaction &t)
{
logger().debug("FlatCollectionManager: {}", __func__);
assert(coll_root.get_location() != L_ADDR_NULL);
auto cc = get_coll_context(t);
return cc.tm.read_extent<CollectionNode>(
cc.t,
coll_root.get_location(),
coll_root.get_size()
).si_then([](auto&& e) {
return get_root_iertr::make_ready_future<CollectionNodeRef>(std::move(e));
});
}
FlatCollectionManager::create_ret
FlatCollectionManager::create(coll_root_t &coll_root, Transaction &t,
coll_t cid, coll_info_t info)
{
logger().debug("FlatCollectionManager: {}", __func__);
return get_coll_root(coll_root, t
).si_then([=, this, &coll_root, &t] (auto &&extent) {
return extent->create(
get_coll_context(t), cid, info.split_bits
).si_then([=, this, &coll_root, &t] (auto ret) {
switch (ret) {
case CollectionNode::create_result_t::OVERFLOW: {
logger().debug("FlatCollectionManager: {} overflow!", __func__);
auto new_size = coll_root.get_size() * 2; // double each time
// TODO return error probably, but such a nonsensically large number of
// collections would create a ton of other problems as well
assert(new_size < MAX_FLAT_BLOCK_SIZE);
return tm.alloc_extent<CollectionNode>(
t, L_ADDR_MIN, new_size
).si_then([=, this, &coll_root, &t] (auto &&root_extent) {
coll_root.update(root_extent->get_laddr(), root_extent->get_length());
root_extent->decoded = extent->decoded;
root_extent->loaded = true;
return root_extent->create(
get_coll_context(t), cid, info.split_bits
).si_then([=, this, &t](auto result) {
assert(result == CollectionNode::create_result_t::SUCCESS);
return tm.dec_ref(t, extent->get_laddr());
}).si_then([] (auto) {
return create_iertr::make_ready_future<>();
});
});
}
case CollectionNode::create_result_t::SUCCESS: {
return create_iertr::make_ready_future<>();
}
}
__builtin_unreachable();
});
});
}
FlatCollectionManager::list_ret
FlatCollectionManager::list(const coll_root_t &coll_root, Transaction &t)
{
logger().debug("FlatCollectionManager: {}", __func__);
return get_coll_root(coll_root, t)
.si_then([] (auto extent) {
return extent->list();
});
}
FlatCollectionManager::update_ret
FlatCollectionManager::update(const coll_root_t &coll_root, Transaction &t,
coll_t cid, coll_info_t info)
{
logger().debug("FlatCollectionManager: {}", __func__);
return get_coll_root(coll_root, t)
.si_then([this, &t, cid, info] (auto extent) {
return extent->update(get_coll_context(t), cid, info.split_bits);
});
}
FlatCollectionManager::remove_ret
FlatCollectionManager::remove(const coll_root_t &coll_root, Transaction &t,
coll_t cid )
{
logger().debug("FlatCollectionManager: {}", __func__);
return get_coll_root(coll_root, t).si_then([this, &t, cid] (auto extent) {
return extent->remove(get_coll_context(t), cid);
});
}
}
| 4,328 | 31.066667 | 82 | cc |
null | ceph-main/src/crimson/os/seastore/collection_manager/flat_collection_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "include/ceph_assert.h"
#include "crimson/os/seastore/collection_manager.h"
#include "crimson/os/seastore/collection_manager/collection_flat_node.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/transaction_manager.h"
namespace crimson::os::seastore::collection_manager {
class FlatCollectionManager : public CollectionManager {
TransactionManager &tm;
coll_context_t get_coll_context(Transaction &t) {
return coll_context_t{tm, t};
}
using get_root_iertr = base_iertr;
using get_root_ret = get_root_iertr::future<CollectionNodeRef>;
get_root_ret get_coll_root(const coll_root_t &coll_root, Transaction &t);
public:
explicit FlatCollectionManager(TransactionManager &tm);
mkfs_ret mkfs(Transaction &t) final;
create_ret create(coll_root_t &coll_root, Transaction &t, coll_t cid,
coll_info_t info) final;
list_ret list(const coll_root_t &coll_root, Transaction &t) final;
remove_ret remove(const coll_root_t &coll_root, Transaction &t, coll_t cid) final;
update_ret update(const coll_root_t &coll_root, Transaction &t, coll_t cid, coll_info_t info) final;
};
using FlatCollectionManagerRef = std::unique_ptr<FlatCollectionManager>;
}
| 1,346 | 31.071429 | 102 | h |
null | ceph-main/src/crimson/os/seastore/journal/circular_bounded_journal.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <boost/iterator/counting_iterator.hpp>
#include "crimson/common/errorator-loop.h"
#include "include/intarith.h"
#include "crimson/os/seastore/async_cleaner.h"
#include "crimson/os/seastore/journal/circular_bounded_journal.h"
#include "crimson/os/seastore/logging.h"
#include "crimson/os/seastore/journal/circular_journal_space.h"
SET_SUBSYS(seastore_journal);
namespace crimson::os::seastore::journal {
CircularBoundedJournal::CircularBoundedJournal(
JournalTrimmer &trimmer,
RBMDevice* device,
const std::string &path)
: trimmer(trimmer), path(path),
cjs(device),
record_submitter(crimson::common::get_conf<uint64_t>(
"seastore_journal_iodepth_limit"),
crimson::common::get_conf<uint64_t>(
"seastore_journal_batch_capacity"),
crimson::common::get_conf<Option::size_t>(
"seastore_journal_batch_flush_size"),
crimson::common::get_conf<double>(
"seastore_journal_batch_preferred_fullness"),
cjs)
{}
CircularBoundedJournal::open_for_mkfs_ret
CircularBoundedJournal::open_for_mkfs()
{
return record_submitter.open(true
).safe_then([this](auto ret) {
record_submitter.update_committed_to(get_written_to());
return open_for_mkfs_ret(
open_for_mkfs_ertr::ready_future_marker{},
get_written_to());
});
}
CircularBoundedJournal::open_for_mount_ret
CircularBoundedJournal::open_for_mount()
{
return record_submitter.open(false
).safe_then([this](auto ret) {
record_submitter.update_committed_to(get_written_to());
return open_for_mount_ret(
open_for_mount_ertr::ready_future_marker{},
get_written_to());
});
}
CircularBoundedJournal::close_ertr::future<> CircularBoundedJournal::close()
{
return record_submitter.close();
}
CircularBoundedJournal::submit_record_ret
CircularBoundedJournal::submit_record(
record_t &&record,
OrderingHandle &handle)
{
LOG_PREFIX(CircularBoundedJournal::submit_record);
DEBUG("H{} {} start ...", (void*)&handle, record);
assert(write_pipeline);
return do_submit_record(std::move(record), handle);
}
CircularBoundedJournal::submit_record_ret
CircularBoundedJournal::do_submit_record(
record_t &&record,
OrderingHandle &handle)
{
LOG_PREFIX(CircularBoundedJournal::do_submit_record);
if (!record_submitter.is_available()) {
DEBUG("H{} wait ...", (void*)&handle);
return record_submitter.wait_available(
).safe_then([this, record=std::move(record), &handle]() mutable {
return do_submit_record(std::move(record), handle);
});
}
auto action = record_submitter.check_action(record.size);
if (action == RecordSubmitter::action_t::ROLL) {
return record_submitter.roll_segment(
).safe_then([this, record=std::move(record), &handle]() mutable {
return do_submit_record(std::move(record), handle);
});
}
DEBUG("H{} submit {} ...",
(void*)&handle,
action == RecordSubmitter::action_t::SUBMIT_FULL ?
"FULL" : "NOT_FULL");
auto submit_fut = record_submitter.submit(std::move(record));
return handle.enter(write_pipeline->device_submission
).then([submit_fut=std::move(submit_fut)]() mutable {
return std::move(submit_fut);
}).safe_then([FNAME, this, &handle](record_locator_t result) {
return handle.enter(write_pipeline->finalize
).then([FNAME, this, result, &handle] {
DEBUG("H{} finish with {}", (void*)&handle, result);
auto new_committed_to = result.write_result.get_end_seq();
record_submitter.update_committed_to(new_committed_to);
return result;
});
});
}
Journal::replay_ret CircularBoundedJournal::scan_valid_record_delta(
cbj_delta_handler_t &&delta_handler, journal_seq_t tail)
{
LOG_PREFIX(CircularBoundedJournal::scan_valid_record_delta);
return seastar::do_with(
bool(false),
rbm_abs_addr(get_rbm_addr(tail)),
std::move(delta_handler),
segment_seq_t(NULL_SEG_SEQ),
[this, FNAME](auto &is_rolled, auto &cursor_addr, auto &d_handler, auto &expected_seq) {
return crimson::repeat(
[this, &is_rolled, &cursor_addr, &d_handler, &expected_seq, FNAME]() mutable
-> replay_ertr::future<seastar::stop_iteration> {
paddr_t record_paddr = convert_abs_addr_to_paddr(
cursor_addr,
get_device_id());
return read_record(record_paddr, expected_seq
).safe_then([this, &is_rolled, &cursor_addr, &d_handler, &expected_seq, FNAME](auto ret)
-> replay_ertr::future<seastar::stop_iteration> {
if (!ret.has_value()) {
if (expected_seq == NULL_SEG_SEQ || is_rolled) {
DEBUG("no more records, stop replaying");
return replay_ertr::make_ready_future<
seastar::stop_iteration>(seastar::stop_iteration::yes);
} else {
cursor_addr = get_records_start();
++expected_seq;
is_rolled = true;
return replay_ertr::make_ready_future<
seastar::stop_iteration>(seastar::stop_iteration::no);
}
}
auto [r_header, bl] = *ret;
bufferlist mdbuf;
mdbuf.substr_of(bl, 0, r_header.mdlength);
paddr_t record_block_base = paddr_t::make_blk_paddr(
get_device_id(), cursor_addr + r_header.mdlength);
auto maybe_record_deltas_list = try_decode_deltas(
r_header, mdbuf, record_block_base);
if (!maybe_record_deltas_list) {
// This should be impossible, we did check the crc on the mdbuf
ERROR("unable to decode deltas for record {} at {}",
r_header, record_block_base);
return crimson::ct_error::input_output_error::make();
}
DEBUG("{} at {}", r_header, cursor_addr);
auto write_result = write_result_t{
r_header.committed_to,
bl.length()
};
if (expected_seq == NULL_SEG_SEQ) {
expected_seq = r_header.committed_to.segment_seq;
} else {
assert(expected_seq == r_header.committed_to.segment_seq);
}
cursor_addr += bl.length();
if (cursor_addr >= get_journal_end()) {
assert(cursor_addr == get_journal_end());
cursor_addr = get_records_start();
++expected_seq;
paddr_t addr = convert_abs_addr_to_paddr(
cursor_addr,
get_device_id());
write_result.start_seq.offset = addr;
write_result.start_seq.segment_seq = expected_seq;
is_rolled = true;
}
paddr_t addr = convert_abs_addr_to_paddr(
cursor_addr,
get_device_id());
set_written_to(
journal_seq_t{expected_seq, addr});
return seastar::do_with(
std::move(*maybe_record_deltas_list),
[write_result,
&d_handler,
FNAME](auto& record_deltas_list) {
return crimson::do_for_each(
record_deltas_list,
[write_result,
&d_handler, FNAME](record_deltas_t& record_deltas) {
auto locator = record_locator_t{
record_deltas.record_block_base,
write_result
};
DEBUG("processing {} deltas at block_base {}",
record_deltas.deltas.size(),
locator);
return crimson::do_for_each(
record_deltas.deltas,
[locator,
&d_handler](auto& p) {
auto& modify_time = p.first;
auto& delta = p.second;
return d_handler(
locator,
delta,
modify_time).discard_result();
});
}).safe_then([]() {
return replay_ertr::make_ready_future<
seastar::stop_iteration>(seastar::stop_iteration::no);
});
});
});
});
});
}
Journal::replay_ret CircularBoundedJournal::replay(
delta_handler_t &&delta_handler)
{
/*
* read records from the last applied record prior to written_to, and replay them
*/
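// Note (added for clarity): the replay below runs scan_valid_record_delta()
// twice. The first pass records, for each extent retired by an ALLOC_INFO
// CLEAR delta, the sequence at which it was retired; the second pass then
// hands a delta to the caller's handler only if it is not older than that
// retirement.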
LOG_PREFIX(CircularBoundedJournal::replay);
return cjs.read_header(
).handle_error(
open_for_mount_ertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error read_header"
}).safe_then([this, FNAME, delta_handler=std::move(delta_handler)](auto p)
mutable {
auto &[head, bl] = *p;
cjs.set_cbj_header(head);
DEBUG("header : {}", cjs.get_cbj_header());
cjs.set_initialized(true);
return seastar::do_with(
std::move(delta_handler),
std::map<paddr_t, journal_seq_t>(),
[this](auto &d_handler, auto &map) {
auto build_paddr_seq_map = [&map](
const auto &offsets,
const auto &e,
sea_time_point modify_time)
{
if (e.type == extent_types_t::ALLOC_INFO) {
alloc_delta_t alloc_delta;
decode(alloc_delta, e.bl);
if (alloc_delta.op == alloc_delta_t::op_types_t::CLEAR) {
for (auto &alloc_blk : alloc_delta.alloc_blk_ranges) {
map[alloc_blk.paddr] = offsets.write_result.start_seq;
}
}
}
return replay_ertr::make_ready_future<bool>(true);
};
auto tail = get_dirty_tail() <= get_alloc_tail() ?
get_dirty_tail() : get_alloc_tail();
set_written_to(tail);
// The first pass to build the paddr->journal_seq_t map
// from extent allocations
return scan_valid_record_delta(std::move(build_paddr_seq_map), tail
).safe_then([this, &map, &d_handler, tail]() {
auto call_d_handler_if_valid = [this, &map, &d_handler](
const auto &offsets,
const auto &e,
sea_time_point modify_time)
{
if (map.find(e.paddr) == map.end() ||
map[e.paddr] <= offsets.write_result.start_seq) {
return d_handler(
offsets,
e,
get_dirty_tail(),
get_alloc_tail(),
modify_time
);
}
return replay_ertr::make_ready_future<bool>(true);
};
// The second pass to replay deltas
return scan_valid_record_delta(std::move(call_d_handler_if_valid), tail);
});
}).safe_then([this]() {
record_submitter.update_committed_to(get_written_to());
trimmer.update_journal_tails(
get_dirty_tail(),
get_alloc_tail());
});
});
}
CircularBoundedJournal::read_record_ret
CircularBoundedJournal::return_record(record_group_header_t& header, bufferlist bl)
{
LOG_PREFIX(CircularBoundedJournal::return_record);
DEBUG("record size {}", bl.length());
assert(bl.length() == header.mdlength + header.dlength);
bufferlist md_bl, data_bl;
md_bl.substr_of(bl, 0, header.mdlength);
data_bl.substr_of(bl, header.mdlength, header.dlength);
if (validate_records_metadata(md_bl) &&
validate_records_data(header, data_bl)) {
return read_record_ret(
read_record_ertr::ready_future_marker{},
std::make_pair(header, std::move(bl)));
} else {
DEBUG("invalid matadata");
return read_record_ret(
read_record_ertr::ready_future_marker{},
std::nullopt);
}
}
CircularBoundedJournal::read_record_ret
CircularBoundedJournal::read_record(paddr_t off, segment_seq_t expected_seq)
{
LOG_PREFIX(CircularBoundedJournal::read_record);
rbm_abs_addr addr = convert_paddr_to_abs_addr(off);
auto read_length = get_block_size();
assert(addr + read_length <= get_journal_end());
DEBUG("reading record from abs addr {} read length {}", addr, read_length);
auto bptr = bufferptr(ceph::buffer::create_page_aligned(read_length));
return cjs.read(addr, bptr
).safe_then([this, addr, bptr, expected_seq, FNAME]() mutable
-> read_record_ret {
record_group_header_t h;
bufferlist bl;
bl.append(bptr);
auto bp = bl.cbegin();
try {
decode(h, bp);
} catch (ceph::buffer::error &e) {
return read_record_ret(
read_record_ertr::ready_future_marker{},
std::nullopt);
}
if (h.mdlength < get_block_size() ||
h.mdlength % get_block_size() != 0 ||
h.dlength % get_block_size() != 0 ||
addr + h.mdlength + h.dlength > get_journal_end() ||
h.committed_to.segment_seq == NULL_SEG_SEQ ||
(expected_seq != NULL_SEG_SEQ &&
h.committed_to.segment_seq != expected_seq)) {
return read_record_ret(
read_record_ertr::ready_future_marker{},
std::nullopt);
}
auto record_size = h.mdlength + h.dlength;
if (record_size > get_block_size()) {
auto next_addr = addr + get_block_size();
auto next_length = record_size - get_block_size();
auto next_bptr = bufferptr(ceph::buffer::create_page_aligned(next_length));
DEBUG("reading record part 2 from abs addr {} read length {}",
next_addr, next_length);
return cjs.read(next_addr, next_bptr
).safe_then([this, h, next_bptr=std::move(next_bptr), bl=std::move(bl)]() mutable {
bl.append(next_bptr);
return return_record(h, bl);
});
} else {
assert(record_size == get_block_size());
return return_record(h, bl);
}
});
}
seastar::future<> CircularBoundedJournal::finish_commit(transaction_type_t type) {
if (is_trim_transaction(type)) {
return update_journal_tail(
trimmer.get_dirty_tail(),
trimmer.get_alloc_tail());
}
return seastar::now();
}
}
| 12,536 | 31.819372 | 94 | cc |
null | ceph-main/src/crimson/os/seastore/journal/circular_bounded_journal.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/common/log.h"
#include <boost/intrusive_ptr.hpp>
#include <seastar/core/future.hh>
#include "include/ceph_assert.h"
#include "include/buffer.h"
#include "include/denc.h"
#include "crimson/osd/exceptions.h"
#include "crimson/os/seastore/journal.h"
#include "include/uuid.h"
#include "crimson/os/seastore/random_block_manager.h"
#include "crimson/os/seastore/random_block_manager/rbm_device.h"
#include <list>
#include "crimson/os/seastore/journal/record_submitter.h"
#include "crimson/os/seastore/journal/circular_journal_space.h"
namespace crimson::os::seastore::journal {
using RBMDevice = random_block_device::RBMDevice;
/**
* CircularBoundedJournal
*
*
* CircularBoundedJournal (CBJournal) is a journal that works like a circular
* queue. With CBJournal, Seastore appends records that are small (most likely
* metadata), at which point the head (written_to) is moved forward. Then,
* eventually, Seastore applies the records in CBJournal to RBM (TODO).
*
* - Commit time
* After submit_record is done, written_to (an in-memory value) is increased
* ---written_to represents where the next record will be appended. Note that
* applied_to is not changed here.
*
* - Replay time
* At replay time, CBJournal replays the records in CBJournal by reading them
* from dirty_tail. It then examines the records for validity one by one, and
* written_to is recovered once the last valid record is found. Note that
* applied_to is stored permanently when the apply work---applying the records
* in CBJournal to RBM---is done by CBJournal (TODO).
*
* TODO: apply records from CircularBoundedJournal to RandomBlockManager
*
*/
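/*
 * Illustrative sketch (not part of the original source): how a caller might
 * drive the commit path described above. `journal` and `handle` are assumed
 * to be provided by the surrounding code; error handling is omitted.
 *
 *   record_t record;
 *   // ... fill record.extents / record.deltas ...
 *   return journal.submit_record(std::move(record), handle
 *   ).safe_then([](record_locator_t loc) {
 *     // the in-memory written_to now points past this record group;
 *     // loc.write_result describes where it landed
 *   });
 */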
constexpr uint64_t DEFAULT_BLOCK_SIZE = 4096;
class CircularBoundedJournal : public Journal {
public:
CircularBoundedJournal(
JournalTrimmer &trimmer, RBMDevice* device, const std::string &path);
~CircularBoundedJournal() {}
JournalTrimmer &get_trimmer() final {
return trimmer;
}
open_for_mkfs_ret open_for_mkfs() final;
open_for_mount_ret open_for_mount() final;
close_ertr::future<> close() final;
journal_type_t get_type() final {
return journal_type_t::RANDOM_BLOCK;
}
submit_record_ret submit_record(
record_t &&record,
OrderingHandle &handle
) final;
seastar::future<> flush(
OrderingHandle &handle
) final {
// TODO
return seastar::now();
}
replay_ret replay(delta_handler_t &&delta_handler) final;
rbm_abs_addr get_rbm_addr(journal_seq_t seq) const {
return convert_paddr_to_abs_addr(seq.offset);
}
/**
*
* CircularBoundedJournal write
*
* NVMe will support a large block write (< 512KB) with the atomic write unit command.
* With this command, we expect that most of the incoming data can be stored
* with a single write call, which has lower overhead than the existing
* approach that uses a combination of system calls such as write() and sync().
*
*/
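/*
 * Illustrative sketch (not part of the original source) of the write path the
 * comment above refers to, assuming `group` is a record_group_t and
 * `committed_to`/`nonce` come from the record submitter:
 *
 *   ceph::bufferlist bl = encode_records(group, committed_to, nonce);
 *   return cjs.write(std::move(bl));  // one device-level write per group
 */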
seastar::future<> update_journal_tail(
journal_seq_t dirty,
journal_seq_t alloc) {
return cjs.update_journal_tail(dirty, alloc);
}
journal_seq_t get_dirty_tail() const {
return cjs.get_dirty_tail();
}
journal_seq_t get_alloc_tail() const {
return cjs.get_alloc_tail();
}
using read_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::enoent,
crimson::ct_error::erange>;
using read_record_ertr = read_ertr;
using read_record_ret = read_record_ertr::future<
std::optional<std::pair<record_group_header_t, bufferlist>>
>;
/*
* read_record
*
* read record from given address
*
* @param offset paddr_t to read from
* @param expected_seq
*
*/
read_record_ret read_record(paddr_t offset, segment_seq_t expected_seq);
read_record_ret return_record(record_group_header_t& header, bufferlist bl);
void set_write_pipeline(WritePipeline *_write_pipeline) final {
write_pipeline = _write_pipeline;
}
device_id_t get_device_id() const {
return cjs.get_device_id();
}
extent_len_t get_block_size() const {
return cjs.get_block_size();
}
rbm_abs_addr get_journal_end() const {
return cjs.get_journal_end();
}
void set_written_to(journal_seq_t seq) {
cjs.set_written_to(seq);
}
journal_seq_t get_written_to() {
return cjs.get_written_to();
}
rbm_abs_addr get_records_start() const {
return cjs.get_records_start();
}
seastar::future<> finish_commit(transaction_type_t type) final;
using cbj_delta_handler_t = std::function<
replay_ertr::future<bool>(
const record_locator_t&,
const delta_info_t&,
sea_time_point modify_time)>;
Journal::replay_ret scan_valid_record_delta(
cbj_delta_handler_t &&delta_handler,
journal_seq_t tail);
submit_record_ret do_submit_record(record_t &&record, OrderingHandle &handle);
// Test interfaces
CircularJournalSpace& get_cjs() {
return cjs;
}
private:
JournalTrimmer &trimmer;
std::string path;
WritePipeline *write_pipeline = nullptr;
/**
* initialized
*
* true after open_device_read_header, set to false in close().
* Indicates that the device is open and the in-memory header is valid.
*/
bool initialized = false;
// start address where the newest record will be written
// should be in range [get_records_start(), get_journal_end())
// written_to.segment_seq is the circulation seq used to track
// the sequence of written records
CircularJournalSpace cjs;
RecordSubmitter record_submitter;
};
}
| 5,697 | 26.133333 | 84 | h |
null | ceph-main/src/crimson/os/seastore/journal/circular_journal_space.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#include "circular_journal_space.h"
#include <fmt/format.h>
#include <fmt/os.h>
#include "crimson/os/seastore/logging.h"
#include "crimson/os/seastore/async_cleaner.h"
#include "crimson/os/seastore/journal/circular_bounded_journal.h"
SET_SUBSYS(seastore_journal);
namespace crimson::os::seastore::journal {
std::ostream &operator<<(std::ostream &out,
const CircularJournalSpace::cbj_header_t &header)
{
return out << "cbj_header_t("
<< ", dirty_tail=" << header.dirty_tail
<< ", alloc_tail=" << header.alloc_tail
<< ")";
}
CircularJournalSpace::CircularJournalSpace(RBMDevice * device) : device(device) {}
bool CircularJournalSpace::needs_roll(std::size_t length) const {
if (length + get_rbm_addr(get_written_to()) > get_journal_end()) {
return true;
}
return false;
}
extent_len_t CircularJournalSpace::get_block_size() const {
return device->get_block_size();
}
CircularJournalSpace::roll_ertr::future<> CircularJournalSpace::roll() {
paddr_t paddr = convert_abs_addr_to_paddr(
get_records_start(),
get_device_id());
auto seq = get_written_to();
set_written_to(
journal_seq_t{++seq.segment_seq, paddr});
return roll_ertr::now();
}
CircularJournalSpace::write_ret
CircularJournalSpace::write(ceph::bufferlist&& to_write) {
LOG_PREFIX(CircularJournalSpace::write);
assert(get_written_to().segment_seq != NULL_SEG_SEQ);
auto encoded_size = to_write.length();
if (encoded_size > get_records_available_size()) {
ceph_abort("should be impossible with EPM reservation");
}
assert(encoded_size + get_rbm_addr(get_written_to())
< get_journal_end());
journal_seq_t j_seq = get_written_to();
auto target = get_rbm_addr(get_written_to());
auto new_written_to = target + encoded_size;
assert(new_written_to < get_journal_end());
paddr_t paddr = convert_abs_addr_to_paddr(
new_written_to,
get_device_id());
set_written_to(
journal_seq_t{get_written_to().segment_seq, paddr});
DEBUG("{}, target {}", to_write.length(), target);
auto write_result = write_result_t{
j_seq,
encoded_size
};
return device_write_bl(target, to_write
).safe_then([this, target,
length=encoded_size,
write_result,
FNAME] {
DEBUG("commit target {} used_size {} written length {}",
target, get_records_used_size(), length);
return write_result;
}).handle_error(
base_ertr::pass_further{},
crimson::ct_error::assert_all{ "Invalid error" }
);
}
CircularJournalSpace::open_ret CircularJournalSpace::open(bool is_mkfs) {
std::ostringstream oss;
oss << device_id_printer_t{get_device_id()};
print_name = oss.str();
if (is_mkfs) {
LOG_PREFIX(CircularJournalSpace::open);
assert(device);
ceph::bufferlist bl;
CircularJournalSpace::cbj_header_t head;
assert(device->get_journal_size());
head.dirty_tail =
journal_seq_t{0,
convert_abs_addr_to_paddr(
get_records_start(),
device->get_device_id())};
head.alloc_tail = head.dirty_tail;
encode(head, bl);
header = head;
set_written_to(head.dirty_tail);
initialized = true;
DEBUG(
"initialize header block in CircularJournalSpace length {}",
bl.length());
return write_header(
).safe_then([this]() {
return open_ret(
open_ertr::ready_future_marker{},
get_written_to());
}).handle_error(
open_ertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error write_header"
}
);
}
ceph_assert(initialized);
if (written_to.segment_seq == NULL_SEG_SEQ) {
written_to.segment_seq = 0;
}
return open_ret(
open_ertr::ready_future_marker{},
get_written_to());
}
ceph::bufferlist CircularJournalSpace::encode_header()
{
bufferlist bl;
encode(header, bl);
auto header_crc_filler = bl.append_hole(sizeof(checksum_t));
auto bliter = bl.cbegin();
auto header_crc = bliter.crc32c(
ceph::encoded_sizeof_bounded<cbj_header_t>(),
-1);
ceph_le32 header_crc_le;
header_crc_le = header_crc;
header_crc_filler.copy_in(
sizeof(checksum_t),
reinterpret_cast<const char *>(&header_crc_le));
return bl;
}
CircularJournalSpace::write_ertr::future<> CircularJournalSpace::device_write_bl(
rbm_abs_addr offset, bufferlist &bl)
{
LOG_PREFIX(CircularJournalSpace::device_write_bl);
auto length = bl.length();
if (offset + length > get_journal_end()) {
return crimson::ct_error::erange::make();
}
DEBUG(
"overwrite in CircularJournalSpace, offset {}, length {}",
offset,
length);
return device->writev(offset, bl
).handle_error(
write_ertr::pass_further{},
crimson::ct_error::assert_all{ "Invalid error device->write" }
);
}
CircularJournalSpace::read_header_ret
CircularJournalSpace::read_header()
{
LOG_PREFIX(CircularJournalSpace::read_header);
assert(device);
auto bptr = bufferptr(ceph::buffer::create_page_aligned(
device->get_block_size()));
DEBUG("reading {}", device->get_shard_journal_start());
return device->read(device->get_shard_journal_start(), bptr
).safe_then([bptr, FNAME]() mutable
-> read_header_ret {
bufferlist bl;
bl.append(bptr);
auto bp = bl.cbegin();
cbj_header_t cbj_header;
try {
decode(cbj_header, bp);
} catch (ceph::buffer::error &e) {
ERROR("unable to read header block");
return crimson::ct_error::enoent::make();
}
auto bliter = bl.cbegin();
auto test_crc = bliter.crc32c(
ceph::encoded_sizeof_bounded<cbj_header_t>(),
-1);
ceph_le32 recorded_crc_le;
decode(recorded_crc_le, bliter);
uint32_t recorded_crc = recorded_crc_le;
if (test_crc != recorded_crc) {
ERROR("error, header crc mismatch.");
return read_header_ret(
read_header_ertr::ready_future_marker{},
std::nullopt);
}
return read_header_ret(
read_header_ertr::ready_future_marker{},
std::make_pair(cbj_header, bl)
);
});
}
CircularJournalSpace::write_ertr::future<>
CircularJournalSpace::write_header()
{
LOG_PREFIX(CircularJournalSpace::write_header);
ceph::bufferlist bl = encode_header();
ceph_assert(bl.length() <= get_block_size());
DEBUG(
"sync header of CircularJournalSpace, length {}",
bl.length());
assert(device);
auto iter = bl.begin();
assert(bl.length() < get_block_size());
bufferptr bp = bufferptr(ceph::buffer::create_page_aligned(get_block_size()));
iter.copy(bl.length(), bp.c_str());
return device->write(device->get_shard_journal_start(), std::move(bp)
).handle_error(
write_ertr::pass_further{},
crimson::ct_error::assert_all{ "Invalid error device->write" }
);
}
}
| 6,751 | 27.978541 | 82 | cc |
null | ceph-main/src/crimson/os/seastore/journal/circular_journal_space.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <optional>
#include <seastar/core/circular_buffer.hh>
#include <seastar/core/metrics.hh>
#include <seastar/core/shared_future.hh>
#include "include/buffer.h"
#include "crimson/common/errorator.h"
#include "crimson/os/seastore/journal.h"
#include "crimson/os/seastore/random_block_manager.h"
#include "crimson/os/seastore/random_block_manager/rbm_device.h"
#include "crimson/os/seastore/journal/record_submitter.h"
#include "crimson/os/seastore/async_cleaner.h"
namespace crimson::os::seastore {
class SegmentProvider;
class JournalTrimmer;
}
namespace crimson::os::seastore::journal {
class CircularBoundedJournal;
class CircularJournalSpace : public JournalAllocator {
public:
const std::string& get_name() const final {
return print_name;
}
extent_len_t get_block_size() const final;
bool can_write() const final {
return (device != nullptr);
}
segment_nonce_t get_nonce() const final {
return 0;
}
bool needs_roll(std::size_t length) const final;
roll_ertr::future<> roll() final;
write_ret write(ceph::bufferlist&& to_write) final;
void update_modify_time(record_t& record) final {}
close_ertr::future<> close() final {
return write_header(
).safe_then([this]() -> close_ertr::future<> {
initialized = false;
return close_ertr::now();
}).handle_error(
Journal::open_for_mount_ertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error write_header"
}
);
}
open_ret open(bool is_mkfs) final;
public:
CircularJournalSpace(RBMDevice * device);
struct cbj_header_t;
using write_ertr = Journal::submit_record_ertr;
/*
* device_write_bl
*
* @param offset device address to write to
* @param bl bufferlist to write
*
*/
write_ertr::future<> device_write_bl(rbm_abs_addr offset, ceph::bufferlist &bl);
using read_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::enoent,
crimson::ct_error::erange>;
using read_header_ertr = read_ertr;
using read_header_ret = read_header_ertr::future<
std::optional<std::pair<cbj_header_t, bufferlist>>
>;
/*
* read_header
*
* read the header block from the start of the shard journal
*
*/
read_header_ret read_header();
ceph::bufferlist encode_header();
write_ertr::future<> write_header();
/**
* CircularBoundedJournal structure
*
* +-------------------------------------------------------+
* | header | record | record | record | record | ... |
* +-------------------------------------------------------+
* ^-----------block aligned-----------------^
* <----fixed---->
*/
struct cbj_header_t {
// start offset of CircularBoundedJournal in the device
journal_seq_t dirty_tail;
journal_seq_t alloc_tail;
DENC(cbj_header_t, v, p) {
DENC_START(1, 1, p);
denc(v.dirty_tail, p);
denc(v.alloc_tail, p);
DENC_FINISH(p);
}
};
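/*
 * Illustrative sketch (not part of the original source): the header above is
 * denc-encoded with a trailing crc32c and occupies the first block of the
 * journal space; records start one block later (see get_records_start()).
 *
 *   bufferlist bl;
 *   encode(header, bl);           // cbj_header_t payload
 *   // append crc32c over the bounded header encoding (see encode_header()),
 *   // pad into a block-sized bufferptr and write it at
 *   // device->get_shard_journal_start() (see write_header())
 */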
/**
*
* Write position for CircularBoundedJournal
*
* | written to rbm | written length to CircularBoundedJournal | new write |
* ----------------->------------------------------------------------>
* ^ ^
* applied_to written_to
*
*/
journal_seq_t get_written_to() const {
return written_to;
}
rbm_abs_addr get_rbm_addr(journal_seq_t seq) const {
return convert_paddr_to_abs_addr(seq.offset);
}
void set_written_to(journal_seq_t seq) {
rbm_abs_addr addr = convert_paddr_to_abs_addr(seq.offset);
assert(addr >= get_records_start());
assert(addr < get_journal_end());
written_to = seq;
}
device_id_t get_device_id() const {
return device->get_device_id();
}
journal_seq_t get_dirty_tail() const {
return header.dirty_tail;
}
journal_seq_t get_alloc_tail() const {
return header.alloc_tail;
}
/*
Size-related interfaces
+---------------------------------------------------------+
| header | record | record | record | record | ... |
+---------------------------------------------------------+
^ ^ ^
| | |
get_journal_start | get_journal_end
get_records_start
<-- get_records_total_size + block_size -->
<--------------- get_journal_size ------------------------>
*/
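/*
 * Illustrative worked example (not part of the original source), assuming
 * a 4 KiB block size and get_records_total_size() == 1 MiB, with offsets
 * measured from get_records_start():
 * - no wrap: written_to at +512 KiB, dirty_tail at +128 KiB
 *   => used = 512K - 128K = 384 KiB
 * - wrapped: written_to at +64 KiB, dirty_tail at +896 KiB
 *   => used = 64K + 1024K + 4K - 896K = 196 KiB
 * (matching the two branches of get_records_used_size() below)
 */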
size_t get_records_used_size() const {
auto rbm_written_to = get_rbm_addr(get_written_to());
auto rbm_tail = get_rbm_addr(get_dirty_tail());
return rbm_written_to >= rbm_tail ?
rbm_written_to - rbm_tail :
rbm_written_to + get_records_total_size() + get_block_size()
- rbm_tail;
}
size_t get_records_total_size() const {
assert(device);
// a block is for header and a block is reserved to denote the end
return device->get_journal_size() - (2 * get_block_size());
}
rbm_abs_addr get_records_start() const {
assert(device);
return device->get_shard_journal_start() + get_block_size();
}
size_t get_records_available_size() const {
return get_records_total_size() - get_records_used_size();
}
bool is_available_size(uint64_t size) {
auto rbm_written_to = get_rbm_addr(get_written_to());
auto rbm_tail = get_rbm_addr(get_dirty_tail());
if (rbm_written_to > rbm_tail &&
(get_journal_end() - rbm_written_to) < size &&
size > (get_records_used_size() -
(get_journal_end() - rbm_written_to))) {
return false;
}
return get_records_available_size() >= size;
}
rbm_abs_addr get_journal_end() const {
assert(device);
return device->get_shard_journal_start() + device->get_journal_size();
}
read_ertr::future<> read(
uint64_t offset,
bufferptr &bptr) {
assert(device);
return device->read(offset, bptr);
}
seastar::future<> update_journal_tail(
journal_seq_t dirty,
journal_seq_t alloc) {
header.dirty_tail = dirty;
header.alloc_tail = alloc;
return write_header(
).handle_error(
crimson::ct_error::assert_all{
"encountered invalid error in update_journal_tail"
});
}
void set_initialized(bool init) {
initialized = init;
}
void set_cbj_header(cbj_header_t& head) {
header = head;
}
cbj_header_t get_cbj_header() {
return header;
}
private:
std::string print_name;
cbj_header_t header;
RBMDevice* device;
journal_seq_t written_to;
bool initialized = false;
};
std::ostream &operator<<(std::ostream &out, const CircularJournalSpace::cbj_header_t &header);
}
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::journal::CircularJournalSpace::cbj_header_t)
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::journal::CircularJournalSpace::cbj_header_t> : fmt::ostream_formatter {};
#endif
| 7,178 | 26.611538 | 130 | h |
null | ceph-main/src/crimson/os/seastore/journal/record_submitter.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#include "record_submitter.h"
#include <fmt/format.h>
#include <fmt/os.h>
#include "crimson/os/seastore/logging.h"
#include "crimson/os/seastore/async_cleaner.h"
SET_SUBSYS(seastore_journal);
namespace crimson::os::seastore::journal {
RecordBatch::add_pending_ret
RecordBatch::add_pending(
const std::string& name,
record_t&& record,
extent_len_t block_size)
{
LOG_PREFIX(RecordBatch::add_pending);
auto new_size = get_encoded_length_after(record, block_size);
auto dlength_offset = pending.size.dlength;
TRACE("{} batches={}, write_size={}, dlength_offset={} ...",
name,
pending.get_size() + 1,
new_size.get_encoded_length(),
dlength_offset);
assert(state != state_t::SUBMITTING);
assert(evaluate_submit(record.size, block_size).submit_size == new_size);
pending.push_back(
std::move(record), block_size);
assert(pending.size == new_size);
if (state == state_t::EMPTY) {
assert(!io_promise.has_value());
io_promise = seastar::shared_promise<maybe_promise_result_t>();
} else {
assert(io_promise.has_value());
}
state = state_t::PENDING;
return io_promise->get_shared_future(
).then([dlength_offset, FNAME, &name
](auto maybe_promise_result) -> add_pending_ret {
if (!maybe_promise_result.has_value()) {
ERROR("{} write failed", name);
return crimson::ct_error::input_output_error::make();
}
auto write_result = maybe_promise_result->write_result;
auto submit_result = record_locator_t{
write_result.start_seq.offset.add_offset(
maybe_promise_result->mdlength + dlength_offset),
write_result
};
TRACE("{} write finish with {}", name, submit_result);
return add_pending_ret(
add_pending_ertr::ready_future_marker{},
submit_result);
});
}
std::pair<ceph::bufferlist, record_group_size_t>
RecordBatch::encode_batch(
const journal_seq_t& committed_to,
segment_nonce_t segment_nonce)
{
assert(state == state_t::PENDING);
assert(pending.get_size() > 0);
assert(io_promise.has_value());
state = state_t::SUBMITTING;
submitting_size = pending.get_size();
auto gsize = pending.size;
submitting_length = gsize.get_encoded_length();
submitting_mdlength = gsize.get_mdlength();
auto bl = encode_records(pending, committed_to, segment_nonce);
// Note: pending is cleared here
assert(bl.length() == submitting_length);
return std::make_pair(bl, gsize);
}
void RecordBatch::set_result(
maybe_result_t maybe_write_result)
{
maybe_promise_result_t result;
if (maybe_write_result.has_value()) {
assert(maybe_write_result->length == submitting_length);
result = promise_result_t{
*maybe_write_result,
submitting_mdlength
};
}
assert(state == state_t::SUBMITTING);
assert(io_promise.has_value());
state = state_t::EMPTY;
submitting_size = 0;
submitting_length = 0;
submitting_mdlength = 0;
io_promise->set_value(result);
io_promise.reset();
}
std::pair<ceph::bufferlist, record_group_size_t>
RecordBatch::submit_pending_fast(
record_t&& record,
extent_len_t block_size,
const journal_seq_t& committed_to,
segment_nonce_t segment_nonce)
{
auto new_size = get_encoded_length_after(record, block_size);
std::ignore = new_size;
assert(state == state_t::EMPTY);
assert(evaluate_submit(record.size, block_size).submit_size == new_size);
auto group = record_group_t(std::move(record), block_size);
auto size = group.size;
assert(size == new_size);
auto bl = encode_records(group, committed_to, segment_nonce);
assert(bl.length() == size.get_encoded_length());
return std::make_pair(std::move(bl), size);
}
RecordSubmitter::RecordSubmitter(
std::size_t io_depth,
std::size_t batch_capacity,
std::size_t batch_flush_size,
double preferred_fullness,
JournalAllocator& ja)
: io_depth_limit{io_depth},
preferred_fullness{preferred_fullness},
journal_allocator{ja},
batches(new RecordBatch[io_depth + 1])
{
LOG_PREFIX(RecordSubmitter);
INFO("{} io_depth_limit={}, batch_capacity={}, batch_flush_size={}, "
"preferred_fullness={}",
get_name(), io_depth, batch_capacity,
batch_flush_size, preferred_fullness);
ceph_assert(io_depth > 0);
ceph_assert(batch_capacity > 0);
ceph_assert(preferred_fullness >= 0 &&
preferred_fullness <= 1);
free_batch_ptrs.reserve(io_depth + 1);
for (std::size_t i = 0; i <= io_depth; ++i) {
batches[i].initialize(i, batch_capacity, batch_flush_size);
free_batch_ptrs.push_back(&batches[i]);
}
pop_free_batch();
}
bool RecordSubmitter::is_available() const
{
auto ret = !wait_available_promise.has_value() &&
!has_io_error;
#ifndef NDEBUG
if (ret) {
// unconditional invariants
ceph_assert(journal_allocator.can_write());
ceph_assert(p_current_batch != nullptr);
ceph_assert(!p_current_batch->is_submitting());
// the current batch accepts a further write
ceph_assert(!p_current_batch->needs_flush());
if (!p_current_batch->is_empty()) {
auto submit_length =
p_current_batch->get_submit_size().get_encoded_length();
ceph_assert(!journal_allocator.needs_roll(submit_length));
}
// I'm not rolling
}
#endif
return ret;
}
RecordSubmitter::wa_ertr::future<>
RecordSubmitter::wait_available()
{
LOG_PREFIX(RecordSubmitter::wait_available);
assert(!is_available());
if (has_io_error) {
ERROR("{} I/O is failed before wait", get_name());
return crimson::ct_error::input_output_error::make();
}
return wait_available_promise->get_shared_future(
).then([FNAME, this]() -> wa_ertr::future<> {
if (has_io_error) {
ERROR("{} I/O is failed after wait", get_name());
return crimson::ct_error::input_output_error::make();
}
return wa_ertr::now();
});
}
RecordSubmitter::action_t
RecordSubmitter::check_action(
const record_size_t& rsize) const
{
assert(is_available());
auto eval = p_current_batch->evaluate_submit(
rsize, journal_allocator.get_block_size());
if (journal_allocator.needs_roll(eval.submit_size.get_encoded_length())) {
return action_t::ROLL;
} else if (eval.is_full) {
return action_t::SUBMIT_FULL;
} else {
return action_t::SUBMIT_NOT_FULL;
}
}
RecordSubmitter::roll_segment_ertr::future<>
RecordSubmitter::roll_segment()
{
LOG_PREFIX(RecordSubmitter::roll_segment);
ceph_assert(p_current_batch->needs_flush() ||
is_available());
// #1 block concurrent submissions due to rolling
wait_available_promise = seastar::shared_promise<>();
ceph_assert(!wait_unfull_flush_promise.has_value());
return [FNAME, this] {
if (p_current_batch->is_pending()) {
if (state == state_t::FULL) {
DEBUG("{} wait flush ...", get_name());
wait_unfull_flush_promise = seastar::promise<>();
return wait_unfull_flush_promise->get_future();
} else { // IDLE/PENDING
DEBUG("{} flush", get_name());
flush_current_batch();
return seastar::now();
}
} else {
assert(p_current_batch->is_empty());
return seastar::now();
}
}().then_wrapped([FNAME, this](auto fut) {
if (fut.failed()) {
ERROR("{} rolling is skipped unexpectedly, available", get_name());
has_io_error = true;
wait_available_promise->set_value();
wait_available_promise.reset();
return roll_segment_ertr::now();
} else {
// start rolling in background
std::ignore = journal_allocator.roll(
).safe_then([FNAME, this] {
// good
DEBUG("{} rolling done, available", get_name());
assert(!has_io_error);
wait_available_promise->set_value();
wait_available_promise.reset();
}).handle_error(
crimson::ct_error::all_same_way([FNAME, this](auto e) {
ERROR("{} got error {}, available", get_name(), e);
has_io_error = true;
wait_available_promise->set_value();
wait_available_promise.reset();
})
).handle_exception([FNAME, this](auto e) {
ERROR("{} got exception {}, available", get_name(), e);
has_io_error = true;
wait_available_promise->set_value();
wait_available_promise.reset();
});
// wait for background rolling
return wait_available();
}
});
}
RecordSubmitter::submit_ret
RecordSubmitter::submit(
record_t&& record,
bool with_atomic_roll_segment)
{
LOG_PREFIX(RecordSubmitter::submit);
ceph_assert(is_available());
assert(check_action(record.size) != action_t::ROLL);
journal_allocator.update_modify_time(record);
auto eval = p_current_batch->evaluate_submit(
record.size, journal_allocator.get_block_size());
bool needs_flush = (
state == state_t::IDLE ||
eval.submit_size.get_fullness() > preferred_fullness ||
// RecordBatch::needs_flush()
eval.is_full ||
p_current_batch->get_num_records() + 1 >=
p_current_batch->get_batch_capacity());
if (p_current_batch->is_empty() &&
needs_flush &&
state != state_t::FULL) {
// fast path with direct write
increment_io();
auto [to_write, sizes] = p_current_batch->submit_pending_fast(
std::move(record),
journal_allocator.get_block_size(),
get_committed_to(),
journal_allocator.get_nonce());
DEBUG("{} fast submit {}, committed_to={}, outstanding_io={} ...",
get_name(), sizes, get_committed_to(), num_outstanding_io);
account_submission(1, sizes);
return journal_allocator.write(std::move(to_write)
).safe_then([mdlength = sizes.get_mdlength()](auto write_result) {
return record_locator_t{
write_result.start_seq.offset.add_offset(mdlength),
write_result
};
}).finally([this] {
decrement_io_with_flush();
});
}
// indirect batched write
auto write_fut = p_current_batch->add_pending(
get_name(),
std::move(record),
journal_allocator.get_block_size());
if (needs_flush) {
if (state == state_t::FULL) {
// #2 block concurrent submissions due to lack of resource
DEBUG("{} added with {} pending, outstanding_io={}, unavailable, wait flush ...",
get_name(),
p_current_batch->get_num_records(),
num_outstanding_io);
if (with_atomic_roll_segment) {
// wait_available_promise and wait_unfull_flush_promise
// need to be delegated to the follow-up atomic roll_segment();
assert(p_current_batch->is_pending());
} else {
wait_available_promise = seastar::shared_promise<>();
ceph_assert(!wait_unfull_flush_promise.has_value());
wait_unfull_flush_promise = seastar::promise<>();
// flush and mark available in background
std::ignore = wait_unfull_flush_promise->get_future(
).finally([FNAME, this] {
DEBUG("{} flush done, available", get_name());
wait_available_promise->set_value();
wait_available_promise.reset();
});
}
} else {
DEBUG("{} added pending, flush", get_name());
flush_current_batch();
}
} else {
// will flush later
DEBUG("{} added with {} pending, outstanding_io={}",
get_name(),
p_current_batch->get_num_records(),
num_outstanding_io);
assert(!p_current_batch->needs_flush());
}
return write_fut;
}
RecordSubmitter::open_ret
RecordSubmitter::open(bool is_mkfs)
{
return journal_allocator.open(is_mkfs
).safe_then([this](journal_seq_t ret) {
LOG_PREFIX(RecordSubmitter::open);
DEBUG("{} register metrics", get_name());
stats = {};
namespace sm = seastar::metrics;
std::vector<sm::label_instance> label_instances;
label_instances.push_back(sm::label_instance("submitter", get_name()));
metrics.add_group(
"journal",
{
sm::make_counter(
"record_num",
stats.record_batch_stats.num_io,
sm::description("total number of records submitted"),
label_instances
),
sm::make_counter(
"record_batch_num",
stats.record_batch_stats.num_io_grouped,
sm::description("total number of records batched"),
label_instances
),
sm::make_counter(
"io_num",
stats.io_depth_stats.num_io,
sm::description("total number of io submitted"),
label_instances
),
sm::make_counter(
"io_depth_num",
stats.io_depth_stats.num_io_grouped,
sm::description("total number of io depth"),
label_instances
),
sm::make_counter(
"record_group_padding_bytes",
stats.record_group_padding_bytes,
sm::description("bytes of metadata padding when write record groups"),
label_instances
),
sm::make_counter(
"record_group_metadata_bytes",
stats.record_group_metadata_bytes,
sm::description("bytes of raw metadata when write record groups"),
label_instances
),
sm::make_counter(
"record_group_data_bytes",
stats.record_group_data_bytes,
sm::description("bytes of data when write record groups"),
label_instances
),
}
);
return ret;
});
}
RecordSubmitter::close_ertr::future<>
RecordSubmitter::close()
{
committed_to = JOURNAL_SEQ_NULL;
ceph_assert(state == state_t::IDLE);
ceph_assert(num_outstanding_io == 0);
ceph_assert(p_current_batch != nullptr);
ceph_assert(p_current_batch->is_empty());
ceph_assert(!wait_available_promise.has_value());
has_io_error = false;
ceph_assert(!wait_unfull_flush_promise.has_value());
metrics.clear();
return journal_allocator.close();
}
void RecordSubmitter::update_state()
{
if (num_outstanding_io == 0) {
state = state_t::IDLE;
} else if (num_outstanding_io < io_depth_limit) {
state = state_t::PENDING;
} else if (num_outstanding_io == io_depth_limit) {
state = state_t::FULL;
} else {
ceph_abort("fatal error: io-depth overflow");
}
}
void RecordSubmitter::decrement_io_with_flush()
{
LOG_PREFIX(RecordSubmitter::decrement_io_with_flush);
assert(num_outstanding_io > 0);
auto prv_state = state;
--num_outstanding_io;
update_state();
if (prv_state == state_t::FULL) {
if (wait_unfull_flush_promise.has_value()) {
DEBUG("{} flush, resolve wait_unfull_flush_promise", get_name());
assert(!p_current_batch->is_empty());
assert(wait_available_promise.has_value());
flush_current_batch();
wait_unfull_flush_promise->set_value();
wait_unfull_flush_promise.reset();
return;
}
} else {
ceph_assert(!wait_unfull_flush_promise.has_value());
}
auto needs_flush = (
!p_current_batch->is_empty() && (
state == state_t::IDLE ||
p_current_batch->get_submit_size().get_fullness() > preferred_fullness ||
p_current_batch->needs_flush()
));
if (needs_flush) {
DEBUG("{} flush", get_name());
flush_current_batch();
}
}
void RecordSubmitter::account_submission(
std::size_t num,
const record_group_size_t& size)
{
stats.record_group_padding_bytes +=
(size.get_mdlength() - size.get_raw_mdlength());
stats.record_group_metadata_bytes += size.get_raw_mdlength();
stats.record_group_data_bytes += size.dlength;
stats.record_batch_stats.increment(num);
}
void RecordSubmitter::finish_submit_batch(
RecordBatch* p_batch,
maybe_result_t maybe_result)
{
assert(p_batch->is_submitting());
p_batch->set_result(maybe_result);
free_batch_ptrs.push_back(p_batch);
decrement_io_with_flush();
}
void RecordSubmitter::flush_current_batch()
{
LOG_PREFIX(RecordSubmitter::flush_current_batch);
RecordBatch* p_batch = p_current_batch;
assert(p_batch->is_pending());
p_current_batch = nullptr;
pop_free_batch();
increment_io();
auto num = p_batch->get_num_records();
auto [to_write, sizes] = p_batch->encode_batch(
get_committed_to(), journal_allocator.get_nonce());
DEBUG("{} {} records, {}, committed_to={}, outstanding_io={} ...",
get_name(), num, sizes, get_committed_to(), num_outstanding_io);
account_submission(num, sizes);
std::ignore = journal_allocator.write(std::move(to_write)
).safe_then([this, p_batch, FNAME, num, sizes=sizes](auto write_result) {
TRACE("{} {} records, {}, write done with {}",
get_name(), num, sizes, write_result);
finish_submit_batch(p_batch, write_result);
}).handle_error(
crimson::ct_error::all_same_way([this, p_batch, FNAME, num, sizes=sizes](auto e) {
ERROR("{} {} records, {}, got error {}",
get_name(), num, sizes, e);
finish_submit_batch(p_batch, std::nullopt);
})
).handle_exception([this, p_batch, FNAME, num, sizes=sizes](auto e) {
ERROR("{} {} records, {}, got exception {}",
get_name(), num, sizes, e);
finish_submit_batch(p_batch, std::nullopt);
});
}
}
| 17,121 | 31.06367 | 87 | cc |
null | ceph-main/src/crimson/os/seastore/journal/record_submitter.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <optional>
#include <seastar/core/circular_buffer.hh>
#include <seastar/core/metrics.hh>
#include <seastar/core/shared_future.hh>
#include "include/buffer.h"
#include "crimson/common/errorator.h"
#include "crimson/os/seastore/segment_manager_group.h"
#include "crimson/os/seastore/segment_seq_allocator.h"
namespace crimson::os::seastore {
class SegmentProvider;
class JournalTrimmer;
}
namespace crimson::os::seastore::journal {
class JournalAllocator {
public:
using base_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
virtual const std::string& get_name() const = 0;
virtual void update_modify_time(record_t& record) = 0;
virtual extent_len_t get_block_size() const = 0;
using close_ertr = base_ertr;
virtual close_ertr::future<> close() = 0;
virtual segment_nonce_t get_nonce() const = 0;
using write_ertr = base_ertr;
using write_ret = write_ertr::future<write_result_t>;
virtual write_ret write(ceph::bufferlist&& to_write) = 0;
virtual bool can_write() const = 0;
using roll_ertr = base_ertr;
virtual roll_ertr::future<> roll() = 0;
virtual bool needs_roll(std::size_t length) const = 0;
using open_ertr = base_ertr;
using open_ret = open_ertr::future<journal_seq_t>;
virtual open_ret open(bool is_mkfs) = 0;
};
/**
* RecordBatch
*
* Maintain a batch of records for submit.
*/
class RecordBatch {
enum class state_t {
EMPTY = 0,
PENDING,
SUBMITTING
};
public:
RecordBatch() = default;
RecordBatch(RecordBatch&&) = delete;
RecordBatch(const RecordBatch&) = delete;
RecordBatch& operator=(RecordBatch&&) = delete;
RecordBatch& operator=(const RecordBatch&) = delete;
bool is_empty() const {
return state == state_t::EMPTY;
}
bool is_pending() const {
return state == state_t::PENDING;
}
bool is_submitting() const {
return state == state_t::SUBMITTING;
}
std::size_t get_index() const {
return index;
}
std::size_t get_num_records() const {
return pending.get_size();
}
std::size_t get_batch_capacity() const {
return batch_capacity;
}
const record_group_size_t& get_submit_size() const {
assert(state != state_t::EMPTY);
return pending.size;
}
bool needs_flush() const {
assert(state != state_t::SUBMITTING);
assert(pending.get_size() <= batch_capacity);
if (state == state_t::EMPTY) {
return false;
} else {
assert(state == state_t::PENDING);
return (pending.get_size() >= batch_capacity ||
pending.size.get_encoded_length() > batch_flush_size);
}
}
struct evaluation_t {
record_group_size_t submit_size;
bool is_full;
};
evaluation_t evaluate_submit(
const record_size_t& rsize,
extent_len_t block_size) const {
assert(!needs_flush());
auto submit_size = pending.size.get_encoded_length_after(
rsize, block_size);
bool is_full = submit_size.get_encoded_length() > batch_flush_size;
return {submit_size, is_full};
}
void initialize(std::size_t i,
std::size_t _batch_capacity,
std::size_t _batch_flush_size) {
ceph_assert(_batch_capacity > 0);
index = i;
batch_capacity = _batch_capacity;
batch_flush_size = _batch_flush_size;
pending.reserve(batch_capacity);
}
  // Add to the batch; the returned future will be resolved after the batch
  // is written.
//
// Set write_result_t::write_length to 0 if the record is not the first one
// in the batch.
using add_pending_ertr = JournalAllocator::write_ertr;
using add_pending_ret = add_pending_ertr::future<record_locator_t>;
add_pending_ret add_pending(
const std::string& name,
record_t&&,
extent_len_t block_size);
// Encode the batched records for write.
std::pair<ceph::bufferlist, record_group_size_t> encode_batch(
const journal_seq_t& committed_to,
segment_nonce_t segment_nonce);
// Set the write result and reset for reuse
using maybe_result_t = std::optional<write_result_t>;
void set_result(maybe_result_t maybe_write_end_seq);
  // The fast path, equivalent to submitting a single record as a batch.
//
// Essentially, equivalent to the combined logic of:
// add_pending(), encode_batch() and set_result() above without
// the intervention of the shared io_promise.
//
// Note the current RecordBatch can be reused afterwards.
std::pair<ceph::bufferlist, record_group_size_t> submit_pending_fast(
record_t&&,
extent_len_t block_size,
const journal_seq_t& committed_to,
segment_nonce_t segment_nonce);
private:
record_group_size_t get_encoded_length_after(
const record_t& record,
extent_len_t block_size) const {
return pending.size.get_encoded_length_after(
record.size, block_size);
}
state_t state = state_t::EMPTY;
std::size_t index = 0;
std::size_t batch_capacity = 0;
std::size_t batch_flush_size = 0;
record_group_t pending;
std::size_t submitting_size = 0;
extent_len_t submitting_length = 0;
extent_len_t submitting_mdlength = 0;
struct promise_result_t {
write_result_t write_result;
extent_len_t mdlength;
};
using maybe_promise_result_t = std::optional<promise_result_t>;
std::optional<seastar::shared_promise<maybe_promise_result_t> > io_promise;
};
/**
* RecordSubmitter
*
 * Submits records concurrently, grouping them into RecordBatches and writing
 * them out through the JournalAllocator (e.g. SegmentAllocator).
*
* Configurations and controls:
* - io_depth: the io-depth limit to SegmentAllocator;
 * - batch_capacity: the maximum number of records in a RecordBatch;
* - batch_flush_size: the bytes threshold to force flush a RecordBatch to
* control the maximum latency;
* - preferred_fullness: the fullness threshold to flush a RecordBatch;
*/
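//
// A sketch of the expected submit flow (illustrative only; it mirrors
// SegmentedJournal::do_submit_record(), with error handling omitted and
// `submitter`/`record` as placeholders):
//
//   if (!submitter.is_available()) {
//     return submitter.wait_available(
//     ).safe_then([/*...*/] { /* re-check availability and retry */ });
//   }
//   if (submitter.check_action(record.size) == RecordSubmitter::action_t::ROLL) {
//     return submitter.roll_segment(
//     ).safe_then([/*...*/] { /* retry the submission */ });
//   }
//   return submitter.submit(std::move(record)
//   ).safe_then([&submitter](record_locator_t loc) {
//     submitter.update_committed_to(loc.write_result.get_end_seq());
//     return loc;
//   });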
class RecordSubmitter {
enum class state_t {
IDLE = 0, // outstanding_io == 0
PENDING, // outstanding_io < io_depth_limit
FULL // outstanding_io == io_depth_limit
// OVERFLOW: outstanding_io > io_depth_limit is impossible
};
struct grouped_io_stats {
uint64_t num_io = 0;
uint64_t num_io_grouped = 0;
void increment(uint64_t num_grouped_io) {
++num_io;
num_io_grouped += num_grouped_io;
}
};
using base_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
public:
RecordSubmitter(std::size_t io_depth,
std::size_t batch_capacity,
std::size_t batch_flush_size,
double preferred_fullness,
JournalAllocator&);
const std::string& get_name() const {
return journal_allocator.get_name();
}
journal_seq_t get_committed_to() const {
return committed_to;
}
// whether is available to submit a record
bool is_available() const;
  // wait until available if submission is currently not possible; check
  // is_available() again once the future resolves.
using wa_ertr = base_ertr;
wa_ertr::future<> wait_available();
// when available, check for the submit action
// according to the pending record size
enum class action_t {
ROLL,
SUBMIT_FULL,
SUBMIT_NOT_FULL
};
action_t check_action(const record_size_t&) const;
// when available, roll the segment if needed
using roll_segment_ertr = base_ertr;
roll_segment_ertr::future<> roll_segment();
// when available, submit the record if possible
using submit_ertr = base_ertr;
using submit_ret = submit_ertr::future<record_locator_t>;
submit_ret submit(record_t&&, bool with_atomic_roll_segment=false);
void update_committed_to(const journal_seq_t& new_committed_to) {
assert(new_committed_to != JOURNAL_SEQ_NULL);
assert(committed_to == JOURNAL_SEQ_NULL ||
committed_to <= new_committed_to);
committed_to = new_committed_to;
}
// open for write, generate the correct print name, and register metrics
using open_ertr = base_ertr;
using open_ret = open_ertr::future<journal_seq_t>;
open_ret open(bool is_mkfs);
using close_ertr = base_ertr;
close_ertr::future<> close();
private:
void update_state();
void increment_io() {
++num_outstanding_io;
stats.io_depth_stats.increment(num_outstanding_io);
update_state();
}
void decrement_io_with_flush();
void pop_free_batch() {
assert(p_current_batch == nullptr);
assert(!free_batch_ptrs.empty());
p_current_batch = free_batch_ptrs.front();
assert(p_current_batch->is_empty());
assert(p_current_batch == &batches[p_current_batch->get_index()]);
free_batch_ptrs.pop_front();
}
void account_submission(std::size_t, const record_group_size_t&);
using maybe_result_t = RecordBatch::maybe_result_t;
void finish_submit_batch(RecordBatch*, maybe_result_t);
void flush_current_batch();
state_t state = state_t::IDLE;
std::size_t num_outstanding_io = 0;
std::size_t io_depth_limit;
double preferred_fullness;
JournalAllocator& journal_allocator;
// committed_to may be in a previous journal segment
journal_seq_t committed_to = JOURNAL_SEQ_NULL;
std::unique_ptr<RecordBatch[]> batches;
// should not be nullptr after constructed
RecordBatch* p_current_batch = nullptr;
seastar::circular_buffer<RecordBatch*> free_batch_ptrs;
// blocked for rolling or lack of resource
std::optional<seastar::shared_promise<> > wait_available_promise;
bool has_io_error = false;
// when needs flush but io depth is full,
// wait for decrement_io_with_flush()
std::optional<seastar::promise<> > wait_unfull_flush_promise;
struct {
grouped_io_stats record_batch_stats;
grouped_io_stats io_depth_stats;
uint64_t record_group_padding_bytes = 0;
uint64_t record_group_metadata_bytes = 0;
uint64_t record_group_data_bytes = 0;
} stats;
seastar::metrics::metric_group metrics;
};
}
| 9,903 | 27.45977 | 77 | h |
null | ceph-main/src/crimson/os/seastore/journal/segment_allocator.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#include "segment_allocator.h"
#include <fmt/format.h>
#include <fmt/os.h>
#include "crimson/os/seastore/logging.h"
#include "crimson/os/seastore/async_cleaner.h"
SET_SUBSYS(seastore_journal);
namespace crimson::os::seastore::journal {
SegmentAllocator::SegmentAllocator(
JournalTrimmer *trimmer,
data_category_t category,
rewrite_gen_t gen,
SegmentProvider &sp,
SegmentSeqAllocator &ssa)
: print_name{fmt::format("{}_G{}", category, gen)},
type{trimmer == nullptr ?
segment_type_t::OOL :
segment_type_t::JOURNAL},
category{category},
gen{gen},
segment_provider{sp},
sm_group{*sp.get_segment_manager_group()},
segment_seq_allocator(ssa),
trimmer{trimmer}
{
reset();
}
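// Derive the nonce for a new segment: clear the top bit of the crc seed,
// stamp the segment type into that bit, and return crc32c over the given
// buffer (the seastore uuid at the call site below) seeded with that value.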
segment_nonce_t calc_new_nonce(
segment_type_t type,
uint32_t crc,
unsigned char const *data,
unsigned length)
{
crc &= std::numeric_limits<uint32_t>::max() >> 1;
crc |= static_cast<uint32_t>(type) << 31;
return ceph_crc32c(crc, data, length);
}
SegmentAllocator::open_ret
SegmentAllocator::do_open(bool is_mkfs)
{
LOG_PREFIX(SegmentAllocator::do_open);
ceph_assert(!current_segment);
segment_seq_t new_segment_seq =
segment_seq_allocator.get_and_inc_next_segment_seq();
auto meta = sm_group.get_meta();
current_segment_nonce = calc_new_nonce(
type,
new_segment_seq,
reinterpret_cast<const unsigned char *>(meta.seastore_id.bytes()),
sizeof(meta.seastore_id.uuid));
auto new_segment_id = segment_provider.allocate_segment(
new_segment_seq, type, category, gen);
ceph_assert(new_segment_id != NULL_SEG_ID);
return sm_group.open(new_segment_id
).handle_error(
open_ertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error in SegmentAllocator::do_open open"
}
).safe_then([this, is_mkfs, FNAME, new_segment_seq](auto sref) {
// initialize new segment
segment_id_t segment_id = sref->get_segment_id();
journal_seq_t dirty_tail;
journal_seq_t alloc_tail;
if (type == segment_type_t::JOURNAL) {
dirty_tail = trimmer->get_dirty_tail();
alloc_tail = trimmer->get_alloc_tail();
if (is_mkfs) {
ceph_assert(dirty_tail == JOURNAL_SEQ_NULL);
ceph_assert(alloc_tail == JOURNAL_SEQ_NULL);
auto mkfs_seq = journal_seq_t{
new_segment_seq,
paddr_t::make_seg_paddr(segment_id, 0)
};
dirty_tail = mkfs_seq;
alloc_tail = mkfs_seq;
} else {
ceph_assert(dirty_tail != JOURNAL_SEQ_NULL);
ceph_assert(alloc_tail != JOURNAL_SEQ_NULL);
}
} else { // OOL
ceph_assert(!is_mkfs);
dirty_tail = JOURNAL_SEQ_NULL;
alloc_tail = JOURNAL_SEQ_NULL;
}
auto header = segment_header_t{
new_segment_seq,
segment_id,
dirty_tail,
alloc_tail,
current_segment_nonce,
type,
category,
gen};
INFO("{} writing header {}", print_name, header);
auto header_length = get_block_size();
bufferlist bl;
encode(header, bl);
bufferptr bp(ceph::buffer::create_page_aligned(header_length));
bp.zero();
auto iter = bl.cbegin();
iter.copy(bl.length(), bp.c_str());
bl.clear();
bl.append(bp);
ceph_assert(sref->get_write_ptr() == 0);
assert((unsigned)header_length == bl.length());
written_to = header_length;
auto new_journal_seq = journal_seq_t{
new_segment_seq,
paddr_t::make_seg_paddr(segment_id, written_to)};
segment_provider.update_segment_avail_bytes(
type, new_journal_seq.offset);
return sref->write(0, std::move(bl)
).handle_error(
open_ertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error in SegmentAllocator::do_open write"
}
).safe_then([this,
FNAME,
new_journal_seq,
sref=std::move(sref)]() mutable {
ceph_assert(!current_segment);
current_segment = std::move(sref);
DEBUG("{} rolled new segment id={}",
print_name, current_segment->get_segment_id());
ceph_assert(new_journal_seq.segment_seq ==
segment_provider.get_seg_info(current_segment->get_segment_id()).seq);
return new_journal_seq;
});
});
}
SegmentAllocator::open_ret
SegmentAllocator::open(bool is_mkfs)
{
LOG_PREFIX(SegmentAllocator::open);
auto& device_ids = sm_group.get_device_ids();
ceph_assert(device_ids.size());
std::ostringstream oss;
for (auto& device_id : device_ids) {
oss << device_id_printer_t{device_id} << "_";
}
oss << fmt::format("{}_G{}", category, gen);
print_name = oss.str();
DEBUG("{}", print_name);
return do_open(is_mkfs);
}
SegmentAllocator::roll_ertr::future<>
SegmentAllocator::roll()
{
ceph_assert(can_write());
return close_segment().safe_then([this] {
return do_open(false).discard_result();
});
}
SegmentAllocator::write_ret
SegmentAllocator::write(ceph::bufferlist&& to_write)
{
LOG_PREFIX(SegmentAllocator::write);
assert(can_write());
auto write_length = to_write.length();
auto write_start_offset = written_to;
auto write_start_seq = journal_seq_t{
segment_provider.get_seg_info(current_segment->get_segment_id()).seq,
paddr_t::make_seg_paddr(
current_segment->get_segment_id(), write_start_offset)
};
TRACE("{} {}~{}", print_name, write_start_seq, write_length);
assert(write_length > 0);
assert((write_length % get_block_size()) == 0);
assert(!needs_roll(write_length));
auto write_result = write_result_t{
write_start_seq,
write_length
};
written_to += write_length;
segment_provider.update_segment_avail_bytes(
type,
paddr_t::make_seg_paddr(
current_segment->get_segment_id(), written_to)
);
return current_segment->write(
write_start_offset, std::move(to_write)
).handle_error(
write_ertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error in SegmentAllocator::write"
}
).safe_then([write_result, cs=current_segment] {
return write_result;
});
}
SegmentAllocator::close_ertr::future<>
SegmentAllocator::close()
{
return [this] {
LOG_PREFIX(SegmentAllocator::close);
if (current_segment) {
DEBUG("{} close current segment", print_name);
return close_segment();
} else {
INFO("{} no current segment", print_name);
return close_segment_ertr::now();
}
}().finally([this] {
reset();
});
}
SegmentAllocator::close_segment_ertr::future<>
SegmentAllocator::close_segment()
{
LOG_PREFIX(SegmentAllocator::close_segment);
assert(can_write());
  // Note: make sure no one can access the current segment once it starts closing
auto seg_to_close = std::move(current_segment);
auto close_segment_id = seg_to_close->get_segment_id();
auto close_seg_info = segment_provider.get_seg_info(close_segment_id);
ceph_assert((close_seg_info.modify_time == NULL_TIME &&
close_seg_info.num_extents == 0) ||
(close_seg_info.modify_time != NULL_TIME &&
close_seg_info.num_extents != 0));
auto tail = segment_tail_t{
close_seg_info.seq,
close_segment_id,
current_segment_nonce,
type,
timepoint_to_mod(close_seg_info.modify_time),
close_seg_info.num_extents};
ceph::bufferlist bl;
encode(tail, bl);
INFO("{} close segment {}, written_to={}",
print_name,
tail,
written_to);
bufferptr bp(ceph::buffer::create_page_aligned(get_block_size()));
bp.zero();
auto iter = bl.cbegin();
iter.copy(bl.length(), bp.c_str());
bl.clear();
bl.append(bp);
assert(bl.length() == sm_group.get_rounded_tail_length());
auto p_seg_to_close = seg_to_close.get();
return p_seg_to_close->advance_wp(
sm_group.get_segment_size() - sm_group.get_rounded_tail_length()
).safe_then([this, FNAME, bl=std::move(bl), p_seg_to_close]() mutable {
DEBUG("Writing tail info to segment {}", p_seg_to_close->get_segment_id());
return p_seg_to_close->write(
sm_group.get_segment_size() - sm_group.get_rounded_tail_length(),
std::move(bl));
}).safe_then([p_seg_to_close] {
return p_seg_to_close->close();
}).safe_then([this, seg_to_close=std::move(seg_to_close)] {
segment_provider.close_segment(seg_to_close->get_segment_id());
}).handle_error(
close_segment_ertr::pass_further{},
crimson::ct_error::assert_all {
"Invalid error in SegmentAllocator::close_segment"
});
}
}
| 8,525 | 29.021127 | 79 | cc |
null | ceph-main/src/crimson/os/seastore/journal/segment_allocator.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <optional>
#include <seastar/core/circular_buffer.hh>
#include <seastar/core/metrics.hh>
#include <seastar/core/shared_future.hh>
#include "include/buffer.h"
#include "crimson/common/errorator.h"
#include "crimson/os/seastore/segment_manager_group.h"
#include "crimson/os/seastore/segment_seq_allocator.h"
#include "crimson/os/seastore/journal/record_submitter.h"
#include "crimson/os/seastore/async_cleaner.h"
namespace crimson::os::seastore {
class SegmentProvider;
class JournalTrimmer;
}
namespace crimson::os::seastore::journal {
/**
* SegmentAllocator
*
* Maintain an available segment for writes.
*/
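//
// Rough lifecycle (an illustrative sketch; future chaining is elided and the
// real call sites live in RecordSubmitter and SegmentedJournal):
//
//   allocator.open(is_mkfs); // allocate a segment and write its header
//   if (allocator.needs_roll(len)) {
//     allocator.roll(); // write the tail, close, then open a fresh segment
//   }
//   allocator.write(std::move(bl)); // append; resolves to a write_result_t
//   allocator.close(); // write the tail of the current segment, if any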
class SegmentAllocator : public JournalAllocator {
public:
// SegmentAllocator specific methods
SegmentAllocator(JournalTrimmer *trimmer,
data_category_t category,
rewrite_gen_t gen,
SegmentProvider &sp,
SegmentSeqAllocator &ssa);
segment_id_t get_segment_id() const {
assert(can_write());
return current_segment->get_segment_id();
}
extent_len_t get_max_write_length() const {
return sm_group.get_segment_size() -
sm_group.get_rounded_header_length() -
sm_group.get_rounded_tail_length();
}
public:
// overriding methods
const std::string& get_name() const final {
return print_name;
}
extent_len_t get_block_size() const final {
return sm_group.get_block_size();
}
bool can_write() const final {
return !!current_segment;
}
segment_nonce_t get_nonce() const final {
assert(can_write());
return current_segment_nonce;
}
// returns true iff the current segment has insufficient space
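  // e.g. with segment size S and rounded tail length T, a write of `length`
  // bytes fits only while written_to + length <= S - T; otherwise the caller
  // must roll() to a fresh segment first.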
bool needs_roll(std::size_t length) const final {
assert(can_write());
assert(current_segment->get_write_capacity() ==
sm_group.get_segment_size());
auto write_capacity = current_segment->get_write_capacity() -
sm_group.get_rounded_tail_length();
return length + written_to > std::size_t(write_capacity);
}
// open for write and generate the correct print name
open_ret open(bool is_mkfs) final;
// close the current segment and initialize next one
roll_ertr::future<> roll() final;
// write the buffer, return the write result
//
// May be called concurrently, but writes may complete in any order.
// If rolling/opening, no write is allowed.
write_ret write(ceph::bufferlist&& to_write) final;
using close_ertr = base_ertr;
close_ertr::future<> close() final;
void update_modify_time(record_t& record) final {
segment_provider.update_modify_time(
get_segment_id(),
record.modify_time,
record.extents.size());
}
private:
open_ret do_open(bool is_mkfs);
void reset() {
current_segment.reset();
written_to = 0;
current_segment_nonce = 0;
}
using close_segment_ertr = base_ertr;
close_segment_ertr::future<> close_segment();
// device id is not available during construction,
// so generate the print_name later.
std::string print_name;
const segment_type_t type; // JOURNAL or OOL
const data_category_t category;
const rewrite_gen_t gen;
SegmentProvider &segment_provider;
SegmentManagerGroup &sm_group;
SegmentRef current_segment;
segment_off_t written_to;
SegmentSeqAllocator &segment_seq_allocator;
segment_nonce_t current_segment_nonce;
JournalTrimmer *trimmer;
};
}
| 3,545 | 25.863636 | 72 | h |
null | ceph-main/src/crimson/os/seastore/journal/segmented_journal.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <iostream>
#include <boost/iterator/counting_iterator.hpp>
#include "include/intarith.h"
#include "segmented_journal.h"
#include "crimson/common/config_proxy.h"
#include "crimson/os/seastore/logging.h"
SET_SUBSYS(seastore_journal);
/*
* format:
* - H<handle-addr> information
*
* levels:
* - INFO: major initiation, closing, rolling and replay operations
* - DEBUG: INFO details, major submit operations
* - TRACE: DEBUG details
*/
namespace crimson::os::seastore::journal {
SegmentedJournal::SegmentedJournal(
SegmentProvider &segment_provider,
JournalTrimmer &trimmer)
: segment_seq_allocator(
new SegmentSeqAllocator(segment_type_t::JOURNAL)),
journal_segment_allocator(&trimmer,
data_category_t::METADATA,
INLINE_GENERATION,
segment_provider,
*segment_seq_allocator),
record_submitter(crimson::common::get_conf<uint64_t>(
"seastore_journal_iodepth_limit"),
crimson::common::get_conf<uint64_t>(
"seastore_journal_batch_capacity"),
crimson::common::get_conf<Option::size_t>(
"seastore_journal_batch_flush_size"),
crimson::common::get_conf<double>(
"seastore_journal_batch_preferred_fullness"),
journal_segment_allocator),
sm_group(*segment_provider.get_segment_manager_group()),
trimmer{trimmer}
{
}
SegmentedJournal::open_for_mkfs_ret
SegmentedJournal::open_for_mkfs()
{
return record_submitter.open(true);
}
SegmentedJournal::open_for_mount_ret
SegmentedJournal::open_for_mount()
{
return record_submitter.open(false);
}
SegmentedJournal::close_ertr::future<> SegmentedJournal::close()
{
LOG_PREFIX(Journal::close);
INFO("closing, committed_to={}",
record_submitter.get_committed_to());
return record_submitter.close();
}
SegmentedJournal::prep_replay_segments_fut
SegmentedJournal::prep_replay_segments(
std::vector<std::pair<segment_id_t, segment_header_t>> segments)
{
LOG_PREFIX(Journal::prep_replay_segments);
if (segments.empty()) {
ERROR("no journal segments for replay");
return crimson::ct_error::input_output_error::make();
}
std::sort(
segments.begin(),
segments.end(),
    [](const auto &lt, const auto &rt) {
return lt.second.segment_seq <
rt.second.segment_seq;
});
segment_seq_allocator->set_next_segment_seq(
segments.rbegin()->second.segment_seq + 1);
std::for_each(
segments.begin(),
segments.end(),
[FNAME](auto &seg)
{
if (seg.first != seg.second.physical_segment_id ||
seg.second.get_type() != segment_type_t::JOURNAL) {
ERROR("illegal journal segment for replay -- {}", seg.second);
ceph_abort();
}
});
auto last_segment_id = segments.rbegin()->first;
auto last_header = segments.rbegin()->second;
return scan_last_segment(last_segment_id, last_header
).safe_then([this, FNAME, segments=std::move(segments)] {
INFO("dirty_tail={}, alloc_tail={}",
trimmer.get_dirty_tail(),
trimmer.get_alloc_tail());
auto journal_tail = trimmer.get_journal_tail();
auto journal_tail_paddr = journal_tail.offset;
ceph_assert(journal_tail != JOURNAL_SEQ_NULL);
ceph_assert(journal_tail_paddr != P_ADDR_NULL);
auto from = std::find_if(
segments.begin(),
segments.end(),
[&journal_tail_paddr](const auto &seg) -> bool {
auto& seg_addr = journal_tail_paddr.as_seg_paddr();
return seg.first == seg_addr.get_segment_id();
});
if (from->second.segment_seq != journal_tail.segment_seq) {
ERROR("journal_tail {} does not match {}",
journal_tail, from->second);
ceph_abort();
}
auto num_segments = segments.end() - from;
INFO("{} segments to replay", num_segments);
auto ret = replay_segments_t(num_segments);
std::transform(
from, segments.end(), ret.begin(),
[this](const auto &p) {
auto ret = journal_seq_t{
p.second.segment_seq,
paddr_t::make_seg_paddr(
p.first,
sm_group.get_block_size())
};
return std::make_pair(ret, p.second);
});
ret[0].first.offset = journal_tail_paddr;
return prep_replay_segments_fut(
replay_ertr::ready_future_marker{},
std::move(ret));
});
}
SegmentedJournal::scan_last_segment_ertr::future<>
SegmentedJournal::scan_last_segment(
const segment_id_t &segment_id,
const segment_header_t &segment_header)
{
LOG_PREFIX(SegmentedJournal::scan_last_segment);
assert(segment_id == segment_header.physical_segment_id);
trimmer.update_journal_tails(
segment_header.dirty_tail, segment_header.alloc_tail);
auto seq = journal_seq_t{
segment_header.segment_seq,
paddr_t::make_seg_paddr(segment_id, 0)
};
INFO("scanning journal tail deltas -- {}", segment_header);
return seastar::do_with(
scan_valid_records_cursor(seq),
SegmentManagerGroup::found_record_handler_t(
[FNAME, this](
record_locator_t locator,
const record_group_header_t& record_group_header,
const bufferlist& mdbuf
) -> SegmentManagerGroup::scan_valid_records_ertr::future<>
{
DEBUG("decoding {} at {}", record_group_header, locator);
bool has_tail_delta = false;
auto maybe_headers = try_decode_record_headers(
record_group_header, mdbuf);
if (!maybe_headers) {
        // This should be impossible; we did check the crc on the mdbuf
ERROR("unable to decode headers from {} at {}",
record_group_header, locator);
ceph_abort();
}
for (auto &record_header : *maybe_headers) {
ceph_assert(is_valid_transaction(record_header.type));
if (is_background_transaction(record_header.type)) {
has_tail_delta = true;
}
}
if (has_tail_delta) {
bool found_delta = false;
auto maybe_record_deltas_list = try_decode_deltas(
record_group_header, mdbuf, locator.record_block_base);
if (!maybe_record_deltas_list) {
ERROR("unable to decode deltas from {} at {}",
record_group_header, locator);
ceph_abort();
}
for (auto &record_deltas : *maybe_record_deltas_list) {
for (auto &[ctime, delta] : record_deltas.deltas) {
if (delta.type == extent_types_t::JOURNAL_TAIL) {
found_delta = true;
journal_tail_delta_t tail_delta;
decode(tail_delta, delta.bl);
auto start_seq = locator.write_result.start_seq;
DEBUG("got {}, at {}", tail_delta, start_seq);
ceph_assert(tail_delta.dirty_tail != JOURNAL_SEQ_NULL);
ceph_assert(tail_delta.alloc_tail != JOURNAL_SEQ_NULL);
trimmer.update_journal_tails(
tail_delta.dirty_tail, tail_delta.alloc_tail);
}
}
}
ceph_assert(found_delta);
}
return seastar::now();
}),
[this, nonce=segment_header.segment_nonce](auto &cursor, auto &handler)
{
return sm_group.scan_valid_records(
cursor,
nonce,
std::numeric_limits<std::size_t>::max(),
handler).discard_result();
});
}
SegmentedJournal::replay_ertr::future<>
SegmentedJournal::replay_segment(
journal_seq_t seq,
segment_header_t header,
delta_handler_t &handler,
replay_stats_t &stats)
{
LOG_PREFIX(Journal::replay_segment);
INFO("starting at {} -- {}", seq, header);
return seastar::do_with(
scan_valid_records_cursor(seq),
SegmentManagerGroup::found_record_handler_t(
[&handler, this, &stats](
record_locator_t locator,
const record_group_header_t& header,
const bufferlist& mdbuf)
-> SegmentManagerGroup::scan_valid_records_ertr::future<>
{
LOG_PREFIX(Journal::replay_segment);
++stats.num_record_groups;
auto maybe_record_deltas_list = try_decode_deltas(
header, mdbuf, locator.record_block_base);
if (!maybe_record_deltas_list) {
        // This should be impossible; we did check the crc on the mdbuf
ERROR("unable to decode deltas for record {} at {}",
header, locator);
return crimson::ct_error::input_output_error::make();
}
return seastar::do_with(
std::move(*maybe_record_deltas_list),
[write_result=locator.write_result,
this,
FNAME,
&handler,
&stats](auto& record_deltas_list)
{
return crimson::do_for_each(
record_deltas_list,
[write_result,
this,
FNAME,
&handler,
&stats](record_deltas_t& record_deltas)
{
++stats.num_records;
auto locator = record_locator_t{
record_deltas.record_block_base,
write_result
};
DEBUG("processing {} deltas at block_base {}",
record_deltas.deltas.size(),
locator);
return crimson::do_for_each(
record_deltas.deltas,
[locator,
this,
&handler,
&stats](auto &p)
{
auto& modify_time = p.first;
auto& delta = p.second;
return handler(
locator,
delta,
trimmer.get_dirty_tail(),
trimmer.get_alloc_tail(),
modify_time
).safe_then([&stats, delta_type=delta.type](bool is_applied) {
if (is_applied) {
// see Cache::replay_delta()
assert(delta_type != extent_types_t::JOURNAL_TAIL);
if (delta_type == extent_types_t::ALLOC_INFO) {
++stats.num_alloc_deltas;
} else {
++stats.num_dirty_deltas;
}
}
});
});
});
});
}),
[=, this](auto &cursor, auto &dhandler) {
return sm_group.scan_valid_records(
cursor,
header.segment_nonce,
std::numeric_limits<size_t>::max(),
dhandler).safe_then([](auto){}
).handle_error(
replay_ertr::pass_further{},
      crimson::ct_error::assert_all{
        "shouldn't meet with any error other than replay_ertr"
}
);
}
);
}
SegmentedJournal::replay_ret SegmentedJournal::replay(
delta_handler_t &&delta_handler)
{
LOG_PREFIX(Journal::replay);
return sm_group.find_journal_segment_headers(
).safe_then([this, FNAME, delta_handler=std::move(delta_handler)]
(auto &&segment_headers) mutable -> replay_ret {
INFO("got {} segments", segment_headers.size());
return seastar::do_with(
std::move(delta_handler),
replay_segments_t(),
replay_stats_t(),
[this, segment_headers=std::move(segment_headers), FNAME]
(auto &handler, auto &segments, auto &stats) mutable -> replay_ret {
return prep_replay_segments(std::move(segment_headers)
).safe_then([this, &handler, &segments, &stats](auto replay_segs) mutable {
segments = std::move(replay_segs);
return crimson::do_for_each(segments,[this, &handler, &stats](auto i) mutable {
return replay_segment(i.first, i.second, handler, stats);
});
}).safe_then([&stats, FNAME] {
INFO("replay done, record_groups={}, records={}, "
"alloc_deltas={}, dirty_deltas={}",
stats.num_record_groups,
stats.num_records,
stats.num_alloc_deltas,
stats.num_dirty_deltas);
});
});
});
}
seastar::future<> SegmentedJournal::flush(OrderingHandle &handle)
{
LOG_PREFIX(SegmentedJournal::flush);
DEBUG("H{} flush ...", (void*)&handle);
assert(write_pipeline);
return handle.enter(write_pipeline->device_submission
).then([this, &handle] {
return handle.enter(write_pipeline->finalize);
}).then([FNAME, &handle] {
DEBUG("H{} flush done", (void*)&handle);
});
}
SegmentedJournal::submit_record_ret
SegmentedJournal::do_submit_record(
record_t &&record,
OrderingHandle &handle)
{
LOG_PREFIX(SegmentedJournal::do_submit_record);
if (!record_submitter.is_available()) {
DEBUG("H{} wait ...", (void*)&handle);
return record_submitter.wait_available(
).safe_then([this, record=std::move(record), &handle]() mutable {
return do_submit_record(std::move(record), handle);
});
}
auto action = record_submitter.check_action(record.size);
if (action == RecordSubmitter::action_t::ROLL) {
DEBUG("H{} roll, unavailable ...", (void*)&handle);
return record_submitter.roll_segment(
).safe_then([this, record=std::move(record), &handle]() mutable {
return do_submit_record(std::move(record), handle);
});
} else { // SUBMIT_FULL/NOT_FULL
DEBUG("H{} submit {} ...",
(void*)&handle,
action == RecordSubmitter::action_t::SUBMIT_FULL ?
"FULL" : "NOT_FULL");
auto submit_fut = record_submitter.submit(std::move(record));
return handle.enter(write_pipeline->device_submission
).then([submit_fut=std::move(submit_fut)]() mutable {
return std::move(submit_fut);
}).safe_then([FNAME, this, &handle](record_locator_t result) {
return handle.enter(write_pipeline->finalize
).then([FNAME, this, result, &handle] {
DEBUG("H{} finish with {}", (void*)&handle, result);
auto new_committed_to = result.write_result.get_end_seq();
record_submitter.update_committed_to(new_committed_to);
return result;
});
});
}
}
SegmentedJournal::submit_record_ret
SegmentedJournal::submit_record(
record_t &&record,
OrderingHandle &handle)
{
LOG_PREFIX(SegmentedJournal::submit_record);
DEBUG("H{} {} start ...", (void*)&handle, record);
assert(write_pipeline);
auto expected_size = record_group_size_t(
record.size,
journal_segment_allocator.get_block_size()
).get_encoded_length();
auto max_record_length = journal_segment_allocator.get_max_write_length();
if (expected_size > max_record_length) {
ERROR("H{} {} exceeds max record size {}",
(void*)&handle, record, max_record_length);
return crimson::ct_error::erange::make();
}
return do_submit_record(std::move(record), handle);
}
}
| 14,449 | 32.294931 | 82 | cc |
null | ceph-main/src/crimson/os/seastore/journal/segmented_journal.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/future.hh>
#include "include/ceph_assert.h"
#include "include/buffer.h"
#include "include/denc.h"
#include "crimson/os/seastore/async_cleaner.h"
#include "crimson/os/seastore/journal.h"
#include "crimson/os/seastore/segment_manager_group.h"
#include "crimson/os/seastore/ordering_handle.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/osd/exceptions.h"
#include "segment_allocator.h"
#include "crimson/os/seastore/segment_seq_allocator.h"
#include "record_submitter.h"
namespace crimson::os::seastore::journal {
/**
 * Manages a stream of atomically written records to a SegmentManager.
*/
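//
// Typical flow (an illustrative sketch; future chaining is elided and
// `pipeline`, `delta_handler`, `record` and `handle` are placeholders):
//
//   journal.set_write_pipeline(&pipeline); // required before submit/flush
//   journal.replay(std::move(delta_handler)); // redeliver deltas since the tails
//   journal.open_for_mount(); // or open_for_mkfs() on a fresh device
//   journal.submit_record(std::move(record), handle);
//   journal.flush(handle); // wait for the ordered stages to complete
//   journal.close();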
class SegmentedJournal : public Journal {
public:
SegmentedJournal(
SegmentProvider &segment_provider,
JournalTrimmer &trimmer);
~SegmentedJournal() {}
JournalTrimmer &get_trimmer() final {
return trimmer;
}
open_for_mkfs_ret open_for_mkfs() final;
open_for_mount_ret open_for_mount() final;
close_ertr::future<> close() final;
submit_record_ret submit_record(
record_t &&record,
OrderingHandle &handle) final;
seastar::future<> flush(OrderingHandle &handle) final;
replay_ret replay(delta_handler_t &&delta_handler) final;
void set_write_pipeline(WritePipeline *_write_pipeline) final {
write_pipeline = _write_pipeline;
}
journal_type_t get_type() final {
return journal_type_t::SEGMENTED;
}
seastar::future<> finish_commit(transaction_type_t type) {
return seastar::now();
}
private:
submit_record_ret do_submit_record(
record_t &&record,
OrderingHandle &handle
);
SegmentSeqAllocatorRef segment_seq_allocator;
SegmentAllocator journal_segment_allocator;
RecordSubmitter record_submitter;
SegmentManagerGroup &sm_group;
JournalTrimmer &trimmer;
WritePipeline* write_pipeline = nullptr;
/// return ordered vector of segments to replay
using replay_segments_t = std::vector<
std::pair<journal_seq_t, segment_header_t>>;
using prep_replay_segments_fut = replay_ertr::future<
replay_segments_t>;
prep_replay_segments_fut prep_replay_segments(
std::vector<std::pair<segment_id_t, segment_header_t>> segments);
/// scan the last segment for tail deltas
using scan_last_segment_ertr = replay_ertr;
scan_last_segment_ertr::future<> scan_last_segment(
const segment_id_t&, const segment_header_t&);
struct replay_stats_t {
std::size_t num_record_groups = 0;
std::size_t num_records = 0;
std::size_t num_alloc_deltas = 0;
std::size_t num_dirty_deltas = 0;
};
/// replays records starting at start through end of segment
replay_ertr::future<>
replay_segment(
journal_seq_t start, ///< [in] starting addr, seq
segment_header_t header, ///< [in] segment header
delta_handler_t &delta_handler, ///< [in] processes deltas in order
replay_stats_t &stats ///< [out] replay stats
);
};
}
| 3,038 | 27.669811 | 72 | h |
null | ceph-main/src/crimson/os/seastore/lba_manager/btree/btree_lba_manager.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <sys/mman.h>
#include <string.h>
#include <seastar/core/metrics.hh>
#include "include/buffer.h"
#include "crimson/os/seastore/lba_manager/btree/btree_lba_manager.h"
#include "crimson/os/seastore/lba_manager/btree/lba_btree_node.h"
#include "crimson/os/seastore/logging.h"
SET_SUBSYS(seastore_lba);
/*
* levels:
* - INFO: mkfs
* - DEBUG: modification operations
* - TRACE: read operations, DEBUG details
*/
namespace crimson::os::seastore {
template <typename T>
Transaction::tree_stats_t& get_tree_stats(Transaction &t)
{
return t.get_lba_tree_stats();
}
template Transaction::tree_stats_t&
get_tree_stats<
crimson::os::seastore::lba_manager::btree::LBABtree>(
Transaction &t);
template <typename T>
phy_tree_root_t& get_phy_tree_root(root_t &r)
{
return r.lba_root;
}
template phy_tree_root_t&
get_phy_tree_root<
crimson::os::seastore::lba_manager::btree::LBABtree>(root_t &r);
template <>
const get_phy_tree_root_node_ret get_phy_tree_root_node<
crimson::os::seastore::lba_manager::btree::LBABtree>(
const RootBlockRef &root_block, op_context_t<laddr_t> c)
{
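  // Three cases: (1) this root block already links an in-memory root node;
  // (2) this root block is pending and the prior stable instance holds the
  // link; (3) otherwise no in-memory root node is linked, so report false
  // together with an empty extent ref.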
auto lba_root = root_block->lba_root_node;
if (lba_root) {
ceph_assert(lba_root->is_initial_pending()
== root_block->is_pending());
return {true,
trans_intr::make_interruptible(
c.cache.get_extent_viewable_by_trans(c.trans, lba_root))};
} else if (root_block->is_pending()) {
auto &prior = static_cast<RootBlock&>(*root_block->get_prior_instance());
lba_root = prior.lba_root_node;
if (lba_root) {
return {true,
trans_intr::make_interruptible(
c.cache.get_extent_viewable_by_trans(c.trans, lba_root))};
} else {
return {false,
trans_intr::make_interruptible(
Cache::get_extent_ertr::make_ready_future<
CachedExtentRef>())};
}
} else {
return {false,
trans_intr::make_interruptible(
Cache::get_extent_ertr::make_ready_future<
CachedExtentRef>())};
}
}
template <typename ROOT>
void link_phy_tree_root_node(RootBlockRef &root_block, ROOT* lba_root) {
root_block->lba_root_node = lba_root;
ceph_assert(lba_root != nullptr);
lba_root->root_block = root_block;
}
template void link_phy_tree_root_node(
RootBlockRef &root_block, lba_manager::btree::LBAInternalNode* lba_root);
template void link_phy_tree_root_node(
RootBlockRef &root_block, lba_manager::btree::LBALeafNode* lba_root);
template void link_phy_tree_root_node(
RootBlockRef &root_block, lba_manager::btree::LBANode* lba_root);
template <>
void unlink_phy_tree_root_node<laddr_t>(RootBlockRef &root_block) {
root_block->lba_root_node = nullptr;
}
}
namespace crimson::os::seastore::lba_manager::btree {
BtreeLBAManager::mkfs_ret
BtreeLBAManager::mkfs(
Transaction &t)
{
LOG_PREFIX(BtreeLBAManager::mkfs);
INFOT("start", t);
return cache.get_root(t).si_then([this, &t](auto croot) {
assert(croot->is_mutation_pending());
croot->get_root().lba_root = LBABtree::mkfs(croot, get_context(t));
return mkfs_iertr::now();
}).handle_error_interruptible(
mkfs_iertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error in BtreeLBAManager::mkfs"
}
);
}
BtreeLBAManager::get_mappings_ret
BtreeLBAManager::get_mappings(
Transaction &t,
laddr_t offset, extent_len_t length)
{
LOG_PREFIX(BtreeLBAManager::get_mappings);
TRACET("{}~{}", t, offset, length);
auto c = get_context(t);
return with_btree_state<LBABtree, lba_pin_list_t>(
cache,
c,
[c, offset, length, FNAME](auto &btree, auto &ret) {
return LBABtree::iterate_repeat(
c,
btree.upper_bound_right(c, offset),
[&ret, offset, length, c, FNAME](auto &pos) {
if (pos.is_end() || pos.get_key() >= (offset + length)) {
TRACET("{}~{} done with {} results",
c.trans, offset, length, ret.size());
return typename LBABtree::iterate_repeat_ret_inner(
interruptible::ready_future_marker{},
seastar::stop_iteration::yes);
}
TRACET("{}~{} got {}, {}, repeat ...",
c.trans, offset, length, pos.get_key(), pos.get_val());
ceph_assert((pos.get_key() + pos.get_val().len) > offset);
ret.push_back(pos.get_pin(c));
return typename LBABtree::iterate_repeat_ret_inner(
interruptible::ready_future_marker{},
seastar::stop_iteration::no);
});
});
}
BtreeLBAManager::get_mappings_ret
BtreeLBAManager::get_mappings(
Transaction &t,
laddr_list_t &&list)
{
LOG_PREFIX(BtreeLBAManager::get_mappings);
TRACET("{}", t, list);
auto l = std::make_unique<laddr_list_t>(std::move(list));
auto retptr = std::make_unique<lba_pin_list_t>();
auto &ret = *retptr;
return trans_intr::do_for_each(
l->begin(),
l->end(),
[this, &t, &ret](const auto &p) {
return this->get_mappings(t, p.first, p.second).si_then(
[&ret](auto res) {
ret.splice(ret.end(), res, res.begin(), res.end());
return get_mappings_iertr::now();
});
}).si_then([l=std::move(l), retptr=std::move(retptr)]() mutable {
return std::move(*retptr);
});
}
BtreeLBAManager::get_mapping_ret
BtreeLBAManager::get_mapping(
Transaction &t,
laddr_t offset)
{
LOG_PREFIX(BtreeLBAManager::get_mapping);
TRACET("{}", t, offset);
auto c = get_context(t);
return with_btree_ret<LBABtree, LBAMappingRef>(
cache,
c,
[FNAME, c, offset](auto &btree) {
return btree.lower_bound(
c, offset
).si_then([FNAME, offset, c](auto iter) -> get_mapping_ret {
if (iter.is_end() || iter.get_key() != offset) {
ERRORT("laddr={} doesn't exist", c.trans, offset);
return crimson::ct_error::enoent::make();
} else {
TRACET("{} got {}, {}",
c.trans, offset, iter.get_key(), iter.get_val());
auto e = iter.get_pin(c);
return get_mapping_ret(
interruptible::ready_future_marker{},
std::move(e));
}
});
});
}
BtreeLBAManager::alloc_extent_ret
BtreeLBAManager::alloc_extent(
Transaction &t,
laddr_t hint,
extent_len_t len,
paddr_t addr,
LogicalCachedExtent* nextent)
{
struct state_t {
laddr_t last_end;
std::optional<typename LBABtree::iterator> insert_iter;
std::optional<typename LBABtree::iterator> ret;
state_t(laddr_t hint) : last_end(hint) {}
};
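  // Search strategy: walk forward from `hint`, tracking the end of the last
  // mapping seen in state.last_end, until a gap of at least `len` (or the
  // end of the tree) is found; the new mapping is inserted at state.last_end.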
LOG_PREFIX(BtreeLBAManager::alloc_extent);
TRACET("{}~{}, hint={}", t, addr, len, hint);
auto c = get_context(t);
++stats.num_alloc_extents;
auto lookup_attempts = stats.num_alloc_extents_iter_nexts;
return crimson::os::seastore::with_btree_state<LBABtree, state_t>(
cache,
c,
hint,
[this, FNAME, c, hint, len, addr, lookup_attempts,
&t, nextent](auto &btree, auto &state) {
return LBABtree::iterate_repeat(
c,
btree.upper_bound_right(c, hint),
[this, &state, len, addr, &t, hint, FNAME, lookup_attempts](auto &pos) {
++stats.num_alloc_extents_iter_nexts;
if (pos.is_end()) {
DEBUGT("{}~{}, hint={}, state: end, done with {} attempts, insert at {}",
t, addr, len, hint,
stats.num_alloc_extents_iter_nexts - lookup_attempts,
state.last_end);
state.insert_iter = pos;
return typename LBABtree::iterate_repeat_ret_inner(
interruptible::ready_future_marker{},
seastar::stop_iteration::yes);
} else if (pos.get_key() >= (state.last_end + len)) {
DEBUGT("{}~{}, hint={}, state: {}~{}, done with {} attempts, insert at {} -- {}",
t, addr, len, hint,
pos.get_key(), pos.get_val().len,
stats.num_alloc_extents_iter_nexts - lookup_attempts,
state.last_end,
pos.get_val());
state.insert_iter = pos;
return typename LBABtree::iterate_repeat_ret_inner(
interruptible::ready_future_marker{},
seastar::stop_iteration::yes);
} else {
state.last_end = pos.get_key() + pos.get_val().len;
TRACET("{}~{}, hint={}, state: {}~{}, repeat ... -- {}",
t, addr, len, hint,
pos.get_key(), pos.get_val().len,
pos.get_val());
return typename LBABtree::iterate_repeat_ret_inner(
interruptible::ready_future_marker{},
seastar::stop_iteration::no);
}
}).si_then([FNAME, c, addr, len, hint, &btree, &state, nextent] {
return btree.insert(
c,
*state.insert_iter,
state.last_end,
lba_map_val_t{len, addr, 1, 0},
nextent
).si_then([&state, FNAME, c, addr, len, hint, nextent](auto &&p) {
auto [iter, inserted] = std::move(p);
TRACET("{}~{}, hint={}, inserted at {}",
c.trans, addr, len, hint, state.last_end);
if (nextent) {
nextent->set_laddr(iter.get_key());
}
ceph_assert(inserted);
state.ret = iter;
});
});
}).si_then([c](auto &&state) {
return state.ret->get_pin(c);
});
}
static bool is_lba_node(const CachedExtent &e)
{
return is_lba_node(e.get_type());
}
BtreeLBAManager::base_iertr::template future<>
_init_cached_extent(
op_context_t<laddr_t> c,
const CachedExtentRef &e,
LBABtree &btree,
bool &ret)
{
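  // A logical extent is live iff the tree holds an exact (laddr, paddr)
  // match; in that case link it as a child of the leaf node and refresh its
  // laddr. Physical lba nodes are delegated to LBABtree::init_cached_extent.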
if (e->is_logical()) {
auto logn = e->cast<LogicalCachedExtent>();
return btree.lower_bound(
c,
logn->get_laddr()
).si_then([e, c, logn, &ret](auto iter) {
LOG_PREFIX(BtreeLBAManager::init_cached_extent);
if (!iter.is_end() &&
iter.get_key() == logn->get_laddr() &&
iter.get_val().paddr == logn->get_paddr()) {
assert(!iter.get_leaf_node()->is_pending());
iter.get_leaf_node()->link_child(logn.get(), iter.get_leaf_pos());
logn->set_laddr(iter.get_pin(c)->get_key());
ceph_assert(iter.get_val().len == e->get_length());
DEBUGT("logical extent {} live", c.trans, *logn);
ret = true;
} else {
DEBUGT("logical extent {} not live", c.trans, *logn);
ret = false;
}
});
} else {
return btree.init_cached_extent(c, e
).si_then([&ret](bool is_alive) {
ret = is_alive;
});
}
}
BtreeLBAManager::init_cached_extent_ret
BtreeLBAManager::init_cached_extent(
Transaction &t,
CachedExtentRef e)
{
LOG_PREFIX(BtreeLBAManager::init_cached_extent);
TRACET("{}", t, *e);
return seastar::do_with(bool(), [this, e, &t](bool &ret) {
auto c = get_context(t);
return with_btree<LBABtree>(
cache, c,
[c, e, &ret](auto &btree) -> base_iertr::future<> {
LOG_PREFIX(BtreeLBAManager::init_cached_extent);
DEBUGT("extent {}", c.trans, *e);
return _init_cached_extent(c, e, btree, ret);
}
).si_then([&ret] { return ret; });
});
}
BtreeLBAManager::check_child_trackers_ret
BtreeLBAManager::check_child_trackers(
Transaction &t) {
auto c = get_context(t);
return with_btree<LBABtree>(
cache, c,
[c](auto &btree) {
return btree.check_child_trackers(c);
});
}
BtreeLBAManager::scan_mappings_ret
BtreeLBAManager::scan_mappings(
Transaction &t,
laddr_t begin,
laddr_t end,
scan_mappings_func_t &&f)
{
LOG_PREFIX(BtreeLBAManager::scan_mappings);
DEBUGT("begin: {}, end: {}", t, begin, end);
auto c = get_context(t);
return with_btree<LBABtree>(
cache,
c,
[c, f=std::move(f), begin, end](auto &btree) mutable {
return LBABtree::iterate_repeat(
c,
btree.upper_bound_right(c, begin),
[f=std::move(f), begin, end](auto &pos) {
if (pos.is_end() || pos.get_key() >= end) {
return typename LBABtree::iterate_repeat_ret_inner(
interruptible::ready_future_marker{},
seastar::stop_iteration::yes);
}
ceph_assert((pos.get_key() + pos.get_val().len) > begin);
f(pos.get_key(), pos.get_val().paddr, pos.get_val().len);
return typename LBABtree::iterate_repeat_ret_inner(
interruptible::ready_future_marker{},
seastar::stop_iteration::no);
});
});
}
BtreeLBAManager::rewrite_extent_ret
BtreeLBAManager::rewrite_extent(
Transaction &t,
CachedExtentRef extent)
{
LOG_PREFIX(BtreeLBAManager::rewrite_extent);
if (extent->has_been_invalidated()) {
ERRORT("extent has been invalidated -- {}", t, *extent);
ceph_abort();
}
assert(!extent->is_logical());
if (is_lba_node(*extent)) {
DEBUGT("rewriting lba extent -- {}", t, *extent);
auto c = get_context(t);
return with_btree<LBABtree>(
cache,
c,
[c, extent](auto &btree) mutable {
return btree.rewrite_extent(c, extent);
});
} else {
DEBUGT("skip non lba extent -- {}", t, *extent);
return rewrite_extent_iertr::now();
}
}
BtreeLBAManager::update_mapping_ret
BtreeLBAManager::update_mapping(
Transaction& t,
laddr_t laddr,
paddr_t prev_addr,
paddr_t addr,
LogicalCachedExtent *nextent)
{
LOG_PREFIX(BtreeLBAManager::update_mapping);
TRACET("laddr={}, paddr {} => {}", t, laddr, prev_addr, addr);
return _update_mapping(
t,
laddr,
[prev_addr, addr](
const lba_map_val_t &in) {
assert(!addr.is_null());
lba_map_val_t ret = in;
ceph_assert(in.paddr == prev_addr);
ret.paddr = addr;
return ret;
},
nextent
).si_then([&t, laddr, prev_addr, addr, FNAME](auto result) {
DEBUGT("laddr={}, paddr {} => {} done -- {}",
t, laddr, prev_addr, addr, result);
},
update_mapping_iertr::pass_further{},
/* ENOENT in particular should be impossible */
crimson::ct_error::assert_all{
"Invalid error in BtreeLBAManager::update_mapping"
}
);
}
BtreeLBAManager::get_physical_extent_if_live_ret
BtreeLBAManager::get_physical_extent_if_live(
Transaction &t,
extent_types_t type,
paddr_t addr,
laddr_t laddr,
extent_len_t len)
{
LOG_PREFIX(BtreeLBAManager::get_physical_extent_if_live);
DEBUGT("{}, laddr={}, paddr={}, length={}",
t, type, laddr, addr, len);
ceph_assert(is_lba_node(type));
auto c = get_context(t);
return with_btree_ret<LBABtree, CachedExtentRef>(
cache,
c,
[c, type, addr, laddr, len](auto &btree) {
if (type == extent_types_t::LADDR_INTERNAL) {
return btree.get_internal_if_live(c, addr, laddr, len);
} else {
assert(type == extent_types_t::LADDR_LEAF ||
type == extent_types_t::DINK_LADDR_LEAF);
return btree.get_leaf_if_live(c, addr, laddr, len);
}
});
}
void BtreeLBAManager::register_metrics()
{
LOG_PREFIX(BtreeLBAManager::register_metrics);
DEBUG("start");
stats = {};
namespace sm = seastar::metrics;
metrics.add_group(
"LBA",
{
sm::make_counter(
"alloc_extents",
stats.num_alloc_extents,
sm::description("total number of lba alloc_extent operations")
),
sm::make_counter(
"alloc_extents_iter_nexts",
stats.num_alloc_extents_iter_nexts,
sm::description("total number of iterator next operations during extent allocation")
),
}
);
}
BtreeLBAManager::update_refcount_ret
BtreeLBAManager::update_refcount(
Transaction &t,
laddr_t addr,
int delta)
{
LOG_PREFIX(BtreeLBAManager::update_refcount);
TRACET("laddr={}, delta={}", t, addr, delta);
return _update_mapping(
t,
addr,
[delta](const lba_map_val_t &in) {
lba_map_val_t out = in;
ceph_assert((int)out.refcount + delta >= 0);
out.refcount += delta;
return out;
},
nullptr
).si_then([&t, addr, delta, FNAME](auto result) {
DEBUGT("laddr={}, delta={} done -- {}", t, addr, delta, result);
return ref_update_result_t{
result.refcount,
result.paddr,
result.len
};
});
}
BtreeLBAManager::_update_mapping_ret
BtreeLBAManager::_update_mapping(
Transaction &t,
laddr_t addr,
update_func_t &&f,
LogicalCachedExtent* nextent)
{
auto c = get_context(t);
return with_btree_ret<LBABtree, lba_map_val_t>(
cache,
c,
[f=std::move(f), c, addr, nextent](auto &btree) mutable {
return btree.lower_bound(
c, addr
).si_then([&btree, f=std::move(f), c, addr, nextent](auto iter)
-> _update_mapping_ret {
if (iter.is_end() || iter.get_key() != addr) {
LOG_PREFIX(BtreeLBAManager::_update_mapping);
ERRORT("laddr={} doesn't exist", c.trans, addr);
return crimson::ct_error::enoent::make();
}
auto ret = f(iter.get_val());
if (ret.refcount == 0) {
return btree.remove(
c,
iter
).si_then([ret] {
return ret;
});
} else {
return btree.update(
c,
iter,
ret,
nextent
).si_then([ret](auto) {
return ret;
});
}
});
});
}
}
| 16,411 | 27.199313 | 92 | cc |
null | ceph-main/src/crimson/os/seastore/lba_manager/btree/btree_lba_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iostream>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/future.hh>
#include "include/ceph_assert.h"
#include "include/buffer_fwd.h"
#include "include/interval_set.h"
#include "common/interval_map.h"
#include "crimson/osd/exceptions.h"
#include "crimson/os/seastore/btree/fixed_kv_btree.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/lba_manager.h"
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/lba_manager/btree/lba_btree_node.h"
#include "crimson/os/seastore/btree/btree_range_pin.h"
namespace crimson::os::seastore::lba_manager::btree {
class BtreeLBAMapping : public BtreeNodeMapping<laddr_t, paddr_t> {
public:
BtreeLBAMapping(op_context_t<laddr_t> ctx)
: BtreeNodeMapping(ctx) {}
BtreeLBAMapping(
op_context_t<laddr_t> c,
CachedExtentRef parent,
uint16_t pos,
lba_map_val_t &val,
lba_node_meta_t &&meta)
: BtreeNodeMapping(
c,
parent,
pos,
val.paddr,
val.len,
std::forward<lba_node_meta_t>(meta))
{}
};
using LBABtree = FixedKVBtree<
laddr_t, lba_map_val_t, LBAInternalNode,
LBALeafNode, BtreeLBAMapping, LBA_BLOCK_SIZE, true>;
/**
* BtreeLBAManager
*
* Uses a wandering btree to track two things:
* 1) lba state including laddr_t -> paddr_t mapping
* 2) reverse paddr_t -> laddr_t mapping for gc (TODO)
*
* Generally, any transaction will involve
* 1) deltas against lba tree nodes
* 2) new lba tree nodes
* - Note, there must necessarily be a delta linking
* these new nodes into the tree -- might be a
* bootstrap_state_t delta if new root
*
* get_mappings, alloc_extent_*, etc populate a Transaction
* which then gets submitted
*/
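//
// Typical per-transaction calls (an illustrative sketch; interruptible
// future chaining and error handling are omitted):
//
//   // allocate a mapping at or after `hint` for a new logical extent
//   lba_manager.alloc_extent(t, hint, len, paddr, extent.get());
//   // look up the mappings overlapping [offset, offset + length)
//   lba_manager.get_mappings(t, offset, length);
//   // adjust the refcount; a decref that reaches 0 removes the mapping
//   lba_manager.decref_extent(t, laddr);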
class BtreeLBAManager : public LBAManager {
public:
BtreeLBAManager(Cache &cache)
: cache(cache)
{
register_metrics();
}
mkfs_ret mkfs(
Transaction &t) final;
get_mappings_ret get_mappings(
Transaction &t,
laddr_t offset, extent_len_t length) final;
get_mappings_ret get_mappings(
Transaction &t,
laddr_list_t &&list) final;
get_mapping_ret get_mapping(
Transaction &t,
laddr_t offset) final;
alloc_extent_ret alloc_extent(
Transaction &t,
laddr_t hint,
extent_len_t len,
paddr_t addr,
LogicalCachedExtent*) final;
ref_ret decref_extent(
Transaction &t,
laddr_t addr) final {
return update_refcount(t, addr, -1);
}
ref_ret incref_extent(
Transaction &t,
laddr_t addr) final {
return update_refcount(t, addr, 1);
}
/**
* init_cached_extent
*
* Checks whether e is live (reachable from lba tree) and drops or initializes
* accordingly.
*
   * Returns whether e is live.
*/
init_cached_extent_ret init_cached_extent(
Transaction &t,
CachedExtentRef e) final;
check_child_trackers_ret check_child_trackers(Transaction &t) final;
scan_mappings_ret scan_mappings(
Transaction &t,
laddr_t begin,
laddr_t end,
scan_mappings_func_t &&f) final;
rewrite_extent_ret rewrite_extent(
Transaction &t,
CachedExtentRef extent) final;
update_mapping_ret update_mapping(
Transaction& t,
laddr_t laddr,
paddr_t prev_addr,
paddr_t paddr,
LogicalCachedExtent*) final;
get_physical_extent_if_live_ret get_physical_extent_if_live(
Transaction &t,
extent_types_t type,
paddr_t addr,
laddr_t laddr,
extent_len_t len) final;
private:
Cache &cache;
struct {
uint64_t num_alloc_extents = 0;
uint64_t num_alloc_extents_iter_nexts = 0;
} stats;
op_context_t<laddr_t> get_context(Transaction &t) {
return op_context_t<laddr_t>{cache, t};
}
seastar::metrics::metric_group metrics;
void register_metrics();
/**
* update_refcount
*
* Updates refcount, returns resulting refcount
*/
using update_refcount_ret = ref_ret;
update_refcount_ret update_refcount(
Transaction &t,
laddr_t addr,
int delta);
/**
* _update_mapping
*
   * Updates the mapping; removes it if the updated value's refcount is 0
*/
using _update_mapping_iertr = ref_iertr;
using _update_mapping_ret = ref_iertr::future<lba_map_val_t>;
using update_func_t = std::function<
lba_map_val_t(const lba_map_val_t &v)
>;
_update_mapping_ret _update_mapping(
Transaction &t,
laddr_t addr,
update_func_t &&f,
LogicalCachedExtent*);
};
using BtreeLBAManagerRef = std::unique_ptr<BtreeLBAManager>;
}
| 4,585 | 22.639175 | 80 | h |
null | ceph-main/src/crimson/os/seastore/lba_manager/btree/lba_btree_node.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <sys/mman.h>
#include <string.h>
#include <memory>
#include <string.h>
#include "include/buffer.h"
#include "include/byteorder.h"
#include "crimson/os/seastore/lba_manager/btree/lba_btree_node.h"
#include "crimson/os/seastore/logging.h"
SET_SUBSYS(seastore_lba);
namespace crimson::os::seastore::lba_manager::btree {
std::ostream& operator<<(std::ostream& out, const lba_map_val_t& v)
{
return out << "lba_map_val_t("
<< v.paddr
<< "~" << v.len
<< ", refcount=" << v.refcount
<< ", checksum=" << v.checksum
<< ")";
}
std::ostream &LBALeafNode::_print_detail(std::ostream &out) const
{
out << ", size=" << this->get_size()
<< ", meta=" << this->get_meta()
<< ", my_tracker=" << (void*)this->my_tracker;
if (this->my_tracker) {
out << ", my_tracker->parent=" << (void*)this->my_tracker->get_parent().get();
}
return out << ", root_block=" << (void*)this->root_block.get();
}
void LBALeafNode::resolve_relative_addrs(paddr_t base)
{
LOG_PREFIX(LBALeafNode::resolve_relative_addrs);
for (auto i: *this) {
if (i->get_val().paddr.is_relative()) {
auto val = i->get_val();
val.paddr = base.add_relative(val.paddr);
TRACE("{} -> {}", i->get_val().paddr, val.paddr);
i->set_val(val);
}
}
}
}
| 1,429 | 25 | 82 | cc |
null | ceph-main/src/crimson/os/seastore/lba_manager/btree/lba_btree_node.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <sys/mman.h>
#include <memory>
#include <string.h>
#include "include/buffer.h"
#include "crimson/common/fixed_kv_node_layout.h"
#include "crimson/common/errorator.h"
#include "crimson/os/seastore/lba_manager.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/cached_extent.h"
#include "crimson/os/seastore/btree/btree_range_pin.h"
#include "crimson/os/seastore/btree/fixed_kv_btree.h"
#include "crimson/os/seastore/btree/fixed_kv_node.h"
namespace crimson::os::seastore::lba_manager::btree {
using base_iertr = LBAManager::base_iertr;
using LBANode = FixedKVNode<laddr_t>;
/**
* lba_map_val_t
*
* struct representing a single lba mapping
*/
struct lba_map_val_t {
extent_len_t len = 0; ///< length of mapping
paddr_t paddr; ///< physical addr of mapping
uint32_t refcount = 0; ///< refcount
uint32_t checksum = 0; ///< checksum of original block written at paddr (TODO)
lba_map_val_t() = default;
lba_map_val_t(
extent_len_t len,
paddr_t paddr,
uint32_t refcount,
uint32_t checksum)
: len(len), paddr(paddr), refcount(refcount), checksum(checksum) {}
bool operator==(const lba_map_val_t&) const = default;
};
std::ostream& operator<<(std::ostream& out, const lba_map_val_t&);
constexpr size_t LBA_BLOCK_SIZE = 4096;
using lba_node_meta_t = fixed_kv_node_meta_t<laddr_t>;
using lba_node_meta_le_t = fixed_kv_node_meta_le_t<laddr_le_t>;
/**
* LBAInternalNode
*
* Abstracts operations on and layout of internal nodes for the
* LBA Tree.
*
* Layout (4k):
* size : uint32_t[1] 4b
* (padding) : 4b
* meta : lba_node_meta_le_t[3] (1*24)b
 * keys : laddr_t[254] (254*8)b
 * values : paddr_t[254] (254*8)b
* = 4096
* TODO: make the above capacity calculation part of FixedKVNodeLayout
* TODO: the above alignment probably isn't portable without further work
*/
constexpr size_t INTERNAL_NODE_CAPACITY = 254;
struct LBAInternalNode
: FixedKVInternalNode<
INTERNAL_NODE_CAPACITY,
laddr_t, laddr_le_t,
LBA_BLOCK_SIZE,
LBAInternalNode> {
using Ref = TCachedExtentRef<LBAInternalNode>;
using internal_iterator_t = const_iterator;
template <typename... T>
LBAInternalNode(T&&... t) :
FixedKVInternalNode(std::forward<T>(t)...) {}
static constexpr extent_types_t TYPE = extent_types_t::LADDR_INTERNAL;
extent_types_t get_type() const final {
return TYPE;
}
};
using LBAInternalNodeRef = LBAInternalNode::Ref;
/**
* LBALeafNode
*
* Abstracts operations on and layout of leaf nodes for the
* LBA Tree.
*
* Layout (4k):
* size : uint32_t[1] 4b
* (padding) : 4b
* meta : lba_node_meta_le_t[3] (1*24)b
 * keys : laddr_t[145] (145*8)b
 * values : lba_map_val_t[145] (145*20)b
* = 4092
*
* TODO: update FixedKVNodeLayout to handle the above calculation
* TODO: the above alignment probably isn't portable without further work
*/
constexpr size_t LEAF_NODE_CAPACITY = 145;
/**
* lba_map_val_le_t
*
* On disk layout for lba_map_val_t.
*/
struct lba_map_val_le_t {
extent_len_le_t len = init_extent_len_le(0);
paddr_le_t paddr;
ceph_le32 refcount{0};
ceph_le32 checksum{0};
lba_map_val_le_t() = default;
lba_map_val_le_t(const lba_map_val_le_t &) = default;
explicit lba_map_val_le_t(const lba_map_val_t &val)
: len(init_extent_len_le(val.len)),
paddr(paddr_le_t(val.paddr)),
refcount(val.refcount),
checksum(val.checksum) {}
operator lba_map_val_t() const {
return lba_map_val_t{ len, paddr, refcount, checksum };
}
};
struct LBALeafNode
: FixedKVLeafNode<
LEAF_NODE_CAPACITY,
laddr_t, laddr_le_t,
lba_map_val_t, lba_map_val_le_t,
LBA_BLOCK_SIZE,
LBALeafNode,
true> {
using Ref = TCachedExtentRef<LBALeafNode>;
using parent_type_t = FixedKVLeafNode<
LEAF_NODE_CAPACITY,
laddr_t, laddr_le_t,
lba_map_val_t, lba_map_val_le_t,
LBA_BLOCK_SIZE,
LBALeafNode,
true>;
using internal_const_iterator_t =
typename parent_type_t::node_layout_t::const_iterator;
using internal_iterator_t =
typename parent_type_t::node_layout_t::iterator;
template <typename... T>
LBALeafNode(T&&... t) :
parent_type_t(std::forward<T>(t)...) {}
static constexpr extent_types_t TYPE = extent_types_t::LADDR_LEAF;
bool validate_stable_children() final {
LOG_PREFIX(LBALeafNode::validate_stable_children);
if (this->children.empty()) {
return false;
}
for (auto i : *this) {
auto child = (LogicalCachedExtent*)this->children[i.get_offset()];
if (is_valid_child_ptr(child) && child->get_laddr() != i.get_key()) {
SUBERROR(seastore_fixedkv_tree,
"stable child not valid: child {}, key {}",
*child,
i.get_key());
ceph_abort();
return false;
}
}
return true;
}
void update(
internal_const_iterator_t iter,
lba_map_val_t val,
LogicalCachedExtent* nextent) final {
LOG_PREFIX(LBALeafNode::update);
if (nextent) {
SUBTRACE(seastore_fixedkv_tree, "trans.{}, pos {}, {}",
this->pending_for_transaction,
iter.get_offset(),
*nextent);
// child-ptr may already be correct, see LBAManager::update_mappings()
this->update_child_ptr(iter, nextent);
}
val.paddr = this->maybe_generate_relative(val.paddr);
return this->journal_update(
iter,
val,
this->maybe_get_delta_buffer());
}
internal_const_iterator_t insert(
internal_const_iterator_t iter,
laddr_t addr,
lba_map_val_t val,
LogicalCachedExtent* nextent) final {
LOG_PREFIX(LBALeafNode::insert);
SUBTRACE(seastore_fixedkv_tree, "trans.{}, pos {}, key {}, extent {}",
this->pending_for_transaction,
iter.get_offset(),
addr,
(void*)nextent);
this->insert_child_ptr(iter, nextent);
val.paddr = this->maybe_generate_relative(val.paddr);
this->journal_insert(
iter,
addr,
val,
this->maybe_get_delta_buffer());
return iter;
}
void remove(internal_const_iterator_t iter) final {
LOG_PREFIX(LBALeafNode::remove);
SUBTRACE(seastore_fixedkv_tree, "trans.{}, pos {}, key {}",
this->pending_for_transaction,
iter.get_offset(),
iter.get_key());
assert(iter != this->end());
this->remove_child_ptr(iter);
return this->journal_remove(
iter,
this->maybe_get_delta_buffer());
}
// See LBAInternalNode, same concept
void resolve_relative_addrs(paddr_t base);
void node_resolve_vals(
internal_iterator_t from,
internal_iterator_t to) const final
{
if (this->is_initial_pending()) {
for (auto i = from; i != to; ++i) {
auto val = i->get_val();
if (val.paddr.is_relative()) {
assert(val.paddr.is_block_relative());
val.paddr = this->get_paddr().add_relative(val.paddr);
i->set_val(val);
}
}
}
}
void node_unresolve_vals(
internal_iterator_t from,
internal_iterator_t to) const final
{
if (this->is_initial_pending()) {
for (auto i = from; i != to; ++i) {
auto val = i->get_val();
if (val.paddr.is_relative()) {
assert(val.paddr.is_record_relative());
val.paddr = val.paddr.block_relative_to(this->get_paddr());
i->set_val(val);
}
}
}
}
extent_types_t get_type() const final {
return TYPE;
}
std::ostream &_print_detail(std::ostream &out) const final;
};
using LBALeafNodeRef = TCachedExtentRef<LBALeafNode>;
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::lba_manager::btree::lba_node_meta_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::lba_manager::btree::lba_map_val_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::lba_manager::btree::LBAInternalNode> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::lba_manager::btree::LBALeafNode> : fmt::ostream_formatter {};
#endif
| 8,388 | 28.027682 | 122 | h |
null | ceph-main/src/crimson/os/seastore/omap_manager/btree/btree_omap_manager.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <string.h>
#include "crimson/common/log.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/omap_manager/btree/btree_omap_manager.h"
#include "crimson/os/seastore/omap_manager/btree/omap_btree_node_impl.h"
SET_SUBSYS(seastore_omap);
namespace crimson::os::seastore::omap_manager {
BtreeOMapManager::BtreeOMapManager(
TransactionManager &tm)
: tm(tm) {}
BtreeOMapManager::initialize_omap_ret
BtreeOMapManager::initialize_omap(Transaction &t, laddr_t hint)
{
LOG_PREFIX(BtreeOMapManager::initialize_omap);
DEBUGT("hint: {}", t, hint);
return tm.alloc_extent<OMapLeafNode>(t, hint, OMAP_LEAF_BLOCK_SIZE)
.si_then([hint, &t](auto&& root_extent) {
root_extent->set_size(0);
omap_node_meta_t meta{1};
root_extent->set_meta(meta);
omap_root_t omap_root;
omap_root.update(root_extent->get_laddr(), 1, hint);
t.get_omap_tree_stats().depth = 1u;
t.get_omap_tree_stats().extents_num_delta++;
return initialize_omap_iertr::make_ready_future<omap_root_t>(omap_root);
});
}
BtreeOMapManager::get_root_ret
BtreeOMapManager::get_omap_root(omap_context_t oc, const omap_root_t &omap_root)
{
assert(omap_root.get_location() != L_ADDR_NULL);
laddr_t laddr = omap_root.get_location();
return omap_load_extent(oc, laddr, omap_root.get_depth());
}
BtreeOMapManager::handle_root_split_ret
BtreeOMapManager::handle_root_split(
omap_context_t oc,
omap_root_t &omap_root,
const OMapNode::mutation_result_t& mresult)
{
LOG_PREFIX(BtreeOMapManager::handle_root_split);
DEBUGT("{}", oc.t, omap_root);
return oc.tm.alloc_extent<OMapInnerNode>(oc.t, omap_root.hint,
OMAP_INNER_BLOCK_SIZE)
.si_then([&omap_root, mresult, oc](auto&& nroot) -> handle_root_split_ret {
auto [left, right, pivot] = *(mresult.split_tuple);
omap_node_meta_t meta{omap_root.depth + 1};
nroot->set_meta(meta);
nroot->journal_inner_insert(nroot->iter_begin(), left->get_laddr(),
"", nroot->maybe_get_delta_buffer());
nroot->journal_inner_insert(nroot->iter_begin() + 1, right->get_laddr(),
pivot, nroot->maybe_get_delta_buffer());
omap_root.update(nroot->get_laddr(), omap_root.get_depth() + 1, omap_root.hint);
oc.t.get_omap_tree_stats().depth = omap_root.depth;
++(oc.t.get_omap_tree_stats().extents_num_delta);
return seastar::now();
});
}
BtreeOMapManager::handle_root_merge_ret
BtreeOMapManager::handle_root_merge(
omap_context_t oc,
omap_root_t &omap_root,
OMapNode::mutation_result_t mresult)
{
LOG_PREFIX(BtreeOMapManager::handle_root_merge);
DEBUGT("{}", oc.t, omap_root);
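  // The old root has collapsed to a single child: promote that child's laddr
  // to be the new root, drop one level of depth, and retire the old root
  // extent below.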
auto root = *(mresult.need_merge);
auto iter = root->cast<OMapInnerNode>()->iter_begin();
omap_root.update(
iter->get_val(),
omap_root.depth -= 1,
omap_root.hint);
oc.t.get_omap_tree_stats().depth = omap_root.depth;
oc.t.get_omap_tree_stats().extents_num_delta--;
return oc.tm.dec_ref(oc.t, root->get_laddr()
).si_then([](auto &&ret) -> handle_root_merge_ret {
return seastar::now();
}).handle_error_interruptible(
handle_root_merge_iertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error in handle_root_merge"
}
);
}
BtreeOMapManager::omap_get_value_ret
BtreeOMapManager::omap_get_value(
const omap_root_t &omap_root,
Transaction &t,
const std::string &key)
{
LOG_PREFIX(BtreeOMapManager::omap_get_value);
DEBUGT("key={}", t, key);
return get_omap_root(
get_omap_context(t, omap_root.hint),
omap_root
).si_then([this, &t, &key, &omap_root](auto&& extent) {
return extent->get_value(get_omap_context(t, omap_root.hint), key);
}).si_then([](auto &&e) {
return omap_get_value_ret(
interruptible::ready_future_marker{},
std::move(e));
});
}
BtreeOMapManager::omap_set_keys_ret
BtreeOMapManager::omap_set_keys(
omap_root_t &omap_root,
Transaction &t,
std::map<std::string, ceph::bufferlist>&& keys)
{
return seastar::do_with(std::move(keys), [&, this](auto& keys) {
return trans_intr::do_for_each(
keys.begin(),
keys.end(),
[&, this](auto &p) {
return omap_set_key(omap_root, t, p.first, p.second);
});
});
}
BtreeOMapManager::omap_set_key_ret
BtreeOMapManager::omap_set_key(
omap_root_t &omap_root,
Transaction &t,
const std::string &key,
const ceph::bufferlist &value)
{
LOG_PREFIX(BtreeOMapManager::omap_set_key);
DEBUGT("{} -> {}", t, key, value);
return get_omap_root(
get_omap_context(t, omap_root.hint),
omap_root
).si_then([this, &t, &key, &value, &omap_root](auto root) {
return root->insert(get_omap_context(t, omap_root.hint), key, value);
}).si_then([this, &omap_root, &t](auto mresult) -> omap_set_key_ret {
if (mresult.status == mutation_status_t::SUCCESS)
return seastar::now();
else if (mresult.status == mutation_status_t::WAS_SPLIT)
return handle_root_split(get_omap_context(t, omap_root.hint), omap_root, mresult);
else
return seastar::now();
});
}
BtreeOMapManager::omap_rm_key_ret
BtreeOMapManager::omap_rm_key(
omap_root_t &omap_root,
Transaction &t,
const std::string &key)
{
LOG_PREFIX(BtreeOMapManager::omap_rm_key);
DEBUGT("{}", t, key);
return get_omap_root(
get_omap_context(t, omap_root.hint),
omap_root
).si_then([this, &t, &key, &omap_root](auto root) {
return root->rm_key(get_omap_context(t, omap_root.hint), key);
}).si_then([this, &omap_root, &t](auto mresult) -> omap_rm_key_ret {
if (mresult.status == mutation_status_t::SUCCESS) {
return seastar::now();
} else if (mresult.status == mutation_status_t::WAS_SPLIT) {
return handle_root_split(get_omap_context(t, omap_root.hint), omap_root, mresult);
} else if (mresult.status == mutation_status_t::NEED_MERGE) {
auto root = *(mresult.need_merge);
if (root->get_node_size() == 1 && omap_root.depth != 1) {
return handle_root_merge(get_omap_context(t, omap_root.hint), omap_root, mresult);
} else {
return seastar::now();
}
} else {
return seastar::now();
}
});
}
BtreeOMapManager::omap_rm_key_range_ret
BtreeOMapManager::omap_rm_key_range(
omap_root_t &omap_root,
Transaction &t,
const std::string &first,
const std::string &last,
omap_list_config_t config)
{
LOG_PREFIX(BtreeOMapManager::omap_rm_key_range);
DEBUGT("{} ~ {}", t, first, last);
assert(first <= last);
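  // Two-phase removal: first collect the keys in the requested range via
  // omap_list(), then erase them one by one so every removal can rebalance or
  // merge nodes as needed.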
return seastar::do_with(
std::make_optional<std::string>(first),
std::make_optional<std::string>(last),
[this, &omap_root, &t, config](auto &first, auto &last) {
return omap_list(
omap_root,
t,
first,
last,
config);
}).si_then([this, &omap_root, &t](auto results) {
LOG_PREFIX(BtreeOMapManager::omap_rm_key_range);
auto &[complete, kvs] = results;
std::vector<std::string> keys;
for (const auto& [k, _] : kvs) {
keys.push_back(k);
}
DEBUGT("total {} keys to remove", t, keys.size());
return seastar::do_with(
std::move(keys),
[this, &omap_root, &t](auto& keys) {
return trans_intr::do_for_each(
keys.begin(),
keys.end(),
[this, &omap_root, &t](auto& key) {
return omap_rm_key(omap_root, t, key);
});
});
});
}
BtreeOMapManager::omap_list_ret
BtreeOMapManager::omap_list(
const omap_root_t &omap_root,
Transaction &t,
const std::optional<std::string> &first,
const std::optional<std::string> &last,
omap_list_config_t config)
{
LOG_PREFIX(BtreeOMapManager::omap_list);
if (first && last) {
DEBUGT("{}, first: {}, last: {}", t, omap_root, *first, *last);
assert(last >= first);
} else if (first) {
DEBUGT("{}, first: {}", t, omap_root, *first);
} else if (last) {
DEBUGT("{}, last: {}", t, omap_root, *last);
} else {
DEBUGT("{}", t, omap_root);
}
return get_omap_root(
get_omap_context(t, omap_root.hint),
omap_root
).si_then([this, config, &t, &first, &last, &omap_root](auto extent) {
return extent->list(
get_omap_context(t, omap_root.hint),
first,
last,
config);
});
}
BtreeOMapManager::omap_clear_ret
BtreeOMapManager::omap_clear(
omap_root_t &omap_root,
Transaction &t)
{
LOG_PREFIX(BtreeOMapManager::omap_clear);
DEBUGT("{}", t, omap_root);
return get_omap_root(
get_omap_context(t, omap_root.hint),
omap_root
).si_then([this, &t, &omap_root](auto extent) {
return extent->clear(get_omap_context(t, omap_root.hint));
}).si_then([this, &omap_root, &t] {
return tm.dec_ref(
t, omap_root.get_location()
).si_then([&omap_root] (auto ret) {
omap_root.update(
L_ADDR_NULL,
0, L_ADDR_MIN);
return omap_clear_iertr::now();
});
}).handle_error_interruptible(
omap_clear_iertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error in BtreeOMapManager::omap_clear"
}
);
}
}
| 9,074 | 29.867347 | 90 | cc |
null | ceph-main/src/crimson/os/seastore/omap_manager/btree/btree_omap_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/future.hh>
#include "include/ceph_assert.h"
#include "crimson/osd/exceptions.h"
#include "crimson/os/seastore/omap_manager.h"
#include "crimson/os/seastore/omap_manager/btree/omap_btree_node.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/transaction_manager.h"
namespace crimson::os::seastore::omap_manager {
/**
* BtreeOMapManager
*
 * Uses a btree to track the
 * string -> ceph::bufferlist mappings of each onode omap
*/
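/* Rough usage sketch (illustrative pseudocode, not real call sites; actual
 * callers chain these interruptible futures with si_then()):
 *
 *   initialize_omap(t, hint)        -> omap_root_t for a new, empty tree
 *   omap_set_key(root, t, key, bl)  -> insert or update one key/value pair
 *   omap_get_value(root, t, key)    -> optional value lookup
 *   omap_rm_key(root, t, key)       -> remove one key
 *   omap_clear(root, t)             -> remove all keys and retire the tree
 */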
class BtreeOMapManager : public OMapManager {
TransactionManager &tm;
omap_context_t get_omap_context(
Transaction &t, laddr_t addr_min) {
return omap_context_t{tm, t, addr_min};
}
/* get_omap_root
*
* load omap tree root node
*/
using get_root_iertr = base_iertr;
using get_root_ret = get_root_iertr::future<OMapNodeRef>;
static get_root_ret get_omap_root(
omap_context_t c,
const omap_root_t &omap_root);
/* handle_root_split
*
   * The root has been split; omap_root_t needs to be updated to the new root.
*/
using handle_root_split_iertr = base_iertr;
using handle_root_split_ret = handle_root_split_iertr::future<>;
handle_root_split_ret handle_root_split(
omap_context_t c,
omap_root_t &omap_root,
const OMapNode::mutation_result_t& mresult);
/* handle_root_merge
*
   * The root is a non-leaf node with only one child; remove a layer from the tree.
*/
using handle_root_merge_iertr = base_iertr;
using handle_root_merge_ret = handle_root_merge_iertr::future<>;
handle_root_merge_ret handle_root_merge(
omap_context_t oc,
omap_root_t &omap_root,
    OMapNode::mutation_result_t mresult);
public:
explicit BtreeOMapManager(TransactionManager &tm);
initialize_omap_ret initialize_omap(Transaction &t, laddr_t hint) final;
omap_get_value_ret omap_get_value(
const omap_root_t &omap_root,
Transaction &t,
const std::string &key) final;
omap_set_key_ret omap_set_key(
omap_root_t &omap_root,
Transaction &t,
const std::string &key, const ceph::bufferlist &value) final;
omap_set_keys_ret omap_set_keys(
omap_root_t &omap_root,
Transaction &t,
std::map<std::string, ceph::bufferlist>&& keys) final;
omap_rm_key_ret omap_rm_key(
omap_root_t &omap_root,
Transaction &t,
const std::string &key) final;
omap_rm_key_range_ret omap_rm_key_range(
omap_root_t &omap_root,
Transaction &t,
const std::string &first,
const std::string &last,
omap_list_config_t config) final;
omap_list_ret omap_list(
const omap_root_t &omap_root,
Transaction &t,
const std::optional<std::string> &first,
const std::optional<std::string> &last,
omap_list_config_t config = omap_list_config_t()) final;
omap_clear_ret omap_clear(
omap_root_t &omap_root,
Transaction &t) final;
};
using BtreeOMapManagerRef = std::unique_ptr<BtreeOMapManager>;
}
| 3,082 | 26.526786 | 77 | h |
null | ceph-main/src/crimson/os/seastore/omap_manager/btree/omap_btree_node.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <string>
#include <vector>
//#include <boost/iterator/counting_iterator.hpp>
#include "crimson/common/log.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/transaction_manager.h"
#include "crimson/os/seastore/omap_manager.h"
#include "crimson/os/seastore/omap_manager/btree/omap_types.h"
namespace crimson::os::seastore::omap_manager{
struct omap_context_t {
TransactionManager &tm;
Transaction &t;
laddr_t hint;
};
enum class mutation_status_t : uint8_t {
SUCCESS = 0,
WAS_SPLIT = 1,
NEED_MERGE = 2,
FAIL = 3
};
struct OMapNode : LogicalCachedExtent {
using base_iertr = OMapManager::base_iertr;
using OMapNodeRef = TCachedExtentRef<OMapNode>;
struct mutation_result_t {
mutation_status_t status;
/// Only populated if WAS_SPLIT, indicates the newly created left and right nodes
/// from splitting the target entry during insertion.
std::optional<std::tuple<OMapNodeRef, OMapNodeRef, std::string>> split_tuple;
    /// Only populated if NEED_MERGE, indicates which entry needs to be merged in the upper layer.
std::optional<OMapNodeRef> need_merge;
mutation_result_t(mutation_status_t s, std::optional<std::tuple<OMapNodeRef,
OMapNodeRef, std::string>> tuple, std::optional<OMapNodeRef> n_merge)
: status(s),
split_tuple(tuple),
need_merge(n_merge) {}
};
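  // How callers react to the status (see OMapInnerNode and BtreeOMapManager):
  // SUCCESS/FAIL require no structural work, WAS_SPLIT makes the parent (or
  // the manager, at the root) insert the new left/right children keyed by the
  // pivot string, and NEED_MERGE asks the parent to merge or rebalance the
  // undersized child referenced by need_merge.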
OMapNode(ceph::bufferptr &&ptr) : LogicalCachedExtent(std::move(ptr)) {}
OMapNode(const OMapNode &other)
: LogicalCachedExtent(other) {}
using get_value_iertr = base_iertr;
using get_value_ret = OMapManager::omap_get_value_ret;
virtual get_value_ret get_value(
omap_context_t oc,
const std::string &key) = 0;
using insert_iertr = base_iertr;
using insert_ret = insert_iertr::future<mutation_result_t>;
virtual insert_ret insert(
omap_context_t oc,
const std::string &key,
const ceph::bufferlist &value) = 0;
using rm_key_iertr = base_iertr;
using rm_key_ret = rm_key_iertr::future<mutation_result_t>;
virtual rm_key_ret rm_key(
omap_context_t oc,
const std::string &key) = 0;
using omap_list_config_t = OMapManager::omap_list_config_t;
using list_iertr = base_iertr;
using list_bare_ret = OMapManager::omap_list_bare_ret;
using list_ret = OMapManager::omap_list_ret;
virtual list_ret list(
omap_context_t oc,
const std::optional<std::string> &first,
const std::optional<std::string> &last,
omap_list_config_t config) = 0;
using clear_iertr = base_iertr;
using clear_ret = clear_iertr::future<>;
virtual clear_ret clear(omap_context_t oc) = 0;
using full_merge_iertr = base_iertr;
using full_merge_ret = full_merge_iertr::future<OMapNodeRef>;
virtual full_merge_ret make_full_merge(
omap_context_t oc,
OMapNodeRef right) = 0;
using make_balanced_iertr = base_iertr;
using make_balanced_ret = make_balanced_iertr::future
<std::tuple<OMapNodeRef, OMapNodeRef, std::string>>;
virtual make_balanced_ret make_balanced(
omap_context_t oc,
OMapNodeRef _right) = 0;
virtual omap_node_meta_t get_node_meta() const = 0;
virtual bool extent_will_overflow(
size_t ksize,
std::optional<size_t> vsize) const = 0;
virtual bool can_merge(OMapNodeRef right) const = 0;
virtual bool extent_is_below_min() const = 0;
virtual uint32_t get_node_size() = 0;
virtual ~OMapNode() = default;
};
using OMapNodeRef = OMapNode::OMapNodeRef;
using omap_load_extent_iertr = OMapNode::base_iertr;
omap_load_extent_iertr::future<OMapNodeRef>
omap_load_extent(omap_context_t oc, laddr_t laddr, depth_t depth);
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::omap_manager::OMapNode> : fmt::ostream_formatter {};
#endif
| 3,874 | 30.504065 | 109 | h |
null | ceph-main/src/crimson/os/seastore/omap_manager/btree/omap_btree_node_impl.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <algorithm>
#include <string.h>
#include "include/buffer.h"
#include "include/byteorder.h"
#include "crimson/os/seastore/transaction_manager.h"
#include "crimson/os/seastore/omap_manager/btree/omap_btree_node.h"
#include "crimson/os/seastore/omap_manager/btree/omap_btree_node_impl.h"
#include "seastar/core/thread.hh"
SET_SUBSYS(seastore_omap);
namespace crimson::os::seastore::omap_manager {
std::ostream &operator<<(std::ostream &out, const omap_inner_key_t &rhs)
{
return out << "omap_inner_key (" << rhs.key_off<< " - " << rhs.key_len
<< " - " << rhs.laddr << ")";
}
std::ostream &operator<<(std::ostream &out, const omap_leaf_key_t &rhs)
{
return out << "omap_leaf_key_t (" << rhs.key_off<< " - " << rhs.key_len
<< " - " << rhs.val_len << ")";
}
std::ostream &OMapInnerNode::print_detail_l(std::ostream &out) const
{
return out << ", size=" << get_size()
<< ", depth=" << get_meta().depth;
}
using dec_ref_iertr = OMapInnerNode::base_iertr;
using dec_ref_ret = dec_ref_iertr::future<>;
template <typename T>
dec_ref_ret dec_ref(omap_context_t oc, T&& addr) {
return oc.tm.dec_ref(oc.t, std::forward<T>(addr)).handle_error_interruptible(
dec_ref_iertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error in OMapInnerNode helper dec_ref"
}
).si_then([](auto &&e) {});
}
/**
* make_split_insert
*
 * Inserts the mapping key -> laddr at iter while splitting this node; the
 * split outcome is encoded in the returned mutation_result_t.
*/
OMapInnerNode::make_split_insert_ret
OMapInnerNode::make_split_insert(
omap_context_t oc,
internal_iterator_t iter,
std::string key,
laddr_t laddr)
{
LOG_PREFIX(OMapInnerNode::make_split_insert);
DEBUGT("this: {}, key: {}", oc.t, *this, key);
return make_split_children(oc).si_then([=] (auto tuple) {
auto [left, right, pivot] = tuple;
if (pivot > key) {
auto liter = left->iter_idx(iter.get_index());
left->journal_inner_insert(liter, laddr, key,
left->maybe_get_delta_buffer());
} else { //right
auto riter = right->iter_idx(iter.get_index() - left->get_node_size());
right->journal_inner_insert(riter, laddr, key,
right->maybe_get_delta_buffer());
}
++(oc.t.get_omap_tree_stats().extents_num_delta);
return make_split_insert_ret(
interruptible::ready_future_marker{},
mutation_result_t(mutation_status_t::WAS_SPLIT, tuple, std::nullopt));
});
}
OMapInnerNode::handle_split_ret
OMapInnerNode::handle_split(
omap_context_t oc,
internal_iterator_t iter,
mutation_result_t mresult)
{
LOG_PREFIX(OMapInnerNode::handle_split);
DEBUGT("this: {}", oc.t, *this);
if (!is_mutable()) {
auto mut = oc.tm.get_mutable_extent(oc.t, this)->cast<OMapInnerNode>();
auto mut_iter = mut->iter_idx(iter.get_index());
return mut->handle_split(oc, mut_iter, mresult);
}
auto [left, right, pivot] = *(mresult.split_tuple);
//update operation will not cause node overflow, so we can do it first.
journal_inner_update(iter, left->get_laddr(), maybe_get_delta_buffer());
bool overflow = extent_will_overflow(pivot.size(), std::nullopt);
if (!overflow) {
journal_inner_insert(iter + 1, right->get_laddr(), pivot,
maybe_get_delta_buffer());
return insert_ret(
interruptible::ready_future_marker{},
mutation_result_t(mutation_status_t::SUCCESS, std::nullopt, std::nullopt));
} else {
return make_split_insert(oc, iter + 1, pivot, right->get_laddr())
.si_then([this, oc] (auto m_result) {
return dec_ref(oc, get_laddr())
.si_then([m_result = std::move(m_result)] {
return insert_ret(
interruptible::ready_future_marker{},
m_result);
});
});
}
}
OMapInnerNode::get_value_ret
OMapInnerNode::get_value(
omap_context_t oc,
const std::string &key)
{
LOG_PREFIX(OMapInnerNode::get_value);
DEBUGT("key = {}, this: {}", oc.t, key, *this);
auto child_pt = get_containing_child(key);
assert(child_pt != iter_cend());
auto laddr = child_pt->get_val();
return omap_load_extent(oc, laddr, get_meta().depth - 1).si_then(
[oc, &key] (auto extent) {
return extent->get_value(oc, key);
}).finally([ref = OMapNodeRef(this)] {});
}
OMapInnerNode::insert_ret
OMapInnerNode::insert(
omap_context_t oc,
const std::string &key,
const ceph::bufferlist &value)
{
LOG_PREFIX(OMapInnerNode::insert);
DEBUGT("{}->{}, this: {}", oc.t, key, value, *this);
auto child_pt = get_containing_child(key);
assert(child_pt != iter_cend());
auto laddr = child_pt->get_val();
return omap_load_extent(oc, laddr, get_meta().depth - 1).si_then(
[oc, &key, &value] (auto extent) {
return extent->insert(oc, key, value);
}).si_then([this, oc, child_pt] (auto mresult) {
if (mresult.status == mutation_status_t::SUCCESS) {
return insert_iertr::make_ready_future<mutation_result_t>(mresult);
} else if (mresult.status == mutation_status_t::WAS_SPLIT) {
return handle_split(oc, child_pt, mresult);
} else {
return insert_ret(
interruptible::ready_future_marker{},
mutation_result_t(mutation_status_t::SUCCESS, std::nullopt, std::nullopt));
}
});
}
OMapInnerNode::rm_key_ret
OMapInnerNode::rm_key(omap_context_t oc, const std::string &key)
{
LOG_PREFIX(OMapInnerNode::rm_key);
DEBUGT("key={}, this: {}", oc.t, key, *this);
auto child_pt = get_containing_child(key);
assert(child_pt != iter_cend());
auto laddr = child_pt->get_val();
return omap_load_extent(oc, laddr, get_meta().depth - 1).si_then(
[this, oc, &key, child_pt] (auto extent) {
return extent->rm_key(oc, key)
.si_then([this, oc, child_pt, extent = std::move(extent)] (auto mresult) {
switch (mresult.status) {
case mutation_status_t::SUCCESS:
case mutation_status_t::FAIL:
return rm_key_iertr::make_ready_future<mutation_result_t>(mresult);
case mutation_status_t::NEED_MERGE: {
          if (get_node_size() > 1)
return merge_entry(oc, child_pt, *(mresult.need_merge));
else
return rm_key_ret(
interruptible::ready_future_marker{},
mutation_result_t(mutation_status_t::SUCCESS,
std::nullopt, std::nullopt));
}
case mutation_status_t::WAS_SPLIT:
return handle_split(oc, child_pt, mresult);
default:
return rm_key_iertr::make_ready_future<mutation_result_t>(mresult);
}
});
});
}
OMapInnerNode::list_ret
OMapInnerNode::list(
omap_context_t oc,
const std::optional<std::string> &first,
const std::optional<std::string> &last,
omap_list_config_t config)
{
LOG_PREFIX(OMapInnerNode::list);
if (first && last) {
DEBUGT("first: {}, last: {}, this: {}", oc.t, *first, *last, *this);
assert(*first <= *last);
} else if (first) {
DEBUGT("first: {}, this: {}", oc.t, *first, *this);
} else if (last) {
DEBUGT("last: {}, this: {}", oc.t, *last, *this);
} else {
DEBUGT("this: {}", oc.t, *this);
}
auto first_iter = first ?
get_containing_child(*first) :
iter_cbegin();
auto last_iter = last ?
    get_containing_child(*last) + 1 :
iter_cend();
assert(first_iter != iter_cend());
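  // Walk the children [first_iter, last_iter) in key order; the first/last
  // bounds are only forwarded to the two boundary children, and iteration
  // stops early once max_result_size entries have been collected.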
return seastar::do_with(
first_iter,
last_iter,
iter_t(first_iter),
list_bare_ret(false, {}),
[this, &first, &last, oc, config](
auto &fiter,
auto &liter,
auto &iter,
auto &ret)
{
auto &complete = std::get<0>(ret);
auto &result = std::get<1>(ret);
return trans_intr::repeat(
[&, config, oc, this]() -> list_iertr::future<seastar::stop_iteration>
{
if (iter == liter || result.size() == config.max_result_size) {
complete = iter == liter;
return list_iertr::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::yes);
}
auto laddr = iter->get_val();
return omap_load_extent(
oc, laddr,
get_meta().depth - 1
).si_then([&, config, oc](auto &&extent) {
return seastar::do_with(
iter == fiter ? first : std::optional<std::string>(std::nullopt),
iter == liter - 1 ? last : std::optional<std::string>(std::nullopt),
[&result, extent = std::move(extent), config, oc](
auto &nfirst,
auto &nlast) {
return extent->list(
oc,
nfirst,
nlast,
config.with_reduced_max(result.size()));
}).si_then([&, config](auto &&child_ret) mutable {
boost::ignore_unused(config); // avoid clang warning;
auto &[child_complete, child_result] = child_ret;
if (result.size() && child_result.size()) {
assert(child_result.begin()->first > result.rbegin()->first);
}
if (child_result.size() && first && iter == fiter) {
if (config.first_inclusive) {
assert(child_result.begin()->first >= *first);
} else {
assert(child_result.begin()->first > *first);
}
}
if (child_result.size() && last && iter == liter - 1) {
auto biter = --(child_result.end());
if (config.last_inclusive) {
assert(biter->first <= *last);
} else {
assert(biter->first < *last);
}
}
result.merge(std::move(child_result));
++iter;
assert(child_complete || result.size() == config.max_result_size);
return list_iertr::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::no);
});
});
}).si_then([&ret, ref = OMapNodeRef(this)] {
return list_iertr::make_ready_future<list_bare_ret>(std::move(ret));
});
});
}
OMapInnerNode::clear_ret
OMapInnerNode::clear(omap_context_t oc)
{
LOG_PREFIX(OMapInnerNode::clear);
DEBUGT("this: {}", oc.t, *this);
return trans_intr::do_for_each(iter_begin(), iter_end(),
[oc, this](auto iter) {
auto laddr = iter->get_val();
auto ndepth = get_meta().depth - 1;
if (ndepth > 1) {
return omap_load_extent(oc, laddr, ndepth
).si_then([oc](auto &&extent) {
return extent->clear(oc);
}).si_then([oc, laddr] {
return dec_ref(oc, laddr);
}).si_then([ref = OMapNodeRef(this)] {
return clear_iertr::now();
});
} else {
assert(ndepth == 1);
return dec_ref(oc, laddr
).si_then([ref = OMapNodeRef(this)] {
return clear_iertr::now();
});
}
});
}
OMapInnerNode::split_children_ret
OMapInnerNode::make_split_children(omap_context_t oc)
{
LOG_PREFIX(OMapInnerNode::make_split_children);
DEBUGT("this: {}", oc.t, *this);
return oc.tm.alloc_extents<OMapInnerNode>(oc.t, oc.hint,
OMAP_INNER_BLOCK_SIZE, 2)
.si_then([this, oc] (auto &&ext_pair) {
LOG_PREFIX(OMapInnerNode::make_split_children);
auto left = ext_pair.front();
auto right = ext_pair.back();
DEBUGT("this: {}, split into: l {} r {}", oc.t, *this, *left, *right);
return split_children_ret(
interruptible::ready_future_marker{},
std::make_tuple(left, right, split_into(*left, *right)));
});
}
OMapInnerNode::full_merge_ret
OMapInnerNode::make_full_merge(omap_context_t oc, OMapNodeRef right)
{
LOG_PREFIX(OMapInnerNode::make_full_merge);
DEBUGT("", oc.t);
return oc.tm.alloc_extent<OMapInnerNode>(oc.t, oc.hint,
OMAP_INNER_BLOCK_SIZE)
.si_then([this, right] (auto &&replacement) {
replacement->merge_from(*this, *right->cast<OMapInnerNode>());
return full_merge_ret(
interruptible::ready_future_marker{},
std::move(replacement));
});
}
OMapInnerNode::make_balanced_ret
OMapInnerNode::make_balanced(omap_context_t oc, OMapNodeRef _right)
{
LOG_PREFIX(OMapInnerNode::make_balanced);
DEBUGT("l: {}, r: {}", oc.t, *this, *_right);
ceph_assert(_right->get_type() == TYPE);
return oc.tm.alloc_extents<OMapInnerNode>(oc.t, oc.hint,
OMAP_INNER_BLOCK_SIZE, 2)
.si_then([this, _right] (auto &&replacement_pair){
auto replacement_left = replacement_pair.front();
auto replacement_right = replacement_pair.back();
auto &right = *_right->cast<OMapInnerNode>();
return make_balanced_ret(
interruptible::ready_future_marker{},
std::make_tuple(replacement_left, replacement_right,
balance_into_new_nodes(*this, right,
*replacement_left, *replacement_right)));
});
}
OMapInnerNode::merge_entry_ret
OMapInnerNode::merge_entry(
omap_context_t oc,
internal_iterator_t iter,
OMapNodeRef entry)
{
LOG_PREFIX(OMapInnerNode::merge_entry);
DEBUGT("{}, parent: {}", oc.t, *entry, *this);
if (!is_mutable()) {
auto mut = oc.tm.get_mutable_extent(oc.t, this)->cast<OMapInnerNode>();
auto mut_iter = mut->iter_idx(iter->get_index());
return mut->merge_entry(oc, mut_iter, entry);
}
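  // Pick the sibling to merge with or borrow from: if the undersized child is
  // the last entry of this node, the donor is its left neighbour, otherwise
  // the right one.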
auto is_left = (iter + 1) == iter_cend();
auto donor_iter = is_left ? iter - 1 : iter + 1;
return omap_load_extent(oc, donor_iter->get_val(), get_meta().depth - 1
).si_then([=, this](auto &&donor) mutable {
LOG_PREFIX(OMapInnerNode::merge_entry);
auto [l, r] = is_left ?
std::make_pair(donor, entry) : std::make_pair(entry, donor);
auto [liter, riter] = is_left ?
std::make_pair(donor_iter, iter) : std::make_pair(iter, donor_iter);
if (l->can_merge(r)) {
DEBUGT("make_full_merge l {} r {}", oc.t, *l, *r);
assert(entry->extent_is_below_min());
return l->make_full_merge(oc, r
).si_then([liter=liter, riter=riter, l=l, r=r, oc, this]
(auto &&replacement) {
LOG_PREFIX(OMapInnerNode::merge_entry);
DEBUGT("to update parent: {}", oc.t, *this);
journal_inner_update(
liter,
replacement->get_laddr(),
maybe_get_delta_buffer());
journal_inner_remove(riter, maybe_get_delta_buffer());
//retire extent
std::vector<laddr_t> dec_laddrs {l->get_laddr(), r->get_laddr()};
return dec_ref(oc, dec_laddrs
).si_then([this, oc] {
--(oc.t.get_omap_tree_stats().extents_num_delta);
if (extent_is_below_min()) {
return merge_entry_ret(
interruptible::ready_future_marker{},
mutation_result_t(mutation_status_t::NEED_MERGE,
std::nullopt, this));
} else {
return merge_entry_ret(
interruptible::ready_future_marker{},
mutation_result_t(mutation_status_t::SUCCESS,
std::nullopt, std::nullopt));
}
});
});
} else {
DEBUGT("balanced l {} r {}", oc.t, *l, *r);
return l->make_balanced(oc, r
).si_then([liter=liter, riter=riter, l=l, r=r, oc, this](auto tuple) {
LOG_PREFIX(OMapInnerNode::merge_entry);
DEBUGT("to update parent: {}", oc.t, *this);
auto [replacement_l, replacement_r, replacement_pivot] = tuple;
        //update operation will not cause node overflow, so we can do it first
journal_inner_update(
liter,
replacement_l->get_laddr(),
maybe_get_delta_buffer());
bool overflow = extent_will_overflow(replacement_pivot.size(),
std::nullopt);
if (!overflow) {
journal_inner_remove(riter, maybe_get_delta_buffer());
journal_inner_insert(
riter,
replacement_r->get_laddr(),
replacement_pivot,
maybe_get_delta_buffer());
std::vector<laddr_t> dec_laddrs{l->get_laddr(), r->get_laddr()};
return dec_ref(oc, dec_laddrs
).si_then([] {
return merge_entry_ret(
interruptible::ready_future_marker{},
mutation_result_t(mutation_status_t::SUCCESS,
std::nullopt, std::nullopt));
});
} else {
DEBUGT("balanced and split {} r {}", oc.t, *l, *r);
          //use remove and insert instead of replace,
//remove operation will not cause node split, so we can do it first
journal_inner_remove(riter, maybe_get_delta_buffer());
return make_split_insert(oc, riter, replacement_pivot,
replacement_r->get_laddr()
).si_then([this, oc, l = l, r = r](auto mresult) {
std::vector<laddr_t> dec_laddrs{
l->get_laddr(),
r->get_laddr(),
get_laddr()};
return dec_ref(oc, dec_laddrs
).si_then([mresult = std::move(mresult)] {
return merge_entry_ret(
interruptible::ready_future_marker{}, mresult);
});
});
}
});
}
});
}
OMapInnerNode::internal_iterator_t
OMapInnerNode::get_containing_child(const std::string &key)
{
auto iter = std::find_if(iter_begin(), iter_end(),
[&key](auto it) { return it.contains(key); });
return iter;
}
std::ostream &OMapLeafNode::print_detail_l(std::ostream &out) const
{
return out << ", size=" << get_size()
<< ", depth=" << get_meta().depth;
}
OMapLeafNode::get_value_ret
OMapLeafNode::get_value(omap_context_t oc, const std::string &key)
{
LOG_PREFIX(OMapLeafNode::get_value);
  DEBUGT("key = {}, this: {}", oc.t, key, *this);
auto ite = find_string_key(key);
if (ite != iter_end()) {
auto value = ite->get_val();
return get_value_ret(
interruptible::ready_future_marker{},
value);
} else {
return get_value_ret(
interruptible::ready_future_marker{},
std::nullopt);
}
}
OMapLeafNode::insert_ret
OMapLeafNode::insert(
omap_context_t oc,
const std::string &key,
const ceph::bufferlist &value)
{
LOG_PREFIX(OMapLeafNode::insert);
DEBUGT("{} -> {}, this: {}", oc.t, key, value, *this);
bool overflow = extent_will_overflow(key.size(), value.length());
if (!overflow) {
if (!is_mutable()) {
auto mut = oc.tm.get_mutable_extent(oc.t, this)->cast<OMapLeafNode>();
return mut->insert(oc, key, value);
}
auto replace_pt = find_string_key(key);
if (replace_pt != iter_end()) {
++(oc.t.get_omap_tree_stats().num_updates);
journal_leaf_update(replace_pt, key, value, maybe_get_delta_buffer());
} else {
++(oc.t.get_omap_tree_stats().num_inserts);
auto insert_pt = string_lower_bound(key);
journal_leaf_insert(insert_pt, key, value, maybe_get_delta_buffer());
DEBUGT("inserted {}, this: {}", oc.t, insert_pt.get_key(), *this);
}
return insert_ret(
interruptible::ready_future_marker{},
mutation_result_t(mutation_status_t::SUCCESS, std::nullopt, std::nullopt));
} else {
return make_split_children(oc).si_then([this, oc, &key, &value] (auto tuple) {
auto [left, right, pivot] = tuple;
auto replace_pt = find_string_key(key);
if (replace_pt != iter_end()) {
++(oc.t.get_omap_tree_stats().num_updates);
if (key < pivot) { //left
auto mut_iter = left->iter_idx(replace_pt->get_index());
left->journal_leaf_update(mut_iter, key, value, left->maybe_get_delta_buffer());
} else if (key >= pivot) { //right
auto mut_iter = right->iter_idx(replace_pt->get_index() - left->get_node_size());
right->journal_leaf_update(mut_iter, key, value, right->maybe_get_delta_buffer());
}
} else {
++(oc.t.get_omap_tree_stats().num_inserts);
auto insert_pt = string_lower_bound(key);
if (key < pivot) { //left
auto mut_iter = left->iter_idx(insert_pt->get_index());
left->journal_leaf_insert(mut_iter, key, value, left->maybe_get_delta_buffer());
} else {
auto mut_iter = right->iter_idx(insert_pt->get_index() - left->get_node_size());
right->journal_leaf_insert(mut_iter, key, value, right->maybe_get_delta_buffer());
}
}
++(oc.t.get_omap_tree_stats().extents_num_delta);
return dec_ref(oc, get_laddr())
.si_then([tuple = std::move(tuple)] {
return insert_ret(
interruptible::ready_future_marker{},
mutation_result_t(mutation_status_t::WAS_SPLIT, tuple, std::nullopt));
});
});
}
}
OMapLeafNode::rm_key_ret
OMapLeafNode::rm_key(omap_context_t oc, const std::string &key)
{
LOG_PREFIX(OMapLeafNode::rm_key);
DEBUGT("{}, this: {}", oc.t, key, *this);
auto rm_pt = find_string_key(key);
if (!is_mutable() && rm_pt != iter_end()) {
auto mut = oc.tm.get_mutable_extent(oc.t, this)->cast<OMapLeafNode>();
return mut->rm_key(oc, key);
}
if (rm_pt != iter_end()) {
++(oc.t.get_omap_tree_stats().num_erases);
journal_leaf_remove(rm_pt, maybe_get_delta_buffer());
if (extent_is_below_min()) {
return rm_key_ret(
interruptible::ready_future_marker{},
mutation_result_t(mutation_status_t::NEED_MERGE, std::nullopt,
this->cast<OMapNode>()));
} else {
return rm_key_ret(
interruptible::ready_future_marker{},
mutation_result_t(mutation_status_t::SUCCESS, std::nullopt, std::nullopt));
}
} else {
return rm_key_ret(
interruptible::ready_future_marker{},
mutation_result_t(mutation_status_t::FAIL, std::nullopt, std::nullopt));
}
}
OMapLeafNode::list_ret
OMapLeafNode::list(
omap_context_t oc,
const std::optional<std::string> &first,
const std::optional<std::string> &last,
omap_list_config_t config)
{
LOG_PREFIX(OMapLeafNode::list);
  DEBUGT(
    "first {} last {} max_result_size {} first_inclusive {} "
    "last_inclusive {}, this: {}",
oc.t,
first ? first->c_str() : "",
last ? last->c_str() : "",
config.max_result_size,
config.first_inclusive,
config.last_inclusive,
*this
);
auto ret = list_bare_ret(false, {});
auto &[complete, result] = ret;
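  // Map the inclusive flags onto iterator bounds: the start uses lower_bound
  // (keep an exact match of *first) or upper_bound (skip it); the end uses
  // upper_bound (keep an exact match of *last) or lower_bound (exclude it).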
auto iter = first ?
(config.first_inclusive ?
string_lower_bound(*first) :
string_upper_bound(*first)) :
iter_begin();
auto liter = last ?
(config.last_inclusive ?
string_upper_bound(*last) :
string_lower_bound(*last)) :
iter_end();
for (; iter != liter && result.size() < config.max_result_size; iter++) {
result.emplace(std::make_pair(iter->get_key(), iter->get_val()));
}
complete = (iter == liter);
return list_iertr::make_ready_future<list_bare_ret>(
std::move(ret));
}
OMapLeafNode::clear_ret
OMapLeafNode::clear(omap_context_t oc)
{
return clear_iertr::now();
}
OMapLeafNode::split_children_ret
OMapLeafNode::make_split_children(omap_context_t oc)
{
LOG_PREFIX(OMapLeafNode::make_split_children);
DEBUGT("this: {}", oc.t, *this);
return oc.tm.alloc_extents<OMapLeafNode>(oc.t, oc.hint, OMAP_LEAF_BLOCK_SIZE, 2)
.si_then([this] (auto &&ext_pair) {
auto left = ext_pair.front();
auto right = ext_pair.back();
return split_children_ret(
interruptible::ready_future_marker{},
std::make_tuple(left, right, split_into(*left, *right)));
});
}
OMapLeafNode::full_merge_ret
OMapLeafNode::make_full_merge(omap_context_t oc, OMapNodeRef right)
{
ceph_assert(right->get_type() == TYPE);
LOG_PREFIX(OMapLeafNode::make_full_merge);
DEBUGT("this: {}", oc.t, *this);
return oc.tm.alloc_extent<OMapLeafNode>(oc.t, oc.hint, OMAP_LEAF_BLOCK_SIZE)
.si_then([this, right] (auto &&replacement) {
replacement->merge_from(*this, *right->cast<OMapLeafNode>());
return full_merge_ret(
interruptible::ready_future_marker{},
std::move(replacement));
});
}
OMapLeafNode::make_balanced_ret
OMapLeafNode::make_balanced(omap_context_t oc, OMapNodeRef _right)
{
ceph_assert(_right->get_type() == TYPE);
LOG_PREFIX(OMapLeafNode::make_balanced);
DEBUGT("this: {}", oc.t, *this);
return oc.tm.alloc_extents<OMapLeafNode>(oc.t, oc.hint, OMAP_LEAF_BLOCK_SIZE, 2)
.si_then([this, _right] (auto &&replacement_pair) {
auto replacement_left = replacement_pair.front();
auto replacement_right = replacement_pair.back();
auto &right = *_right->cast<OMapLeafNode>();
return make_balanced_ret(
interruptible::ready_future_marker{},
std::make_tuple(
replacement_left, replacement_right,
balance_into_new_nodes(
*this, right,
*replacement_left, *replacement_right)));
});
}
omap_load_extent_iertr::future<OMapNodeRef>
omap_load_extent(omap_context_t oc, laddr_t laddr, depth_t depth)
{
ceph_assert(depth > 0);
if (depth > 1) {
return oc.tm.read_extent<OMapInnerNode>(oc.t, laddr,
OMAP_INNER_BLOCK_SIZE)
.handle_error_interruptible(
omap_load_extent_iertr::pass_further{},
crimson::ct_error::assert_all{ "Invalid error in omap_load_extent" }
).si_then(
[](auto&& e) {
return seastar::make_ready_future<OMapNodeRef>(std::move(e));
});
} else {
return oc.tm.read_extent<OMapLeafNode>(oc.t, laddr, OMAP_LEAF_BLOCK_SIZE
).handle_error_interruptible(
omap_load_extent_iertr::pass_further{},
crimson::ct_error::assert_all{ "Invalid error in omap_load_extent" }
).si_then(
[](auto&& e) {
return seastar::make_ready_future<OMapNodeRef>(std::move(e));
});
}
}
}
| 25,246 | 33.396458 | 92 | cc |
null | ceph-main/src/crimson/os/seastore/omap_manager/btree/omap_btree_node_impl.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <string.h>
#include "include/buffer.h"
#include "crimson/common/errorator.h"
#include "crimson/os/seastore/omap_manager.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/omap_manager/btree/string_kv_node_layout.h"
#include "crimson/os/seastore/omap_manager/btree/omap_types.h"
#include "crimson/os/seastore/omap_manager/btree/omap_btree_node.h"
namespace crimson::os::seastore::omap_manager {
/**
* OMapInnerNode
*
* Abstracts operations on and layout of internal nodes for the
* omap Tree.
*
* Layout (4k):
* num_entries: meta : keys : values :
*/
struct OMapInnerNode
: OMapNode,
StringKVInnerNodeLayout {
using OMapInnerNodeRef = TCachedExtentRef<OMapInnerNode>;
using internal_iterator_t = const_iterator;
template <typename... T>
OMapInnerNode(T&&... t) :
OMapNode(std::forward<T>(t)...),
StringKVInnerNodeLayout(get_bptr().c_str()) {}
omap_node_meta_t get_node_meta() const final { return get_meta(); }
bool extent_will_overflow(size_t ksize, std::optional<size_t> vsize) const {
return is_overflow(ksize);
}
bool can_merge(OMapNodeRef right) const {
return !is_overflow(*right->cast<OMapInnerNode>());
}
bool extent_is_below_min() const { return below_min(); }
uint32_t get_node_size() { return get_size(); }
CachedExtentRef duplicate_for_write(Transaction&) final {
assert(delta_buffer.empty());
return CachedExtentRef(new OMapInnerNode(*this));
}
delta_inner_buffer_t delta_buffer;
delta_inner_buffer_t *maybe_get_delta_buffer() {
return is_mutation_pending() ? &delta_buffer : nullptr;
}
get_value_ret get_value(omap_context_t oc, const std::string &key) final;
insert_ret insert(
omap_context_t oc,
const std::string &key,
const ceph::bufferlist &value) final;
rm_key_ret rm_key(
omap_context_t oc,
const std::string &key) final;
list_ret list(
omap_context_t oc,
const std::optional<std::string> &first,
const std::optional<std::string> &last,
omap_list_config_t config) final;
clear_ret clear(omap_context_t oc) final;
using split_children_iertr = base_iertr;
using split_children_ret = split_children_iertr::future
<std::tuple<OMapInnerNodeRef, OMapInnerNodeRef, std::string>>;
split_children_ret make_split_children(omap_context_t oc);
full_merge_ret make_full_merge(
omap_context_t oc, OMapNodeRef right) final;
make_balanced_ret make_balanced(
omap_context_t oc, OMapNodeRef right) final;
using make_split_insert_iertr = base_iertr;
using make_split_insert_ret = make_split_insert_iertr::future<mutation_result_t>;
make_split_insert_ret make_split_insert(
omap_context_t oc, internal_iterator_t iter,
std::string key, laddr_t laddr);
using merge_entry_iertr = base_iertr;
using merge_entry_ret = merge_entry_iertr::future<mutation_result_t>;
merge_entry_ret merge_entry(
omap_context_t oc,
internal_iterator_t iter, OMapNodeRef entry);
using handle_split_iertr = base_iertr;
using handle_split_ret = handle_split_iertr::future<mutation_result_t>;
handle_split_ret handle_split(
omap_context_t oc, internal_iterator_t iter,
mutation_result_t mresult);
std::ostream &print_detail_l(std::ostream &out) const final;
static constexpr extent_types_t TYPE = extent_types_t::OMAP_INNER;
extent_types_t get_type() const final {
return TYPE;
}
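  // Delta plumbing: while this extent is mutation-pending, the journal_*
  // layout helpers also append logical deltas to delta_buffer (via
  // maybe_get_delta_buffer()); get_delta() serializes and clears that buffer
  // for the journal, and apply_delta() decodes and replays it on recovery.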
ceph::bufferlist get_delta() final {
ceph::bufferlist bl;
if (!delta_buffer.empty()) {
encode(delta_buffer, bl);
delta_buffer.clear();
}
return bl;
}
void apply_delta(const ceph::bufferlist &bl) final {
assert(bl.length());
delta_inner_buffer_t buffer;
auto bptr = bl.cbegin();
decode(buffer, bptr);
buffer.replay(*this);
}
internal_iterator_t get_containing_child(const std::string &key);
};
using OMapInnerNodeRef = OMapInnerNode::OMapInnerNodeRef;
/**
* OMapLeafNode
*
* Abstracts operations on and layout of leaf nodes for the
* OMap Tree.
*
* Layout (4k):
* num_entries: meta : keys : values :
*/
struct OMapLeafNode
: OMapNode,
StringKVLeafNodeLayout {
using OMapLeafNodeRef = TCachedExtentRef<OMapLeafNode>;
using internal_iterator_t = const_iterator;
template <typename... T>
OMapLeafNode(T&&... t) :
OMapNode(std::forward<T>(t)...),
StringKVLeafNodeLayout(get_bptr().c_str()) {}
omap_node_meta_t get_node_meta() const final { return get_meta(); }
bool extent_will_overflow(
size_t ksize, std::optional<size_t> vsize) const {
return is_overflow(ksize, *vsize);
}
bool can_merge(OMapNodeRef right) const {
return !is_overflow(*right->cast<OMapLeafNode>());
}
bool extent_is_below_min() const { return below_min(); }
uint32_t get_node_size() { return get_size(); }
CachedExtentRef duplicate_for_write(Transaction&) final {
assert(delta_buffer.empty());
return CachedExtentRef(new OMapLeafNode(*this));
}
delta_leaf_buffer_t delta_buffer;
delta_leaf_buffer_t *maybe_get_delta_buffer() {
return is_mutation_pending() ? &delta_buffer : nullptr;
}
get_value_ret get_value(
omap_context_t oc, const std::string &key) final;
insert_ret insert(
omap_context_t oc,
const std::string &key,
const ceph::bufferlist &value) final;
rm_key_ret rm_key(
omap_context_t oc, const std::string &key) final;
list_ret list(
omap_context_t oc,
const std::optional<std::string> &first,
const std::optional<std::string> &last,
omap_list_config_t config) final;
clear_ret clear(
omap_context_t oc) final;
using split_children_iertr = base_iertr;
using split_children_ret = split_children_iertr::future
<std::tuple<OMapLeafNodeRef, OMapLeafNodeRef, std::string>>;
split_children_ret make_split_children(
omap_context_t oc);
full_merge_ret make_full_merge(
omap_context_t oc,
OMapNodeRef right) final;
make_balanced_ret make_balanced(
omap_context_t oc,
OMapNodeRef _right) final;
static constexpr extent_types_t TYPE = extent_types_t::OMAP_LEAF;
extent_types_t get_type() const final {
return TYPE;
}
ceph::bufferlist get_delta() final {
ceph::bufferlist bl;
if (!delta_buffer.empty()) {
encode(delta_buffer, bl);
delta_buffer.clear();
}
return bl;
}
void apply_delta(const ceph::bufferlist &_bl) final {
assert(_bl.length());
ceph::bufferlist bl = _bl;
bl.rebuild();
delta_leaf_buffer_t buffer;
auto bptr = bl.cbegin();
decode(buffer, bptr);
buffer.replay(*this);
}
std::ostream &print_detail_l(std::ostream &out) const final;
std::pair<internal_iterator_t, internal_iterator_t>
get_leaf_entries(std::string &key);
};
using OMapLeafNodeRef = OMapLeafNode::OMapLeafNodeRef;
std::ostream &operator<<(std::ostream &out, const omap_inner_key_t &rhs);
std::ostream &operator<<(std::ostream &out, const omap_leaf_key_t &rhs);
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::omap_manager::OMapInnerNode> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::omap_manager::OMapLeafNode> : fmt::ostream_formatter {};
#endif
| 7,350 | 28.286853 | 114 | h |
null | ceph-main/src/crimson/os/seastore/omap_manager/btree/omap_types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/seastore_types.h"
namespace crimson::os::seastore::omap_manager {
struct omap_node_meta_t {
depth_t depth = 0;
std::pair<omap_node_meta_t, omap_node_meta_t> split_into() const {
return std::make_pair(
omap_node_meta_t{depth},
omap_node_meta_t{depth});
}
static omap_node_meta_t merge_from(
const omap_node_meta_t &lhs, const omap_node_meta_t &rhs) {
assert(lhs.depth == rhs.depth);
return omap_node_meta_t{lhs.depth};
}
static std::pair<omap_node_meta_t, omap_node_meta_t>
rebalance(const omap_node_meta_t &lhs, const omap_node_meta_t &rhs) {
assert(lhs.depth == rhs.depth);
return std::make_pair(
omap_node_meta_t{lhs.depth},
omap_node_meta_t{lhs.depth});
}
};
struct omap_node_meta_le_t {
depth_le_t depth = init_depth_le(0);
omap_node_meta_le_t() = default;
omap_node_meta_le_t(const omap_node_meta_le_t &) = default;
explicit omap_node_meta_le_t(const omap_node_meta_t &val)
: depth(init_depth_le(val.depth)) {}
operator omap_node_meta_t() const {
return omap_node_meta_t{ depth };
}
};
struct omap_inner_key_t {
uint16_t key_off = 0;
uint16_t key_len = 0;
laddr_t laddr = 0;
omap_inner_key_t() = default;
omap_inner_key_t(uint16_t off, uint16_t len, laddr_t addr)
: key_off(off), key_len(len), laddr(addr) {}
inline bool operator==(const omap_inner_key_t b) const {
return key_off == b.key_off && key_len == b.key_len && laddr == b.laddr;
}
inline bool operator!=(const omap_inner_key_t b) const {
return key_off != b.key_off || key_len != b.key_len || laddr != b.laddr;
}
DENC(omap_inner_key_t, v, p) {
DENC_START(1, 1, p);
denc(v.key_off, p);
denc(v.key_len, p);
denc(v.laddr, p);
DENC_FINISH(p);
}
};
struct omap_inner_key_le_t {
ceph_le16 key_off{0};
ceph_le16 key_len{0};
laddr_le_t laddr{0};
omap_inner_key_le_t() = default;
omap_inner_key_le_t(const omap_inner_key_le_t &) = default;
explicit omap_inner_key_le_t(const omap_inner_key_t &key)
: key_off(key.key_off),
key_len(key.key_len),
laddr(key.laddr) {}
operator omap_inner_key_t() const {
return omap_inner_key_t{uint16_t(key_off), uint16_t(key_len), laddr_t(laddr)};
}
omap_inner_key_le_t& operator=(omap_inner_key_t key) {
key_off = key.key_off;
key_len = key.key_len;
laddr = laddr_le_t(key.laddr);
return *this;
}
inline bool operator==(const omap_inner_key_le_t b) const {
return key_off == b.key_off && key_len == b.key_len && laddr == b.laddr;
}
};
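// omap_inner_key_le_t (and omap_leaf_key_le_t below) are the on-disk,
// little-endian mirrors of the host-order structs; conversion goes through
// the explicit constructor and conversion operator, e.g. (illustrative only,
// local names are hypothetical):
//   omap_inner_key_t k{off, len, addr};
//   omap_inner_key_le_t le(k);     // host -> disk encoding
//   omap_inner_key_t back = le;    // disk -> host, back == k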
struct omap_leaf_key_t {
uint16_t key_off = 0;
uint16_t key_len = 0;
uint16_t val_len = 0;
omap_leaf_key_t() = default;
omap_leaf_key_t(uint16_t k_off, uint16_t k_len, uint16_t v_len)
: key_off(k_off), key_len(k_len), val_len(v_len) {}
inline bool operator==(const omap_leaf_key_t b) const {
return key_off == b.key_off && key_len == b.key_len &&
val_len == b.val_len;
}
inline bool operator!=(const omap_leaf_key_t b) const {
return key_off != b.key_off || key_len != b.key_len ||
val_len != b.val_len;
}
DENC(omap_leaf_key_t, v, p) {
DENC_START(1, 1, p);
denc(v.key_off, p);
denc(v.key_len, p);
denc(v.val_len, p);
DENC_FINISH(p);
}
};
struct omap_leaf_key_le_t {
ceph_le16 key_off{0};
ceph_le16 key_len{0};
ceph_le16 val_len{0};
omap_leaf_key_le_t() = default;
omap_leaf_key_le_t(const omap_leaf_key_le_t &) = default;
explicit omap_leaf_key_le_t(const omap_leaf_key_t &key)
: key_off(key.key_off),
key_len(key.key_len),
val_len(key.val_len) {}
operator omap_leaf_key_t() const {
return omap_leaf_key_t{uint16_t(key_off), uint16_t(key_len),
uint16_t(val_len)};
}
omap_leaf_key_le_t& operator=(omap_leaf_key_t key) {
key_off = key.key_off;
key_len = key.key_len;
val_len = key.val_len;
return *this;
}
inline bool operator==(const omap_leaf_key_le_t b) const {
return key_off == b.key_off && key_len == b.key_len &&
val_len == b.val_len;
}
};
}
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::omap_manager::omap_inner_key_t)
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::omap_manager::omap_leaf_key_t)
| 4,373 | 26.683544 | 82 | h |
null | ceph-main/src/crimson/os/seastore/omap_manager/btree/string_kv_node_layout.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iostream>
#include <string>
#include "include/byteorder.h"
#include "include/denc.h"
#include "include/encoding.h"
#include "crimson/common/layout.h"
#include "crimson/common/fixed_kv_node_layout.h"
#include "crimson/os/seastore/omap_manager.h"
#include "crimson/os/seastore/omap_manager/btree/omap_types.h"
namespace crimson::os::seastore::omap_manager {
class StringKVInnerNodeLayout;
class StringKVLeafNodeLayout;
/**
* copy_from_foreign
*
 * Copy entries from another node into this node.
 * [from_src, to_src) is the source range in the other node; tgt is the first
 * entry of this node to copy into.
 * tgt and from_src must be from different nodes;
 * from_src and to_src must be in the same node.
*/
template <typename iterator, typename const_iterator>
static void copy_from_foreign(
iterator tgt,
const_iterator from_src,
const_iterator to_src) {
assert(tgt->node != from_src->node);
assert(to_src->node == from_src->node);
if (from_src == to_src)
return;
auto to_copy = from_src->get_right_ptr_end() - to_src->get_right_ptr_end();
assert(to_copy > 0);
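  // Two regions move: the variable-length key/value blob that grows from the
  // right edge of the source node, and the fixed-size entry headers on the
  // left; the headers' offsets are then rebased because the blob lands at a
  // different distance from the target node's right edge.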
memcpy(
tgt->get_right_ptr_end() - to_copy,
to_src->get_right_ptr_end(),
to_copy);
memcpy(
tgt->get_node_key_ptr(),
from_src->get_node_key_ptr(),
to_src->get_node_key_ptr() - from_src->get_node_key_ptr());
auto offset_diff = tgt->get_right_offset_end() - from_src->get_right_offset_end();
for (auto i = tgt; i != tgt + (to_src - from_src); ++i) {
i->update_offset(offset_diff);
}
}
/**
* copy_from_local
*
* Copies entries from [from_src, to_src) to tgt.
* tgt, from_src, and to_src must be from the same node.
*/
template <typename iterator>
static void copy_from_local(
unsigned len,
iterator tgt,
iterator from_src,
iterator to_src) {
assert(tgt->node == from_src->node);
assert(to_src->node == from_src->node);
auto to_copy = from_src->get_right_ptr_end() - to_src->get_right_ptr_end();
assert(to_copy > 0);
  int adjust_offset = tgt > from_src ? -len : len;
memmove(to_src->get_right_ptr_end() + adjust_offset,
to_src->get_right_ptr_end(),
to_copy);
  for (auto ite = from_src; ite < to_src; ite++) {
ite->update_offset(-adjust_offset);
}
memmove(tgt->get_node_key_ptr(), from_src->get_node_key_ptr(),
to_src->get_node_key_ptr() - from_src->get_node_key_ptr());
}
struct delta_inner_t {
enum class op_t : uint_fast8_t {
INSERT,
UPDATE,
REMOVE,
} op;
std::string key;
laddr_t addr;
DENC(delta_inner_t, v, p) {
DENC_START(1, 1, p);
denc(v.op, p);
denc(v.key, p);
denc(v.addr, p);
DENC_FINISH(p);
}
void replay(StringKVInnerNodeLayout &l);
bool operator==(const delta_inner_t &rhs) const {
return op == rhs.op &&
key == rhs.key &&
addr == rhs.addr;
}
};
}
WRITE_CLASS_DENC(crimson::os::seastore::omap_manager::delta_inner_t)
namespace crimson::os::seastore::omap_manager {
struct delta_leaf_t {
enum class op_t : uint_fast8_t {
INSERT,
UPDATE,
REMOVE,
} op;
std::string key;
ceph::bufferlist val;
DENC(delta_leaf_t, v, p) {
DENC_START(1, 1, p);
denc(v.op, p);
denc(v.key, p);
denc(v.val, p);
DENC_FINISH(p);
}
void replay(StringKVLeafNodeLayout &l);
bool operator==(const delta_leaf_t &rhs) const {
return op == rhs.op &&
key == rhs.key &&
val == rhs.val;
}
};
}
WRITE_CLASS_DENC(crimson::os::seastore::omap_manager::delta_leaf_t)
namespace crimson::os::seastore::omap_manager {
class delta_inner_buffer_t {
std::vector<delta_inner_t> buffer;
public:
bool empty() const {
return buffer.empty();
}
void insert(
const std::string &key,
laddr_t addr) {
buffer.push_back(
delta_inner_t{
delta_inner_t::op_t::INSERT,
key,
addr
});
}
void update(
const std::string &key,
laddr_t addr) {
buffer.push_back(
delta_inner_t{
delta_inner_t::op_t::UPDATE,
key,
addr
});
}
void remove(const std::string &key) {
buffer.push_back(
delta_inner_t{
delta_inner_t::op_t::REMOVE,
key,
L_ADDR_NULL
});
}
void replay(StringKVInnerNodeLayout &node) {
for (auto &i: buffer) {
i.replay(node);
}
}
void clear() {
buffer.clear();
}
DENC(delta_inner_buffer_t, v, p) {
DENC_START(1, 1, p);
denc(v.buffer, p);
DENC_FINISH(p);
}
bool operator==(const delta_inner_buffer_t &rhs) const {
return buffer == rhs.buffer;
}
};
}
WRITE_CLASS_DENC(crimson::os::seastore::omap_manager::delta_inner_buffer_t)
namespace crimson::os::seastore::omap_manager {
class delta_leaf_buffer_t {
std::vector<delta_leaf_t> buffer;
public:
bool empty() const {
return buffer.empty();
}
void insert(
const std::string &key,
const ceph::bufferlist &val) {
buffer.push_back(
delta_leaf_t{
delta_leaf_t::op_t::INSERT,
key,
val
});
}
void update(
const std::string &key,
const ceph::bufferlist &val) {
buffer.push_back(
delta_leaf_t{
delta_leaf_t::op_t::UPDATE,
key,
val
});
}
void remove(const std::string &key) {
buffer.push_back(
delta_leaf_t{
delta_leaf_t::op_t::REMOVE,
key,
bufferlist()
});
}
void replay(StringKVLeafNodeLayout &node) {
for (auto &i: buffer) {
i.replay(node);
}
}
void clear() {
buffer.clear();
}
DENC(delta_leaf_buffer_t, v, p) {
DENC_START(1, 1, p);
denc(v.buffer, p);
DENC_FINISH(p);
}
bool operator==(const delta_leaf_buffer_t &rhs) const {
return buffer == rhs.buffer;
}
};
}
WRITE_CLASS_DENC(crimson::os::seastore::omap_manager::delta_leaf_buffer_t)
namespace crimson::os::seastore::omap_manager {
/**
* StringKVInnerNodeLayout
*
* Uses absl::container_internal::Layout for the actual key memory layout.
*
* The primary interface exposed is centered on the iterator
* and related methods.
*
* Also included are helpers for doing splits and merges as for a btree.
*
* layout diagram:
*
* # <----------------------------- node range --------------------------------------------> #
* # #<~># free space #
* # <------------- left part -----------------------------> # <~# <----- right keys -----> #
* # # <------------ left keys --------------> #~> # #
* # # keys [2, n) |<~># #<~>| right keys [2, n) #
* # # <--- key 0 ----> | <--- key 1 ----> | # # | <- k1 -> | <-- k0 --> #
* # # | | # # | | #
* # num_ | meta # key | key | val | key | key | val | # # | key | key #
* # keys | depth # off | len | laddr| off | len | laddr| # # | buff | buff #
* # | # 0 | 0 | 0 | 1 | 1 | 1 |...#...#...| key 1 | key 0 #
* # | | | <- off --+----------> #
* # | | ^ | <- off --> #
* | | | ^
* | +----------------------------------+ |
* +----------------------------------------------------------------+
*/
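// A small worked example of the offset scheme above (keys hypothetical):
// with key 0 = "ab" (len 2) and key 1 = "xyz" (len 3), the stored offsets
// are key_off[0] = 2 and key_off[1] = 5, i.e. each offset is the cumulative
// key length counted from the right end of the node, so "ab" occupies
// [end - 2, end) and "xyz" occupies [end - 5, end - 2), where
// end = buf + OMAP_INNER_BLOCK_SIZE.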
class StringKVInnerNodeLayout {
char *buf = nullptr;
using L = absl::container_internal::Layout<ceph_le32, omap_node_meta_le_t, omap_inner_key_le_t>;
static constexpr L layout{1, 1, 1}; // = L::Partial(1, 1, 1);
friend class delta_inner_t;
public:
template <bool is_const>
class iter_t {
friend class StringKVInnerNodeLayout;
template <typename iterator, typename const_iterator>
friend void copy_from_foreign(iterator, const_iterator, const_iterator);
template <typename iterator>
friend void copy_from_local(unsigned, iterator, iterator, iterator);
using parent_t = typename crimson::common::maybe_const_t<StringKVInnerNodeLayout, is_const>::type;
mutable parent_t node;
uint16_t index;
iter_t(
parent_t parent,
uint16_t index) : node(parent), index(index) {}
public:
using iterator_category = std::input_iterator_tag;
using value_type = StringKVInnerNodeLayout;
using difference_type = std::ptrdiff_t;
using pointer = StringKVInnerNodeLayout*;
using reference = iter_t&;
iter_t(const iter_t &) = default;
iter_t(iter_t &&) = default;
iter_t &operator=(const iter_t &) = default;
iter_t &operator=(iter_t &&) = default;
operator iter_t<!is_const>() const {
static_assert(!is_const);
return iter_t<!is_const>(node, index);
}
iter_t &operator*() { return *this; }
iter_t *operator->() { return this; }
iter_t operator++(int) {
auto ret = *this;
++index;
return ret;
}
iter_t &operator++() {
++index;
return *this;
}
iter_t operator--(int) {
auto ret = *this;
assert(index > 0);
--index;
return ret;
}
iter_t &operator--() {
assert(index > 0);
--index;
return *this;
}
uint16_t operator-(const iter_t &rhs) const {
assert(rhs.node == node);
return index - rhs.index;
}
iter_t operator+(uint16_t off) const {
return iter_t(node, index + off);
}
iter_t operator-(uint16_t off) const {
return iter_t(node, index - off);
}
uint16_t operator<(const iter_t &rhs) const {
assert(rhs.node == node);
return index < rhs.index;
}
uint16_t operator>(const iter_t &rhs) const {
assert(rhs.node == node);
return index > rhs.index;
}
friend bool operator==(const iter_t &lhs, const iter_t &rhs) {
assert(lhs.node == rhs.node);
return lhs.index == rhs.index;
}
private:
omap_inner_key_t get_node_key() const {
omap_inner_key_le_t kint = node->get_node_key_ptr()[index];
return omap_inner_key_t(kint);
}
auto get_node_key_ptr() const {
return reinterpret_cast<
typename crimson::common::maybe_const_t<char, is_const>::type>(
node->get_node_key_ptr() + index);
}
uint32_t get_node_val_offset() const {
return get_node_key().key_off;
}
auto get_node_val_ptr() const {
auto tail = node->buf + OMAP_INNER_BLOCK_SIZE;
if (*this == node->iter_end())
return tail;
else {
return tail - get_node_val_offset();
}
}
int get_right_offset_end() const {
if (index == 0)
return 0;
else
return (*this - 1)->get_node_val_offset();
}
auto get_right_ptr_end() const {
return node->buf + OMAP_INNER_BLOCK_SIZE - get_right_offset_end();
}
void update_offset(int offset) {
static_assert(!is_const);
auto key = get_node_key();
assert(offset + key.key_off >= 0);
key.key_off += offset;
set_node_key(key);
}
void set_node_key(omap_inner_key_t _lb) {
static_assert(!is_const);
omap_inner_key_le_t lb;
lb = _lb;
node->get_node_key_ptr()[index] = lb;
}
void set_node_val(const std::string &str) {
static_assert(!is_const);
assert(str.size() == get_node_key().key_len);
assert(get_node_key().key_off >= str.size());
assert(get_node_key().key_off < OMAP_INNER_BLOCK_SIZE);
assert(str.size() < OMAP_INNER_BLOCK_SIZE);
::memcpy(get_node_val_ptr(), str.data(), str.size());
}
public:
uint16_t get_index() const {
return index;
}
std::string get_key() const {
return std::string(
get_node_val_ptr(),
get_node_key().key_len);
}
laddr_t get_val() const {
return get_node_key().laddr;
}
bool contains(std::string_view key) const {
assert(*this != node->iter_end());
auto next = *this + 1;
if (next == node->iter_end()) {
return get_key() <= key;
} else {
return (get_key() <= key) && (next->get_key() > key);
}
}
};
using const_iterator = iter_t<true>;
using iterator = iter_t<false>;
public:
void journal_inner_insert(
const_iterator _iter,
const laddr_t laddr,
const std::string &key,
delta_inner_buffer_t *recorder) {
auto iter = iterator(this, _iter.index);
if (recorder) {
recorder->insert(
key,
laddr);
}
inner_insert(iter, key, laddr);
}
void journal_inner_update(
const_iterator _iter,
const laddr_t laddr,
delta_inner_buffer_t *recorder) {
auto iter = iterator(this, _iter.index);
auto key = iter->get_key();
if (recorder) {
recorder->update(key, laddr);
}
inner_update(iter, laddr);
}
void journal_inner_remove(
const_iterator _iter,
delta_inner_buffer_t *recorder) {
auto iter = iterator(this, _iter.index);
if (recorder) {
recorder->remove(iter->get_key());
}
inner_remove(iter);
}
StringKVInnerNodeLayout(char *buf) :
buf(buf) {}
uint32_t get_size() const {
ceph_le32 &size = *layout.template Pointer<0>(buf);
return uint32_t(size);
}
/**
* set_size
*
* Set size representation to match size
*/
void set_size(uint32_t size) {
ceph_le32 s;
s = size;
*layout.template Pointer<0>(buf) = s;
}
const_iterator iter_cbegin() const {
return const_iterator(
this,
0);
}
const_iterator iter_begin() const {
return iter_cbegin();
}
const_iterator iter_cend() const {
return const_iterator(
this,
get_size());
}
const_iterator iter_end() const {
return iter_cend();
}
iterator iter_begin() {
return iterator(
this,
0);
}
iterator iter_end() {
return iterator(
this,
get_size());
}
const_iterator iter_idx(uint16_t off) const {
return const_iterator(
this,
off);
}
const_iterator string_lower_bound(std::string_view str) const {
auto it = std::lower_bound(boost::make_counting_iterator<uint16_t>(0),
boost::make_counting_iterator<uint16_t>(get_size()),
str,
[this](uint16_t i, std::string_view str) {
const_iterator iter(this, i);
return iter->get_key() < str;
});
return const_iterator(this, *it);
}
iterator string_lower_bound(std::string_view str) {
const auto &tref = *this;
return iterator(this, tref.string_lower_bound(str).index);
}
const_iterator string_upper_bound(std::string_view str) const {
auto it = std::upper_bound(boost::make_counting_iterator<uint16_t>(0),
boost::make_counting_iterator<uint16_t>(get_size()),
str,
[this](std::string_view str, uint16_t i) {
const_iterator iter(this, i);
return str < iter->get_key();
});
return const_iterator(this, *it);
}
iterator string_upper_bound(std::string_view str) {
const auto &tref = *this;
return iterator(this, tref.string_upper_bound(str).index);
}
const_iterator find_string_key(std::string_view str) const {
auto ret = iter_begin();
for (; ret != iter_end(); ++ret) {
std::string s = ret->get_key();
if (s == str)
break;
}
return ret;
}
iterator find_string_key(std::string_view str) {
const auto &tref = *this;
return iterator(this, tref.find_string_key(str).index);
}
const_iterator get_split_pivot() const {
uint32_t total_size = omap_inner_key_t(
get_node_key_ptr()[get_size()-1]).key_off;
uint32_t pivot_size = total_size / 2;
uint32_t size = 0;
for (auto ite = iter_begin(); ite < iter_end(); ite++) {
auto node_key = ite->get_node_key();
size += node_key.key_len;
if (size >= pivot_size){
return ite;
}
}
return iter_end();
}
/**
* get_meta/set_meta
*
* Enables stashing a templated type within the layout.
* Cannot be modified after initial write as it is not represented
* in delta_t
*/
omap_node_meta_t get_meta() const {
omap_node_meta_le_t &metaint = *layout.template Pointer<1>(buf);
return omap_node_meta_t(metaint);
}
void set_meta(const omap_node_meta_t &meta) {
*layout.template Pointer<1>(buf) = omap_node_meta_le_t(meta);
}
uint32_t used_space() const {
uint32_t count = get_size();
if (count) {
omap_inner_key_t last_key = omap_inner_key_t(get_node_key_ptr()[count-1]);
return last_key.key_off + count * sizeof(omap_inner_key_le_t);
} else {
return 0;
}
}
uint32_t free_space() const {
return capacity() - used_space();
}
uint16_t capacity() const {
return OMAP_INNER_BLOCK_SIZE
- (reinterpret_cast<char*>(layout.template Pointer<2>(buf))
- reinterpret_cast<char*>(layout.template Pointer<0>(buf)));
}
bool is_overflow(size_t ksize) const {
return free_space() < (sizeof(omap_inner_key_le_t) + ksize);
}
bool is_overflow(const StringKVInnerNodeLayout &rhs) const {
return free_space() < rhs.used_space();
}
bool below_min() const {
return free_space() > (capacity() / 2);
}
bool operator==(const StringKVInnerNodeLayout &rhs) const {
if (get_size() != rhs.get_size()) {
return false;
}
auto iter = iter_begin();
auto iter2 = rhs.iter_begin();
while (iter != iter_end()) {
if (iter->get_key() != iter2->get_key() ||
iter->get_val() != iter2->get_val()) {
return false;
}
iter++;
iter2++;
}
return true;
}
/**
* split_into
*
* Takes *this and splits its contents into left and right.
*/
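  /*
   * Illustrative sketch (sizes hypothetical): if the node's key buffers
   * total ~1000 bytes, get_split_pivot() picks the first entry at which the
   * running total reaches >= 500 bytes; entries before the pivot are copied
   * into left, the pivot and everything after it into right, and the pivot's
   * key is returned so the caller can index it in the parent.
   */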
std::string split_into(
StringKVInnerNodeLayout &left,
StringKVInnerNodeLayout &right) const {
auto piviter = get_split_pivot();
assert(piviter != iter_end());
copy_from_foreign(left.iter_begin(), iter_begin(), piviter);
left.set_size(piviter - iter_begin());
copy_from_foreign(right.iter_begin(), piviter, iter_end());
right.set_size(iter_end() - piviter);
auto [lmeta, rmeta] = get_meta().split_into();
left.set_meta(lmeta);
right.set_meta(rmeta);
return piviter->get_key();
}
/**
* merge_from
*
* Takes two nodes and copies their contents into *this.
*
* precondition: left.size() + right.size() < CAPACITY
*/
void merge_from(
const StringKVInnerNodeLayout &left,
const StringKVInnerNodeLayout &right) {
copy_from_foreign(
iter_end(),
left.iter_begin(),
left.iter_end());
set_size(left.get_size());
copy_from_foreign(
iter_end(),
right.iter_begin(),
right.iter_end());
set_size(left.get_size() + right.get_size());
set_meta(omap_node_meta_t::merge_from(left.get_meta(), right.get_meta()));
}
/**
* balance_into_new_nodes
*
* Takes the contents of left and right and copies them into
* replacement_left and replacement_right such that
   * the size of replacement_left is just >= 1/2 of (left + right)
*/
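  /*
   * Illustrative sketch (sizes hypothetical): if left holds ~700 bytes of
   * key buffers and right ~300 bytes, pivot_size is 500 and falls inside
   * left, so replacement_left receives left's entries up to the pivot while
   * replacement_right receives the remainder of left followed by all of
   * right; the pivot key is returned so the parent index can be updated.
   */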
static std::string balance_into_new_nodes(
const StringKVInnerNodeLayout &left,
const StringKVInnerNodeLayout &right,
StringKVInnerNodeLayout &replacement_left,
StringKVInnerNodeLayout &replacement_right)
{
uint32_t left_size = omap_inner_key_t(left.get_node_key_ptr()[left.get_size()-1]).key_off;
uint32_t right_size = omap_inner_key_t(right.get_node_key_ptr()[right.get_size()-1]).key_off;
uint32_t total = left_size + right_size;
uint32_t pivot_size = total / 2;
uint32_t pivot_idx = 0;
if (pivot_size < left_size) {
uint32_t size = 0;
for (auto ite = left.iter_begin(); ite < left.iter_end(); ite++) {
auto node_key = ite->get_node_key();
size += node_key.key_len;
if (size >= pivot_size){
pivot_idx = ite.get_index();
break;
}
}
} else {
uint32_t more_size = pivot_size - left_size;
uint32_t size = 0;
for (auto ite = right.iter_begin(); ite < right.iter_end(); ite++) {
auto node_key = ite->get_node_key();
size += node_key.key_len;
if (size >= more_size){
pivot_idx = ite.get_index() + left.get_size();
break;
}
}
}
auto replacement_pivot = pivot_idx >= left.get_size() ?
right.iter_idx(pivot_idx - left.get_size())->get_key() :
left.iter_idx(pivot_idx)->get_key();
if (pivot_size < left_size) {
copy_from_foreign(
replacement_left.iter_end(),
left.iter_begin(),
left.iter_idx(pivot_idx));
replacement_left.set_size(pivot_idx);
copy_from_foreign(
replacement_right.iter_end(),
left.iter_idx(pivot_idx),
left.iter_end());
replacement_right.set_size(left.get_size() - pivot_idx);
copy_from_foreign(
replacement_right.iter_end(),
right.iter_begin(),
right.iter_end());
replacement_right.set_size(right.get_size() + left.get_size()- pivot_idx);
} else {
copy_from_foreign(
replacement_left.iter_end(),
left.iter_begin(),
left.iter_end());
replacement_left.set_size(left.get_size());
copy_from_foreign(
replacement_left.iter_end(),
right.iter_begin(),
right.iter_idx(pivot_idx - left.get_size()));
replacement_left.set_size(pivot_idx);
copy_from_foreign(
replacement_right.iter_end(),
right.iter_idx(pivot_idx - left.get_size()),
right.iter_end());
replacement_right.set_size(right.get_size() + left.get_size() - pivot_idx);
}
auto [lmeta, rmeta] = omap_node_meta_t::rebalance(
left.get_meta(), right.get_meta());
replacement_left.set_meta(lmeta);
replacement_right.set_meta(rmeta);
return replacement_pivot;
}
private:
void inner_insert(
iterator iter,
const std::string &key,
laddr_t val) {
if (iter != iter_begin()) {
assert((iter - 1)->get_key() < key);
}
if (iter != iter_end()) {
assert(iter->get_key() > key);
}
assert(!is_overflow(key.size()));
if (iter != iter_end()) {
copy_from_local(key.size(), iter + 1, iter, iter_end());
}
omap_inner_key_t nkey;
nkey.key_len = key.size();
nkey.laddr = val;
if (iter != iter_begin()) {
auto pkey = (iter - 1).get_node_key();
nkey.key_off = nkey.key_len + pkey.key_off;
} else {
nkey.key_off = nkey.key_len;
}
iter->set_node_key(nkey);
set_size(get_size() + 1);
iter->set_node_val(key);
}
void inner_update(
iterator iter,
laddr_t addr) {
assert(iter != iter_end());
auto node_key = iter->get_node_key();
node_key.laddr = addr;
iter->set_node_key(node_key);
}
void inner_remove(iterator iter) {
assert(iter != iter_end());
if ((iter + 1) != iter_end())
copy_from_local(iter->get_node_key().key_len, iter, iter + 1, iter_end());
set_size(get_size() - 1);
}
/**
   * get_node_key_ptr
*
* Get pointer to start of key array
*/
omap_inner_key_le_t *get_node_key_ptr() {
return L::Partial(1, 1, get_size()).template Pointer<2>(buf);
}
const omap_inner_key_le_t *get_node_key_ptr() const {
return L::Partial(1, 1, get_size()).template Pointer<2>(buf);
}
};
/**
* StringKVLeafNodeLayout
*
* layout diagram:
*
* # <----------------------------- node range -------------------------------------------------> #
* # #<~># free space #
* # <------------- left part ---------------------------> # <~# <----- right key-value pairs --> #
* # # <------------ left keys ------------> #~> # #
* # # keys [2, n) |<~># #<~>| right kvs [2, n) #
* # # <--- key 0 ---> | <--- key 1 ---> | # # | <-- kv 1 --> | <-- kv 0 --> #
* # # | | # # | | #
* # num_ | meta # key | key | val | key | key | val | # # | key | val | key | val #
* # keys | depth # off | len | len | off | len | len | # # | buff | buff | buff | buff #
* # # 0 | 0 | 0 | 1 | 1 | 1 |...#...#...| key 1 | val 1| key 0 | val 0 #
* # | | | <--- off ----+-------------> #
* # | | ^ | <--- off ---> #
* | | | ^
* | +-----------------------------------+ |
* +-------------------------------------------------------------------+
*/
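// A small worked example of the offset scheme above (entries hypothetical):
// with kv 0 = ("ab", 4-byte value) and kv 1 = ("xyz", 2-byte value), the
// stored offsets are key_off[0] = 6 and key_off[1] = 11, i.e. each offset is
// the cumulative key+value length counted from the right end of the node;
// within an entry the key bytes are written first, immediately followed by
// its value bytes.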
class StringKVLeafNodeLayout {
char *buf = nullptr;
using L = absl::container_internal::Layout<ceph_le32, omap_node_meta_le_t, omap_leaf_key_le_t>;
static constexpr L layout{1, 1, 1}; // = L::Partial(1, 1, 1);
friend class delta_leaf_t;
public:
template <bool is_const>
class iter_t {
friend class StringKVLeafNodeLayout;
using parent_t = typename crimson::common::maybe_const_t<StringKVLeafNodeLayout, is_const>::type;
template <typename iterator, typename const_iterator>
friend void copy_from_foreign(iterator, const_iterator, const_iterator);
template <typename iterator>
friend void copy_from_local(unsigned, iterator, iterator, iterator);
parent_t node;
uint16_t index;
iter_t(
parent_t parent,
uint16_t index) : node(parent), index(index) {}
public:
iter_t(const iter_t &) = default;
iter_t(iter_t &&) = default;
iter_t &operator=(const iter_t &) = default;
iter_t &operator=(iter_t &&) = default;
operator iter_t<!is_const>() const {
static_assert(!is_const);
return iter_t<!is_const>(node, index);
}
iter_t &operator*() { return *this; }
iter_t *operator->() { return this; }
iter_t operator++(int) {
auto ret = *this;
++index;
return ret;
}
iter_t &operator++() {
++index;
return *this;
}
uint16_t operator-(const iter_t &rhs) const {
assert(rhs.node == node);
return index - rhs.index;
}
iter_t operator+(uint16_t off) const {
return iter_t(
node,
index + off);
}
iter_t operator-(uint16_t off) const {
return iter_t(
node,
index - off);
}
uint16_t operator<(const iter_t &rhs) const {
assert(rhs.node == node);
return index < rhs.index;
}
uint16_t operator>(const iter_t &rhs) const {
assert(rhs.node == node);
return index > rhs.index;
}
bool operator==(const iter_t &rhs) const {
assert(node == rhs.node);
return rhs.index == index;
}
bool operator!=(const iter_t &rhs) const {
assert(node == rhs.node);
return index != rhs.index;
}
private:
omap_leaf_key_t get_node_key() const {
omap_leaf_key_le_t kint = node->get_node_key_ptr()[index];
return omap_leaf_key_t(kint);
}
auto get_node_key_ptr() const {
return reinterpret_cast<
typename crimson::common::maybe_const_t<char, is_const>::type>(
node->get_node_key_ptr() + index);
}
uint32_t get_node_val_offset() const {
return get_node_key().key_off;
}
auto get_node_val_ptr() const {
auto tail = node->buf + OMAP_LEAF_BLOCK_SIZE;
if (*this == node->iter_end())
return tail;
else {
return tail - get_node_val_offset();
}
}
int get_right_offset_end() const {
if (index == 0)
return 0;
else
return (*this - 1)->get_node_val_offset();
}
auto get_right_ptr_end() const {
return node->buf + OMAP_LEAF_BLOCK_SIZE - get_right_offset_end();
}
void update_offset(int offset) {
auto key = get_node_key();
assert(offset + key.key_off >= 0);
key.key_off += offset;
set_node_key(key);
}
void set_node_key(omap_leaf_key_t _lb) const {
static_assert(!is_const);
omap_leaf_key_le_t lb;
lb = _lb;
node->get_node_key_ptr()[index] = lb;
}
void set_node_val(const std::string &key, const ceph::bufferlist &val) {
static_assert(!is_const);
auto node_key = get_node_key();
assert(key.size() == node_key.key_len);
assert(val.length() == node_key.val_len);
::memcpy(get_node_val_ptr(), key.data(), key.size());
auto bliter = val.begin();
bliter.copy(node_key.val_len, get_node_val_ptr() + node_key.key_len);
}
public:
uint16_t get_index() const {
return index;
}
std::string get_key() const {
return std::string(
get_node_val_ptr(),
get_node_key().key_len);
}
std::string get_str_val() const {
auto node_key = get_node_key();
return std::string(
get_node_val_ptr() + node_key.key_len,
get_node_key().val_len);
}
ceph::bufferlist get_val() const {
auto node_key = get_node_key();
ceph::bufferlist bl;
ceph::bufferptr bptr(
get_node_val_ptr() + node_key.key_len,
get_node_key().val_len);
bl.append(bptr);
return bl;
}
};
using const_iterator = iter_t<true>;
using iterator = iter_t<false>;
public:
void journal_leaf_insert(
const_iterator _iter,
const std::string &key,
const ceph::bufferlist &val,
delta_leaf_buffer_t *recorder) {
auto iter = iterator(this, _iter.index);
if (recorder) {
recorder->insert(
key,
val);
}
leaf_insert(iter, key, val);
}
void journal_leaf_update(
const_iterator _iter,
const std::string &key,
const ceph::bufferlist &val,
delta_leaf_buffer_t *recorder) {
auto iter = iterator(this, _iter.index);
if (recorder) {
recorder->remove(iter->get_key());
recorder->insert(key, val);
}
leaf_update(iter, key, val);
}
void journal_leaf_remove(
const_iterator _iter,
delta_leaf_buffer_t *recorder) {
auto iter = iterator(this, _iter.index);
if (recorder) {
recorder->remove(iter->get_key());
}
leaf_remove(iter);
}
StringKVLeafNodeLayout(char *buf) :
buf(buf) {}
const_iterator iter_begin() const {
return const_iterator(
this,
0);
}
const_iterator iter_end() const {
return const_iterator(
this,
get_size());
}
iterator iter_begin() {
return iterator(
this,
0);
}
iterator iter_end() {
return iterator(
this,
get_size());
}
const_iterator iter_idx(uint16_t off) const {
return const_iterator(
this,
off);
}
const_iterator string_lower_bound(std::string_view str) const {
uint16_t start = 0, end = get_size();
while (start != end) {
unsigned mid = (start + end) / 2;
const_iterator iter(this, mid);
std::string s = iter->get_key();
if (s < str) {
start = ++mid;
} else if (s > str) {
end = mid;
} else {
return iter;
}
}
return const_iterator(this, start);
}
iterator string_lower_bound(std::string_view str) {
const auto &tref = *this;
return iterator(this, tref.string_lower_bound(str).index);
}
const_iterator string_upper_bound(std::string_view str) const {
auto ret = iter_begin();
for (; ret != iter_end(); ++ret) {
std::string s = ret->get_key();
if (s > str)
break;
}
return ret;
}
iterator string_upper_bound(std::string_view str) {
const auto &tref = *this;
return iterator(this, tref.string_upper_bound(str).index);
}
const_iterator find_string_key(std::string_view str) const {
auto ret = iter_begin();
for (; ret != iter_end(); ++ret) {
std::string s = ret->get_key();
if (s == str)
break;
}
return ret;
}
iterator find_string_key(std::string_view str) {
const auto &tref = *this;
return iterator(this, tref.find_string_key(str).index);
}
const_iterator get_split_pivot() const {
uint32_t total_size = omap_leaf_key_t(get_node_key_ptr()[get_size()-1]).key_off;
uint32_t pivot_size = total_size / 2;
uint32_t size = 0;
for (auto ite = iter_begin(); ite < iter_end(); ite++) {
auto node_key = ite->get_node_key();
size += node_key.key_len + node_key.val_len;
if (size >= pivot_size){
return ite;
}
}
return iter_end();
}
uint32_t get_size() const {
ceph_le32 &size = *layout.template Pointer<0>(buf);
return uint32_t(size);
}
/**
* set_size
*
* Set size representation to match size
*/
void set_size(uint32_t size) {
ceph_le32 s;
s = size;
*layout.template Pointer<0>(buf) = s;
}
/**
* get_meta/set_meta
*
* Enables stashing a templated type within the layout.
* Cannot be modified after initial write as it is not represented
* in delta_t
*/
omap_node_meta_t get_meta() const {
omap_node_meta_le_t &metaint = *layout.template Pointer<1>(buf);
return omap_node_meta_t(metaint);
}
void set_meta(const omap_node_meta_t &meta) {
*layout.template Pointer<1>(buf) = omap_node_meta_le_t(meta);
}
uint32_t used_space() const {
uint32_t count = get_size();
if (count) {
omap_leaf_key_t last_key = omap_leaf_key_t(get_node_key_ptr()[count-1]);
return last_key.key_off + count * sizeof(omap_leaf_key_le_t);
} else {
return 0;
}
}
uint32_t free_space() const {
return capacity() - used_space();
}
uint32_t capacity() const {
return OMAP_LEAF_BLOCK_SIZE
- (reinterpret_cast<char*>(layout.template Pointer<2>(buf))
- reinterpret_cast<char*>(layout.template Pointer<0>(buf)));
}
bool is_overflow(size_t ksize, size_t vsize) const {
return free_space() < (sizeof(omap_leaf_key_le_t) + ksize + vsize);
}
bool is_overflow(const StringKVLeafNodeLayout &rhs) const {
return free_space() < rhs.used_space();
}
bool below_min() const {
return free_space() > (capacity() / 2);
}
bool operator==(const StringKVLeafNodeLayout &rhs) const {
if (get_size() != rhs.get_size()) {
return false;
}
auto iter = iter_begin();
auto iter2 = rhs.iter_begin();
while (iter != iter_end()) {
if(iter->get_key() != iter2->get_key() ||
iter->get_val() != iter2->get_val()) {
return false;
}
iter++;
iter2++;
}
return true;
}
/**
* split_into
*
* Takes *this and splits its contents into left and right.
*/
std::string split_into(
StringKVLeafNodeLayout &left,
StringKVLeafNodeLayout &right) const {
auto piviter = get_split_pivot();
assert (piviter != iter_end());
copy_from_foreign(left.iter_begin(), iter_begin(), piviter);
left.set_size(piviter - iter_begin());
copy_from_foreign(right.iter_begin(), piviter, iter_end());
right.set_size(iter_end() - piviter);
auto [lmeta, rmeta] = get_meta().split_into();
left.set_meta(lmeta);
right.set_meta(rmeta);
return piviter->get_key();
}
/**
* merge_from
*
* Takes two nodes and copies their contents into *this.
*
* precondition: left.size() + right.size() < CAPACITY
*/
void merge_from(
const StringKVLeafNodeLayout &left,
const StringKVLeafNodeLayout &right)
{
copy_from_foreign(
iter_end(),
left.iter_begin(),
left.iter_end());
set_size(left.get_size());
copy_from_foreign(
iter_end(),
right.iter_begin(),
right.iter_end());
set_size(left.get_size() + right.get_size());
set_meta(omap_node_meta_t::merge_from(left.get_meta(), right.get_meta()));
}
/**
* balance_into_new_nodes
*
* Takes the contents of left and right and copies them into
* replacement_left and replacement_right such that
   * the size of replacement_left is just >= 1/2 of the total size of (left + right).
*/
static std::string balance_into_new_nodes(
const StringKVLeafNodeLayout &left,
const StringKVLeafNodeLayout &right,
StringKVLeafNodeLayout &replacement_left,
StringKVLeafNodeLayout &replacement_right)
{
uint32_t left_size = omap_leaf_key_t(left.get_node_key_ptr()[left.get_size()-1]).key_off;
uint32_t right_size = omap_leaf_key_t(right.get_node_key_ptr()[right.get_size()-1]).key_off;
uint32_t total = left_size + right_size;
uint32_t pivot_size = total / 2;
uint32_t pivot_idx = 0;
if (pivot_size < left_size) {
uint32_t size = 0;
for (auto ite = left.iter_begin(); ite < left.iter_end(); ite++) {
auto node_key = ite->get_node_key();
size += node_key.key_len + node_key.val_len;
if (size >= pivot_size){
pivot_idx = ite.get_index();
break;
}
}
} else {
uint32_t more_size = pivot_size - left_size;
uint32_t size = 0;
for (auto ite = right.iter_begin(); ite < right.iter_end(); ite++) {
auto node_key = ite->get_node_key();
size += node_key.key_len + node_key.val_len;
if (size >= more_size){
pivot_idx = ite.get_index() + left.get_size();
break;
}
}
}
auto replacement_pivot = pivot_idx >= left.get_size() ?
right.iter_idx(pivot_idx - left.get_size())->get_key() :
left.iter_idx(pivot_idx)->get_key();
if (pivot_size < left_size) {
copy_from_foreign(
replacement_left.iter_end(),
left.iter_begin(),
left.iter_idx(pivot_idx));
replacement_left.set_size(pivot_idx);
copy_from_foreign(
replacement_right.iter_end(),
left.iter_idx(pivot_idx),
left.iter_end());
replacement_right.set_size(left.get_size() - pivot_idx);
copy_from_foreign(
replacement_right.iter_end(),
right.iter_begin(),
right.iter_end());
replacement_right.set_size(right.get_size() + left.get_size() - pivot_idx);
} else {
copy_from_foreign(
replacement_left.iter_end(),
left.iter_begin(),
left.iter_end());
replacement_left.set_size(left.get_size());
copy_from_foreign(
replacement_left.iter_end(),
right.iter_begin(),
right.iter_idx(pivot_idx - left.get_size()));
replacement_left.set_size(pivot_idx);
copy_from_foreign(
replacement_right.iter_end(),
right.iter_idx(pivot_idx - left.get_size()),
right.iter_end());
replacement_right.set_size(right.get_size() + left.get_size() - pivot_idx);
}
auto [lmeta, rmeta] = omap_node_meta_t::rebalance(
left.get_meta(), right.get_meta());
replacement_left.set_meta(lmeta);
replacement_right.set_meta(rmeta);
return replacement_pivot;
}
private:
void leaf_insert(
iterator iter,
const std::string &key,
const bufferlist &val) {
if (iter != iter_begin()) {
assert((iter - 1)->get_key() < key);
}
if (iter != iter_end()) {
assert(iter->get_key() > key);
}
assert(!is_overflow(key.size(), val.length()));
omap_leaf_key_t node_key;
if (iter == iter_begin()) {
node_key.key_off = key.size() + val.length();
node_key.key_len = key.size();
node_key.val_len = val.length();
} else {
node_key.key_off = (iter - 1)->get_node_key().key_off +
(key.size() + val.length());
node_key.key_len = key.size();
node_key.val_len = val.length();
}
if (get_size() != 0 && iter != iter_end())
copy_from_local(node_key.key_len + node_key.val_len, iter + 1, iter, iter_end());
iter->set_node_key(node_key);
set_size(get_size() + 1);
iter->set_node_val(key, val);
}
void leaf_update(
iterator iter,
const std::string &key,
const ceph::bufferlist &val) {
assert(iter != iter_end());
leaf_remove(iter);
assert(!is_overflow(key.size(), val.length()));
leaf_insert(iter, key, val);
}
void leaf_remove(iterator iter) {
assert(iter != iter_end());
if ((iter + 1) != iter_end()) {
omap_leaf_key_t key = iter->get_node_key();
copy_from_local(key.key_len + key.val_len, iter, iter + 1, iter_end());
}
set_size(get_size() - 1);
}
/**
   * get_node_key_ptr
*
* Get pointer to start of key array
*/
omap_leaf_key_le_t *get_node_key_ptr() {
return L::Partial(1, 1, get_size()).template Pointer<2>(buf);
}
const omap_leaf_key_le_t *get_node_key_ptr() const {
return L::Partial(1, 1, get_size()).template Pointer<2>(buf);
}
};
inline void delta_inner_t::replay(StringKVInnerNodeLayout &l) {
switch (op) {
case op_t::INSERT: {
l.inner_insert(l.string_lower_bound(key), key, addr);
break;
}
case op_t::UPDATE: {
auto iter = l.find_string_key(key);
assert(iter != l.iter_end());
l.inner_update(iter, addr);
break;
}
case op_t::REMOVE: {
auto iter = l.find_string_key(key);
assert(iter != l.iter_end());
l.inner_remove(iter);
break;
}
default:
assert(0 == "Impossible");
}
}
inline void delta_leaf_t::replay(StringKVLeafNodeLayout &l) {
switch (op) {
case op_t::INSERT: {
l.leaf_insert(l.string_lower_bound(key), key, val);
break;
}
case op_t::UPDATE: {
auto iter = l.find_string_key(key);
assert(iter != l.iter_end());
l.leaf_update(iter, key, val);
break;
}
case op_t::REMOVE: {
auto iter = l.find_string_key(key);
assert(iter != l.iter_end());
l.leaf_remove(iter);
break;
}
default:
assert(0 == "Impossible");
}
}
}
| 42,173 | 26.191489 | 102 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/fltree_onode_manager.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include "crimson/os/seastore/logging.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/fltree_onode_manager.h"
SET_SUBSYS(seastore_onode);
namespace crimson::os::seastore::onode {
FLTreeOnodeManager::contains_onode_ret FLTreeOnodeManager::contains_onode(
Transaction &trans,
const ghobject_t &hoid)
{
return tree.contains(trans, hoid);
}
FLTreeOnodeManager::get_onode_ret FLTreeOnodeManager::get_onode(
Transaction &trans,
const ghobject_t &hoid)
{
LOG_PREFIX(FLTreeOnodeManager::get_onode);
return tree.find(
trans, hoid
).si_then([this, &hoid, &trans, FNAME](auto cursor)
-> get_onode_ret {
if (cursor == tree.end()) {
DEBUGT("no entry for {}", trans, hoid);
return crimson::ct_error::enoent::make();
}
auto val = OnodeRef(new FLTreeOnode(
default_data_reservation,
default_metadata_range,
cursor.value()));
return get_onode_iertr::make_ready_future<OnodeRef>(
val
);
});
}
FLTreeOnodeManager::get_or_create_onode_ret
FLTreeOnodeManager::get_or_create_onode(
Transaction &trans,
const ghobject_t &hoid)
{
LOG_PREFIX(FLTreeOnodeManager::get_or_create_onode);
return tree.insert(
trans, hoid,
OnodeTree::tree_value_config_t{sizeof(onode_layout_t)}
).si_then([this, &trans, &hoid, FNAME](auto p)
-> get_or_create_onode_ret {
auto [cursor, created] = std::move(p);
auto val = OnodeRef(new FLTreeOnode(
default_data_reservation,
default_metadata_range,
cursor.value()));
if (created) {
DEBUGT("created onode for entry for {}", trans, hoid);
val->get_mutable_layout(trans) = onode_layout_t{};
}
return get_or_create_onode_iertr::make_ready_future<OnodeRef>(
val
);
});
}
FLTreeOnodeManager::get_or_create_onodes_ret
FLTreeOnodeManager::get_or_create_onodes(
Transaction &trans,
const std::vector<ghobject_t> &hoids)
{
return seastar::do_with(
std::vector<OnodeRef>(),
[this, &hoids, &trans](auto &ret) {
ret.reserve(hoids.size());
return trans_intr::do_for_each(
hoids,
[this, &trans, &ret](auto &hoid) {
return get_or_create_onode(trans, hoid
).si_then([&ret](auto &&onoderef) {
ret.push_back(std::move(onoderef));
});
}).si_then([&ret] {
return std::move(ret);
});
});
}
FLTreeOnodeManager::write_dirty_ret FLTreeOnodeManager::write_dirty(
Transaction &trans,
const std::vector<OnodeRef> &onodes)
{
return trans_intr::do_for_each(
onodes,
[this, &trans](auto &onode) -> eagain_ifuture<> {
auto &flonode = static_cast<FLTreeOnode&>(*onode);
switch (flonode.status) {
case FLTreeOnode::status_t::MUTATED: {
flonode.populate_recorder(trans);
return eagain_iertr::make_ready_future<>();
}
case FLTreeOnode::status_t::DELETED: {
return tree.erase(trans, flonode);
}
case FLTreeOnode::status_t::STABLE: {
return eagain_iertr::make_ready_future<>();
}
default:
__builtin_unreachable();
}
});
}
FLTreeOnodeManager::erase_onode_ret FLTreeOnodeManager::erase_onode(
Transaction &trans,
OnodeRef &onode)
{
auto &flonode = static_cast<FLTreeOnode&>(*onode);
flonode.mark_delete();
return erase_onode_iertr::now();
}
FLTreeOnodeManager::list_onodes_ret FLTreeOnodeManager::list_onodes(
Transaction &trans,
const ghobject_t& start,
const ghobject_t& end,
uint64_t limit)
{
return tree.lower_bound(trans, start
).si_then([this, &trans, end, limit] (auto&& cursor) {
using crimson::os::seastore::onode::full_key_t;
return seastar::do_with(
limit,
std::move(cursor),
list_onodes_bare_ret(),
[this, &trans, end] (auto& to_list, auto& current_cursor, auto& ret) {
return trans_intr::repeat(
[this, &trans, end, &to_list, ¤t_cursor, &ret] ()
-> eagain_ifuture<seastar::stop_iteration> {
if (current_cursor.is_end()) {
std::get<1>(ret) = ghobject_t::get_max();
return seastar::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::yes);
} else if (current_cursor.get_ghobj() >= end) {
std::get<1>(ret) = end;
return seastar::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::yes);
}
if (to_list == 0) {
std::get<1>(ret) = current_cursor.get_ghobj();
return seastar::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::yes);
}
std::get<0>(ret).emplace_back(current_cursor.get_ghobj());
return tree.get_next(trans, current_cursor
).si_then([&to_list, ¤t_cursor] (auto&& next_cursor) mutable {
// we intentionally hold the current_cursor during get_next() to
// accelerate tree lookup.
--to_list;
current_cursor = next_cursor;
return seastar::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::no);
});
}).si_then([&ret] () mutable {
return seastar::make_ready_future<list_onodes_bare_ret>(
std::move(ret));
});
});
});
}
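// Illustrative use of the pagination contract above (scenario hypothetical):
// a caller can pass limit=100 and treat the second element of the returned
// list_onodes_bare_ret as the resume point -- it is ghobject_t::get_max()
// once the tree is exhausted, `end` once the requested range is exhausted,
// or the first unlisted object when the limit was hit -- and feed it back as
// `start` of the next call to continue the listing.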
FLTreeOnodeManager::~FLTreeOnodeManager() {}
}
| 5,413 | 29.587571 | 81 | cc |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/fltree_onode_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/onode_manager.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/value.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/tree.h"
namespace crimson::os::seastore::onode {
struct FLTreeOnode final : Onode, Value {
static constexpr tree_conf_t TREE_CONF = {
value_magic_t::ONODE,
256, // max_ns_size
          // same as option osd_max_object_namespace_len
2048, // max_oid_size
          // same as option osd_max_object_name_len
1200, // max_value_payload_size
// see crimson::os::seastore::onode_layout_t
8192, // internal_node_size
// see the formula in validate_tree_config
16384 // leaf_node_size
// see the formula in validate_tree_config
};
enum class status_t {
STABLE,
MUTATED,
DELETED
} status = status_t::STABLE;
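  // Illustrative lifecycle (as exercised by FLTreeOnodeManager::write_dirty):
  // get_mutable_layout() moves STABLE -> MUTATED, populate_recorder() then
  // journals the mutated layout and returns the onode to STABLE, while
  // mark_delete() moves it to DELETED so it is erased from the tree instead
  // of being written back.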
FLTreeOnode(FLTreeOnode&&) = default;
FLTreeOnode& operator=(FLTreeOnode&&) = delete;
FLTreeOnode(const FLTreeOnode&) = default;
FLTreeOnode& operator=(const FLTreeOnode&) = delete;
template <typename... T>
FLTreeOnode(uint32_t ddr, uint32_t dmr, T&&... args)
: Onode(ddr, dmr),
Value(std::forward<T>(args)...) {}
template <typename... T>
FLTreeOnode(T&&... args)
: Onode(0, 0),
Value(std::forward<T>(args)...) {}
struct Recorder : public ValueDeltaRecorder {
Recorder(bufferlist &bl) : ValueDeltaRecorder(bl) {}
value_magic_t get_header_magic() const final {
return TREE_CONF.value_magic;
}
void apply_value_delta(
ceph::bufferlist::const_iterator &bliter,
NodeExtentMutable &value,
laddr_t) final {
assert(value.get_length() == sizeof(onode_layout_t));
bliter.copy(value.get_length(), value.get_write());
}
void record_delta(NodeExtentMutable &value) {
// TODO: probably could use versioning, etc
assert(value.get_length() == sizeof(onode_layout_t));
ceph::buffer::ptr bptr(value.get_length());
memcpy(bptr.c_str(), value.get_read(), value.get_length());
get_encoded(value).append(bptr);
}
};
const onode_layout_t &get_layout() const final {
assert(status != status_t::DELETED);
return *read_payload<onode_layout_t>();
}
onode_layout_t &get_mutable_layout(Transaction &t) final {
assert(status != status_t::DELETED);
auto p = prepare_mutate_payload<
onode_layout_t,
Recorder>(t);
status = status_t::MUTATED;
return *reinterpret_cast<onode_layout_t*>(p.first.get_write());
  }
void populate_recorder(Transaction &t) {
assert(status == status_t::MUTATED);
auto p = prepare_mutate_payload<
onode_layout_t,
Recorder>(t);
if (p.second) {
p.second->record_delta(
p.first);
}
status = status_t::STABLE;
}
void mark_delete() {
assert(status != status_t::DELETED);
status = status_t::DELETED;
}
laddr_t get_hint() const final {
return Value::get_hint();
}
~FLTreeOnode() final {}
};
using OnodeTree = Btree<FLTreeOnode>;
using crimson::common::get_conf;
class FLTreeOnodeManager : public crimson::os::seastore::OnodeManager {
OnodeTree tree;
uint32_t default_data_reservation = 0;
uint32_t default_metadata_offset = 0;
uint32_t default_metadata_range = 0;
public:
FLTreeOnodeManager(TransactionManager &tm) :
tree(NodeExtentManager::create_seastore(tm)),
default_data_reservation(
get_conf<uint64_t>("seastore_default_max_object_size")),
default_metadata_offset(default_data_reservation),
default_metadata_range(
get_conf<uint64_t>("seastore_default_object_metadata_reservation"))
{}
mkfs_ret mkfs(Transaction &t) {
return tree.mkfs(t);
}
contains_onode_ret contains_onode(
Transaction &trans,
const ghobject_t &hoid) final;
get_onode_ret get_onode(
Transaction &trans,
const ghobject_t &hoid) final;
get_or_create_onode_ret get_or_create_onode(
Transaction &trans,
const ghobject_t &hoid) final;
get_or_create_onodes_ret get_or_create_onodes(
Transaction &trans,
const std::vector<ghobject_t> &hoids) final;
write_dirty_ret write_dirty(
Transaction &trans,
const std::vector<OnodeRef> &onodes) final;
erase_onode_ret erase_onode(
Transaction &trans,
OnodeRef &onode) final;
list_onodes_ret list_onodes(
Transaction &trans,
const ghobject_t& start,
const ghobject_t& end,
uint64_t limit) final;
~FLTreeOnodeManager();
};
using FLTreeOnodeManagerRef = std::unique_ptr<FLTreeOnodeManager>;
}
| 4,723 | 26.952663 | 73 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/fwd.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <algorithm>
#include <cstring>
#include <limits>
#include <memory>
#include <ostream>
#include <string>
#include "crimson/common/errorator.h"
#include "crimson/os/seastore/cached_extent.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/transaction.h"
namespace crimson::os::seastore::onode {
using eagain_iertr = trans_iertr<
crimson::errorator<crimson::ct_error::input_output_error> >;
template <class ValueT=void>
using eagain_ifuture = eagain_iertr::future<ValueT>;
using crimson::os::seastore::Transaction;
using crimson::os::seastore::TransactionRef;
using crimson::os::seastore::laddr_t;
using crimson::os::seastore::L_ADDR_MIN;
using crimson::os::seastore::L_ADDR_NULL;
using crimson::os::seastore::extent_len_t;
class DeltaRecorder;
class NodeExtent;
class NodeExtentManager;
class RootNodeTracker;
struct ValueBuilder;
using DeltaRecorderURef = std::unique_ptr<DeltaRecorder>;
using NodeExtentRef = crimson::os::seastore::TCachedExtentRef<NodeExtent>;
using NodeExtentManagerURef = std::unique_ptr<NodeExtentManager>;
using RootNodeTrackerURef = std::unique_ptr<RootNodeTracker>;
struct context_t {
NodeExtentManager& nm;
const ValueBuilder& vb;
Transaction& t;
};
class LeafNodeImpl;
class InternalNodeImpl;
class NodeImpl;
using LeafNodeImplURef = std::unique_ptr<LeafNodeImpl>;
using InternalNodeImplURef = std::unique_ptr<InternalNodeImpl>;
using NodeImplURef = std::unique_ptr<NodeImpl>;
using level_t = uint8_t;
constexpr auto MAX_LEVEL = std::numeric_limits<level_t>::max();
// a type only to index within a node, 32 bits should be enough
using index_t = uint32_t;
constexpr auto INDEX_END = std::numeric_limits<index_t>::max();
constexpr auto INDEX_LAST = INDEX_END - 0x4;
constexpr auto INDEX_UPPER_BOUND = INDEX_END - 0x8;
inline bool is_valid_index(index_t index) { return index < INDEX_UPPER_BOUND; }
// we support up to 64 KiB tree nodes
using node_offset_t = uint16_t;
constexpr node_offset_t DISK_BLOCK_SIZE = 1u << 12;
constexpr auto MAX_NODE_SIZE =
(extent_len_t)std::numeric_limits<node_offset_t>::max() + 1;
inline bool is_valid_node_size(extent_len_t node_size) {
return (node_size > 0 &&
node_size <= MAX_NODE_SIZE &&
node_size % DISK_BLOCK_SIZE == 0);
}
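// Concretely (illustrative): with DISK_BLOCK_SIZE = 4096 and
// MAX_NODE_SIZE = 65536, the valid node sizes are exactly the multiples of
// 4 KiB from 4096 up to and including 65536; e.g. 6000 is rejected for not
// being block-aligned and 69632 for exceeding MAX_NODE_SIZE.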
using string_size_t = uint16_t;
enum class MatchKindBS : int8_t { NE = -1, EQ = 0 };
enum class MatchKindCMP : int8_t { LT = -1, EQ = 0, GT };
inline MatchKindCMP toMatchKindCMP(int value) {
if (value > 0) {
return MatchKindCMP::GT;
} else if (value < 0) {
return MatchKindCMP::LT;
} else {
return MatchKindCMP::EQ;
}
}
template <typename Type>
MatchKindCMP toMatchKindCMP(const Type& l, const Type& r) {
if (l > r) {
return MatchKindCMP::GT;
} else if (l < r) {
return MatchKindCMP::LT;
} else {
return MatchKindCMP::EQ;
}
}
inline MatchKindCMP toMatchKindCMP(
std::string_view l, std::string_view r) {
return toMatchKindCMP(l.compare(r));
}
inline MatchKindCMP reverse(MatchKindCMP cmp) {
if (cmp == MatchKindCMP::LT) {
return MatchKindCMP::GT;
} else if (cmp == MatchKindCMP::GT) {
return MatchKindCMP::LT;
} else {
return cmp;
}
}
struct tree_stats_t {
size_t size_persistent_leaf = 0;
size_t size_persistent_internal = 0;
size_t size_filled_leaf = 0;
size_t size_filled_internal = 0;
size_t size_logical_leaf = 0;
size_t size_logical_internal = 0;
size_t size_overhead_leaf = 0;
size_t size_overhead_internal = 0;
size_t size_value_leaf = 0;
size_t size_value_internal = 0;
unsigned num_kvs_leaf = 0;
unsigned num_kvs_internal = 0;
unsigned num_nodes_leaf = 0;
unsigned num_nodes_internal = 0;
unsigned height = 0;
size_t size_persistent() const {
return size_persistent_leaf + size_persistent_internal; }
size_t size_filled() const {
return size_filled_leaf + size_filled_internal; }
size_t size_logical() const {
return size_logical_leaf + size_logical_internal; }
size_t size_overhead() const {
return size_overhead_leaf + size_overhead_internal; }
size_t size_value() const {
return size_value_leaf + size_value_internal; }
unsigned num_kvs() const {
return num_kvs_leaf + num_kvs_internal; }
unsigned num_nodes() const {
return num_nodes_leaf + num_nodes_internal; }
double ratio_fullness() const {
return (double)size_filled() / size_persistent(); }
double ratio_key_compression() const {
return (double)(size_filled() - size_value()) / (size_logical() - size_value()); }
double ratio_overhead() const {
return (double)size_overhead() / size_filled(); }
double ratio_keys_leaf() const {
return (double)num_kvs_leaf / num_kvs(); }
double ratio_nodes_leaf() const {
return (double)num_nodes_leaf / num_nodes(); }
double ratio_filled_leaf() const {
return (double)size_filled_leaf / size_filled(); }
};
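// A worked example of the ratios above (numbers hypothetical): a tree with
// size_persistent() = 128 KiB and size_filled() = 96 KiB has
// ratio_fullness() = 96 / 128 = 0.75; if 8 KiB of the filled bytes are
// overhead, ratio_overhead() = 8 / 96 ~= 0.083.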
inline std::ostream& operator<<(std::ostream& os, const tree_stats_t& stats) {
os << "Tree stats:"
<< "\n height = " << stats.height
<< "\n num values = " << stats.num_kvs_leaf
<< "\n num nodes = " << stats.num_nodes()
<< " (leaf=" << stats.num_nodes_leaf
<< ", internal=" << stats.num_nodes_internal << ")"
<< "\n size persistent = " << stats.size_persistent() << "B"
<< "\n size filled = " << stats.size_filled() << "B"
<< " (value=" << stats.size_value_leaf << "B"
<< ", rest=" << stats.size_filled() - stats.size_value_leaf << "B)"
<< "\n size logical = " << stats.size_logical() << "B"
<< "\n size overhead = " << stats.size_overhead() << "B"
<< "\n ratio fullness = " << stats.ratio_fullness()
<< "\n ratio keys leaf = " << stats.ratio_keys_leaf()
<< "\n ratio nodes leaf = " << stats.ratio_nodes_leaf()
<< "\n ratio filled leaf = " << stats.ratio_filled_leaf()
<< "\n ratio key compression = " << stats.ratio_key_compression();
assert(stats.num_kvs_internal + 1 == stats.num_nodes());
return os;
}
template <typename PtrType>
void reset_ptr(PtrType& ptr, const char* origin_base,
const char* new_base, extent_len_t node_size) {
assert((const char*)ptr > origin_base);
assert((const char*)ptr - origin_base < (int)node_size);
ptr = reinterpret_cast<PtrType>(
(const char*)ptr - origin_base + new_base);
}
}
#if FMT_VERSION >= 90000
template<>
struct fmt::formatter<crimson::os::seastore::onode::tree_stats_t> : fmt::ostream_formatter {};
#endif
| 6,567 | 32.340102 | 94 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include "node.h"
#include <cassert>
#include <exception>
#include <sstream>
#include "common/likely.h"
#include "crimson/common/utility.h"
#include "crimson/os/seastore/logging.h"
#include "node_extent_manager.h"
#include "node_impl.h"
#include "stages/node_stage_layout.h"
SET_SUBSYS(seastore_onode);
namespace fmt {
template <typename T>
const void* ptr(const ::boost::intrusive_ptr<T>& p) {
return p.get();
}
}
namespace crimson::os::seastore::onode {
/*
* tree_cursor_t
*/
// create from insert
tree_cursor_t::tree_cursor_t(Ref<LeafNode> node, const search_position_t& pos)
: ref_leaf_node{node}, position{pos}, cache{ref_leaf_node}
{
assert(is_tracked());
ref_leaf_node->do_track_cursor<true>(*this);
// do not account updates for the inserted values
is_mutated = true;
}
// create from lookup
tree_cursor_t::tree_cursor_t(
Ref<LeafNode> node, const search_position_t& pos,
const key_view_t& key_view, const value_header_t* p_value_header)
: ref_leaf_node{node}, position{pos}, cache{ref_leaf_node}
{
assert(is_tracked());
update_cache_same_node(key_view, p_value_header);
ref_leaf_node->do_track_cursor<true>(*this);
}
// lookup reaches the end, contain leaf node for further insert
tree_cursor_t::tree_cursor_t(Ref<LeafNode> node)
: ref_leaf_node{node}, position{search_position_t::end()}, cache{ref_leaf_node}
{
assert(is_end());
assert(ref_leaf_node->is_level_tail());
}
// destroy the cursor, untracking it from the leaf node if still tracked
tree_cursor_t::~tree_cursor_t()
{
if (is_tracked()) {
ref_leaf_node->do_untrack_cursor(*this);
}
}
eagain_ifuture<Ref<tree_cursor_t>>
tree_cursor_t::get_next(context_t c)
{
assert(is_tracked());
return ref_leaf_node->get_next_cursor(c, position);
}
void tree_cursor_t::assert_next_to(
const tree_cursor_t& prv, value_magic_t magic) const
{
#ifndef NDEBUG
assert(!prv.is_end());
if (is_end()) {
assert(ref_leaf_node == prv.ref_leaf_node);
assert(ref_leaf_node->is_level_tail());
} else if (is_tracked()) {
auto key = get_key_view(magic);
auto prv_key = prv.get_key_view(magic);
assert(key > prv_key);
if (ref_leaf_node == prv.ref_leaf_node) {
position.assert_next_to(prv.position);
} else {
assert(!prv.ref_leaf_node->is_level_tail());
assert(position == search_position_t::begin());
}
} else {
assert(is_invalid());
ceph_abort("impossible");
}
#endif
}
template <bool FORCE_MERGE>
eagain_ifuture<Ref<tree_cursor_t>>
tree_cursor_t::erase(context_t c, bool get_next)
{
assert(is_tracked());
return ref_leaf_node->erase<FORCE_MERGE>(c, position, get_next);
}
template eagain_ifuture<Ref<tree_cursor_t>>
tree_cursor_t::erase<true>(context_t, bool);
template eagain_ifuture<Ref<tree_cursor_t>>
tree_cursor_t::erase<false>(context_t, bool);
std::strong_ordering tree_cursor_t::compare_to(
const tree_cursor_t& o, value_magic_t magic) const
{
if (!is_tracked() && !o.is_tracked()) {
return std::strong_ordering::equal;
} else if (!is_tracked()) {
return std::strong_ordering::greater;
} else if (!o.is_tracked()) {
return std::strong_ordering::less;
}
assert(is_tracked() && o.is_tracked());
// all tracked cursors are singletons
if (this == &o) {
return std::strong_ordering::equal;
}
std::strong_ordering ret = std::strong_ordering::equal;
if (ref_leaf_node == o.ref_leaf_node) {
ret = position <=> o.position;
} else {
auto key = get_key_view(magic);
auto o_key = o.get_key_view(magic);
ret = key <=> o_key;
}
assert(ret != 0);
return ret;
}
eagain_ifuture<>
tree_cursor_t::extend_value(context_t c, value_size_t extend_size)
{
assert(is_tracked());
return ref_leaf_node->extend_value(c, position, extend_size);
}
eagain_ifuture<>
tree_cursor_t::trim_value(context_t c, value_size_t trim_size)
{
assert(is_tracked());
return ref_leaf_node->trim_value(c, position, trim_size);
}
template <bool VALIDATE>
void tree_cursor_t::update_track(
Ref<LeafNode> node, const search_position_t& pos)
{
// I must be already untracked
assert(is_tracked());
assert(!ref_leaf_node->check_is_tracking(*this));
// track the new node and new pos
assert(!pos.is_end());
ref_leaf_node = node;
position = pos;
  // we lazily update the key/value information until the user asks for it
cache.invalidate();
ref_leaf_node->do_track_cursor<VALIDATE>(*this);
}
template void tree_cursor_t::update_track<true>(Ref<LeafNode>, const search_position_t&);
template void tree_cursor_t::update_track<false>(Ref<LeafNode>, const search_position_t&);
void tree_cursor_t::update_cache_same_node(const key_view_t& key_view,
const value_header_t* p_value_header) const
{
assert(is_tracked());
cache.update_all(ref_leaf_node->get_version(), key_view, p_value_header);
cache.validate_is_latest(position);
}
void tree_cursor_t::invalidate()
{
assert(is_tracked());
ref_leaf_node.reset();
assert(is_invalid());
// I must be removed from LeafNode
}
/*
* tree_cursor_t::Cache
*/
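// Illustrative behaviour (scenario hypothetical): if the tracked leaf extent
// is merely duplicated for write -- a version state bump with an unchanged
// layout -- maybe_duplicate() only rebases the cached pointers onto the new
// extent; if the layout version changed as well, make_latest() falls back to
// update_all() and re-reads the key view and value header from the node.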
tree_cursor_t::Cache::Cache(Ref<LeafNode>& ref_leaf_node)
: ref_leaf_node{ref_leaf_node} {}
void tree_cursor_t::Cache::update_all(const node_version_t& current_version,
const key_view_t& _key_view,
const value_header_t* _p_value_header)
{
assert(_p_value_header);
needs_update_all = false;
version = current_version;
p_node_base = ref_leaf_node->read();
key_view = _key_view;
p_value_header = _p_value_header;
assert((const char*)p_value_header > p_node_base);
assert((const char*)p_value_header - p_node_base <
(int)ref_leaf_node->get_node_size());
value_payload_mut.reset();
p_value_recorder = nullptr;
}
void tree_cursor_t::Cache::maybe_duplicate(const node_version_t& current_version)
{
assert(!needs_update_all);
assert(version.layout == current_version.layout);
if (version.state == current_version.state) {
// cache is already latest.
} else if (version.state < current_version.state) {
// the extent has been copied but the layout has not been changed.
assert(p_node_base != nullptr);
assert(key_view.has_value());
assert(p_value_header != nullptr);
auto current_p_node_base = ref_leaf_node->read();
assert(current_p_node_base != p_node_base);
auto node_size = ref_leaf_node->get_node_size();
version.state = current_version.state;
reset_ptr(p_value_header, p_node_base,
current_p_node_base, node_size);
key_view->reset_to(p_node_base, current_p_node_base, node_size);
value_payload_mut.reset();
p_value_recorder = nullptr;
p_node_base = current_p_node_base;
} else {
// It is impossible to change state backwards, see node_types.h.
ceph_abort("impossible");
}
}
void tree_cursor_t::Cache::make_latest(
value_magic_t magic, const search_position_t& pos)
{
auto current_version = ref_leaf_node->get_version();
if (needs_update_all || version.layout != current_version.layout) {
auto [_key_view, _p_value_header] = ref_leaf_node->get_kv(pos);
update_all(current_version, _key_view, _p_value_header);
} else {
maybe_duplicate(current_version);
}
assert(p_value_header->magic == magic);
validate_is_latest(pos);
}
void tree_cursor_t::Cache::validate_is_latest(const search_position_t& pos) const
{
#ifndef NDEBUG
assert(!needs_update_all);
assert(version == ref_leaf_node->get_version());
auto [_key_view, _p_value_header] = ref_leaf_node->get_kv(pos);
assert(p_node_base == ref_leaf_node->read());
assert(key_view ==_key_view);
assert(p_value_header == _p_value_header);
#endif
}
std::pair<NodeExtentMutable&, ValueDeltaRecorder*>
tree_cursor_t::Cache::prepare_mutate_value_payload(
context_t c, const search_position_t& pos)
{
make_latest(c.vb.get_header_magic(), pos);
if (!value_payload_mut.has_value()) {
assert(!p_value_recorder);
auto value_mutable = ref_leaf_node->prepare_mutate_value_payload(c);
auto current_version = ref_leaf_node->get_version();
maybe_duplicate(current_version);
value_payload_mut = p_value_header->get_payload_mutable(value_mutable.first);
p_value_recorder = value_mutable.second;
validate_is_latest(pos);
}
return {*value_payload_mut, p_value_recorder};
}
/*
* Node
*/
Node::Node(NodeImplURef&& impl) : impl{std::move(impl)} {}
Node::~Node()
{
if (!is_tracked()) {
// possible scenarios:
// a. I'm erased;
// b. Eagain happened after the node extent is allocated/loaded
// and before the node is initialized correctly;
} else {
assert(!impl->is_extent_retired());
if (is_root()) {
super->do_untrack_root(*this);
} else {
_parent_info->ptr->do_untrack_child(*this);
}
}
}
level_t Node::level() const
{
return impl->level();
}
eagain_ifuture<Node::search_result_t> Node::lower_bound(
context_t c, const key_hobj_t& key)
{
return seastar::do_with(
MatchHistory(), [this, c, &key](auto& history) {
return lower_bound_tracked(c, key, history);
}
);
}
eagain_ifuture<std::pair<Ref<tree_cursor_t>, bool>> Node::insert(
context_t c,
const key_hobj_t& key,
value_config_t vconf,
Ref<Node>&& this_ref)
{
return seastar::do_with(
MatchHistory(), [this, c, &key, vconf,
this_ref = std::move(this_ref)] (auto& history) mutable {
return lower_bound_tracked(c, key, history
).si_then([c, &key, vconf, &history,
this_ref = std::move(this_ref)] (auto result) mutable {
// the cursor in the result should already hold the root node upwards
this_ref.reset();
if (result.match() == MatchKindBS::EQ) {
return eagain_iertr::make_ready_future<std::pair<Ref<tree_cursor_t>, bool>>(
std::make_pair(result.p_cursor, false));
} else {
auto leaf_node = result.p_cursor->get_leaf_node();
return leaf_node->insert_value(
c, key, vconf, result.p_cursor->get_position(), history, result.mstat
).si_then([](auto p_cursor) {
return seastar::make_ready_future<std::pair<Ref<tree_cursor_t>, bool>>(
std::make_pair(p_cursor, true));
});
}
});
}
);
}
eagain_ifuture<std::size_t> Node::erase(
context_t c,
const key_hobj_t& key,
Ref<Node>&& this_ref)
{
return lower_bound(c, key
).si_then([c, this_ref = std::move(this_ref)] (auto result) mutable {
// the cursor in the result should already hold the root node upwards
this_ref.reset();
if (result.match() != MatchKindBS::EQ) {
return eagain_iertr::make_ready_future<std::size_t>(0);
}
auto ref_cursor = result.p_cursor;
return ref_cursor->erase(c, false
).si_then([ref_cursor] (auto next_cursor) {
assert(ref_cursor->is_invalid());
assert(!next_cursor);
return std::size_t(1);
});
});
}
eagain_ifuture<tree_stats_t> Node::get_tree_stats(context_t c)
{
return seastar::do_with(
tree_stats_t(), [this, c](auto& stats) {
return do_get_tree_stats(c, stats).si_then([&stats] {
return stats;
});
}
);
}
std::ostream& Node::dump(std::ostream& os) const
{
return impl->dump(os);
}
std::ostream& Node::dump_brief(std::ostream& os) const
{
return impl->dump_brief(os);
}
const std::string& Node::get_name() const
{
return impl->get_name();
}
void Node::test_make_destructable(
context_t c, NodeExtentMutable& mut, Super::URef&& _super)
{
impl->test_set_tail(mut);
make_root(c, std::move(_super));
}
eagain_ifuture<> Node::mkfs(context_t c, RootNodeTracker& root_tracker)
{
LOG_PREFIX(OTree::Node::mkfs);
return LeafNode::allocate_root(c, root_tracker
).si_then([c, FNAME](auto ret) {
c.t.get_onode_tree_stats().extents_num_delta++;
INFOT("allocated root {}", c.t, ret->get_name());
});
}
eagain_ifuture<Ref<Node>> Node::load_root(context_t c, RootNodeTracker& root_tracker)
{
LOG_PREFIX(OTree::Node::load_root);
return c.nm.get_super(c.t, root_tracker
).handle_error_interruptible(
eagain_iertr::pass_further{},
crimson::ct_error::input_output_error::handle([FNAME, c] {
ERRORT("EIO during get_super()", c.t);
ceph_abort("fatal error");
})
).si_then([c, &root_tracker, FNAME](auto&& _super) {
assert(_super);
auto root_addr = _super->get_root_laddr();
assert(root_addr != L_ADDR_NULL);
TRACET("loading root_addr={:x} ...", c.t, root_addr);
return Node::load(c, root_addr, true
).si_then([c, _super = std::move(_super),
&root_tracker, FNAME](auto root) mutable {
TRACET("loaded {}", c.t, root->get_name());
assert(root->impl->field_type() == field_type_t::N0);
root->as_root(std::move(_super));
std::ignore = c; // as only used in an assert
std::ignore = root_tracker;
assert(root == root_tracker.get_root(c.t));
return seastar::make_ready_future<Ref<Node>>(root);
});
});
}
void Node::make_root(context_t c, Super::URef&& _super)
{
_super->write_root_laddr(c, impl->laddr());
as_root(std::move(_super));
c.t.get_onode_tree_stats().depth = static_cast<uint64_t>(level()) + 1;
}
void Node::as_root(Super::URef&& _super)
{
assert(!is_tracked());
assert(_super->get_root_laddr() == impl->laddr());
assert(impl->is_level_tail());
super = std::move(_super);
super->do_track_root(*this);
assert(is_root());
}
Super::URef Node::deref_super()
{
assert(is_root());
assert(super->get_root_laddr() == impl->laddr());
assert(impl->is_level_tail());
super->do_untrack_root(*this);
auto ret = std::move(super);
assert(!is_tracked());
return ret;
}
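// Allocate a new internal root above the current root (which must be N0),
// transferring the Super reference to it and tracking this node as the new
// root's end child.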
eagain_ifuture<> Node::upgrade_root(context_t c, laddr_t hint)
{
LOG_PREFIX(OTree::Node::upgrade_root);
assert(impl->field_type() == field_type_t::N0);
auto super_to_move = deref_super();
return InternalNode::allocate_root(
c, hint, impl->level(), impl->laddr(), std::move(super_to_move)
).si_then([this, c, FNAME](auto new_root) {
as_child(search_position_t::end(), new_root);
INFOT("upgraded from {} to {}",
c.t, get_name(), new_root->get_name());
});
}
template <bool VALIDATE>
void Node::as_child(const search_position_t& pos, Ref<InternalNode> parent_node)
{
assert(!is_tracked() || !is_root());
#ifndef NDEBUG
// Although I might have an outdated _parent_info during fixing,
// I must be already untracked.
if (_parent_info.has_value()) {
assert(!_parent_info->ptr->check_is_tracking(*this));
}
#endif
_parent_info = parent_info_t{pos, parent_node};
parent_info().ptr->do_track_child<VALIDATE>(*this);
assert(!is_root());
}
template void Node::as_child<true>(const search_position_t&, Ref<InternalNode>);
template void Node::as_child<false>(const search_position_t&, Ref<InternalNode>);
Ref<InternalNode> Node::deref_parent()
{
assert(!is_root());
auto parent_ref = std::move(parent_info().ptr);
parent_ref->do_untrack_child(*this);
_parent_info.reset();
assert(!is_tracked());
return parent_ref;
}
eagain_ifuture<> Node::apply_split_to_parent(
context_t c,
Ref<Node>&& this_ref,
Ref<Node>&& split_right,
bool update_right_index)
{
assert(!is_root());
assert(this == this_ref.get());
// TODO(cross-node string dedup)
return parent_info().ptr->apply_child_split(
c, std::move(this_ref), std::move(split_right), update_right_index);
}
eagain_ifuture<Ref<tree_cursor_t>>
Node::get_next_cursor_from_parent(context_t c)
{
assert(!impl->is_level_tail());
assert(!is_root());
return parent_info().ptr->get_next_cursor(c, parent_info().position);
}
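// Try to merge this node with an adjacent sibling when it underflows or has a
// single value (or always under FORCE_MERGE). The sibling with more free
// space is preferred, and the merge only proceeds if the combined content
// fits into one node; otherwise just fix the parent index if requested.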
template <bool FORCE_MERGE>
eagain_ifuture<>
Node::try_merge_adjacent(
context_t c, bool update_parent_index, Ref<Node>&& this_ref)
{
LOG_PREFIX(OTree::Node::try_merge_adjacent);
assert(this == this_ref.get());
impl->validate_non_empty();
assert(!is_root());
if constexpr (!FORCE_MERGE) {
if (!impl->is_size_underflow() &&
!impl->has_single_value()) {
// skip merge
if (update_parent_index) {
return fix_parent_index(c, std::move(this_ref), false);
} else {
parent_info().ptr->validate_child_tracked(*this);
return eagain_iertr::now();
}
}
}
return parent_info().ptr->get_child_peers(c, parent_info().position
).si_then([c, this_ref = std::move(this_ref), this, FNAME,
update_parent_index] (auto lr_nodes) mutable -> eagain_ifuture<> {
auto& [lnode, rnode] = lr_nodes;
Ref<Node> left_for_merge;
Ref<Node> right_for_merge;
Ref<Node>* p_this_ref;
bool is_left;
if (!lnode && !rnode) {
// XXX: this is possible before node rebalance is implemented,
// when its parent cannot merge with its peers and has only one child
// (this node).
p_this_ref = &this_ref;
} else if (!lnode) {
left_for_merge = std::move(this_ref);
p_this_ref = &left_for_merge;
right_for_merge = std::move(rnode);
is_left = true;
} else if (!rnode) {
left_for_merge = std::move(lnode);
right_for_merge = std::move(this_ref);
p_this_ref = &right_for_merge;
is_left = false;
} else { // lnode && rnode
if (lnode->impl->free_size() > rnode->impl->free_size()) {
left_for_merge = std::move(lnode);
right_for_merge = std::move(this_ref);
p_this_ref = &right_for_merge;
is_left = false;
} else { // lnode free size <= rnode free size
left_for_merge = std::move(this_ref);
p_this_ref = &left_for_merge;
right_for_merge = std::move(rnode);
is_left = true;
}
}
if (left_for_merge) {
assert(right_for_merge);
auto [merge_stage, merge_size] = left_for_merge->impl->evaluate_merge(
*right_for_merge->impl);
if (merge_size <= left_for_merge->impl->total_size()) {
// proceed merge
bool update_index_after_merge;
if (is_left) {
update_index_after_merge = false;
} else {
update_index_after_merge = update_parent_index;
}
DEBUGT("merge {} and {} at merge_stage={}, merge_size={}B, "
"update_index={}, is_left={} ...",
c.t, left_for_merge->get_name(), right_for_merge->get_name(),
merge_stage, merge_size, update_index_after_merge, is_left);
// we currently cannot generate a delta that depends on another extent's
// content, so use rebuild_extent() as a workaround to rebuild the node from
// a fresh extent, thus no need to generate a delta.
auto left_addr = left_for_merge->impl->laddr();
return left_for_merge->rebuild_extent(c
).si_then([c, update_index_after_merge,
left_addr,
merge_stage = merge_stage,
merge_size = merge_size,
left_for_merge = std::move(left_for_merge),
right_for_merge = std::move(right_for_merge)] (auto left_mut) mutable {
if (left_for_merge->impl->node_type() == node_type_t::LEAF) {
auto& left = *static_cast<LeafNode*>(left_for_merge.get());
left.on_layout_change();
}
search_position_t left_last_pos = left_for_merge->impl->merge(
left_mut, *right_for_merge->impl, merge_stage, merge_size);
left_for_merge->track_merge(right_for_merge, merge_stage, left_last_pos);
--(c.t.get_onode_tree_stats().extents_num_delta);
return left_for_merge->parent_info().ptr->apply_children_merge(
c, std::move(left_for_merge), left_addr,
std::move(right_for_merge), update_index_after_merge);
});
} else {
// size would overflow if merged
}
}
// cannot merge
if (update_parent_index) {
return fix_parent_index(c, std::move(*p_this_ref), false);
} else {
parent_info().ptr->validate_child_tracked(*this);
return eagain_iertr::now();
}
// XXX: rebalance
});
}
template eagain_ifuture<> Node::try_merge_adjacent<true>(context_t, bool, Ref<Node>&&);
template eagain_ifuture<> Node::try_merge_adjacent<false>(context_t, bool, Ref<Node>&&);
eagain_ifuture<> Node::erase_node(context_t c, Ref<Node>&& this_ref)
{
// To erase a node:
// 1. I'm supposed to have already untracked any children or cursors
// 2. unlink parent/super --ptr-> me
// 3. unlink me --ref-> parent/super
// 4. retire extent
// 5. destruct node
assert(this_ref.get() == this);
assert(!is_tracking());
assert(!is_root());
assert(this_ref->use_count() == 1);
return parent_info().ptr->erase_child(c, std::move(this_ref));
}
template <bool FORCE_MERGE>
eagain_ifuture<> Node::fix_parent_index(
context_t c, Ref<Node>&& this_ref, bool check_downgrade)
{
assert(!is_root());
assert(this == this_ref.get());
return parent_info().ptr->fix_index<FORCE_MERGE>(
c, std::move(this_ref), check_downgrade);
}
template eagain_ifuture<> Node::fix_parent_index<true>(context_t, Ref<Node>&&, bool);
template eagain_ifuture<> Node::fix_parent_index<false>(context_t, Ref<Node>&&, bool);
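// Read the node extent at addr, validate its header (field type, level-tail
// flag and node size), and construct the corresponding LeafNode or
// InternalNode wrapper.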
eagain_ifuture<Ref<Node>> Node::load(
context_t c, laddr_t addr, bool expect_is_level_tail)
{
LOG_PREFIX(OTree::Node::load);
return c.nm.read_extent(c.t, addr
).handle_error_interruptible(
eagain_iertr::pass_further{},
crimson::ct_error::input_output_error::handle(
[FNAME, c, addr, expect_is_level_tail] {
ERRORT("EIO -- addr={:x}, is_level_tail={}",
c.t, addr, expect_is_level_tail);
ceph_abort("fatal error");
}),
crimson::ct_error::invarg::handle(
[FNAME, c, addr, expect_is_level_tail] {
ERRORT("EINVAL -- addr={:x}, is_level_tail={}",
c.t, addr, expect_is_level_tail);
ceph_abort("fatal error");
}),
crimson::ct_error::enoent::handle(
[FNAME, c, addr, expect_is_level_tail] {
ERRORT("ENOENT -- addr={:x}, is_level_tail={}",
c.t, addr, expect_is_level_tail);
ceph_abort("fatal error");
}),
crimson::ct_error::erange::handle(
[FNAME, c, addr, expect_is_level_tail] {
ERRORT("ERANGE -- addr={:x}, is_level_tail={}",
c.t, addr, expect_is_level_tail);
ceph_abort("fatal error");
})
).si_then([FNAME, c, addr, expect_is_level_tail](auto extent)
-> eagain_ifuture<Ref<Node>> {
assert(extent);
auto header = extent->get_header();
auto field_type = header.get_field_type();
if (!field_type) {
ERRORT("load addr={:x}, is_level_tail={} error, "
"got invalid header -- {}",
c.t, addr, expect_is_level_tail, fmt::ptr(extent));
ceph_abort("fatal error");
}
if (header.get_is_level_tail() != expect_is_level_tail) {
ERRORT("load addr={:x}, is_level_tail={} error, "
"is_level_tail mismatch -- {}",
c.t, addr, expect_is_level_tail, fmt::ptr(extent));
ceph_abort("fatal error");
}
auto node_type = header.get_node_type();
if (node_type == node_type_t::LEAF) {
if (extent->get_length() != c.vb.get_leaf_node_size()) {
ERRORT("load addr={:x}, is_level_tail={} error, "
"leaf length mismatch -- {}",
c.t, addr, expect_is_level_tail, fmt::ptr(extent));
ceph_abort("fatal error");
}
auto impl = LeafNodeImpl::load(extent, *field_type);
auto *derived_ptr = impl.get();
return eagain_iertr::make_ready_future<Ref<Node>>(
new LeafNode(derived_ptr, std::move(impl)));
} else if (node_type == node_type_t::INTERNAL) {
if (extent->get_length() != c.vb.get_internal_node_size()) {
ERRORT("load addr={:x}, is_level_tail={} error, "
"internal length mismatch -- {}",
c.t, addr, expect_is_level_tail, fmt::ptr(extent));
ceph_abort("fatal error");
}
auto impl = InternalNodeImpl::load(extent, *field_type);
auto *derived_ptr = impl.get();
return eagain_iertr::make_ready_future<Ref<Node>>(
new InternalNode(derived_ptr, std::move(impl)));
} else {
ceph_abort("impossible path");
}
});
}
eagain_ifuture<NodeExtentMutable> Node::rebuild_extent(context_t c)
{
LOG_PREFIX(OTree::Node::rebuild_extent);
DEBUGT("{} ...", c.t, get_name());
assert(!is_root());
// assume I'm already ref counted by caller
// note: the laddr may change after rebuild, but we don't fix the parent
// mapping here because that is handled as part of the merge process.
return impl->rebuild_extent(c);
}
eagain_ifuture<> Node::retire(context_t c, Ref<Node>&& this_ref)
{
LOG_PREFIX(OTree::Node::retire);
DEBUGT("{} ...", c.t, get_name());
assert(this_ref.get() == this);
assert(!is_tracking());
assert(!is_tracked());
assert(this_ref->use_count() == 1);
return impl->retire_extent(c
).si_then([this_ref = std::move(this_ref)]{ /* deallocate node */});
}
void Node::make_tail(context_t c)
{
LOG_PREFIX(OTree::Node::make_tail);
assert(!impl->is_level_tail());
assert(!impl->is_keys_empty());
DEBUGT("{} ...", c.t, get_name());
impl->prepare_mutate(c);
auto tail_pos = impl->make_tail();
if (impl->node_type() == node_type_t::INTERNAL) {
auto& node = *static_cast<InternalNode*>(this);
node.track_make_tail(tail_pos);
}
}
/*
* InternalNode
*/
InternalNode::InternalNode(InternalNodeImpl* impl, NodeImplURef&& impl_ref)
: Node(std::move(impl_ref)), impl{impl} {}
eagain_ifuture<Ref<tree_cursor_t>>
InternalNode::get_next_cursor(context_t c, const search_position_t& pos)
{
impl->validate_non_empty();
if (pos.is_end()) {
assert(impl->is_level_tail());
return get_next_cursor_from_parent(c);
}
search_position_t next_pos = pos;
const laddr_packed_t* p_child_addr = nullptr;
impl->get_next_slot(next_pos, nullptr, &p_child_addr);
if (next_pos.is_end() && !impl->is_level_tail()) {
return get_next_cursor_from_parent(c);
} else {
if (next_pos.is_end()) {
p_child_addr = impl->get_tail_value();
}
assert(p_child_addr);
return get_or_track_child(c, next_pos, p_child_addr->value
).si_then([c](auto child) {
return child->lookup_smallest(c);
});
}
}
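// A tracked child has split into (left_child, right_child): replace the laddr
// and tracking at the child's position with right_child, then insert
// left_child keyed by its pivot, splitting myself if there is not enough room.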
eagain_ifuture<> InternalNode::apply_child_split(
context_t c, Ref<Node>&& left_child, Ref<Node>&& right_child,
bool update_right_index)
{
LOG_PREFIX(OTree::InternalNode::apply_child_split);
auto& left_pos = left_child->parent_info().position;
#ifndef NDEBUG
assert(left_child->parent_info().ptr.get() == this);
assert(!left_child->impl->is_level_tail());
if (left_pos.is_end()) {
assert(impl->is_level_tail());
assert(right_child->impl->is_level_tail());
assert(!update_right_index);
}
// right_child has not assigned parent yet
assert(!right_child->is_tracked());
#endif
impl->prepare_mutate(c);
DEBUGT("apply {}'s child {} to split to {}, update_index={} ...",
c.t, get_name(), left_child->get_name(),
right_child->get_name(), update_right_index);
// update layout from left_pos => left_child_addr to right_child_addr
auto left_child_addr = left_child->impl->laddr();
auto right_child_addr = right_child->impl->laddr();
impl->replace_child_addr(left_pos, right_child_addr, left_child_addr);
// update track from left_pos => left_child to right_child
replace_track(right_child, left_child, update_right_index);
auto left_key = *left_child->impl->get_pivot_index();
Ref<Node> this_ref = this;
return insert_or_split(
c, left_pos, left_key, left_child,
(update_right_index ? right_child : nullptr)
).si_then([this, c,
this_ref = std::move(this_ref)] (auto split_right) mutable {
if (split_right) {
// even though update_right_index may be true,
// we haven't fixed the right_child index of this node yet,
// so my parent index should be correct now.
return apply_split_to_parent(
c, std::move(this_ref), std::move(split_right), false);
} else {
return eagain_iertr::now();
}
}).si_then([c, update_right_index,
right_child = std::move(right_child)] () mutable {
if (update_right_index) {
// XXX: might not need to call validate_tracked_children() in fix_index()
return right_child->fix_parent_index(c, std::move(right_child), false);
} else {
// there is no need to call try_merge_adjacent() because
// the filled size of the inserted node or the split right node
// won't be reduced if update_right_index is false.
return eagain_iertr::now();
}
});
}
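// Erase a retiring child from this internal node. If the erased child was the
// level tail, its previous sibling is promoted to be the new tail first. If
// the erased child was my only value, this node is erased as well; otherwise
// the slot is erased in place, followed by root downgrade or adjacent merge
// as appropriate.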
eagain_ifuture<> InternalNode::erase_child(context_t c, Ref<Node>&& child_ref)
{
LOG_PREFIX(OTree::InternalNode::erase_child);
// this is a special version of recursive merge
impl->validate_non_empty();
assert(child_ref->use_count() == 1);
validate_child_tracked(*child_ref);
// fix the child's previous sibling as the new tail,
// and trigger new_tail_child->try_merge_adjacent() at the end
bool fix_tail = (child_ref->parent_info().position.is_end() &&
!impl->is_keys_empty());
return eagain_iertr::now().si_then([c, this, fix_tail] {
if (fix_tail) {
search_position_t new_tail_pos;
const laddr_packed_t* new_tail_p_addr = nullptr;
impl->get_largest_slot(&new_tail_pos, nullptr, &new_tail_p_addr);
return get_or_track_child(c, new_tail_pos, new_tail_p_addr->value);
} else {
return eagain_iertr::make_ready_future<Ref<Node>>();
}
}).si_then([c, this, child_ref = std::move(child_ref), FNAME]
(auto&& new_tail_child) mutable {
auto child_pos = child_ref->parent_info().position;
if (new_tail_child) {
DEBUGT("erase {}'s child {} at pos({}), "
"and fix new child tail {} at pos({}) ...",
c.t, get_name(), child_ref->get_name(), child_pos,
new_tail_child->get_name(), new_tail_child->parent_info().position);
assert(!new_tail_child->impl->is_level_tail());
new_tail_child->make_tail(c);
assert(new_tail_child->impl->is_level_tail());
if (new_tail_child->impl->node_type() == node_type_t::LEAF) {
// no need to proceed merge because the filled size is not changed
new_tail_child.reset();
}
} else {
DEBUGT("erase {}'s child {} at pos({}) ...",
c.t, get_name(), child_ref->get_name(), child_pos);
}
Ref<Node> this_ref = child_ref->deref_parent();
assert(this_ref == this);
return child_ref->retire(c, std::move(child_ref)
).si_then([c, this, child_pos, FNAME,
this_ref = std::move(this_ref)] () mutable {
if (impl->has_single_value()) {
// fast path without mutating the extent
DEBUGT("{} has one value left, erase ...", c.t, get_name());
#ifndef NDEBUG
if (impl->is_level_tail()) {
assert(child_pos.is_end());
} else {
assert(child_pos == search_position_t::begin());
}
#endif
if (is_root()) {
// Note: if merge/split works as expected, we should never encounter the
// situation when the internal root has <=1 children:
//
// A newly created internal root (see Node::upgrade_root()) will have 2
// children after split is finished.
//
// When merge happens, children will try to merge each other, and if the
// root detects there is only one child left, the root will be
// down-graded to the only child.
//
// In order to preserve the invariant, we need to make sure the new
// internal root also has at least 2 children.
ceph_abort("trying to erase the last item from the internal root node");
}
// track erase
assert(tracked_child_nodes.empty());
// no child should be referencing this node now, this_ref is the last one.
assert(this_ref->use_count() == 1);
return Node::erase_node(c, std::move(this_ref));
}
impl->prepare_mutate(c);
auto [erase_stage, next_or_last_pos] = impl->erase(child_pos);
if (child_pos.is_end()) {
// next_or_last_pos as last_pos
track_make_tail(next_or_last_pos);
} else {
// next_or_last_pos as next_pos
track_erase(child_pos, erase_stage);
}
validate_tracked_children();
if (is_root()) {
return try_downgrade_root(c, std::move(this_ref));
} else {
bool update_parent_index;
if (impl->is_level_tail()) {
update_parent_index = false;
} else {
// next_or_last_pos as next_pos
update_parent_index = next_or_last_pos.is_end();
}
return try_merge_adjacent(c, update_parent_index, std::move(this_ref));
}
}).si_then([c, new_tail_child = std::move(new_tail_child)] () mutable {
// finally, check if the new tail child needs to merge
if (new_tail_child && !new_tail_child->is_root()) {
assert(new_tail_child->impl->is_level_tail());
return new_tail_child->try_merge_adjacent(
c, false, std::move(new_tail_child));
} else {
return eagain_iertr::now();
}
});
});
}
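// A tracked child's pivot key has changed (e.g. after a merge): erase the
// stale index entry and re-insert the child under its new key, which may in
// turn split this node or require fixing my own parent index.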
template <bool FORCE_MERGE>
eagain_ifuture<> InternalNode::fix_index(
context_t c, Ref<Node>&& child, bool check_downgrade)
{
LOG_PREFIX(OTree::InternalNode::fix_index);
impl->validate_non_empty();
validate_child_inconsistent(*child);
auto& child_pos = child->parent_info().position;
Ref<Node> this_ref = child->deref_parent();
assert(this_ref == this);
validate_tracked_children();
impl->prepare_mutate(c);
key_view_t new_key = *child->impl->get_pivot_index();
DEBUGT("fix {}'s index of child {} at pos({}), new_key={} ...",
c.t, get_name(), child->get_name(), child_pos, new_key);
// erase the incorrect item
auto [erase_stage, next_pos] = impl->erase(child_pos);
track_erase(child_pos, erase_stage);
validate_tracked_children();
// find out whether there is a need to fix parent index recursively
bool update_parent_index;
if (impl->is_level_tail()) {
update_parent_index = false;
} else {
update_parent_index = next_pos.is_end();
}
return insert_or_split(c, next_pos, new_key, child
).si_then([this, c, update_parent_index, check_downgrade,
this_ref = std::move(this_ref)] (auto split_right) mutable {
if (split_right) {
// after split, the parent index to the split_right will be incorrect
// if update_parent_index is true.
return apply_split_to_parent(
c, std::move(this_ref), std::move(split_right), update_parent_index);
} else {
// no split path
if (is_root()) {
if (check_downgrade) {
return try_downgrade_root(c, std::move(this_ref));
} else {
// no need to call try_downgrade_root() because the number of keys
// has not changed, and I must have at least 2 keys.
assert(!impl->is_keys_empty());
return eagain_iertr::now();
}
} else {
// for non-root, maybe need merge adjacent or fix parent,
// because the filled node size may be reduced.
return try_merge_adjacent<FORCE_MERGE>(
c, update_parent_index, std::move(this_ref));
}
}
});
}
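// Two tracked children have merged into left_child (which now lives at a new
// laddr after rebuild): point the right_child's slot to the merged node, drop
// the left_child's old slot, retire right_child, and then fix indexes,
// downgrade the root or merge upwards as needed.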
template <bool FORCE_MERGE>
eagain_ifuture<> InternalNode::apply_children_merge(
context_t c, Ref<Node>&& left_child, laddr_t origin_left_addr,
Ref<Node>&& right_child, bool update_index)
{
LOG_PREFIX(OTree::InternalNode::apply_children_merge);
auto left_pos = left_child->parent_info().position;
auto left_addr = left_child->impl->laddr();
auto& right_pos = right_child->parent_info().position;
auto right_addr = right_child->impl->laddr();
DEBUGT("apply {}'s child {} (was {:#x}) at pos({}), "
"to merge with {} at pos({}), update_index={} ...",
c.t, get_name(), left_child->get_name(), origin_left_addr, left_pos,
right_child->get_name(), right_pos, update_index);
#ifndef NDEBUG
assert(left_child->parent_info().ptr == this);
assert(!left_pos.is_end());
const laddr_packed_t* p_value_left;
impl->get_slot(left_pos, nullptr, &p_value_left);
assert(p_value_left->value == origin_left_addr);
assert(right_child->use_count() == 1);
assert(right_child->parent_info().ptr == this);
const laddr_packed_t* p_value_right;
if (right_pos.is_end()) {
assert(right_child->impl->is_level_tail());
assert(left_child->impl->is_level_tail());
assert(impl->is_level_tail());
assert(!update_index);
p_value_right = impl->get_tail_value();
} else {
assert(!right_child->impl->is_level_tail());
assert(!left_child->impl->is_level_tail());
impl->get_slot(right_pos, nullptr, &p_value_right);
}
assert(p_value_right->value == right_addr);
#endif
// XXX: we may jump to try_downgrade_root() without mutating this node.
// update layout from right_pos => right_addr to left_addr
impl->prepare_mutate(c);
impl->replace_child_addr(right_pos, left_addr, right_addr);
// update track from right_pos => right_child to left_child
left_child->deref_parent();
replace_track(left_child, right_child, update_index);
// erase left_pos from layout
auto [erase_stage, next_pos] = impl->erase(left_pos);
track_erase<false>(left_pos, erase_stage);
assert(next_pos == left_child->parent_info().position);
// All good to retire the right_child.
// I'm already ref-counted by left_child.
return right_child->retire(c, std::move(right_child)
).si_then([c, this, update_index,
left_child = std::move(left_child)] () mutable {
if (update_index) {
// I'm all good but:
// - my number of keys is reduced by 1
// - my size may underflow, but try_merge_adjacent() is already part of fix_index()
return left_child->fix_parent_index<FORCE_MERGE>(c, std::move(left_child), true);
} else {
validate_tracked_children();
Ref<Node> this_ref = this;
left_child.reset();
// I'm all good but:
// - my number of keys is reduced by 1
// - my size may underflow
if (is_root()) {
return try_downgrade_root(c, std::move(this_ref));
} else {
return try_merge_adjacent<FORCE_MERGE>(
c, false, std::move(this_ref));
}
}
});
}
template eagain_ifuture<> InternalNode::apply_children_merge<true>(
context_t, Ref<Node>&&, laddr_t, Ref<Node>&&, bool);
template eagain_ifuture<> InternalNode::apply_children_merge<false>(
context_t, Ref<Node>&&, laddr_t, Ref<Node>&&, bool);
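// Locate and load the previous and next sibling children around pos, either
// of which may be absent (e.g. at the first slot, or past the level tail).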
eagain_ifuture<std::pair<Ref<Node>, Ref<Node>>> InternalNode::get_child_peers(
context_t c, const search_position_t& pos)
{
// assume I'm already ref counted by caller
search_position_t prev_pos;
const laddr_packed_t* prev_p_child_addr = nullptr;
search_position_t next_pos;
const laddr_packed_t* next_p_child_addr = nullptr;
if (pos.is_end()) {
assert(impl->is_level_tail());
if (!impl->is_keys_empty()) {
// got previous child only
impl->get_largest_slot(&prev_pos, nullptr, &prev_p_child_addr);
assert(prev_pos < pos);
assert(prev_p_child_addr != nullptr);
} else {
// no keys, so no peer children
}
} else { // !pos.is_end()
if (pos != search_position_t::begin()) {
// got previous child
prev_pos = pos;
impl->get_prev_slot(prev_pos, nullptr, &prev_p_child_addr);
assert(prev_pos < pos);
assert(prev_p_child_addr != nullptr);
} else {
// is already the first child, so no previous child
}
next_pos = pos;
impl->get_next_slot(next_pos, nullptr, &next_p_child_addr);
if (next_pos.is_end()) {
if (impl->is_level_tail()) {
// the next child is the tail
next_p_child_addr = impl->get_tail_value();
assert(pos < next_pos);
assert(next_p_child_addr != nullptr);
} else {
// next child doesn't exist
assert(next_p_child_addr == nullptr);
}
} else {
// got the next child
assert(pos < next_pos);
assert(next_p_child_addr != nullptr);
}
}
return eagain_iertr::now().si_then([this, c, prev_pos, prev_p_child_addr] {
if (prev_p_child_addr != nullptr) {
return get_or_track_child(c, prev_pos, prev_p_child_addr->value);
} else {
return eagain_iertr::make_ready_future<Ref<Node>>();
}
}).si_then([this, c, next_pos, next_p_child_addr] (Ref<Node> lnode) {
if (next_p_child_addr != nullptr) {
return get_or_track_child(c, next_pos, next_p_child_addr->value
).si_then([lnode] (Ref<Node> rnode) {
return seastar::make_ready_future<std::pair<Ref<Node>, Ref<Node>>>(
lnode, rnode);
});
} else {
return eagain_iertr::make_ready_future<std::pair<Ref<Node>, Ref<Node>>>(
lnode, nullptr);
}
});
}
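// Allocate a fresh internal root one level above the old root, wiring the old
// root address into the new root's tail slot.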
eagain_ifuture<Ref<InternalNode>> InternalNode::allocate_root(
context_t c, laddr_t hint, level_t old_root_level,
laddr_t old_root_addr, Super::URef&& super)
{
// support tree height up to 256
ceph_assert(old_root_level < MAX_LEVEL);
return InternalNode::allocate(c, hint, field_type_t::N0, true, old_root_level + 1
).si_then([c, old_root_addr,
super = std::move(super)](auto fresh_node) mutable {
auto root = fresh_node.node;
assert(root->impl->is_keys_empty());
auto p_value = root->impl->get_tail_value();
fresh_node.mut.copy_in_absolute(
const_cast<laddr_packed_t*>(p_value), old_root_addr);
root->make_root_from(c, std::move(super), old_root_addr);
++(c.t.get_onode_tree_stats().extents_num_delta);
return root;
});
}
eagain_ifuture<Ref<tree_cursor_t>>
InternalNode::lookup_smallest(context_t c)
{
impl->validate_non_empty();
auto position = search_position_t::begin();
const laddr_packed_t* p_child_addr;
impl->get_slot(position, nullptr, &p_child_addr);
return get_or_track_child(c, position, p_child_addr->value
).si_then([c](auto child) {
return child->lookup_smallest(c);
});
}
eagain_ifuture<Ref<tree_cursor_t>>
InternalNode::lookup_largest(context_t c)
{
// NOTE: unlike LeafNode::lookup_largest(), this only works on the level-tail
// internal node, using the tail child address.
impl->validate_non_empty();
assert(impl->is_level_tail());
auto p_child_addr = impl->get_tail_value();
return get_or_track_child(c, search_position_t::end(), p_child_addr->value
).si_then([c](auto child) {
return child->lookup_largest(c);
});
}
eagain_ifuture<Node::search_result_t>
InternalNode::lower_bound_tracked(
context_t c, const key_hobj_t& key, MatchHistory& history)
{
auto result = impl->lower_bound(key, history);
return get_or_track_child(c, result.position, result.p_value->value
).si_then([c, &key, &history](auto child) {
// XXX(multi-type): pass result.mstat to child
return child->lower_bound_tracked(c, key, history);
});
}
eagain_ifuture<> InternalNode::do_get_tree_stats(
context_t c, tree_stats_t& stats)
{
impl->validate_non_empty();
auto nstats = impl->get_stats();
stats.size_persistent_internal += nstats.size_persistent;
stats.size_filled_internal += nstats.size_filled;
stats.size_logical_internal += nstats.size_logical;
stats.size_overhead_internal += nstats.size_overhead;
stats.size_value_internal += nstats.size_value;
stats.num_kvs_internal += nstats.num_kvs;
stats.num_nodes_internal += 1;
Ref<Node> this_ref = this;
return seastar::do_with(
search_position_t(), (const laddr_packed_t*)(nullptr),
[this, this_ref, c, &stats](auto& pos, auto& p_child_addr) {
pos = search_position_t::begin();
impl->get_slot(pos, nullptr, &p_child_addr);
return trans_intr::repeat(
[this, this_ref, c, &stats, &pos, &p_child_addr]()
-> eagain_ifuture<seastar::stop_iteration> {
return get_or_track_child(c, pos, p_child_addr->value
).si_then([c, &stats](auto child) {
return child->do_get_tree_stats(c, stats);
}).si_then([this, this_ref, &pos, &p_child_addr] {
if (pos.is_end()) {
return seastar::stop_iteration::yes;
} else {
impl->get_next_slot(pos, nullptr, &p_child_addr);
if (pos.is_end()) {
if (impl->is_level_tail()) {
p_child_addr = impl->get_tail_value();
return seastar::stop_iteration::no;
} else {
return seastar::stop_iteration::yes;
}
} else {
return seastar::stop_iteration::no;
}
}
});
});
}
);
}
void InternalNode::track_merge(
Ref<Node> _right_node, match_stage_t stage, search_position_t& left_last_pos)
{
assert(level() == _right_node->level());
assert(impl->node_type() == _right_node->impl->node_type());
auto& right_node = *static_cast<InternalNode*>(_right_node.get());
if (right_node.tracked_child_nodes.empty()) {
return;
}
match_stage_t curr_stage = STAGE_BOTTOM;
// prepare the initial left_last_pos for offset
while (curr_stage < stage) {
left_last_pos.index_by_stage(curr_stage) = 0;
++curr_stage;
}
++left_last_pos.index_by_stage(curr_stage);
// fix the tracked child nodes of right_node, stage by stage.
auto& right_tracked_children = right_node.tracked_child_nodes;
auto rit = right_tracked_children.begin();
while (curr_stage <= STAGE_TOP) {
auto right_pos_until = search_position_t::begin();
right_pos_until.index_by_stage(curr_stage) = INDEX_UPPER_BOUND;
auto rend = right_tracked_children.lower_bound(right_pos_until);
while (rit != rend) {
auto new_pos = rit->second->parent_info().position;
assert(new_pos == rit->first);
assert(rit->second->parent_info().ptr == &right_node);
new_pos += left_last_pos;
auto p_child = rit->second;
rit = right_tracked_children.erase(rit);
p_child->as_child(new_pos, this);
}
left_last_pos.index_by_stage(curr_stage) = 0;
++curr_stage;
}
// fix the end tracked child node of right_node, if exists.
if (rit != right_tracked_children.end()) {
assert(rit->first == search_position_t::end());
assert(rit->second->parent_info().position == search_position_t::end());
assert(right_node.impl->is_level_tail());
assert(impl->is_level_tail());
auto p_child = rit->second;
rit = right_tracked_children.erase(rit);
p_child->as_child(search_position_t::end(), this);
}
assert(right_tracked_children.empty());
validate_tracked_children();
}
eagain_ifuture<> InternalNode::test_clone_root(
context_t c_other, RootNodeTracker& tracker_other) const
{
assert(is_root());
assert(impl->is_level_tail());
assert(impl->field_type() == field_type_t::N0);
Ref<const Node> this_ref = this;
return InternalNode::allocate(c_other, L_ADDR_MIN, field_type_t::N0, true, impl->level()
).si_then([this, c_other, &tracker_other](auto fresh_other) {
impl->test_copy_to(fresh_other.mut);
auto cloned_root = fresh_other.node;
return c_other.nm.get_super(c_other.t, tracker_other
).handle_error_interruptible(
eagain_iertr::pass_further{},
crimson::ct_error::assert_all{"Invalid error during test clone"}
).si_then([c_other, cloned_root](auto&& super_other) {
assert(super_other);
cloned_root->make_root_new(c_other, std::move(super_other));
return cloned_root;
});
}).si_then([this_ref, this, c_other](auto cloned_root) {
// clone tracked children
// In some unit tests, the children are stubbed out so that they
// don't exist in the NodeExtentManager and are only tracked in memory.
return trans_intr::do_for_each(
tracked_child_nodes.begin(),
tracked_child_nodes.end(),
[this_ref, c_other, cloned_root](auto& kv) {
assert(kv.first == kv.second->parent_info().position);
return kv.second->test_clone_non_root(c_other, cloned_root);
}
);
});
}
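// If this internal root is left with only its tail child, hand the Super
// over to that child (making it the new root) and retire myself.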
eagain_ifuture<> InternalNode::try_downgrade_root(
context_t c, Ref<Node>&& this_ref)
{
LOG_PREFIX(OTree::InternalNode::try_downgrade_root);
assert(this_ref.get() == this);
assert(is_root());
assert(impl->is_level_tail());
if (!impl->is_keys_empty()) {
// I have more than one value, no need to downgrade
return eagain_iertr::now();
}
// proceed downgrade root to the only child
laddr_t child_addr = impl->get_tail_value()->value;
return get_or_track_child(c, search_position_t::end(), child_addr
).si_then([c, this, FNAME,
this_ref = std::move(this_ref)] (auto child) mutable {
INFOT("downgrade {} to new root {}",
c.t, get_name(), child->get_name());
// Invariant, see InternalNode::erase_child()
// the new internal root should have at least 2 children.
assert(child->impl->is_level_tail());
if (child->impl->node_type() == node_type_t::INTERNAL) {
ceph_assert(!child->impl->is_keys_empty());
}
assert(tracked_child_nodes.size() == 1);
child->deref_parent();
auto super_to_move = deref_super();
child->make_root_from(c, std::move(super_to_move), impl->laddr());
--(c.t.get_onode_tree_stats().extents_num_delta);
return retire(c, std::move(this_ref));
});
}
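// Insert (insert_key -> insert_child) at pos. If the entry fits, insert in
// place and return nullptr; otherwise split myself into a fresh right node,
// insert into the proper side, and return the new right node for the caller
// to propagate upwards.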
eagain_ifuture<Ref<InternalNode>> InternalNode::insert_or_split(
context_t c,
const search_position_t& pos,
const key_view_t& insert_key,
Ref<Node> insert_child,
Ref<Node> outdated_child)
{
LOG_PREFIX(OTree::InternalNode::insert_or_split);
// XXX: check the insert_child is unlinked from this node
#ifndef NDEBUG
auto _insert_key = *insert_child->impl->get_pivot_index();
assert(insert_key == _insert_key);
#endif
auto insert_value = insert_child->impl->laddr();
auto insert_pos = pos;
DEBUGT("insert {} with insert_key={}, insert_child={}, insert_pos({}), "
"outdated_child={} ...",
c.t, get_name(), insert_key, insert_child->get_name(),
insert_pos, (outdated_child ? "True" : "False"));
auto [insert_stage, insert_size] = impl->evaluate_insert(
insert_key, insert_value, insert_pos);
auto free_size = impl->free_size();
if (free_size >= insert_size) {
// proceed to insert
[[maybe_unused]] auto p_value = impl->insert(
insert_key, insert_value, insert_pos, insert_stage, insert_size);
assert(impl->free_size() == free_size - insert_size);
assert(insert_pos <= pos);
assert(p_value->value == insert_value);
if (outdated_child) {
track_insert<false>(insert_pos, insert_stage, insert_child);
validate_child_inconsistent(*outdated_child);
#ifndef NDEBUG
do_untrack_child(*outdated_child);
validate_tracked_children();
do_track_child<false>(*outdated_child);
#endif
} else {
track_insert(insert_pos, insert_stage, insert_child);
validate_tracked_children();
}
return eagain_iertr::make_ready_future<Ref<InternalNode>>(nullptr);
}
// proceed to split with insert
// assume I'm already ref-counted by caller
laddr_t left_hint, right_hint;
{
key_view_t left_key;
impl->get_slot(search_position_t::begin(), &left_key, nullptr);
left_hint = left_key.get_hint();
key_view_t right_key;
impl->get_largest_slot(nullptr, &right_key, nullptr);
right_hint = right_key.get_hint();
}
return (is_root() ? upgrade_root(c, left_hint) : eagain_iertr::now()
).si_then([this, c, right_hint] {
return InternalNode::allocate(
c, right_hint, impl->field_type(), impl->is_level_tail(), impl->level());
}).si_then([this, insert_key, insert_child, insert_pos,
insert_stage=insert_stage, insert_size=insert_size,
outdated_child, c, FNAME](auto fresh_right) mutable {
// I'm the left_node and need to split into the right_node
auto right_node = fresh_right.node;
DEBUGT("proceed split {} to fresh {} with insert_child={},"
" outdated_child={} ...",
c.t, get_name(), right_node->get_name(),
insert_child->get_name(),
(outdated_child ? outdated_child->get_name() : "N/A"));
auto insert_value = insert_child->impl->laddr();
auto [split_pos, is_insert_left, p_value] = impl->split_insert(
fresh_right.mut, *right_node->impl, insert_key, insert_value,
insert_pos, insert_stage, insert_size);
assert(p_value->value == insert_value);
track_split(split_pos, right_node);
if (outdated_child) {
if (is_insert_left) {
track_insert<false>(insert_pos, insert_stage, insert_child);
} else {
right_node->template track_insert<false>(insert_pos, insert_stage, insert_child);
}
#ifndef NDEBUG
auto& _parent = outdated_child->parent_info().ptr;
_parent->validate_child_inconsistent(*outdated_child);
_parent->do_untrack_child(*outdated_child);
validate_tracked_children();
right_node->validate_tracked_children();
_parent->do_track_child<false>(*outdated_child);
#endif
} else {
if (is_insert_left) {
track_insert(insert_pos, insert_stage, insert_child);
} else {
right_node->track_insert(insert_pos, insert_stage, insert_child);
}
validate_tracked_children();
right_node->validate_tracked_children();
}
++(c.t.get_onode_tree_stats().extents_num_delta);
return right_node;
});
}
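// Return the child at (position, child_addr), either from the tracked set or
// by loading it via Node::load() and registering it as a tracked child.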
eagain_ifuture<Ref<Node>> InternalNode::get_or_track_child(
context_t c, const search_position_t& position, laddr_t child_addr)
{
LOG_PREFIX(OTree::InternalNode::get_or_track_child);
Ref<Node> this_ref = this;
return [this, position, child_addr, c, FNAME] {
auto found = tracked_child_nodes.find(position);
if (found != tracked_child_nodes.end()) {
TRACET("loaded child tracked {} at pos({}) addr={:x}",
c.t, found->second->get_name(), position, child_addr);
return eagain_iertr::make_ready_future<Ref<Node>>(found->second);
}
// the child is not loaded yet
TRACET("loading child at pos({}) addr={:x} ...",
c.t, position, child_addr);
bool level_tail = position.is_end();
return Node::load(c, child_addr, level_tail
).si_then([this, position, c, FNAME] (auto child) {
TRACET("loaded child untracked {}",
c.t, child->get_name());
if (child->level() + 1 != level()) {
ERRORT("loaded child {} error from parent {} at pos({}), level mismatch",
c.t, child->get_name(), get_name(), position);
ceph_abort("fatal error");
}
child->as_child(position, this);
return child;
});
}().si_then([this_ref, this, position, child_addr] (auto child) {
assert(child_addr == child->impl->laddr());
assert(position == child->parent_info().position);
std::ignore = position;
std::ignore = child_addr;
validate_child_tracked(*child);
return child;
});
}
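// After an insert at insert_pos, shift the positions of the tracked children
// at or after the insert point by one at the insert stage, then start
// tracking the inserted child.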
template <bool VALIDATE>
void InternalNode::track_insert(
const search_position_t& insert_pos, match_stage_t insert_stage,
Ref<Node> insert_child, Ref<Node> nxt_child)
{
// update tracks
auto pos_upper_bound = insert_pos;
pos_upper_bound.index_by_stage(insert_stage) = INDEX_UPPER_BOUND;
auto first = tracked_child_nodes.lower_bound(insert_pos);
auto last = tracked_child_nodes.lower_bound(pos_upper_bound);
std::vector<Node*> nodes;
std::for_each(first, last, [&nodes](auto& kv) {
nodes.push_back(kv.second);
});
tracked_child_nodes.erase(first, last);
for (auto& node : nodes) {
auto _pos = node->parent_info().position;
assert(!_pos.is_end());
++_pos.index_by_stage(insert_stage);
node->as_child<VALIDATE>(_pos, this);
}
// track insert
insert_child->as_child(insert_pos, this);
#ifndef NDEBUG
// validate that insert_child is tracked immediately before nxt_child
if (nxt_child) {
auto iter = tracked_child_nodes.find(insert_pos);
++iter;
assert(iter->second == nxt_child);
}
#endif
}
template void InternalNode::track_insert<true>(const search_position_t&, match_stage_t, Ref<Node>, Ref<Node>);
template void InternalNode::track_insert<false>(const search_position_t&, match_stage_t, Ref<Node>, Ref<Node>);
void InternalNode::replace_track(
Ref<Node> new_child, Ref<Node> old_child, bool is_new_child_outdated)
{
assert(!new_child->is_tracked());
auto& pos = old_child->parent_info().position;
auto this_ref = old_child->deref_parent();
assert(this_ref == this);
if (is_new_child_outdated) {
// we need to keep track of the outdated child through
// insert and split.
new_child->as_child<false>(pos, this);
} else {
new_child->as_child(pos, this);
}
#ifndef NDEBUG
if (is_new_child_outdated) {
validate_child_inconsistent(*new_child);
} else {
validate_child_tracked(*new_child);
}
#endif
}
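// After a split at split_pos, move the tracked children at or after the split
// point to the new right node, rebasing their positions.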
void InternalNode::track_split(
const search_position_t& split_pos, Ref<InternalNode> right_node)
{
auto iter = tracked_child_nodes.lower_bound(split_pos);
while (iter != tracked_child_nodes.end()) {
auto new_pos = iter->first;
auto p_node = iter->second;
iter = tracked_child_nodes.erase(iter);
new_pos -= split_pos;
p_node->as_child<false>(new_pos, right_node);
}
}
template <bool VALIDATE>
void InternalNode::track_erase(
const search_position_t& erase_pos, match_stage_t erase_stage)
{
auto first = tracked_child_nodes.lower_bound(erase_pos);
assert(first == tracked_child_nodes.end() ||
first->first != erase_pos);
auto pos_upper_bound = erase_pos;
pos_upper_bound.index_by_stage(erase_stage) = INDEX_UPPER_BOUND;
auto last = tracked_child_nodes.lower_bound(pos_upper_bound);
std::vector<Node*> p_nodes;
std::for_each(first, last, [&p_nodes](auto& kv) {
p_nodes.push_back(kv.second);
});
tracked_child_nodes.erase(first, last);
for (auto& p_node: p_nodes) {
auto new_pos = p_node->parent_info().position;
assert(new_pos.index_by_stage(erase_stage) > 0);
--new_pos.index_by_stage(erase_stage);
p_node->as_child<VALIDATE>(new_pos, this);
}
}
template void InternalNode::track_erase<true>(const search_position_t&, match_stage_t);
template void InternalNode::track_erase<false>(const search_position_t&, match_stage_t);
void InternalNode::track_make_tail(const search_position_t& last_pos)
{
// assume I'm ref counted by the caller.
assert(impl->is_level_tail());
assert(!last_pos.is_end());
assert(tracked_child_nodes.find(search_position_t::end()) ==
tracked_child_nodes.end());
auto last_it = tracked_child_nodes.find(last_pos);
if (last_it != tracked_child_nodes.end()) {
assert(std::next(last_it) == tracked_child_nodes.end());
auto p_last_child = last_it->second;
tracked_child_nodes.erase(last_it);
p_last_child->as_child(search_position_t::end(), this);
} else {
assert(tracked_child_nodes.lower_bound(last_pos) ==
tracked_child_nodes.end());
}
}
void InternalNode::validate_child(const Node& child) const
{
#ifndef NDEBUG
assert(impl->level() - 1 == child.impl->level());
assert(this == child.parent_info().ptr);
auto& child_pos = child.parent_info().position;
if (child_pos.is_end()) {
assert(impl->is_level_tail());
assert(child.impl->is_level_tail());
assert(impl->get_tail_value()->value == child.impl->laddr());
} else {
assert(!child.impl->is_level_tail());
key_view_t index_key;
const laddr_packed_t* p_child_addr;
impl->get_slot(child_pos, &index_key, &p_child_addr);
assert(index_key == *child.impl->get_pivot_index());
assert(p_child_addr->value == child.impl->laddr());
}
// XXX(multi-type)
assert(impl->field_type() <= child.impl->field_type());
#endif
}
void InternalNode::validate_child_inconsistent(const Node& child) const
{
#ifndef NDEBUG
assert(impl->level() - 1 == child.impl->level());
assert(check_is_tracking(child));
auto& child_pos = child.parent_info().position;
// the tail value has no key to fix
assert(!child_pos.is_end());
assert(!child.impl->is_level_tail());
key_view_t current_key;
const laddr_packed_t* p_value;
impl->get_slot(child_pos, ¤t_key, &p_value);
key_view_t new_key = *child.impl->get_pivot_index();
assert(current_key != new_key);
assert(p_value->value == child.impl->laddr());
#endif
}
eagain_ifuture<InternalNode::fresh_node_t> InternalNode::allocate(
context_t c, laddr_t hint, field_type_t field_type, bool is_level_tail, level_t level)
{
return InternalNodeImpl::allocate(c, hint, field_type, is_level_tail, level
).si_then([](auto&& fresh_impl) {
auto *derived_ptr = fresh_impl.impl.get();
auto node = Ref<InternalNode>(new InternalNode(
derived_ptr, std::move(fresh_impl.impl)));
return fresh_node_t{node, fresh_impl.mut};
});
}
/*
* LeafNode
*/
LeafNode::LeafNode(LeafNodeImpl* impl, NodeImplURef&& impl_ref)
: Node(std::move(impl_ref)), impl{impl} {}
bool LeafNode::is_level_tail() const
{
return impl->is_level_tail();
}
node_version_t LeafNode::get_version() const
{
return {layout_version, impl->get_extent_state()};
}
const char* LeafNode::read() const
{
return impl->read();
}
extent_len_t LeafNode::get_node_size() const
{
return impl->get_node_size();
}
std::tuple<key_view_t, const value_header_t*>
LeafNode::get_kv(const search_position_t& pos) const
{
key_view_t key_view;
const value_header_t* p_value_header;
impl->get_slot(pos, &key_view, &p_value_header);
return {key_view, p_value_header};
}
eagain_ifuture<Ref<tree_cursor_t>>
LeafNode::get_next_cursor(context_t c, const search_position_t& pos)
{
impl->validate_non_empty();
search_position_t next_pos = pos;
key_view_t index_key;
const value_header_t* p_value_header = nullptr;
impl->get_next_slot(next_pos, &index_key, &p_value_header);
if (next_pos.is_end()) {
if (unlikely(is_level_tail())) {
return eagain_iertr::make_ready_future<Ref<tree_cursor_t>>(
tree_cursor_t::create_end(this));
} else {
return get_next_cursor_from_parent(c);
}
} else {
return eagain_iertr::make_ready_future<Ref<tree_cursor_t>>(
get_or_track_cursor(next_pos, index_key, p_value_header));
}
}
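// Erase the value at pos from this leaf. Optionally resolve the next cursor
// first; if this non-root leaf would become empty, erase the whole node
// instead of mutating the extent, otherwise erase in place and try to merge
// with an adjacent sibling.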
template <bool FORCE_MERGE>
eagain_ifuture<Ref<tree_cursor_t>>
LeafNode::erase(context_t c, const search_position_t& pos, bool get_next)
{
LOG_PREFIX(OTree::LeafNode::erase);
assert(!pos.is_end());
assert(!impl->is_keys_empty());
Ref<Node> this_ref = this;
DEBUGT("erase {}'s pos({}), get_next={} ...",
c.t, get_name(), pos, get_next);
++(c.t.get_onode_tree_stats().num_erases);
// get the next cursor
return eagain_iertr::now().si_then([c, &pos, get_next, this] {
if (get_next) {
return get_next_cursor(c, pos);
} else {
return eagain_iertr::make_ready_future<Ref<tree_cursor_t>>();
}
}).si_then([c, &pos, this_ref = std::move(this_ref),
this, FNAME] (Ref<tree_cursor_t> next_cursor) mutable {
if (next_cursor && next_cursor->is_end()) {
// reset the node reference from the end cursor
next_cursor.reset();
}
return eagain_iertr::now().si_then(
[c, &pos, this_ref = std::move(this_ref), this, FNAME] () mutable {
assert_moveable(this_ref);
#ifndef NDEBUG
assert(!impl->is_keys_empty());
if (impl->has_single_value()) {
assert(pos == search_position_t::begin());
}
#endif
if (!is_root() && impl->has_single_value()) {
// we need to keep the root as an empty leaf node
// fast path without mutating the extent
// track_erase: manually invalidate and drop the only tracked cursor
DEBUGT("{} has one value left, erase ...", c.t, get_name());
assert(tracked_cursors.size() == 1);
auto iter = tracked_cursors.begin();
assert(iter->first == pos);
iter->second->invalidate();
tracked_cursors.clear();
// no cursor should be referencing this node now, this_ref is the last one.
assert(this_ref->use_count() == 1);
return Node::erase_node(c, std::move(this_ref));
}
on_layout_change();
impl->prepare_mutate(c);
auto [erase_stage, next_pos] = impl->erase(pos);
track_erase(pos, erase_stage);
validate_tracked_cursors();
if (is_root()) {
return eagain_iertr::now();
} else {
bool update_parent_index;
if (impl->is_level_tail()) {
update_parent_index = false;
} else {
update_parent_index = next_pos.is_end();
}
return try_merge_adjacent<FORCE_MERGE>(
c, update_parent_index, std::move(this_ref));
}
}).si_then([next_cursor] {
return next_cursor;
});
});
}
template eagain_ifuture<Ref<tree_cursor_t>>
LeafNode::erase<true>(context_t, const search_position_t&, bool);
template eagain_ifuture<Ref<tree_cursor_t>>
LeafNode::erase<false>(context_t, const search_position_t&, bool);
eagain_ifuture<> LeafNode::extend_value(
context_t c, const search_position_t& pos, value_size_t extend_size)
{
ceph_abort("not implemented");
return eagain_iertr::now();
}
eagain_ifuture<> LeafNode::trim_value(
context_t c, const search_position_t& pos, value_size_t trim_size)
{
ceph_abort("not implemented");
return eagain_iertr::now();
}
std::pair<NodeExtentMutable&, ValueDeltaRecorder*>
LeafNode::prepare_mutate_value_payload(context_t c)
{
return impl->prepare_mutate_value_payload(c);
}
eagain_ifuture<Ref<tree_cursor_t>>
LeafNode::lookup_smallest(context_t)
{
if (unlikely(impl->is_keys_empty())) {
assert(is_root());
return seastar::make_ready_future<Ref<tree_cursor_t>>(
tree_cursor_t::create_end(this));
}
auto pos = search_position_t::begin();
key_view_t index_key;
const value_header_t* p_value_header;
impl->get_slot(pos, &index_key, &p_value_header);
return seastar::make_ready_future<Ref<tree_cursor_t>>(
get_or_track_cursor(pos, index_key, p_value_header));
}
eagain_ifuture<Ref<tree_cursor_t>>
LeafNode::lookup_largest(context_t)
{
if (unlikely(impl->is_keys_empty())) {
assert(is_root());
return seastar::make_ready_future<Ref<tree_cursor_t>>(
tree_cursor_t::create_end(this));
}
search_position_t pos;
key_view_t index_key;
const value_header_t* p_value_header = nullptr;
impl->get_largest_slot(&pos, &index_key, &p_value_header);
return seastar::make_ready_future<Ref<tree_cursor_t>>(
get_or_track_cursor(pos, index_key, p_value_header));
}
eagain_ifuture<Node::search_result_t>
LeafNode::lower_bound_tracked(
context_t c, const key_hobj_t& key, MatchHistory& history)
{
key_view_t index_key;
auto result = impl->lower_bound(key, history, &index_key);
Ref<tree_cursor_t> cursor;
if (result.position.is_end()) {
assert(!result.p_value);
cursor = tree_cursor_t::create_end(this);
} else {
cursor = get_or_track_cursor(result.position, index_key, result.p_value);
}
search_result_t ret{cursor, result.mstat};
ret.validate_input_key(key, c.vb.get_header_magic());
return seastar::make_ready_future<search_result_t>(ret);
}
eagain_ifuture<> LeafNode::do_get_tree_stats(context_t, tree_stats_t& stats)
{
auto nstats = impl->get_stats();
stats.size_persistent_leaf += nstats.size_persistent;
stats.size_filled_leaf += nstats.size_filled;
stats.size_logical_leaf += nstats.size_logical;
stats.size_overhead_leaf += nstats.size_overhead;
stats.size_value_leaf += nstats.size_value;
stats.num_kvs_leaf += nstats.num_kvs;
stats.num_nodes_leaf += 1;
return eagain_iertr::now();
}
void LeafNode::track_merge(
Ref<Node> _right_node, match_stage_t stage, search_position_t& left_last_pos)
{
assert(level() == _right_node->level());
// assert(impl->node_type() == _right_node->impl->node_type());
auto& right_node = *static_cast<LeafNode*>(_right_node.get());
if (right_node.tracked_cursors.empty()) {
return;
}
match_stage_t curr_stage = STAGE_BOTTOM;
// prepare the initial left_last_pos for offset
while (curr_stage < stage) {
left_last_pos.index_by_stage(curr_stage) = 0;
++curr_stage;
}
++left_last_pos.index_by_stage(curr_stage);
// fix the tracked child nodes of right_node, stage by stage.
auto& right_tracked_cursors = right_node.tracked_cursors;
auto rit = right_tracked_cursors.begin();
while (curr_stage <= STAGE_TOP) {
auto right_pos_until = search_position_t::begin();
right_pos_until.index_by_stage(curr_stage) = INDEX_UPPER_BOUND;
auto rend = right_tracked_cursors.lower_bound(right_pos_until);
while (rit != rend) {
auto new_pos = rit->second->get_position();
assert(new_pos == rit->first);
assert(rit->second->get_leaf_node().get() == &right_node);
new_pos += left_last_pos;
auto p_cursor = rit->second;
rit = right_tracked_cursors.erase(rit);
p_cursor->update_track<true>(this, new_pos);
}
left_last_pos.index_by_stage(curr_stage) = 0;
++curr_stage;
}
assert(right_tracked_cursors.empty());
validate_tracked_cursors();
}
eagain_ifuture<> LeafNode::test_clone_root(
context_t c_other, RootNodeTracker& tracker_other) const
{
assert(is_root());
assert(impl->is_level_tail());
assert(impl->field_type() == field_type_t::N0);
Ref<const Node> this_ref = this;
return LeafNode::allocate(c_other, L_ADDR_MIN, field_type_t::N0, true
).si_then([this, c_other, &tracker_other](auto fresh_other) {
impl->test_copy_to(fresh_other.mut);
auto cloned_root = fresh_other.node;
return c_other.nm.get_super(c_other.t, tracker_other
).handle_error_interruptible(
eagain_iertr::pass_further{},
crimson::ct_error::assert_all{"Invalid error during test clone"}
).si_then([c_other, cloned_root](auto&& super_other) {
assert(super_other);
cloned_root->make_root_new(c_other, std::move(super_other));
});
}).si_then([this_ref]{});
}
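// Insert (key -> value) into this leaf at the evaluated position. If the
// entry fits, insert in place; otherwise split into a fresh right leaf
// (upgrading the root first if needed) and apply the split to the parent.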
eagain_ifuture<Ref<tree_cursor_t>> LeafNode::insert_value(
context_t c, const key_hobj_t& key, value_config_t vconf,
const search_position_t& pos, const MatchHistory& history,
match_stat_t mstat)
{
LOG_PREFIX(OTree::LeafNode::insert_value);
#ifndef NDEBUG
if (pos.is_end()) {
assert(impl->is_level_tail());
}
#endif
DEBUGT("insert {} with insert_key={}, insert_value={}, insert_pos({}), "
"history={}, mstat({}) ...",
c.t, get_name(), key, vconf, pos, history, mstat);
++(c.t.get_onode_tree_stats().num_inserts);
search_position_t insert_pos = pos;
auto [insert_stage, insert_size] = impl->evaluate_insert(
key, vconf, history, mstat, insert_pos);
auto free_size = impl->free_size();
if (free_size >= insert_size) {
// proceed to insert
on_layout_change();
impl->prepare_mutate(c);
auto p_value_header = impl->insert(key, vconf, insert_pos, insert_stage, insert_size);
assert(impl->free_size() == free_size - insert_size);
assert(insert_pos <= pos);
assert(p_value_header->payload_size == vconf.payload_size);
auto ret = track_insert(insert_pos, insert_stage, p_value_header);
validate_tracked_cursors();
return eagain_iertr::make_ready_future<Ref<tree_cursor_t>>(ret);
}
// split and insert
Ref<Node> this_ref = this;
laddr_t left_hint, right_hint;
{
key_view_t left_key;
impl->get_slot(search_position_t::begin(), &left_key, nullptr);
left_hint = left_key.get_hint();
key_view_t right_key;
impl->get_largest_slot(nullptr, &right_key, nullptr);
right_hint = right_key.get_hint();
}
return (is_root() ? upgrade_root(c, left_hint) : eagain_iertr::now()
).si_then([this, c, right_hint] {
return LeafNode::allocate(c, right_hint, impl->field_type(), impl->is_level_tail());
}).si_then([this_ref = std::move(this_ref), this, c, &key, vconf, FNAME,
insert_pos, insert_stage=insert_stage, insert_size=insert_size](auto fresh_right) mutable {
auto right_node = fresh_right.node;
DEBUGT("proceed split {} to fresh {} ...",
c.t, get_name(), right_node->get_name());
// no need to bump version for right node, as it is fresh
on_layout_change();
impl->prepare_mutate(c);
auto [split_pos, is_insert_left, p_value_header] = impl->split_insert(
fresh_right.mut, *right_node->impl, key, vconf,
insert_pos, insert_stage, insert_size);
assert(p_value_header->payload_size == vconf.payload_size);
track_split(split_pos, right_node);
Ref<tree_cursor_t> ret;
if (is_insert_left) {
ret = track_insert(insert_pos, insert_stage, p_value_header);
} else {
ret = right_node->track_insert(insert_pos, insert_stage, p_value_header);
}
validate_tracked_cursors();
right_node->validate_tracked_cursors();
++(c.t.get_onode_tree_stats().extents_num_delta);
return apply_split_to_parent(
c, std::move(this_ref), std::move(right_node), false
).si_then([ret] {
return ret;
});
// TODO (optimize)
// try to acquire space from siblings before split... see btrfs
});
}
eagain_ifuture<Ref<LeafNode>> LeafNode::allocate_root(
context_t c, RootNodeTracker& root_tracker)
{
LOG_PREFIX(OTree::LeafNode::allocate_root);
return LeafNode::allocate(c, L_ADDR_MIN, field_type_t::N0, true
).si_then([c, &root_tracker, FNAME](auto fresh_node) {
auto root = fresh_node.node;
return c.nm.get_super(c.t, root_tracker
).handle_error_interruptible(
eagain_iertr::pass_further{},
crimson::ct_error::input_output_error::handle([FNAME, c] {
ERRORT("EIO during get_super()", c.t);
ceph_abort("fatal error");
})
).si_then([c, root](auto&& super) {
assert(super);
root->make_root_new(c, std::move(super));
return root;
});
});
}
Ref<tree_cursor_t> LeafNode::get_or_track_cursor(
const search_position_t& position,
const key_view_t& key, const value_header_t* p_value_header)
{
assert(!position.is_end());
assert(p_value_header);
Ref<tree_cursor_t> p_cursor;
auto found = tracked_cursors.find(position);
if (found == tracked_cursors.end()) {
p_cursor = tree_cursor_t::create_tracked(
this, position, key, p_value_header);
} else {
p_cursor = found->second;
assert(p_cursor->get_leaf_node() == this);
assert(p_cursor->get_position() == position);
p_cursor->update_cache_same_node(key, p_value_header);
}
return p_cursor;
}
void LeafNode::validate_cursor(const tree_cursor_t& cursor) const
{
#ifndef NDEBUG
assert(this == cursor.get_leaf_node().get());
assert(cursor.is_tracked());
assert(!impl->is_extent_retired());
// We need to make sure the user has freed all the cursors before submitting
// the corresponding transaction. Otherwise the checks below would have
// undefined behavior.
auto [key, p_value_header] = get_kv(cursor.get_position());
auto magic = p_value_header->magic;
assert(key == cursor.get_key_view(magic));
assert(p_value_header == cursor.read_value_header(magic));
#endif
}
Ref<tree_cursor_t> LeafNode::track_insert(
const search_position_t& insert_pos, match_stage_t insert_stage,
const value_header_t* p_value_header)
{
// update cursor position
auto pos_upper_bound = insert_pos;
pos_upper_bound.index_by_stage(insert_stage) = INDEX_UPPER_BOUND;
auto first = tracked_cursors.lower_bound(insert_pos);
auto last = tracked_cursors.lower_bound(pos_upper_bound);
std::vector<tree_cursor_t*> p_cursors;
std::for_each(first, last, [&p_cursors](auto& kv) {
p_cursors.push_back(kv.second);
});
tracked_cursors.erase(first, last);
for (auto& p_cursor : p_cursors) {
search_position_t new_pos = p_cursor->get_position();
++new_pos.index_by_stage(insert_stage);
p_cursor->update_track<true>(this, new_pos);
}
// track insert
  // TODO: getting key_view_t from stage::proceed_insert() and
  // stage::append_insert() is not supported yet
return tree_cursor_t::create_inserted(
this, insert_pos);
}
void LeafNode::track_split(
const search_position_t& split_pos, Ref<LeafNode> right_node)
{
// update cursor ownership and position
auto iter = tracked_cursors.lower_bound(split_pos);
while (iter != tracked_cursors.end()) {
auto new_pos = iter->first;
auto p_cursor = iter->second;
iter = tracked_cursors.erase(iter);
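    // positions at or after split_pos now belong to the right node;
    // re-base them so they are relative to the right node's beginning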
new_pos -= split_pos;
p_cursor->update_track<false>(right_node, new_pos);
}
}
void LeafNode::track_erase(
const search_position_t& erase_pos, match_stage_t erase_stage)
{
// erase tracking and invalidate the erased cursor
auto to_erase = tracked_cursors.find(erase_pos);
assert(to_erase != tracked_cursors.end());
to_erase->second->invalidate();
auto first = tracked_cursors.erase(to_erase);
// update cursor position
assert(first == tracked_cursors.lower_bound(erase_pos));
auto pos_upper_bound = erase_pos;
pos_upper_bound.index_by_stage(erase_stage) = INDEX_UPPER_BOUND;
auto last = tracked_cursors.lower_bound(pos_upper_bound);
std::vector<tree_cursor_t*> p_cursors;
std::for_each(first, last, [&p_cursors](auto& kv) {
p_cursors.push_back(kv.second);
});
tracked_cursors.erase(first, last);
for (auto& p_cursor : p_cursors) {
search_position_t new_pos = p_cursor->get_position();
assert(new_pos.index_by_stage(erase_stage) > 0);
--new_pos.index_by_stage(erase_stage);
p_cursor->update_track<true>(this, new_pos);
}
}
eagain_ifuture<LeafNode::fresh_node_t> LeafNode::allocate(
context_t c, laddr_t hint, field_type_t field_type, bool is_level_tail)
{
return LeafNodeImpl::allocate(c, hint, field_type, is_level_tail
).si_then([](auto&& fresh_impl) {
auto *derived_ptr = fresh_impl.impl.get();
auto node = Ref<LeafNode>(new LeafNode(
derived_ptr, std::move(fresh_impl.impl)));
return fresh_node_t{node, fresh_impl.mut};
});
}
}
| 78,153 | 33.233027 | 111 | cc |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <compare>
#include <map>
#include <memory>
#include <ostream>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include "crimson/common/type_helpers.h"
#include "node_extent_mutable.h"
#include "stages/key_layout.h"
#include "stages/stage_types.h"
#include "super.h"
#include "value.h"
/**
* Tree example (2 levels):
*
* Root node keys: [ 3 7 ]
* values: [p1 p2 p3]
* / | \
* ------- | -------
* | | |
* V V V
* Leaf node keys: [ 1 2 3] [ 4 5 7] [ 9 11 12]
* values: [v1 v2 v3] [v4 v5 v6] [v7 v8 v9]
*
* Tree structure properties:
* - As illustrated above, the parent key is strictly equal to its left child's
* largest key;
* - If a tree is indexing multiple seastore transactions, each transaction
* will be mapped to a Super which points to a distinct root node. So the
* transactions are isolated at tree level. However, tree nodes from
* different transactions can reference the same seastore CachedExtent before
* modification;
* - The resources of the transactional tree are tracked by tree_cursor_ts held
 *   by users. As long as any cursor is alive, the corresponding tree
 *   hierarchy stays alive and tracked. See the reversed resource management
 *   sections below;
*/
namespace crimson::os::seastore::onode {
class LeafNode;
class InternalNode;
using layout_version_t = uint32_t;
struct node_version_t {
layout_version_t layout;
nextent_state_t state;
bool operator==(const node_version_t& rhs) const {
return (layout == rhs.layout && state == rhs.state);
}
bool operator!=(const node_version_t& rhs) const {
return !(*this == rhs);
}
};
/**
* tree_cursor_t
*
* A cursor points to a position (LeafNode and search_position_t) of the tree
 * where it can find the corresponding key-value pair. The position is updated
* by LeafNode insert/split/delete/merge internally and is kept valid. It also
* caches the key-value information for a specific node layout version.
*
* Exposes public interfaces for Btree::Cursor.
*/
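/*
 * Illustrative-only sketch (not taken from the Btree code) of how a caller
 * may inspect a cursor returned by a lookup, assuming `cursor` is a
 * Ref<tree_cursor_t> and `magic` is the tree's value_magic_t:
 *
 *   if (cursor->is_end()) {
 *     // one-past-the-last; no key-value information to read
 *   } else if (cursor->is_tracked()) {
 *     const auto& key = cursor->get_key_view(magic);
 *     const auto* p_header = cursor->read_value_header(magic);
 *   } else {
 *     assert(cursor->is_invalid());  // erased and untracked
 *   }
 */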
class tree_cursor_t final
: public boost::intrusive_ref_counter<
tree_cursor_t, boost::thread_unsafe_counter> {
public:
~tree_cursor_t();
tree_cursor_t(const tree_cursor_t&) = delete;
tree_cursor_t(tree_cursor_t&&) = delete;
tree_cursor_t& operator=(const tree_cursor_t&) = delete;
tree_cursor_t& operator=(tree_cursor_t&&) = delete;
// public to Btree
/**
* is_end
*
* Represents one-past-the-last of all the sorted key-value
* pairs in the tree. An end cursor won't contain valid key-value
* information.
*/
bool is_end() const { return !!ref_leaf_node && position.is_end(); }
/**
* is_tracked
*
* Represents a key-value pair stored in the tree, which is always tracked
* across insert/split/erase/merge operations.
*/
bool is_tracked() const { return !!ref_leaf_node && !position.is_end(); }
/**
* is_invalid
*
* Represents an invalid cursor which was once valid and tracked by the tree
* but is now erased and untracked. User may still hold an invalid cursor.
*/
bool is_invalid() const { return !ref_leaf_node; }
/// Returns the key view in tree if it is not an end cursor.
const key_view_t& get_key_view(value_magic_t magic) const {
assert(is_tracked());
return cache.get_key_view(magic, position);
}
  /// Returns the next tree_cursor_t in the tree; it can be an end cursor if there is no next.
eagain_ifuture<Ref<tree_cursor_t>> get_next(context_t);
/// Check that this is next to prv
void assert_next_to(const tree_cursor_t&, value_magic_t) const;
/// Erases the key-value pair from tree.
template <bool FORCE_MERGE = false>
eagain_ifuture<Ref<tree_cursor_t>> erase(context_t, bool get_next);
std::strong_ordering compare_to(const tree_cursor_t&, value_magic_t) const;
// public to Value
/// Get the latest value_header_t pointer for read.
const value_header_t* read_value_header(value_magic_t magic) const {
assert(is_tracked());
return cache.get_p_value_header(magic, position);
}
/// Prepare the node extent to be mutable and recorded.
std::pair<NodeExtentMutable&, ValueDeltaRecorder*>
prepare_mutate_value_payload(context_t c) {
assert(is_tracked());
if (!is_mutated) {
is_mutated = true;
++(c.t.get_onode_tree_stats().num_updates);
}
return cache.prepare_mutate_value_payload(c, position);
}
/// Extends the size of value payload.
eagain_ifuture<> extend_value(context_t, value_size_t);
/// Trim and shrink the value payload.
eagain_ifuture<> trim_value(context_t, value_size_t);
static Ref<tree_cursor_t> get_invalid() {
Ref<tree_cursor_t> INVALID = new tree_cursor_t();
return INVALID;
}
private:
// create from insert
tree_cursor_t(Ref<LeafNode>, const search_position_t&);
// create from lookup
tree_cursor_t(Ref<LeafNode>, const search_position_t&,
const key_view_t&, const value_header_t*);
  // lookup reaches the end; contains the leaf node for a further insert
tree_cursor_t(Ref<LeafNode>);
// create an invalid tree_cursor_t
tree_cursor_t() : cache{ref_leaf_node} {}
const search_position_t& get_position() const { return position; }
Ref<LeafNode> get_leaf_node() const { return ref_leaf_node; }
template <bool VALIDATE>
void update_track(Ref<LeafNode>, const search_position_t&);
void update_cache_same_node(const key_view_t&,
const value_header_t*) const;
void invalidate();
static Ref<tree_cursor_t> create_inserted(
Ref<LeafNode> node, const search_position_t& pos) {
return new tree_cursor_t(node, pos);
}
static Ref<tree_cursor_t> create_tracked(
Ref<LeafNode> node, const search_position_t& pos,
const key_view_t& key, const value_header_t* p_header) {
return new tree_cursor_t(node, pos, key, p_header);
}
static Ref<tree_cursor_t> create_end(Ref<LeafNode> node) {
return new tree_cursor_t(node);
}
/**
* Reversed resource management (tree_cursor_t)
*
* tree_cursor_t holds a reference to the LeafNode, so the LeafNode will be
   * alive as long as any of its cursors is still referenced by the user.
*/
Ref<LeafNode> ref_leaf_node;
search_position_t position;
// account 1 update even if there are multiple updates to the same value
bool is_mutated = false;
/** Cache
*
* Cached memory pointers or views which may be outdated due to
* extent copy-on-write or asynchronous leaf node updates.
*/
class Cache {
public:
Cache(Ref<LeafNode>&);
void validate_is_latest(const search_position_t&) const;
void invalidate() { needs_update_all = true; }
void update_all(const node_version_t&, const key_view_t&, const value_header_t*);
const key_view_t& get_key_view(
value_magic_t magic, const search_position_t& pos) {
make_latest(magic, pos);
return *key_view;
}
const value_header_t* get_p_value_header(
value_magic_t magic, const search_position_t& pos) {
make_latest(magic, pos);
return p_value_header;
}
std::pair<NodeExtentMutable&, ValueDeltaRecorder*>
prepare_mutate_value_payload(context_t, const search_position_t&);
private:
void maybe_duplicate(const node_version_t&);
void make_latest(value_magic_t, const search_position_t&);
// metadata about how cache is valid
Ref<LeafNode>& ref_leaf_node;
bool needs_update_all = true;
node_version_t version;
// cached key value info
const char* p_node_base = nullptr;
std::optional<key_view_t> key_view;
const value_header_t* p_value_header = nullptr;
// cached data-structures to update value payload
std::optional<NodeExtentMutable> value_payload_mut;
ValueDeltaRecorder* p_value_recorder = nullptr;
};
mutable Cache cache;
friend class LeafNode;
friend class Node; // get_position(), get_leaf_node()
};
/**
* Node
*
* An abstracted class for both InternalNode and LeafNode.
*
* Exposes public interfaces for Btree.
*/
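/*
 * Minimal usage sketch (illustrative only; `c`, `key`, `vconf` and the extra
 * Ref<Node> argument are assumed to be prepared by the Btree layer):
 *
 *   root->lower_bound(c, key).si_then([](auto result) {
 *     if (result.match() == MatchKindBS::EQ) {
 *       // result.p_cursor points to the matching key-value pair
 *     }
 *   });
 *
 *   root->insert(c, key, vconf, std::move(root_ref)
 *   ).si_then([](auto ret) {
 *     auto& [cursor, inserted] = ret;  // inserted is false upon conflict
 *   });
 */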
class Node
: public boost::intrusive_ref_counter<
Node, boost::thread_unsafe_counter> {
public:
// public to Btree
struct search_result_t {
bool is_end() const { return p_cursor->is_end(); }
Ref<tree_cursor_t> p_cursor;
match_stat_t mstat;
MatchKindBS match() const {
assert(mstat >= MSTAT_MIN && mstat <= MSTAT_MAX);
return (mstat == MSTAT_EQ ? MatchKindBS::EQ : MatchKindBS::NE);
}
void validate_input_key(const key_hobj_t& key, value_magic_t magic) const {
#ifndef NDEBUG
if (match() == MatchKindBS::EQ) {
assert(key == p_cursor->get_key_view(magic));
} else {
assert(match() == MatchKindBS::NE);
if (p_cursor->is_tracked()) {
assert(key < p_cursor->get_key_view(magic));
} else if (p_cursor->is_end()) {
// good
} else {
assert(p_cursor->is_invalid());
ceph_abort("impossible");
}
}
#endif
}
};
virtual ~Node();
Node(const Node&) = delete;
Node(Node&&) = delete;
Node& operator=(const Node&) = delete;
Node& operator=(Node&&) = delete;
/**
* level
*
* A positive value denotes the level (or height) of this node in tree.
* 0 means LeafNode, positive means InternalNode.
*/
level_t level() const;
/**
* lookup_smallest
*
* Returns a cursor pointing to the smallest key in the sub-tree formed by
* this node.
*
* Returns an end cursor if it is an empty root node.
*/
virtual eagain_ifuture<Ref<tree_cursor_t>> lookup_smallest(context_t) = 0;
/**
* lookup_largest
*
* Returns a cursor pointing to the largest key in the sub-tree formed by
* this node.
*
* Returns an end cursor if it is an empty root node.
*/
virtual eagain_ifuture<Ref<tree_cursor_t>> lookup_largest(context_t) = 0;
/**
* lower_bound
*
* Returns a cursor pointing to the first element in the range [first, last)
* of the sub-tree which does not compare less than the input key. The
* result also denotes whether the pointed key is equal to the input key.
*
* Returns an end cursor with MatchKindBS::NE if:
* - It is an empty root node;
* - Or the input key is larger than all the keys in the sub-tree;
*/
eagain_ifuture<search_result_t> lower_bound(context_t c, const key_hobj_t& key);
/**
* insert
*
* Try to insert a key-value pair into the sub-tree formed by this node.
*
* Returns a boolean denoting whether the insertion is successful:
* - If true, the returned cursor points to the inserted element in tree;
* - If false, the returned cursor points to the conflicting element in tree;
*/
eagain_ifuture<std::pair<Ref<tree_cursor_t>, bool>> insert(
context_t, const key_hobj_t&, value_config_t, Ref<Node>&&);
/**
* erase
*
* Removes a key-value pair from the sub-tree formed by this node.
*
* Returns the number of erased key-value pairs (0 or 1).
*/
eagain_ifuture<std::size_t> erase(context_t, const key_hobj_t&, Ref<Node>&&);
/// Recursively collects the statistics of the sub-tree formed by this node
eagain_ifuture<tree_stats_t> get_tree_stats(context_t);
/// Returns an ostream containing a dump of all the elements in the node.
std::ostream& dump(std::ostream&) const;
  /// Returns an ostream containing a one-line summary of this node.
std::ostream& dump_brief(std::ostream&) const;
/// Print the node name
const std::string& get_name() const;
/// Initializes the tree by allocating an empty root node.
static eagain_ifuture<> mkfs(context_t, RootNodeTracker&);
/// Loads the tree root. The tree must be initialized.
static eagain_ifuture<Ref<Node>> load_root(context_t, RootNodeTracker&);
// Only for unit test purposes.
void test_make_destructable(context_t, NodeExtentMutable&, Super::URef&&);
virtual eagain_ifuture<> test_clone_root(context_t, RootNodeTracker&) const = 0;
protected:
virtual eagain_ifuture<> test_clone_non_root(context_t, Ref<InternalNode>) const {
ceph_abort("impossible path");
}
virtual eagain_ifuture<search_result_t> lower_bound_tracked(
context_t, const key_hobj_t&, MatchHistory&) = 0;
virtual eagain_ifuture<> do_get_tree_stats(context_t, tree_stats_t&) = 0;
virtual bool is_tracking() const = 0;
virtual void track_merge(Ref<Node>, match_stage_t, search_position_t&) = 0;
protected:
Node(NodeImplURef&&);
bool is_tracked() const {
assert(!(super && _parent_info.has_value()));
return (super || _parent_info.has_value());
}
bool is_root() const {
assert(is_tracked());
return !_parent_info.has_value();
}
// as root
void make_root(context_t c, Super::URef&& _super);
void make_root_new(context_t c, Super::URef&& _super) {
assert(_super->get_root_laddr() == L_ADDR_NULL);
make_root(c, std::move(_super));
}
void make_root_from(context_t c, Super::URef&& _super, laddr_t from_addr) {
assert(_super->get_root_laddr() == from_addr);
make_root(c, std::move(_super));
}
void as_root(Super::URef&& _super);
eagain_ifuture<> upgrade_root(context_t, laddr_t);
Super::URef deref_super();
// as child/non-root
template <bool VALIDATE = true>
void as_child(const search_position_t&, Ref<InternalNode>);
struct parent_info_t {
search_position_t position;
Ref<InternalNode> ptr;
};
const parent_info_t& parent_info() const { return *_parent_info; }
Ref<InternalNode> deref_parent();
eagain_ifuture<> apply_split_to_parent(context_t, Ref<Node>&&, Ref<Node>&&, bool);
eagain_ifuture<Ref<tree_cursor_t>> get_next_cursor_from_parent(context_t);
template <bool FORCE_MERGE = false>
eagain_ifuture<> try_merge_adjacent(context_t, bool, Ref<Node>&&);
eagain_ifuture<> erase_node(context_t, Ref<Node>&&);
template <bool FORCE_MERGE = false>
eagain_ifuture<> fix_parent_index(context_t, Ref<Node>&&, bool);
eagain_ifuture<NodeExtentMutable> rebuild_extent(context_t);
eagain_ifuture<> retire(context_t, Ref<Node>&&);
void make_tail(context_t);
private:
/**
* Reversed resource management (Node)
*
* Root Node holds a reference to its parent Super class, so its parent
* will be alive as long as this root node is alive.
*
   * Non-root Node holds a reference to its parent Node, so its parent will
   * be alive as long as any of its children is alive.
*/
// as root
Super::URef super;
// as child/non-root
std::optional<parent_info_t> _parent_info;
private:
static eagain_ifuture<Ref<Node>> load(context_t, laddr_t, bool expect_is_level_tail);
NodeImplURef impl;
friend class InternalNode;
};
inline std::ostream& operator<<(std::ostream& os, const Node& node) {
return node.dump_brief(os);
}
/**
* InternalNode
*
* A concrete implementation of Node class that represents an internal tree
* node. Its level is always positive and its values are logical block
 * addresses of its child nodes. An internal node cannot be empty.
*/
class InternalNode final : public Node {
public:
// public to Node
InternalNode(InternalNodeImpl*, NodeImplURef&&);
~InternalNode() override { assert(tracked_child_nodes.empty()); }
InternalNode(const InternalNode&) = delete;
InternalNode(InternalNode&&) = delete;
InternalNode& operator=(const InternalNode&) = delete;
InternalNode& operator=(InternalNode&&) = delete;
eagain_ifuture<Ref<tree_cursor_t>> get_next_cursor(context_t, const search_position_t&);
eagain_ifuture<> apply_child_split(context_t, Ref<Node>&& left, Ref<Node>&& right, bool);
template <bool VALIDATE>
void do_track_child(Node& child) {
if constexpr (VALIDATE) {
validate_child(child);
}
auto& child_pos = child.parent_info().position;
assert(tracked_child_nodes.find(child_pos) == tracked_child_nodes.end());
tracked_child_nodes[child_pos] = &child;
}
void do_untrack_child(const Node& child) {
assert(check_is_tracking(child));
auto& child_pos = child.parent_info().position;
[[maybe_unused]] auto removed = tracked_child_nodes.erase(child_pos);
assert(removed);
}
bool check_is_tracking(const Node& child) const {
auto& child_pos = child.parent_info().position;
auto found = tracked_child_nodes.find(child_pos);
if (found != tracked_child_nodes.end() && found->second == &child) {
assert(child.parent_info().ptr == this);
return true;
} else {
return false;
}
}
eagain_ifuture<std::pair<Ref<Node>, Ref<Node>>> get_child_peers(
context_t, const search_position_t&);
eagain_ifuture<> erase_child(context_t, Ref<Node>&&);
template <bool FORCE_MERGE = false>
eagain_ifuture<> fix_index(context_t, Ref<Node>&&, bool);
template <bool FORCE_MERGE = false>
eagain_ifuture<> apply_children_merge(
context_t, Ref<Node>&& left, laddr_t, Ref<Node>&& right, bool update_index);
void validate_child_tracked(const Node& child) const {
validate_child(child);
assert(tracked_child_nodes.find(child.parent_info().position) !=
tracked_child_nodes.end());
assert(tracked_child_nodes.find(child.parent_info().position)->second == &child);
}
void validate_child_inconsistent(const Node& child) const;
void validate_tracked_children() const {
#ifndef NDEBUG
for (auto& kv : tracked_child_nodes) {
assert(kv.first == kv.second->parent_info().position);
validate_child(*kv.second);
}
#endif
}
void track_make_tail(const search_position_t&);
static eagain_ifuture<Ref<InternalNode>> allocate_root(
context_t, laddr_t, level_t, laddr_t, Super::URef&&);
protected:
eagain_ifuture<Ref<tree_cursor_t>> lookup_smallest(context_t) override;
eagain_ifuture<Ref<tree_cursor_t>> lookup_largest(context_t) override;
eagain_ifuture<search_result_t> lower_bound_tracked(
context_t, const key_hobj_t&, MatchHistory&) override;
eagain_ifuture<> do_get_tree_stats(context_t, tree_stats_t&) override;
bool is_tracking() const override {
return !tracked_child_nodes.empty();
}
void track_merge(Ref<Node>, match_stage_t, search_position_t&) override;
eagain_ifuture<> test_clone_root(context_t, RootNodeTracker&) const override;
private:
eagain_ifuture<> try_downgrade_root(context_t, Ref<Node>&&);
eagain_ifuture<Ref<InternalNode>> insert_or_split(
context_t, const search_position_t&, const key_view_t&, Ref<Node>,
Ref<Node> outdated_child=nullptr);
// XXX: extract a common tracker for InternalNode to track Node,
// and LeafNode to track tree_cursor_t.
eagain_ifuture<Ref<Node>> get_or_track_child(context_t, const search_position_t&, laddr_t);
template <bool VALIDATE = true>
void track_insert(
const search_position_t&, match_stage_t, Ref<Node>, Ref<Node> nxt_child = nullptr);
void replace_track(Ref<Node> new_child, Ref<Node> old_child, bool);
void track_split(const search_position_t&, Ref<InternalNode>);
template <bool VALIDATE = true>
void track_erase(const search_position_t&, match_stage_t);
void validate_child(const Node& child) const;
struct fresh_node_t {
Ref<InternalNode> node;
NodeExtentMutable mut;
std::pair<Ref<Node>, NodeExtentMutable> make_pair() {
return std::make_pair(Ref<Node>(node), mut);
}
};
static eagain_ifuture<fresh_node_t> allocate(context_t, laddr_t, field_type_t, bool, level_t);
private:
/**
* Reversed resource management (InternalNode)
*
   * InternalNode keeps track of its child nodes which are still alive in
* memory, and their positions will be updated throughout
* insert/split/delete/merge operations of this node.
*/
// XXX: leverage intrusive data structure to control memory overhead
std::map<search_position_t, Node*> tracked_child_nodes;
InternalNodeImpl* impl;
};
/**
* LeafNode
*
* A concrete implementation of Node class that represents a leaf tree node.
* Its level is always 0. A leaf node can only be empty if it is root.
*/
class LeafNode final : public Node {
public:
// public to tree_cursor_t
~LeafNode() override { assert(tracked_cursors.empty()); }
LeafNode(const LeafNode&) = delete;
LeafNode(LeafNode&&) = delete;
LeafNode& operator=(const LeafNode&) = delete;
LeafNode& operator=(LeafNode&&) = delete;
bool is_level_tail() const;
node_version_t get_version() const;
const char* read() const;
extent_len_t get_node_size() const;
std::tuple<key_view_t, const value_header_t*> get_kv(const search_position_t&) const;
eagain_ifuture<Ref<tree_cursor_t>> get_next_cursor(context_t, const search_position_t&);
/**
* erase
*
* Removes a key-value pair from the position.
*
* If get_next is true, returns the cursor pointing to the next key-value
   * pair that followed the erased element, which can be nullptr if it is the end.
*/
template <bool FORCE_MERGE>
eagain_ifuture<Ref<tree_cursor_t>> erase(
context_t, const search_position_t&, bool get_next);
template <bool VALIDATE>
void do_track_cursor(tree_cursor_t& cursor) {
if constexpr (VALIDATE) {
validate_cursor(cursor);
}
auto& cursor_pos = cursor.get_position();
assert(tracked_cursors.find(cursor_pos) == tracked_cursors.end());
tracked_cursors.emplace(cursor_pos, &cursor);
}
void do_untrack_cursor(const tree_cursor_t& cursor) {
validate_cursor(cursor);
auto& cursor_pos = cursor.get_position();
assert(check_is_tracking(cursor));
[[maybe_unused]] auto removed = tracked_cursors.erase(cursor_pos);
assert(removed);
}
bool check_is_tracking(const tree_cursor_t& cursor) const {
auto& cursor_pos = cursor.get_position();
auto found = tracked_cursors.find(cursor_pos);
if (found != tracked_cursors.end() && found->second == &cursor) {
assert(cursor.ref_leaf_node == this);
return true;
} else {
return false;
}
}
eagain_ifuture<> extend_value(context_t, const search_position_t&, value_size_t);
eagain_ifuture<> trim_value(context_t, const search_position_t&, value_size_t);
std::pair<NodeExtentMutable&, ValueDeltaRecorder*>
prepare_mutate_value_payload(context_t);
protected:
eagain_ifuture<Ref<tree_cursor_t>> lookup_smallest(context_t) override;
eagain_ifuture<Ref<tree_cursor_t>> lookup_largest(context_t) override;
eagain_ifuture<search_result_t> lower_bound_tracked(
context_t, const key_hobj_t&, MatchHistory&) override;
eagain_ifuture<> do_get_tree_stats(context_t, tree_stats_t&) override;
bool is_tracking() const override {
return !tracked_cursors.empty();
}
void track_merge(Ref<Node>, match_stage_t, search_position_t&) override;
eagain_ifuture<> test_clone_root(context_t, RootNodeTracker&) const override;
private:
LeafNode(LeafNodeImpl*, NodeImplURef&&);
eagain_ifuture<Ref<tree_cursor_t>> insert_value(
context_t, const key_hobj_t&, value_config_t,
const search_position_t&, const MatchHistory&,
match_stat_t mstat);
static eagain_ifuture<Ref<LeafNode>> allocate_root(context_t, RootNodeTracker&);
friend class Node;
private:
// XXX: extract a common tracker for InternalNode to track Node,
// and LeafNode to track tree_cursor_t.
Ref<tree_cursor_t> get_or_track_cursor(
const search_position_t&, const key_view_t&, const value_header_t*);
Ref<tree_cursor_t> track_insert(
const search_position_t&, match_stage_t, const value_header_t*);
void track_split(const search_position_t&, Ref<LeafNode>);
void track_erase(const search_position_t&, match_stage_t);
void validate_tracked_cursors() const {
#ifndef NDEBUG
for (auto& kv : tracked_cursors) {
assert(kv.first == kv.second->get_position());
validate_cursor(*kv.second);
}
#endif
}
void validate_cursor(const tree_cursor_t& cursor) const;
// invalidate p_value pointers in tree_cursor_t
void on_layout_change() { ++layout_version; }
struct fresh_node_t {
Ref<LeafNode> node;
NodeExtentMutable mut;
std::pair<Ref<Node>, NodeExtentMutable> make_pair() {
return std::make_pair(Ref<Node>(node), mut);
}
};
static eagain_ifuture<fresh_node_t> allocate(context_t, laddr_t, field_type_t, bool);
private:
/**
* Reversed resource management (LeafNode)
*
* LeafNode keeps track of the referencing cursors which are still alive in
* memory, and their positions will be updated throughout
* insert/split/delete/merge operations of this node.
*/
// XXX: leverage intrusive data structure to control memory overhead
std::map<search_position_t, tree_cursor_t*> tracked_cursors;
LeafNodeImpl* impl;
layout_version_t layout_version = 0;
};
}
| 24,933 | 32.513441 | 96 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node_delta_recorder.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "include/buffer.h"
#include "node_types.h"
#include "value.h"
namespace crimson::os::seastore::onode {
/**
* DeltaRecorder
*
* An abstracted class to encapsulate different implementations to apply delta
* to a specific node layout.
*/
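/*
 * Lifecycle sketch (illustrative only): during a transaction the concrete
 * recorder encodes each logical mutation into `encoded`; at submission the
 * accumulated bufferlist is taken via get_delta() and journaled as the extent
 * delta; during replay apply_delta() decodes that bufferlist and re-applies
 * the mutations onto the extent through a NodeExtentMutable.
 */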
class DeltaRecorder {
public:
virtual ~DeltaRecorder() {
    /* May be non-empty if the transaction is abandoned without
* being submitted -- conflicts are a particularly common
* example (denoted generally by returning crimson::ct_error::eagain).
*/
}
bool is_empty() const {
return encoded.length() == 0;
}
ceph::bufferlist get_delta() {
return std::move(encoded);
}
ValueDeltaRecorder* get_value_recorder() const {
assert(value_recorder);
return value_recorder.get();
}
virtual node_type_t node_type() const = 0;
virtual field_type_t field_type() const = 0;
virtual void apply_delta(ceph::bufferlist::const_iterator&,
NodeExtentMutable&,
const NodeExtent&) = 0;
protected:
DeltaRecorder() = default;
DeltaRecorder(const ValueBuilder& vb)
: value_recorder{vb.build_value_recorder(encoded)} {}
ceph::bufferlist encoded;
std::unique_ptr<ValueDeltaRecorder> value_recorder;
};
}
| 1,368 | 23.446429 | 78 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node_extent_accessor.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/logging.h"
#include "node_extent_manager.h"
#include "node_delta_recorder.h"
#include "node_layout_replayable.h"
#include "value.h"
#ifndef NDEBUG
#include "node_extent_manager/test_replay.h"
#endif
namespace crimson::os::seastore::onode {
/**
* DeltaRecorderT
*
 * Responsible for encoding and decoding deltas, and applying them for a
 * specific node layout.
*/
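/*
 * Note: each encode_*() below has a matching decode branch in apply_delta(),
 * e.g. encode_insert() <-> node_delta_op_t::INSERT and encode_split() <->
 * node_delta_op_t::SPLIT, so a recorded mutation sequence can be replayed
 * deterministically against the unmodified extent image.
 */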
template <typename FieldType, node_type_t NODE_TYPE>
class DeltaRecorderT final: public DeltaRecorder {
public:
using layout_t = NodeLayoutReplayableT<FieldType, NODE_TYPE>;
using node_stage_t = typename layout_t::node_stage_t;
using position_t = typename layout_t::position_t;
using StagedIterator = typename layout_t::StagedIterator;
using value_input_t = typename layout_t::value_input_t;
static constexpr auto FIELD_TYPE = layout_t::FIELD_TYPE;
~DeltaRecorderT() override = default;
template <KeyT KT>
void encode_insert(
const full_key_t<KT>& key,
const value_input_t& value,
const position_t& insert_pos,
const match_stage_t& insert_stage,
const node_offset_t& insert_size) {
ceph::encode(node_delta_op_t::INSERT, encoded);
encode_key(key, encoded);
encode_value(value, encoded);
insert_pos.encode(encoded);
ceph::encode(insert_stage, encoded);
ceph::encode(insert_size, encoded);
}
void encode_split(
const StagedIterator& split_at,
const char* p_node_start) {
ceph::encode(node_delta_op_t::SPLIT, encoded);
split_at.encode(p_node_start, encoded);
}
template <KeyT KT>
void encode_split_insert(
const StagedIterator& split_at,
const full_key_t<KT>& key,
const value_input_t& value,
const position_t& insert_pos,
const match_stage_t& insert_stage,
const node_offset_t& insert_size,
const char* p_node_start) {
ceph::encode(node_delta_op_t::SPLIT_INSERT, encoded);
split_at.encode(p_node_start, encoded);
encode_key(key, encoded);
encode_value(value, encoded);
insert_pos.encode(encoded);
ceph::encode(insert_stage, encoded);
ceph::encode(insert_size, encoded);
}
void encode_update_child_addr(
const laddr_t new_addr,
const laddr_packed_t* p_addr,
const char* p_node_start,
extent_len_t node_size) {
ceph::encode(node_delta_op_t::UPDATE_CHILD_ADDR, encoded);
ceph::encode(new_addr, encoded);
int node_offset = reinterpret_cast<const char*>(p_addr) - p_node_start;
assert(node_offset > 0 && node_offset < (int)node_size);
ceph::encode(static_cast<node_offset_t>(node_offset), encoded);
}
void encode_erase(
const position_t& erase_pos) {
ceph::encode(node_delta_op_t::ERASE, encoded);
erase_pos.encode(encoded);
}
void encode_make_tail() {
ceph::encode(node_delta_op_t::MAKE_TAIL, encoded);
}
static DeltaRecorderURef create_for_encode(const ValueBuilder& v_builder) {
return std::unique_ptr<DeltaRecorder>(new DeltaRecorderT(v_builder));
}
static DeltaRecorderURef create_for_replay() {
return std::unique_ptr<DeltaRecorder>(new DeltaRecorderT());
}
protected:
DeltaRecorderT() : DeltaRecorder() {}
DeltaRecorderT(const ValueBuilder& vb) : DeltaRecorder(vb) {}
node_type_t node_type() const override { return NODE_TYPE; }
field_type_t field_type() const override { return FIELD_TYPE; }
void apply_delta(ceph::bufferlist::const_iterator& delta,
NodeExtentMutable& mut,
const NodeExtent& node) override {
LOG_PREFIX(OTree::Extent::Replay);
assert(is_empty());
node_stage_t stage(reinterpret_cast<const FieldType*>(mut.get_read()),
mut.get_length());
node_delta_op_t op;
try {
ceph::decode(op, delta);
switch (op) {
case node_delta_op_t::INSERT: {
SUBDEBUG(seastore_onode, "decoding INSERT ...");
auto key = key_hobj_t::decode(delta);
auto value = decode_value(delta);
auto insert_pos = position_t::decode(delta);
match_stage_t insert_stage;
ceph::decode(insert_stage, delta);
node_offset_t insert_size;
ceph::decode(insert_size, delta);
SUBDEBUG(seastore_onode,
"apply {}, {}, insert_pos({}), insert_stage={}, "
"insert_size={}B ...",
key, value, insert_pos, insert_stage, insert_size);
layout_t::template insert<KeyT::HOBJ>(
mut, stage, key, value, insert_pos, insert_stage, insert_size);
break;
}
case node_delta_op_t::SPLIT: {
SUBDEBUG(seastore_onode, "decoding SPLIT ...");
auto split_at = StagedIterator::decode(
mut.get_read(), mut.get_length(), delta);
SUBDEBUG(seastore_onode, "apply split_at={} ...", split_at);
layout_t::split(mut, stage, split_at);
break;
}
case node_delta_op_t::SPLIT_INSERT: {
SUBDEBUG(seastore_onode, "decoding SPLIT_INSERT ...");
auto split_at = StagedIterator::decode(
mut.get_read(), mut.get_length(), delta);
auto key = key_hobj_t::decode(delta);
auto value = decode_value(delta);
auto insert_pos = position_t::decode(delta);
match_stage_t insert_stage;
ceph::decode(insert_stage, delta);
node_offset_t insert_size;
ceph::decode(insert_size, delta);
SUBDEBUG(seastore_onode,
"apply split_at={}, {}, {}, insert_pos({}), insert_stage={}, "
"insert_size={}B ...",
split_at, key, value, insert_pos, insert_stage, insert_size);
layout_t::template split_insert<KeyT::HOBJ>(
mut, stage, split_at, key, value, insert_pos, insert_stage, insert_size);
break;
}
case node_delta_op_t::UPDATE_CHILD_ADDR: {
SUBDEBUG(seastore_onode, "decoding UPDATE_CHILD_ADDR ...");
laddr_t new_addr;
ceph::decode(new_addr, delta);
node_offset_t update_offset;
ceph::decode(update_offset, delta);
auto p_addr = reinterpret_cast<laddr_packed_t*>(
mut.get_write() + update_offset);
SUBDEBUG(seastore_onode,
"apply {:#x} to offset {:#x} ...",
new_addr, update_offset);
layout_t::update_child_addr(mut, new_addr, p_addr);
break;
}
case node_delta_op_t::ERASE: {
SUBDEBUG(seastore_onode, "decoding ERASE ...");
auto erase_pos = position_t::decode(delta);
SUBDEBUG(seastore_onode, "apply erase_pos({}) ...", erase_pos);
layout_t::erase(mut, stage, erase_pos);
break;
}
case node_delta_op_t::MAKE_TAIL: {
SUBDEBUG(seastore_onode, "decoded MAKE_TAIL, apply ...");
layout_t::make_tail(mut, stage);
break;
}
case node_delta_op_t::SUBOP_UPDATE_VALUE: {
SUBDEBUG(seastore_onode, "decoding SUBOP_UPDATE_VALUE ...");
node_offset_t value_header_offset;
ceph::decode(value_header_offset, delta);
auto p_header = mut.get_read() + value_header_offset;
auto p_header_ = reinterpret_cast<const value_header_t*>(p_header);
SUBDEBUG(seastore_onode, "update {} at {:#x} ...", *p_header_, value_header_offset);
auto payload_mut = p_header_->get_payload_mutable(mut);
auto value_addr = node.get_laddr() + payload_mut.get_node_offset();
get_value_replayer(p_header_->magic)->apply_value_delta(
delta, payload_mut, value_addr);
break;
}
default:
SUBERROR(seastore_onode,
"got unknown op {} when replay {}",
op, node);
ceph_abort("fatal error");
}
} catch (buffer::error& e) {
SUBERROR(seastore_onode,
"got decode error {} when replay {}",
e.what(), node);
ceph_abort("fatal error");
}
}
private:
ValueDeltaRecorder* get_value_replayer(value_magic_t magic) {
    // The replay procedure is independent of the Btree and happens at a lower
    // level in seastore. There is no ValueBuilder, so the recorder needs to
    // build the ValueDeltaRecorder by itself.
if (value_replayer) {
if (value_replayer->get_header_magic() != magic) {
ceph_abort_msgf("OTree::Extent::Replay: value magic mismatch %x != %x",
value_replayer->get_header_magic(), magic);
}
} else {
value_replayer = build_value_recorder_by_type(encoded, magic);
if (!value_replayer) {
ceph_abort_msgf("OTree::Extent::Replay: got unexpected value magic = %x",
magic);
}
}
return value_replayer.get();
}
void encode_value(const value_input_t& value, ceph::bufferlist& encoded) const {
if constexpr (std::is_same_v<value_input_t, laddr_t>) {
// NODE_TYPE == node_type_t::INTERNAL
ceph::encode(value, encoded);
} else if constexpr (std::is_same_v<value_input_t, value_config_t>) {
// NODE_TYPE == node_type_t::LEAF
value.encode(encoded);
} else {
ceph_abort("impossible path");
}
}
value_input_t decode_value(ceph::bufferlist::const_iterator& delta) const {
if constexpr (std::is_same_v<value_input_t, laddr_t>) {
// NODE_TYPE == node_type_t::INTERNAL
laddr_t value;
ceph::decode(value, delta);
return value;
} else if constexpr (std::is_same_v<value_input_t, value_config_t>) {
// NODE_TYPE == node_type_t::LEAF
return value_config_t::decode(delta);
} else {
ceph_abort("impossible path");
}
}
std::unique_ptr<ValueDeltaRecorder> value_replayer;
};
/**
* NodeExtentAccessorT
*
 * This component is responsible for referencing and mutating the underlying
 * NodeExtent, recording mutation parameters when needed, and applying the
 * recorded modifications for a specific node layout.
*
* For possible internal states, see node_types.h.
*/
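/*
 * Typical mutate flow (illustrative sketch only, assuming `accessor` wraps a
 * committed extent and `c` is the current context_t):
 *
 *   accessor.prepare_mutate(c);  // READ_ONLY -> MUTATION_PENDING, else no-op
 *   auto p_value = accessor.template insert_replayable<KeyT::HOBJ>(
 *       key, value, insert_pos, insert_stage, insert_size);
 *
 * The same parameters are encoded by the attached recorder (when mutation is
 * pending), so the journaled delta can later be replayed against the stable
 * extent image.
 */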
template <typename FieldType, node_type_t NODE_TYPE>
class NodeExtentAccessorT {
public:
using layout_t = NodeLayoutReplayableT<FieldType, NODE_TYPE>;
using node_stage_t = typename layout_t::node_stage_t;
using position_t = typename layout_t::position_t;
using recorder_t = DeltaRecorderT<FieldType, NODE_TYPE>;
using StagedIterator = typename layout_t::StagedIterator;
using value_input_t = typename layout_t::value_input_t;
using value_t = typename layout_t::value_t;
static constexpr auto FIELD_TYPE = layout_t::FIELD_TYPE;
NodeExtentAccessorT(NodeExtentRef extent)
: extent{extent},
node_stage{reinterpret_cast<const FieldType*>(extent->get_read()),
extent->get_length()} {
assert(is_valid_node_size(extent->get_length()));
if (extent->is_initial_pending()) {
state = nextent_state_t::FRESH;
mut.emplace(extent->get_mutable());
assert(extent->get_recorder() == nullptr);
recorder = nullptr;
} else if (extent->is_mutation_pending()) {
state = nextent_state_t::MUTATION_PENDING;
mut.emplace(extent->get_mutable());
auto p_recorder = extent->get_recorder();
assert(p_recorder != nullptr);
assert(p_recorder->node_type() == NODE_TYPE);
assert(p_recorder->field_type() == FIELD_TYPE);
recorder = static_cast<recorder_t*>(p_recorder);
} else if (!extent->is_mutable() && extent->is_valid()) {
state = nextent_state_t::READ_ONLY;
// mut is empty
assert(extent->get_recorder() == nullptr ||
extent->get_recorder()->is_empty());
recorder = nullptr;
} else {
// extent is invalid or retired
ceph_abort("impossible path");
}
#ifndef NDEBUG
auto ref_recorder = recorder_t::create_for_replay();
test_recorder = static_cast<recorder_t*>(ref_recorder.get());
test_extent = TestReplayExtent::create(
get_length(), std::move(ref_recorder));
#endif
}
~NodeExtentAccessorT() = default;
NodeExtentAccessorT(const NodeExtentAccessorT&) = delete;
NodeExtentAccessorT(NodeExtentAccessorT&&) = delete;
NodeExtentAccessorT& operator=(const NodeExtentAccessorT&) = delete;
NodeExtentAccessorT& operator=(NodeExtentAccessorT&&) = delete;
const node_stage_t& read() const { return node_stage; }
laddr_t get_laddr() const { return extent->get_laddr(); }
extent_len_t get_length() const {
auto len = extent->get_length();
assert(is_valid_node_size(len));
return len;
}
nextent_state_t get_state() const {
assert(!is_retired());
// we cannot rely on the underlying extent state because
// FRESH/MUTATION_PENDING can become DIRTY after transaction submission.
return state;
}
bool is_retired() const {
if (extent) {
return false;
} else {
return true;
}
}
  // must be called before any mutate attempts.
  // for safety when mixing reads and mutates, call it before reading.
void prepare_mutate(context_t c) {
assert(!is_retired());
if (state == nextent_state_t::READ_ONLY) {
assert(!extent->is_mutable());
auto ref_recorder = recorder_t::create_for_encode(c.vb);
recorder = static_cast<recorder_t*>(ref_recorder.get());
extent = extent->mutate(c, std::move(ref_recorder));
state = nextent_state_t::MUTATION_PENDING;
assert(extent->is_mutation_pending());
node_stage = node_stage_t(reinterpret_cast<const FieldType*>(extent->get_read()),
get_length());
assert(recorder == static_cast<recorder_t*>(extent->get_recorder()));
mut.emplace(extent->get_mutable());
}
assert(extent->is_mutable());
}
template <KeyT KT>
const value_t* insert_replayable(
const full_key_t<KT>& key,
const value_input_t& value,
position_t& insert_pos,
match_stage_t& insert_stage,
node_offset_t& insert_size) {
assert(extent->is_mutable());
assert(state != nextent_state_t::READ_ONLY);
if (state == nextent_state_t::MUTATION_PENDING) {
recorder->template encode_insert<KT>(
key, value, insert_pos, insert_stage, insert_size);
}
#ifndef NDEBUG
test_extent->prepare_replay(extent);
test_recorder->template encode_insert<KT>(
key, value, insert_pos, insert_stage, insert_size);
#endif
auto ret = layout_t::template insert<KT>(
*mut, read(), key, value,
insert_pos, insert_stage, insert_size);
#ifndef NDEBUG
test_extent->replay_and_verify(extent);
#endif
return ret;
}
void split_replayable(StagedIterator& split_at) {
assert(extent->is_mutable());
assert(state != nextent_state_t::READ_ONLY);
if (state == nextent_state_t::MUTATION_PENDING) {
recorder->encode_split(split_at, read().p_start());
}
#ifndef NDEBUG
test_extent->prepare_replay(extent);
test_recorder->encode_split(split_at, read().p_start());
#endif
layout_t::split(*mut, read(), split_at);
#ifndef NDEBUG
test_extent->replay_and_verify(extent);
#endif
}
template <KeyT KT>
const value_t* split_insert_replayable(
StagedIterator& split_at,
const full_key_t<KT>& key,
const value_input_t& value,
position_t& insert_pos,
match_stage_t& insert_stage,
node_offset_t& insert_size) {
assert(extent->is_mutable());
assert(state != nextent_state_t::READ_ONLY);
if (state == nextent_state_t::MUTATION_PENDING) {
recorder->template encode_split_insert<KT>(
split_at, key, value, insert_pos, insert_stage, insert_size,
read().p_start());
}
#ifndef NDEBUG
test_extent->prepare_replay(extent);
test_recorder->template encode_split_insert<KT>(
split_at, key, value, insert_pos, insert_stage, insert_size,
read().p_start());
#endif
auto ret = layout_t::template split_insert<KT>(
*mut, read(), split_at, key, value,
insert_pos, insert_stage, insert_size);
#ifndef NDEBUG
test_extent->replay_and_verify(extent);
#endif
return ret;
}
void update_child_addr_replayable(
const laddr_t new_addr, laddr_packed_t* p_addr) {
assert(extent->is_mutable());
assert(state != nextent_state_t::READ_ONLY);
if (state == nextent_state_t::MUTATION_PENDING) {
recorder->encode_update_child_addr(
new_addr, p_addr, read().p_start(), get_length());
}
#ifndef NDEBUG
test_extent->prepare_replay(extent);
test_recorder->encode_update_child_addr(
new_addr, p_addr, read().p_start(), get_length());
#endif
layout_t::update_child_addr(*mut, new_addr, p_addr);
#ifndef NDEBUG
test_extent->replay_and_verify(extent);
#endif
}
std::tuple<match_stage_t, position_t> erase_replayable(const position_t& pos) {
assert(extent->is_mutable());
assert(state != nextent_state_t::READ_ONLY);
if (state == nextent_state_t::MUTATION_PENDING) {
recorder->encode_erase(pos);
}
#ifndef NDEBUG
test_extent->prepare_replay(extent);
test_recorder->encode_erase(pos);
#endif
auto ret = layout_t::erase(*mut, read(), pos);
#ifndef NDEBUG
test_extent->replay_and_verify(extent);
#endif
return ret;
}
position_t make_tail_replayable() {
assert(extent->is_mutable());
assert(state != nextent_state_t::READ_ONLY);
if (state == nextent_state_t::MUTATION_PENDING) {
recorder->encode_make_tail();
}
#ifndef NDEBUG
test_extent->prepare_replay(extent);
test_recorder->encode_make_tail();
#endif
auto ret = layout_t::make_tail(*mut, read());
#ifndef NDEBUG
test_extent->replay_and_verify(extent);
#endif
return ret;
}
std::pair<NodeExtentMutable&, ValueDeltaRecorder*>
prepare_mutate_value_payload(context_t c) {
prepare_mutate(c);
ValueDeltaRecorder* p_value_recorder = nullptr;
if (state == nextent_state_t::MUTATION_PENDING) {
p_value_recorder = recorder->get_value_recorder();
}
return {*mut, p_value_recorder};
}
void test_copy_to(NodeExtentMutable& to) const {
assert(extent->get_length() == to.get_length());
std::memcpy(to.get_write(), extent->get_read(), get_length());
}
eagain_ifuture<NodeExtentMutable> rebuild(context_t c, laddr_t hint) {
LOG_PREFIX(OTree::Extent::rebuild);
assert(!is_retired());
if (state == nextent_state_t::FRESH) {
assert(extent->is_initial_pending());
// already fresh and no need to record
return eagain_iertr::make_ready_future<NodeExtentMutable>(*mut);
}
assert(!extent->is_initial_pending());
auto alloc_size = get_length();
return c.nm.alloc_extent(c.t, hint, alloc_size
).handle_error_interruptible(
eagain_iertr::pass_further{},
crimson::ct_error::input_output_error::handle(
[FNAME, c, alloc_size, l_to_discard = extent->get_laddr()] {
SUBERRORT(seastore_onode,
"EIO during allocate -- node_size={}, to_discard={:x}",
c.t, alloc_size, l_to_discard);
ceph_abort("fatal error");
})
).si_then([this, c, FNAME] (auto fresh_extent) {
SUBDEBUGT(seastore_onode,
"update addr from {:#x} to {:#x} ...",
c.t, extent->get_laddr(), fresh_extent->get_laddr());
assert(fresh_extent);
assert(fresh_extent->is_initial_pending());
assert(fresh_extent->get_recorder() == nullptr);
assert(get_length() == fresh_extent->get_length());
auto fresh_mut = fresh_extent->get_mutable();
std::memcpy(fresh_mut.get_write(), extent->get_read(), get_length());
NodeExtentRef to_discard = extent;
extent = fresh_extent;
node_stage = node_stage_t(reinterpret_cast<const FieldType*>(extent->get_read()),
get_length());
state = nextent_state_t::FRESH;
mut.emplace(fresh_mut);
recorder = nullptr;
return c.nm.retire_extent(c.t, to_discard
).handle_error_interruptible(
eagain_iertr::pass_further{},
crimson::ct_error::input_output_error::handle(
[FNAME, c, l_to_discard = to_discard->get_laddr(),
l_fresh = fresh_extent->get_laddr()] {
SUBERRORT(seastore_onode,
"EIO during retire -- to_disgard={:x}, fresh={:x}",
c.t, l_to_discard, l_fresh);
ceph_abort("fatal error");
}),
crimson::ct_error::enoent::handle(
[FNAME, c, l_to_discard = to_discard->get_laddr(),
l_fresh = fresh_extent->get_laddr()] {
SUBERRORT(seastore_onode,
"ENOENT during retire -- to_disgard={:x}, fresh={:x}",
c.t, l_to_discard, l_fresh);
ceph_abort("fatal error");
})
);
}).si_then([this, c] {
boost::ignore_unused(c); // avoid clang warning;
assert(!c.t.is_conflicted());
return *mut;
});
}
eagain_ifuture<> retire(context_t c) {
LOG_PREFIX(OTree::Extent::retire);
assert(!is_retired());
auto addr = extent->get_laddr();
return c.nm.retire_extent(c.t, std::move(extent)
).handle_error_interruptible(
eagain_iertr::pass_further{},
crimson::ct_error::input_output_error::handle(
[FNAME, c, addr] {
SUBERRORT(seastore_onode, "EIO -- addr={:x}", c.t, addr);
ceph_abort("fatal error");
}),
crimson::ct_error::enoent::handle(
[FNAME, c, addr] {
SUBERRORT(seastore_onode, "ENOENT -- addr={:x}", c.t, addr);
ceph_abort("fatal error");
})
#ifndef NDEBUG
).si_then([c] {
assert(!c.t.is_conflicted());
}
#endif
);
}
private:
NodeExtentRef extent;
node_stage_t node_stage;
nextent_state_t state;
std::optional<NodeExtentMutable> mut;
// owned by extent
recorder_t* recorder;
#ifndef NDEBUG
// verify record replay using a different memory block
TestReplayExtent::Ref test_extent;
recorder_t* test_recorder;
#endif
};
}
| 21,818 | 34.191935 | 92 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include "node_extent_manager.h"
#include "node_extent_manager/dummy.h"
#include "node_extent_manager/seastore.h"
namespace crimson::os::seastore::onode {
NodeExtentManagerURef NodeExtentManager::create_dummy(bool is_sync)
{
if (is_sync) {
return NodeExtentManagerURef(new DummyNodeExtentManager<true>());
} else {
return NodeExtentManagerURef(new DummyNodeExtentManager<false>());
}
}
NodeExtentManagerURef NodeExtentManager::create_seastore(
TransactionManager &tm, laddr_t min_laddr, double p_eagain)
{
if (p_eagain == 0.0) {
return NodeExtentManagerURef(
new SeastoreNodeExtentManager<false>(tm, min_laddr, p_eagain));
} else {
return NodeExtentManagerURef(
new SeastoreNodeExtentManager<true>(tm, min_laddr, p_eagain));
}
}
}
| 885 | 25.848485 | 72 | cc |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/common/type_helpers.h"
#include "crimson/os/seastore/cached_extent.h"
#include "crimson/os/seastore/transaction_manager.h"
#include "fwd.h"
#include "node_extent_mutable.h"
#include "node_types.h"
#include "stages/node_stage_layout.h"
#include "super.h"
/**
* node_extent_manager.h
*
* Contains general interfaces for different backends (Dummy and Seastore).
*/
namespace crimson::os::seastore::onode {
using crimson::os::seastore::LogicalCachedExtent;
class NodeExtent : public LogicalCachedExtent {
public:
virtual ~NodeExtent() = default;
const node_header_t& get_header() const {
return *reinterpret_cast<const node_header_t*>(get_read());
}
const char* get_read() const {
return get_bptr().c_str();
}
NodeExtentMutable get_mutable() {
assert(is_mutable());
return do_get_mutable();
}
virtual DeltaRecorder* get_recorder() const = 0;
virtual NodeExtentRef mutate(context_t, DeltaRecorderURef&&) = 0;
protected:
template <typename... T>
NodeExtent(T&&... t) : LogicalCachedExtent(std::forward<T>(t)...) {}
NodeExtentMutable do_get_mutable() {
return NodeExtentMutable(get_bptr().c_str(), get_length());
}
std::ostream& print_detail_l(std::ostream& out) const final {
return out << ", fltree_header=" << get_header();
}
/**
* Abstracted interfaces to implement:
* - CacheExtent::duplicate_for_write() -> CachedExtentRef
* - CacheExtent::get_type() -> extent_types_t
* - CacheExtent::get_delta() -> ceph::bufferlist
* - LogicalCachedExtent::apply_delta(const ceph::bufferlist) -> void
*/
};
using crimson::os::seastore::TransactionManager;
class NodeExtentManager {
using base_iertr = TransactionManager::base_iertr;
public:
virtual ~NodeExtentManager() = default;
virtual bool is_read_isolated() const = 0;
using read_iertr = base_iertr::extend<
crimson::ct_error::invarg,
crimson::ct_error::enoent,
crimson::ct_error::erange>;
virtual read_iertr::future<NodeExtentRef> read_extent(
Transaction&, laddr_t) = 0;
using alloc_iertr = base_iertr;
virtual alloc_iertr::future<NodeExtentRef> alloc_extent(
Transaction&, laddr_t hint, extent_len_t) = 0;
using retire_iertr = base_iertr::extend<
crimson::ct_error::enoent>;
virtual retire_iertr::future<> retire_extent(
Transaction&, NodeExtentRef) = 0;
using getsuper_iertr = base_iertr;
virtual getsuper_iertr::future<Super::URef> get_super(
Transaction&, RootNodeTracker&) = 0;
virtual std::ostream& print(std::ostream& os) const = 0;
static NodeExtentManagerURef create_dummy(bool is_sync);
static NodeExtentManagerURef create_seastore(
TransactionManager &tm, laddr_t min_laddr = L_ADDR_MIN, double p_eagain = 0.0);
};
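/*
 * Backend selection sketch (illustrative only):
 *
 *   auto nm = NodeExtentManager::create_dummy(true);  // in-memory test backend
 *   // or, backed by a seastore TransactionManager `tm`:
 *   auto nm2 = NodeExtentManager::create_seastore(tm);
 */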
inline std::ostream& operator<<(std::ostream& os, const NodeExtentManager& nm) {
return nm.print(os);
}
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::onode::NodeExtent> : fmt::ostream_formatter {};
#endif
| 3,120 | 28.443396 | 104 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node_extent_mutable.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include <cstring>
#include "fwd.h"
#pragma once
namespace crimson::os::seastore::onode {
/**
* NodeExtentMutable
*
* A thin wrapper of NodeExtent to make sure that only the newly allocated
* or the duplicated NodeExtent is mutable, and the memory modifications are
* safe within the extent range.
*/
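/*
 * Illustrative-only sketch: writes go through the wrapper so they are
 * bounds-checked (asserted) against the extent range, assuming `mut` was
 * obtained from NodeExtent::get_mutable():
 *
 *   node_header_t header;
 *   mut.copy_in_relative(0, header);          // write `header` at offset 0
 *   mut.set_relative(sizeof(header), 0, 8);   // zero 8 bytes after it
 */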
class NodeExtentMutable {
public:
void copy_in_absolute(void* dst, const void* src, extent_len_t len) {
assert(is_safe(dst, len));
std::memcpy(dst, src, len);
}
template <typename T>
void copy_in_absolute(void* dst, const T& src) {
copy_in_absolute(dst, &src, sizeof(T));
}
const void* copy_in_relative(
extent_len_t dst_offset, const void* src, extent_len_t len) {
auto dst = get_write() + dst_offset;
copy_in_absolute(dst, src, len);
return dst;
}
template <typename T>
const T* copy_in_relative(
extent_len_t dst_offset, const T& src) {
auto dst = copy_in_relative(dst_offset, &src, sizeof(T));
return static_cast<const T*>(dst);
}
void shift_absolute(const void* src, extent_len_t len, int offset) {
assert(is_safe(src, len));
char* to = (char*)src + offset;
assert(is_safe(to, len));
if (len != 0) {
std::memmove(to, src, len);
}
}
void shift_relative(extent_len_t src_offset, extent_len_t len, int offset) {
shift_absolute(get_write() + src_offset, len, offset);
}
void set_absolute(void* dst, int value, extent_len_t len) {
assert(is_safe(dst, len));
std::memset(dst, value, len);
}
void set_relative(extent_len_t dst_offset, int value, extent_len_t len) {
auto dst = get_write() + dst_offset;
set_absolute(dst, value, len);
}
template <typename T>
void validate_inplace_update(const T& updated) {
assert(is_safe(&updated, sizeof(T)));
}
const char* get_read() const { return p_start; }
char* get_write() { return p_start; }
extent_len_t get_length() const {
#ifndef NDEBUG
if (node_offset == 0) {
assert(is_valid_node_size(length));
}
#endif
return length;
}
node_offset_t get_node_offset() const { return node_offset; }
NodeExtentMutable get_mutable_absolute(const void* dst, node_offset_t len) const {
assert(node_offset == 0);
assert(is_safe(dst, len));
assert((const char*)dst != get_read());
auto ret = *this;
node_offset_t offset = (const char*)dst - get_read();
assert(offset != 0);
ret.p_start += offset;
ret.length = len;
ret.node_offset = offset;
return ret;
}
NodeExtentMutable get_mutable_relative(
node_offset_t offset, node_offset_t len) const {
return get_mutable_absolute(get_read() + offset, len);
}
private:
NodeExtentMutable(char* p_start, extent_len_t length)
: p_start{p_start}, length{length} {}
bool is_safe(const void* src, extent_len_t len) const {
return ((const char*)src >= p_start) &&
((const char*)src + len <= p_start + length);
}
char* p_start;
extent_len_t length;
node_offset_t node_offset = 0;
friend class NodeExtent;
};
}
| 3,146 | 26.605263 | 84 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node_impl.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include "node_impl.h"
#include "node_layout.h"
namespace crimson::os::seastore::onode {
#ifdef UNIT_TESTS_BUILT
last_split_info_t last_split = {};
#endif
// XXX: branchless allocation
eagain_ifuture<InternalNodeImpl::fresh_impl_t>
InternalNodeImpl::allocate(
context_t c, laddr_t hint, field_type_t type, bool is_level_tail, level_t level)
{
if (type == field_type_t::N0) {
return InternalNode0::allocate(c, hint, is_level_tail, level);
} else if (type == field_type_t::N1) {
return InternalNode1::allocate(c, hint, is_level_tail, level);
} else if (type == field_type_t::N2) {
return InternalNode2::allocate(c, hint, is_level_tail, level);
} else if (type == field_type_t::N3) {
return InternalNode3::allocate(c, hint, is_level_tail, level);
} else {
ceph_abort("impossible path");
}
}
eagain_ifuture<LeafNodeImpl::fresh_impl_t>
LeafNodeImpl::allocate(
context_t c, laddr_t hint, field_type_t type, bool is_level_tail)
{
if (type == field_type_t::N0) {
return LeafNode0::allocate(c, hint, is_level_tail, 0);
} else if (type == field_type_t::N1) {
return LeafNode1::allocate(c, hint, is_level_tail, 0);
} else if (type == field_type_t::N2) {
return LeafNode2::allocate(c, hint, is_level_tail, 0);
} else if (type == field_type_t::N3) {
return LeafNode3::allocate(c, hint, is_level_tail, 0);
} else {
ceph_abort("impossible path");
}
}
InternalNodeImplURef InternalNodeImpl::load(
NodeExtentRef extent, field_type_t type)
{
if (type == field_type_t::N0) {
return InternalNode0::load(extent);
} else if (type == field_type_t::N1) {
return InternalNode1::load(extent);
} else if (type == field_type_t::N2) {
return InternalNode2::load(extent);
} else if (type == field_type_t::N3) {
return InternalNode3::load(extent);
} else {
ceph_abort("impossible path");
}
}
LeafNodeImplURef LeafNodeImpl::load(
NodeExtentRef extent, field_type_t type)
{
if (type == field_type_t::N0) {
return LeafNode0::load(extent);
} else if (type == field_type_t::N1) {
return LeafNode1::load(extent);
} else if (type == field_type_t::N2) {
return LeafNode2::load(extent);
} else if (type == field_type_t::N3) {
return LeafNode3::load(extent);
} else {
ceph_abort("impossible path");
}
}
}
| 2,416 | 28.839506 | 84 | cc |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node_impl.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <ostream>
#include "node_extent_mutable.h"
#include "node_types.h"
#include "stages/stage_types.h"
namespace crimson::os::seastore::onode {
#ifdef UNIT_TESTS_BUILT
enum class InsertType { BEGIN, LAST, MID };
struct split_expectation_t {
match_stage_t split_stage;
match_stage_t insert_stage;
bool is_insert_left;
InsertType insert_type;
};
struct last_split_info_t {
search_position_t split_pos;
match_stage_t insert_stage;
bool is_insert_left;
InsertType insert_type;
bool match(const split_expectation_t& e) const {
match_stage_t split_stage;
if (split_pos.nxt.nxt.index == 0) {
if (split_pos.nxt.index == 0) {
split_stage = 2;
} else {
split_stage = 1;
}
} else {
split_stage = 0;
}
return split_stage == e.split_stage &&
insert_stage == e.insert_stage &&
is_insert_left == e.is_insert_left &&
insert_type == e.insert_type;
}
bool match_split_pos(const search_position_t& pos) const {
return split_pos == pos;
}
};
extern last_split_info_t last_split;
#endif
struct key_hobj_t;
struct key_view_t;
class NodeExtentMutable;
/**
* NodeImpl
*
* Hides type specific node layout implementations for Node.
*/
class NodeImpl {
public:
virtual ~NodeImpl() = default;
virtual node_type_t node_type() const = 0;
virtual field_type_t field_type() const = 0;
virtual laddr_t laddr() const = 0;
virtual const char* read() const = 0;
virtual extent_len_t get_node_size() const = 0;
virtual nextent_state_t get_extent_state() const = 0;
virtual void prepare_mutate(context_t) = 0;
virtual bool is_level_tail() const = 0;
/* Invariants for num_keys and num_values:
* - for leaf node and non-tail internal node, num_keys == num_values;
* - for tail internal node, num_keys + 1 == num_values;
   * - all nodes must have at least 1 value, except the root leaf node;
   * - the root internal node must have more than 1 value;
*/
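  // e.g. a level-tail internal node with keys [3, 7] stores 3 child addresses,
  // while a non-tail internal node with the same keys stores 2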
virtual void validate_non_empty() const = 0;
virtual bool is_keys_empty() const = 0;
// under the assumption that node is not empty
virtual bool has_single_value() const = 0;
virtual level_t level() const = 0;
virtual node_offset_t free_size() const = 0;
virtual extent_len_t total_size() const = 0;
virtual bool is_extent_retired() const = 0;
virtual std::optional<key_view_t> get_pivot_index() const = 0;
virtual bool is_size_underflow() const = 0;
virtual std::tuple<match_stage_t, search_position_t> erase(const search_position_t&) = 0;
virtual std::tuple<match_stage_t, std::size_t> evaluate_merge(NodeImpl&) = 0;
virtual search_position_t merge(NodeExtentMutable&, NodeImpl&, match_stage_t, extent_len_t) = 0;
virtual eagain_ifuture<NodeExtentMutable> rebuild_extent(context_t) = 0;
virtual eagain_ifuture<> retire_extent(context_t) = 0;
virtual search_position_t make_tail() = 0;
virtual node_stats_t get_stats() const = 0;
virtual std::ostream& dump(std::ostream&) const = 0;
virtual std::ostream& dump_brief(std::ostream&) const = 0;
virtual const std::string& get_name() const = 0;
virtual void validate_layout() const = 0;
virtual void test_copy_to(NodeExtentMutable&) const = 0;
virtual void test_set_tail(NodeExtentMutable&) = 0;
protected:
NodeImpl() = default;
};
/**
* InternalNodeImpl
*
* Hides type specific node layout implementations for InternalNode.
*/
class InternalNodeImpl : public NodeImpl {
public:
struct internal_marker_t {};
virtual ~InternalNodeImpl() = default;
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual void get_slot(const search_position_t&, // IN
key_view_t* = nullptr, // OUT
const laddr_packed_t** = nullptr) const { // OUT
ceph_abort("impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual void get_prev_slot(search_position_t&, // IN&OUT
key_view_t* = nullptr, // OUT
const laddr_packed_t** = nullptr) const { // OUT
ceph_abort("impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual void get_next_slot(search_position_t&, // IN&OUT
key_view_t* = nullptr, // OUT
const laddr_packed_t** = nullptr) const { // OUT
ceph_abort("impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual void get_largest_slot(search_position_t* = nullptr, // OUT
key_view_t* = nullptr, // OUT
const laddr_packed_t** = nullptr) const { // OUT
ceph_abort("impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual lookup_result_t<node_type_t::INTERNAL> lower_bound(
const key_hobj_t&, MatchHistory&,
key_view_t* = nullptr, internal_marker_t = {}) const {
ceph_abort("impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual const laddr_packed_t* insert(
const key_view_t&, const laddr_t&, search_position_t&, match_stage_t&, node_offset_t&) {
ceph_abort("impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual std::tuple<search_position_t, bool, const laddr_packed_t*> split_insert(
NodeExtentMutable&, NodeImpl&, const key_view_t&, const laddr_t&,
search_position_t&, match_stage_t&, node_offset_t&) {
ceph_abort("impossible path");
}
virtual const laddr_packed_t* get_tail_value() const = 0;
virtual void replace_child_addr(const search_position_t&, laddr_t dst, laddr_t src) = 0;
virtual std::tuple<match_stage_t, node_offset_t> evaluate_insert(
const key_view_t&, const laddr_t&, search_position_t&) const = 0;
struct fresh_impl_t {
InternalNodeImplURef impl;
NodeExtentMutable mut;
std::pair<NodeImplURef, NodeExtentMutable> make_pair() {
return {std::move(impl), mut};
}
};
static eagain_ifuture<fresh_impl_t> allocate(context_t, laddr_t, field_type_t, bool, level_t);
static InternalNodeImplURef load(NodeExtentRef, field_type_t);
protected:
InternalNodeImpl() = default;
};
/**
* LeafNodeImpl
*
* Hides type specific node layout implementations for LeafNode.
*/
class LeafNodeImpl : public NodeImpl {
public:
struct leaf_marker_t {};
virtual ~LeafNodeImpl() = default;
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual void get_slot(const search_position_t&, // IN
key_view_t* = nullptr, // OUT
const value_header_t** = nullptr) const { // OUT
ceph_abort("impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual void get_prev_slot(search_position_t&, // IN&OUT
key_view_t* = nullptr, // OUT
const value_header_t** = nullptr) const { // OUT
ceph_abort("impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual void get_next_slot(search_position_t&, // IN&OUT
key_view_t* = nullptr, // OUT
const value_header_t** = nullptr) const { // OUT
ceph_abort("impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual void get_largest_slot(search_position_t* = nullptr, // OUT
key_view_t* = nullptr, // OUT
const value_header_t** = nullptr) const { // OUT
ceph_abort("impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual lookup_result_t<node_type_t::LEAF> lower_bound(
const key_hobj_t&, MatchHistory&,
key_view_t* = nullptr, leaf_marker_t = {}) const {
ceph_abort("impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual const value_header_t* insert(
const key_hobj_t&, const value_config_t&, search_position_t&, match_stage_t&, node_offset_t&) {
ceph_abort("impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
virtual std::tuple<search_position_t, bool, const value_header_t*> split_insert(
NodeExtentMutable&, NodeImpl&, const key_hobj_t&, const value_config_t&,
search_position_t&, match_stage_t&, node_offset_t&) {
ceph_abort("impossible path");
}
virtual std::tuple<match_stage_t, node_offset_t> evaluate_insert(
const key_hobj_t&, const value_config_t&,
const MatchHistory&, match_stat_t, search_position_t&) const = 0;
virtual std::pair<NodeExtentMutable&, ValueDeltaRecorder*>
prepare_mutate_value_payload(context_t) = 0;
struct fresh_impl_t {
LeafNodeImplURef impl;
NodeExtentMutable mut;
std::pair<NodeImplURef, NodeExtentMutable> make_pair() {
return {std::move(impl), mut};
}
};
static eagain_ifuture<fresh_impl_t> allocate(context_t, laddr_t, field_type_t, bool);
static LeafNodeImplURef load(NodeExtentRef, field_type_t);
protected:
LeafNodeImpl() = default;
};
}
| 9,486 | 34.00738 | 101 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node_layout.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <ostream>
#include <sstream>
#include "common/likely.h"
#include "crimson/os/seastore/logging.h"
#include "node_extent_accessor.h"
#include "node_impl.h"
#include "stages/node_stage_layout.h"
namespace crimson::os::seastore::onode {
template <node_type_t NODE_TYPE> struct insert_key_type;
template <> struct insert_key_type<node_type_t::INTERNAL> {
static constexpr auto type = KeyT::VIEW; };
template <> struct insert_key_type<node_type_t::LEAF> {
static constexpr auto type = KeyT::HOBJ; };
template <node_type_t NODE_TYPE> struct node_impl_type;
template <> struct node_impl_type<node_type_t::INTERNAL> {
using type = InternalNodeImpl; };
template <> struct node_impl_type<node_type_t::LEAF> {
using type = LeafNodeImpl; };
template <node_type_t NODE_TYPE> struct node_marker_type;
template <> struct node_marker_type<node_type_t::INTERNAL> {
using type = InternalNodeImpl::internal_marker_t; };
template <> struct node_marker_type<node_type_t::LEAF> {
using type = LeafNodeImpl::leaf_marker_t; };
/**
* NodeLayoutT
*
* Contains templated and concrete implementations for both InternalNodeImpl
* and LeafNodeImpl under a specific node layout.
*/
template <typename FieldType, node_type_t NODE_TYPE>
class NodeLayoutT final : public InternalNodeImpl, public LeafNodeImpl {
public:
using URef = std::unique_ptr<NodeLayoutT>;
using extent_t = NodeExtentAccessorT<FieldType, NODE_TYPE>;
using parent_t = typename node_impl_type<NODE_TYPE>::type;
using marker_t = typename node_marker_type<NODE_TYPE>::type;
using node_stage_t = typename extent_t::node_stage_t;
using stage_t = node_to_stage_t<node_stage_t>;
using position_t = typename extent_t::position_t;
using value_input_t = typename extent_t::value_input_t;
using value_t = typename extent_t::value_t;
static constexpr auto FIELD_TYPE = extent_t::FIELD_TYPE;
static constexpr auto KEY_TYPE = insert_key_type<NODE_TYPE>::type;
static constexpr auto STAGE = stage_t::STAGE;
NodeLayoutT(const NodeLayoutT&) = delete;
NodeLayoutT(NodeLayoutT&&) = delete;
NodeLayoutT& operator=(const NodeLayoutT&) = delete;
NodeLayoutT& operator=(NodeLayoutT&&) = delete;
~NodeLayoutT() override = default;
static URef load(NodeExtentRef extent) {
std::unique_ptr<NodeLayoutT> ret(new NodeLayoutT(extent));
return ret;
}
static eagain_ifuture<typename parent_t::fresh_impl_t> allocate(
context_t c, laddr_t hint, bool is_level_tail, level_t level) {
LOG_PREFIX(OTree::Layout::allocate);
extent_len_t extent_size;
if constexpr (NODE_TYPE == node_type_t::LEAF) {
extent_size = c.vb.get_leaf_node_size();
} else {
extent_size = c.vb.get_internal_node_size();
}
return c.nm.alloc_extent(c.t, hint, extent_size
).handle_error_interruptible(
eagain_iertr::pass_further{},
crimson::ct_error::input_output_error::handle(
[FNAME, c, extent_size, is_level_tail, level] {
SUBERRORT(seastore_onode,
"EIO -- extent_size={}, is_level_tail={}, level={}",
c.t, extent_size, is_level_tail, level);
ceph_abort("fatal error");
})
).si_then([is_level_tail, level](auto extent) {
assert(extent);
assert(extent->is_initial_pending());
auto mut = extent->get_mutable();
node_stage_t::bootstrap_extent(
mut, FIELD_TYPE, NODE_TYPE, is_level_tail, level);
return typename parent_t::fresh_impl_t{
std::unique_ptr<parent_t>(new NodeLayoutT(extent)), mut};
});
}
protected:
/*
* NodeImpl
*/
node_type_t node_type() const override { return NODE_TYPE; }
field_type_t field_type() const override { return FIELD_TYPE; }
laddr_t laddr() const override { return extent.get_laddr(); }
const char* read() const override { return extent.read().p_start(); }
extent_len_t get_node_size() const override { return extent.get_length(); }
nextent_state_t get_extent_state() const override { return extent.get_state(); }
void prepare_mutate(context_t c) override { return extent.prepare_mutate(c); }
bool is_level_tail() const override { return extent.read().is_level_tail(); }
void validate_non_empty() const override {
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
if (is_level_tail()) {
return;
}
}
assert(!is_keys_empty());
}
bool is_keys_empty() const override { return extent.read().keys() == 0; }
bool has_single_value() const override {
validate_non_empty();
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
return ((is_level_tail() && is_keys_empty()) ||
(!is_level_tail() && stage_t::is_keys_one(extent.read())));
} else {
return stage_t::is_keys_one(extent.read());
}
}
level_t level() const override { return extent.read().level(); }
node_offset_t free_size() const override { return extent.read().free_size(); }
extent_len_t total_size() const override { return extent.read().total_size(); }
bool is_extent_retired() const override { return extent.is_retired(); }
std::optional<key_view_t> get_pivot_index() const override {
if (is_level_tail()) {
return std::nullopt;
}
assert(!is_keys_empty());
key_view_t pivot_index;
stage_t::template get_largest_slot<false, true, false>(
extent.read(), nullptr, &pivot_index, nullptr);
return {pivot_index};
}
bool is_size_underflow() const override {
    /**
     * There are 2 possible node-merge strategies:
     *
     * The first is to rebalance and merge nodes eagerly, preferring tree
     * fullness as much as possible in order to save space and improve key
     * density for lookup, in exchange for the effort of frequent merge,
     * split and rebalance. These operations cannot benefit from seastore
     * deltas because they allocate fresh extents which need to be written
     * into the journal as a whole, making write amplification much larger.
     *
     * The second is to delay rebalance and merge. When submitting the
     * transaction, a simple insert or erase only needs to append a delta
     * including just enough information about the inserted/erased item. The
     * downside is that tree fullness is not as good as with the first
     * strategy.
     *
     * Currently the decision is the second way, delaying merge until the
     * node is only 1/4 full, so that:
     * - After a split operation (making the node at least 1/2 full):
     *   - The next merge needs to erase items taking at least 1/4 space;
     *   - The next split needs to insert items taking at most 1/2 space;
     * - After a merge operation (making the node at least 1/2 full):
     *   - The next merge needs to erase items taking at least 1/4 space;
     *   - The next split needs to insert items taking at most 1/2 space;
     * - TODO: before node rebalance is implemented, the node size can stay
     *   below the underflow limit if it cannot be merged with peers;
     */
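    /* Illustrative numbers (assumed for clarity, not stated in the source):
     * with a 4096 B node whose header takes roughly 64 B, full_kv_size is
     * about 4032 B, so the node is considered underflow once filled_kv_size
     * drops to about 1008 B, roughly a quarter of the usable space, at which
     * point a merge with a sibling would presumably be considered by the
     * caller.
     */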
auto& node_stage = extent.read();
size_t empty_size = node_stage.size_before(0);
size_t filled_kv_size = filled_size() - empty_size;
size_t full_kv_size = node_stage.total_size() - empty_size;
return filled_kv_size <= full_kv_size / 4;
}
std::tuple<match_stage_t, search_position_t>
erase(const search_position_t& pos) override {
LOG_PREFIX(OTree::Layout::erase);
SUBDEBUG(seastore_onode, "begin at erase_pos({}) ...", pos);
if (unlikely(LOGGER(seastore_onode).is_enabled(seastar::log_level::trace))) {
std::ostringstream sos;
dump(sos);
SUBTRACE(seastore_onode, "-- dump\n{}", sos.str());
}
auto [stage, next_or_last_pos] = extent.erase_replayable(cast_down<STAGE>(pos));
SUBDEBUG(seastore_onode, "done at erase_stage={}, n/l_pos({})", stage, next_or_last_pos);
if (unlikely(LOGGER(seastore_onode).is_enabled(seastar::log_level::trace))) {
std::ostringstream sos;
dump(sos);
SUBTRACE(seastore_onode, "-- dump\n{}", sos.str());
}
#ifndef NDEBUG
if (!is_keys_empty()) {
validate_layout();
}
#endif
return {stage, normalize(std::move(next_or_last_pos))};
}
std::tuple<match_stage_t, std::size_t> evaluate_merge(
NodeImpl& _right_node) override {
auto& left_node_stage = extent.read();
auto& right_node = dynamic_cast<NodeLayoutT&>(_right_node);
auto& right_node_stage = right_node.extent.read();
assert(NODE_TYPE == _right_node.node_type());
assert(FIELD_TYPE == _right_node.field_type());
assert(!is_level_tail());
assert(!is_keys_empty());
match_stage_t merge_stage;
node_offset_t size_comp;
if (right_node.is_keys_empty()) {
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
assert(right_node.is_level_tail());
merge_stage = STAGE;
size_comp = right_node_stage.header_size();
} else {
ceph_abort("impossible path");
}
} else {
key_view_t left_pivot_index;
stage_t::template get_largest_slot<false, true, false>(
left_node_stage, nullptr, &left_pivot_index, nullptr);
std::tie(merge_stage, size_comp) = stage_t::evaluate_merge(
left_pivot_index, right_node_stage);
}
auto size_left = filled_size();
auto size_right = right_node.filled_size();
assert(size_right > size_comp);
std::size_t merge_size = size_left + size_right - size_comp;
return {merge_stage, merge_size};
}
search_position_t merge(
NodeExtentMutable& mut,
NodeImpl& _right_node,
match_stage_t merge_stage,
extent_len_t merge_size) override {
LOG_PREFIX(OTree::Layout::merge);
auto& left_node_stage = extent.read();
auto& right_node = dynamic_cast<NodeLayoutT&>(_right_node);
auto& right_node_stage = right_node.extent.read();
if (unlikely(LOGGER(seastore_onode).is_enabled(seastar::log_level::debug))) {
{
std::ostringstream sos;
dump(sos);
SUBDEBUG(seastore_onode, "-- left node dump\n{}", sos.str());
}
{
std::ostringstream sos;
right_node.dump(sos);
SUBDEBUG(seastore_onode, "-- right node dump\n{}", sos.str());
}
}
assert(NODE_TYPE == _right_node.node_type());
assert(FIELD_TYPE == _right_node.field_type());
assert(!is_level_tail());
assert(!is_keys_empty());
if (right_node.is_level_tail()) {
node_stage_t::update_is_level_tail(mut, left_node_stage, true);
build_name();
}
position_t left_last_pos;
stage_t::template get_largest_slot<true, false, false>(
left_node_stage, &left_last_pos, nullptr, nullptr);
if (right_node.is_keys_empty()) {
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
assert(right_node.is_level_tail());
laddr_t tail_value = right_node_stage.get_end_p_laddr()->value;
auto p_write = left_node_stage.get_end_p_laddr();
mut.copy_in_absolute((void*)p_write, tail_value);
} else {
ceph_abort("impossible path");
}
} else {
typename stage_t::template StagedAppender<KeyT::VIEW> left_appender;
left_appender.init_tail(&mut, left_node_stage, merge_stage);
typename stage_t::StagedIterator right_append_at;
right_append_at.set(right_node_stage);
auto pos_end = position_t::end();
stage_t::template append_until<KeyT::VIEW>(
right_append_at, left_appender, pos_end, STAGE);
assert(right_append_at.is_end());
left_appender.wrap();
}
if (unlikely(LOGGER(seastore_onode).is_enabled(seastar::log_level::debug))) {
std::ostringstream sos;
dump(sos);
SUBDEBUG(seastore_onode, "-- merged node dump\n{}", sos.str());
}
assert(merge_size == filled_size());
return normalize(std::move(left_last_pos));
}
eagain_ifuture<NodeExtentMutable>
rebuild_extent(context_t c) override {
assert(!is_keys_empty());
key_view_t first_index;
stage_t::template get_slot<true, false>(
extent.read(), position_t::begin(), &first_index, nullptr);
auto hint = first_index.get_hint();
return extent.rebuild(c, hint).si_then([this] (auto mut) {
// addr may change
build_name();
return mut;
});
}
eagain_ifuture<> retire_extent(context_t c) override {
return extent.retire(c);
}
search_position_t make_tail() override {
auto&& ret = extent.make_tail_replayable();
// is_level_tail is changed
build_name();
return normalize(std::move(ret));
}
node_stats_t get_stats() const override {
node_stats_t stats;
auto& node_stage = extent.read();
key_view_t index_key;
if (!is_keys_empty()) {
stage_t::get_stats(node_stage, stats, index_key);
}
stats.size_persistent = extent.get_length();
stats.size_filled = filled_size();
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
if (is_level_tail()) {
stats.size_logical += sizeof(value_t);
stats.size_value += sizeof(value_t);
stats.num_kvs += 1;
}
}
return stats;
}
std::ostream& dump(std::ostream& os) const override {
auto& node_stage = extent.read();
auto p_start = node_stage.p_start();
dump_brief(os);
auto stats = get_stats();
os << " num_kvs=" << stats.num_kvs
<< ", logical=" << stats.size_logical
<< "B, overhead=" << stats.size_overhead
<< "B, value=" << stats.size_value << "B";
os << ":\n header: " << node_stage_t::header_size() << "B";
size_t size = 0u;
if (!is_keys_empty()) {
stage_t::dump(node_stage, os, " ", size, p_start);
} else {
size += node_stage_t::header_size();
if (NODE_TYPE == node_type_t::LEAF || !node_stage.is_level_tail()) {
os << " empty!";
}
}
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
if (node_stage.is_level_tail()) {
size += sizeof(laddr_t);
auto value_ptr = node_stage.get_end_p_laddr();
int offset = reinterpret_cast<const char*>(value_ptr) - p_start;
os << "\n tail value: 0x"
<< std::hex << value_ptr->value << std::dec
<< " " << size << "B"
<< " @" << offset << "B";
}
}
assert(size == filled_size());
return os;
}
std::ostream& dump_brief(std::ostream& os) const override {
os << name
<< "(filled=" << filled_size() << "B"
<< ", free=" << extent.read().free_size() << "B)";
return os;
}
const std::string& get_name() const override { return name; }
void validate_layout() const override {
#ifndef NDEBUG
stage_t::validate(extent.read());
#endif
}
void test_copy_to(NodeExtentMutable& to) const override {
extent.test_copy_to(to);
}
void test_set_tail(NodeExtentMutable& mut) override {
node_stage_t::update_is_level_tail(mut, extent.read(), true);
build_name();
}
/*
* Common
*/
void get_slot(const search_position_t& pos,
key_view_t* p_index_key = nullptr,
const value_t** pp_value = nullptr) const override {
assert(!is_keys_empty());
assert(!pos.is_end());
if (p_index_key && pp_value) {
stage_t::template get_slot<true, true>(
extent.read(), cast_down<STAGE>(pos), p_index_key, pp_value);
} else if (!p_index_key && pp_value) {
stage_t::template get_slot<false, true>(
extent.read(), cast_down<STAGE>(pos), nullptr, pp_value);
} else if (p_index_key && !pp_value) {
stage_t::template get_slot<true, false>(
extent.read(), cast_down<STAGE>(pos), p_index_key, nullptr);
} else {
ceph_abort("impossible path");
}
#ifndef NDEBUG
if (pp_value) {
assert((const char*)(*pp_value) - extent.read().p_start() <
extent.get_length());
}
#endif
}
void get_prev_slot(search_position_t& pos,
key_view_t* p_index_key = nullptr,
const value_t** pp_value = nullptr) const override {
assert(!is_keys_empty());
assert(!pos.is_end());
auto& _pos = cast_down<STAGE>(pos);
#ifndef NDEBUG
auto nxt_pos = _pos;
#endif
if (!p_index_key && pp_value) {
stage_t::template get_prev_slot<false, true>(
extent.read(), _pos, nullptr, pp_value);
} else {
ceph_abort("not implemented");
}
#ifndef NDEBUG
auto _nxt_pos = _pos;
stage_t::template get_next_slot<false, false>(
extent.read(), _nxt_pos, nullptr, nullptr);
assert(nxt_pos == _nxt_pos);
#endif
}
void get_next_slot(search_position_t& pos,
key_view_t* p_index_key = nullptr,
const value_t** pp_value = nullptr) const override {
assert(!is_keys_empty());
assert(!pos.is_end());
bool find_next;
if (p_index_key && pp_value) {
find_next = stage_t::template get_next_slot<true, true>(
extent.read(), cast_down<STAGE>(pos), p_index_key, pp_value);
} else if (!p_index_key && pp_value) {
find_next = stage_t::template get_next_slot<false, true>(
extent.read(), cast_down<STAGE>(pos), nullptr, pp_value);
} else {
ceph_abort("not implemented");
}
if (find_next) {
pos = search_position_t::end();
}
}
void get_largest_slot(search_position_t* p_pos = nullptr,
key_view_t* p_index_key = nullptr,
const value_t** pp_value = nullptr) const override {
assert(!is_keys_empty());
if (p_pos && p_index_key && pp_value) {
stage_t::template get_largest_slot<true, true, true>(
extent.read(), &cast_down_fill_0<STAGE>(*p_pos), p_index_key, pp_value);
} else if (!p_pos && p_index_key && !pp_value) {
stage_t::template get_largest_slot<false, true, false>(
extent.read(), nullptr, p_index_key, nullptr);
} else if (p_pos && !p_index_key && pp_value) {
stage_t::template get_largest_slot<true, false, true>(
extent.read(), &cast_down_fill_0<STAGE>(*p_pos), nullptr, pp_value);
} else if (p_pos && !p_index_key && !pp_value) {
stage_t::template get_largest_slot<true, false, false>(
extent.read(), &cast_down_fill_0<STAGE>(*p_pos), nullptr, nullptr);
} else {
ceph_abort("not implemented");
}
}
lookup_result_t<NODE_TYPE> lower_bound(
const key_hobj_t& key, MatchHistory& history,
key_view_t* index_key=nullptr, marker_t={}) const override {
auto& node_stage = extent.read();
if constexpr (NODE_TYPE == node_type_t::LEAF) {
if (unlikely(is_keys_empty())) {
history.set<STAGE_LEFT>(MatchKindCMP::LT);
return lookup_result_t<NODE_TYPE>::end();
}
}
assert(!is_keys_empty());
typename stage_t::result_t result_raw;
if (index_key) {
result_raw = stage_t::template lower_bound<true>(
node_stage, key, history, index_key);
#ifndef NDEBUG
if (!result_raw.is_end()) {
key_view_t index;
stage_t::template get_slot<true, false>(
node_stage, result_raw.position, &index, nullptr);
assert(index == *index_key);
}
#endif
} else {
result_raw = stage_t::lower_bound(node_stage, key, history);
}
#ifndef NDEBUG
if (result_raw.is_end()) {
assert(result_raw.mstat == MSTAT_END);
} else {
key_view_t index;
stage_t::template get_slot<true, false>(
node_stage, result_raw.position, &index, nullptr);
assert_mstat(key, index, result_raw.mstat);
}
#endif
// calculate MSTAT_LT3
if constexpr (FIELD_TYPE == field_type_t::N0) {
// currently only internal node checks mstat
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
if (result_raw.mstat == MSTAT_LT2) {
auto cmp =
key <=> node_stage[result_raw.position.index].shard_pool;
assert(cmp != std::strong_ordering::greater);
if (cmp != 0) {
result_raw.mstat = MSTAT_LT3;
}
}
}
}
auto result = normalize(std::move(result_raw));
if (result.is_end()) {
assert(node_stage.is_level_tail());
assert(result.p_value == nullptr);
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
result.p_value = node_stage.get_end_p_laddr();
}
} else {
assert(result.p_value != nullptr);
}
return result;
}
const value_t* insert(
const full_key_t<KEY_TYPE>& key, const value_input_t& value,
search_position_t& insert_pos, match_stage_t& insert_stage,
node_offset_t& insert_size) override {
LOG_PREFIX(OTree::Layout::insert);
SUBDEBUG(seastore_onode,
"begin at insert_pos({}), insert_stage={}, insert_size={}B ...",
insert_pos, insert_stage, insert_size);
if (unlikely(LOGGER(seastore_onode).is_enabled(seastar::log_level::trace))) {
std::ostringstream sos;
dump(sos);
SUBTRACE(seastore_onode, "-- dump\n{}", sos.str());
}
auto ret = extent.template insert_replayable<KEY_TYPE>(
key, value, cast_down<STAGE>(insert_pos), insert_stage, insert_size);
SUBDEBUG(seastore_onode,
"done at insert_pos({}), insert_stage={}, insert_size={}B",
insert_pos, insert_stage, insert_size);
if (unlikely(LOGGER(seastore_onode).is_enabled(seastar::log_level::trace))) {
std::ostringstream sos;
dump(sos);
SUBTRACE(seastore_onode, "-- dump\n{}", sos.str());
}
validate_layout();
#ifndef NDEBUG
key_view_t index;
get_slot(insert_pos, &index, nullptr);
assert(index == key);
#endif
return ret;
}
std::tuple<search_position_t, bool, const value_t*> split_insert(
NodeExtentMutable& right_mut, NodeImpl& _right_impl,
const full_key_t<KEY_TYPE>& key, const value_input_t& value,
search_position_t& _insert_pos, match_stage_t& insert_stage,
node_offset_t& insert_size) override {
LOG_PREFIX(OTree::Layout::split_insert);
assert(_right_impl.node_type() == NODE_TYPE);
assert(_right_impl.field_type() == FIELD_TYPE);
auto& right_impl = dynamic_cast<NodeLayoutT&>(_right_impl);
SUBDEBUG(seastore_onode,
"begin at insert_pos({}), insert_stage={}, insert_size={}B ...",
_insert_pos, insert_stage, insert_size);
if (unlikely(LOGGER(seastore_onode).is_enabled(seastar::log_level::debug))) {
std::ostringstream sos;
dump(sos);
SUBDEBUG(seastore_onode, "-- dump\n{}", sos.str());
}
#ifdef UNIT_TESTS_BUILT
auto insert_stage_pre = insert_stage;
#endif
auto& insert_pos = cast_down<STAGE>(_insert_pos);
auto& node_stage = extent.read();
typename stage_t::StagedIterator split_at;
bool is_insert_left;
size_t split_size;
size_t target_split_size;
{
size_t empty_size = node_stage.size_before(0);
size_t filled_kv_size = filled_size() - empty_size;
/** NODE_BLOCK_SIZE considerations
*
* Generally,
* target_split_size = (filled_size + insert_size) / 2
* We can have two locate_split() strategies:
* A. the simpler one is to locate the largest split position where
* the estimated left_node_size <= target_split_size;
       *   B. the fair one takes a further step to look at the size P KiB of
       *      the next slot, and if left_node_size + P/2 < target_split_size,
       *      compensates the split position to include that next slot;
*
* Say that the node_block_size = N KiB, the largest allowed
* insert_size = 1/I * N KiB (I > 1). We want to identify the minimal 'I'
       * that won't lead to a "double split" effect, meaning that after a
       * split, the right node size is still larger than N KiB and needs to
       * split again. A "double split" makes splitting much more complicated and
* we can no longer identify whether the node is safe under concurrent
* operations.
*
* We need to evaluate the worst case in order to identify 'I'. This means:
* - filled_size ~= N KiB
* - insert_size == N/I KiB
* - target_split_size ~= (I+1)/2I * N KiB
* To simplify the below calculations, node_block_size is normalized to 1.
*
* With strategy A, the worst case is when left_node_size cannot include
* the next slot that will just overflow the target_split_size:
* - left_node_size + 1/I ~= (I+1)/2I
* - left_node_size ~= (I-1)/2I
* - right_node_size ~= 1 + 1/I - left_node_size ~= (I+3)/2I
       * The right_node_size cannot be larger than the node_block_size in the
* worst case, which means (I+3)/2I < 1, so I > 3, meaning the largest
* possible insert_size must be smaller than 1/3 of the node_block_size.
*
* With strategy B, the worst case is when left_node_size cannot include
* the next slot that will just overflow the threshold
* target_split_size - 1/2I, thus:
* - left_node_size ~= (I+1)/2I - 1/2I ~= 1/2
* - right_node_size ~= 1 + 1/I - 1/2 ~= (I+2)/2I < node_block_size(1)
* - I > 2
* This means the largest possible insert_size must be smaller than 1/2 of
* the node_block_size, which is better than strategy A.
*
* In order to avoid "double split", there is another side-effect we need
* to take into consideration: if split happens with snap-gen indexes, the
       * corresponding ns-oid string needs to be copied to the right node. That is
* to say: right_node_size + string_size < node_block_size.
*
* Say that the largest allowed string size is 1/S of the largest allowed
       * insert_size N/I KiB. If we go with strategy B, and the split happens
       * with snap-gen indexes and just overflows the target_split_size:
* - left_node_size ~= target_split_size - 1/2 * (1/I - 1/IS)
* ~= 1/2 + 1/2IS
* - right_node_size ~= 1 + 1/I - left_node_size + 1/IS
* ~= 1/2 + 1/I + 1/2IS < 1
* - I > 2 + 1/S (S > 1)
*
* Now back to NODE_BLOCK_SIZE calculation, if we have limits of at most
* X KiB ns-oid string and Y KiB of value to store in this BTree, then:
* - largest_insert_size ~= X+Y KiB
* - 1/S == X/(X+Y)
* - I > (3X+2Y)/(X+Y)
* - node_block_size(N) == I * insert_size > 3X+2Y KiB
*
* In conclusion,
* (TODO) the current node block size (4 KiB) is too small to
       * store an entire 2 KiB ns-oid string. We need to consider a larger
* node_block_size.
*
* We are setting X = Y = 640 B in order not to break the current
       * implementations with a 4 KiB node.
*
       * (TODO) Implement smarter logic to check when "double split" happens.
*/
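      /* A worked instance of the bound above (illustrative only, derived
       * from the formulas in this comment rather than stated in the source):
       * with X = Y = 640 B,
       *   1/S = X/(X+Y) = 1/2, so I > 2 + 1/S = 2.5, and
       *   node_block_size > 3X + 2Y = 3200 B,
       * which the current 4096 B node satisfies, while the largest insert
       * (X + Y = 1280 B) stays below node_block_size / I ~= 1638 B.
       */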
target_split_size = empty_size + (filled_kv_size + insert_size) / 2;
assert(insert_size < (node_stage.total_size() - empty_size) / 2);
std::optional<bool> _is_insert_left;
split_at.set(node_stage);
split_size = 0;
bool locate_nxt = stage_t::recursively_locate_split_inserted(
split_size, 0, target_split_size, insert_pos,
insert_stage, insert_size, _is_insert_left, split_at);
is_insert_left = *_is_insert_left;
SUBDEBUG(seastore_onode,
"-- located split_at({}), insert_pos({}), is_insert_left={}, "
"split_size={}B(target={}B, current={}B)",
split_at, insert_pos, is_insert_left,
split_size, target_split_size, filled_size());
// split_size can be larger than target_split_size in strategy B
// assert(split_size <= target_split_size);
if (locate_nxt) {
assert(insert_stage == STAGE);
assert(split_at.get().is_last());
split_at.set_end();
assert(insert_pos.index == split_at.index());
}
}
auto append_at = split_at;
// TODO(cross-node string dedup)
typename stage_t::template StagedAppender<KEY_TYPE> right_appender;
right_appender.init_empty(&right_mut, right_mut.get_write());
const value_t* p_value = nullptr;
if (!is_insert_left) {
// right node: append [start(append_at), insert_pos)
stage_t::template append_until<KEY_TYPE>(
append_at, right_appender, insert_pos, insert_stage);
SUBDEBUG(seastore_onode,
"-- right appended until "
"insert_pos({}), insert_stage={}, insert/append the rest ...",
insert_pos, insert_stage);
// right node: append [insert_pos(key, value)]
bool is_front_insert = (insert_pos == position_t::begin());
[[maybe_unused]] bool is_end = stage_t::template append_insert<KEY_TYPE>(
key, value, append_at, right_appender,
is_front_insert, insert_stage, p_value);
assert(append_at.is_end() == is_end);
} else {
SUBDEBUG(seastore_onode, "-- right appending ...");
}
// right node: append (insert_pos, end)
auto pos_end = position_t::end();
stage_t::template append_until<KEY_TYPE>(
append_at, right_appender, pos_end, STAGE);
assert(append_at.is_end());
right_appender.wrap();
if (unlikely(LOGGER(seastore_onode).is_enabled(seastar::log_level::debug))) {
std::ostringstream sos;
right_impl.dump(sos);
SUBDEBUG(seastore_onode, "-- right node dump\n{}", sos.str());
}
right_impl.validate_layout();
// mutate left node
if (is_insert_left) {
SUBDEBUG(seastore_onode,
"-- left trim/insert at insert_pos({}), insert_stage={} ...",
insert_pos, insert_stage);
p_value = extent.template split_insert_replayable<KEY_TYPE>(
split_at, key, value, insert_pos, insert_stage, insert_size);
#ifndef NDEBUG
key_view_t index;
get_slot(_insert_pos, &index, nullptr);
assert(index == key);
#endif
} else {
SUBDEBUG(seastore_onode, "-- left trim ...");
#ifndef NDEBUG
key_view_t index;
right_impl.get_slot(_insert_pos, &index, nullptr);
assert(index == key);
#endif
extent.split_replayable(split_at);
}
if (right_impl.is_level_tail()) {
// is_level_tail of left is changed by split/split_insert
build_name();
}
if (unlikely(LOGGER(seastore_onode).is_enabled(seastar::log_level::debug))) {
std::ostringstream sos;
dump(sos);
SUBDEBUG(seastore_onode, "-- left node dump\n{}", sos.str());
}
validate_layout();
assert(p_value);
auto split_pos = normalize(split_at.get_pos());
SUBDEBUG(seastore_onode,
"done at insert_pos({}), insert_stage={}, insert_size={}B, "
"split_at({}), is_insert_left={}, split_size={}B(target={}B)",
_insert_pos, insert_stage, insert_size, split_pos,
is_insert_left, split_size, target_split_size);
assert(split_size == filled_size());
#ifdef UNIT_TESTS_BUILT
InsertType insert_type;
search_position_t last_pos;
if (is_insert_left) {
stage_t::template get_largest_slot<true, false, false>(
extent.read(), &cast_down_fill_0<STAGE>(last_pos), nullptr, nullptr);
} else {
node_stage_t right_stage{reinterpret_cast<FieldType*>(right_mut.get_write()),
right_mut.get_length()};
stage_t::template get_largest_slot<true, false, false>(
right_stage, &cast_down_fill_0<STAGE>(last_pos), nullptr, nullptr);
}
if (_insert_pos == search_position_t::begin()) {
insert_type = InsertType::BEGIN;
} else if (_insert_pos == last_pos) {
insert_type = InsertType::LAST;
} else {
insert_type = InsertType::MID;
}
last_split = {split_pos, insert_stage_pre, is_insert_left, insert_type};
#endif
return {split_pos, is_insert_left, p_value};
}
/*
* InternalNodeImpl
*/
const laddr_packed_t* get_tail_value() const override {
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
assert(is_level_tail());
return extent.read().get_end_p_laddr();
} else {
ceph_abort("impossible path");
}
}
void replace_child_addr(
const search_position_t& pos, laddr_t dst, laddr_t src) override {
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
LOG_PREFIX(OTree::Layout::replace_child_addr);
SUBDEBUG(seastore_onode, "update from {:#x} to {:#x} at pos({}) ...", src, dst, pos);
const laddr_packed_t* p_value;
if (pos.is_end()) {
assert(is_level_tail());
p_value = get_tail_value();
} else {
get_slot(pos, nullptr, &p_value);
}
assert(p_value->value == src);
extent.update_child_addr_replayable(dst, const_cast<laddr_packed_t*>(p_value));
} else {
ceph_abort("impossible path");
}
}
std::tuple<match_stage_t, node_offset_t> evaluate_insert(
const key_view_t& key, const laddr_t& value,
search_position_t& insert_pos) const override {
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
auto& node_stage = extent.read();
match_stage_t insert_stage;
node_offset_t insert_size;
if (unlikely(is_keys_empty())) {
assert(insert_pos.is_end());
insert_stage = STAGE;
insert_size = stage_t::insert_size(key, value);
} else {
std::tie(insert_stage, insert_size) = stage_t::evaluate_insert(
node_stage, key, value, cast_down<STAGE>(insert_pos), false);
}
return {insert_stage, insert_size};
} else {
ceph_abort("impossible path");
}
}
/*
* LeafNodeImpl
*/
std::tuple<match_stage_t, node_offset_t> evaluate_insert(
const key_hobj_t& key, const value_config_t& value,
const MatchHistory& history, match_stat_t mstat,
search_position_t& insert_pos) const override {
if constexpr (NODE_TYPE == node_type_t::LEAF) {
if (unlikely(is_keys_empty())) {
assert(insert_pos.is_end());
assert(is_level_tail());
return {STAGE, stage_t::insert_size(key, value)};
} else {
return stage_t::evaluate_insert(
key, value, history, mstat, cast_down<STAGE>(insert_pos));
}
} else {
ceph_abort("impossible path");
}
}
std::pair<NodeExtentMutable&, ValueDeltaRecorder*>
prepare_mutate_value_payload(context_t c) {
return extent.prepare_mutate_value_payload(c);
}
private:
NodeLayoutT(NodeExtentRef extent) : extent{extent} {
build_name();
}
extent_len_t filled_size() const {
auto& node_stage = extent.read();
auto ret = node_stage.size_before(node_stage.keys());
assert(ret == node_stage.total_size() - node_stage.free_size());
return ret;
}
  // rebuild the name whenever addr, type, level, or tail is changed
void build_name() {
// XXX: maybe also include the extent state
std::ostringstream sos;
sos << "Node" << NODE_TYPE << FIELD_TYPE
<< "@0x" << std::hex << extent.get_laddr()
<< "+" << extent.get_length() << std::dec
<< "Lv" << (unsigned)level()
<< (is_level_tail() ? "$" : "");
name = sos.str();
}
extent_t extent;
std::string name = "Node-N/A";
};
using InternalNode0 = NodeLayoutT<node_fields_0_t, node_type_t::INTERNAL>;
using InternalNode1 = NodeLayoutT<node_fields_1_t, node_type_t::INTERNAL>;
using InternalNode2 = NodeLayoutT<node_fields_2_t, node_type_t::INTERNAL>;
using InternalNode3 = NodeLayoutT<internal_fields_3_t, node_type_t::INTERNAL>;
using LeafNode0 = NodeLayoutT<node_fields_0_t, node_type_t::LEAF>;
using LeafNode1 = NodeLayoutT<node_fields_1_t, node_type_t::LEAF>;
using LeafNode2 = NodeLayoutT<node_fields_2_t, node_type_t::LEAF>;
using LeafNode3 = NodeLayoutT<leaf_fields_3_t, node_type_t::LEAF>;
}
| 35,617 | 36.532139 | 94 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node_layout_replayable.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "node_extent_mutable.h"
#include "stages/node_stage.h"
#include "stages/stage.h"
namespace crimson::os::seastore::onode {
/**
* NodeLayoutReplayableT
*
 * Contains templated logic to modify the layout of a NodeExtent in ways
 * that are also replayable. Used by NodeExtentAccessorT at runtime and by
* DeltaRecorderT during replay.
*/
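/* Illustrative sketch (an assumption about usage, not part of the original
 * source): both the runtime path and the replay path are expected to apply
 * the same static helper against a NodeExtentMutable, e.g.
 *
 *   NodeLayoutReplayableT<FieldType, NODE_TYPE>::update_child_addr(
 *       mut, new_addr, p_addr);
 *
 * so that a recorded delta can reproduce exactly the same on-disk mutation.
 */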
template <typename FieldType, node_type_t NODE_TYPE>
struct NodeLayoutReplayableT {
using node_stage_t = node_extent_t<FieldType, NODE_TYPE>;
using stage_t = node_to_stage_t<node_stage_t>;
using position_t = typename stage_t::position_t;
using StagedIterator = typename stage_t::StagedIterator;
using value_input_t = value_input_type_t<NODE_TYPE>;
using value_t = value_type_t<NODE_TYPE>;
static constexpr auto FIELD_TYPE = FieldType::FIELD_TYPE;
template <KeyT KT>
static const value_t* insert(
NodeExtentMutable& mut,
const node_stage_t& node_stage,
const full_key_t<KT>& key,
const value_input_t& value,
position_t& insert_pos,
match_stage_t& insert_stage,
node_offset_t& insert_size) {
auto p_value = stage_t::template proceed_insert<KT, false>(
mut, node_stage, key, value, insert_pos, insert_stage, insert_size);
return p_value;
}
static void split(
NodeExtentMutable& mut,
const node_stage_t& node_stage,
StagedIterator& split_at) {
node_stage_t::update_is_level_tail(mut, node_stage, false);
stage_t::trim(mut, split_at);
}
template <KeyT KT>
static const value_t* split_insert(
NodeExtentMutable& mut,
const node_stage_t& node_stage,
StagedIterator& split_at,
const full_key_t<KT>& key,
const value_input_t& value,
position_t& insert_pos,
match_stage_t& insert_stage,
node_offset_t& insert_size) {
node_stage_t::update_is_level_tail(mut, node_stage, false);
stage_t::trim(mut, split_at);
auto p_value = stage_t::template proceed_insert<KT, true>(
mut, node_stage, key, value, insert_pos, insert_stage, insert_size);
return p_value;
}
static void update_child_addr(
NodeExtentMutable& mut, const laddr_t new_addr, laddr_packed_t* p_addr) {
assert(NODE_TYPE == node_type_t::INTERNAL);
mut.copy_in_absolute(p_addr, new_addr);
}
static std::tuple<match_stage_t, position_t> erase(
NodeExtentMutable& mut,
const node_stage_t& node_stage,
const position_t& _erase_pos) {
if (_erase_pos.is_end()) {
// must be internal node
assert(node_stage.is_level_tail());
// return erase_stage, last_pos
return update_last_to_tail(mut, node_stage);
}
assert(node_stage.keys() != 0);
position_t erase_pos = _erase_pos;
auto erase_stage = stage_t::erase(mut, node_stage, erase_pos);
// return erase_stage, next_pos
return {erase_stage, erase_pos};
}
static position_t make_tail(
NodeExtentMutable& mut,
const node_stage_t& node_stage) {
assert(!node_stage.is_level_tail());
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
auto [r_stage, r_last_pos] = update_last_to_tail(mut, node_stage);
std::ignore = r_stage;
return r_last_pos;
} else {
node_stage_t::update_is_level_tail(mut, node_stage, true);
// no need to calculate the last pos
return position_t::end();
}
}
private:
static std::tuple<match_stage_t, position_t> update_last_to_tail(
NodeExtentMutable& mut,
const node_stage_t& node_stage) {
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
assert(node_stage.keys() != 0);
position_t last_pos;
laddr_t last_value;
{
const laddr_packed_t* p_last_value;
stage_t::template get_largest_slot<true, false, true>(
node_stage, &last_pos, nullptr, &p_last_value);
last_value = p_last_value->value;
}
auto erase_pos = last_pos;
auto erase_stage = stage_t::erase(mut, node_stage, erase_pos);
assert(erase_pos.is_end());
node_stage_t::update_is_level_tail(mut, node_stage, true);
auto p_last_value = const_cast<laddr_packed_t*>(
node_stage.get_end_p_laddr());
mut.copy_in_absolute(p_last_value, last_value);
// return erase_stage, last_pos
return {erase_stage, last_pos};
} else {
ceph_abort("impossible path");
}
}
};
}
| 4,486 | 31.280576 | 79 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node_types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <cassert>
#include <ostream>
#include "fwd.h"
namespace crimson::os::seastore::onode {
constexpr uint8_t FIELD_TYPE_MAGIC = 0x25;
enum class field_type_t : uint8_t {
N0 = FIELD_TYPE_MAGIC,
N1,
N2,
N3,
_MAX
};
inline uint8_t to_unsigned(field_type_t type) {
auto value = static_cast<uint8_t>(type);
assert(value >= FIELD_TYPE_MAGIC);
assert(value < static_cast<uint8_t>(field_type_t::_MAX));
return value - FIELD_TYPE_MAGIC;
}
inline std::ostream& operator<<(std::ostream &os, field_type_t type) {
const char* const names[] = {"0", "1", "2", "3"};
auto index = to_unsigned(type);
os << names[index];
return os;
}
enum class node_type_t : uint8_t {
LEAF = 0,
INTERNAL
};
inline std::ostream& operator<<(std::ostream &os, const node_type_t& type) {
const char* const names[] = {"L", "I"};
auto index = static_cast<uint8_t>(type);
assert(index <= 1u);
os << names[index];
return os;
}
struct laddr_packed_t {
laddr_t value;
} __attribute__((packed));
inline std::ostream& operator<<(std::ostream& os, const laddr_packed_t& laddr) {
return os << "laddr_packed(0x" << std::hex << laddr.value << std::dec << ")";
}
using match_stat_t = int8_t;
constexpr match_stat_t MSTAT_END = -2; // index is search_position_t::end()
constexpr match_stat_t MSTAT_EQ = -1; // key == index
constexpr match_stat_t MSTAT_LT0 = 0; // key == index [pool/shard crush ns/oid]; key < index [snap/gen]
constexpr match_stat_t MSTAT_LT1 = 1; // key == index [pool/shard crush]; key < index [ns/oid]
constexpr match_stat_t MSTAT_LT2 = 2; // key < index [pool/shard crush ns/oid] ||
// key == index [pool/shard]; key < index [crush]
constexpr match_stat_t MSTAT_LT3 = 3; // key < index [pool/shard]
constexpr match_stat_t MSTAT_MIN = MSTAT_END;
constexpr match_stat_t MSTAT_MAX = MSTAT_LT3;
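// Illustrative reading of the table above (not from the original source):
// if the key and the index agree on pool/shard, crush, ns and oid but the
// key sorts before the index at snap/gen, the stat is MSTAT_LT0; if they
// already diverge at pool/shard, the stat is MSTAT_LT3.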
enum class node_delta_op_t : uint8_t {
INSERT,
SPLIT,
SPLIT_INSERT,
UPDATE_CHILD_ADDR,
ERASE,
MAKE_TAIL,
SUBOP_UPDATE_VALUE = 0xff,
};
/** nextent_state_t
*
 * The possible states of a tree node extent (NodeExtentAccessorT).
 *
 * A state transition implies changes to the following capabilities:
 * - mutability;
 * - whether mutations need to be recorded;
 * - whether the memory has been copied;
*
* load()----+
* |
* alloc() v
* | +--> [READ_ONLY] ---------+
* | | | |
* | | prepare_mutate() |
* | | | |
* | v v v
* | +--> [MUTATION_PENDING]---+
* | | |
* | | rebuild()
* | | |
* | v v
* +------->+--> [FRESH] <------------+
*
 * Note that NodeExtentAccessorT might still be MUTATION_PENDING/FRESH while
 * the internal extent has already become DIRTY, after transaction submission
 * has started but before node destruction and validation have completed.
*/
enum class nextent_state_t : uint8_t {
READ_ONLY = 0, // requires mutate for recording
// CLEAN/DIRTY
MUTATION_PENDING, // can mutate, needs recording
// MUTATION_PENDING
FRESH, // can mutate, no recording
// INITIAL_WRITE_PENDING
};
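/* Illustrative walk-through (an assumption for clarity, not from the
 * original source): an extent obtained via load() starts as READ_ONLY;
 * prepare_mutate() moves it to MUTATION_PENDING, where changes must be
 * recorded as deltas; rebuild() (or a fresh alloc()) yields a FRESH extent
 * that can be mutated in place without recording.
 */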
}
template <> struct fmt::formatter<crimson::os::seastore::onode::node_delta_op_t>
: fmt::formatter<std::string_view> {
using node_delta_op_t = crimson::os::seastore::onode::node_delta_op_t;
// parse is inherited from formatter<string_view>.
template <typename FormatContext>
auto format(node_delta_op_t op, FormatContext& ctx) {
std::string_view name = "unknown";
switch (op) {
case node_delta_op_t::INSERT:
name = "insert";
break;
case node_delta_op_t::SPLIT:
name = "split";
break;
case node_delta_op_t::SPLIT_INSERT:
name = "split_insert";
break;
case node_delta_op_t::UPDATE_CHILD_ADDR:
name = "update_child_addr";
break;
case node_delta_op_t::ERASE:
name = "erase";
break;
case node_delta_op_t::MAKE_TAIL:
name = "make_tail";
break;
case node_delta_op_t::SUBOP_UPDATE_VALUE:
name = "subop_update_value";
break;
}
return formatter<string_view>::format(name, ctx);
}
};
| 4,460 | 29.554795 | 104 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/super.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include "super.h"
#include "node.h"
namespace crimson::os::seastore::onode {
Ref<Node> RootNodeTrackerIsolated::get_root(Transaction& t) const
{
auto iter = tracked_supers.find(&t);
if (iter == tracked_supers.end()) {
return nullptr;
} else {
return iter->second->get_p_root();
}
}
Ref<Node> RootNodeTrackerShared::get_root(Transaction&) const
{
if (is_clean()) {
return nullptr;
} else {
return tracked_super->get_p_root();
}
}
}
| 567 | 18.586207 | 72 | cc |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/super.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <memory>
#include "crimson/common/type_helpers.h"
#include "fwd.h"
namespace crimson::os::seastore::onode {
class Node;
class Super;
/**
* RootNodeTracker
*
 * An abstracted tracker to get the root node for a given Transaction.
*/
class RootNodeTracker {
public:
virtual ~RootNodeTracker() = default;
virtual bool is_clean() const = 0;
virtual Ref<Node> get_root(Transaction&) const = 0;
static RootNodeTrackerURef create(bool read_isolated);
protected:
RootNodeTracker() = default;
RootNodeTracker(const RootNodeTracker&) = delete;
RootNodeTracker(RootNodeTracker&&) = delete;
RootNodeTracker& operator=(const RootNodeTracker&) = delete;
RootNodeTracker& operator=(RootNodeTracker&&) = delete;
virtual void do_track_super(Transaction&, Super&) = 0;
virtual void do_untrack_super(Transaction&, Super&) = 0;
friend class Super;
};
/**
* Super
*
 * The parent of the root node. It tracks the relationship between a
 * Transaction and the root node address.
*/
class Super {
public:
using URef = std::unique_ptr<Super>;
Super(const Super&) = delete;
Super(Super&&) = delete;
Super& operator=(const Super&) = delete;
Super& operator=(Super&&) = delete;
virtual ~Super() {
assert(tracked_root_node == nullptr);
tracker.do_untrack_super(t, *this);
}
virtual laddr_t get_root_laddr() const = 0;
virtual void write_root_laddr(context_t, laddr_t) = 0;
void do_track_root(Node& root) {
assert(tracked_root_node == nullptr);
tracked_root_node = &root;
}
void do_untrack_root(Node& root) {
assert(tracked_root_node == &root);
tracked_root_node = nullptr;
}
Node* get_p_root() const {
assert(tracked_root_node != nullptr);
return tracked_root_node;
}
protected:
Super(Transaction& t, RootNodeTracker& tracker)
: t{t}, tracker{tracker} {
tracker.do_track_super(t, *this);
}
private:
Transaction& t;
RootNodeTracker& tracker;
Node* tracked_root_node = nullptr;
};
/**
* RootNodeTrackerIsolated
*
* A concrete RootNodeTracker implementation which provides root node isolation
 * between Transactions for the Seastore backend.
*/
class RootNodeTrackerIsolated final : public RootNodeTracker {
public:
~RootNodeTrackerIsolated() override { assert(is_clean()); }
protected:
bool is_clean() const override {
return tracked_supers.empty();
}
void do_track_super(Transaction& t, Super& super) override {
assert(tracked_supers.find(&t) == tracked_supers.end());
tracked_supers[&t] = &super;
}
void do_untrack_super(Transaction& t, Super& super) override {
[[maybe_unused]] auto removed = tracked_supers.erase(&t);
assert(removed);
}
::Ref<Node> get_root(Transaction& t) const override;
std::map<Transaction*, Super*> tracked_supers;
};
/**
* RootNodeTrackerShared
*
* A concrete RootNodeTracker implementation which has no isolation between
 * Transactions for the Dummy backend.
*/
class RootNodeTrackerShared final : public RootNodeTracker {
public:
~RootNodeTrackerShared() override { assert(is_clean()); }
protected:
bool is_clean() const override {
return tracked_super == nullptr;
}
void do_track_super(Transaction&, Super& super) override {
assert(is_clean());
tracked_super = &super;
}
void do_untrack_super(Transaction&, Super& super) override {
assert(tracked_super == &super);
tracked_super = nullptr;
}
::Ref<Node> get_root(Transaction&) const override;
Super* tracked_super = nullptr;
};
inline RootNodeTrackerURef RootNodeTracker::create(bool read_isolated) {
if (read_isolated) {
return RootNodeTrackerURef(new RootNodeTrackerIsolated());
} else {
return RootNodeTrackerURef(new RootNodeTrackerShared());
}
}
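/* Illustrative usage (mirrors how Btree wires things up in tree.h; the
 * variable names are assumptions):
 *
 *   NodeExtentManagerURef nm = ...;
 *   auto tracker = RootNodeTracker::create(nm->is_read_isolated());
 *   // a read-isolated backend (e.g. Seastore) gets RootNodeTrackerIsolated
 *   // with per-Transaction roots; otherwise RootNodeTrackerShared keeps a
 *   // single shared root.
 */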
}
| 3,839 | 25.666667 | 79 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/tree.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <ostream>
#include "common/hobject.h"
#include "crimson/common/type_helpers.h"
#include "crimson/os/seastore/logging.h"
#include "fwd.h"
#include "node.h"
#include "node_extent_manager.h"
#include "stages/key_layout.h"
#include "super.h"
#include "value.h"
/**
* tree.h
*
 * A special-purpose, B-tree-based implementation that:
* - Fulfills requirements of OnodeManager to index ordered onode key-values;
* - Runs above seastore block and transaction layer;
* - Specially optimized for onode key structures and seastore
* delta/transaction semantics;
*
* Note: Cursor/Value are transactional, they cannot be used outside the scope
* of the according transaction, or the behavior is undefined.
*/
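/* Illustrative usage sketch (the value type, transaction plumbing and error
 * handling are assumptions, not part of this header):
 *
 *   Btree<SomeValueImpl> tree{std::move(nm)};
 *   ... tree.mkfs(t) ...                              // one-time format
 *   ... tree.insert(t, ghobj, {.payload_size = 256}) ...
 *   ... tree.find(t, ghobj).si_then([](auto cursor) {
 *         if (!cursor.is_end()) {
 *           auto value = cursor.value();
 *         }
 *       }) ...
 */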
namespace crimson::os::seastore::onode {
class Node;
class tree_cursor_t;
template <typename ValueImpl>
class Btree {
public:
Btree(NodeExtentManagerURef&& _nm)
: nm{std::move(_nm)},
root_tracker{RootNodeTracker::create(nm->is_read_isolated())} {}
~Btree() { assert(root_tracker->is_clean()); }
Btree(const Btree&) = delete;
Btree(Btree&&) = delete;
Btree& operator=(const Btree&) = delete;
Btree& operator=(Btree&&) = delete;
eagain_ifuture<> mkfs(Transaction& t) {
return Node::mkfs(get_context(t), *root_tracker);
}
class Cursor {
public:
Cursor(const Cursor&) = default;
Cursor(Cursor&&) noexcept = default;
Cursor& operator=(const Cursor&) = default;
Cursor& operator=(Cursor&&) = default;
~Cursor() = default;
bool is_end() const {
if (p_cursor->is_tracked()) {
return false;
} else if (p_cursor->is_invalid()) {
return true;
} else {
// we don't actually store end cursor because it will hold a reference
// to an end leaf node and is not kept updated.
assert(p_cursor->is_end());
ceph_abort("impossible");
}
}
/// Invalidate the Cursor before submitting transaction.
void invalidate() {
p_cursor.reset();
}
    // XXX: return key_view_t to avoid unnecessary ghobject_t constructions
ghobject_t get_ghobj() const {
assert(!is_end());
auto view = p_cursor->get_key_view(
p_tree->value_builder.get_header_magic());
assert(view.nspace().size() <=
p_tree->value_builder.get_max_ns_size());
assert(view.oid().size() <=
p_tree->value_builder.get_max_oid_size());
return view.to_ghobj();
}
ValueImpl value() {
assert(!is_end());
return p_tree->value_builder.build_value(
*p_tree->nm, p_tree->value_builder, p_cursor);
}
bool operator==(const Cursor& o) const { return operator<=>(o) == 0; }
eagain_ifuture<Cursor> get_next(Transaction& t) {
assert(!is_end());
auto this_obj = *this;
return p_cursor->get_next(p_tree->get_context(t)
).si_then([this_obj] (Ref<tree_cursor_t> next_cursor) {
next_cursor->assert_next_to(
*this_obj.p_cursor, this_obj.p_tree->value_builder.get_header_magic());
auto ret = Cursor{this_obj.p_tree, next_cursor};
assert(this_obj < ret);
return ret;
});
}
template <bool FORCE_MERGE = false>
eagain_ifuture<Cursor> erase(Transaction& t) {
assert(!is_end());
auto this_obj = *this;
return p_cursor->erase<FORCE_MERGE>(p_tree->get_context(t), true
).si_then([this_obj, this] (Ref<tree_cursor_t> next_cursor) {
assert(p_cursor->is_invalid());
if (next_cursor) {
assert(!next_cursor->is_end());
return Cursor{p_tree, next_cursor};
} else {
return Cursor{p_tree};
}
});
}
private:
Cursor(Btree* p_tree, Ref<tree_cursor_t> _p_cursor) : p_tree(p_tree) {
if (_p_cursor->is_invalid()) {
// we don't create Cursor from an invalid tree_cursor_t.
ceph_abort("impossible");
} else if (_p_cursor->is_end()) {
// we don't actually store end cursor because it will hold a reference
// to an end leaf node and is not kept updated.
} else {
assert(_p_cursor->is_tracked());
p_cursor = _p_cursor;
}
}
Cursor(Btree* p_tree) : p_tree{p_tree} {}
std::strong_ordering operator<=>(const Cursor& o) const {
assert(p_tree == o.p_tree);
return p_cursor->compare_to(
*o.p_cursor, p_tree->value_builder.get_header_magic());
}
static Cursor make_end(Btree* p_tree) {
return {p_tree};
}
Btree* p_tree;
Ref<tree_cursor_t> p_cursor = tree_cursor_t::get_invalid();
friend class Btree;
};
/*
* lookup
*/
eagain_ifuture<Cursor> begin(Transaction& t) {
return get_root(t).si_then([this, &t](auto root) {
return root->lookup_smallest(get_context(t));
}).si_then([this](auto cursor) {
return Cursor{this, cursor};
});
}
eagain_ifuture<Cursor> last(Transaction& t) {
return get_root(t).si_then([this, &t](auto root) {
return root->lookup_largest(get_context(t));
}).si_then([this](auto cursor) {
return Cursor(this, cursor);
});
}
Cursor end() {
return Cursor::make_end(this);
}
eagain_ifuture<bool> contains(Transaction& t, const ghobject_t& obj) {
return seastar::do_with(
key_hobj_t{obj},
[this, &t](auto& key) -> eagain_ifuture<bool> {
return get_root(t).si_then([this, &t, &key](auto root) {
// TODO: improve lower_bound()
return root->lower_bound(get_context(t), key);
}).si_then([](auto result) {
return MatchKindBS::EQ == result.match();
});
}
);
}
eagain_ifuture<Cursor> find(Transaction& t, const ghobject_t& obj) {
return seastar::do_with(
key_hobj_t{obj},
[this, &t](auto& key) -> eagain_ifuture<Cursor> {
return get_root(t).si_then([this, &t, &key](auto root) {
// TODO: improve lower_bound()
return root->lower_bound(get_context(t), key);
}).si_then([this](auto result) {
if (result.match() == MatchKindBS::EQ) {
return Cursor(this, result.p_cursor);
} else {
return Cursor::make_end(this);
}
});
}
);
}
/**
* lower_bound
*
* Returns a Cursor pointing to the element that is equal to the key, or the
* first element larger than the key, or the end Cursor if that element
* doesn't exist.
*/
eagain_ifuture<Cursor> lower_bound(Transaction& t, const ghobject_t& obj) {
return seastar::do_with(
key_hobj_t{obj},
[this, &t](auto& key) -> eagain_ifuture<Cursor> {
return get_root(t).si_then([this, &t, &key](auto root) {
return root->lower_bound(get_context(t), key);
}).si_then([this](auto result) {
return Cursor(this, result.p_cursor);
});
}
);
}
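  /* Illustrative example of the semantics above (object names are
   * simplified assumptions): if the tree holds onodes keyed "a" and "c",
   * lower_bound(t, "b") and lower_bound(t, "c") both land on "c" (the
   * latter as an exact match), while lower_bound(t, "d") returns end().
   */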
eagain_ifuture<Cursor> get_next(Transaction& t, Cursor& cursor) {
return cursor.get_next(t);
}
/*
* modifiers
*/
struct tree_value_config_t {
value_size_t payload_size = 256;
};
using insert_iertr = eagain_iertr::extend<
crimson::ct_error::value_too_large>;
insert_iertr::future<std::pair<Cursor, bool>>
insert(Transaction& t, const ghobject_t& obj, tree_value_config_t _vconf) {
LOG_PREFIX(OTree::insert);
if (_vconf.payload_size > value_builder.get_max_value_payload_size()) {
SUBERRORT(seastore_onode, "value payload size {} too large to insert {}",
t, _vconf.payload_size, key_hobj_t{obj});
return crimson::ct_error::value_too_large::make();
}
if (obj.hobj.nspace.size() > value_builder.get_max_ns_size()) {
SUBERRORT(seastore_onode, "namespace size {} too large to insert {}",
t, obj.hobj.nspace.size(), key_hobj_t{obj});
return crimson::ct_error::value_too_large::make();
}
if (obj.hobj.oid.name.size() > value_builder.get_max_oid_size()) {
SUBERRORT(seastore_onode, "oid size {} too large to insert {}",
t, obj.hobj.oid.name.size(), key_hobj_t{obj});
return crimson::ct_error::value_too_large::make();
}
value_config_t vconf{value_builder.get_header_magic(), _vconf.payload_size};
return seastar::do_with(
key_hobj_t{obj},
[this, &t, vconf](auto& key) -> eagain_ifuture<std::pair<Cursor, bool>> {
ceph_assert(key.is_valid());
return get_root(t).si_then([this, &t, &key, vconf](auto root) {
return root->insert(get_context(t), key, vconf, std::move(root));
}).si_then([this](auto ret) {
auto& [cursor, success] = ret;
return std::make_pair(Cursor(this, cursor), success);
});
}
);
}
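  // Usage sketch (assuming a Btree `tree`, a Transaction `t`, a ghobject_t
  // `obj` and a `payload_size` within the configured limits):
  //
  //   tree.insert(t, obj, {payload_size}
  //   ).si_then([](auto ret) {
  //     auto& [cursor, inserted] = ret;
  //     // `inserted` is false if the key already exists; either way
  //     // `cursor` points to the entry and its value can be initialized.
  //   }).handle_error_interruptible(
  //     [](const crimson::ct_error::value_too_large& e) {
  //       // the key or payload exceeded the limits checked above
  //     },
  //     crimson::ct_error::pass_further_all{}
  //   );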
eagain_ifuture<std::size_t> erase(Transaction& t, const ghobject_t& obj) {
return seastar::do_with(
key_hobj_t{obj},
[this, &t](auto& key) -> eagain_ifuture<std::size_t> {
return get_root(t).si_then([this, &t, &key](auto root) {
return root->erase(get_context(t), key, std::move(root));
});
}
);
}
eagain_ifuture<Cursor> erase(Transaction& t, Cursor& pos) {
return pos.erase(t);
}
eagain_ifuture<> erase(Transaction& t, Value& value) {
assert(value.is_tracked());
auto ref_cursor = value.p_cursor;
return ref_cursor->erase(get_context(t), false
).si_then([ref_cursor] (auto next_cursor) {
assert(ref_cursor->is_invalid());
assert(!next_cursor);
});
}
/*
* stats
*/
eagain_ifuture<size_t> height(Transaction& t) {
return get_root(t).si_then([](auto root) {
return size_t(root->level() + 1);
});
}
eagain_ifuture<tree_stats_t> get_stats_slow(Transaction& t) {
return get_root(t).si_then([this, &t](auto root) {
unsigned height = root->level() + 1;
return root->get_tree_stats(get_context(t)
).si_then([height](auto stats) {
stats.height = height;
return seastar::make_ready_future<tree_stats_t>(stats);
});
});
}
std::ostream& dump(Transaction& t, std::ostream& os) {
auto root = root_tracker->get_root(t);
if (root) {
root->dump(os);
} else {
os << "empty tree!";
}
return os;
}
std::ostream& print(std::ostream& os) const {
return os << "BTree-" << *nm;
}
/*
* test_only
*/
bool test_is_clean() const {
return root_tracker->is_clean();
}
eagain_ifuture<> test_clone_from(
Transaction& t, Transaction& t_from, Btree& from) {
// Note: assume the tree to clone is tracked correctly in memory.
    // In some unit tests, parts of the tree are stubbed out, so they
    // should not be loaded from NodeExtentManager.
return from.get_root(t_from
).si_then([this, &t](auto root_from) {
return root_from->test_clone_root(get_context(t), *root_tracker);
});
}
private:
context_t get_context(Transaction& t) {
return {*nm, value_builder, t};
}
eagain_ifuture<Ref<Node>> get_root(Transaction& t) {
auto root = root_tracker->get_root(t);
if (root) {
return seastar::make_ready_future<Ref<Node>>(root);
} else {
return Node::load_root(get_context(t), *root_tracker);
}
}
NodeExtentManagerURef nm;
const ValueBuilderImpl<ValueImpl> value_builder;
RootNodeTrackerURef root_tracker;
friend class DummyChildPool;
};
template <typename ValueImpl>
inline std::ostream& operator<<(std::ostream& os, const Btree<ValueImpl>& tree) {
return tree.print(os);
}
}
| 11,585 | 28.860825 | 83 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/tree_utils.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <cassert>
#include <cstring>
#include <random>
#include <string>
#include <sstream>
#include <utility>
#include <vector>
#include <seastar/core/thread.hh>
#include "crimson/common/log.h"
#include "stages/key_layout.h"
#include "tree.h"
/**
* tree_utils.h
*
* Contains shared logic for unit tests and perf tool.
*/
namespace crimson::os::seastore::onode {
/**
* templates to work with tree utility classes:
*
* struct ValueItem {
* <public members>
*
* value_size_t get_payload_size() const;
* static ValueItem create(std::size_t expected_size, std::size_t id);
* };
* std::ostream& operator<<(std::ostream& os, const ValueItem& item);
*
* class ValueImpl final : public Value {
* ...
*
* using item_t = ValueItem;
* void initialize(Transaction& t, const item_t& item);
* void validate(const item_t& item);
* };
*
*/
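/**
 * A conforming ValueItem (sketch; `minimal_item_t` is hypothetical and merely
 * restates the contract above):
 *
 *   struct minimal_item_t {
 *     std::size_t id = 0;
 *     value_size_t size = 0;
 *
 *     value_size_t get_payload_size() const { return size; }
 *     static minimal_item_t create(std::size_t expected_size, std::size_t id) {
 *       return {id, static_cast<value_size_t>(expected_size)};
 *     }
 *   };
 *   inline std::ostream& operator<<(std::ostream& os, const minimal_item_t& item) {
 *     return os << "MinimalItem(" << item.id << ", " << item.size << "B)";
 *   }
 */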
template <typename CursorType>
void initialize_cursor_from_item(
Transaction& t,
const ghobject_t& key,
const typename decltype(std::declval<CursorType>().value())::item_t& item,
CursorType& cursor,
bool insert_success) {
ceph_assert(insert_success);
ceph_assert(!cursor.is_end());
ceph_assert(cursor.get_ghobj() == key);
auto tree_value = cursor.value();
tree_value.initialize(t, item);
}
template <typename CursorType>
void validate_cursor_from_item(
const ghobject_t& key,
const typename decltype(std::declval<CursorType>().value())::item_t& item,
CursorType& cursor) {
ceph_assert(!cursor.is_end());
ceph_assert(cursor.get_ghobj() == key);
auto tree_value = cursor.value();
tree_value.validate(item);
}
template <typename ValueItem>
class Values {
public:
Values(size_t n) {
for (size_t i = 1; i <= n; ++i) {
auto item = create(i * 8);
values.push_back(item);
}
}
Values(std::vector<size_t> sizes) {
for (auto& size : sizes) {
auto item = create(size);
values.push_back(item);
}
}
~Values() = default;
ValueItem create(size_t size) {
return ValueItem::create(size, id++);
}
ValueItem pick() const {
auto index = rd() % values.size();
return values[index];
}
private:
std::size_t id = 0;
mutable std::random_device rd;
std::vector<ValueItem> values;
};
template <typename ValueItem>
class KVPool {
public:
struct kv_t {
ghobject_t key;
ValueItem value;
};
using kv_vector_t = std::vector<kv_t>;
using kvptr_vector_t = std::vector<kv_t*>;
using iterator_t = typename kvptr_vector_t::iterator;
size_t size() const {
return kvs.size();
}
iterator_t begin() {
return serial_p_kvs.begin();
}
iterator_t end() {
return serial_p_kvs.end();
}
iterator_t random_begin() {
return random_p_kvs.begin();
}
iterator_t random_end() {
return random_p_kvs.end();
}
void shuffle() {
std::shuffle(random_p_kvs.begin(), random_p_kvs.end(), std::default_random_engine{});
}
void erase_from_random(iterator_t begin, iterator_t end) {
random_p_kvs.erase(begin, end);
kv_vector_t new_kvs;
for (auto p_kv : random_p_kvs) {
new_kvs.emplace_back(*p_kv);
}
std::sort(new_kvs.begin(), new_kvs.end(), [](auto& l, auto& r) {
return l.key < r.key;
});
kvs.swap(new_kvs);
serial_p_kvs.resize(kvs.size());
random_p_kvs.resize(kvs.size());
init();
}
static KVPool create_raw_range(
const std::vector<size_t>& ns_sizes,
const std::vector<size_t>& oid_sizes,
const std::vector<size_t>& value_sizes,
const std::pair<index_t, index_t>& range2,
const std::pair<index_t, index_t>& range1,
const std::pair<index_t, index_t>& range0) {
ceph_assert(range2.first < range2.second);
ceph_assert(range2.second - 1 <= MAX_SHARD);
ceph_assert(range2.second - 1 <= MAX_CRUSH);
ceph_assert(range1.first < range1.second);
ceph_assert(range1.second - 1 <= 9);
ceph_assert(range0.first < range0.second);
kv_vector_t kvs;
std::random_device rd;
Values<ValueItem> values{value_sizes};
for (index_t i = range2.first; i < range2.second; ++i) {
for (index_t j = range1.first; j < range1.second; ++j) {
size_t ns_size;
size_t oid_size;
if (j == 0) {
// store ns0, oid0 as empty strings for test purposes
ns_size = 0;
oid_size = 0;
} else {
ns_size = ns_sizes[rd() % ns_sizes.size()];
oid_size = oid_sizes[rd() % oid_sizes.size()];
assert(ns_size && oid_size);
}
for (index_t k = range0.first; k < range0.second; ++k) {
kvs.emplace_back(
kv_t{make_raw_oid(i, j, k, ns_size, oid_size), values.pick()}
);
}
}
}
return KVPool(std::move(kvs));
}
static KVPool create_range(
const std::pair<index_t, index_t>& range_i,
const std::vector<size_t>& value_sizes,
const uint64_t block_size) {
kv_vector_t kvs;
std::random_device rd;
for (index_t i = range_i.first; i < range_i.second; ++i) {
auto value_size = value_sizes[rd() % value_sizes.size()];
kvs.emplace_back(
kv_t{make_oid(i), ValueItem::create(value_size, i, block_size)}
);
}
return KVPool(std::move(kvs));
}
private:
KVPool(kv_vector_t&& _kvs)
: kvs(std::move(_kvs)), serial_p_kvs(kvs.size()), random_p_kvs(kvs.size()) {
init();
}
void init() {
std::transform(kvs.begin(), kvs.end(), serial_p_kvs.begin(),
[] (kv_t& item) { return &item; });
std::transform(kvs.begin(), kvs.end(), random_p_kvs.begin(),
[] (kv_t& item) { return &item; });
shuffle();
}
static ghobject_t make_raw_oid(
index_t index2, index_t index1, index_t index0,
size_t ns_size, size_t oid_size) {
assert(index1 < 10);
std::ostringstream os_ns;
std::ostringstream os_oid;
if (index1 == 0) {
assert(!ns_size);
assert(!oid_size);
} else {
os_ns << "ns" << index1;
auto current_size = (size_t)os_ns.tellp();
assert(ns_size >= current_size);
os_ns << std::string(ns_size - current_size, '_');
os_oid << "oid" << index1;
current_size = (size_t)os_oid.tellp();
assert(oid_size >= current_size);
os_oid << std::string(oid_size - current_size, '_');
}
return ghobject_t(shard_id_t(index2), index2, index2,
os_ns.str(), os_oid.str(), index0, index0);
}
static ghobject_t make_oid(index_t i) {
std::stringstream ss;
ss << "object_" << i;
auto ret = ghobject_t(
hobject_t(
sobject_t(ss.str(), CEPH_NOSNAP)));
ret.set_shard(shard_id_t(0));
ret.hobj.nspace = "asdf";
return ret;
}
kv_vector_t kvs;
kvptr_vector_t serial_p_kvs;
kvptr_vector_t random_p_kvs;
};
template <bool TRACK, typename ValueImpl>
class TreeBuilder {
public:
using BtreeImpl = Btree<ValueImpl>;
using BtreeCursor = typename BtreeImpl::Cursor;
using ValueItem = typename ValueImpl::item_t;
using iterator_t = typename KVPool<ValueItem>::iterator_t;
TreeBuilder(KVPool<ValueItem>& kvs, NodeExtentManagerURef&& nm)
: kvs{kvs} {
tree.emplace(std::move(nm));
}
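  // Typical flow (sketch; `t` is a Transaction and `nm` a NodeExtentManagerURef
  // provided by the test harness, and the pool parameters are arbitrary
  // example values):
  //
  //   auto kvs = KVPool<ValueItem>::create_raw_range(
  //       {8, 16, 64}, {8, 16, 64}, {8, 128, 512},
  //       {0, 4}, {0, 10}, {0, 4});
  //   TreeBuilder<true, ValueImpl> builder{kvs, std::move(nm)};
  //   builder.bootstrap(t)
  //     .si_then([&] { return builder.insert(t); })
  //     .si_then([&] { return builder.validate(t); });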
eagain_ifuture<> bootstrap(Transaction& t) {
std::ostringstream oss;
#ifndef NDEBUG
oss << "debug=on, ";
#else
oss << "debug=off, ";
#endif
#ifdef UNIT_TESTS_BUILT
    oss << "UNIT_TESTS_BUILT=on, ";
#else
    oss << "UNIT_TESTS_BUILT=off, ";
#endif
if constexpr (TRACK) {
oss << "track=on, ";
} else {
oss << "track=off, ";
}
oss << *tree;
logger().warn("TreeBuilder: {}, bootstrapping ...", oss.str());
return tree->mkfs(t);
}
eagain_ifuture<BtreeCursor> insert_one(
Transaction& t, const iterator_t& iter_rd) {
auto p_kv = *iter_rd;
logger().debug("[{}] insert {} -> {}",
iter_rd - kvs.random_begin(),
key_hobj_t{p_kv->key},
p_kv->value);
return tree->insert(
t, p_kv->key, {p_kv->value.get_payload_size()}
).si_then([&t, this, p_kv](auto ret) {
boost::ignore_unused(this); // avoid clang warning;
auto success = ret.second;
auto cursor = std::move(ret.first);
initialize_cursor_from_item(t, p_kv->key, p_kv->value, cursor, success);
#ifndef NDEBUG
validate_cursor_from_item(p_kv->key, p_kv->value, cursor);
return tree->find(t, p_kv->key
).si_then([cursor, p_kv](auto cursor_) mutable {
assert(!cursor_.is_end());
ceph_assert(cursor_.get_ghobj() == p_kv->key);
ceph_assert(cursor_.value() == cursor.value());
validate_cursor_from_item(p_kv->key, p_kv->value, cursor_);
return cursor;
});
#else
return eagain_iertr::make_ready_future<BtreeCursor>(cursor);
#endif
}).handle_error_interruptible(
[] (const crimson::ct_error::value_too_large& e) {
ceph_abort("impossible path");
},
crimson::ct_error::pass_further_all{}
);
}
eagain_ifuture<> insert(Transaction& t) {
auto ref_kv_iter = seastar::make_lw_shared<iterator_t>();
*ref_kv_iter = kvs.random_begin();
auto cursors = seastar::make_lw_shared<std::vector<BtreeCursor>>();
logger().warn("start inserting {} kvs ...", kvs.size());
auto start_time = mono_clock::now();
return trans_intr::repeat([&t, this, cursors, ref_kv_iter,
start_time]()
-> eagain_ifuture<seastar::stop_iteration> {
if (*ref_kv_iter == kvs.random_end()) {
std::chrono::duration<double> duration = mono_clock::now() - start_time;
logger().warn("Insert done! {}s", duration.count());
return seastar::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::yes);
} else {
return insert_one(t, *ref_kv_iter
).si_then([cursors, ref_kv_iter] (auto cursor) {
if constexpr (TRACK) {
cursors->emplace_back(cursor);
}
++(*ref_kv_iter);
return seastar::stop_iteration::no;
});
}
}).si_then([&t, this, cursors, ref_kv_iter] {
if (!cursors->empty()) {
        logger().info("Verifying tracked cursors ...");
*ref_kv_iter = kvs.random_begin();
return seastar::do_with(
cursors->begin(),
[&t, this, cursors, ref_kv_iter] (auto& c_iter) {
return trans_intr::repeat(
[&t, this, &c_iter, cursors, ref_kv_iter] ()
-> eagain_ifuture<seastar::stop_iteration> {
if (*ref_kv_iter == kvs.random_end()) {
logger().info("Verify done!");
return seastar::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::yes);
}
assert(c_iter != cursors->end());
auto p_kv = **ref_kv_iter;
            // validate that the values in the tree are kept intact
return tree->find(t, p_kv->key).si_then([&c_iter, ref_kv_iter](auto cursor) {
auto p_kv = **ref_kv_iter;
validate_cursor_from_item(p_kv->key, p_kv->value, cursor);
              // validate that the values held by the cursors are kept intact
validate_cursor_from_item(p_kv->key, p_kv->value, *c_iter);
++(*ref_kv_iter);
++c_iter;
return seastar::stop_iteration::no;
});
});
});
} else {
return eagain_iertr::now();
}
});
}
eagain_ifuture<> erase_one(
Transaction& t, const iterator_t& iter_rd) {
auto p_kv = *iter_rd;
logger().debug("[{}] erase {} -> {}",
iter_rd - kvs.random_begin(),
key_hobj_t{p_kv->key},
p_kv->value);
return tree->erase(t, p_kv->key
).si_then([&t, this, p_kv] (auto size) {
boost::ignore_unused(t); // avoid clang warning;
boost::ignore_unused(this);
boost::ignore_unused(p_kv);
ceph_assert(size == 1);
#ifndef NDEBUG
return tree->contains(t, p_kv->key
).si_then([] (bool ret) {
ceph_assert(ret == false);
});
#else
return eagain_iertr::now();
#endif
});
}
eagain_ifuture<> erase(Transaction& t, std::size_t erase_size) {
assert(erase_size <= kvs.size());
kvs.shuffle();
auto erase_end = kvs.random_begin() + erase_size;
auto ref_kv_iter = seastar::make_lw_shared<iterator_t>();
auto cursors = seastar::make_lw_shared<std::map<ghobject_t, BtreeCursor>>();
return eagain_iertr::now().si_then([&t, this, cursors, ref_kv_iter] {
(void)this; // silence clang warning for !TRACK
(void)t; // silence clang warning for !TRACK
if constexpr (TRACK) {
logger().info("Tracking cursors before erase ...");
*ref_kv_iter = kvs.begin();
auto start_time = mono_clock::now();
return trans_intr::repeat(
[&t, this, cursors, ref_kv_iter, start_time] ()
-> eagain_ifuture<seastar::stop_iteration> {
if (*ref_kv_iter == kvs.end()) {
std::chrono::duration<double> duration = mono_clock::now() - start_time;
logger().info("Track done! {}s", duration.count());
return seastar::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::yes);
}
auto p_kv = **ref_kv_iter;
return tree->find(t, p_kv->key).si_then([cursors, ref_kv_iter](auto cursor) {
auto p_kv = **ref_kv_iter;
validate_cursor_from_item(p_kv->key, p_kv->value, cursor);
cursors->emplace(p_kv->key, cursor);
++(*ref_kv_iter);
return seastar::stop_iteration::no;
});
});
} else {
return eagain_iertr::now();
}
}).si_then([&t, this, ref_kv_iter, erase_end] {
*ref_kv_iter = kvs.random_begin();
logger().warn("start erasing {}/{} kvs ...",
erase_end - kvs.random_begin(), kvs.size());
auto start_time = mono_clock::now();
return trans_intr::repeat([&t, this, ref_kv_iter,
start_time, erase_end] ()
-> eagain_ifuture<seastar::stop_iteration> {
if (*ref_kv_iter == erase_end) {
std::chrono::duration<double> duration = mono_clock::now() - start_time;
logger().warn("Erase done! {}s", duration.count());
return seastar::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::yes);
} else {
return erase_one(t, *ref_kv_iter
).si_then([ref_kv_iter] {
++(*ref_kv_iter);
return seastar::stop_iteration::no;
});
}
});
}).si_then([this, cursors, ref_kv_iter, erase_end] {
if constexpr (TRACK) {
        logger().info("Verifying tracked cursors ...");
*ref_kv_iter = kvs.random_begin();
while (*ref_kv_iter != erase_end) {
auto p_kv = **ref_kv_iter;
auto c_it = cursors->find(p_kv->key);
ceph_assert(c_it != cursors->end());
ceph_assert(c_it->second.is_end());
cursors->erase(c_it);
++(*ref_kv_iter);
}
}
kvs.erase_from_random(kvs.random_begin(), erase_end);
if constexpr (TRACK) {
*ref_kv_iter = kvs.begin();
for (auto& [k, c] : *cursors) {
assert(*ref_kv_iter != kvs.end());
auto p_kv = **ref_kv_iter;
validate_cursor_from_item(p_kv->key, p_kv->value, c);
++(*ref_kv_iter);
}
logger().info("Verify done!");
}
});
}
eagain_ifuture<> get_stats(Transaction& t) {
return tree->get_stats_slow(t
).si_then([](auto stats) {
logger().warn("{}", stats);
});
}
eagain_ifuture<std::size_t> height(Transaction& t) {
return tree->height(t);
}
void reload(NodeExtentManagerURef&& nm) {
tree.emplace(std::move(nm));
}
eagain_ifuture<> validate_one(
Transaction& t, const iterator_t& iter_seq) {
assert(iter_seq != kvs.end());
auto next_iter = iter_seq + 1;
auto p_kv = *iter_seq;
return tree->find(t, p_kv->key
).si_then([p_kv, &t] (auto cursor) {
validate_cursor_from_item(p_kv->key, p_kv->value, cursor);
return cursor.get_next(t);
}).si_then([next_iter, this] (auto cursor) {
if (next_iter == kvs.end()) {
ceph_assert(cursor.is_end());
} else {
auto p_kv = *next_iter;
validate_cursor_from_item(p_kv->key, p_kv->value, cursor);
}
});
}
eagain_ifuture<> validate(Transaction& t) {
    logger().info("Verifying inserted ...");
return seastar::do_with(
kvs.begin(),
[this, &t] (auto &iter) {
return trans_intr::repeat(
[this, &t, &iter]() ->eagain_iertr::future<seastar::stop_iteration> {
if (iter == kvs.end()) {
return seastar::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::yes);
}
return validate_one(t, iter).si_then([&iter] {
++iter;
return seastar::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::no);
});
});
});
}
private:
static seastar::logger& logger() {
return crimson::get_logger(ceph_subsys_test);
}
KVPool<ValueItem>& kvs;
std::optional<BtreeImpl> tree;
};
}
| 17,436 | 29.80742 | 89 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/value.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include "value.h"
#include "node.h"
#include "node_delta_recorder.h"
#include "node_layout.h"
// value implementations
#include "test/crimson/seastore/onode_tree/test_value.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/fltree_onode_manager.h"
namespace crimson::os::seastore::onode {
ceph::bufferlist&
ValueDeltaRecorder::get_encoded(NodeExtentMutable& payload_mut)
{
ceph::encode(node_delta_op_t::SUBOP_UPDATE_VALUE, encoded);
node_offset_t offset = payload_mut.get_node_offset();
assert(offset > sizeof(value_header_t));
offset -= sizeof(value_header_t);
ceph::encode(offset, encoded);
return encoded;
}
Value::Value(NodeExtentManager& nm,
const ValueBuilder& vb,
Ref<tree_cursor_t>& p_cursor)
: nm{nm}, vb{vb}, p_cursor{p_cursor} {}
Value::~Value() {}
bool Value::is_tracked() const
{
assert(!p_cursor->is_end());
return p_cursor->is_tracked();
}
void Value::invalidate()
{
p_cursor.reset();
}
eagain_ifuture<> Value::extend(Transaction& t, value_size_t extend_size)
{
assert(is_tracked());
[[maybe_unused]] auto target_size = get_payload_size() + extend_size;
return p_cursor->extend_value(get_context(t), extend_size)
#ifndef NDEBUG
.si_then([this, target_size] {
assert(target_size == get_payload_size());
})
#endif
;
}
eagain_ifuture<> Value::trim(Transaction& t, value_size_t trim_size)
{
assert(is_tracked());
assert(get_payload_size() > trim_size);
[[maybe_unused]] auto target_size = get_payload_size() - trim_size;
return p_cursor->trim_value(get_context(t), trim_size)
#ifndef NDEBUG
.si_then([this, target_size] {
assert(target_size == get_payload_size());
})
#endif
;
}
const value_header_t* Value::read_value_header() const
{
auto ret = p_cursor->read_value_header(vb.get_header_magic());
assert(ret->payload_size <= vb.get_max_value_payload_size());
return ret;
}
std::pair<NodeExtentMutable&, ValueDeltaRecorder*>
Value::do_prepare_mutate_payload(Transaction& t)
{
return p_cursor->prepare_mutate_value_payload(get_context(t));
}
laddr_t Value::get_hint() const
{
return p_cursor->get_key_view(vb.get_header_magic()).get_hint();
}
std::unique_ptr<ValueDeltaRecorder>
build_value_recorder_by_type(ceph::bufferlist& encoded,
const value_magic_t& magic)
{
std::unique_ptr<ValueDeltaRecorder> ret;
switch (magic) {
case value_magic_t::ONODE:
ret = std::make_unique<FLTreeOnode::Recorder>(encoded);
break;
case value_magic_t::TEST_UNBOUND:
ret = std::make_unique<UnboundedValue::Recorder>(encoded);
break;
case value_magic_t::TEST_BOUNDED:
ret = std::make_unique<BoundedValue::Recorder>(encoded);
break;
case value_magic_t::TEST_EXTENDED:
ret = std::make_unique<ExtendedValue::Recorder>(encoded);
break;
default:
ret = nullptr;
break;
}
assert(!ret || ret->get_header_magic() == magic);
return ret;
}
void validate_tree_config(const tree_conf_t& conf)
{
ceph_assert(conf.max_ns_size <
string_key_view_t::VALID_UPPER_BOUND);
ceph_assert(conf.max_oid_size <
string_key_view_t::VALID_UPPER_BOUND);
ceph_assert(is_valid_node_size(conf.internal_node_size));
ceph_assert(is_valid_node_size(conf.leaf_node_size));
if (conf.do_split_check) {
    // In the hope of complying with 3 * (oid + ns) + 2 * value < node
//
// see node_layout.h for NODE_BLOCK_SIZE considerations
//
// The below calculations also consider the internal indexing overhead in
// order to be accurate, so the equation has become:
// node-header-size + 2 * max-full-insert-size +
// max-ns/oid-split-overhead <= node-size
auto obj = ghobject_t{shard_id_t{0}, 0, 0, "", "", 0, 0};
key_hobj_t key(obj);
auto max_str_size = conf.max_ns_size + conf.max_oid_size;
#define _STAGE_T(NodeType) node_to_stage_t<typename NodeType::node_stage_t>
#define NXT_T(StageType) staged<typename StageType::next_param_t>
laddr_t i_value{0};
auto insert_size_2 =
_STAGE_T(InternalNode0)::insert_size(key, i_value);
auto insert_size_0 =
NXT_T(NXT_T(_STAGE_T(InternalNode0)))::insert_size(key, i_value);
unsigned internal_size_bound = sizeof(node_header_t) +
(insert_size_2 + max_str_size) * 2 +
(insert_size_2 - insert_size_0 + max_str_size);
ceph_assert(internal_size_bound <= conf.internal_node_size);
value_config_t l_value;
l_value.payload_size = conf.max_value_payload_size;
insert_size_2 =
_STAGE_T(LeafNode0)::insert_size(key, l_value);
insert_size_0 =
NXT_T(NXT_T(_STAGE_T(LeafNode0)))::insert_size(key, l_value);
unsigned leaf_size_bound = sizeof(node_header_t) +
(insert_size_2 + max_str_size) * 2 +
(insert_size_2 - insert_size_0 + max_str_size);
ceph_assert(leaf_size_bound <= conf.leaf_node_size);
}
}
}
| 5,066 | 29.709091 | 82 | cc |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/value.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <ostream>
#include "include/buffer.h"
#include "crimson/common/type_helpers.h"
#include "fwd.h"
#include "node_extent_mutable.h"
namespace crimson::os::seastore::onode {
// value size up to 64 KiB
using value_size_t = uint16_t;
enum class value_magic_t : uint8_t {
ONODE = 0x52,
TEST_UNBOUND,
TEST_BOUNDED,
TEST_EXTENDED,
};
inline std::ostream& operator<<(std::ostream& os, const value_magic_t& magic) {
switch (magic) {
case value_magic_t::ONODE:
return os << "ONODE";
case value_magic_t::TEST_UNBOUND:
return os << "TEST_UNBOUND";
case value_magic_t::TEST_BOUNDED:
return os << "TEST_BOUNDED";
case value_magic_t::TEST_EXTENDED:
return os << "TEST_EXTENDED";
default:
    // cast to avoid recursing into this operator for unknown values
    return os << "UNKNOWN(" << static_cast<unsigned>(magic) << ")";
}
}
/**
* value_config_t
*
* Parameters to create a value.
*/
struct value_config_t {
value_magic_t magic;
value_size_t payload_size;
value_size_t allocation_size() const;
void encode(ceph::bufferlist& encoded) const {
ceph::encode(magic, encoded);
ceph::encode(payload_size, encoded);
}
static value_config_t decode(ceph::bufferlist::const_iterator& delta) {
value_magic_t magic;
ceph::decode(magic, delta);
value_size_t payload_size;
ceph::decode(payload_size, delta);
return {magic, payload_size};
}
};
inline std::ostream& operator<<(std::ostream& os, const value_config_t& conf) {
return os << "ValueConf(" << conf.magic
<< ", " << conf.payload_size << "B)";
}
/**
* value_header_t
*
* The header structure in value layout.
*
* Value layout:
*
* # <- alloc size -> #
* # header | payload #
*/
struct value_header_t {
value_magic_t magic;
value_size_t payload_size;
bool operator==(const value_header_t& rhs) const {
return (magic == rhs.magic && payload_size == rhs.payload_size);
}
bool operator!=(const value_header_t& rhs) const {
return !(*this == rhs);
}
value_size_t allocation_size() const {
return payload_size + sizeof(value_header_t);
}
const char* get_payload() const {
return reinterpret_cast<const char*>(this) + sizeof(value_header_t);
}
NodeExtentMutable get_payload_mutable(NodeExtentMutable& node) const {
return node.get_mutable_absolute(get_payload(), payload_size);
}
char* get_payload() {
return reinterpret_cast<char*>(this) + sizeof(value_header_t);
}
void initiate(NodeExtentMutable& mut, const value_config_t& config) {
value_header_t header{config.magic, config.payload_size};
mut.copy_in_absolute(this, header);
mut.set_absolute(get_payload(), 0, config.payload_size);
}
static value_size_t estimate_allocation_size(value_size_t payload_size) {
return payload_size + sizeof(value_header_t);
}
} __attribute__((packed));
inline std::ostream& operator<<(std::ostream& os, const value_header_t& header) {
return os << "Value(" << header.magic
<< ", " << header.payload_size << "B)";
}
inline value_size_t value_config_t::allocation_size() const {
return value_header_t::estimate_allocation_size(payload_size);
}
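// For reference: with the packed layout above, sizeof(value_header_t) is
// 3 bytes (1-byte magic + 2-byte payload_size), so e.g. a 256B payload
// occupies value_header_t::estimate_allocation_size(256) == 259 bytes.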
/**
* ValueDeltaRecorder
*
* An abstracted class to handle user-defined value delta encode, decode and
* replay.
*/
class ValueDeltaRecorder {
public:
virtual ~ValueDeltaRecorder() = default;
ValueDeltaRecorder(const ValueDeltaRecorder&) = delete;
ValueDeltaRecorder(ValueDeltaRecorder&&) = delete;
ValueDeltaRecorder& operator=(const ValueDeltaRecorder&) = delete;
ValueDeltaRecorder& operator=(ValueDeltaRecorder&&) = delete;
/// Returns the value header magic for validation purpose.
virtual value_magic_t get_header_magic() const = 0;
/// Called by DeltaRecorderT to apply user-defined value delta.
virtual void apply_value_delta(ceph::bufferlist::const_iterator&,
NodeExtentMutable&,
laddr_t) = 0;
protected:
ValueDeltaRecorder(ceph::bufferlist& encoded) : encoded{encoded} {}
/// Get the delta buffer to encode user-defined value delta.
ceph::bufferlist& get_encoded(NodeExtentMutable&);
private:
ceph::bufferlist& encoded;
};
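/*
 * A concrete recorder (sketch; `MyRecorder` and its single "set" delta are
 * hypothetical; see test_value.h and fltree_onode_manager.h for the real
 * implementations):
 *
 *   class MyRecorder final : public ValueDeltaRecorder {
 *    public:
 *     MyRecorder(ceph::bufferlist& encoded) : ValueDeltaRecorder(encoded) {}
 *     value_magic_t get_header_magic() const override {
 *       return value_magic_t::TEST_UNBOUND; // a real recorder returns its own magic
 *     }
 *     // mutation time: get_encoded() prefixes the value offset, then the
 *     // user-defined delta is appended to the returned bufferlist
 *     void encode_set(NodeExtentMutable& payload_mut, uint32_t v) {
 *       auto& encoded = get_encoded(payload_mut);
 *       ceph::encode(v, encoded);
 *     }
 *     // replay time: decode the user-defined delta and re-apply it
 *     void apply_value_delta(ceph::bufferlist::const_iterator& delta,
 *                            NodeExtentMutable& payload_mut,
 *                            laddr_t) override {
 *       uint32_t v;
 *       ceph::decode(v, delta);
 *       std::memcpy(payload_mut.get_write(), &v, sizeof(v));
 *     }
 *   };
 */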
/**
* tree_conf_t
*
* Hard limits and compile-time configurations.
*/
struct tree_conf_t {
value_magic_t value_magic;
string_size_t max_ns_size;
string_size_t max_oid_size;
value_size_t max_value_payload_size;
extent_len_t internal_node_size;
extent_len_t leaf_node_size;
bool do_split_check = true;
};
class tree_cursor_t;
/**
* Value
*
* Value is a stateless view of the underlying value header and payload content
* stored in a tree leaf node, with the support to implement user-defined value
* deltas and to extend and trim the underlying payload data (not implemented
* yet).
*
* In the current implementation, we don't guarantee any alignment for value
* payload due to unaligned node layout and the according merge and split
* operations.
*/
class Value {
public:
virtual ~Value();
Value(const Value&) = default;
Value(Value&&) = default;
Value& operator=(const Value&) = delete;
Value& operator=(Value&&) = delete;
/// Returns whether the Value is still tracked in tree.
bool is_tracked() const;
/// Invalidate the Value before submitting transaction.
void invalidate();
/// Returns the value payload size.
value_size_t get_payload_size() const {
assert(is_tracked());
return read_value_header()->payload_size;
}
laddr_t get_hint() const;
bool operator==(const Value& v) const { return p_cursor == v.p_cursor; }
bool operator!=(const Value& v) const { return !(*this == v); }
protected:
Value(NodeExtentManager&, const ValueBuilder&, Ref<tree_cursor_t>&);
/// Extends the payload size.
eagain_ifuture<> extend(Transaction&, value_size_t extend_size);
/// Trim and shrink the payload.
eagain_ifuture<> trim(Transaction&, value_size_t trim_size);
/// Get the permission to mutate the payload with the optional value recorder.
template <typename PayloadT, typename ValueDeltaRecorderT>
std::pair<NodeExtentMutable&, ValueDeltaRecorderT*>
prepare_mutate_payload(Transaction& t) {
assert(is_tracked());
assert(sizeof(PayloadT) <= get_payload_size());
auto value_mutable = do_prepare_mutate_payload(t);
assert(value_mutable.first.get_write() ==
const_cast<const Value*>(this)->template read_payload<char>());
assert(value_mutable.first.get_length() == get_payload_size());
return {value_mutable.first,
static_cast<ValueDeltaRecorderT*>(value_mutable.second)};
}
/// Get the latest payload pointer for read.
template <typename PayloadT>
const PayloadT* read_payload() const {
assert(is_tracked());
// see Value documentation
static_assert(alignof(PayloadT) == 1);
assert(sizeof(PayloadT) <= get_payload_size());
return reinterpret_cast<const PayloadT*>(read_value_header()->get_payload());
}
private:
const value_header_t* read_value_header() const;
context_t get_context(Transaction& t) {
return {nm, vb, t};
}
std::pair<NodeExtentMutable&, ValueDeltaRecorder*>
do_prepare_mutate_payload(Transaction&);
NodeExtentManager& nm;
const ValueBuilder& vb;
Ref<tree_cursor_t> p_cursor;
template <typename ValueImpl>
friend class Btree;
};
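/*
 * A minimal user value (sketch; `MyValue`, `my_payload_t` and `MyRecorder`
 * are hypothetical, with `MyRecorder` as sketched above ValueDeltaRecorder;
 * see test_value.h and fltree_onode_manager.h for the real implementations):
 *
 *   struct my_payload_t {
 *     uint32_t id;
 *   } __attribute__((packed));  // payloads are unaligned, see above
 *
 *   class MyValue final : public Value {
 *    public:
 *     using Value::Value;
 *     uint32_t get_id() const {
 *       return read_payload<my_payload_t>()->id;
 *     }
 *     void set_id_replayable(Transaction& t, uint32_t id) {
 *       auto value_mutable = prepare_mutate_payload<my_payload_t, MyRecorder>(t);
 *       if (value_mutable.second) {
 *         // the recorder is only present when deltas need to be recorded
 *         value_mutable.second->encode_set(value_mutable.first, id);
 *       }
 *       std::memcpy(value_mutable.first.get_write(), &id, sizeof(id));
 *     }
 *   };
 */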
/**
* ValueBuilder
*
* For tree nodes to build values without the need to depend on the actual
* implementation.
*/
struct ValueBuilder {
virtual value_magic_t get_header_magic() const = 0;
virtual string_size_t get_max_ns_size() const = 0;
virtual string_size_t get_max_oid_size() const = 0;
virtual value_size_t get_max_value_payload_size() const = 0;
virtual extent_len_t get_internal_node_size() const = 0;
virtual extent_len_t get_leaf_node_size() const = 0;
virtual std::unique_ptr<ValueDeltaRecorder>
build_value_recorder(ceph::bufferlist&) const = 0;
};
/**
* ValueBuilderImpl
*
* The concrete ValueBuilder implementation in Btree.
*/
template <typename ValueImpl>
struct ValueBuilderImpl final : public ValueBuilder {
ValueBuilderImpl() {
validate_tree_config(ValueImpl::TREE_CONF);
}
  value_magic_t get_header_magic() const override {
return ValueImpl::TREE_CONF.value_magic;
}
string_size_t get_max_ns_size() const override {
return ValueImpl::TREE_CONF.max_ns_size;
}
string_size_t get_max_oid_size() const override {
return ValueImpl::TREE_CONF.max_oid_size;
}
value_size_t get_max_value_payload_size() const override {
return ValueImpl::TREE_CONF.max_value_payload_size;
}
extent_len_t get_internal_node_size() const override {
return ValueImpl::TREE_CONF.internal_node_size;
}
extent_len_t get_leaf_node_size() const override {
return ValueImpl::TREE_CONF.leaf_node_size;
}
std::unique_ptr<ValueDeltaRecorder>
build_value_recorder(ceph::bufferlist& encoded) const override {
std::unique_ptr<ValueDeltaRecorder> ret =
std::make_unique<typename ValueImpl::Recorder>(encoded);
assert(ret->get_header_magic() == get_header_magic());
return ret;
}
ValueImpl build_value(NodeExtentManager& nm,
const ValueBuilder& vb,
Ref<tree_cursor_t>& p_cursor) const {
assert(vb.get_header_magic() == get_header_magic());
return ValueImpl(nm, vb, p_cursor);
}
};
void validate_tree_config(const tree_conf_t& conf);
/**
* Get the value recorder by type (the magic value) when the ValueBuilder is
* unavailable.
*/
std::unique_ptr<ValueDeltaRecorder>
build_value_recorder_by_type(ceph::bufferlist& encoded, const value_magic_t& magic);
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::onode::value_config_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::onode::value_header_t> : fmt::ostream_formatter {};
#endif
| 9,925 | 28.366864 | 108 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager/dummy.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <chrono>
#include <seastar/core/sleep.hh>
#include "include/buffer_raw.h"
#include "crimson/os/seastore/logging.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager.h"
/**
* dummy.h
*
* Dummy backend implementations for test purposes.
*/
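/*
 * Construction sketch (assuming NodeExtentManagerURef is the owning pointer
 * type declared in fwd.h):
 *
 *   auto nm = NodeExtentManagerURef{new DummyNodeExtentManager<true>()};
 *   // SYNC=true resolves operations inline; SYNC=false adds a short sleep
 *   // to exercise the asynchronous code paths.
 */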
namespace crimson::os::seastore::onode {
class DummySuper final: public Super {
public:
DummySuper(Transaction& t, RootNodeTracker& tracker, laddr_t* p_root_laddr)
: Super(t, tracker), p_root_laddr{p_root_laddr} {}
~DummySuper() override = default;
protected:
laddr_t get_root_laddr() const override { return *p_root_laddr; }
void write_root_laddr(context_t c, laddr_t addr) override {
LOG_PREFIX(OTree::Dummy);
SUBDEBUGT(seastore_onode, "update root {:#x} ...", c.t, addr);
*p_root_laddr = addr;
}
private:
laddr_t* p_root_laddr;
};
class DummyNodeExtent final: public NodeExtent {
public:
DummyNodeExtent(ceph::bufferptr &&ptr) : NodeExtent(std::move(ptr)) {
state = extent_state_t::INITIAL_WRITE_PENDING;
}
DummyNodeExtent(const DummyNodeExtent& other) = delete;
~DummyNodeExtent() override = default;
void retire() {
assert(state == extent_state_t::INITIAL_WRITE_PENDING);
state = extent_state_t::INVALID;
bufferptr empty_bptr;
get_bptr().swap(empty_bptr);
}
protected:
NodeExtentRef mutate(context_t, DeltaRecorderURef&&) override {
ceph_abort("impossible path"); }
DeltaRecorder* get_recorder() const override {
return nullptr; }
CachedExtentRef duplicate_for_write(Transaction&) override {
ceph_abort("impossible path"); }
extent_types_t get_type() const override {
return extent_types_t::TEST_BLOCK; }
ceph::bufferlist get_delta() override {
ceph_abort("impossible path"); }
void apply_delta(const ceph::bufferlist&) override {
ceph_abort("impossible path"); }
};
template <bool SYNC>
class DummyNodeExtentManager final: public NodeExtentManager {
static constexpr size_t ALIGNMENT = 4096;
public:
~DummyNodeExtentManager() override = default;
std::size_t size() const { return allocate_map.size(); }
protected:
bool is_read_isolated() const override { return false; }
read_iertr::future<NodeExtentRef> read_extent(
Transaction& t, laddr_t addr) override {
SUBTRACET(seastore_onode, "reading at {:#x} ...", t, addr);
if constexpr (SYNC) {
return read_extent_sync(t, addr);
} else {
using namespace std::chrono_literals;
return seastar::sleep(1us).then([this, &t, addr] {
return read_extent_sync(t, addr);
});
}
}
alloc_iertr::future<NodeExtentRef> alloc_extent(
Transaction& t, laddr_t hint, extent_len_t len) override {
SUBTRACET(seastore_onode, "allocating {}B with hint {:#x} ...", t, len, hint);
if constexpr (SYNC) {
return alloc_extent_sync(t, len);
} else {
using namespace std::chrono_literals;
return seastar::sleep(1us).then([this, &t, len] {
return alloc_extent_sync(t, len);
});
}
}
retire_iertr::future<> retire_extent(
Transaction& t, NodeExtentRef extent) override {
SUBTRACET(seastore_onode,
"retiring {}B at {:#x} -- {} ...",
t, extent->get_length(), extent->get_laddr(), *extent);
if constexpr (SYNC) {
return retire_extent_sync(t, extent);
} else {
using namespace std::chrono_literals;
return seastar::sleep(1us).then([this, &t, extent] {
return retire_extent_sync(t, extent);
});
}
}
getsuper_iertr::future<Super::URef> get_super(
Transaction& t, RootNodeTracker& tracker) override {
SUBTRACET(seastore_onode, "get root ...", t);
if constexpr (SYNC) {
return get_super_sync(t, tracker);
} else {
using namespace std::chrono_literals;
return seastar::sleep(1us).then([this, &t, &tracker] {
return get_super_sync(t, tracker);
});
}
}
std::ostream& print(std::ostream& os) const override {
return os << "DummyNodeExtentManager(sync=" << SYNC << ")";
}
private:
read_iertr::future<NodeExtentRef> read_extent_sync(
Transaction& t, laddr_t addr) {
auto iter = allocate_map.find(addr);
assert(iter != allocate_map.end());
auto extent = iter->second;
SUBTRACET(seastore_onode,
"read {}B at {:#x} -- {}",
t, extent->get_length(), extent->get_laddr(), *extent);
assert(extent->get_laddr() == addr);
return read_iertr::make_ready_future<NodeExtentRef>(extent);
}
alloc_iertr::future<NodeExtentRef> alloc_extent_sync(
Transaction& t, extent_len_t len) {
assert(len % ALIGNMENT == 0);
auto r = ceph::buffer::create_aligned(len, ALIGNMENT);
auto addr = reinterpret_cast<laddr_t>(r->get_data());
auto bp = ceph::bufferptr(std::move(r));
auto extent = Ref<DummyNodeExtent>(new DummyNodeExtent(std::move(bp)));
extent->set_laddr(addr);
assert(allocate_map.find(extent->get_laddr()) == allocate_map.end());
allocate_map.insert({extent->get_laddr(), extent});
SUBDEBUGT(seastore_onode,
"allocated {}B at {:#x} -- {}",
t, extent->get_length(), extent->get_laddr(), *extent);
assert(extent->get_length() == len);
return alloc_iertr::make_ready_future<NodeExtentRef>(extent);
}
retire_iertr::future<> retire_extent_sync(
Transaction& t, NodeExtentRef _extent) {
auto& extent = static_cast<DummyNodeExtent&>(*_extent.get());
auto addr = extent.get_laddr();
auto len = extent.get_length();
extent.retire();
auto iter = allocate_map.find(addr);
assert(iter != allocate_map.end());
allocate_map.erase(iter);
SUBDEBUGT(seastore_onode, "retired {}B at {:#x}", t, len, addr);
return retire_iertr::now();
}
getsuper_iertr::future<Super::URef> get_super_sync(
Transaction& t, RootNodeTracker& tracker) {
SUBTRACET(seastore_onode, "got root {:#x}", t, root_laddr);
return getsuper_iertr::make_ready_future<Super::URef>(
Super::URef(new DummySuper(t, tracker, &root_laddr)));
}
static LOG_PREFIX(OTree::Dummy);
std::map<laddr_t, Ref<DummyNodeExtent>> allocate_map;
laddr_t root_laddr = L_ADDR_NULL;
};
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::onode::DummyNodeExtent> : fmt::ostream_formatter {};
#endif
| 6,425 | 31.619289 | 109 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager/seastore.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "seastore.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/node_extent_accessor.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/stages/node_stage_layout.h"
namespace {
LOG_PREFIX(OTree::Seastore);
}
SET_SUBSYS(seastore_onode);
namespace crimson::os::seastore::onode {
static DeltaRecorderURef create_replay_recorder(
node_type_t node_type, field_type_t field_type)
{
if (node_type == node_type_t::LEAF) {
if (field_type == field_type_t::N0) {
return DeltaRecorderT<node_fields_0_t, node_type_t::LEAF>::create_for_replay();
} else if (field_type == field_type_t::N1) {
return DeltaRecorderT<node_fields_1_t, node_type_t::LEAF>::create_for_replay();
} else if (field_type == field_type_t::N2) {
return DeltaRecorderT<node_fields_2_t, node_type_t::LEAF>::create_for_replay();
} else if (field_type == field_type_t::N3) {
return DeltaRecorderT<leaf_fields_3_t, node_type_t::LEAF>::create_for_replay();
} else {
ceph_abort("impossible path");
}
} else if (node_type == node_type_t::INTERNAL) {
if (field_type == field_type_t::N0) {
return DeltaRecorderT<node_fields_0_t, node_type_t::INTERNAL>::create_for_replay();
} else if (field_type == field_type_t::N1) {
return DeltaRecorderT<node_fields_1_t, node_type_t::INTERNAL>::create_for_replay();
} else if (field_type == field_type_t::N2) {
return DeltaRecorderT<node_fields_2_t, node_type_t::INTERNAL>::create_for_replay();
} else if (field_type == field_type_t::N3) {
return DeltaRecorderT<internal_fields_3_t, node_type_t::INTERNAL>::create_for_replay();
} else {
ceph_abort("impossible path");
}
} else {
ceph_abort("impossible path");
}
}
NodeExtentRef SeastoreNodeExtent::mutate(
context_t c, DeltaRecorderURef&& _recorder)
{
DEBUGT("mutate {} ...", c.t, *this);
auto p_handle = static_cast<TransactionManagerHandle*>(&c.nm);
auto extent = p_handle->tm.get_mutable_extent(c.t, this);
auto ret = extent->cast<SeastoreNodeExtent>();
  // A replayed extent may already have an empty recorder; we discard it
  // for simplicity.
assert(!ret->recorder || ret->recorder->is_empty());
ret->recorder = std::move(_recorder);
return ret;
}
void SeastoreNodeExtent::apply_delta(const ceph::bufferlist& bl)
{
DEBUG("replay {} ...", *this);
if (!recorder) {
auto header = get_header();
auto field_type = header.get_field_type();
if (!field_type.has_value()) {
ERROR("replay got invalid node -- {}", *this);
ceph_abort("fatal error");
}
auto node_type = header.get_node_type();
recorder = create_replay_recorder(node_type, *field_type);
} else {
#ifndef NDEBUG
auto header = get_header();
assert(recorder->node_type() == header.get_node_type());
assert(recorder->field_type() == *header.get_field_type());
#endif
}
auto mut = do_get_mutable();
auto p = bl.cbegin();
while (p != bl.end()) {
recorder->apply_delta(p, mut, *this);
}
  DEBUG("replay done!");
}
}
| 3,145 | 33.571429 | 93 | cc |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager/seastore.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <random>
#include "crimson/os/seastore/logging.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/node_delta_recorder.h"
/**
* seastore.h
*
* Seastore backend implementations.
*/
namespace crimson::os::seastore::onode {
class SeastoreSuper final: public Super {
public:
SeastoreSuper(Transaction& t, RootNodeTracker& tracker,
laddr_t root_addr, TransactionManager& tm)
: Super(t, tracker), root_addr{root_addr}, tm{tm} {}
~SeastoreSuper() override = default;
protected:
laddr_t get_root_laddr() const override {
return root_addr;
}
void write_root_laddr(context_t c, laddr_t addr) override {
LOG_PREFIX(OTree::Seastore);
SUBDEBUGT(seastore_onode, "update root {:#x} ...", c.t, addr);
root_addr = addr;
tm.write_onode_root(c.t, addr);
}
private:
laddr_t root_addr;
TransactionManager &tm;
};
class SeastoreNodeExtent final: public NodeExtent {
public:
SeastoreNodeExtent(ceph::bufferptr &&ptr)
: NodeExtent(std::move(ptr)) {}
SeastoreNodeExtent(const SeastoreNodeExtent& other)
: NodeExtent(other) {}
~SeastoreNodeExtent() override = default;
constexpr static extent_types_t TYPE = extent_types_t::ONODE_BLOCK_STAGED;
extent_types_t get_type() const override {
return TYPE;
}
protected:
NodeExtentRef mutate(context_t, DeltaRecorderURef&&) override;
DeltaRecorder* get_recorder() const override {
return recorder.get();
}
CachedExtentRef duplicate_for_write(Transaction&) override {
return CachedExtentRef(new SeastoreNodeExtent(*this));
}
ceph::bufferlist get_delta() override {
assert(recorder);
return recorder->get_delta();
}
void apply_delta(const ceph::bufferlist&) override;
private:
DeltaRecorderURef recorder;
};
class TransactionManagerHandle : public NodeExtentManager {
public:
TransactionManagerHandle(TransactionManager &tm) : tm{tm} {}
TransactionManager &tm;
};
template <bool INJECT_EAGAIN=false>
class SeastoreNodeExtentManager final: public TransactionManagerHandle {
public:
SeastoreNodeExtentManager(
TransactionManager &tm, laddr_t min, double p_eagain)
: TransactionManagerHandle(tm), addr_min{min}, p_eagain{p_eagain} {
if constexpr (INJECT_EAGAIN) {
assert(p_eagain > 0.0 && p_eagain < 1.0);
} else {
assert(p_eagain == 0.0);
}
}
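  // Construction sketch (`tm` is a TransactionManager; the minimum laddr and
  // the 5% eagain probability are arbitrary illustration values):
  //
  //   auto nm = NodeExtentManagerURef{
  //     new SeastoreNodeExtentManager<true>(tm, min_laddr, 0.05)};
  //
  // The production path uses INJECT_EAGAIN=false and p_eagain == 0.0.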
~SeastoreNodeExtentManager() override = default;
void set_generate_eagain(bool enable) {
generate_eagain = enable;
}
protected:
bool is_read_isolated() const override { return true; }
read_iertr::future<NodeExtentRef> read_extent(
Transaction& t, laddr_t addr) override {
SUBTRACET(seastore_onode, "reading at {:#x} ...", t, addr);
if constexpr (INJECT_EAGAIN) {
if (trigger_eagain()) {
SUBDEBUGT(seastore_onode, "reading at {:#x}: trigger eagain", t, addr);
t.test_set_conflict();
return read_iertr::make_ready_future<NodeExtentRef>();
}
}
return tm.read_extent<SeastoreNodeExtent>(t, addr
).si_then([addr, &t](auto&& e) -> read_iertr::future<NodeExtentRef> {
SUBTRACET(seastore_onode,
"read {}B at {:#x} -- {}",
t, e->get_length(), e->get_laddr(), *e);
assert(e->get_laddr() == addr);
std::ignore = addr;
return read_iertr::make_ready_future<NodeExtentRef>(e);
});
}
alloc_iertr::future<NodeExtentRef> alloc_extent(
Transaction& t, laddr_t hint, extent_len_t len) override {
SUBTRACET(seastore_onode, "allocating {}B with hint {:#x} ...", t, len, hint);
if constexpr (INJECT_EAGAIN) {
if (trigger_eagain()) {
SUBDEBUGT(seastore_onode, "allocating {}B: trigger eagain", t, len);
t.test_set_conflict();
return alloc_iertr::make_ready_future<NodeExtentRef>();
}
}
return tm.alloc_extent<SeastoreNodeExtent>(t, hint, len
).si_then([len, &t](auto extent) {
SUBDEBUGT(seastore_onode,
"allocated {}B at {:#x} -- {}",
t, extent->get_length(), extent->get_laddr(), *extent);
if (!extent->is_initial_pending()) {
SUBERRORT(seastore_onode,
"allocated {}B but got invalid extent: {}",
t, len, *extent);
ceph_abort("fatal error");
}
assert(extent->get_length() == len);
std::ignore = len;
return NodeExtentRef(extent);
});
}
retire_iertr::future<> retire_extent(
Transaction& t, NodeExtentRef _extent) override {
LogicalCachedExtentRef extent = _extent;
auto addr = extent->get_laddr();
auto len = extent->get_length();
SUBDEBUGT(seastore_onode,
"retiring {}B at {:#x} -- {} ...",
t, len, addr, *extent);
if constexpr (INJECT_EAGAIN) {
if (trigger_eagain()) {
SUBDEBUGT(seastore_onode,
"retiring {}B at {:#x} -- {} : trigger eagain",
t, len, addr, *extent);
t.test_set_conflict();
return retire_iertr::now();
}
}
return tm.dec_ref(t, extent).si_then([addr, len, &t] (unsigned cnt) {
assert(cnt == 0);
SUBTRACET(seastore_onode, "retired {}B at {:#x} ...", t, len, addr);
});
}
getsuper_iertr::future<Super::URef> get_super(
Transaction& t, RootNodeTracker& tracker) override {
SUBTRACET(seastore_onode, "get root ...", t);
if constexpr (INJECT_EAGAIN) {
if (trigger_eagain()) {
SUBDEBUGT(seastore_onode, "get root: trigger eagain", t);
t.test_set_conflict();
return getsuper_iertr::make_ready_future<Super::URef>();
}
}
return tm.read_onode_root(t).si_then([this, &t, &tracker](auto root_addr) {
SUBTRACET(seastore_onode, "got root {:#x}", t, root_addr);
return Super::URef(new SeastoreSuper(t, tracker, root_addr, tm));
});
}
std::ostream& print(std::ostream& os) const override {
os << "SeastoreNodeExtentManager";
if constexpr (INJECT_EAGAIN) {
os << "(p_eagain=" << p_eagain << ")";
}
return os;
}
private:
static LOG_PREFIX(OTree::Seastore);
const laddr_t addr_min;
// XXX: conditional members by INJECT_EAGAIN
bool trigger_eagain() {
if (generate_eagain) {
double dice = rd();
assert(rd.min() == 0);
dice /= rd.max();
return dice <= p_eagain;
} else {
return false;
}
}
bool generate_eagain = true;
std::random_device rd;
double p_eagain;
};
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::onode::SeastoreNodeExtent> : fmt::ostream_formatter {};
#endif
| 6,732 | 29.058036 | 112 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager/test_replay.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/onode_manager/staged-fltree/node_delta_recorder.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager.h"
/** test_replay.h
*
* A special version of NodeExtent to help verify delta encode, decode and
 * replay in recorder_t in debug builds.
*/
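/*
 * Intended flow (sketch; `extent` and `recorder` are hypothetical locals of
 * the debug-only caller):
 *
 *   auto test_extent = TestReplayExtent::create(
 *       extent->get_length(), std::move(recorder));
 *   test_extent->prepare_replay(extent);     // snapshot the pre-mutation image
 *   // ... apply and record mutations on `extent` ...
 *   test_extent->replay_and_verify(extent);  // replay the recorded deltas onto
 *                                            // the snapshot and compare bytes
 */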
namespace crimson::os::seastore::onode {
class TestReplayExtent final: public NodeExtent {
public:
using Ref = crimson::os::seastore::TCachedExtentRef<TestReplayExtent>;
void prepare_replay(NodeExtentRef from_extent) {
assert(get_length() == from_extent->get_length());
auto mut = do_get_mutable();
std::memcpy(mut.get_write(), from_extent->get_read(), get_length());
}
void replay_and_verify(NodeExtentRef replayed_extent) {
assert(get_length() == replayed_extent->get_length());
auto mut = do_get_mutable();
auto bl = recorder->get_delta();
assert(bl.length());
auto p = bl.cbegin();
recorder->apply_delta(p, mut, *this);
assert(p == bl.end());
auto cmp = std::memcmp(get_read(), replayed_extent->get_read(), get_length());
ceph_assert(cmp == 0 && "replay mismatch!");
}
static Ref create(extent_len_t length, DeltaRecorderURef&& recorder) {
auto r = ceph::buffer::create_aligned(length, 4096);
auto bp = ceph::bufferptr(std::move(r));
return new TestReplayExtent(std::move(bp), std::move(recorder));
}
protected:
NodeExtentRef mutate(context_t, DeltaRecorderURef&&) override {
ceph_abort("impossible path"); }
DeltaRecorder* get_recorder() const override {
ceph_abort("impossible path"); }
CachedExtentRef duplicate_for_write(Transaction&) override {
ceph_abort("impossible path"); }
extent_types_t get_type() const override {
return extent_types_t::TEST_BLOCK; }
ceph::bufferlist get_delta() override {
ceph_abort("impossible path"); }
void apply_delta(const ceph::bufferlist&) override {
ceph_abort("impossible path"); }
private:
TestReplayExtent(ceph::bufferptr&& ptr, DeltaRecorderURef&& recorder)
: NodeExtent(std::move(ptr)), recorder(std::move(recorder)) {
state = extent_state_t::MUTATION_PENDING;
}
DeltaRecorderURef recorder;
};
}
| 2,300 | 32.838235 | 82 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/stages/item_iterator_stage.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include "item_iterator_stage.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/node_extent_mutable.h"
namespace crimson::os::seastore::onode {
#define ITER_T item_iterator_t<NODE_TYPE>
#define ITER_INST(NT) item_iterator_t<NT>
template <node_type_t NODE_TYPE>
template <IsFullKey Key>
memory_range_t ITER_T::insert_prefix(
NodeExtentMutable& mut, const ITER_T& iter, const Key& key,
bool is_end, node_offset_t size, const char* p_left_bound)
{
// 1. insert range
char* p_insert;
if (is_end) {
assert(!iter.has_next());
p_insert = const_cast<char*>(iter.p_start());
} else {
p_insert = const_cast<char*>(iter.p_end());
}
char* p_insert_front = p_insert - size;
// 2. shift memory
const char* p_shift_start = p_left_bound;
const char* p_shift_end = p_insert;
mut.shift_absolute(p_shift_start,
p_shift_end - p_shift_start,
-(int)size);
// 3. append header
p_insert -= sizeof(node_offset_t);
node_offset_t back_offset = (p_insert - p_insert_front);
mut.copy_in_absolute(p_insert, back_offset);
ns_oid_view_t::append(mut, key, p_insert);
return {p_insert_front, p_insert};
}
#define IP_TEMPLATE(NT, Key) \
template memory_range_t ITER_INST(NT)::insert_prefix<Key>( \
NodeExtentMutable&, const ITER_INST(NT)&, const Key&, \
bool, node_offset_t, const char*)
IP_TEMPLATE(node_type_t::LEAF, key_view_t);
IP_TEMPLATE(node_type_t::INTERNAL, key_view_t);
IP_TEMPLATE(node_type_t::LEAF, key_hobj_t);
IP_TEMPLATE(node_type_t::INTERNAL, key_hobj_t);
template <node_type_t NODE_TYPE>
void ITER_T::update_size(
NodeExtentMutable& mut, const ITER_T& iter, int change)
{
node_offset_t offset = iter.get_back_offset();
int new_size = change + offset;
assert(new_size > 0 && new_size < (int)mut.get_length());
mut.copy_in_absolute(
(void*)iter.get_item_range().p_end, node_offset_t(new_size));
}
template <node_type_t NODE_TYPE>
node_offset_t ITER_T::trim_until(NodeExtentMutable& mut, const ITER_T& iter)
{
assert(iter.index() != 0);
size_t ret = iter.p_end() - iter.p_items_start;
assert(ret < mut.get_length());
return ret;
}
template <node_type_t NODE_TYPE>
node_offset_t ITER_T::trim_at(
NodeExtentMutable& mut, const ITER_T& iter, node_offset_t trimmed)
{
size_t trim_size = iter.p_start() - iter.p_items_start + trimmed;
assert(trim_size < mut.get_length());
assert(iter.get_back_offset() > trimmed);
node_offset_t new_offset = iter.get_back_offset() - trimmed;
mut.copy_in_absolute((void*)iter.item_range.p_end, new_offset);
return trim_size;
}
template <node_type_t NODE_TYPE>
node_offset_t ITER_T::erase(
NodeExtentMutable& mut, const ITER_T& iter, const char* p_left_bound)
{
node_offset_t erase_size = iter.p_end() - iter.p_start();
const char* p_shift_start = p_left_bound;
assert(p_left_bound <= iter.p_start());
extent_len_t shift_len = iter.p_start() - p_left_bound;
int shift_off = erase_size;
mut.shift_absolute(p_shift_start, shift_len, shift_off);
return erase_size;
}
#define ITER_TEMPLATE(NT) template class ITER_INST(NT)
ITER_TEMPLATE(node_type_t::LEAF);
ITER_TEMPLATE(node_type_t::INTERNAL);
#define APPEND_T ITER_T::Appender<KT>
template <node_type_t NODE_TYPE>
template <KeyT KT>
APPEND_T::Appender(NodeExtentMutable* p_mut,
const item_iterator_t& iter,
bool open) : p_mut{p_mut}
{
assert(!iter.has_next());
if (open) {
p_append = const_cast<char*>(iter.get_key().p_start());
p_offset_while_open = const_cast<char*>(iter.item_range.p_end);
} else {
// XXX: this doesn't need to advance the iter to last
p_append = const_cast<char*>(iter.p_items_start);
}
}
template <node_type_t NODE_TYPE>
template <KeyT KT>
bool APPEND_T::append(const ITER_T& src, index_t& items)
{
auto p_end = src.p_end();
bool append_till_end = false;
if (is_valid_index(items)) {
for (auto i = 1u; i <= items; ++i) {
if (!src.has_next()) {
assert(i == items);
append_till_end = true;
break;
}
++src;
}
} else {
if (items == INDEX_END) {
append_till_end = true;
} else {
assert(items == INDEX_LAST);
}
items = 0;
while (src.has_next()) {
++src;
++items;
}
if (append_till_end) {
++items;
}
}
const char* p_start;
if (append_till_end) {
p_start = src.p_start();
} else {
p_start = src.p_end();
}
assert(p_end >= p_start);
size_t append_size = p_end - p_start;
p_append -= append_size;
p_mut->copy_in_absolute(p_append, p_start, append_size);
return append_till_end;
}
template <node_type_t NODE_TYPE>
template <KeyT KT>
std::tuple<NodeExtentMutable*, char*>
APPEND_T::open_nxt(const key_get_type& partial_key)
{
p_append -= sizeof(node_offset_t);
p_offset_while_open = p_append;
ns_oid_view_t::append(*p_mut, partial_key, p_append);
return {p_mut, p_append};
}
template <node_type_t NODE_TYPE>
template <KeyT KT>
std::tuple<NodeExtentMutable*, char*>
APPEND_T::open_nxt(const full_key_t<KT>& key)
{
p_append -= sizeof(node_offset_t);
p_offset_while_open = p_append;
ns_oid_view_t::append(*p_mut, key, p_append);
return {p_mut, p_append};
}
template <node_type_t NODE_TYPE>
template <KeyT KT>
void APPEND_T::wrap_nxt(char* _p_append)
{
assert(_p_append < p_append);
p_mut->copy_in_absolute(
p_offset_while_open, node_offset_t(p_offset_while_open - _p_append));
p_append = _p_append;
}
#define APPEND_TEMPLATE(NT, KT) template class ITER_INST(NT)::Appender<KT>
APPEND_TEMPLATE(node_type_t::LEAF, KeyT::VIEW);
APPEND_TEMPLATE(node_type_t::INTERNAL, KeyT::VIEW);
APPEND_TEMPLATE(node_type_t::LEAF, KeyT::HOBJ);
APPEND_TEMPLATE(node_type_t::INTERNAL, KeyT::HOBJ);
}
| 5,914 | 28.137931 | 80 | cc |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/stages/item_iterator_stage.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/onode_manager/staged-fltree/node_types.h"
#include "key_layout.h"
#include "stage_types.h"
namespace crimson::os::seastore::onode {
class NodeExtentMutable;
/**
* item_iterator_t
*
* The STAGE_STRING implementation for node N0/N1, implements staged contract
* as an iterative container to resolve crush hash conflicts.
*
 * The layout of the container to index ns, oid strings, storing n items:
*
* # <--------- container range ---------> #
* #<~># items [i+1, n) #
* # # items [0, i) #<~>#
* # # <------ item i -------------> # #
* # # <--- item_range ---> | # #
* # # | # #
* # # next-stage | ns-oid | back_ # #
 * # # container | strings | offset # #
* #...# range | | #...#
* ^ ^ | ^
* | | | |
* | +---------------------------+ |
* + p_items_start p_items_end +
*/
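/*
 * Iteration sketch (`container_range` is a container_range_t covering this
 * stage): index 0 is the item adjacent to p_items_end, and each increment
 * moves the item_range towards p_items_start:
 *
 *   item_iterator_t<NODE_TYPE> iter{container_range};
 *   while (true) {
 *     const ns_oid_view_t& key = iter.get_key();  // shared ns/oid strings
 *     auto nxt = iter.get_nxt_container();        // descend into next stage
 *     // ... consume key/nxt ...
 *     if (!iter.has_next()) break;
 *     ++iter;
 *   }
 */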
template <node_type_t NODE_TYPE>
class item_iterator_t {
using value_input_t = value_input_type_t<NODE_TYPE>;
using value_t = value_type_t<NODE_TYPE>;
public:
item_iterator_t(const container_range_t& range)
: node_size{range.node_size},
p_items_start(range.range.p_start),
p_items_end(range.range.p_end) {
assert(is_valid_node_size(node_size));
assert(p_items_start < p_items_end);
next_item_range(p_items_end);
}
const char* p_start() const { return item_range.p_start; }
const char* p_end() const { return item_range.p_end + sizeof(node_offset_t); }
const memory_range_t& get_item_range() const { return item_range; }
node_offset_t get_back_offset() const { return back_offset; }
// container type system
using key_get_type = const ns_oid_view_t&;
static constexpr auto CONTAINER_TYPE = ContainerType::ITERATIVE;
index_t index() const { return _index; }
key_get_type get_key() const {
if (!key.has_value()) {
key = ns_oid_view_t(item_range.p_end);
assert(item_range.p_start < (*key).p_start());
}
return *key;
}
node_offset_t size() const {
size_t ret = item_range.p_end - item_range.p_start + sizeof(node_offset_t);
assert(ret < node_size);
return ret;
};
node_offset_t size_to_nxt() const {
size_t ret = get_key().size() + sizeof(node_offset_t);
assert(ret < node_size);
return ret;
}
node_offset_t size_overhead() const {
return sizeof(node_offset_t) + get_key().size_overhead();
}
container_range_t get_nxt_container() const {
return {{item_range.p_start, get_key().p_start()}, node_size};
}
bool has_next() const {
assert(p_items_start <= item_range.p_start);
return p_items_start < item_range.p_start;
}
const item_iterator_t<NODE_TYPE>& operator++() const {
assert(has_next());
next_item_range(item_range.p_start);
key.reset();
++_index;
return *this;
}
void encode(const char* p_node_start, ceph::bufferlist& encoded) const {
int start_offset = p_items_start - p_node_start;
int stage_size = p_items_end - p_items_start;
assert(start_offset > 0);
assert(stage_size > 0);
assert(start_offset + stage_size <= (int)node_size);
ceph::encode(static_cast<node_offset_t>(start_offset), encoded);
ceph::encode(static_cast<node_offset_t>(stage_size), encoded);
ceph::encode(_index, encoded);
}
static item_iterator_t decode(const char* p_node_start,
extent_len_t node_size,
ceph::bufferlist::const_iterator& delta) {
node_offset_t start_offset;
ceph::decode(start_offset, delta);
node_offset_t stage_size;
ceph::decode(stage_size, delta);
assert(start_offset > 0);
assert(stage_size > 0);
assert((unsigned)start_offset + stage_size <= node_size);
index_t index;
ceph::decode(index, delta);
item_iterator_t ret({{p_node_start + start_offset,
p_node_start + start_offset + stage_size},
node_size});
while (index > 0) {
++ret;
--index;
}
return ret;
}
static node_offset_t header_size() { return 0u; }
template <IsFullKey Key>
static node_offset_t estimate_insert(
const Key& key, const value_input_t&) {
return ns_oid_view_t::estimate_size(key) + sizeof(node_offset_t);
}
template <IsFullKey Key>
static memory_range_t insert_prefix(
NodeExtentMutable& mut, const item_iterator_t<NODE_TYPE>& iter,
const Key& key, bool is_end,
node_offset_t size, const char* p_left_bound);
static void update_size(
NodeExtentMutable& mut, const item_iterator_t<NODE_TYPE>& iter, int change);
static node_offset_t trim_until(NodeExtentMutable&, const item_iterator_t<NODE_TYPE>&);
static node_offset_t trim_at(
NodeExtentMutable&, const item_iterator_t<NODE_TYPE>&, node_offset_t trimmed);
static node_offset_t erase(
NodeExtentMutable&, const item_iterator_t<NODE_TYPE>&, const char*);
template <KeyT KT>
class Appender;
private:
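  // Seek to the item that ends right before p_end: the node_offset_t stored
  // just below p_end is the back_offset, i.e. the length of the item, so the
  // item body occupies [p_end - sizeof(node_offset_t) - back_offset,
  // p_end - sizeof(node_offset_t)).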
void next_item_range(const char* p_end) const {
auto p_item_end = p_end - sizeof(node_offset_t);
assert(p_items_start < p_item_end);
back_offset = reinterpret_cast<const node_offset_packed_t*>(p_item_end)->value;
assert(back_offset);
const char* p_item_start = p_item_end - back_offset;
assert(p_items_start <= p_item_start);
item_range = {p_item_start, p_item_end};
}
extent_len_t node_size;
const char* p_items_start;
const char* p_items_end;
mutable memory_range_t item_range;
mutable node_offset_t back_offset;
mutable std::optional<ns_oid_view_t> key;
mutable index_t _index = 0u;
};
template <node_type_t NODE_TYPE>
template <KeyT KT>
class item_iterator_t<NODE_TYPE>::Appender {
public:
Appender(NodeExtentMutable* p_mut, char* p_append)
: p_mut{p_mut}, p_append{p_append} {}
Appender(NodeExtentMutable*, const item_iterator_t&, bool open);
bool append(const item_iterator_t<NODE_TYPE>& src, index_t& items);
char* wrap() { return p_append; }
std::tuple<NodeExtentMutable*, char*> open_nxt(const key_get_type&);
std::tuple<NodeExtentMutable*, char*> open_nxt(const full_key_t<KT>&);
void wrap_nxt(char* _p_append);
private:
NodeExtentMutable* p_mut;
char* p_append;
char* p_offset_while_open;
};
}
| 6,505 | 32.536082 | 89 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/stages/key_layout.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include "key_layout.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/node_extent_mutable.h"
namespace crimson::os::seastore::onode {
void string_key_view_t::append_str(
NodeExtentMutable& mut, std::string_view str, char*& p_append)
{
assert(is_valid_size(str.length()));
p_append -= sizeof(string_size_t);
string_size_t len = str.length();
mut.copy_in_absolute(p_append, len);
p_append -= len;
mut.copy_in_absolute(p_append, str.data(), len);
}
void string_key_view_t::append_dedup(
NodeExtentMutable& mut, const Type& dedup_type, char*& p_append)
{
p_append -= sizeof(string_size_t);
if (dedup_type == Type::MIN) {
mut.copy_in_absolute(p_append, MARKER_MIN);
} else if (dedup_type == Type::MAX) {
mut.copy_in_absolute(p_append, MARKER_MAX);
} else {
ceph_abort("impossible path");
}
}
}
| 949 | 26.142857 | 80 | cc |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/stages/key_layout.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <cassert>
#include <limits>
#include <optional>
#include <ostream>
#include "common/hobject.h"
#include "crimson/os/seastore/onode.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/fwd.h"
namespace crimson::os::seastore::onode {
using shard_t = int8_t;
using pool_t = int64_t;
// Note: this is the reversed version of the object hash
using crush_hash_t = uint32_t;
using snap_t = uint64_t;
using gen_t = uint64_t;
static_assert(sizeof(shard_t) == sizeof(ghobject_t().shard_id.id));
static_assert(sizeof(pool_t) == sizeof(ghobject_t().hobj.pool));
static_assert(sizeof(crush_hash_t) == sizeof(ghobject_t().hobj.get_bitwise_key_u32()));
static_assert(sizeof(snap_t) == sizeof(ghobject_t().hobj.snap.val));
static_assert(sizeof(gen_t) == sizeof(ghobject_t().generation));
constexpr auto MAX_SHARD = std::numeric_limits<shard_t>::max();
constexpr auto MAX_POOL = std::numeric_limits<pool_t>::max();
constexpr auto MAX_CRUSH = std::numeric_limits<crush_hash_t>::max();
constexpr auto MAX_SNAP = std::numeric_limits<snap_t>::max();
constexpr auto MAX_GEN = std::numeric_limits<gen_t>::max();
class NodeExtentMutable;
class key_view_t;
class key_hobj_t;
enum class KeyT { VIEW, HOBJ };
template <KeyT> struct _full_key_type;
template<> struct _full_key_type<KeyT::VIEW> { using type = key_view_t; };
template<> struct _full_key_type<KeyT::HOBJ> { using type = key_hobj_t; };
template <KeyT type>
using full_key_t = typename _full_key_type<type>::type;
static laddr_t get_lba_hint(shard_t shard, pool_t pool, crush_hash_t crush) {
  // FIXME: It is possible that PGs from different pools share the same prefix
  // if the 0xFF mask is not long enough, resulting in unexpected transaction
  // conflicts.
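  // A worked example (illustrative only): shard=1, pool=2, crush=0x89abcdef
  // pack into 0x010289abcdef0000; the shard byte occupies bits [56, 64),
  // the low byte of the pool bits [48, 56), and the crush hash bits [16, 48).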
return ((uint64_t)(shard & 0XFF)<<56 |
(uint64_t)(pool & 0xFF)<<48 |
(uint64_t)(crush )<<16);
}
struct node_offset_packed_t {
node_offset_t value;
} __attribute__((packed));
/**
* string_key_view_t
*
 * The layout to store a char array as an oid or an ns string, which may be
 * compressed.
*
* (TODO) If compressed, the physical block only stores an unsigned int of
* string_size_t, with value MARKER_MIN denoting Type::MIN, and value
* MARKER_MAX denoting Type::MAX.
*
* If not compressed (Type::STR), the physical block stores the char array and
* a valid string_size_t value.
*/
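/*
 * A worked example (illustrative): string_key_view_t::append_str() with the
 * 3-char string "abc" and p_append initially at address X writes, from lower
 * to higher addresses,
 *   [X - sizeof(string_size_t) - 3, X - sizeof(string_size_t)): 'a' 'b' 'c'
 *   [X - sizeof(string_size_t), X):                             length = 3
 * and leaves p_append at the start of the chars; string_key_view_t(X) then
 * locates p_length and p_key accordingly.
 */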
struct string_key_view_t {
enum class Type {MIN, STR, MAX};
static constexpr auto MARKER_MAX = std::numeric_limits<string_size_t>::max();
static constexpr auto MARKER_MIN = std::numeric_limits<string_size_t>::max() - 1;
static constexpr auto VALID_UPPER_BOUND = std::numeric_limits<string_size_t>::max() - 2;
static bool is_valid_size(size_t size) {
return size <= VALID_UPPER_BOUND;
}
string_key_view_t(const char* p_end) {
p_length = p_end - sizeof(string_size_t);
std::memcpy(&length, p_length, sizeof(string_size_t));
if (is_valid_size(length)) {
auto _p_key = p_length - length;
p_key = static_cast<const char*>(_p_key);
} else {
assert(length == MARKER_MAX || length == MARKER_MIN);
p_key = nullptr;
}
}
Type type() const {
if (length == MARKER_MIN) {
return Type::MIN;
} else if (length == MARKER_MAX) {
return Type::MAX;
} else {
assert(is_valid_size(length));
return Type::STR;
}
}
const char* p_start() const {
if (p_key) {
return p_key;
} else {
return p_length;
}
}
const char* p_next_end() const {
if (p_key) {
return p_start();
} else {
return p_length + sizeof(string_size_t);
}
}
node_offset_t size() const {
size_t ret = length + sizeof(string_size_t);
assert(ret < MAX_NODE_SIZE);
return ret;
}
node_offset_t size_logical() const {
assert(type() == Type::STR);
assert(is_valid_size(length));
return length;
}
node_offset_t size_overhead() const {
assert(type() == Type::STR);
return sizeof(string_size_t);
}
std::string_view to_string_view() const {
assert(type() == Type::STR);
assert(is_valid_size(length));
return {p_key, length};
}
bool operator==(const string_key_view_t& x) const {
if (type() == x.type() && type() != Type::STR)
return true;
if (type() != x.type())
return false;
if (length != x.length)
return false;
return (memcmp(p_key, x.p_key, length) == 0);
}
bool operator!=(const string_key_view_t& x) const { return !(*this == x); }
void reset_to(const char* origin_base,
const char* new_base,
extent_len_t node_size) {
reset_ptr(p_key, origin_base, new_base, node_size);
reset_ptr(p_length, origin_base, new_base, node_size);
#ifndef NDEBUG
string_size_t current_length;
std::memcpy(¤t_length, p_length, sizeof(string_size_t));
assert(length == current_length);
#endif
}
static void append_str(
NodeExtentMutable&, std::string_view, char*& p_append);
static void test_append_str(std::string_view str, char*& p_append) {
assert(is_valid_size(str.length()));
p_append -= sizeof(string_size_t);
string_size_t len = str.length();
std::memcpy(p_append, &len, sizeof(string_size_t));
p_append -= len;
std::memcpy(p_append, str.data(), len);
}
static void append_dedup(
NodeExtentMutable&, const Type& dedup_type, char*& p_append);
static void test_append_dedup(const Type& dedup_type, char*& p_append) {
p_append -= sizeof(string_size_t);
string_size_t len;
if (dedup_type == Type::MIN) {
len = MARKER_MIN;
} else if (dedup_type == Type::MAX) {
len = MARKER_MAX;
} else {
ceph_abort("impossible path");
}
std::memcpy(p_append, &len, sizeof(string_size_t));
}
const char* p_key;
const char* p_length;
// TODO: remove if p_length is aligned
string_size_t length;
};
/**
* string_view_masked_t
*
 * A common class that hides the underlying string implementation, whether it
 * is a string_key_view_t (maybe compressed), a string/string_view, or a
 * compressed string, and provides consistent compare, print, convert and
 * append operations.
*/
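// Ordering semantics (see the member and free operator<=> below): Type::MIN
// compares less than any string and Type::MAX compares greater, so
// MIN < "..." < MAX.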
class string_view_masked_t {
public:
using Type = string_key_view_t::Type;
explicit string_view_masked_t(const string_key_view_t& index)
: type{index.type()} {
if (type == Type::STR) {
view = index.to_string_view();
}
}
explicit string_view_masked_t(std::string_view str)
: type{Type::STR}, view{str} {
assert(string_key_view_t::is_valid_size(view.size()));
}
Type get_type() const { return type; }
std::string_view to_string_view() const {
assert(get_type() == Type::STR);
return view;
}
string_size_t size() const {
assert(get_type() == Type::STR);
assert(string_key_view_t::is_valid_size(view.size()));
return view.size();
}
bool operator==(const string_view_masked_t& x) const {
if (get_type() == x.get_type() && get_type() != Type::STR)
return true;
if (get_type() != x.get_type())
return false;
if (size() != x.size())
return false;
return (memcmp(view.data(), x.view.data(), size()) == 0);
}
auto operator<=>(std::string_view rhs) const {
using Type = string_view_masked_t::Type;
assert(string_key_view_t::is_valid_size(rhs.size()));
auto lhs_type = get_type();
if (lhs_type == Type::MIN) {
return std::strong_ordering::less;
} else if (lhs_type == Type::MAX) {
return std::strong_ordering::greater;
    } else { // lhs_type == Type::STR
assert(string_key_view_t::is_valid_size(size()));
return to_string_view() <=> rhs;
}
}
void encode(ceph::bufferlist& bl) const {
if (get_type() == Type::MIN) {
ceph::encode(string_key_view_t::MARKER_MIN, bl);
} else if (get_type() == Type::MAX) {
ceph::encode(string_key_view_t::MARKER_MAX, bl);
} else {
ceph::encode(size(), bl);
ceph::encode_nohead(view, bl);
}
}
static auto min() { return string_view_masked_t{Type::MIN}; }
static auto max() { return string_view_masked_t{Type::MAX}; }
static string_view_masked_t decode(
std::string& str_storage, ceph::bufferlist::const_iterator& delta) {
string_size_t size;
ceph::decode(size, delta);
if (size == string_key_view_t::MARKER_MIN) {
return min();
} else if (size == string_key_view_t::MARKER_MAX) {
return max();
} else {
ceph::decode_nohead(size, str_storage, delta);
return string_view_masked_t(str_storage);
}
}
private:
explicit string_view_masked_t(Type type)
: type{type} {}
Type type;
std::string_view view;
};
inline auto operator<=>(const string_view_masked_t& l, const string_view_masked_t& r) {
using Type = string_view_masked_t::Type;
auto l_type = l.get_type();
auto r_type = r.get_type();
if (l_type == Type::STR && r_type == Type::STR) {
assert(string_key_view_t::is_valid_size(l.size()));
assert(string_key_view_t::is_valid_size(r.size()));
return l.to_string_view() <=> r.to_string_view();
} else if (l_type == r_type) {
return std::strong_ordering::equal;
} else if (l_type == Type::MIN || r_type == Type::MAX) {
return std::strong_ordering::less;
} else { // l_type == Type::MAX || r_type == Type::MIN
return std::strong_ordering::greater;
}
}
inline std::ostream& operator<<(std::ostream& os, const string_view_masked_t& masked) {
using Type = string_view_masked_t::Type;
auto type = masked.get_type();
if (type == Type::MIN) {
return os << "MIN";
} else if (type == Type::MAX) {
return os << "MAX";
} else { // type == Type::STR
auto view = masked.to_string_view();
if (view.length() <= 12) {
os << "\"" << view << "\"";
} else {
os << "\"" << std::string_view(view.data(), 4) << ".."
<< std::string_view(view.data() + view.length() - 2, 2)
<< "/" << view.length() << "B\"";
}
return os;
}
}
struct ns_oid_view_t {
using Type = string_key_view_t::Type;
ns_oid_view_t(const char* p_end) : nspace(p_end), oid(nspace.p_next_end()) {}
Type type() const { return oid.type(); }
const char* p_start() const { return oid.p_start(); }
node_offset_t size() const {
if (type() == Type::STR) {
size_t ret = nspace.size() + oid.size();
assert(ret < MAX_NODE_SIZE);
return ret;
} else {
return sizeof(string_size_t);
}
}
node_offset_t size_logical() const {
assert(type() == Type::STR);
return nspace.size_logical() + oid.size_logical();
}
node_offset_t size_overhead() const {
assert(type() == Type::STR);
return nspace.size_overhead() + oid.size_overhead();
}
bool operator==(const ns_oid_view_t& x) const {
return (string_view_masked_t{nspace} == string_view_masked_t{x.nspace} &&
string_view_masked_t{oid} == string_view_masked_t{x.oid});
}
void reset_to(const char* origin_base,
const char* new_base,
extent_len_t node_size) {
nspace.reset_to(origin_base, new_base, node_size);
oid.reset_to(origin_base, new_base, node_size);
}
template <typename Key>
static node_offset_t estimate_size(const Key& key);
template <typename Key>
static void append(NodeExtentMutable&,
const Key& key,
char*& p_append);
static void append(NodeExtentMutable& mut,
const ns_oid_view_t& view,
char*& p_append) {
if (view.type() == Type::STR) {
string_key_view_t::append_str(mut, view.nspace.to_string_view(), p_append);
string_key_view_t::append_str(mut, view.oid.to_string_view(), p_append);
} else {
string_key_view_t::append_dedup(mut, view.type(), p_append);
}
}
template <typename Key>
static void test_append(const Key& key, char*& p_append);
string_key_view_t nspace;
string_key_view_t oid;
};
inline std::ostream& operator<<(std::ostream& os, const ns_oid_view_t& ns_oid) {
return os << string_view_masked_t{ns_oid.nspace} << ","
<< string_view_masked_t{ns_oid.oid};
}
inline auto operator<=>(const ns_oid_view_t& l, const ns_oid_view_t& r) {
auto ret = (string_view_masked_t{l.nspace} <=> string_view_masked_t{r.nspace});
if (ret != 0)
return ret;
return string_view_masked_t{l.oid} <=> string_view_masked_t{r.oid};
}
inline const ghobject_t _MIN_OID() {
assert(ghobject_t().is_min());
// don't extern _MIN_OID
return ghobject_t();
}
/*
 * Unfortunately the ghobject_t representation used as the tree key doesn't
 * have a max field, so we define our own _MAX_OID and translate it from/to
 * ghobject_t::get_max() if necessary.
*/
inline const ghobject_t _MAX_OID() {
auto ret = ghobject_t(shard_id_t(MAX_SHARD), MAX_POOL, MAX_CRUSH,
"MAX", "MAX", MAX_SNAP, MAX_GEN);
assert(ret.hobj.get_hash() == ret.hobj.get_bitwise_key_u32());
return ret;
}
// the valid key stored in tree should be in the range of (_MIN_OID, _MAX_OID)
template <typename Key>
bool is_valid_key(const Key& key);
/**
* key_hobj_t
*
* A specialized implementation of a full_key_t storing a ghobject_t passed
 * from the user.
*/
class key_hobj_t {
public:
explicit key_hobj_t(const ghobject_t& _ghobj) {
if (_ghobj.is_max()) {
ghobj = _MAX_OID();
} else {
// including when _ghobj.is_min()
ghobj = _ghobj;
}
// I can be in the range of [_MIN_OID, _MAX_OID]
assert(ghobj >= _MIN_OID());
assert(ghobj <= _MAX_OID());
}
/*
* common interfaces as a full_key_t
*/
shard_t shard() const {
return ghobj.shard_id;
}
pool_t pool() const {
return ghobj.hobj.pool;
}
crush_hash_t crush() const {
// Note: this is the reversed version of the object hash
return ghobj.hobj.get_bitwise_key_u32();
}
laddr_t get_hint() const {
return get_lba_hint(shard(), pool(), crush());
}
std::string_view nspace() const {
// TODO(cross-node string dedup)
return ghobj.hobj.nspace;
}
string_view_masked_t nspace_masked() const {
// TODO(cross-node string dedup)
return string_view_masked_t{nspace()};
}
std::string_view oid() const {
// TODO(cross-node string dedup)
return ghobj.hobj.oid.name;
}
string_view_masked_t oid_masked() const {
// TODO(cross-node string dedup)
return string_view_masked_t{oid()};
}
ns_oid_view_t::Type dedup_type() const {
return _dedup_type;
}
snap_t snap() const {
return ghobj.hobj.snap;
}
gen_t gen() const {
return ghobj.generation;
}
std::ostream& dump(std::ostream& os) const {
os << "key_hobj(" << (int)shard() << ","
<< pool() << ",0x" << std::hex << crush() << std::dec << "; "
<< string_view_masked_t{nspace()} << ","
<< string_view_masked_t{oid()} << "; "
<< snap() << "," << gen() << ")";
return os;
}
bool is_valid() const {
return is_valid_key(*this);
}
static key_hobj_t decode(ceph::bufferlist::const_iterator& delta) {
shard_t shard;
ceph::decode(shard, delta);
pool_t pool;
ceph::decode(pool, delta);
// Note: this is the reversed version of the object hash
crush_hash_t crush;
ceph::decode(crush, delta);
std::string nspace;
[[maybe_unused]] auto nspace_masked = string_view_masked_t::decode(nspace, delta);
// TODO(cross-node string dedup)
assert(nspace_masked.get_type() == string_view_masked_t::Type::STR);
std::string oid;
[[maybe_unused]] auto oid_masked = string_view_masked_t::decode(oid, delta);
// TODO(cross-node string dedup)
assert(oid_masked.get_type() == string_view_masked_t::Type::STR);
snap_t snap;
ceph::decode(snap, delta);
gen_t gen;
ceph::decode(gen, delta);
return key_hobj_t(ghobject_t(
shard_id_t(shard), pool, crush, nspace, oid, snap, gen));
}
private:
ns_oid_view_t::Type _dedup_type = ns_oid_view_t::Type::STR;
ghobject_t ghobj;
};
inline std::ostream& operator<<(std::ostream& os, const key_hobj_t& key) {
return key.dump(os);
}
struct shard_pool_t;
struct crush_t;
struct shard_pool_crush_t;
struct snap_gen_t;
/**
* key_view_t
*
* A specialized implementation of a full_key_t pointing to the locations
* storing the full key in a tree node.
*/
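// A key_view_t is typically assembled incrementally while descending the
// staged layout: each stage contributes its packed part via set()/replace(),
// and the has_*() helpers report which parts have been populated so far.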
class key_view_t {
public:
/**
* common interfaces as a full_key_t
*/
inline shard_t shard() const;
inline pool_t pool() const;
inline crush_hash_t crush() const;
laddr_t get_hint() const {
return get_lba_hint(shard(), pool(), crush());
}
std::string_view nspace() const {
// TODO(cross-node string dedup)
return ns_oid_view().nspace.to_string_view();
}
string_view_masked_t nspace_masked() const {
// TODO(cross-node string dedup)
return string_view_masked_t{ns_oid_view().nspace};
}
std::string_view oid() const {
// TODO(cross-node string dedup)
return ns_oid_view().oid.to_string_view();
}
string_view_masked_t oid_masked() const {
// TODO(cross-node string dedup)
return string_view_masked_t{ns_oid_view().oid};
}
ns_oid_view_t::Type dedup_type() const {
return ns_oid_view().type();
}
inline snap_t snap() const;
inline gen_t gen() const;
/**
* key_view_t specific interfaces
*/
bool has_shard_pool() const {
return p_shard_pool != nullptr;
}
bool has_crush() const {
return p_crush != nullptr;
}
bool has_ns_oid() const {
return p_ns_oid.has_value();
}
bool has_snap_gen() const {
return p_snap_gen != nullptr;
}
const shard_pool_t& shard_pool_packed() const {
assert(has_shard_pool());
return *p_shard_pool;
}
const crush_t& crush_packed() const {
assert(has_crush());
return *p_crush;
}
const ns_oid_view_t& ns_oid_view() const {
assert(has_ns_oid());
return *p_ns_oid;
}
const snap_gen_t& snap_gen_packed() const {
assert(has_snap_gen());
return *p_snap_gen;
}
size_t size_logical() const {
return sizeof(shard_t) + sizeof(pool_t) + sizeof(crush_hash_t) +
sizeof(snap_t) + sizeof(gen_t) + ns_oid_view().size_logical();
}
ghobject_t to_ghobj() const {
assert(is_valid_key(*this));
return ghobject_t(
shard_id_t(shard()), pool(), crush(),
std::string(nspace()), std::string(oid()), snap(), gen());
}
void replace(const crush_t& key) { p_crush = &key; }
void set(const crush_t& key) {
assert(!has_crush());
replace(key);
}
inline void replace(const shard_pool_crush_t& key);
inline void set(const shard_pool_crush_t& key);
void replace(const ns_oid_view_t& key) { p_ns_oid = key; }
void set(const ns_oid_view_t& key) {
assert(!has_ns_oid());
replace(key);
}
void replace(const snap_gen_t& key) { p_snap_gen = &key; }
void set(const snap_gen_t& key) {
assert(!has_snap_gen());
replace(key);
}
void reset_to(const char* origin_base,
const char* new_base,
extent_len_t node_size) {
if (p_shard_pool != nullptr) {
reset_ptr(p_shard_pool, origin_base, new_base, node_size);
}
if (p_crush != nullptr) {
reset_ptr(p_crush, origin_base, new_base, node_size);
}
if (p_ns_oid.has_value()) {
p_ns_oid->reset_to(origin_base, new_base, node_size);
}
if (p_snap_gen != nullptr) {
reset_ptr(p_snap_gen, origin_base, new_base, node_size);
}
}
std::ostream& dump(std::ostream& os) const {
os << "key_view(";
if (has_shard_pool()) {
os << (int)shard() << "," << pool() << ",";
} else {
os << "X,X,";
}
if (has_crush()) {
os << "0x" << std::hex << crush() << std::dec << "; ";
} else {
os << "X; ";
}
if (has_ns_oid()) {
os << ns_oid_view() << "; ";
} else {
os << "X,X; ";
}
if (has_snap_gen()) {
os << snap() << "," << gen() << ")";
} else {
os << "X,X)";
}
return os;
}
private:
const shard_pool_t* p_shard_pool = nullptr;
const crush_t* p_crush = nullptr;
std::optional<ns_oid_view_t> p_ns_oid;
const snap_gen_t* p_snap_gen = nullptr;
};
template<typename T>
concept IsFullKey = std::same_as<T, key_hobj_t> || std::same_as<T, key_view_t>;
// TODO: consider alignments
struct shard_pool_t {
auto operator<=>(const shard_pool_t&) const = default;
pool_t pool() const { return _pool; }
template <IsFullKey Key>
static shard_pool_t from_key(const Key& key) {
if constexpr (std::same_as<Key, key_view_t>) {
return key.shard_pool_packed();
} else {
return {key.shard(), key.pool()};
}
}
shard_t shard;
pool_t _pool;
} __attribute__((packed));
inline std::ostream& operator<<(std::ostream& os, const shard_pool_t& sp) {
return os << (int)sp.shard << "," << sp.pool();
}
// Note: this is the reversed version of the object hash
struct crush_t {
auto operator<=>(const crush_t&) const = default;
template <IsFullKey Key>
static crush_t from_key(const Key& key) {
if constexpr (std::same_as<Key, key_view_t>) {
return key.crush_packed();
} else {
return {key.crush()};
}
}
crush_hash_t crush;
} __attribute__((packed));
inline std::ostream& operator<<(std::ostream& os, const crush_t& c) {
return os << "0x" << std::hex << c.crush << std::dec;
}
struct shard_pool_crush_t {
auto operator<=>(const shard_pool_crush_t&) const = default;
template <IsFullKey Key>
static shard_pool_crush_t from_key(const Key& key) {
return {shard_pool_t::from_key(key), crush_t::from_key(key)};
}
shard_pool_t shard_pool;
crush_t crush;
} __attribute__((packed));
inline std::ostream& operator<<(std::ostream& os, const shard_pool_crush_t& spc) {
return os << spc.shard_pool << ",0x" << std::hex << spc.crush << std::dec;
}
struct snap_gen_t {
auto operator<=>(const snap_gen_t&) const = default;
template <IsFullKey Key>
static snap_gen_t from_key(const Key& key) {
if constexpr (std::same_as<Key, key_view_t>) {
return key.snap_gen_packed();
} else {
return {key.snap(), key.gen()};
}
}
snap_t snap;
gen_t gen;
} __attribute__((packed));
inline std::ostream& operator<<(std::ostream& os, const snap_gen_t& sg) {
return os << sg.snap << "," << sg.gen;
}
shard_t key_view_t::shard() const {
return shard_pool_packed().shard;
}
pool_t key_view_t::pool() const {
return shard_pool_packed().pool();
}
crush_hash_t key_view_t::crush() const {
return crush_packed().crush;
}
snap_t key_view_t::snap() const {
return snap_gen_packed().snap;
}
gen_t key_view_t::gen() const {
return snap_gen_packed().gen;
}
void key_view_t::replace(const shard_pool_crush_t& key) {
p_shard_pool = &key.shard_pool;
}
void key_view_t::set(const shard_pool_crush_t& key) {
set(key.crush);
assert(!has_shard_pool());
replace(key);
}
template <IsFullKey Key>
void encode_key(const Key& key, ceph::bufferlist& bl) {
ceph::encode(key.shard(), bl);
ceph::encode(key.pool(), bl);
ceph::encode(key.crush(), bl);
key.nspace_masked().encode(bl);
key.oid_masked().encode(bl);
ceph::encode(key.snap(), bl);
ceph::encode(key.gen(), bl);
}
template<IsFullKey LHS, IsFullKey RHS>
std::strong_ordering operator<=>(const LHS& lhs, const RHS& rhs) noexcept {
auto ret = lhs.shard() <=> rhs.shard();
if (ret != 0)
return ret;
ret = lhs.pool() <=> rhs.pool();
if (ret != 0)
return ret;
ret = lhs.crush() <=> rhs.crush();
if (ret != 0)
return ret;
ret = lhs.nspace() <=> rhs.nspace();
if (ret != 0)
return ret;
ret = lhs.oid() <=> rhs.oid();
if (ret != 0)
return ret;
ret = lhs.snap() <=> rhs.snap();
if (ret != 0)
return ret;
return lhs.gen() <=> rhs.gen();
}
template <typename Key>
bool is_valid_key(const Key& key) {
static_assert(IsFullKey<Key>);
return (key > key_hobj_t(ghobject_t()) &&
key < key_hobj_t(ghobject_t::get_max()));
}
inline std::ostream& operator<<(std::ostream& os, const key_view_t& key) {
return key.dump(os);
}
template <IsFullKey T>
auto operator<=>(const T& key, const shard_pool_t& target) {
auto ret = key.shard() <=> target.shard;
if (ret != 0)
return ret;
return key.pool() <=> target.pool();
}
template <IsFullKey T>
auto operator<=>(const T& key, const crush_t& target) {
return key.crush() <=> target.crush;
}
template <IsFullKey T>
auto operator<=>(const T& key, const shard_pool_crush_t& target) {
auto ret = key <=> target.shard_pool;
if (ret != 0)
return ret;
return key <=> target.crush;
}
template <IsFullKey T>
auto operator<=>(const T& key, const ns_oid_view_t& target) {
auto ret = key.nspace() <=> string_view_masked_t{target.nspace};
if (ret != 0)
return ret;
return key.oid() <=> string_view_masked_t{target.oid};
}
template <IsFullKey T>
auto operator<=>(const T& key, const snap_gen_t& target) {
auto ret = key.snap() <=> target.snap;
if (ret != 0)
return ret;
return key.gen() <=> target.gen;
}
template <IsFullKey LHS, typename RHS>
bool operator==(LHS lhs, RHS rhs) {
return lhs <=> rhs == 0;
}
template <typename Key>
node_offset_t ns_oid_view_t::estimate_size(const Key& key) {
static_assert(IsFullKey<Key>);
if constexpr (std::same_as<Key, key_view_t>) {
return key.ns_oid_view().size();
} else {
if (key.dedup_type() != Type::STR) {
// size after deduplication
return sizeof(string_size_t);
} else {
return 2 * sizeof(string_size_t) + key.nspace().size() + key.oid().size();
}
}
}
template <typename Key>
void ns_oid_view_t::append(
NodeExtentMutable& mut, const Key& key, char*& p_append) {
static_assert(IsFullKey<Key>);
if (key.dedup_type() == Type::STR) {
string_key_view_t::append_str(mut, key.nspace(), p_append);
string_key_view_t::append_str(mut, key.oid(), p_append);
} else {
string_key_view_t::append_dedup(mut, key.dedup_type(), p_append);
}
}
template <typename Key>
void ns_oid_view_t::test_append(const Key& key, char*& p_append) {
static_assert(IsFullKey<Key>);
if (key.dedup_type() == Type::STR) {
string_key_view_t::test_append_str(key.nspace(), p_append);
string_key_view_t::test_append_str(key.oid(), p_append);
} else {
string_key_view_t::test_append_dedup(key.dedup_type(), p_append);
}
}
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::onode::key_hobj_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::onode::key_view_t> : fmt::ostream_formatter {};
#endif
| 26,592 | 28.190999 | 104 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/stages/node_stage.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include "node_stage.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/node_extent_mutable.h"
#include "node_stage_layout.h"
namespace crimson::os::seastore::onode {
#define NODE_T node_extent_t<FieldType, NODE_TYPE>
#define NODE_INST(FT, NT) node_extent_t<FT, NT>
template <typename FieldType, node_type_t NODE_TYPE>
const char* NODE_T::p_left_bound() const
{
if constexpr (std::is_same_v<FieldType, internal_fields_3_t>) {
// N3 internal node doesn't have the right part
return nullptr;
} else {
auto ret = p_start() +
fields().get_item_end_offset(keys(), node_size);
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
if (is_level_tail()) {
ret -= sizeof(laddr_t);
}
}
return ret;
}
}
template <typename FieldType, node_type_t NODE_TYPE>
node_offset_t NODE_T::size_to_nxt_at(index_t index) const
{
assert(index < keys());
if constexpr (FIELD_TYPE == field_type_t::N0 ||
FIELD_TYPE == field_type_t::N1) {
return FieldType::estimate_insert_one();
} else if constexpr (FIELD_TYPE == field_type_t::N2) {
auto p_end = p_start() +
p_fields->get_item_end_offset(index, node_size);
return FieldType::estimate_insert_one() + ns_oid_view_t(p_end).size();
} else {
ceph_abort("N3 node is not nested");
}
}
template <typename FieldType, node_type_t NODE_TYPE>
container_range_t NODE_T::get_nxt_container(index_t index) const
{
if constexpr (std::is_same_v<FieldType, internal_fields_3_t>) {
ceph_abort("N3 internal node doesn't have the right part");
} else {
auto item_start_offset = p_fields->get_item_start_offset(
index, node_size);
auto item_end_offset = p_fields->get_item_end_offset(
index, node_size);
assert(item_start_offset < item_end_offset);
auto item_p_start = p_start() + item_start_offset;
auto item_p_end = p_start() + item_end_offset;
if constexpr (FIELD_TYPE == field_type_t::N2) {
// range for sub_items_t<NODE_TYPE>
item_p_end = ns_oid_view_t(item_p_end).p_start();
assert(item_p_start < item_p_end);
} else {
// range for item_iterator_t<NODE_TYPE>
}
return {{item_p_start, item_p_end}, node_size};
}
}
template <typename FieldType, node_type_t NODE_TYPE>
void NODE_T::bootstrap_extent(
NodeExtentMutable& mut,
field_type_t field_type, node_type_t node_type,
bool is_level_tail, level_t level)
{
node_header_t::bootstrap_extent(
mut, field_type, node_type, is_level_tail, level);
mut.copy_in_relative(
sizeof(node_header_t), typename FieldType::num_keys_t(0u));
}
template <typename FieldType, node_type_t NODE_TYPE>
void NODE_T::update_is_level_tail(
NodeExtentMutable& mut, const node_extent_t& extent, bool value)
{
assert(mut.get_length() == extent.node_size);
assert(mut.get_read() == extent.p_start());
node_header_t::update_is_level_tail(mut, extent.p_fields->header, value);
}
template <typename FieldType, node_type_t NODE_TYPE>
template <IsFullKey Key>
memory_range_t NODE_T::insert_prefix_at(
NodeExtentMutable& mut, const node_extent_t& node, const Key& key,
index_t index, node_offset_t size, const char* p_left_bound)
{
assert(mut.get_length() == node.node_size);
assert(mut.get_read() == node.p_start());
if constexpr (FIELD_TYPE == field_type_t::N0 ||
FIELD_TYPE == field_type_t::N1) {
assert(index <= node.keys());
assert(p_left_bound == node.p_left_bound());
assert(size > FieldType::estimate_insert_one());
auto size_right = size - FieldType::estimate_insert_one();
const char* p_insert = node.p_start() +
node.fields().get_item_end_offset(index, mut.get_length());
const char* p_insert_front = p_insert - size_right;
FieldType::insert_at(mut, key, node.fields(), index, size_right);
mut.shift_absolute(p_left_bound,
p_insert - p_left_bound,
-(int)size_right);
return {p_insert_front, p_insert};
} else if constexpr (FIELD_TYPE == field_type_t::N2) {
ceph_abort("not implemented");
} else {
ceph_abort("impossible");
}
}
#define IPA_TEMPLATE(FT, NT, Key) \
template memory_range_t NODE_INST(FT, NT)::insert_prefix_at<Key>( \
NodeExtentMutable&, const node_extent_t&, const Key&, \
index_t, node_offset_t, const char*)
IPA_TEMPLATE(node_fields_0_t, node_type_t::INTERNAL, key_view_t);
IPA_TEMPLATE(node_fields_1_t, node_type_t::INTERNAL, key_view_t);
IPA_TEMPLATE(node_fields_2_t, node_type_t::INTERNAL, key_view_t);
IPA_TEMPLATE(node_fields_0_t, node_type_t::LEAF, key_view_t);
IPA_TEMPLATE(node_fields_1_t, node_type_t::LEAF, key_view_t);
IPA_TEMPLATE(node_fields_2_t, node_type_t::LEAF, key_view_t);
IPA_TEMPLATE(node_fields_0_t, node_type_t::INTERNAL, key_hobj_t);
IPA_TEMPLATE(node_fields_1_t, node_type_t::INTERNAL, key_hobj_t);
IPA_TEMPLATE(node_fields_2_t, node_type_t::INTERNAL, key_hobj_t);
IPA_TEMPLATE(node_fields_0_t, node_type_t::LEAF, key_hobj_t);
IPA_TEMPLATE(node_fields_1_t, node_type_t::LEAF, key_hobj_t);
IPA_TEMPLATE(node_fields_2_t, node_type_t::LEAF, key_hobj_t);
template <typename FieldType, node_type_t NODE_TYPE>
void NODE_T::update_size_at(
NodeExtentMutable& mut, const node_extent_t& node, index_t index, int change)
{
assert(mut.get_length() == node.node_size);
assert(mut.get_read() == node.p_start());
assert(index < node.keys());
FieldType::update_size_at(mut, node.fields(), index, change);
}
template <typename FieldType, node_type_t NODE_TYPE>
node_offset_t NODE_T::trim_until(
NodeExtentMutable& mut, const node_extent_t& node, index_t index)
{
assert(mut.get_length() == node.node_size);
assert(mut.get_read() == node.p_start());
assert(!node.is_level_tail());
auto keys = node.keys();
assert(index <= keys);
if (index == keys) {
return 0;
}
if constexpr (std::is_same_v<FieldType, internal_fields_3_t>) {
ceph_abort("not implemented");
} else {
mut.copy_in_absolute(
(void*)&node.p_fields->num_keys, num_keys_t(index));
}
// no need to calculate trim size for node
return 0;
}
template <typename FieldType, node_type_t NODE_TYPE>
node_offset_t NODE_T::trim_at(
NodeExtentMutable& mut, const node_extent_t& node,
index_t index, node_offset_t trimmed)
{
assert(mut.get_length() == node.node_size);
assert(mut.get_read() == node.p_start());
assert(!node.is_level_tail());
assert(index < node.keys());
if constexpr (std::is_same_v<FieldType, internal_fields_3_t>) {
ceph_abort("not implemented");
} else {
extent_len_t node_size = mut.get_length();
node_offset_t offset = node.p_fields->get_item_start_offset(
index, node_size);
size_t new_offset = offset + trimmed;
assert(new_offset < node.p_fields->get_item_end_offset(index, node_size));
mut.copy_in_absolute(const_cast<void*>(node.p_fields->p_offset(index)),
node_offset_t(new_offset));
mut.copy_in_absolute(
(void*)&node.p_fields->num_keys, num_keys_t(index + 1));
}
// no need to calculate trim size for node
return 0;
}
template <typename FieldType, node_type_t NODE_TYPE>
node_offset_t NODE_T::erase_at(
NodeExtentMutable& mut, const node_extent_t& node,
index_t index, const char* p_left_bound)
{
assert(mut.get_length() == node.node_size);
assert(mut.get_read() == node.p_start());
if constexpr (FIELD_TYPE == field_type_t::N0 ||
FIELD_TYPE == field_type_t::N1) {
assert(node.keys() > 0);
assert(index < node.keys());
assert(p_left_bound == node.p_left_bound());
return FieldType::erase_at(mut, node.fields(), index, p_left_bound);
} else {
ceph_abort("not implemented");
}
}
#define NODE_TEMPLATE(FT, NT) template class NODE_INST(FT, NT)
NODE_TEMPLATE(node_fields_0_t, node_type_t::INTERNAL);
NODE_TEMPLATE(node_fields_1_t, node_type_t::INTERNAL);
NODE_TEMPLATE(node_fields_2_t, node_type_t::INTERNAL);
NODE_TEMPLATE(internal_fields_3_t, node_type_t::INTERNAL);
NODE_TEMPLATE(node_fields_0_t, node_type_t::LEAF);
NODE_TEMPLATE(node_fields_1_t, node_type_t::LEAF);
NODE_TEMPLATE(node_fields_2_t, node_type_t::LEAF);
NODE_TEMPLATE(leaf_fields_3_t, node_type_t::LEAF);
#define APPEND_T node_extent_t<FieldType, NODE_TYPE>::Appender<KT>
template <typename FieldType, node_type_t NODE_TYPE>
template <KeyT KT>
APPEND_T::Appender(NodeExtentMutable* p_mut, const node_extent_t& node, bool open)
: p_mut{p_mut}, p_start{p_mut->get_write()}
{
assert(p_start == node.p_start());
assert(node.keys());
assert(node.node_size == p_mut->get_length());
extent_len_t node_size = node.node_size;
if (open) {
// seek as open_nxt()
if constexpr (FIELD_TYPE == field_type_t::N0 ||
FIELD_TYPE == field_type_t::N1) {
p_append_left = p_start + node.fields().get_key_start_offset(
node.keys() - 1, node_size);
p_append_left += sizeof(typename FieldType::key_t);
p_append_right = p_start +
node.fields().get_item_end_offset(node.keys() - 1,
node_size);
} else if constexpr (FIELD_TYPE == field_type_t::N2) {
ceph_abort("not implemented");
} else {
ceph_abort("impossible path");
}
num_keys = node.keys() - 1;
} else {
if constexpr (std::is_same_v<FieldType, internal_fields_3_t>) {
std::ignore = node_size;
ceph_abort("not implemented");
} else {
p_append_left = p_start + node.fields().get_key_start_offset(
node.keys(), node_size);
p_append_right = p_start +
node.fields().get_item_end_offset(node.keys(),
node_size);
}
num_keys = node.keys();
}
}
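// Bulk-appends items [from, from + items) of src to the node under
// construction: the fixed-size slots (keys and item offsets) are copied
// forwards at p_append_left, the variable-size next-stage containers are
// copied backwards at p_append_right, and the copied item offsets are
// re-based because the right part generally lands at a different position in
// the destination node.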
template <typename FieldType, node_type_t NODE_TYPE>
template <KeyT KT>
void APPEND_T::append(const node_extent_t& src, index_t from, index_t items)
{
assert(from <= src.keys());
if (p_src == nullptr) {
p_src = &src;
} else {
assert(p_src == &src);
}
assert(p_src->node_size == p_mut->get_length());
extent_len_t node_size = src.node_size;
if (items == 0) {
return;
}
assert(from < src.keys());
assert(from + items <= src.keys());
num_keys += items;
if constexpr (std::is_same_v<FieldType, internal_fields_3_t>) {
std::ignore = node_size;
ceph_abort("not implemented");
} else {
// append left part forwards
node_offset_t offset_left_start = src.fields().get_key_start_offset(
from, node_size);
node_offset_t offset_left_end = src.fields().get_key_start_offset(
from + items, node_size);
node_offset_t left_size = offset_left_end - offset_left_start;
if (num_keys == 0) {
// no need to adjust offset
assert(from == 0);
assert(p_start + offset_left_start == p_append_left);
p_mut->copy_in_absolute(p_append_left,
src.p_start() + offset_left_start, left_size);
} else {
node_offset_t step_size = FieldType::estimate_insert_one();
extent_len_t offset_base = src.fields().get_item_end_offset(
from, node_size);
int offset_change = p_append_right - p_start - offset_base;
auto p_offset_dst = p_append_left;
if constexpr (FIELD_TYPE != field_type_t::N2) {
// copy keys
p_mut->copy_in_absolute(p_append_left,
src.p_start() + offset_left_start, left_size);
// point to offset for update
p_offset_dst += sizeof(typename FieldType::key_t);
}
for (auto i = from; i < from + items; ++i) {
int new_offset = src.fields().get_item_start_offset(i, node_size) +
offset_change;
assert(new_offset > 0);
assert(new_offset < (int)node_size);
p_mut->copy_in_absolute(p_offset_dst, node_offset_t(new_offset));
p_offset_dst += step_size;
}
assert(p_append_left + left_size + sizeof(typename FieldType::key_t) ==
p_offset_dst);
}
p_append_left += left_size;
// append right part backwards
auto offset_right_start = src.fields().get_item_end_offset(
from + items, node_size);
auto offset_right_end = src.fields().get_item_end_offset(
from, node_size);
int right_size = offset_right_end - offset_right_start;
assert(right_size > 0);
assert(right_size < (int)node_size);
p_append_right -= right_size;
p_mut->copy_in_absolute(p_append_right,
src.p_start() + offset_right_start, node_offset_t(right_size));
}
}
template <typename FieldType, node_type_t NODE_TYPE>
template <KeyT KT>
void APPEND_T::append(
const full_key_t<KT>& key, const value_input_t& value, const value_t*& p_value)
{
if constexpr (FIELD_TYPE == field_type_t::N3) {
ceph_abort("not implemented");
} else {
ceph_abort("should not happen");
}
}
template <typename FieldType, node_type_t NODE_TYPE>
template <KeyT KT>
std::tuple<NodeExtentMutable*, char*>
APPEND_T::open_nxt(const key_get_type& partial_key)
{
if constexpr (FIELD_TYPE == field_type_t::N0 ||
FIELD_TYPE == field_type_t::N1) {
FieldType::append_key(*p_mut, partial_key, p_append_left);
} else if constexpr (FIELD_TYPE == field_type_t::N2) {
FieldType::append_key(*p_mut, partial_key, p_append_right);
} else {
ceph_abort("impossible path");
}
return {p_mut, p_append_right};
}
template <typename FieldType, node_type_t NODE_TYPE>
template <KeyT KT>
std::tuple<NodeExtentMutable*, char*>
APPEND_T::open_nxt(const full_key_t<KT>& key)
{
if constexpr (FIELD_TYPE == field_type_t::N0 ||
FIELD_TYPE == field_type_t::N1) {
FieldType::append_key(*p_mut, key, p_append_left);
} else if constexpr (FIELD_TYPE == field_type_t::N2) {
FieldType::append_key(*p_mut, key, p_append_right);
} else {
ceph_abort("impossible path");
}
return {p_mut, p_append_right};
}
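// Finalizes the node under construction: for an internal node whose source is
// level-tail, the tail child address is copied to the left edge of the
// appended right part, then num_keys is committed into the field area.
// Returns p_append_left, the end of the fixed-size left part.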
template <typename FieldType, node_type_t NODE_TYPE>
template <KeyT KT>
char* APPEND_T::wrap()
{
assert(p_append_left <= p_append_right);
assert(p_src);
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
if (p_src->is_level_tail()) {
laddr_t tail_value = p_src->get_end_p_laddr()->value;
p_append_right -= sizeof(laddr_t);
assert(p_append_left <= p_append_right);
p_mut->copy_in_absolute(p_append_right, tail_value);
}
}
p_mut->copy_in_absolute(p_start + offsetof(FieldType, num_keys), num_keys);
return p_append_left;
}
#define APPEND_TEMPLATE(FT, NT, KT) template class node_extent_t<FT, NT>::Appender<KT>
APPEND_TEMPLATE(node_fields_0_t, node_type_t::INTERNAL, KeyT::VIEW);
APPEND_TEMPLATE(node_fields_1_t, node_type_t::INTERNAL, KeyT::VIEW);
APPEND_TEMPLATE(node_fields_2_t, node_type_t::INTERNAL, KeyT::VIEW);
APPEND_TEMPLATE(internal_fields_3_t, node_type_t::INTERNAL, KeyT::VIEW);
APPEND_TEMPLATE(node_fields_0_t, node_type_t::LEAF, KeyT::VIEW);
APPEND_TEMPLATE(node_fields_1_t, node_type_t::LEAF, KeyT::VIEW);
APPEND_TEMPLATE(node_fields_2_t, node_type_t::LEAF, KeyT::VIEW);
APPEND_TEMPLATE(leaf_fields_3_t, node_type_t::LEAF, KeyT::VIEW);
APPEND_TEMPLATE(node_fields_0_t, node_type_t::INTERNAL, KeyT::HOBJ);
APPEND_TEMPLATE(node_fields_1_t, node_type_t::INTERNAL, KeyT::HOBJ);
APPEND_TEMPLATE(node_fields_2_t, node_type_t::INTERNAL, KeyT::HOBJ);
APPEND_TEMPLATE(internal_fields_3_t, node_type_t::INTERNAL, KeyT::HOBJ);
APPEND_TEMPLATE(node_fields_0_t, node_type_t::LEAF, KeyT::HOBJ);
APPEND_TEMPLATE(node_fields_1_t, node_type_t::LEAF, KeyT::HOBJ);
APPEND_TEMPLATE(node_fields_2_t, node_type_t::LEAF, KeyT::HOBJ);
APPEND_TEMPLATE(leaf_fields_3_t, node_type_t::LEAF, KeyT::HOBJ);
}
| 15,824 | 36.589074 | 86 | cc |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/stages/node_stage.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/onode_manager/staged-fltree/node_types.h"
#include "key_layout.h"
#include "stage_types.h"
namespace crimson::os::seastore::onode {
class NodeExtentMutable;
/**
* node_extent_t
*
 * The top indexing stage implementation for node N0/N1/N2/N3. It implements
 * the staged contract as an indexable container, and provides access to the
 * node header.
*
 * The specific field layouts are defined by FieldType, which is one of
 * node_fields_0_t, node_fields_1_t, node_fields_2_t, internal_fields_3_t and
 * leaf_fields_3_t. See node_stage_layout.h for the layout diagrams.
*/
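// As an indexable container (CONTAINER_TYPE == INDEXABLE), the staged code
// accesses this class through keys(), operator[](index), size_before(index),
// size_to_nxt_at(index) and get_nxt_container(index).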
template <typename FieldType, node_type_t _NODE_TYPE>
class node_extent_t {
public:
using value_input_t = value_input_type_t<_NODE_TYPE>;
using value_t = value_type_t<_NODE_TYPE>;
using num_keys_t = typename FieldType::num_keys_t;
static constexpr node_type_t NODE_TYPE = _NODE_TYPE;
static constexpr field_type_t FIELD_TYPE = FieldType::FIELD_TYPE;
// TODO: remove
node_extent_t() = default;
node_extent_t(const FieldType* p_fields, extent_len_t node_size)
: p_fields{p_fields}, node_size{node_size} {
assert(is_valid_node_size(node_size));
validate(*p_fields);
}
const char* p_start() const { return fields_start(*p_fields); }
bool is_level_tail() const { return p_fields->is_level_tail(); }
level_t level() const { return p_fields->header.level; }
node_offset_t free_size() const {
return p_fields->template free_size_before<NODE_TYPE>(
keys(), node_size);
}
extent_len_t total_size() const {
return p_fields->total_size(node_size);
}
const char* p_left_bound() const;
template <node_type_t T = NODE_TYPE>
std::enable_if_t<T == node_type_t::INTERNAL, const laddr_packed_t*>
get_end_p_laddr() const {
assert(is_level_tail());
if constexpr (FIELD_TYPE == field_type_t::N3) {
return p_fields->get_p_child_addr(keys(), node_size);
} else {
auto offset_start = p_fields->get_item_end_offset(
keys(), node_size);
assert(offset_start <= node_size);
offset_start -= sizeof(laddr_packed_t);
auto p_addr = p_start() + offset_start;
return reinterpret_cast<const laddr_packed_t*>(p_addr);
}
}
// container type system
using key_get_type = typename FieldType::key_get_type;
static constexpr auto CONTAINER_TYPE = ContainerType::INDEXABLE;
index_t keys() const { return p_fields->num_keys; }
key_get_type operator[] (index_t index) const {
return p_fields->get_key(index, node_size);
}
extent_len_t size_before(index_t index) const {
auto free_size = p_fields->template free_size_before<NODE_TYPE>(
index, node_size);
assert(total_size() >= free_size);
return total_size() - free_size;
}
node_offset_t size_to_nxt_at(index_t index) const;
node_offset_t size_overhead_at(index_t index) const {
return FieldType::ITEM_OVERHEAD; }
container_range_t get_nxt_container(index_t index) const;
template <typename T = FieldType>
std::enable_if_t<T::FIELD_TYPE == field_type_t::N3, const value_t*>
get_p_value(index_t index) const {
assert(index < keys());
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
return p_fields->get_p_child_addr(index, node_size);
} else {
auto range = get_nxt_container(index).range;
auto ret = reinterpret_cast<const value_header_t*>(range.p_start);
assert(range.p_start + ret->allocation_size() == range.p_end);
return ret;
}
}
void encode(const char* p_node_start, ceph::bufferlist& encoded) const {
assert(p_node_start == p_start());
// nothing to encode as the container range is the entire extent
}
static node_extent_t decode(const char* p_node_start,
extent_len_t node_size,
ceph::bufferlist::const_iterator& delta) {
// nothing to decode
return node_extent_t(
reinterpret_cast<const FieldType*>(p_node_start),
node_size);
}
static void validate(const FieldType& fields) {
#ifndef NDEBUG
assert(fields.header.get_node_type() == NODE_TYPE);
assert(fields.header.get_field_type() == FieldType::FIELD_TYPE);
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
assert(fields.header.level > 0u);
} else {
assert(fields.header.level == 0u);
}
#endif
}
static void bootstrap_extent(
NodeExtentMutable&, field_type_t, node_type_t, bool, level_t);
static void update_is_level_tail(NodeExtentMutable&, const node_extent_t&, bool);
static node_offset_t header_size() { return FieldType::HEADER_SIZE; }
template <IsFullKey Key>
static node_offset_t estimate_insert(
const Key& key, const value_input_t& value) {
auto size = FieldType::estimate_insert_one();
if constexpr (FIELD_TYPE == field_type_t::N2) {
size += ns_oid_view_t::estimate_size(key);
} else if constexpr (FIELD_TYPE == field_type_t::N3 &&
NODE_TYPE == node_type_t::LEAF) {
size += value.allocation_size();
}
return size;
}
template <IsFullKey Key>
static const value_t* insert_at(
NodeExtentMutable& mut, const node_extent_t&,
const Key& key, const value_input_t& value,
index_t index, node_offset_t size, const char* p_left_bound) {
if constexpr (FIELD_TYPE == field_type_t::N3) {
ceph_abort("not implemented");
} else {
ceph_abort("impossible");
}
}
template <IsFullKey Key>
static memory_range_t insert_prefix_at(
NodeExtentMutable&, const node_extent_t&,
const Key& key,
index_t index, node_offset_t size, const char* p_left_bound);
static void update_size_at(
NodeExtentMutable&, const node_extent_t&, index_t index, int change);
static node_offset_t trim_until(
NodeExtentMutable&, const node_extent_t&, index_t index);
static node_offset_t trim_at(NodeExtentMutable&, const node_extent_t&,
index_t index, node_offset_t trimmed);
static node_offset_t erase_at(NodeExtentMutable&, const node_extent_t&,
index_t index, const char* p_left_bound);
template <KeyT KT>
class Appender;
private:
const FieldType& fields() const { return *p_fields; }
const FieldType* p_fields;
extent_len_t node_size;
};
template <typename FieldType, node_type_t NODE_TYPE>
template <KeyT KT>
class node_extent_t<FieldType, NODE_TYPE>::Appender {
public:
Appender(NodeExtentMutable* p_mut, char* p_append)
: p_mut{p_mut}, p_start{p_append} {
#ifndef NDEBUG
auto p_fields = reinterpret_cast<const FieldType*>(p_append);
assert(*(p_fields->header.get_field_type()) == FIELD_TYPE);
assert(p_fields->header.get_node_type() == NODE_TYPE);
assert(p_fields->num_keys == 0);
#endif
p_append_left = p_start + FieldType::HEADER_SIZE;
p_append_right = p_start + p_mut->get_length();
}
Appender(NodeExtentMutable*, const node_extent_t&, bool open = false);
void append(const node_extent_t& src, index_t from, index_t items);
void append(const full_key_t<KT>&, const value_input_t&, const value_t*&);
char* wrap();
std::tuple<NodeExtentMutable*, char*> open_nxt(const key_get_type&);
std::tuple<NodeExtentMutable*, char*> open_nxt(const full_key_t<KT>&);
void wrap_nxt(char* p_append) {
if constexpr (FIELD_TYPE != field_type_t::N3) {
assert(p_append < p_append_right);
assert(p_append_left < p_append);
p_append_right = p_append;
auto new_offset = p_append - p_start;
assert(new_offset > 0);
assert(new_offset < p_mut->get_length());
FieldType::append_offset(*p_mut, new_offset, p_append_left);
++num_keys;
} else {
ceph_abort("not implemented");
}
}
private:
const node_extent_t* p_src = nullptr;
NodeExtentMutable* p_mut;
char* p_start;
char* p_append_left;
char* p_append_right;
num_keys_t num_keys = 0;
};
}
| 8,009 | 33.377682 | 83 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/stages/node_stage_layout.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include "node_stage_layout.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/node_extent_mutable.h"
namespace crimson::os::seastore::onode {
void node_header_t::bootstrap_extent(
NodeExtentMutable& mut,
field_type_t field_type, node_type_t node_type,
bool is_level_tail, level_t level)
{
node_header_t header;
header.set_field_type(field_type);
header.set_node_type(node_type);
header.set_is_level_tail(is_level_tail);
header.level = level;
mut.copy_in_relative(0, header);
}
void node_header_t::update_is_level_tail(
NodeExtentMutable& mut, const node_header_t& header, bool value)
{
auto& _header = const_cast<node_header_t&>(header);
_header.set_is_level_tail(value);
mut.validate_inplace_update(_header);
}
#define F013_T _node_fields_013_t<SlotType>
#define F013_INST(ST) _node_fields_013_t<ST>
template <typename SlotType>
void F013_T::update_size_at(
NodeExtentMutable& mut, const me_t& node, index_t index, int change)
{
assert(index <= node.num_keys);
[[maybe_unused]] extent_len_t node_size = mut.get_length();
#ifndef NDEBUG
// check underflow
if (change < 0 && index != node.num_keys) {
assert(node.get_item_start_offset(index, node_size) <
node.get_item_end_offset(index, node_size));
}
#endif
for (const auto* p_slot = &node.slots[index];
p_slot < &node.slots[node.num_keys];
++p_slot) {
node_offset_t offset = p_slot->right_offset;
int new_offset = offset - change;
assert(new_offset > 0);
assert(new_offset < (int)node_size);
mut.copy_in_absolute(
(void*)&(p_slot->right_offset),
node_offset_t(new_offset));
}
#ifndef NDEBUG
// check overflow
if (change > 0 && index != node.num_keys) {
assert(node.num_keys > 0);
assert(node.get_key_start_offset(node.num_keys, node_size) <=
node.slots[node.num_keys - 1].right_offset);
}
#endif
}
template <typename SlotType>
void F013_T::append_key(
NodeExtentMutable& mut, const key_t& key, char*& p_append)
{
mut.copy_in_absolute(p_append, key);
p_append += sizeof(key_t);
}
template <typename SlotType>
void F013_T::append_offset(
NodeExtentMutable& mut, node_offset_t offset_to_right, char*& p_append)
{
mut.copy_in_absolute(p_append, offset_to_right);
p_append += sizeof(node_offset_t);
}
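// Inserts a new slot at the given index: the stored offsets of the slots from
// index onwards are first adjusted for the size_right bytes the new item will
// occupy, the slot array is shifted to open a hole, and the new key and
// item-start offset are written into it. Moving the right-part item bytes
// themselves is left to the caller (see node_extent_t::insert_prefix_at).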
template <typename SlotType>
template <IsFullKey Key>
void F013_T::insert_at(
NodeExtentMutable& mut, const Key& key,
const me_t& node, index_t index, node_offset_t size_right)
{
assert(index <= node.num_keys);
extent_len_t node_size = mut.get_length();
update_size_at(mut, node, index, size_right);
auto p_insert = const_cast<char*>(fields_start(node)) +
node.get_key_start_offset(index, node_size);
auto p_shift_end = fields_start(node) +
node.get_key_start_offset(node.num_keys, node_size);
mut.shift_absolute(p_insert, p_shift_end - p_insert, estimate_insert_one());
mut.copy_in_absolute((void*)&node.num_keys, num_keys_t(node.num_keys + 1));
append_key(mut, key_t::from_key(key), p_insert);
int new_offset = node.get_item_end_offset(index, node_size) - size_right;
assert(new_offset > 0);
assert(new_offset < (int)node_size);
append_offset(mut, new_offset, p_insert);
}
#define IA_TEMPLATE(ST, KT) template void F013_INST(ST):: \
insert_at<KT>(NodeExtentMutable&, const KT&, \
const F013_INST(ST)&, index_t, node_offset_t)
IA_TEMPLATE(slot_0_t, key_view_t);
IA_TEMPLATE(slot_1_t, key_view_t);
IA_TEMPLATE(slot_3_t, key_view_t);
IA_TEMPLATE(slot_0_t, key_hobj_t);
IA_TEMPLATE(slot_1_t, key_hobj_t);
IA_TEMPLATE(slot_3_t, key_hobj_t);
template <typename SlotType>
node_offset_t F013_T::erase_at(
NodeExtentMutable& mut, const me_t& node, index_t index, const char* p_left_bound)
{
extent_len_t node_size = mut.get_length();
auto offset_item_start = node.get_item_start_offset(index, node_size);
auto offset_item_end = node.get_item_end_offset(index, node_size);
assert(offset_item_start < offset_item_end);
auto erase_size = offset_item_end - offset_item_start;
// fix and shift the left part
update_size_at(mut, node, index + 1, -erase_size);
const char* p_shift_start = fields_start(node) +
node.get_key_start_offset(index + 1, node_size);
extent_len_t shift_len = sizeof(SlotType) * (node.num_keys - index - 1);
int shift_off = -(int)sizeof(SlotType);
mut.shift_absolute(p_shift_start, shift_len, shift_off);
// shift the right part
p_shift_start = p_left_bound;
shift_len = fields_start(node) + offset_item_start - p_left_bound;
shift_off = erase_size;
mut.shift_absolute(p_shift_start, shift_len, shift_off);
// fix num_keys
mut.copy_in_absolute((void*)&node.num_keys, num_keys_t(node.num_keys - 1));
return erase_size;
}
#define F013_TEMPLATE(ST) template struct F013_INST(ST)
F013_TEMPLATE(slot_0_t);
F013_TEMPLATE(slot_1_t);
F013_TEMPLATE(slot_3_t);
void node_fields_2_t::append_offset(
NodeExtentMutable& mut, node_offset_t offset_to_right, char*& p_append)
{
mut.copy_in_absolute(p_append, offset_to_right);
p_append += sizeof(node_offset_t);
}
}
| 5,268 | 33.214286 | 86 | cc |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/stages/node_stage_layout.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "key_layout.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/node_types.h"
namespace crimson::os::seastore::onode {
class NodeExtentMutable;
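// Every node extent starts with this header: one packed byte recording the
// field type, the node type (internal/leaf) and the is_level_tail flag,
// followed by the node level (0 for leaf nodes).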
struct node_header_t {
static constexpr unsigned FIELD_TYPE_BITS = 6u;
static_assert(static_cast<uint8_t>(field_type_t::_MAX) <= 1u << FIELD_TYPE_BITS);
static constexpr unsigned NODE_TYPE_BITS = 1u;
static constexpr unsigned B_LEVEL_TAIL_BITS = 1u;
using bits_t = uint8_t;
node_header_t() {}
std::optional<field_type_t> get_field_type() const {
if (field_type >= FIELD_TYPE_MAGIC &&
field_type < static_cast<uint8_t>(field_type_t::_MAX)) {
return static_cast<field_type_t>(field_type);
} else {
return std::nullopt;
}
}
node_type_t get_node_type() const {
return static_cast<node_type_t>(node_type);
}
bool get_is_level_tail() const {
return is_level_tail;
}
static void bootstrap_extent(
NodeExtentMutable&, field_type_t, node_type_t, bool, level_t);
static void update_is_level_tail(NodeExtentMutable&, const node_header_t&, bool);
bits_t field_type : FIELD_TYPE_BITS;
bits_t node_type : NODE_TYPE_BITS;
bits_t is_level_tail : B_LEVEL_TAIL_BITS;
static_assert(sizeof(bits_t) * 8 ==
FIELD_TYPE_BITS + NODE_TYPE_BITS + B_LEVEL_TAIL_BITS);
level_t level;
private:
void set_field_type(field_type_t type) {
field_type = static_cast<uint8_t>(type);
}
void set_node_type(node_type_t type) {
node_type = static_cast<uint8_t>(type);
}
void set_is_level_tail(bool value) {
is_level_tail = static_cast<uint8_t>(value);
}
} __attribute__((packed));
inline std::ostream& operator<<(std::ostream& os, const node_header_t& header) {
auto field_type = header.get_field_type();
if (field_type.has_value()) {
os << "header" << header.get_node_type() << *field_type
<< "(is_level_tail=" << header.get_is_level_tail()
<< ", level=" << (unsigned)header.level << ")";
} else {
os << "header(INVALID)";
}
return os;
}
template <typename FixedKeyType, field_type_t _FIELD_TYPE>
struct _slot_t {
using key_t = FixedKeyType;
static constexpr field_type_t FIELD_TYPE = _FIELD_TYPE;
static constexpr node_offset_t OVERHEAD = sizeof(_slot_t) - sizeof(key_t);
key_t key;
node_offset_t right_offset;
} __attribute__((packed));
using slot_0_t = _slot_t<shard_pool_crush_t, field_type_t::N0>;
using slot_1_t = _slot_t<crush_t, field_type_t::N1>;
using slot_3_t = _slot_t<snap_gen_t, field_type_t::N3>;
struct node_range_t {
extent_len_t start;
extent_len_t end;
};
template <typename FieldType>
const char* fields_start(const FieldType& node) {
return reinterpret_cast<const char*>(&node);
}
template <node_type_t NODE_TYPE, typename FieldType>
node_range_t fields_free_range_before(
const FieldType& node, index_t index, extent_len_t node_size) {
assert(index <= node.num_keys);
extent_len_t offset_start = node.get_key_start_offset(index, node_size);
extent_len_t offset_end = node.get_item_end_offset(index, node_size);
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
if (node.is_level_tail() && index == node.num_keys) {
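      // a level-tail internal node has one more child than keys (see
      // internal_fields_3_t::get_p_child_addr), so room for that extra
      // child address is reserved here instead of being reported as free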
offset_end -= sizeof(laddr_t);
}
}
assert(offset_start <= offset_end);
assert(offset_end - offset_start < node_size);
return {offset_start, offset_end};
}
/**
 * _node_fields_013_t (node_fields_0_t, node_fields_1_t, leaf_fields_3_t)
*
* The STAGE_LEFT layout implementation for node N0/N1, or the STAGE_RIGHT
* layout implementation for leaf node N3.
*
* The node layout storing n slots:
*
* # <----------------------------- node range --------------------------------------> #
* # #<~># free space #
* # <----- left part -----------------------------> # <~# <----- right slots -------> #
* # # <---- left slots -------------> #~> # #
* # # slots [2, n) |<~># #<~>| right slots [2, n) #
* # # <- slot 0 -> | <- slot 1 -> | # # | <-- s1 --> | <-- s0 --> #
* # # | | # # | | #
* # | num_ # | right | | right | # # | next-stage | next-stage #
* # header | keys # key | offset | key | offset | # # | container | container #
* # | # 0 | 0 | 1 | 1 |...#...#...| or onode 1 | or onode 0 #
* | | ^ ^
* | | | |
* | +----------------+ |
* +--------------------------------------------+
*/
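// A hypothetical example of the offset arithmetic above (numbers are assumed
// for illustration, not part of the format): with node_size = 4096 and two
// keys whose next-stage items are 96 and 100 bytes,
//   slots[0].right_offset == 4000  // item 0 occupies [4000, 4096)
//   slots[1].right_offset == 3900  // item 1 occupies [3900, 4000)
//   get_item_end_offset(1, 4096) == get_item_start_offset(0, 4096) == 4000
// and, leaving aside the internal level-tail reservation above, the free
// space before index 2 spans [get_key_start_offset(2, 4096), 3900).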
template <typename SlotType>
struct _node_fields_013_t {
// should be enough to index all keys under 64 KiB node
using num_keys_t = uint16_t;
using key_t = typename SlotType::key_t;
using key_get_type = const key_t&;
using me_t = _node_fields_013_t<SlotType>;
static constexpr field_type_t FIELD_TYPE = SlotType::FIELD_TYPE;
static constexpr node_offset_t HEADER_SIZE =
sizeof(node_header_t) + sizeof(num_keys_t);
static constexpr node_offset_t ITEM_OVERHEAD = SlotType::OVERHEAD;
bool is_level_tail() const { return header.get_is_level_tail(); }
extent_len_t total_size(extent_len_t node_size) const {
return node_size;
}
key_get_type get_key(
index_t index, extent_len_t node_size) const {
assert(index < num_keys);
return slots[index].key;
}
node_offset_t get_key_start_offset(
index_t index, extent_len_t node_size) const {
assert(index <= num_keys);
auto offset = HEADER_SIZE + sizeof(SlotType) * index;
assert(offset < node_size);
return offset;
}
node_offset_t get_item_start_offset(
index_t index, extent_len_t node_size) const {
assert(index < num_keys);
auto offset = slots[index].right_offset;
assert(offset < node_size);
return offset;
}
const void* p_offset(index_t index) const {
assert(index < num_keys);
return &slots[index].right_offset;
}
extent_len_t get_item_end_offset(
index_t index, extent_len_t node_size) const {
return index == 0 ? node_size
: get_item_start_offset(index - 1, node_size);
}
template <node_type_t NODE_TYPE>
node_offset_t free_size_before(
index_t index, extent_len_t node_size) const {
auto range = fields_free_range_before<NODE_TYPE>(*this, index, node_size);
return range.end - range.start;
}
static node_offset_t estimate_insert_one() { return sizeof(SlotType); }
template <IsFullKey Key>
static void insert_at(
NodeExtentMutable&, const Key& key,
const me_t& node, index_t index, node_offset_t size_right);
static node_offset_t erase_at(NodeExtentMutable&, const me_t&, index_t, const char*);
static void update_size_at(
NodeExtentMutable&, const me_t& node, index_t index, int change);
static void append_key(
NodeExtentMutable&, const key_t& key, char*& p_append);
template <IsFullKey Key>
static void append_key(
NodeExtentMutable& mut, const Key& key, char*& p_append) {
append_key(mut, key_t::from_key(key), p_append);
}
static void append_offset(
NodeExtentMutable& mut, node_offset_t offset_to_right, char*& p_append);
node_header_t header;
num_keys_t num_keys = 0u;
SlotType slots[];
} __attribute__((packed));
using node_fields_0_t = _node_fields_013_t<slot_0_t>;
using node_fields_1_t = _node_fields_013_t<slot_1_t>;
/**
* node_fields_2_t
*
* The STAGE_STRING layout implementation for node N2.
*
* The node layout storing n slots:
*
* # <--------------------------------- node range ----------------------------------------> #
* # #<~># free space #
* # <------- left part ---------------> # <~# <--------- right slots ---------------------> #
* # # <---- offsets ----> #~> #<~>| slots [2, n) #
* # # offsets [2, n) |<~># # | <----- slot 1 ----> | <----- slot 0 ----> #
* # # | # # | | #
* # | num_ # offset | offset | # # | next-stage | ns-oid | next-stage | ns-oid #
* # header | keys # 0 | 1 |...#...#...| container1 | 1 | container0 | 0 #
* | | ^ ^
* | | | |
* | +----------------+ |
* +-----------------------------------------------+
*/
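// A hypothetical example (numbers are assumed for illustration): only the
// offsets array lives in the left part here; each slot's ns-oid key is
// recovered from the right part relative to the slot's end offset (see
// get_key below). With node_size = 4096 and two slots of 128 and 168 bytes,
//   offsets[0] == 3968  // slot 0 occupies [3968, 4096), key view built at 4096
//   offsets[1] == 3800  // slot 1 occupies [3800, 3968), key view built at 3968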
struct node_fields_2_t {
// should be enough to index all keys under 64 KiB node
using num_keys_t = uint16_t;
using key_t = ns_oid_view_t;
using key_get_type = key_t;
static constexpr field_type_t FIELD_TYPE = field_type_t::N2;
static constexpr node_offset_t HEADER_SIZE =
sizeof(node_header_t) + sizeof(num_keys_t);
static constexpr node_offset_t ITEM_OVERHEAD = sizeof(node_offset_t);
bool is_level_tail() const { return header.get_is_level_tail(); }
extent_len_t total_size(extent_len_t node_size) const {
return node_size;
}
key_get_type get_key(
index_t index, extent_len_t node_size) const {
assert(index < num_keys);
auto item_end_offset = get_item_end_offset(index, node_size);
const char* p_start = fields_start(*this);
return key_t(p_start + item_end_offset);
}
node_offset_t get_key_start_offset(
index_t index, extent_len_t node_size) const {
assert(index <= num_keys);
auto offset = HEADER_SIZE + sizeof(node_offset_t) * num_keys;
assert(offset < node_size);
return offset;
}
node_offset_t get_item_start_offset(
index_t index, extent_len_t node_size) const {
assert(index < num_keys);
auto offset = offsets[index];
assert(offset < node_size);
return offset;
}
const void* p_offset(index_t index) const {
assert(index < num_keys);
return &offsets[index];
}
extent_len_t get_item_end_offset(
index_t index, extent_len_t node_size) const {
return index == 0 ? node_size
: get_item_start_offset(index - 1, node_size);
}
template <node_type_t NODE_TYPE>
node_offset_t free_size_before(
index_t index, extent_len_t node_size) const {
auto range = fields_free_range_before<NODE_TYPE>(*this, index, node_size);
return range.end - range.start;
}
static node_offset_t estimate_insert_one() { return sizeof(node_offset_t); }
template <IsFullKey Key>
static void insert_at(
NodeExtentMutable& mut, const Key& key,
const node_fields_2_t& node, index_t index, node_offset_t size_right) {
ceph_abort("not implemented");
}
static void update_size_at(
NodeExtentMutable& mut, const node_fields_2_t& node, index_t index, int change) {
ceph_abort("not implemented");
}
static void append_key(
NodeExtentMutable& mut, const key_t& key, char*& p_append) {
ns_oid_view_t::append(mut, key, p_append);
}
template <IsFullKey Key>
static void append_key(
NodeExtentMutable& mut, const Key& key, char*& p_append) {
ns_oid_view_t::append(mut, key, p_append);
}
static void append_offset(
NodeExtentMutable& mut, node_offset_t offset_to_right, char*& p_append);
node_header_t header;
num_keys_t num_keys = 0u;
node_offset_t offsets[];
} __attribute__((packed));
/**
* internal_fields_3_t
*
 * The STAGE_RIGHT layout implementation for the internal node N3.
*
* The node layout storing 3 children:
*
* # <---------------- node range ---------------------------> #
* # # <-- keys ---> # <---- laddrs -----------> #
* # free space: # |<~># |<~>#
* # # | # | #
* # | num_ # key | key | # laddr | laddr | laddr | #
* # header | keys # 0 | 1 |...# 0 | 1 | 2 |...#
*/
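// Note that the laddr array below does not start right after the last key:
// it is anchored at the node's full key capacity, i.e. at
// &keys[(node_size - HEADER_SIZE) / ITEM_SIZE] (see get_p_child_addr), so
// child address i lives at that fixed base plus i regardless of num_keys.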
struct internal_fields_3_t {
using key_get_type = const snap_gen_t&;
// should be enough to index all keys under 64 KiB node
using num_keys_t = uint16_t;
static constexpr field_type_t FIELD_TYPE = field_type_t::N3;
static constexpr node_offset_t HEADER_SIZE =
sizeof(node_header_t) + sizeof(num_keys_t);
static constexpr node_offset_t ITEM_SIZE =
sizeof(snap_gen_t) + sizeof(laddr_t);
static constexpr node_offset_t ITEM_OVERHEAD = 0u;
bool is_level_tail() const { return header.get_is_level_tail(); }
extent_len_t total_size(extent_len_t node_size) const {
if (is_level_tail()) {
return node_size - sizeof(snap_gen_t);
} else {
return node_size;
}
}
key_get_type get_key(
index_t index, extent_len_t node_size) const {
assert(index < num_keys);
return keys[index];
}
template <node_type_t NODE_TYPE>
std::enable_if_t<NODE_TYPE == node_type_t::INTERNAL, node_offset_t>
free_size_before(index_t index, extent_len_t node_size) const {
assert(index <= num_keys);
assert(num_keys <= get_max_num_keys(node_size));
extent_len_t free = total_size(node_size) - HEADER_SIZE -
index * ITEM_SIZE;
if (is_level_tail() && index == num_keys) {
free -= sizeof(laddr_t);
}
return free;
}
const laddr_packed_t* get_p_child_addr(
index_t index, extent_len_t node_size) const {
#ifndef NDEBUG
if (is_level_tail()) {
assert(index <= num_keys);
} else {
assert(index < num_keys);
}
#endif
auto p_addrs = reinterpret_cast<const laddr_packed_t*>(
&keys[get_num_keys_limit(node_size)]);
auto ret = p_addrs + index;
assert((const char*)ret < fields_start(*this) + node_size);
return ret;
}
static node_offset_t estimate_insert_one() { return ITEM_SIZE; }
template <IsFullKey Key>
static void insert_at(
NodeExtentMutable& mut, const Key& key,
const internal_fields_3_t& node,
index_t index, node_offset_t size_right) {
ceph_abort("not implemented");
}
static void update_size_at(
NodeExtentMutable& mut, const internal_fields_3_t& node,
index_t index, int change) {
ceph_abort("not implemented");
}
node_header_t header;
num_keys_t num_keys = 0u;
snap_gen_t keys[];
private:
num_keys_t get_max_num_keys(extent_len_t node_size) const {
auto num_limit = get_num_keys_limit(node_size);
return (is_level_tail() ? num_limit - 1 : num_limit);
}
static num_keys_t get_num_keys_limit(extent_len_t node_size) {
return (node_size - HEADER_SIZE) / ITEM_SIZE;
}
} __attribute__((packed));
using leaf_fields_3_t = _node_fields_013_t<slot_3_t>;
}
| 14,889 | 35.584767 | 94 | h |
null | ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/stages/stage.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <cassert>
#include <compare>
#include <optional>
#include <ostream>
#include <sstream>
#include <type_traits>
#include "common/likely.h"
#include "sub_items_stage.h"
#include "item_iterator_stage.h"
namespace crimson::os::seastore::onode {
struct search_result_bs_t {
index_t index;
MatchKindBS match;
};
template <typename FGetKey>
search_result_bs_t binary_search(
const key_hobj_t& key,
index_t begin, index_t end, FGetKey&& f_get_key) {
assert(begin <= end);
while (begin < end) {
auto total = begin + end;
auto mid = total >> 1;
// do not copy if return value is reference
decltype(f_get_key(mid)) target = f_get_key(mid);
auto match = key <=> target;
if (match == std::strong_ordering::less) {
end = mid;
} else if (match == std::strong_ordering::greater) {
begin = mid + 1;
} else {
return {mid, MatchKindBS::EQ};
}
}
  return {begin, MatchKindBS::NE};
}
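// Unlike binary_search() above, binary_search_r() searches a non-decreasing
// f_get over [rend, rbegin] from the right: it yields either an index whose
// value equals `key` (EQ), or the right-most index whose value does not
// exceed `key` (NE). The size-based split searches below rely on the
// implicit assumption that f_get(rend) itself does not exceed `key`.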
template <typename PivotType, typename FGet>
search_result_bs_t binary_search_r(
index_t rend, index_t rbegin, FGet&& f_get, const PivotType& key) {
assert(rend <= rbegin);
while (rend < rbegin) {
auto total = rend + rbegin + 1;
auto mid = total >> 1;
// do not copy if return value is reference
decltype(f_get(mid)) target = f_get(mid);
int match = target - key;
if (match < 0) {
rend = mid;
} else if (match > 0) {
rbegin = mid - 1;
} else {
return {mid, MatchKindBS::EQ};
}
}
return {rbegin, MatchKindBS::NE};
}
inline bool matchable(field_type_t type, match_stat_t mstat) {
assert(mstat >= MSTAT_MIN && mstat <= MSTAT_MAX);
/*
* compressed prefix by field type:
* N0: NONE
* N1: pool/shard
* N2: pool/shard crush
* N3: pool/shard crush ns/oid
*
* if key matches the node's compressed prefix, return true
* else, return false
*/
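  // e.g., assuming to_unsigned() maps N0..N3 to 0..3: the more of the key
  // prefix a node compresses away, the smaller mstat must be (i.e. the
  // later the mismatch happened) for the key to possibly reside in it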
#ifndef NDEBUG
if (mstat == MSTAT_END) {
assert(type == field_type_t::N0);
}
#endif
return mstat + to_unsigned(type) < 4;
}
inline void assert_mstat(
const key_hobj_t& key,
const key_view_t& index,
match_stat_t mstat) {
assert(mstat >= MSTAT_MIN && mstat <= MSTAT_LT2);
// key < index ...
switch (mstat) {
case MSTAT_EQ:
break;
case MSTAT_LT0:
assert(key < index.snap_gen_packed());
break;
case MSTAT_LT1:
assert(key < index.ns_oid_view());
break;
case MSTAT_LT2:
if (index.has_shard_pool()) {
assert((key < shard_pool_crush_t{
index.shard_pool_packed(), index.crush_packed()}));
} else {
assert(key < index.crush_packed());
}
break;
default:
ceph_abort("impossible path");
}
// key == index ...
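  // (the cases below intentionally fall through: a match at a given field
  // also implies equality of every more significant field, which the
  // subsequent cases verify)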
switch (mstat) {
case MSTAT_EQ:
assert(key == index.snap_gen_packed());
case MSTAT_LT0:
if (!index.has_ns_oid())
break;
assert(index.ns_oid_view().type() == ns_oid_view_t::Type::MAX ||
key == index.ns_oid_view());
case MSTAT_LT1:
if (!index.has_crush())
break;
assert(key == index.crush_packed());
if (!index.has_shard_pool())
break;
assert(key == index.shard_pool_packed());
default:
break;
}
}
#define NXT_STAGE_T staged<next_param_t>
enum class TrimType { BEFORE, AFTER, AT };
/**
* staged
*
* Implements recursive logic that modifies or reads the node layout
* (N0/N1/N2/N3 * LEAF/INTERNAL) with the multi-stage design. The specific
* stage implementation is flexible. So the implementations for different
* stages can be assembled independently, as long as they follow the
* definitions of container interfaces.
*
* Multi-stage is designed to index different portions of onode keys
* stage-by-stage. There are at most 3 stages for a node:
* - STAGE_LEFT: index shard-pool-crush for N0, or index crush for N1 node;
* - STAGE_STRING: index ns-oid for N0/N1/N2 nodes;
* - STAGE_RIGHT: index snap-gen for N0/N1/N2/N3 nodes;
*
* The intention is to consolidate the high-level indexing implementations at
* the level of stage, so we don't need to write them repeatedly for every
* stage and for every node type.
*/
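// For example, locating one onode key in an N0 leaf resolves a three-level
// position, conceptually staged_position_t{i_left, {i_string, {i_right}}}:
// i_left indexes the shard-pool-crush slot, i_string the ns-oid entry under
// it, and i_right the snap-gen entry under that (see position_t and the
// nested .nxt fields used throughout this file).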
template <typename Params>
struct staged {
static_assert(Params::STAGE >= STAGE_BOTTOM);
static_assert(Params::STAGE <= STAGE_TOP);
using container_t = typename Params::container_t;
using key_get_type = typename container_t::key_get_type;
using next_param_t = typename Params::next_param_t;
using position_t = staged_position_t<Params::STAGE>;
using result_t = staged_result_t<Params::NODE_TYPE, Params::STAGE>;
using value_input_t = value_input_type_t<Params::NODE_TYPE>;
using value_t = value_type_t<Params::NODE_TYPE>;
static constexpr auto CONTAINER_TYPE = container_t::CONTAINER_TYPE;
static constexpr bool IS_BOTTOM = (Params::STAGE == STAGE_BOTTOM);
static constexpr auto NODE_TYPE = Params::NODE_TYPE;
static constexpr auto STAGE = Params::STAGE;
template <bool is_exclusive>
static void _left_or_right(index_t& split_index, index_t insert_index,
std::optional<bool>& is_insert_left) {
assert(!is_insert_left.has_value());
assert(is_valid_index(split_index));
if constexpr (is_exclusive) {
if (split_index <= insert_index) {
// ...[s_index-1] |!| (i_index) [s_index]...
// offset i_position to right
is_insert_left = false;
} else {
// ...[s_index-1] (i_index)) |?[s_index]| ...
// ...(i_index)...[s_index-1] |?[s_index]| ...
is_insert_left = true;
--split_index;
}
} else {
if (split_index < insert_index) {
// ...[s_index-1] |?[s_index]| ...[(i_index)[s_index_k]...
is_insert_left = false;
} else if (split_index > insert_index) {
// ...[(i_index)s_index-1] |?[s_index]| ...
// ...[(i_index)s_index_k]...[s_index-1] |?[s_index]| ...
is_insert_left = true;
} else {
// ...[s_index-1] |?[(i_index)s_index]| ...
// i_to_left = std::nullopt;
}
}
}
template <ContainerType CTYPE, typename Enable = void> class _iterator_t;
template <ContainerType CTYPE>
class _iterator_t<CTYPE, std::enable_if_t<CTYPE == ContainerType::INDEXABLE>> {
/*
* indexable container type system:
* CONTAINER_TYPE = ContainerType::INDEXABLE
* keys() const -> index_t
* operator[](index_t) const -> key_get_type
* size_before(index_t) const -> extent_len_t
* size_overhead_at(index_t) const -> node_offset_t
* (IS_BOTTOM) get_p_value(index_t) const -> const value_t*
* (!IS_BOTTOM) size_to_nxt_at(index_t) const -> node_offset_t
* (!IS_BOTTOM) get_nxt_container(index_t) const
* encode(p_node_start, encoded)
* decode(p_node_start, node_size, delta) -> container_t
* static:
* header_size() -> node_offset_t
* estimate_insert(key, value) -> node_offset_t
* (IS_BOTTOM) insert_at(mut, src, key, value,
* index, size, p_left_bound) -> const value_t*
* (!IS_BOTTOM) insert_prefix_at(mut, src, key,
* index, size, p_left_bound) -> memory_range_t
* (!IS_BOTTOM) update_size_at(mut, src, index, size)
* trim_until(mut, container, index) -> trim_size
* (!IS_BOTTOM) trim_at(mut, container, index, trimmed) -> trim_size
* erase_at(mut, container, index, p_left_bound) -> erase_size
*
* Appender::append(const container_t& src, from, items)
*/
public:
using me_t = _iterator_t<CTYPE>;
_iterator_t(const container_t& container) : container{container} {
assert(container.keys());
}
index_t index() const {
return _index;
}
key_get_type get_key() const {
assert(!is_end());
return container[_index];
}
node_offset_t size_to_nxt() const {
assert(!is_end());
return container.size_to_nxt_at(_index);
}
template <typename T = typename NXT_STAGE_T::container_t>
std::enable_if_t<!IS_BOTTOM, T> get_nxt_container() const {
assert(!is_end());
return container.get_nxt_container(_index);
}
template <typename T = value_t>
std::enable_if_t<IS_BOTTOM, const T*> get_p_value() const {
assert(!is_end());
return container.get_p_value(_index);
}
bool is_last() const {
return _index + 1 == container.keys();
}
bool is_end() const { return _index == container.keys(); }
node_offset_t size() const {
assert(!is_end());
assert(header_size() == container.size_before(0));
assert(container.size_before(_index + 1) > container.size_before(_index));
return container.size_before(_index + 1) -
container.size_before(_index);
}
node_offset_t size_overhead() const {
assert(!is_end());
return container.size_overhead_at(_index);
}
me_t& operator++() {
assert(!is_end());
assert(!is_last());
++_index;
return *this;
}
void seek_at(index_t index) {
assert(index < container.keys());
seek_till_end(index);
}
void seek_till_end(index_t index) {
assert(!is_end());
assert(this->index() == 0);
assert(index <= container.keys());
_index = index;
}
void seek_last() {
assert(!is_end());
assert(index() == 0);
_index = container.keys() - 1;
}
void set_end() {
assert(!is_end());
assert(is_last());
++_index;
}
// Note: possible to return an end iterator
MatchKindBS seek(const key_hobj_t& key, bool exclude_last) {
assert(!is_end());
assert(index() == 0);
index_t end_index = container.keys();
if (exclude_last) {
assert(end_index);
--end_index;
assert(key < container[end_index]);
}
auto ret = binary_search(key, _index, end_index,
[this] (index_t index) { return container[index]; });
_index = ret.index;
return ret.match;
}
template <IsFullKey Key, typename T = value_t>
std::enable_if_t<IS_BOTTOM, const T*> insert(
NodeExtentMutable& mut,
const Key& key,
const value_input_t& value,
node_offset_t insert_size,
const char* p_left_bound) {
return container_t::insert_at(
mut, container, key, value, _index, insert_size, p_left_bound);
}
template <IsFullKey Key, typename T = memory_range_t>
std::enable_if_t<!IS_BOTTOM, T> insert_prefix(
NodeExtentMutable& mut, const Key& key,
node_offset_t size, const char* p_left_bound) {
return container_t::insert_prefix_at(
mut, container, key, _index, size, p_left_bound);
}
template <typename T = void>
std::enable_if_t<!IS_BOTTOM, T>
update_size(NodeExtentMutable& mut, int insert_size) {
assert(!is_end());
container_t::update_size_at(mut, container, _index, insert_size);
}
// Note: possible to return an end iterator when is_exclusive is true
template <bool is_exclusive>
size_t seek_split_inserted(
size_t start_size, size_t extra_size, size_t target_size,
index_t& insert_index, size_t insert_size,
std::optional<bool>& is_insert_left) {
assert(!is_end());
assert(index() == 0);
// replace insert_index placeholder
if constexpr (!is_exclusive) {
if (insert_index == INDEX_LAST) {
insert_index = container.keys() - 1;
}
} else {
if (insert_index == INDEX_END) {
insert_index = container.keys();
}
}
assert(insert_index <= container.keys());
auto start_size_1 = start_size + extra_size;
auto f_get_used_size = [this, start_size, start_size_1,
insert_index, insert_size] (index_t index) {
size_t current_size;
if (unlikely(index == 0)) {
current_size = start_size;
} else {
current_size = start_size_1;
if (index > insert_index) {
current_size += insert_size;
if constexpr (is_exclusive) {
--index;
}
}
// already includes header size
current_size += container.size_before(index);
}
return current_size;
};
index_t s_end;
if constexpr (is_exclusive) {
s_end = container.keys();
} else {
s_end = container.keys() - 1;
}
_index = binary_search_r(0, s_end, f_get_used_size, target_size).index;
size_t current_size = f_get_used_size(_index);
assert(current_size <= target_size);
_left_or_right<is_exclusive>(_index, insert_index, is_insert_left);
return current_size;
}
size_t seek_split(size_t start_size, size_t extra_size, size_t target_size) {
assert(!is_end());
assert(index() == 0);
auto start_size_1 = start_size + extra_size;
auto f_get_used_size = [this, start_size, start_size_1] (index_t index) {
size_t current_size;
if (unlikely(index == 0)) {
current_size = start_size;
} else {
// already includes header size
current_size = start_size_1 + container.size_before(index);
}
return current_size;
};
_index = binary_search_r(
0, container.keys() - 1, f_get_used_size, target_size).index;
size_t current_size = f_get_used_size(_index);
assert(current_size <= target_size);
return current_size;
}
    // Note: possible to return an end iterator if to_index == INDEX_END
template <KeyT KT>
void copy_out_until(
typename container_t::template Appender<KT>& appender, index_t& to_index) {
auto num_keys = container.keys();
index_t items;
if (to_index == INDEX_END) {
items = num_keys - _index;
appender.append(container, _index, items);
_index = num_keys;
to_index = _index;
} else if (to_index == INDEX_LAST) {
assert(!is_end());
items = num_keys - 1 - _index;
appender.append(container, _index, items);
_index = num_keys - 1;
to_index = _index;
} else {
assert(_index <= to_index);
assert(to_index <= num_keys);
items = to_index - _index;
appender.append(container, _index, items);
_index = to_index;
}
}
node_offset_t trim_until(NodeExtentMutable& mut) {
return container_t::trim_until(mut, container, _index);
}
template <typename T = node_offset_t>
std::enable_if_t<!IS_BOTTOM, T>
trim_at(NodeExtentMutable& mut, node_offset_t trimmed) {
return container_t::trim_at(mut, container, _index, trimmed);
}
node_offset_t erase(NodeExtentMutable& mut, const char* p_left_bound) {
assert(!is_end());
return container_t::erase_at(mut, container, _index, p_left_bound);
}
template <KeyT KT>
typename container_t::template Appender<KT>
get_appender(NodeExtentMutable* p_mut) {
assert(_index + 1 == container.keys());
return typename container_t::template Appender<KT>(p_mut, container);
}
template <KeyT KT>
typename container_t::template Appender<KT>
get_appender_opened(NodeExtentMutable* p_mut) {
if constexpr (!IS_BOTTOM) {
assert(_index + 1 == container.keys());
return typename container_t::template Appender<KT>(p_mut, container, true);
} else {
ceph_abort("impossible path");
}
}
void encode(const char* p_node_start, ceph::bufferlist& encoded) const {
container.encode(p_node_start, encoded);
ceph::encode(_index, encoded);
}
static me_t decode(const char* p_node_start,
extent_len_t node_size,
ceph::bufferlist::const_iterator& delta) {
auto container = container_t::decode(
p_node_start, node_size, delta);
auto ret = me_t(container);
index_t index;
ceph::decode(index, delta);
ret.seek_till_end(index);
return ret;
}
static node_offset_t header_size() {
return container_t::header_size();
}
template <IsFullKey Key>
static node_offset_t estimate_insert(
const Key& key, const value_input_t& value) {
return container_t::estimate_insert(key, value);
}
private:
container_t container;
index_t _index = 0;
};
template <ContainerType CTYPE>
class _iterator_t<CTYPE, std::enable_if_t<CTYPE == ContainerType::ITERATIVE>> {
/*
* iterative container type system (!IS_BOTTOM):
* CONTAINER_TYPE = ContainerType::ITERATIVE
* index() const -> index_t
* get_key() const -> key_get_type
* size() const -> node_offset_t
* size_to_nxt() const -> node_offset_t
* size_overhead() const -> node_offset_t
* get_nxt_container() const
* has_next() const -> bool
* encode(p_node_start, encoded)
* decode(p_node_start, node_length, delta) -> container_t
* operator++()
* static:
* header_size() -> node_offset_t
* estimate_insert(key, value) -> node_offset_t
* insert_prefix(mut, src, key, is_end, size, p_left_bound) -> memory_range_t
* update_size(mut, src, size)
* trim_until(mut, container) -> trim_size
* trim_at(mut, container, trimmed) -> trim_size
* erase(mut, container, p_left_bound) -> erase_size
*/
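    // note: unlike the indexable variant, this iterator keeps no separate
    // index of its own; the wrapped container itself is advanced, and the
    // end state is tracked by _is_end (see index() and operator++ below)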
// currently the iterative iterator is only implemented with STAGE_STRING
// for in-node space efficiency
static_assert(STAGE == STAGE_STRING);
public:
using me_t = _iterator_t<CTYPE>;
_iterator_t(const container_t& container) : container{container} {}
index_t index() const {
if (is_end()) {
return container.index() + 1;
} else {
return container.index();
}
}
key_get_type get_key() const {
assert(!is_end());
return container.get_key();
}
node_offset_t size_to_nxt() const {
assert(!is_end());
return container.size_to_nxt();
}
const typename NXT_STAGE_T::container_t get_nxt_container() const {
assert(!is_end());
return container.get_nxt_container();
}
bool is_last() const {
assert(!is_end());
return !container.has_next();
}
bool is_end() const {
#ifndef NDEBUG
if (_is_end) {
assert(!container.has_next());
}
#endif
return _is_end;
}
node_offset_t size() const {
assert(!is_end());
return container.size();
}
node_offset_t size_overhead() const {
assert(!is_end());
return container.size_overhead();
}
me_t& operator++() {
assert(!is_end());
assert(!is_last());
++container;
return *this;
}
void seek_at(index_t index) {
assert(!is_end());
assert(this->index() == 0);
while (index > 0) {
assert(container.has_next());
++container;
--index;
}
}
void seek_till_end(index_t index) {
assert(!is_end());
assert(this->index() == 0);
while (index > 0) {
if (!container.has_next()) {
assert(index == 1);
set_end();
break;
}
++container;
--index;
}
}
void seek_last() {
assert(!is_end());
assert(index() == 0);
while (container.has_next()) {
++container;
}
}
void set_end() {
assert(!is_end());
assert(is_last());
_is_end = true;
}
// Note: possible to return an end iterator
MatchKindBS seek(const key_hobj_t& key, bool exclude_last) {
assert(!is_end());
assert(index() == 0);
do {
if (exclude_last && is_last()) {
assert(key < get_key());
return MatchKindBS::NE;
}
auto match = key <=> get_key();
if (match == std::strong_ordering::less) {
return MatchKindBS::NE;
} else if (match == std::strong_ordering::equal) {
return MatchKindBS::EQ;
} else {
if (container.has_next()) {
++container;
} else {
// end
break;
}
}
} while (true);
assert(!exclude_last);
set_end();
return MatchKindBS::NE;
}
template <IsFullKey Key>
memory_range_t insert_prefix(
NodeExtentMutable& mut, const Key& key,
node_offset_t size, const char* p_left_bound) {
return container_t::insert_prefix(
mut, container, key, is_end(), size, p_left_bound);
}
void update_size(NodeExtentMutable& mut, int insert_size) {
assert(!is_end());
container_t::update_size(mut, container, insert_size);
}
// Note: possible to return an end iterator when is_exclusive is true
// insert_index can still be INDEX_LAST or INDEX_END
template <bool is_exclusive>
size_t seek_split_inserted(
size_t start_size, size_t extra_size, size_t target_size,
index_t& insert_index, size_t insert_size,
std::optional<bool>& is_insert_left) {
assert(!is_end());
assert(index() == 0);
size_t current_size = start_size;
index_t split_index = 0;
extra_size += header_size();
do {
if constexpr (!is_exclusive) {
if (is_last()) {
assert(split_index == index());
if (insert_index == INDEX_LAST) {
insert_index = index();
}
assert(insert_index <= index());
break;
}
}
size_t nxt_size = current_size;
if (split_index == 0) {
nxt_size += extra_size;
}
if (split_index == insert_index) {
nxt_size += insert_size;
if constexpr (is_exclusive) {
if (nxt_size > target_size) {
break;
}
current_size = nxt_size;
++split_index;
}
}
nxt_size += size();
if (nxt_size > target_size) {
break;
}
current_size = nxt_size;
if constexpr (is_exclusive) {
if (is_last()) {
assert(split_index == index());
set_end();
split_index = index();
if (insert_index == INDEX_END) {
insert_index = index();
}
assert(insert_index == index());
break;
} else {
++(*this);
++split_index;
}
} else {
++(*this);
++split_index;
}
} while (true);
assert(current_size <= target_size);
_left_or_right<is_exclusive>(split_index, insert_index, is_insert_left);
assert(split_index == index());
return current_size;
}
size_t seek_split(size_t start_size, size_t extra_size, size_t target_size) {
assert(!is_end());
assert(index() == 0);
size_t current_size = start_size;
do {
if (is_last()) {
break;
}
size_t nxt_size = current_size;
if (index() == 0) {
nxt_size += extra_size;
}
nxt_size += size();
if (nxt_size > target_size) {
break;
}
current_size = nxt_size;
++(*this);
} while (true);
assert(current_size <= target_size);
return current_size;
}
    // Note: possible to return an end iterator if to_index == INDEX_END
template <KeyT KT>
void copy_out_until(
typename container_t::template Appender<KT>& appender, index_t& to_index) {
if (is_end()) {
assert(!container.has_next());
if (to_index == INDEX_END) {
to_index = index();
}
assert(to_index == index());
return;
}
index_t items;
if (to_index == INDEX_END || to_index == INDEX_LAST) {
items = to_index;
} else {
assert(is_valid_index(to_index));
assert(index() <= to_index);
items = to_index - index();
}
if (appender.append(container, items)) {
set_end();
}
to_index = index();
}
node_offset_t trim_until(NodeExtentMutable& mut) {
if (is_end()) {
return 0;
}
return container_t::trim_until(mut, container);
}
node_offset_t trim_at(NodeExtentMutable& mut, node_offset_t trimmed) {
assert(!is_end());
return container_t::trim_at(mut, container, trimmed);
}
node_offset_t erase(NodeExtentMutable& mut, const char* p_left_bound) {
assert(!is_end());
return container_t::erase(mut, container, p_left_bound);
}
template <KeyT KT>
typename container_t::template Appender<KT>
get_appender(NodeExtentMutable* p_mut) {
return typename container_t::template Appender<KT>(p_mut, container, false);
}
template <KeyT KT>
typename container_t::template Appender<KT>
get_appender_opened(NodeExtentMutable* p_mut) {
if constexpr (!IS_BOTTOM) {
return typename container_t::template Appender<KT>(p_mut, container, true);
} else {
ceph_abort("impossible path");
}
}
void encode(const char* p_node_start, ceph::bufferlist& encoded) const {
container.encode(p_node_start, encoded);
uint8_t is_end = _is_end;
ceph::encode(is_end, encoded);
}
static me_t decode(const char* p_node_start,
extent_len_t node_size,
ceph::bufferlist::const_iterator& delta) {
auto container = container_t::decode(
p_node_start, node_size, delta);
auto ret = me_t(container);
uint8_t is_end;
ceph::decode(is_end, delta);
if (is_end) {
ret.set_end();
}
return ret;
}
static node_offset_t header_size() {
return container_t::header_size();
}
template <IsFullKey Key>
static node_offset_t estimate_insert(const Key& key,
const value_input_t& value) {
return container_t::estimate_insert(key, value);
}
private:
container_t container;
bool _is_end = false;
};
/*
* iterator_t encapsulates both indexable and iterative implementations
* from a *non-empty* container.
* cstr(const container_t&)
* access:
* index() -> index_t
* get_key() -> key_get_type (const reference or value type)
* is_last() -> bool
* is_end() -> bool
* size() -> node_offset_t
* size_overhead() -> node_offset_t
* (IS_BOTTOM) get_p_value() -> const value_t*
* (!IS_BOTTOM) get_nxt_container() -> container_range_t
* (!IS_BOTTOM) size_to_nxt() -> node_offset_t
* seek:
* operator++() -> iterator_t&
* seek_at(index)
* seek_till_end(index)
* seek_last()
* set_end()
* seek(key, exclude_last) -> MatchKindBS
* insert:
* (IS_BOTTOM) insert(mut, key, value, size, p_left_bound) -> p_value
* (!IS_BOTTOM) insert_prefix(mut, key, size, p_left_bound) -> memory_range_t
* (!IS_BOTTOM) update_size(mut, size)
* split:
* seek_split_inserted<bool is_exclusive>(
* start_size, extra_size, target_size, insert_index, insert_size,
* std::optional<bool>& is_insert_left)
* -> insert to left/right/unknown (!exclusive)
* -> insert to left/right (exclusive, can be end)
* -> split_size
* seek_split(start_size, extra_size, target_size) -> split_size
* copy_out_until(appender, to_index) (can be end)
* trim_until(mut) -> trim_size
* (!IS_BOTTOM) trim_at(mut, trimmed) -> trim_size
* erase:
* erase(mut, p_left_bound) -> erase_size
* merge:
* get_appender(p_mut) -> Appender
* (!IS_BOTTOM)get_appender_opened(p_mut) -> Appender
* denc:
* encode(p_node_start, encoded)
* decode(p_node_start, node_size, delta) -> iterator_t
* static:
* header_size() -> node_offset_t
* estimate_insert(key, value) -> node_offset_t
*/
using iterator_t = _iterator_t<CONTAINER_TYPE>;
/* TODO: detailed comments
* - trim_until(mut) -> trim_size
* * keep 0 to i - 1, and remove the rest, return the size trimmed.
* * if this is the end iterator, do nothing and return 0.
* * if this is the start iterator, normally needs to go to the higher
* stage to trim the entire container.
* - trim_at(mut, trimmed) -> trim_size
   *   * trim happens inside the current iterator, reducing its size by
   *     <trimmed>; return the total size trimmed.
*/
/*
* Lookup internals (hide?)
*/
static bool is_keys_one(
const container_t& container) { // IN
auto iter = iterator_t(container);
iter.seek_last();
if (iter.index() == 0) {
if constexpr (IS_BOTTOM) {
// ok, there is only 1 key
return true;
} else {
auto nxt_container = iter.get_nxt_container();
return NXT_STAGE_T::is_keys_one(nxt_container);
}
} else {
// more than 1 keys
return false;
}
}
template <bool GET_KEY>
static result_t smallest_result(
const iterator_t& iter, key_view_t* p_index_key) {
static_assert(!IS_BOTTOM);
assert(!iter.is_end());
auto nxt_container = iter.get_nxt_container();
auto pos_smallest = NXT_STAGE_T::position_t::begin();
const value_t* p_value;
NXT_STAGE_T::template get_slot<GET_KEY, true>(
nxt_container, pos_smallest, p_index_key, &p_value);
if constexpr (GET_KEY) {
assert(p_index_key);
p_index_key->set(iter.get_key());
} else {
assert(!p_index_key);
}
return result_t{{iter.index(), pos_smallest}, p_value, STAGE};
}
template <bool GET_KEY>
static result_t nxt_lower_bound(
const key_hobj_t& key, iterator_t& iter,
MatchHistory& history, key_view_t* index_key) {
static_assert(!IS_BOTTOM);
assert(!iter.is_end());
auto nxt_container = iter.get_nxt_container();
auto nxt_result = NXT_STAGE_T::template lower_bound<GET_KEY>(
nxt_container, key, history, index_key);
if (nxt_result.is_end()) {
if (iter.is_last()) {
return result_t::end();
} else {
return smallest_result<GET_KEY>(++iter, index_key);
}
} else {
if constexpr (GET_KEY) {
index_key->set(iter.get_key());
}
return result_t::from_nxt(iter.index(), nxt_result);
}
}
template <bool GET_POS, bool GET_KEY, bool GET_VAL>
static void get_largest_slot(
const container_t& container, // IN
position_t* p_position, // OUT
key_view_t* p_index_key, // OUT
const value_t** pp_value) { // OUT
auto iter = iterator_t(container);
iter.seek_last();
if constexpr (GET_KEY) {
assert(p_index_key);
p_index_key->set(iter.get_key());
} else {
assert(!p_index_key);
}
if constexpr (GET_POS) {
assert(p_position);
p_position->index = iter.index();
} else {
assert(!p_position);
}
if constexpr (IS_BOTTOM) {
if constexpr (GET_VAL) {
assert(pp_value);
*pp_value = iter.get_p_value();
} else {
assert(!pp_value);
}
} else {
auto nxt_container = iter.get_nxt_container();
if constexpr (GET_POS) {
NXT_STAGE_T::template get_largest_slot<true, GET_KEY, GET_VAL>(
nxt_container, &p_position->nxt, p_index_key, pp_value);
} else {
NXT_STAGE_T::template get_largest_slot<false, GET_KEY, GET_VAL>(
nxt_container, nullptr, p_index_key, pp_value);
}
}
}
template <bool GET_KEY, bool GET_VAL>
static void get_slot(
const container_t& container, // IN
const position_t& pos, // IN
key_view_t* p_index_key, // OUT
const value_t** pp_value) { // OUT
auto iter = iterator_t(container);
iter.seek_at(pos.index);
if constexpr (GET_KEY) {
assert(p_index_key);
p_index_key->set(iter.get_key());
} else {
assert(!p_index_key);
}
if constexpr (!IS_BOTTOM) {
auto nxt_container = iter.get_nxt_container();
NXT_STAGE_T::template get_slot<GET_KEY, GET_VAL>(
nxt_container, pos.nxt, p_index_key, pp_value);
} else {
if constexpr (GET_VAL) {
assert(pp_value);
*pp_value = iter.get_p_value();
} else {
assert(!pp_value);
}
}
}
template <bool GET_KEY = false>
static result_t lower_bound(
const container_t& container,
const key_hobj_t& key,
MatchHistory& history,
key_view_t* index_key = nullptr) {
bool exclude_last = false;
if (history.get<STAGE>().has_value()) {
if (*history.get<STAGE>() == MatchKindCMP::EQ) {
// lookup is short-circuited
if constexpr (!IS_BOTTOM) {
assert(history.get<STAGE - 1>().has_value());
if (history.is_GT<STAGE - 1>()) {
auto iter = iterator_t(container);
bool test_key_equal;
if constexpr (STAGE == STAGE_STRING) {
// TODO(cross-node string dedup)
// test_key_equal = (iter.get_key().type() == ns_oid_view_t::Type::MIN);
auto cmp = key <=> iter.get_key();
assert(cmp != std::strong_ordering::greater);
test_key_equal = (cmp == 0);
} else {
auto cmp = key <=> iter.get_key();
// From history, key[stage] == parent[stage][index - 1]
// which should be the smallest possible value for all
// index[stage][*]
assert(cmp != std::strong_ordering::greater);
test_key_equal = (cmp == 0);
}
if (test_key_equal) {
return nxt_lower_bound<GET_KEY>(key, iter, history, index_key);
} else {
// key[stage] < index[stage][left-most]
return smallest_result<GET_KEY>(iter, index_key);
}
}
}
// IS_BOTTOM || !history.is_GT<STAGE - 1>()
auto iter = iterator_t(container);
iter.seek_last();
if constexpr (STAGE == STAGE_STRING) {
// TODO(cross-node string dedup)
// assert(iter.get_key().type() == ns_oid_view_t::Type::MAX);
assert(key == iter.get_key());
} else {
assert(key == iter.get_key());
}
if constexpr (GET_KEY) {
index_key->set(iter.get_key());
}
if constexpr (IS_BOTTOM) {
auto value_ptr = iter.get_p_value();
return result_t{{iter.index()}, value_ptr, MSTAT_EQ};
} else {
auto nxt_container = iter.get_nxt_container();
auto nxt_result = NXT_STAGE_T::template lower_bound<GET_KEY>(
nxt_container, key, history, index_key);
// !history.is_GT<STAGE - 1>() means
// key[stage+1 ...] <= index[stage+1 ...][*]
assert(!nxt_result.is_end());
return result_t::from_nxt(iter.index(), nxt_result);
}
} else if (*history.get<STAGE>() == MatchKindCMP::LT) {
exclude_last = true;
}
}
auto iter = iterator_t(container);
auto bs_match = iter.seek(key, exclude_last);
if (iter.is_end()) {
assert(!exclude_last);
assert(bs_match == MatchKindBS::NE);
history.set<STAGE>(MatchKindCMP::GT);
return result_t::end();
}
history.set<STAGE>(bs_match == MatchKindBS::EQ ?
MatchKindCMP::EQ : MatchKindCMP::LT);
if constexpr (IS_BOTTOM) {
if constexpr (GET_KEY) {
index_key->set(iter.get_key());
}
auto value_ptr = iter.get_p_value();
return result_t{{iter.index()}, value_ptr,
(bs_match == MatchKindBS::EQ ? MSTAT_EQ : MSTAT_LT0)};
} else {
if (bs_match == MatchKindBS::EQ) {
return nxt_lower_bound<GET_KEY>(key, iter, history, index_key);
} else {
return smallest_result<GET_KEY>(iter, index_key);
}
}
}
template <IsFullKey Key>
static node_offset_t insert_size(const Key& key,
const value_input_t& value) {
if constexpr (IS_BOTTOM) {
return iterator_t::estimate_insert(key, value);
} else {
return iterator_t::estimate_insert(key, value) +
NXT_STAGE_T::iterator_t::header_size() +
NXT_STAGE_T::insert_size(key, value);
}
}
template <IsFullKey Key>
static node_offset_t insert_size_at(match_stage_t stage,
const Key& key,
const value_input_t& value) {
if (stage == STAGE) {
return insert_size(key, value);
} else {
assert(stage < STAGE);
return NXT_STAGE_T::template insert_size_at(stage, key, value);
}
}
template <typename T = std::tuple<match_stage_t, node_offset_t>>
static std::enable_if_t<NODE_TYPE == node_type_t::INTERNAL, T> evaluate_insert(
const container_t& container, const key_view_t& key,
const value_input_t& value, position_t& position, bool evaluate_last) {
auto iter = iterator_t(container);
auto& index = position.index;
if (evaluate_last || index == INDEX_END) {
iter.seek_last();
index = iter.index();
// evaluate the previous index
} else {
assert(is_valid_index(index));
// evaluate the current index
iter.seek_at(index);
auto match = key <=> iter.get_key();
if (match == 0) {
if constexpr (IS_BOTTOM) {
ceph_abort("insert conflict at current index!");
} else {
// insert into the current index
auto nxt_container = iter.get_nxt_container();
return NXT_STAGE_T::evaluate_insert(
nxt_container, key, value, position.nxt, false);
}
} else {
assert(match == std::strong_ordering::less);
if (index == 0) {
// already the first index, so insert at the current index
return {STAGE, insert_size(key, value)};
}
--index;
iter = iterator_t(container);
iter.seek_at(index);
// proceed to evaluate the previous index
}
}
// XXX(multi-type): when key is from a different type of node
auto match = key <=> iter.get_key();
if (match == std::strong_ordering::greater) {
// key doesn't match both indexes, so insert at the current index
++index;
return {STAGE, insert_size(key, value)};
} else {
assert(match == std::strong_ordering::equal);
if constexpr (IS_BOTTOM) {
// ceph_abort?
ceph_abort("insert conflict at the previous index!");
} else {
// insert into the previous index
auto nxt_container = iter.get_nxt_container();
return NXT_STAGE_T::evaluate_insert(
nxt_container, key, value, position.nxt, true);
}
}
}
template <typename T = bool>
static std::enable_if_t<NODE_TYPE == node_type_t::LEAF, T>
compensate_insert_position_at(match_stage_t stage, position_t& position) {
auto& index = position.index;
if (stage == STAGE) {
assert(index == 0);
// insert at the end of the current stage
index = INDEX_END;
return true;
} else {
if constexpr (IS_BOTTOM) {
ceph_abort("impossible path");
} else {
assert(stage < STAGE);
bool compensate = NXT_STAGE_T::
compensate_insert_position_at(stage, position.nxt);
if (compensate) {
assert(is_valid_index(index));
if (index == 0) {
// insert into the *last* index of the current stage
index = INDEX_LAST;
return true;
} else {
--index;
return false;
}
} else {
return false;
}
}
}
}
static void patch_insert_end(position_t& insert_pos, match_stage_t insert_stage) {
assert(insert_stage <= STAGE);
if (insert_stage == STAGE) {
insert_pos.index = INDEX_END;
} else if constexpr (!IS_BOTTOM) {
insert_pos.index = INDEX_LAST;
NXT_STAGE_T::patch_insert_end(insert_pos.nxt, insert_stage);
}
}
template <typename T = std::tuple<match_stage_t, node_offset_t>>
static std::enable_if_t<NODE_TYPE == node_type_t::LEAF, T> evaluate_insert(
const key_hobj_t& key, const value_config_t& value,
const MatchHistory& history, match_stat_t mstat, position_t& position) {
match_stage_t insert_stage = STAGE_TOP;
while (*history.get_by_stage(insert_stage) == MatchKindCMP::EQ) {
assert(insert_stage != STAGE_BOTTOM && "insert conflict!");
--insert_stage;
}
if (history.is_GT()) {
if (position.is_end()) {
// no need to compensate insert position
assert(insert_stage <= STAGE && "impossible insert stage");
} else if (position == position_t::begin()) {
// I must be short-circuited by staged::smallest_result()
// in staged::lower_bound(), so we need to rely on mstat instead
assert(mstat >= MSTAT_LT0 && mstat <= MSTAT_LT3);
if (mstat == MSTAT_LT0) {
insert_stage = STAGE_RIGHT;
} else if (mstat == MSTAT_LT1) {
insert_stage = STAGE_STRING;
} else {
insert_stage = STAGE_LEFT;
}
// XXX(multi-type): need to upgrade node type before inserting an
// incompatible index at front.
assert(insert_stage <= STAGE && "incompatible insert");
} else {
assert(insert_stage <= STAGE && "impossible insert stage");
[[maybe_unused]] bool ret = compensate_insert_position_at(insert_stage, position);
assert(!ret);
}
}
if (position.is_end()) {
patch_insert_end(position, insert_stage);
}
node_offset_t insert_size = insert_size_at(insert_stage, key, value);
return {insert_stage, insert_size};
}
template <KeyT KT>
static const value_t* insert_new(
NodeExtentMutable& mut, const memory_range_t& range,
const full_key_t<KT>& key, const value_input_t& value) {
char* p_insert = const_cast<char*>(range.p_end);
const value_t* p_value = nullptr;
StagedAppender<KT> appender;
appender.init_empty(&mut, p_insert);
appender.append(key, value, p_value);
[[maybe_unused]] const char* p_insert_front = appender.wrap();
assert(p_insert_front == range.p_start);
return p_value;
}
template <KeyT KT, bool SPLIT>
static const value_t* proceed_insert_recursively(
NodeExtentMutable& mut, const container_t& container,
const full_key_t<KT>& key, const value_input_t& value,
position_t& position, match_stage_t& stage,
node_offset_t& _insert_size, const char* p_left_bound) {
// proceed insert from right to left
assert(stage <= STAGE);
auto iter = iterator_t(container);
auto& index = position.index;
bool do_insert = false;
if (stage == STAGE) {
if (index == INDEX_END) {
iter.seek_last();
iter.set_end();
index = iter.index();
} else {
assert(is_valid_index(index));
iter.seek_till_end(index);
}
do_insert = true;
} else { // stage < STAGE
if (index == INDEX_LAST) {
iter.seek_last();
index = iter.index();
} else {
assert(is_valid_index(index));
iter.seek_till_end(index);
}
if constexpr (SPLIT) {
if (iter.is_end()) {
// insert at the higher stage due to split
do_insert = true;
_insert_size = insert_size(key, value);
stage = STAGE;
}
} else {
assert(!iter.is_end());
}
}
if (do_insert) {
if constexpr (!IS_BOTTOM) {
position.nxt = position_t::nxt_t::begin();
}
assert(_insert_size == insert_size(key, value));
if constexpr (IS_BOTTOM) {
return iter.insert(
mut, key, value, _insert_size, p_left_bound);
} else {
auto range = iter.insert_prefix(
mut, key, _insert_size, p_left_bound);
return NXT_STAGE_T::template insert_new<KT>(mut, range, key, value);
}
} else {
if constexpr (!IS_BOTTOM) {
auto nxt_container = iter.get_nxt_container();
auto p_value = NXT_STAGE_T::template proceed_insert_recursively<KT, SPLIT>(
mut, nxt_container, key, value,
position.nxt, stage, _insert_size, p_left_bound);
iter.update_size(mut, _insert_size);
return p_value;
} else {
ceph_abort("impossible path");
}
}
}
template <KeyT KT, bool SPLIT>
static const value_t* proceed_insert(
NodeExtentMutable& mut, const container_t& container,
const full_key_t<KT>& key, const value_input_t& value,
position_t& position, match_stage_t& stage, node_offset_t& _insert_size) {
auto p_left_bound = container.p_left_bound();
if (unlikely(!container.keys())) {
if (position.is_end()) {
position = position_t::begin();
assert(stage == STAGE);
assert(_insert_size == insert_size(key, value));
} else if (position == position_t::begin()) {
// when insert into a trimmed and empty left node
stage = STAGE;
_insert_size = insert_size(key, value);
} else {
ceph_abort("impossible path");
}
if constexpr (IS_BOTTOM) {
return container_t::insert_at(
mut, container, key, value, 0, _insert_size, p_left_bound);
} else {
auto range = container_t::template insert_prefix_at(
mut, container, key, 0, _insert_size, p_left_bound);
return NXT_STAGE_T::template insert_new<KT>(mut, range, key, value);
}
} else {
return proceed_insert_recursively<KT, SPLIT>(
mut, container, key, value,
position, stage, _insert_size, p_left_bound);
}
}
static std::ostream& dump(const container_t& container,
std::ostream& os,
const std::string& prefix,
size_t& size,
const char* p_start) {
auto iter = iterator_t(container);
assert(!iter.is_end());
std::string prefix_blank(prefix.size(), ' ');
const std::string* p_prefix = &prefix;
size += iterator_t::header_size();
do {
std::ostringstream sos;
sos << *p_prefix << iter.get_key() << ": ";
std::string i_prefix = sos.str();
if constexpr (!IS_BOTTOM) {
auto nxt_container = iter.get_nxt_container();
size += iter.size_to_nxt();
NXT_STAGE_T::dump(nxt_container, os, i_prefix, size, p_start);
} else {
auto value_ptr = iter.get_p_value();
int offset = reinterpret_cast<const char*>(value_ptr) - p_start;
size += iter.size();
os << "\n" << i_prefix;
if constexpr (NODE_TYPE == node_type_t::LEAF) {
os << *value_ptr;
} else {
os << "0x" << std::hex << value_ptr->value << std::dec;
}
os << " " << size << "B"
<< " @" << offset << "B";
}
if (iter.is_last()) {
break;
} else {
++iter;
p_prefix = &prefix_blank;
}
} while (true);
return os;
}
static void validate(const container_t& container) {
auto iter = iterator_t(container);
assert(!iter.is_end());
auto key = iter.get_key();
do {
if constexpr (!IS_BOTTOM) {
auto nxt_container = iter.get_nxt_container();
NXT_STAGE_T::validate(nxt_container);
}
if (iter.is_last()) {
break;
} else {
++iter;
assert(key < iter.get_key());
key = iter.get_key();
}
} while (true);
}
static void get_stats(const container_t& container, node_stats_t& stats,
key_view_t& index_key) {
auto iter = iterator_t(container);
assert(!iter.is_end());
stats.size_overhead += iterator_t::header_size();
do {
index_key.replace(iter.get_key());
stats.size_overhead += iter.size_overhead();
if constexpr (!IS_BOTTOM) {
auto nxt_container = iter.get_nxt_container();
NXT_STAGE_T::get_stats(nxt_container, stats, index_key);
} else {
++stats.num_kvs;
size_t kv_logical_size = index_key.size_logical();
size_t value_size;
if constexpr (NODE_TYPE == node_type_t::LEAF) {
value_size = iter.get_p_value()->allocation_size();
} else {
value_size = sizeof(value_t);
}
stats.size_value += value_size;
kv_logical_size += value_size;
stats.size_logical += kv_logical_size;
}
if (iter.is_last()) {
break;
} else {
++iter;
}
} while (true);
}
template <bool GET_KEY, bool GET_VAL>
static bool get_next_slot(
const container_t& container, // IN
position_t& pos, // IN&OUT
key_view_t* p_index_key, // OUT
const value_t** pp_value) { // OUT
auto iter = iterator_t(container);
assert(!iter.is_end());
iter.seek_at(pos.index);
bool find_next;
if constexpr (!IS_BOTTOM) {
auto nxt_container = iter.get_nxt_container();
find_next = NXT_STAGE_T::template get_next_slot<GET_KEY, GET_VAL>(
nxt_container, pos.nxt, p_index_key, pp_value);
} else {
find_next = true;
}
if (find_next) {
if (iter.is_last()) {
return true;
} else {
pos.index = iter.index() + 1;
if constexpr (!IS_BOTTOM) {
pos.nxt = NXT_STAGE_T::position_t::begin();
}
get_slot<GET_KEY, GET_VAL>(
container, pos, p_index_key, pp_value);
return false;
}
} else { // !find_next && !IS_BOTTOM
if constexpr (GET_KEY) {
assert(p_index_key);
p_index_key->set(iter.get_key());
} else {
assert(!p_index_key);
}
return false;
}
}
template <bool GET_KEY, bool GET_VAL>
static void get_prev_slot(
const container_t& container, // IN
position_t& pos, // IN&OUT
key_view_t* p_index_key, // OUT
const value_t** pp_value) { // OUT
assert(pos != position_t::begin());
assert(!pos.is_end());
auto& index = pos.index;
auto iter = iterator_t(container);
if constexpr (!IS_BOTTOM) {
auto& nxt_pos = pos.nxt;
if (nxt_pos == NXT_STAGE_T::position_t::begin()) {
assert(index);
--index;
iter.seek_at(index);
auto nxt_container = iter.get_nxt_container();
NXT_STAGE_T::template get_largest_slot<true, GET_KEY, GET_VAL>(
nxt_container, &nxt_pos, p_index_key, pp_value);
} else {
iter.seek_at(index);
auto nxt_container = iter.get_nxt_container();
NXT_STAGE_T::template get_prev_slot<GET_KEY, GET_VAL>(
nxt_container, nxt_pos, p_index_key, pp_value);
}
} else {
assert(index);
--index;
iter.seek_at(index);
if constexpr (GET_VAL) {
assert(pp_value);
*pp_value = iter.get_p_value();
} else {
assert(!pp_value);
}
}
if constexpr (GET_KEY) {
p_index_key->set(iter.get_key());
} else {
assert(!p_index_key);
}
}
struct _BaseEmpty {};
class _BaseWithNxtIterator {
protected:
typename NXT_STAGE_T::StagedIterator _nxt;
};
class StagedIterator
: std::conditional_t<IS_BOTTOM, _BaseEmpty, _BaseWithNxtIterator> {
public:
StagedIterator() = default;
bool valid() const { return iter.has_value(); }
index_t index() const {
return iter->index();
}
bool is_end() const { return iter->is_end(); }
bool in_progress() const {
assert(valid());
assert(!is_end());
if constexpr (!IS_BOTTOM) {
if (this->_nxt.valid()) {
if (this->_nxt.index() == 0) {
return this->_nxt.in_progress();
} else {
return true;
}
} else {
return false;
}
} else {
return false;
}
}
key_get_type get_key() const { return iter->get_key(); }
iterator_t& get() { return *iter; }
void set(const container_t& container) {
assert(!valid());
iter = iterator_t(container);
}
void set_end() { iter->set_end(); }
typename NXT_STAGE_T::StagedIterator& nxt() {
if constexpr (!IS_BOTTOM) {
if (!this->_nxt.valid()) {
auto nxt_container = iter->get_nxt_container();
this->_nxt.set(nxt_container);
}
return this->_nxt;
} else {
ceph_abort("impossible path");
}
}
typename NXT_STAGE_T::StagedIterator& get_nxt() {
if constexpr (!IS_BOTTOM) {
return this->_nxt;
} else {
ceph_abort("impossible path");
}
}
StagedIterator& operator++() {
if (iter->is_last()) {
iter->set_end();
} else {
++(*iter);
}
if constexpr (!IS_BOTTOM) {
this->_nxt.reset();
}
return *this;
}
void reset() {
if (valid()) {
iter.reset();
if constexpr (!IS_BOTTOM) {
this->_nxt.reset();
}
}
}
template<typename OutputIt>
auto do_format_to(OutputIt out, bool is_top) const {
if (valid()) {
if (iter->is_end()) {
return fmt::format_to(out, "END");
} else {
out = fmt::format_to(out, "{}", index());
}
} else {
if (is_top) {
return fmt::format_to(out, "invalid StagedIterator!");
} else {
out = fmt::format_to(out, "0!");
}
}
if constexpr (!IS_BOTTOM) {
out = fmt::format_to(out, ", ");
return this->_nxt.do_format_to(out, false);
} else {
return out;
}
}
position_t get_pos() const {
if (valid()) {
if constexpr (IS_BOTTOM) {
return position_t{index()};
} else {
return position_t{index(), this->_nxt.get_pos()};
}
} else {
return position_t::begin();
}
}
void encode(const char* p_node_start, ceph::bufferlist& encoded) const {
uint8_t present = static_cast<bool>(iter);
ceph::encode(present, encoded);
if (iter.has_value()) {
iter->encode(p_node_start, encoded);
if constexpr (!IS_BOTTOM) {
this->_nxt.encode(p_node_start, encoded);
}
}
}
static StagedIterator decode(const char* p_node_start,
extent_len_t node_size,
ceph::bufferlist::const_iterator& delta) {
StagedIterator ret;
uint8_t present;
ceph::decode(present, delta);
if (present) {
ret.iter = iterator_t::decode(
p_node_start, node_size, delta);
if constexpr (!IS_BOTTOM) {
ret._nxt = NXT_STAGE_T::StagedIterator::decode(
p_node_start, node_size, delta);
}
}
return ret;
}
private:
std::optional<iterator_t> iter;
};
static bool recursively_locate_split(
size_t& current_size, size_t extra_size,
size_t target_size, StagedIterator& split_at) {
assert(current_size <= target_size);
iterator_t& split_iter = split_at.get();
current_size = split_iter.seek_split(current_size, extra_size, target_size);
assert(current_size <= target_size);
assert(!split_iter.is_end());
if (split_iter.index() == 0) {
extra_size += iterator_t::header_size();
} else {
extra_size = 0;
}
bool locate_nxt;
if constexpr (!IS_BOTTOM) {
locate_nxt = NXT_STAGE_T::recursively_locate_split(
current_size, extra_size + split_iter.size_to_nxt(),
target_size, split_at.nxt());
} else { // IS_BOTTOM
// located upper_bound, fair split strategy
size_t nxt_size = split_iter.size() + extra_size;
assert(current_size + nxt_size > target_size);
if (current_size + nxt_size/2 < target_size) {
// include next
current_size += nxt_size;
locate_nxt = true;
} else {
// exclude next
locate_nxt = false;
}
}
if (locate_nxt) {
if (split_iter.is_last()) {
return true;
} else {
++split_at;
return false;
}
} else {
return false;
}
}
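  /*
   * Worked example of the fair split strategy above (sizes are hypothetical):
   * with target_size=1000 and current_size=960 at the bottom stage, a next
   * slot of nxt_size=100 is excluded because 960 + 100/2 = 1010 >= 1000,
   * while a next slot of nxt_size=60 is included because 960 + 60/2 = 990 < 1000,
   * advancing split_at past it and raising current_size to 1020.
   */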
static bool recursively_locate_split_inserted(
size_t& current_size, size_t extra_size, size_t target_size,
position_t& insert_pos, match_stage_t insert_stage, size_t insert_size,
std::optional<bool>& is_insert_left, StagedIterator& split_at) {
assert(current_size <= target_size);
assert(!is_insert_left.has_value());
iterator_t& split_iter = split_at.get();
auto& insert_index = insert_pos.index;
if (insert_stage == STAGE) {
current_size = split_iter.template seek_split_inserted<true>(
current_size, extra_size, target_size,
insert_index, insert_size, is_insert_left);
assert(is_insert_left.has_value());
assert(current_size <= target_size);
if (split_iter.index() == 0) {
if (insert_index == 0) {
if (*is_insert_left == false) {
extra_size += iterator_t::header_size();
} else {
extra_size = 0;
}
} else {
extra_size += iterator_t::header_size();
}
} else {
extra_size = 0;
}
if (*is_insert_left == false && split_iter.index() == insert_index) {
// split_iter can be end
// found the lower-bound of target_size
// ...[s_index-1] |!| (i_index) [s_index]...
// located upper-bound, fair split strategy
// look at the next slot (the insert item)
size_t nxt_size = insert_size + extra_size;
assert(current_size + nxt_size > target_size);
if (current_size + nxt_size/2 < target_size) {
// include next
*is_insert_left = true;
current_size += nxt_size;
if (split_iter.is_end()) {
// ...[s_index-1] (i_index) |!|
return true;
} else {
return false;
}
} else {
// exclude next
return false;
}
} else {
// Already considered insert effect in the current stage.
// Look into the next stage to identify the target_size lower-bound w/o
// insert effect.
assert(!split_iter.is_end());
bool locate_nxt;
if constexpr (!IS_BOTTOM) {
locate_nxt = NXT_STAGE_T::recursively_locate_split(
current_size, extra_size + split_iter.size_to_nxt(),
target_size, split_at.nxt());
} else { // IS_BOTTOM
// located upper-bound, fair split strategy
// look at the next slot
size_t nxt_size = split_iter.size() + extra_size;
assert(current_size + nxt_size > target_size);
if (current_size + nxt_size/2 < target_size) {
// include next
current_size += nxt_size;
locate_nxt = true;
} else {
// exclude next
locate_nxt = false;
}
}
if (locate_nxt) {
if (split_iter.is_last()) {
auto end_index = split_iter.index() + 1;
if (insert_index == INDEX_END) {
insert_index = end_index;
}
assert(insert_index <= end_index);
if (insert_index == end_index) {
assert(*is_insert_left == false);
split_iter.set_end();
// ...[s_index-1] |!| (i_index)
return false;
} else {
assert(*is_insert_left == true);
return true;
}
} else {
++split_at;
return false;
}
} else {
return false;
}
}
} else {
if constexpr (!IS_BOTTOM) {
assert(insert_stage < STAGE);
current_size = split_iter.template seek_split_inserted<false>(
current_size, extra_size, target_size,
insert_index, insert_size, is_insert_left);
assert(!split_iter.is_end());
assert(current_size <= target_size);
if (split_iter.index() == 0) {
extra_size += iterator_t::header_size();
} else {
extra_size = 0;
}
bool locate_nxt;
if (!is_insert_left.has_value()) {
// Considered insert effect in the current stage, and insert happens
// in the lower stage.
// Look into the next stage to identify the target_size lower-bound w/
// insert effect.
assert(split_iter.index() == insert_index);
locate_nxt = NXT_STAGE_T::recursively_locate_split_inserted(
current_size, extra_size + split_iter.size_to_nxt(), target_size,
insert_pos.nxt, insert_stage, insert_size,
is_insert_left, split_at.nxt());
assert(is_insert_left.has_value());
#ifndef NDEBUG
if (locate_nxt) {
assert(*is_insert_left == true);
}
#endif
} else {
// is_insert_left.has_value() == true
// Insert will *not* happen in the lower stage.
// Need to look into the next stage to identify the target_size
// lower-bound w/ insert effect
assert(split_iter.index() != insert_index);
locate_nxt = NXT_STAGE_T::recursively_locate_split(
current_size, extra_size + split_iter.size_to_nxt(),
target_size, split_at.nxt());
#ifndef NDEBUG
if (split_iter.index() < insert_index) {
assert(*is_insert_left == false);
} else {
assert(*is_insert_left == true);
}
#endif
}
if (locate_nxt) {
if (split_iter.is_last()) {
return true;
} else {
++split_at;
return false;
}
} else {
return false;
}
} else {
ceph_abort("impossible path");
        return false;
}
}
}
/*
* container appender type system
* container_t::Appender(NodeExtentMutable& mut, char* p_append)
* append(const container_t& src, index_t from, index_t items)
* wrap() -> char*
* IF !IS_BOTTOM:
* open_nxt(const key_get_type&)
* open_nxt(const full_key_t&)
* -> std::tuple<NodeExtentMutable&, char*>
* wrap_nxt(char* p_append)
* ELSE
* append(const full_key_t& key, const value_input_t& value)
*/
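  /*
   * A minimal sketch of how the appender contract is typically driven when
   * copying entries into a fresh node (mut, p_start, src_iter and to_position
   * are assumed to be prepared by the enclosing node layout code):
   *
   *   StagedAppender<KeyT::VIEW> appender;
   *   appender.init_empty(&mut, p_start);
   *   // copy from src_iter up to to_position, recursing into lower stages
   *   append_until<KeyT::VIEW>(src_iter, appender, to_position, STAGE);
   *   char* p_append = appender.wrap();
   */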
template <KeyT KT>
struct _BaseWithNxtAppender {
typename NXT_STAGE_T::template StagedAppender<KT> _nxt;
};
template <KeyT KT>
class StagedAppender
: std::conditional_t<IS_BOTTOM, _BaseEmpty, _BaseWithNxtAppender<KT>> {
public:
StagedAppender() = default;
~StagedAppender() {
assert(!require_wrap_nxt);
assert(!valid());
}
bool valid() const { return appender.has_value(); }
index_t index() const {
assert(valid());
return _index;
}
bool in_progress() const { return require_wrap_nxt; }
// TODO: pass by reference
void init_empty(NodeExtentMutable* p_mut, char* p_start) {
assert(!valid());
appender = typename container_t::template Appender<KT>(p_mut, p_start);
_index = 0;
}
void init_tail(NodeExtentMutable* p_mut,
const container_t& container,
match_stage_t stage) {
assert(!valid());
auto iter = iterator_t(container);
iter.seek_last();
if (stage == STAGE) {
appender = iter.template get_appender<KT>(p_mut);
_index = iter.index() + 1;
if constexpr (!IS_BOTTOM) {
assert(!this->_nxt.valid());
}
} else {
assert(stage < STAGE);
if constexpr (!IS_BOTTOM) {
appender = iter.template get_appender_opened<KT>(p_mut);
_index = iter.index();
require_wrap_nxt = true;
auto nxt_container = iter.get_nxt_container();
this->_nxt.init_tail(p_mut, nxt_container, stage);
} else {
ceph_abort("impossible path");
}
}
}
// possible to make src_iter end if to_index == INDEX_END
void append_until(StagedIterator& src_iter, index_t& to_index) {
assert(!require_wrap_nxt);
auto s_index = src_iter.index();
src_iter.get().template copy_out_until<KT>(*appender, to_index);
assert(src_iter.index() == to_index);
assert(to_index >= s_index);
auto increment = (to_index - s_index);
if (increment) {
_index += increment;
if constexpr (!IS_BOTTOM) {
src_iter.get_nxt().reset();
}
}
}
void append(const full_key_t<KT>& key,
const value_input_t& value, const value_t*& p_value) {
assert(!require_wrap_nxt);
if constexpr (!IS_BOTTOM) {
auto& nxt = open_nxt(key);
nxt.append(key, value, p_value);
wrap_nxt();
} else {
appender->append(key, value, p_value);
++_index;
}
}
char* wrap() {
assert(valid());
assert(_index > 0);
if constexpr (!IS_BOTTOM) {
if (require_wrap_nxt) {
wrap_nxt();
}
}
auto ret = appender->wrap();
appender.reset();
return ret;
}
typename NXT_STAGE_T::template StagedAppender<KT>&
    open_nxt(key_get_type partial_key) {
assert(!require_wrap_nxt);
if constexpr (!IS_BOTTOM) {
require_wrap_nxt = true;
        auto [p_mut, p_append] = appender->open_nxt(partial_key);
this->_nxt.init_empty(p_mut, p_append);
return this->_nxt;
} else {
ceph_abort("impossible path");
}
}
typename NXT_STAGE_T::template StagedAppender<KT>&
open_nxt(const full_key_t<KT>& key) {
assert(!require_wrap_nxt);
if constexpr (!IS_BOTTOM) {
require_wrap_nxt = true;
auto [p_mut, p_append] = appender->open_nxt(key);
this->_nxt.init_empty(p_mut, p_append);
return this->_nxt;
} else {
ceph_abort("impossible path");
}
}
typename NXT_STAGE_T::template StagedAppender<KT>& get_nxt() {
if constexpr (!IS_BOTTOM) {
assert(require_wrap_nxt);
return this->_nxt;
} else {
ceph_abort("impossible path");
}
}
void wrap_nxt() {
if constexpr (!IS_BOTTOM) {
assert(require_wrap_nxt);
require_wrap_nxt = false;
auto p_append = this->_nxt.wrap();
appender->wrap_nxt(p_append);
++_index;
} else {
ceph_abort("impossible path");
}
}
private:
std::optional<typename container_t::template Appender<KT>> appender;
index_t _index;
bool require_wrap_nxt = false;
};
template <KeyT KT>
static void _append_range(
StagedIterator& src_iter, StagedAppender<KT>& appender, index_t& to_index) {
if (src_iter.is_end()) {
// append done
assert(to_index == INDEX_END);
to_index = src_iter.index();
} else if constexpr (!IS_BOTTOM) {
if (appender.in_progress()) {
// appender has appended something at the current item,
// cannot append the current item as-a-whole
index_t to_index_nxt = INDEX_END;
NXT_STAGE_T::template _append_range<KT>(
src_iter.nxt(), appender.get_nxt(), to_index_nxt);
++src_iter;
appender.wrap_nxt();
} else if (src_iter.in_progress()) {
// src_iter is not at the beginning of the current item,
// cannot append the current item as-a-whole
index_t to_index_nxt = INDEX_END;
NXT_STAGE_T::template _append_range<KT>(
src_iter.get_nxt(), appender.open_nxt(src_iter.get_key()), to_index_nxt);
++src_iter;
appender.wrap_nxt();
} else {
// we can safely append the current item as-a-whole
}
}
appender.append_until(src_iter, to_index);
}
template <KeyT KT>
static void _append_into(StagedIterator& src_iter, StagedAppender<KT>& appender,
position_t& position, match_stage_t stage) {
assert(position.index == src_iter.index());
// reaches the last item
if (stage == STAGE) {
// done, end recursion
if constexpr (!IS_BOTTOM) {
position.nxt = position_t::nxt_t::begin();
}
} else {
assert(stage < STAGE);
// proceed append in the next stage
NXT_STAGE_T::template append_until<KT>(
src_iter.nxt(), appender.open_nxt(src_iter.get_key()),
position.nxt, stage);
}
}
template <KeyT KT>
static void append_until(StagedIterator& src_iter, StagedAppender<KT>& appender,
position_t& position, match_stage_t stage) {
index_t from_index = src_iter.index();
index_t& to_index = position.index;
assert(from_index <= to_index);
if constexpr (IS_BOTTOM) {
assert(stage == STAGE);
appender.append_until(src_iter, to_index);
} else {
assert(stage <= STAGE);
if (src_iter.index() == to_index) {
_append_into<KT>(src_iter, appender, position, stage);
} else {
if (to_index == INDEX_END) {
assert(stage == STAGE);
} else if (to_index == INDEX_LAST) {
assert(stage < STAGE);
}
_append_range<KT>(src_iter, appender, to_index);
_append_into<KT>(src_iter, appender, position, stage);
}
}
to_index -= from_index;
}
template <KeyT KT>
static bool append_insert(
const full_key_t<KT>& key, const value_input_t& value,
StagedIterator& src_iter, StagedAppender<KT>& appender,
bool is_front_insert, match_stage_t& stage, const value_t*& p_value) {
assert(src_iter.valid());
if (stage == STAGE) {
appender.append(key, value, p_value);
if (src_iter.is_end()) {
return true;
} else {
return false;
}
} else {
assert(stage < STAGE);
if constexpr (!IS_BOTTOM) {
auto nxt_is_end = NXT_STAGE_T::template append_insert<KT>(
key, value, src_iter.get_nxt(), appender.get_nxt(),
is_front_insert, stage, p_value);
if (nxt_is_end) {
appender.wrap_nxt();
++src_iter;
if (is_front_insert) {
stage = STAGE;
}
if (src_iter.is_end()) {
return true;
}
}
return false;
} else {
ceph_abort("impossible path");
}
}
}
/* TrimType:
   * BEFORE: remove the entire container, which normally means the
   * corresponding higher-stage iterator needs to be trimmed as-a-whole.
   * AFTER: retain the entire container, which normally means the trim should
   * start from the next iterator at the higher stage.
   * AT: trim happens inside the current container, and the corresponding
   * higher-stage iterator needs to be adjusted by the trimmed size.
*/
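  /*
   * For illustration (positions are hypothetical): trimming a two-stage node
   * at stage-1 index 2 / stage-0 index 0 makes the lower stage report BEFORE,
   * so the whole item at index 2 is removed via trim_until() and AT is
   * propagated upwards with the reclaimed size; had the lower stage reported
   * AFTER, item 2 would be kept and trimming would start from item 3 instead.
   */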
static std::tuple<TrimType, node_offset_t>
recursively_trim(NodeExtentMutable& mut, StagedIterator& trim_at) {
if (!trim_at.valid()) {
return {TrimType::BEFORE, 0u};
}
if (trim_at.is_end()) {
return {TrimType::AFTER, 0u};
}
auto& iter = trim_at.get();
if constexpr (!IS_BOTTOM) {
auto [type, trimmed] = NXT_STAGE_T::recursively_trim(
mut, trim_at.get_nxt());
node_offset_t trim_size;
if (type == TrimType::AFTER) {
if (iter.is_last()) {
return {TrimType::AFTER, 0u};
}
++trim_at;
trim_size = iter.trim_until(mut);
} else if (type == TrimType::BEFORE) {
if (iter.index() == 0) {
return {TrimType::BEFORE, 0u};
}
trim_size = iter.trim_until(mut);
} else {
trim_size = iter.trim_at(mut, trimmed);
}
return {TrimType::AT, trim_size};
} else {
if (iter.index() == 0) {
return {TrimType::BEFORE, 0u};
} else {
auto trimmed = iter.trim_until(mut);
return {TrimType::AT, trimmed};
}
}
}
static void trim(NodeExtentMutable& mut, StagedIterator& trim_at) {
auto [type, trimmed] = recursively_trim(mut, trim_at);
if (type == TrimType::BEFORE) {
assert(trim_at.valid());
auto& iter = trim_at.get();
iter.trim_until(mut);
}
}
static std::optional<std::tuple<match_stage_t, node_offset_t, bool>>
proceed_erase_recursively(
NodeExtentMutable& mut,
const container_t& container, // IN
const char* p_left_bound, // IN
position_t& pos) { // IN&OUT
auto iter = iterator_t(container);
auto& index = pos.index;
assert(is_valid_index(index));
iter.seek_at(index);
bool is_last = iter.is_last();
if constexpr (!IS_BOTTOM) {
auto nxt_container = iter.get_nxt_container();
auto ret = NXT_STAGE_T::proceed_erase_recursively(
mut, nxt_container, p_left_bound, pos.nxt);
if (ret.has_value()) {
// erased at lower level
auto [r_stage, r_erase_size, r_done] = *ret;
assert(r_erase_size != 0);
iter.update_size(mut, -r_erase_size);
if (r_done) {
// done, the next_pos is calculated
return ret;
} else {
if (is_last) {
// need to find the next pos at upper stage
return ret;
} else {
// done, calculate the next pos
++index;
pos.nxt = NXT_STAGE_T::position_t::begin();
return {{r_stage, r_erase_size, true}};
}
}
}
// not erased at lower level
}
// not erased yet
if (index == 0 && is_last) {
// need to erase from the upper stage
return std::nullopt;
} else {
auto erase_size = iter.erase(mut, p_left_bound);
assert(erase_size != 0);
if (is_last) {
// need to find the next pos at upper stage
return {{STAGE, erase_size, false}};
} else {
// done, calculate the next pos (should be correct already)
if constexpr (!IS_BOTTOM) {
assert(pos.nxt == NXT_STAGE_T::position_t::begin());
}
return {{STAGE, erase_size, true}};
}
}
}
static match_stage_t erase(
NodeExtentMutable& mut,
const container_t& node_stage, // IN
position_t& erase_pos) { // IN&OUT
auto p_left_bound = node_stage.p_left_bound();
auto ret = proceed_erase_recursively(
mut, node_stage, p_left_bound, erase_pos);
if (ret.has_value()) {
auto [r_stage, r_erase_size, r_done] = *ret;
std::ignore = r_erase_size;
if (r_done) {
assert(!erase_pos.is_end());
return r_stage;
} else {
// erased the last kv
erase_pos = position_t::end();
return r_stage;
}
} else {
assert(node_stage.keys() == 1);
node_stage.erase_at(mut, node_stage, 0, p_left_bound);
erase_pos = position_t::end();
return STAGE;
}
}
static std::tuple<match_stage_t, node_offset_t> evaluate_merge(
const key_view_t& left_pivot_index,
const container_t& right_container) {
auto r_iter = iterator_t(right_container);
r_iter.seek_at(0);
node_offset_t compensate = r_iter.header_size();
auto cmp = left_pivot_index <=> r_iter.get_key();
if (cmp == std::strong_ordering::equal) {
if constexpr (!IS_BOTTOM) {
// the index is equal, compensate and look at the lower stage
compensate += r_iter.size_to_nxt();
auto r_nxt_container = r_iter.get_nxt_container();
auto [ret_stage, ret_compensate] = NXT_STAGE_T::evaluate_merge(
left_pivot_index, r_nxt_container);
compensate += ret_compensate;
return {ret_stage, compensate};
} else {
ceph_abort("impossible path: left_pivot_key == right_first_key");
}
} else if (cmp == std::strong_ordering::less) {
// ok, do merge here
return {STAGE, compensate};
} else {
      ceph_abort("impossible path: left_pivot_key > right_first_key");
}
}
};
/**
* Configurations for struct staged
*
 * The staged_params_* types assemble different container_t implementations
 * (defined by staged::_iterator_t) by STAGE, and construct the final
 * multi-stage implementations for different node layouts defined by
* node_extent_t<FieldType, NODE_TYPE>.
*
* The specialized implementations for different layouts are accessible through
* the helper type node_to_stage_t<node_extent_t<FieldType, NODE_TYPE>>.
*
* Specifically, the settings of 8 layouts are:
*
* The layout (N0, LEAF/INTERNAL) has 3 stages:
* - STAGE_LEFT: node_extent_t<node_fields_0_t, LEAF/INTERNAL>
* - STAGE_STRING: item_iterator_t<LEAF/INTERNAL>
* - STAGE_RIGHT: sub_items_t<LEAF/INTERNAL>
*
* The layout (N1, LEAF/INTERNAL) has 3 stages:
* - STAGE_LEFT: node_extent_t<node_fields_1_t, LEAF/INTERNAL>
* - STAGE_STRING: item_iterator_t<LEAF/INTERNAL>
* - STAGE_RIGHT: sub_items_t<LEAF/INTERNAL>
*
* The layout (N2, LEAF/INTERNAL) has 2 stages:
* - STAGE_STRING: node_extent_t<node_fields_2_t, LEAF/INTERNAL>
* - STAGE_RIGHT: sub_items_t<LEAF/INTERNAL>
*
* The layout (N3, LEAF) has 1 stage:
* - STAGE_RIGHT: node_extent_t<leaf_fields_3_t, LEAF>
*
* The layout (N3, INTERNAL) has 1 stage:
* - STAGE_RIGHT: node_extent_t<internal_fields_3_t, INTERNAL>
*/
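/*
 * A minimal sketch of how a layout picks its multi-stage implementation
 * (node_extent_t and the field types are defined by the node layout headers):
 *
 *   using node_stage_t = node_extent_t<node_fields_0_t, node_type_t::LEAF>;
 *   using stage_t = node_to_stage_t<node_stage_t>;
 *   static_assert(stage_t::STAGE == STAGE_LEFT);
 *   // stage_t then recurses through item_iterator_t<LEAF> (STAGE_STRING)
 *   // and leaf_sub_items_t (STAGE_RIGHT) for lookup, insert, split and merge.
 */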
template <node_type_t _NODE_TYPE>
struct staged_params_subitems {
using container_t = sub_items_t<_NODE_TYPE>;
static constexpr auto NODE_TYPE = _NODE_TYPE;
static constexpr auto STAGE = STAGE_RIGHT;
// dummy type in order to make our type system work
// any better solution to get rid of this?
using next_param_t = staged_params_subitems<NODE_TYPE>;
};
template <node_type_t _NODE_TYPE>
struct staged_params_item_iterator {
using container_t = item_iterator_t<_NODE_TYPE>;
static constexpr auto NODE_TYPE = _NODE_TYPE;
static constexpr auto STAGE = STAGE_STRING;
using next_param_t = staged_params_subitems<NODE_TYPE>;
};
template <typename NodeType>
struct staged_params_node_01 {
using container_t = NodeType;
static constexpr auto NODE_TYPE = NodeType::NODE_TYPE;
static constexpr auto STAGE = STAGE_LEFT;
using next_param_t = staged_params_item_iterator<NODE_TYPE>;
};
template <typename NodeType>
struct staged_params_node_2 {
using container_t = NodeType;
static constexpr auto NODE_TYPE = NodeType::NODE_TYPE;
static constexpr auto STAGE = STAGE_STRING;
using next_param_t = staged_params_subitems<NODE_TYPE>;
};
template <typename NodeType>
struct staged_params_node_3 {
using container_t = NodeType;
static constexpr auto NODE_TYPE = NodeType::NODE_TYPE;
static constexpr auto STAGE = STAGE_RIGHT;
// dummy type in order to make our type system work
// any better solution to get rid of this?
using next_param_t = staged_params_node_3<NodeType>;
};
template <typename NodeType, typename Enable = void> struct _node_to_stage_t;
template <typename NodeType>
struct _node_to_stage_t<NodeType,
std::enable_if_t<NodeType::FIELD_TYPE == field_type_t::N0 ||
NodeType::FIELD_TYPE == field_type_t::N1>> {
using type = staged<staged_params_node_01<NodeType>>;
};
template <typename NodeType>
struct _node_to_stage_t<NodeType,
std::enable_if_t<NodeType::FIELD_TYPE == field_type_t::N2>> {
using type = staged<staged_params_node_2<NodeType>>;
};
template <typename NodeType>
struct _node_to_stage_t<NodeType,
std::enable_if_t<NodeType::FIELD_TYPE == field_type_t::N3>> {
using type = staged<staged_params_node_3<NodeType>>;
};
template <typename NodeType>
using node_to_stage_t = typename _node_to_stage_t<NodeType>::type;
}
template<typename T>
concept HasDoFormatTo = requires(T x, std::back_insert_iterator<fmt::memory_buffer> out) {
{ x.do_format_to(out, true) } -> std::same_as<decltype(out)>;
};
template <HasDoFormatTo T> struct fmt::formatter<T> : fmt::formatter<std::string_view> {
template <typename FormatContext>
auto format(const T& staged_iterator, FormatContext& ctx) {
return staged_iterator.do_format_to(ctx.out(), true);
}
};
ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/stages/stage_types.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <cassert>
#include <optional>
#include <ostream>
#include "crimson/os/seastore/onode_manager/staged-fltree/fwd.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/node_types.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/value.h"
namespace crimson::os::seastore::onode {
using match_stage_t = int8_t;
constexpr match_stage_t STAGE_LEFT = 2; // shard/pool/crush
constexpr match_stage_t STAGE_STRING = 1; // nspace/oid
constexpr match_stage_t STAGE_RIGHT = 0; // snap/gen
constexpr auto STAGE_TOP = STAGE_LEFT;
constexpr auto STAGE_BOTTOM = STAGE_RIGHT;
constexpr bool is_valid_stage(match_stage_t stage) {
return std::clamp(stage, STAGE_BOTTOM, STAGE_TOP) == stage;
}
// TODO: replace by
// using match_history_t = int8_t;
// left_m, str_m, right_m
// 3: GT,
// 2: EQ, GT,
// 1: EQ, EQ, GT
// 0: EQ, EQ, EQ
// -1: EQ, EQ, LT
// -2: EQ, LT,
// -3: LT,
struct MatchHistory {
template <match_stage_t STAGE>
const std::optional<MatchKindCMP>& get() const {
static_assert(is_valid_stage(STAGE));
if constexpr (STAGE == STAGE_RIGHT) {
return right_match;
} else if (STAGE == STAGE_STRING) {
return string_match;
} else {
return left_match;
}
}
const std::optional<MatchKindCMP>&
get_by_stage(match_stage_t stage) const {
assert(is_valid_stage(stage));
if (stage == STAGE_RIGHT) {
return right_match;
} else if (stage == STAGE_STRING) {
return string_match;
} else {
return left_match;
}
}
template <match_stage_t STAGE = STAGE_TOP>
const bool is_GT() const;
template <match_stage_t STAGE>
void set(MatchKindCMP match) {
static_assert(is_valid_stage(STAGE));
if constexpr (STAGE < STAGE_TOP) {
assert(*get<STAGE + 1>() == MatchKindCMP::EQ);
}
assert(!get<STAGE>().has_value() || *get<STAGE>() != MatchKindCMP::EQ);
const_cast<std::optional<MatchKindCMP>&>(get<STAGE>()) = match;
}
std::ostream& dump(std::ostream& os) const {
os << "history(";
dump_each(os, left_match) << ", ";
dump_each(os, string_match) << ", ";
dump_each(os, right_match) << ")";
return os;
}
std::ostream& dump_each(
std::ostream& os, const std::optional<MatchKindCMP>& match) const {
if (!match.has_value()) {
return os << "--";
} else if (*match == MatchKindCMP::LT) {
return os << "LT";
} else if (*match == MatchKindCMP::EQ) {
return os << "EQ";
} else if (*match == MatchKindCMP::GT) {
return os << "GT";
} else {
      ceph_abort("impossible path");
}
}
std::optional<MatchKindCMP> left_match;
std::optional<MatchKindCMP> string_match;
std::optional<MatchKindCMP> right_match;
};
inline std::ostream& operator<<(std::ostream& os, const MatchHistory& pos) {
return pos.dump(os);
}
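/*
 * A minimal sketch of how a staged lookup fills the history (the surrounding
 * lookup loop is assumed); this corresponds to encoded state "1" in the TODO
 * table above:
 *
 *   MatchHistory history;
 *   history.set<STAGE_LEFT>(MatchKindCMP::EQ);
 *   history.set<STAGE_STRING>(MatchKindCMP::EQ);
 *   history.set<STAGE_RIGHT>(MatchKindCMP::GT);
 *   assert(history.is_GT()); // EQ, EQ, GT compares greater overall
 */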
template <match_stage_t STAGE>
struct _check_GT_t {
static bool eval(const MatchHistory* history) {
return history->get<STAGE>() &&
(*history->get<STAGE>() == MatchKindCMP::GT ||
(*history->get<STAGE>() == MatchKindCMP::EQ &&
_check_GT_t<STAGE - 1>::eval(history)));
}
};
template <>
struct _check_GT_t<STAGE_RIGHT> {
static bool eval(const MatchHistory* history) {
return history->get<STAGE_RIGHT>() &&
*history->get<STAGE_RIGHT>() == MatchKindCMP::GT;
}
};
template <match_stage_t STAGE>
const bool MatchHistory::is_GT() const {
static_assert(is_valid_stage(STAGE));
if constexpr (STAGE < STAGE_TOP) {
assert(get<STAGE + 1>() == MatchKindCMP::EQ);
}
return _check_GT_t<STAGE>::eval(this);
}
template <match_stage_t STAGE>
struct staged_position_t {
static_assert(is_valid_stage(STAGE));
using me_t = staged_position_t<STAGE>;
using nxt_t = staged_position_t<STAGE - 1>;
bool is_end() const {
if (index == INDEX_END) {
return true;
} else {
assert(is_valid_index(index));
return false;
}
}
index_t& index_by_stage(match_stage_t stage) {
assert(stage <= STAGE);
if (STAGE == stage) {
return index;
} else {
return nxt.index_by_stage(stage);
}
}
auto operator<=>(const me_t& o) const = default;
void assert_next_to(const me_t& prv) const {
#ifndef NDEBUG
if (is_end()) {
assert(!prv.is_end());
} else if (index == prv.index) {
assert(!nxt.is_end());
nxt.assert_next_to(prv.nxt);
} else if (index == prv.index + 1) {
assert(!prv.nxt.is_end());
assert(nxt == nxt_t::begin());
} else {
assert(false);
}
#endif
}
me_t& operator-=(const me_t& o) {
assert(is_valid_index(o.index));
assert(index >= o.index);
if (index != INDEX_END) {
assert(is_valid_index(index));
index -= o.index;
if (index == 0) {
nxt -= o.nxt;
}
}
return *this;
}
me_t& operator+=(const me_t& o) {
assert(is_valid_index(index));
assert(is_valid_index(o.index));
index += o.index;
nxt += o.nxt;
return *this;
}
void encode(ceph::bufferlist& encoded) const {
ceph::encode(index, encoded);
nxt.encode(encoded);
}
static me_t decode(ceph::bufferlist::const_iterator& delta) {
me_t ret;
ceph::decode(ret.index, delta);
ret.nxt = nxt_t::decode(delta);
return ret;
}
static me_t begin() { return {0u, nxt_t::begin()}; }
static me_t end() {
return {INDEX_END, nxt_t::end()};
}
index_t index;
nxt_t nxt;
};
template <match_stage_t STAGE>
std::ostream& operator<<(std::ostream& os, const staged_position_t<STAGE>& pos) {
if (pos.index == INDEX_END) {
os << "END";
} else if (pos.index == INDEX_LAST) {
os << "LAST";
} else {
os << pos.index;
assert(is_valid_index(pos.index));
}
return os << ", " << pos.nxt;
}
template <>
struct staged_position_t<STAGE_BOTTOM> {
using me_t = staged_position_t<STAGE_BOTTOM>;
bool is_end() const {
if (index == INDEX_END) {
return true;
} else {
assert(is_valid_index(index));
return false;
}
}
index_t& index_by_stage(match_stage_t stage) {
assert(stage == STAGE_BOTTOM);
return index;
}
auto operator<=>(const me_t&) const = default;
me_t& operator-=(const me_t& o) {
assert(is_valid_index(o.index));
assert(index >= o.index);
if (index != INDEX_END) {
assert(is_valid_index(index));
index -= o.index;
}
return *this;
}
me_t& operator+=(const me_t& o) {
assert(is_valid_index(index));
assert(is_valid_index(o.index));
index += o.index;
return *this;
}
void assert_next_to(const me_t& prv) const {
#ifndef NDEBUG
if (is_end()) {
assert(!prv.is_end());
} else {
assert(index == prv.index + 1);
}
#endif
}
void encode(ceph::bufferlist& encoded) const {
ceph::encode(index, encoded);
}
static me_t decode(ceph::bufferlist::const_iterator& delta) {
me_t ret;
ceph::decode(ret.index, delta);
return ret;
}
static me_t begin() { return {0u}; }
static me_t end() { return {INDEX_END}; }
index_t index;
};
template <>
inline std::ostream& operator<<(std::ostream& os, const staged_position_t<STAGE_BOTTOM>& pos) {
if (pos.index == INDEX_END) {
os << "END";
} else if (pos.index == INDEX_LAST) {
os << "LAST";
} else {
os << pos.index;
assert(is_valid_index(pos.index));
}
return os;
}
using search_position_t = staged_position_t<STAGE_TOP>;
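/*
 * For illustration: a search_position_t is a three-level index, so
 * search_position_t{1, {0, {2}}} addresses the third snap-gen entry under the
 * first nspace-oid string of the second shard-pool-crush slot, while
 * search_position_t::end() is {INDEX_END, {INDEX_END, {INDEX_END}}}.
 */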
template <match_stage_t STAGE>
const staged_position_t<STAGE>& cast_down(const search_position_t& pos) {
if constexpr (STAGE == STAGE_LEFT) {
return pos;
} else if constexpr (STAGE == STAGE_STRING) {
#ifndef NDEBUG
if (pos.is_end()) {
assert(pos.nxt.is_end());
} else {
assert(pos.index == 0u);
}
#endif
return pos.nxt;
} else if constexpr (STAGE == STAGE_RIGHT) {
#ifndef NDEBUG
if (pos.is_end()) {
assert(pos.nxt.nxt.is_end());
} else {
assert(pos.index == 0u);
assert(pos.nxt.index == 0u);
}
#endif
return pos.nxt.nxt;
} else {
ceph_abort("impossible path");
}
}
template <match_stage_t STAGE>
staged_position_t<STAGE>& cast_down(search_position_t& pos) {
const search_position_t& _pos = pos;
return const_cast<staged_position_t<STAGE>&>(cast_down<STAGE>(_pos));
}
template <match_stage_t STAGE>
staged_position_t<STAGE>& cast_down_fill_0(search_position_t& pos) {
if constexpr (STAGE == STAGE_LEFT) {
return pos;
  } else if constexpr (STAGE == STAGE_STRING) {
pos.index = 0;
return pos.nxt;
} else if constexpr (STAGE == STAGE_RIGHT) {
pos.index = 0;
pos.nxt.index = 0;
return pos.nxt.nxt;
} else {
ceph_abort("impossible path");
}
}
inline search_position_t&& normalize(search_position_t&& pos) { return std::move(pos); }
template <match_stage_t STAGE, typename = std::enable_if_t<STAGE != STAGE_TOP>>
search_position_t normalize(staged_position_t<STAGE>&& pos) {
if (pos.is_end()) {
return search_position_t::end();
}
if constexpr (STAGE == STAGE_STRING) {
return {0u, std::move(pos)};
  } else if constexpr (STAGE == STAGE_RIGHT) {
return {0u, {0u, std::move(pos)}};
} else {
ceph_abort("impossible path");
}
}
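/*
 * e.g. (illustrative): normalize(staged_position_t<STAGE_RIGHT>{5}) yields
 * search_position_t{0, {0, {5}}}, while normalizing an end() position of any
 * stage yields search_position_t::end().
 */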
struct memory_range_t {
const char* p_start;
const char* p_end;
};
struct container_range_t {
memory_range_t range;
extent_len_t node_size;
};
enum class ContainerType { ITERATIVE, INDEXABLE };
// the input type to construct the value during insert.
template <node_type_t> struct value_input_type;
template<> struct value_input_type<node_type_t::INTERNAL> { using type = laddr_t; };
template<> struct value_input_type<node_type_t::LEAF> { using type = value_config_t; };
template <node_type_t NODE_TYPE>
using value_input_type_t = typename value_input_type<NODE_TYPE>::type;
template <node_type_t> struct value_type;
template<> struct value_type<node_type_t::INTERNAL> { using type = laddr_packed_t; };
template<> struct value_type<node_type_t::LEAF> { using type = value_header_t; };
template <node_type_t NODE_TYPE>
using value_type_t = typename value_type<NODE_TYPE>::type;
template <node_type_t NODE_TYPE, match_stage_t STAGE>
struct staged_result_t {
using me_t = staged_result_t<NODE_TYPE, STAGE>;
bool is_end() const { return position.is_end(); }
static me_t end() {
return {staged_position_t<STAGE>::end(), nullptr, MSTAT_END};
}
template <typename T = me_t>
static std::enable_if_t<STAGE != STAGE_BOTTOM, T> from_nxt(
index_t index, const staged_result_t<NODE_TYPE, STAGE - 1>& nxt_stage_result) {
return {{index, nxt_stage_result.position},
nxt_stage_result.p_value,
nxt_stage_result.mstat};
}
staged_position_t<STAGE> position;
const value_type_t<NODE_TYPE>* p_value;
match_stat_t mstat;
};
template <node_type_t NODE_TYPE>
using lookup_result_t = staged_result_t<NODE_TYPE, STAGE_TOP>;
template <node_type_t NODE_TYPE>
lookup_result_t<NODE_TYPE>&& normalize(
lookup_result_t<NODE_TYPE>&& result) { return std::move(result); }
template <node_type_t NODE_TYPE, match_stage_t STAGE,
typename = std::enable_if_t<STAGE != STAGE_TOP>>
lookup_result_t<NODE_TYPE> normalize(
staged_result_t<NODE_TYPE, STAGE>&& result) {
// FIXME: assert result.mstat correct
return {normalize(std::move(result.position)), result.p_value, result.mstat};
}
struct node_stats_t {
size_t size_persistent = 0;
size_t size_filled = 0;
// filled by staged::get_stats()
size_t size_logical = 0;
size_t size_overhead = 0;
size_t size_value = 0;
unsigned num_kvs = 0;
};
}
#if FMT_VERSION >= 90000
template <crimson::os::seastore::onode::match_stage_t S>
struct fmt::formatter<crimson::os::seastore::onode::staged_position_t<S>> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::onode::MatchHistory> : fmt::ostream_formatter {};
#endif
ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/stages/sub_items_stage.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include "sub_items_stage.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/node_extent_mutable.h"
namespace crimson::os::seastore::onode {
template <IsFullKey Key>
const laddr_packed_t* internal_sub_items_t::insert_at(
NodeExtentMutable& mut, const internal_sub_items_t& sub_items,
const Key& key, const laddr_t& value,
index_t index, node_offset_t size, const char* p_left_bound)
{
assert(index <= sub_items.keys());
assert(size == estimate_insert(key, value));
const char* p_shift_start = p_left_bound;
const char* p_shift_end = reinterpret_cast<const char*>(
sub_items.p_first_item + 1 - index);
mut.shift_absolute(p_shift_start, p_shift_end - p_shift_start, -(int)size);
auto p_insert = const_cast<char*>(p_shift_end) - size;
auto item = internal_sub_item_t{
snap_gen_t::from_key(key), laddr_packed_t{value}};
mut.copy_in_absolute(p_insert, item);
return &reinterpret_cast<internal_sub_item_t*>(p_insert)->value;
}
#define IA_TEMPLATE(Key) \
template const laddr_packed_t* internal_sub_items_t::insert_at<Key>( \
NodeExtentMutable&, const internal_sub_items_t&, const Key&, \
const laddr_t&, index_t, node_offset_t, const char*)
IA_TEMPLATE(key_view_t);
IA_TEMPLATE(key_hobj_t);
node_offset_t internal_sub_items_t::trim_until(
NodeExtentMutable& mut, internal_sub_items_t& items, index_t index)
{
assert(index != 0);
auto keys = items.keys();
assert(index <= keys);
size_t ret = sizeof(internal_sub_item_t) * (keys - index);
assert(ret < mut.get_length());
return ret;
}
node_offset_t internal_sub_items_t::erase_at(
NodeExtentMutable& mut, const internal_sub_items_t& sub_items,
index_t index, const char* p_left_bound)
{
assert(index < sub_items.keys());
node_offset_t erase_size = sizeof(internal_sub_item_t);
const char* p_shift_start = p_left_bound;
const char* p_shift_end = reinterpret_cast<const char*>(
sub_items.p_first_item - index);
mut.shift_absolute(p_shift_start, p_shift_end - p_shift_start, erase_size);
return erase_size;
}
template <KeyT KT>
void internal_sub_items_t::Appender<KT>::append(
const internal_sub_items_t& src, index_t from, index_t items)
{
assert(from <= src.keys());
if (items == 0) {
return;
}
assert(from < src.keys());
assert(from + items <= src.keys());
node_offset_t size = sizeof(internal_sub_item_t) * items;
p_append -= size;
p_mut->copy_in_absolute(p_append, src.p_first_item + 1 - from - items, size);
}
template <KeyT KT>
void internal_sub_items_t::Appender<KT>::append(
const full_key_t<KT>& key, const laddr_t& value,
const laddr_packed_t*& p_value)
{
p_append -= sizeof(internal_sub_item_t);
auto item = internal_sub_item_t{
snap_gen_t::from_key(key), laddr_packed_t{value}};
p_mut->copy_in_absolute(p_append, item);
p_value = &reinterpret_cast<internal_sub_item_t*>(p_append)->value;
}
template <IsFullKey Key>
const value_header_t* leaf_sub_items_t::insert_at(
NodeExtentMutable& mut, const leaf_sub_items_t& sub_items,
const Key& key, const value_config_t& value,
index_t index, node_offset_t size, const char* p_left_bound)
{
assert(index <= sub_items.keys());
assert(size == estimate_insert(key, value));
// a. [... item(index)] << size
const char* p_shift_start = p_left_bound;
const char* p_shift_end = sub_items.get_item_end(index);
mut.shift_absolute(p_shift_start, p_shift_end - p_shift_start, -(int)size);
// b. insert item
auto p_insert = const_cast<char*>(p_shift_end - size);
auto p_value = reinterpret_cast<value_header_t*>(p_insert);
p_value->initiate(mut, value);
p_insert += value.allocation_size();
mut.copy_in_absolute(p_insert, snap_gen_t::from_key(key));
assert(p_insert + sizeof(snap_gen_t) + sizeof(node_offset_t) == p_shift_end);
// c. compensate affected offsets
auto item_size = value.allocation_size() + sizeof(snap_gen_t);
for (auto i = index; i < sub_items.keys(); ++i) {
const node_offset_packed_t& offset_i = sub_items.get_offset(i);
mut.copy_in_absolute((void*)&offset_i, node_offset_t(offset_i.value + item_size));
}
// d. [item(index-1) ... item(0) ... offset(index)] <<< sizeof(node_offset_t)
const char* p_offset = (index == 0 ?
(const char*)&sub_items.get_offset(0) + sizeof(node_offset_t) :
(const char*)&sub_items.get_offset(index - 1));
p_shift_start = p_shift_end;
p_shift_end = p_offset;
mut.shift_absolute(p_shift_start, p_shift_end - p_shift_start, -(int)sizeof(node_offset_t));
// e. insert offset
node_offset_t offset_to_item_start = item_size + sub_items.get_offset_to_end(index);
mut.copy_in_absolute(
const_cast<char*>(p_shift_end) - sizeof(node_offset_t), offset_to_item_start);
// f. update num_sub_keys
mut.copy_in_absolute((void*)sub_items.p_num_keys, num_keys_t(sub_items.keys() + 1));
return p_value;
}
template const value_header_t* leaf_sub_items_t::insert_at<key_hobj_t>(
NodeExtentMutable&, const leaf_sub_items_t&, const key_hobj_t&,
const value_config_t&, index_t, node_offset_t, const char*);
node_offset_t leaf_sub_items_t::trim_until(
NodeExtentMutable& mut, leaf_sub_items_t& items, index_t index)
{
assert(index != 0);
auto keys = items.keys();
assert(index <= keys);
if (index == keys) {
return 0;
}
index_t trim_items = keys - index;
const char* p_items_start = items.p_start();
const char* p_shift_start = items.get_item_end(index);
const char* p_shift_end = items.get_item_end(0);
size_t size_trim_offsets = sizeof(node_offset_t) * trim_items;
mut.shift_absolute(p_shift_start, p_shift_end - p_shift_start,
size_trim_offsets);
mut.copy_in_absolute((void*)items.p_num_keys, num_keys_t(index));
size_t ret = size_trim_offsets + (p_shift_start - p_items_start);
assert(ret < mut.get_length());
return ret;
}
node_offset_t leaf_sub_items_t::erase_at(
NodeExtentMutable& mut, const leaf_sub_items_t& sub_items,
index_t index, const char* p_left_bound)
{
assert(sub_items.keys() > 0);
assert(index < sub_items.keys());
auto p_item_start = sub_items.get_item_start(index);
auto p_item_end = sub_items.get_item_end(index);
assert(p_item_start < p_item_end);
node_offset_t item_erase_size = p_item_end - p_item_start;
node_offset_t erase_size = item_erase_size + sizeof(node_offset_t);
auto p_offset_end = (const char*)&sub_items.get_offset(index);
// a. compensate affected offset[n] ... offset[index+1]
for (auto i = index + 1; i < sub_items.keys(); ++i) {
const node_offset_packed_t& offset_i = sub_items.get_offset(i);
mut.copy_in_absolute((void*)&offset_i, node_offset_t(offset_i.value - item_erase_size));
}
// b. kv[index-1] ... kv[0] ... offset[index+1] >> sizeof(node_offset_t)
mut.shift_absolute(p_item_end, p_offset_end - p_item_end, sizeof(node_offset_t));
// c. ... kv[n] ... kv[index+1] >> item_erase_size
mut.shift_absolute(p_left_bound, p_item_start - p_left_bound, erase_size);
// d. update num_keys
mut.copy_in_absolute((void*)sub_items.p_num_keys, num_keys_t(sub_items.keys() - 1));
return erase_size;
}
template class internal_sub_items_t::Appender<KeyT::VIEW>;
template class internal_sub_items_t::Appender<KeyT::HOBJ>;
// helper type for the visitor
template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
// explicit deduction guide
template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;
template <KeyT KT>
void leaf_sub_items_t::Appender<KT>::append(
const leaf_sub_items_t& src, index_t from, index_t items)
{
if (p_append) {
// append from empty
    assert(cnt < APPENDER_LIMIT);
assert(from <= src.keys());
if (items == 0) {
return;
}
if (op_src) {
assert(*op_src == src);
} else {
op_src = src;
}
assert(from < src.keys());
assert(from + items <= src.keys());
appends[cnt] = range_items_t{from, items};
++cnt;
} else {
// append from existing
assert(op_dst.has_value());
assert(!p_appended);
assert(from == 0);
assert(items);
assert(items == src.keys());
num_keys_t num_keys = op_dst->keys();
node_offset_t compensate = op_dst->get_offset(num_keys - 1).value;
const char* p_items_start = op_dst->p_start();
const char* p_items_end = op_dst->p_items_end;
// update dst num_keys
num_keys += items;
p_mut->copy_in_absolute((char*)op_dst->p_num_keys, num_keys);
// shift dst items
std::size_t src_offsets_size = sizeof(node_offset_t) * items;
p_mut->shift_absolute(p_items_start,
p_items_end - p_items_start,
-(int)src_offsets_size);
// fill offsets from src
node_offset_t offset;
char* p_cur_offset = const_cast<char*>(p_items_end);
for (auto i = from; i < from + items; ++i) {
offset = src.get_offset(i).value + compensate;
p_cur_offset -= sizeof(node_offset_t);
p_mut->copy_in_absolute(p_cur_offset, offset);
}
// fill items from src
auto p_src_items_start = src.get_item_end(from + items);
std::size_t src_items_size = src.get_item_end(from) - p_src_items_start;
p_appended = const_cast<char*>(p_items_start) - src_offsets_size - src_items_size;
p_mut->copy_in_absolute(p_appended, p_src_items_start, src_items_size);
}
}
template <KeyT KT>
char* leaf_sub_items_t::Appender<KT>::wrap()
{
if (op_dst.has_value()) {
// append from existing
assert(p_appended);
return p_appended;
}
// append from empty
assert(p_append);
auto p_cur = p_append;
num_keys_t num_keys = 0;
for (auto i = 0u; i < cnt; ++i) {
auto& a = appends[i];
std::visit(overloaded {
[&] (const range_items_t& arg) { num_keys += arg.items; },
[&] (const kv_item_t& arg) { ++num_keys; }
}, a);
}
assert(num_keys);
p_cur -= sizeof(num_keys_t);
p_mut->copy_in_absolute(p_cur, num_keys);
node_offset_t last_offset = 0;
for (auto i = 0u; i < cnt; ++i) {
auto& a = appends[i];
std::visit(overloaded {
[&] (const range_items_t& arg) {
int compensate = (last_offset - op_src->get_offset_to_end(arg.from));
node_offset_t offset;
for (auto i = arg.from; i < arg.from + arg.items; ++i) {
offset = op_src->get_offset(i).value + compensate;
p_cur -= sizeof(node_offset_t);
p_mut->copy_in_absolute(p_cur, offset);
}
last_offset = offset;
},
[&] (const kv_item_t& arg) {
last_offset += sizeof(snap_gen_t) + arg.value_config.allocation_size();
p_cur -= sizeof(node_offset_t);
p_mut->copy_in_absolute(p_cur, last_offset);
}
}, a);
}
for (auto i = 0u; i < cnt; ++i) {
auto& a = appends[i];
std::visit(overloaded {
[&] (const range_items_t& arg) {
auto _p_start = op_src->get_item_end(arg.from + arg.items);
size_t _len = op_src->get_item_end(arg.from) - _p_start;
p_cur -= _len;
p_mut->copy_in_absolute(p_cur, _p_start, _len);
},
[&] (const kv_item_t& arg) {
assert(pp_value);
p_cur -= sizeof(snap_gen_t);
p_mut->copy_in_absolute(p_cur, snap_gen_t::from_key(*arg.p_key));
p_cur -= arg.value_config.allocation_size();
auto p_value = reinterpret_cast<value_header_t*>(p_cur);
p_value->initiate(*p_mut, arg.value_config);
*pp_value = p_value;
}
}, a);
}
return p_cur;
}
template class leaf_sub_items_t::Appender<KeyT::VIEW>;
template class leaf_sub_items_t::Appender<KeyT::HOBJ>;
}
ceph-main/src/crimson/os/seastore/onode_manager/staged-fltree/stages/sub_items_stage.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <variant>
#include "crimson/os/seastore/onode_manager/staged-fltree/node_types.h"
#include "key_layout.h"
#include "stage_types.h"
namespace crimson::os::seastore::onode {
class NodeExtentMutable;
struct internal_sub_item_t {
const snap_gen_t& get_key() const { return key; }
const laddr_packed_t* get_p_value() const { return &value; }
snap_gen_t key;
laddr_packed_t value;
} __attribute__((packed));
/**
* internal_sub_items_t
*
* The STAGE_RIGHT implementation for internal node N0/N1/N2, implements staged
* contract as an indexable container to index snap-gen to child node
* addresses.
*
 * The layout of the container storing n sub-items:
*
* # <--------- container range -----------> #
* #<~># sub-items [2, n) #
* # # <- sub-item 1 -> # <- sub-item 0 -> #
* #...# snap-gen | laddr # snap-gen | laddr #
* ^
* |
* p_first_item +
*/
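/*
 * A minimal read-side sketch (p_start, p_end and node_size are assumed to be
 * provided by the enclosing item_iterator/node layout):
 *
 *   internal_sub_items_t sub_items({{p_start, p_end}, node_size});
 *   auto n = sub_items.keys(); // (p_end - p_start) / sizeof(internal_sub_item_t)
 *   const snap_gen_t& key0 = sub_items[0]; // read from p_first_item
 *   const laddr_packed_t* child_addr = sub_items.get_p_value(0);
 */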
class internal_sub_items_t {
public:
using num_keys_t = index_t;
internal_sub_items_t(const container_range_t& _range)
: node_size{_range.node_size} {
assert(is_valid_node_size(node_size));
auto& range = _range.range;
assert(range.p_start < range.p_end);
assert((range.p_end - range.p_start) % sizeof(internal_sub_item_t) == 0);
num_items = (range.p_end - range.p_start) / sizeof(internal_sub_item_t);
assert(num_items > 0);
auto _p_first_item = range.p_end - sizeof(internal_sub_item_t);
p_first_item = reinterpret_cast<const internal_sub_item_t*>(_p_first_item);
}
// container type system
using key_get_type = const snap_gen_t&;
static constexpr auto CONTAINER_TYPE = ContainerType::INDEXABLE;
num_keys_t keys() const { return num_items; }
key_get_type operator[](index_t index) const {
assert(index < num_items);
return (p_first_item - index)->get_key();
}
node_offset_t size_before(index_t index) const {
size_t ret = index * sizeof(internal_sub_item_t);
assert(ret < node_size);
return ret;
}
const laddr_packed_t* get_p_value(index_t index) const {
assert(index < num_items);
return (p_first_item - index)->get_p_value();
}
node_offset_t size_overhead_at(index_t index) const { return 0u; }
void encode(const char* p_node_start, ceph::bufferlist& encoded) const {
auto p_end = reinterpret_cast<const char*>(p_first_item) +
sizeof(internal_sub_item_t);
auto p_start = p_end - num_items * sizeof(internal_sub_item_t);
int start_offset = p_start - p_node_start;
int stage_size = p_end - p_start;
assert(start_offset > 0);
assert(stage_size > 0);
assert(start_offset + stage_size < (int)node_size);
ceph::encode(static_cast<node_offset_t>(start_offset), encoded);
ceph::encode(static_cast<node_offset_t>(stage_size), encoded);
}
static internal_sub_items_t decode(
const char* p_node_start,
extent_len_t node_size,
ceph::bufferlist::const_iterator& delta) {
node_offset_t start_offset;
ceph::decode(start_offset, delta);
node_offset_t stage_size;
ceph::decode(stage_size, delta);
assert(start_offset > 0);
assert(stage_size > 0);
assert((unsigned)start_offset + stage_size < node_size);
return internal_sub_items_t({{p_node_start + start_offset,
p_node_start + start_offset + stage_size},
node_size});
}
static node_offset_t header_size() { return 0u; }
template <IsFullKey Key>
static node_offset_t estimate_insert(
const Key&, const laddr_t&) {
return sizeof(internal_sub_item_t);
}
template <IsFullKey Key>
static const laddr_packed_t* insert_at(
NodeExtentMutable&, const internal_sub_items_t&,
const Key&, const laddr_t&,
index_t index, node_offset_t size, const char* p_left_bound);
static node_offset_t trim_until(NodeExtentMutable&, internal_sub_items_t&, index_t);
static node_offset_t erase_at(
NodeExtentMutable&, const internal_sub_items_t&, index_t, const char*);
template <KeyT KT>
class Appender;
private:
extent_len_t node_size;
index_t num_items;
const internal_sub_item_t* p_first_item;
};
template <KeyT KT>
class internal_sub_items_t::Appender {
public:
Appender(NodeExtentMutable* p_mut, char* p_append)
: p_mut{p_mut}, p_append{p_append} {}
Appender(NodeExtentMutable* p_mut, const internal_sub_items_t& sub_items)
: p_mut{p_mut},
p_append{(char*)(sub_items.p_first_item + 1 - sub_items.keys())} {
assert(sub_items.keys());
}
void append(const internal_sub_items_t& src, index_t from, index_t items);
void append(const full_key_t<KT>&, const laddr_t&, const laddr_packed_t*&);
char* wrap() { return p_append; }
private:
NodeExtentMutable* p_mut;
char* p_append;
};
/**
* leaf_sub_items_t
*
* The STAGE_RIGHT implementation for leaf node N0/N1/N2, implements staged
* contract as an indexable container to index snap-gen to value_header_t.
*
 * The layout of the container storing n sub-items:
*
* # <------------------------ container range -------------------------------> #
* # <---------- sub-items ----------------> # <--- offsets ---------# #
* #<~># sub-items [2, n) #<~>| offsets [2, n) # #
* # # <- sub-item 1 -> # <- sub-item 0 -> # | # #
* #...# snap-gen | value # snap-gen | value #...| offset1 | offset0 # num_keys #
* ^ ^ ^
* | | |
* p_items_end + p_offsets + |
* p_num_keys +
*/
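/*
 * A minimal read-side sketch (p_start, p_end and node_size are assumed to be
 * provided by the enclosing item_iterator/node layout):
 *
 *   leaf_sub_items_t sub_items({{p_start, p_end}, node_size});
 *   auto n = sub_items.keys(); // read from p_num_keys
 *   const snap_gen_t& key0 = sub_items[0]; // key of the rightmost sub-item
 *   const value_header_t* value0 = sub_items.get_p_value(0);
 */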
class leaf_sub_items_t {
public:
// should be enough to index all keys under 64 KiB node
using num_keys_t = uint16_t;
// TODO: remove if num_keys_t is aligned
struct num_keys_packed_t {
num_keys_t value;
} __attribute__((packed));
leaf_sub_items_t(const container_range_t& _range)
: node_size{_range.node_size} {
assert(is_valid_node_size(node_size));
auto& range = _range.range;
assert(range.p_start < range.p_end);
auto _p_num_keys = range.p_end - sizeof(num_keys_t);
assert(range.p_start < _p_num_keys);
p_num_keys = reinterpret_cast<const num_keys_packed_t*>(_p_num_keys);
assert(keys());
auto _p_offsets = _p_num_keys - sizeof(node_offset_t);
assert(range.p_start < _p_offsets);
p_offsets = reinterpret_cast<const node_offset_packed_t*>(_p_offsets);
p_items_end = reinterpret_cast<const char*>(&get_offset(keys() - 1));
assert(range.p_start < p_items_end);
assert(range.p_start == p_start());
}
bool operator==(const leaf_sub_items_t& x) {
return (p_num_keys == x.p_num_keys &&
p_offsets == x.p_offsets &&
p_items_end == x.p_items_end);
}
const char* p_start() const { return get_item_end(keys()); }
const node_offset_packed_t& get_offset(index_t index) const {
assert(index < keys());
return *(p_offsets - index);
}
const node_offset_t get_offset_to_end(index_t index) const {
assert(index <= keys());
return index == 0 ? 0 : get_offset(index - 1).value;
}
const char* get_item_start(index_t index) const {
return p_items_end - get_offset(index).value;
}
const char* get_item_end(index_t index) const {
return p_items_end - get_offset_to_end(index);
}
// container type system
using key_get_type = const snap_gen_t&;
static constexpr auto CONTAINER_TYPE = ContainerType::INDEXABLE;
num_keys_t keys() const { return p_num_keys->value; }
key_get_type operator[](index_t index) const {
assert(index < keys());
auto pointer = get_item_end(index);
assert(get_item_start(index) < pointer);
pointer -= sizeof(snap_gen_t);
assert(get_item_start(index) < pointer);
return *reinterpret_cast<const snap_gen_t*>(pointer);
}
node_offset_t size_before(index_t index) const {
assert(index <= keys());
size_t ret;
if (index == 0) {
ret = sizeof(num_keys_t);
} else {
--index;
ret = sizeof(num_keys_t) +
(index + 1) * sizeof(node_offset_t) +
get_offset(index).value;
}
assert(ret < node_size);
return ret;
}
node_offset_t size_overhead_at(index_t index) const { return sizeof(node_offset_t); }
const value_header_t* get_p_value(index_t index) const {
assert(index < keys());
auto pointer = get_item_start(index);
auto value = reinterpret_cast<const value_header_t*>(pointer);
assert(pointer + value->allocation_size() + sizeof(snap_gen_t) ==
get_item_end(index));
return value;
}
void encode(const char* p_node_start, ceph::bufferlist& encoded) const {
auto p_end = reinterpret_cast<const char*>(p_num_keys) +
sizeof(num_keys_t);
int start_offset = p_start() - p_node_start;
int stage_size = p_end - p_start();
assert(start_offset > 0);
assert(stage_size > 0);
assert(start_offset + stage_size < (int)node_size);
ceph::encode(static_cast<node_offset_t>(start_offset), encoded);
ceph::encode(static_cast<node_offset_t>(stage_size), encoded);
}
static leaf_sub_items_t decode(
const char* p_node_start,
extent_len_t node_size,
ceph::bufferlist::const_iterator& delta) {
node_offset_t start_offset;
ceph::decode(start_offset, delta);
node_offset_t stage_size;
ceph::decode(stage_size, delta);
assert(start_offset > 0);
assert(stage_size > 0);
assert((unsigned)start_offset + stage_size < node_size);
return leaf_sub_items_t({{p_node_start + start_offset,
p_node_start + start_offset + stage_size},
node_size});
}
static node_offset_t header_size() { return sizeof(num_keys_t); }
template <IsFullKey Key>
static node_offset_t estimate_insert(
const Key&, const value_config_t& value) {
return value.allocation_size() + sizeof(snap_gen_t) + sizeof(node_offset_t);
}
template <IsFullKey Key>
static const value_header_t* insert_at(
NodeExtentMutable&, const leaf_sub_items_t&,
const Key&, const value_config_t&,
index_t index, node_offset_t size, const char* p_left_bound);
static node_offset_t trim_until(NodeExtentMutable&, leaf_sub_items_t&, index_t index);
static node_offset_t erase_at(
NodeExtentMutable&, const leaf_sub_items_t&, index_t, const char*);
template <KeyT KT>
class Appender;
private:
extent_len_t node_size;
const num_keys_packed_t* p_num_keys;
const node_offset_packed_t* p_offsets;
const char* p_items_end;
};
constexpr index_t APPENDER_LIMIT = 3u;
template <KeyT KT>
class leaf_sub_items_t::Appender {
struct range_items_t {
index_t from;
index_t items;
};
struct kv_item_t {
const full_key_t<KT>* p_key;
value_config_t value_config;
};
using var_t = std::variant<range_items_t, kv_item_t>;
public:
Appender(NodeExtentMutable* p_mut, char* p_append)
: p_mut{p_mut}, p_append{p_append} {
}
Appender(NodeExtentMutable* p_mut, const leaf_sub_items_t& sub_items)
: p_mut{p_mut} , op_dst(sub_items) {
assert(sub_items.keys());
}
void append(const leaf_sub_items_t& src, index_t from, index_t items);
void append(const full_key_t<KT>& key,
const value_config_t& value, const value_header_t*& p_value) {
// append from empty
assert(p_append);
assert(pp_value == nullptr);
    assert(cnt < APPENDER_LIMIT);
appends[cnt] = kv_item_t{&key, value};
++cnt;
pp_value = &p_value;
}
char* wrap();
private:
NodeExtentMutable* p_mut;
// append from empty
std::optional<leaf_sub_items_t> op_src;
const value_header_t** pp_value = nullptr;
char* p_append = nullptr;
var_t appends[APPENDER_LIMIT];
index_t cnt = 0;
// append from existing
std::optional<leaf_sub_items_t> op_dst;
char* p_appended = nullptr;
};
template <node_type_t> struct _sub_items_t;
template<> struct _sub_items_t<node_type_t::INTERNAL> { using type = internal_sub_items_t; };
template<> struct _sub_items_t<node_type_t::LEAF> { using type = leaf_sub_items_t; };
template <node_type_t NODE_TYPE>
using sub_items_t = typename _sub_items_t<NODE_TYPE>::type;
}
ceph-main/src/crimson/os/seastore/random_block_manager/avlallocator.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
//
#include "avlallocator.h"
#include "crimson/os/seastore/logging.h"
SET_SUBSYS(seastore_device);
namespace crimson::os::seastore {
void AvlAllocator::mark_extent_used(rbm_abs_addr addr, size_t size)
{
LOG_PREFIX(AvlAllocator::mark_extent_used);
DEBUG("addr: {}, size: {}, avail: {}", addr, size, available_size);
_remove_from_tree(addr, size);
}
void AvlAllocator::init(rbm_abs_addr addr, size_t size, size_t b_size)
{
LOG_PREFIX(AvlAllocator::init);
DEBUG("addr: {}, size: {}", addr, size);
auto r = new extent_range_t{ addr, addr + size };
extent_tree.insert(*r);
extent_size_tree.insert(*r);
available_size = size;
block_size = b_size;
total_size = size;
base_addr = addr;
}
void AvlAllocator::_remove_from_tree(rbm_abs_addr start, rbm_abs_addr size)
{
LOG_PREFIX(AvlAllocator::_remove_from_tree);
rbm_abs_addr end = start + size;
ceph_assert(size != 0);
ceph_assert(size <= available_size);
auto rs = extent_tree.find(extent_range_t{start, end}, extent_tree.key_comp());
DEBUG("rs start: {}, rs end: {}", rs->start, rs->end);
ceph_assert(rs != extent_tree.end());
ceph_assert(rs->start <= start);
ceph_assert(rs->end >= end);
bool left_over = (rs->start != start);
bool right_over = (rs->end != end);
_extent_size_tree_rm(*rs);
if (left_over && right_over) {
auto old_right_end = rs->end;
auto insert_pos = rs;
ceph_assert(insert_pos != extent_tree.end());
++insert_pos;
rs->end = start;
auto r = new extent_range_t{end, old_right_end};
extent_tree.insert_before(insert_pos, *r);
extent_size_tree.insert(*r);
available_size += r->length();
_extent_size_tree_try_insert(*rs);
} else if (left_over) {
assert(is_aligned(start, block_size));
rs->end = start;
_extent_size_tree_try_insert(*rs);
} else if (right_over) {
assert(is_aligned(end, block_size));
rs->start = end;
_extent_size_tree_try_insert(*rs);
} else {
extent_tree.erase_and_dispose(rs, dispose_rs{});
}
}
rbm_abs_addr AvlAllocator::find_block(size_t size)
{
const auto comp = extent_size_tree.key_comp();
auto iter = extent_size_tree.lower_bound(
extent_range_t{base_addr, base_addr + size}, comp);
for (; iter != extent_size_tree.end(); ++iter) {
assert(is_aligned(iter->start, block_size));
rbm_abs_addr off = iter->start;
if (off + size <= iter->end) {
return off;
}
}
  return base_addr + total_size; // out-of-range sentinel: no free extent is large enough
}
void AvlAllocator::_add_to_tree(rbm_abs_addr start, rbm_abs_addr size)
{
LOG_PREFIX(AvlAllocator::_add_to_tree);
ceph_assert(size != 0);
DEBUG("addr: {}, size: {}", start, size);
rbm_abs_addr end = start + size;
auto rs_after = extent_tree.upper_bound(extent_range_t{start, end},
extent_tree.key_comp());
auto rs_before = extent_tree.end();
if (rs_after != extent_tree.begin()) {
rs_before = std::prev(rs_after);
}
bool merge_before = (rs_before != extent_tree.end() && rs_before->end == start);
bool merge_after = (rs_after != extent_tree.end() && rs_after->start == end);
if (merge_before && merge_after) {
_extent_size_tree_rm(*rs_before);
_extent_size_tree_rm(*rs_after);
rs_after->start = rs_before->start;
extent_tree.erase_and_dispose(rs_before, dispose_rs{});
_extent_size_tree_try_insert(*rs_after);
} else if (merge_before) {
_extent_size_tree_rm(*rs_before);
rs_before->end = end;
_extent_size_tree_try_insert(*rs_before);
} else if (merge_after) {
_extent_size_tree_rm(*rs_after);
rs_after->start = start;
_extent_size_tree_try_insert(*rs_after);
} else {
auto r = new extent_range_t{start, end};
extent_tree.insert(*r);
extent_size_tree.insert(*r);
available_size += r->length();
}
}
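/*
 * Illustrative merge behaviour of _add_to_tree() (addresses are made up):
 * with free ranges [0, 4K) and [8K, 12K), freeing [4K, 8K) takes the
 * merge_before && merge_after path and collapses everything into [0, 12K),
 * whereas freeing [16K, 20K) matches neither neighbour and inserts a fresh
 * extent_range_t into both the offset and the size tree.
 */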
std::optional<interval_set<rbm_abs_addr>> AvlAllocator::alloc_extent(
size_t size)
{
LOG_PREFIX(AvlAllocator::alloc_extent);
if (available_size < size) {
return std::nullopt;
}
if (extent_size_tree.empty()) {
return std::nullopt;
}
ceph_assert(size > 0);
ceph_assert(is_aligned(size, block_size));
interval_set<rbm_abs_addr> result;
auto try_to_alloc_block = [this, &result, FNAME] (uint64_t alloc_size) -> uint64_t
{
rbm_abs_addr start = find_block(alloc_size);
if (start != base_addr + total_size) {
_remove_from_tree(start, alloc_size);
DEBUG("allocate addr: {}, allocate size: {}, available size: {}",
start, alloc_size, available_size);
result.insert(start, alloc_size);
return alloc_size;
}
return 0;
};
auto alloc = std::min(max_alloc_size, size);
rbm_abs_addr ret = try_to_alloc_block(alloc);
if (ret == 0) {
return std::nullopt;
}
assert(!result.empty());
assert(result.num_intervals() == 1);
for (auto p : result) {
INFO("result start: {}, end: {}", p.first, p.first + p.second);
if (detailed) {
assert(!reserved_extent_tracker.contains(p.first, p.second));
reserved_extent_tracker.insert(p.first, p.second);
}
}
return result;
}
void AvlAllocator::free_extent(rbm_abs_addr addr, size_t size)
{
assert(total_size);
assert(total_size > available_size);
_add_to_tree(addr, size);
if (detailed && reserved_extent_tracker.contains(addr, size)) {
reserved_extent_tracker.erase(addr, size);
}
}
bool AvlAllocator::is_free_extent(rbm_abs_addr start, size_t size)
{
rbm_abs_addr end = start + size;
ceph_assert(size != 0);
if (start < base_addr || base_addr + total_size < end) {
return false;
}
auto rs = extent_tree.find(extent_range_t{start, end}, extent_tree.key_comp());
if (rs != extent_tree.end() && rs->start <= start && rs->end >= end) {
return true;
}
return false;
}
}
ceph-main/src/crimson/os/seastore/random_block_manager/avlallocator.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include "extent_allocator.h"
#include "include/ceph_assert.h"
#include "include/buffer_fwd.h"
#include "crimson/osd/exceptions.h"
#include "crimson/os/seastore/transaction.h"
#include <string.h>
#include "include/buffer.h"
#include <boost/intrusive/avl_set.hpp>
#include <optional>
#include <vector>
namespace crimson::os::seastore {
struct extent_range_t {
rbm_abs_addr start;
rbm_abs_addr end;
extent_range_t(rbm_abs_addr start, rbm_abs_addr end) :
start(start), end(end)
{}
struct before_t {
template<typename KeyLeft, typename KeyRight>
bool operator()(const KeyLeft& lhs, const KeyRight& rhs) const {
return lhs.end <= rhs.start;
}
};
boost::intrusive::avl_set_member_hook<> offset_hook;
struct shorter_t {
template<typename KeyType>
bool operator()(const extent_range_t& lhs, const KeyType& rhs) const {
auto lhs_size = lhs.length();
auto rhs_size = rhs.end - rhs.start;
if (lhs_size < rhs_size) {
return true;
} else if (lhs_size > rhs_size) {
return false;
} else {
return lhs.start < rhs.start;
}
}
};
size_t length() const {
return end - start;
}
boost::intrusive::avl_set_member_hook<> size_hook;
};
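// Each extent_range_t is linked into two intrusive AVL trees at once:
// extent_tree (ordered by offset via before_t/offset_hook) so freed space can
// be merged with its neighbours, and extent_size_tree (ordered by length and
// then offset via shorter_t/size_hook) so find_block() can quickly locate an
// extent that is large enough.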
/*
* This is the simplest version of avlallocator from bluestore's avlallocator
*/
class AvlAllocator : public ExtentAllocator {
public:
AvlAllocator(bool detailed) :
detailed(detailed) {}
std::optional<interval_set<rbm_abs_addr>> alloc_extent(
size_t size) final;
void free_extent(rbm_abs_addr addr, size_t size) final;
void mark_extent_used(rbm_abs_addr addr, size_t size) final;
void init(rbm_abs_addr addr, size_t size, size_t b_size);
struct dispose_rs {
void operator()(extent_range_t* p)
{
delete p;
}
};
~AvlAllocator() {
close();
}
void close() {
if (!detailed) {
assert(reserved_extent_tracker.size() == 0);
}
extent_size_tree.clear();
extent_tree.clear_and_dispose(dispose_rs{});
total_size = 0;
block_size = 0;
available_size = 0;
base_addr = 0;
}
uint64_t get_available_size() const final {
return available_size;
}
uint64_t get_max_alloc_size() const final {
return max_alloc_size;
}
bool is_free_extent(rbm_abs_addr start, size_t size);
void complete_allocation(rbm_abs_addr start, size_t size) final {
if (detailed) {
assert(reserved_extent_tracker.contains(start, size));
reserved_extent_tracker.erase(start, size);
}
}
bool is_reserved_extent(rbm_abs_addr start, size_t size) {
if (detailed) {
return reserved_extent_tracker.contains(start, size);
}
return false;
}
rbm_extent_state_t get_extent_state(rbm_abs_addr addr, size_t size) final {
if (is_reserved_extent(addr, size)) {
return rbm_extent_state_t::RESERVED;
} else if (is_free_extent(addr, size)) {
return rbm_extent_state_t::FREE;
}
return rbm_extent_state_t::ALLOCATED;
}
private:
void _add_to_tree(rbm_abs_addr start, size_t size);
void _extent_size_tree_rm(extent_range_t& r) {
ceph_assert(available_size >= r.length());
available_size -= r.length();
extent_size_tree.erase(r);
}
void _extent_size_tree_try_insert(extent_range_t& r) {
extent_size_tree.insert(r);
available_size += r.length();
}
void _remove_from_tree(rbm_abs_addr start, rbm_abs_addr size);
rbm_abs_addr find_block(size_t size);
using extent_tree_t =
boost::intrusive::avl_set<
extent_range_t,
boost::intrusive::compare<extent_range_t::before_t>,
boost::intrusive::member_hook<
extent_range_t,
boost::intrusive::avl_set_member_hook<>,
&extent_range_t::offset_hook>>;
extent_tree_t extent_tree;
using extent_size_tree_t =
boost::intrusive::avl_set<
extent_range_t,
boost::intrusive::compare<extent_range_t::shorter_t>,
boost::intrusive::member_hook<
extent_range_t,
boost::intrusive::avl_set_member_hook<>,
&extent_range_t::size_hook>>;
extent_size_tree_t extent_size_tree;
uint64_t block_size = 0;
uint64_t available_size = 0;
uint64_t total_size = 0;
uint64_t base_addr = 0;
uint64_t max_alloc_size = 4 << 20;
bool detailed;
interval_set<rbm_abs_addr> reserved_extent_tracker;
};
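// A minimal usage sketch; the range, block size and allocation size below are
// illustrative assumptions only. alloc_extent() leaves the returned range
// RESERVED, complete_allocation() turns it into ALLOCATED, and free_extent()
// returns it to the free trees.
inline void example_avl_allocator_usage()
{
  AvlAllocator alloc(true /* track reserved extents */);
  alloc.init(0, 1 << 20, 4096);      // manage a 1 MiB range of 4 KiB blocks
  if (auto extents = alloc.alloc_extent(8192); extents) {
    for (auto p : *extents) {        // p.first = start, p.second = length
      alloc.complete_allocation(p.first, p.second);
      alloc.free_extent(p.first, p.second);
    }
  }
}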
}
ceph-main/src/crimson/os/seastore/random_block_manager/block_rb_manager.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <sys/mman.h>
#include <string.h>
#include "crimson/os/seastore/logging.h"
#include "include/buffer.h"
#include "rbm_device.h"
#include "include/interval_set.h"
#include "include/intarith.h"
#include "block_rb_manager.h"
SET_SUBSYS(seastore_device);
namespace crimson::os::seastore {
device_config_t get_rbm_ephemeral_device_config(
std::size_t index, std::size_t num_devices)
{
assert(num_devices > index);
magic_t magic = 0xfffa;
auto type = device_type_t::RANDOM_BLOCK_EPHEMERAL;
bool is_major_device;
secondary_device_set_t secondary_devices;
if (index == 0) {
is_major_device = true;
for (std::size_t secondary_index = index + 1;
secondary_index < num_devices;
++secondary_index) {
device_id_t secondary_id = static_cast<device_id_t>(secondary_index);
secondary_devices.insert({
secondary_index, device_spec_t{magic, type, secondary_id}
});
}
} else { // index > 0
is_major_device = false;
}
device_id_t id = static_cast<device_id_t>(DEVICE_ID_RANDOM_BLOCK_MIN + index);
seastore_meta_t meta = {};
return {is_major_device,
device_spec_t{magic, type, id},
meta,
secondary_devices};
}
paddr_t BlockRBManager::alloc_extent(size_t size)
{
LOG_PREFIX(BlockRBManager::alloc_extent);
assert(allocator);
auto alloc = allocator->alloc_extent(size);
ceph_assert((*alloc).num_intervals() == 1);
auto extent = (*alloc).begin();
ceph_assert(size == extent.get_len());
paddr_t paddr = convert_abs_addr_to_paddr(
extent.get_start(),
device->get_device_id());
DEBUG("allocated addr: {}, size: {}, requested size: {}",
paddr, extent.get_len(), size);
return paddr;
}
void BlockRBManager::complete_allocation(
paddr_t paddr, size_t size)
{
assert(allocator);
rbm_abs_addr addr = convert_paddr_to_abs_addr(paddr);
allocator->complete_allocation(addr, size);
}
BlockRBManager::open_ertr::future<> BlockRBManager::open()
{
assert(device);
assert(device->get_available_size() > 0);
assert(device->get_block_size() > 0);
auto ool_start = get_start_rbm_addr();
allocator->init(
ool_start,
device->get_shard_end() -
ool_start,
device->get_block_size());
return open_ertr::now();
}
BlockRBManager::write_ertr::future<> BlockRBManager::write(
paddr_t paddr,
bufferptr &bptr)
{
LOG_PREFIX(BlockRBManager::write);
ceph_assert(device);
rbm_abs_addr addr = convert_paddr_to_abs_addr(paddr);
rbm_abs_addr start = device->get_shard_start();
rbm_abs_addr end = device->get_shard_end();
if (addr < start || addr + bptr.length() > end) {
ERROR("out of range: start {}, end {}, addr {}, length {}",
start, end, addr, bptr.length());
return crimson::ct_error::erange::make();
}
bufferptr bp = bufferptr(ceph::buffer::create_page_aligned(bptr.length()));
bp.copy_in(0, bptr.length(), bptr.c_str());
return device->write(
addr,
std::move(bp));
}
BlockRBManager::read_ertr::future<> BlockRBManager::read(
paddr_t paddr,
bufferptr &bptr)
{
LOG_PREFIX(BlockRBManager::read);
ceph_assert(device);
rbm_abs_addr addr = convert_paddr_to_abs_addr(paddr);
rbm_abs_addr start = device->get_shard_start();
rbm_abs_addr end = device->get_shard_end();
if (addr < start || addr + bptr.length() > end) {
ERROR("out of range: start {}, end {}, addr {}, length {}",
start, end, addr, bptr.length());
return crimson::ct_error::erange::make();
}
return device->read(
addr,
bptr);
}
BlockRBManager::close_ertr::future<> BlockRBManager::close()
{
ceph_assert(device);
allocator->close();
return device->close();
}
BlockRBManager::write_ertr::future<> BlockRBManager::write(
rbm_abs_addr addr,
bufferlist &bl)
{
LOG_PREFIX(BlockRBManager::write);
ceph_assert(device);
bufferptr bptr;
try {
bptr = bufferptr(ceph::buffer::create_page_aligned(bl.length()));
auto iter = bl.cbegin();
iter.copy(bl.length(), bptr.c_str());
} catch (const std::exception &e) {
DEBUG("write: exception creating aligned buffer {}", e);
ceph_assert(0 == "unhandled exception");
}
return device->write(
addr,
std::move(bptr));
}
std::ostream &operator<<(std::ostream &out, const rbm_metadata_header_t &header)
{
out << " rbm_metadata_header_t(size=" << header.size
<< ", block_size=" << header.block_size
<< ", feature=" << header.feature
<< ", journal_size=" << header.journal_size
<< ", crc=" << header.crc
<< ", config=" << header.config
<< ", shard_num=" << header.shard_num;
for (auto p : header.shard_infos) {
out << p;
}
return out << ")";
}
std::ostream &operator<<(std::ostream &out, const rbm_shard_info_t &shard)
{
out << " rbm_shard_info_t(size=" << shard.size
<< ", start_offset=" << shard.start_offset;
return out << ")";
}
}
ceph-main/src/crimson/os/seastore/random_block_manager/block_rb_manager.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <iosfwd>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/future.hh>
#include "include/ceph_assert.h"
#include "crimson/os/seastore/seastore_types.h"
#include "include/buffer_fwd.h"
#include "crimson/osd/exceptions.h"
#include "crimson/os/seastore/transaction.h"
#include "rbm_device.h"
#include "crimson/os/seastore/random_block_manager.h"
#include "crimson/common/layout.h"
#include "include/buffer.h"
#include "include/uuid.h"
#include "avlallocator.h"
namespace crimson::os::seastore {
using RBMDevice = random_block_device::RBMDevice;
using RBMDeviceRef = std::unique_ptr<RBMDevice>;
device_config_t get_rbm_ephemeral_device_config(
std::size_t index, std::size_t num_devices);
class BlockRBManager final : public RandomBlockManager {
public:
/*
* Ondisk layout (TODO)
*
* ---------------------------------------------------------------------------
* | rbm_metadata_header_t | metadatas | ... | data blocks |
* ---------------------------------------------------------------------------
*/
read_ertr::future<> read(paddr_t addr, bufferptr &buffer) final;
write_ertr::future<> write(paddr_t addr, bufferptr &buf) final;
open_ertr::future<> open() final;
close_ertr::future<> close() final;
/*
* alloc_extent
*
   * The role of this function is to find the free blocks that the transaction
   * requires. To do so, alloc_extent() consults both the in-memory allocator
   * and the free-bitmap blocks. A usage sketch follows this class.
*
* TODO: multiple allocation
*
*/
paddr_t alloc_extent(size_t size) final; // allocator, return blocks
void complete_allocation(paddr_t addr, size_t size) final;
size_t get_start_rbm_addr() const {
return device->get_shard_journal_start() + device->get_journal_size();
}
size_t get_size() const final {
return device->get_shard_end() - get_start_rbm_addr();
};
extent_len_t get_block_size() const final { return device->get_block_size(); }
BlockRBManager(RBMDevice * device, std::string path, bool detailed)
: device(device), path(path) {
allocator.reset(new AvlAllocator(detailed));
}
write_ertr::future<> write(rbm_abs_addr addr, bufferlist &bl);
device_id_t get_device_id() const final {
assert(device);
return device->get_device_id();
}
uint64_t get_free_blocks() const final {
// TODO: return correct free blocks after block allocator is introduced
assert(device);
return get_size() / get_block_size();
}
const seastore_meta_t &get_meta() const final {
return device->get_meta();
}
RBMDevice* get_device() {
return device;
}
void mark_space_used(paddr_t paddr, size_t len) final {
assert(allocator);
rbm_abs_addr addr = convert_paddr_to_abs_addr(paddr);
assert(addr >= get_start_rbm_addr() &&
addr + len <= device->get_shard_end());
allocator->mark_extent_used(addr, len);
}
void mark_space_free(paddr_t paddr, size_t len) final {
assert(allocator);
rbm_abs_addr addr = convert_paddr_to_abs_addr(paddr);
assert(addr >= get_start_rbm_addr() &&
addr + len <= device->get_shard_end());
allocator->free_extent(addr, len);
}
paddr_t get_start() final {
return convert_abs_addr_to_paddr(
get_start_rbm_addr(),
device->get_device_id());
}
rbm_extent_state_t get_extent_state(paddr_t paddr, size_t size) final {
assert(allocator);
rbm_abs_addr addr = convert_paddr_to_abs_addr(paddr);
assert(addr >= get_start_rbm_addr() &&
addr + size <= device->get_shard_end());
return allocator->get_extent_state(addr, size);
}
size_t get_journal_size() const final {
return device->get_journal_size();
}
private:
/*
* this contains the number of bitmap blocks, free blocks and
* rbm specific information
*/
ExtentAllocatorRef allocator;
RBMDevice * device;
std::string path;
int stream_id; // for multi-stream
};
using BlockRBManagerRef = std::unique_ptr<BlockRBManager>;
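// A minimal write-path sketch, under assumptions that are not enforced here:
// "rbm" is open and outlives the returned future, the allocation succeeds,
// and "bp" is a multiple of the device block size. It only illustrates the
// reserve -> write -> complete_allocation ordering.
inline BlockRBManager::write_ertr::future<> example_alloc_and_write(
  BlockRBManager &rbm, bufferptr bp)
{
  paddr_t paddr = rbm.alloc_extent(bp.length()); // extent becomes RESERVED
  auto len = bp.length();
  return rbm.write(paddr, bp
  ).safe_then([&rbm, paddr, len] {
    rbm.complete_allocation(paddr, len);         // extent becomes ALLOCATED
  });
}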
}
ceph-main/src/crimson/os/seastore/random_block_manager/extent_allocator.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab expandtab
#pragma once
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/future.hh>
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/random_block_manager.h"
#include "include/interval_set.h"
namespace crimson::os::seastore {
class ExtentAllocator {
public:
/**
* alloc_extent
*
   * Allocate a contiguous region of the given size.
   * Note that the initial state of the extent is RESERVED after alloc_extent().
* see rbm_extent_state_t in random_block_manager.h
*
* @param size
* @return nullopt or the address range (rbm_abs_addr, len)
*/
virtual std::optional<interval_set<rbm_abs_addr>> alloc_extent(
size_t size) = 0;
/**
* free_extent
*
* free given region
*
* @param rbm_abs_addr
* @param size
*/
virtual void free_extent(rbm_abs_addr addr, size_t size) = 0;
/**
* mark_extent_used
*
* This marks given region as used without alloc_extent.
*
* @param rbm_abs_addr
* @param size
*/
virtual void mark_extent_used(rbm_abs_addr addr, size_t size) = 0;
/**
* init
*
* Initialize the address space the ExtentAllocator will manage
*
* @param start address (rbm_abs_addr)
* @param total size
* @param block size
*/
virtual void init(rbm_abs_addr addr, size_t size, size_t b_size) = 0;
virtual uint64_t get_available_size() const = 0;
virtual uint64_t get_max_alloc_size() const = 0;
virtual void close() = 0;
/**
* complete_allocation
*
* This changes this extent state from RESERVED to ALLOCATED
*
* @param start address
* @param size
*/
virtual void complete_allocation(rbm_abs_addr start, size_t size) = 0;
virtual rbm_extent_state_t get_extent_state(rbm_abs_addr addr, size_t size) = 0;
virtual ~ExtentAllocator() {}
};
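// Extent state lifecycle implied by the interface above (a sketch; see
// rbm_extent_state_t in random_block_manager.h):
//
//   FREE --alloc_extent()--> RESERVED --complete_allocation()--> ALLOCATED
//   ALLOCATED --free_extent()--> FREE
//   FREE --mark_extent_used()--> ALLOCATED (no RESERVED step)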
using ExtentAllocatorRef = std::unique_ptr<ExtentAllocator>;
}
ceph-main/src/crimson/os/seastore/random_block_manager/nvme_block_device.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <sys/mman.h>
#include <string.h>
#include <fcntl.h>
#include "crimson/common/log.h"
#include "crimson/common/errorator-loop.h"
#include "include/buffer.h"
#include "rbm_device.h"
#include "nvme_block_device.h"
#include "block_rb_manager.h"
namespace {
seastar::logger& logger() {
return crimson::get_logger(ceph_subsys_seastore_tm);
}
}
namespace crimson::os::seastore::random_block_device::nvme {
NVMeBlockDevice::mkfs_ret NVMeBlockDevice::mkfs(device_config_t config) {
using crimson::common::get_conf;
return shard_devices.local().do_primary_mkfs(config,
seastar::smp::count,
get_conf<Option::size_t>("seastore_cbjournal_size")
);
}
open_ertr::future<> NVMeBlockDevice::open(
const std::string &in_path,
seastar::open_flags mode) {
return seastar::do_with(in_path, [this, mode](auto& in_path) {
return seastar::file_stat(in_path).then([this, mode, in_path](auto stat) {
return seastar::open_file_dma(in_path, mode).then([=, this](auto file) {
device = std::move(file);
logger().debug("open");
// Get SSD's features from identify_controller and namespace command.
// Do identify_controller first, and then identify_namespace.
return identify_controller(device).safe_then([this, in_path, mode](
auto id_controller_data) {
support_multistream = id_controller_data.oacs.support_directives;
if (support_multistream) {
stream_id_count = WRITE_LIFE_MAX;
}
awupf = id_controller_data.awupf + 1;
return identify_namespace(device).safe_then([this, in_path, mode] (
auto id_namespace_data) {
atomic_write_unit = awupf * super.block_size;
data_protection_type = id_namespace_data.dps.protection_type;
data_protection_enabled = (data_protection_type > 0);
if (id_namespace_data.nsfeat.opterf == 1){
            // NPWG and NPWA are 0-based values
write_granularity = super.block_size * (id_namespace_data.npwg + 1);
write_alignment = super.block_size * (id_namespace_data.npwa + 1);
}
return open_for_io(in_path, mode);
});
}).handle_error(crimson::ct_error::input_output_error::handle([this, in_path, mode]{
logger().error("open: id ctrlr failed. open without ioctl");
return open_for_io(in_path, mode);
}), crimson::ct_error::pass_further_all{});
});
});
});
}
open_ertr::future<> NVMeBlockDevice::open_for_io(
const std::string& in_path,
seastar::open_flags mode) {
io_device.resize(stream_id_count);
return seastar::do_for_each(io_device, [=, this](auto &target_device) {
return seastar::open_file_dma(in_path, mode).then([this](
auto file) {
assert(io_device.size() > stream_index_to_open);
io_device[stream_index_to_open] = std::move(file);
return io_device[stream_index_to_open].fcntl(
F_SET_FILE_RW_HINT,
(uintptr_t)&stream_index_to_open).then([this](auto ret) {
stream_index_to_open++;
return seastar::now();
});
});
});
}
NVMeBlockDevice::mount_ret NVMeBlockDevice::mount()
{
logger().debug(" mount ");
return shard_devices.invoke_on_all([](auto &local_device) {
return local_device.do_shard_mount(
).handle_error(
crimson::ct_error::assert_all{
"Invalid error in RBMDevice::do_mount"
});
});
}
write_ertr::future<> NVMeBlockDevice::write(
uint64_t offset,
bufferptr &&bptr,
uint16_t stream) {
logger().debug(
"block: write offset {} len {}",
offset,
bptr.length());
auto length = bptr.length();
assert((length % super.block_size) == 0);
uint16_t supported_stream = stream;
if (stream >= stream_id_count) {
supported_stream = WRITE_LIFE_NOT_SET;
}
return seastar::do_with(
std::move(bptr),
[this, offset, length, supported_stream] (auto& bptr) {
return io_device[supported_stream].dma_write(
offset, bptr.c_str(), length).handle_exception(
[](auto e) -> write_ertr::future<size_t> {
logger().error("write: dma_write got error{}", e);
return crimson::ct_error::input_output_error::make();
}).then([length](auto result) -> write_ertr::future<> {
if (result != length) {
logger().error("write: dma_write got error with not proper length");
return crimson::ct_error::input_output_error::make();
}
return write_ertr::now();
});
});
}
read_ertr::future<> NVMeBlockDevice::read(
uint64_t offset,
bufferptr &bptr) {
logger().debug(
"block: read offset {} len {}",
offset,
bptr.length());
auto length = bptr.length();
assert((length % super.block_size) == 0);
return device.dma_read(offset, bptr.c_str(), length).handle_exception(
[](auto e) -> read_ertr::future<size_t> {
logger().error("read: dma_read got error{}", e);
return crimson::ct_error::input_output_error::make();
}).then([length](auto result) -> read_ertr::future<> {
if (result != length) {
logger().error("read: dma_read got error with not proper length");
return crimson::ct_error::input_output_error::make();
}
return read_ertr::now();
});
}
write_ertr::future<> NVMeBlockDevice::writev(
uint64_t offset,
ceph::bufferlist bl,
uint16_t stream) {
logger().debug(
"block: write offset {} len {}",
offset,
bl.length());
uint16_t supported_stream = stream;
if (stream >= stream_id_count) {
supported_stream = WRITE_LIFE_NOT_SET;
}
bl.rebuild_aligned(super.block_size);
return seastar::do_with(
bl.prepare_iovs(),
std::move(bl),
[this, supported_stream, offset](auto& iovs, auto& bl)
{
return write_ertr::parallel_for_each(
iovs,
[this, supported_stream, offset](auto& p) mutable
{
auto off = offset + p.offset;
auto len = p.length;
auto& iov = p.iov;
return io_device[supported_stream].dma_write(off, std::move(iov)
).handle_exception(
[this, off, len](auto e) -> write_ertr::future<size_t>
{
logger().error("{} poffset={}~{} dma_write got error -- {}",
device_id_printer_t{get_device_id()}, off, len, e);
return crimson::ct_error::input_output_error::make();
}).then([this, off, len](size_t written) -> write_ertr::future<> {
if (written != len) {
logger().error("{} poffset={}~{} dma_write len={} inconsistent",
device_id_printer_t{get_device_id()}, off, len, written);
return crimson::ct_error::input_output_error::make();
}
return write_ertr::now();
});
});
});
}
Device::close_ertr::future<> NVMeBlockDevice::close() {
logger().debug(" close ");
stream_index_to_open = WRITE_LIFE_NOT_SET;
return device.close().then([this]() {
return seastar::do_for_each(io_device, [](auto target_device) {
return target_device.close();
});
});
}
nvme_command_ertr::future<nvme_identify_controller_data_t>
NVMeBlockDevice::identify_controller(seastar::file f) {
return seastar::do_with(
nvme_admin_command_t(),
nvme_identify_controller_data_t(),
[this, f](auto &admin_command, auto &data) {
admin_command.common.opcode = nvme_admin_command_t::OPCODE_IDENTIFY;
admin_command.common.addr = (uint64_t)&data;
admin_command.common.data_len = sizeof(data);
admin_command.identify.cns = nvme_identify_command_t::CNS_CONTROLLER;
return pass_admin(admin_command, f).safe_then([&data](auto status) {
return seastar::make_ready_future<nvme_identify_controller_data_t>(
std::move(data));
});
});
}
discard_ertr::future<> NVMeBlockDevice::discard(uint64_t offset, uint64_t len) {
return device.discard(offset, len);
}
nvme_command_ertr::future<nvme_identify_namespace_data_t>
NVMeBlockDevice::identify_namespace(seastar::file f) {
return get_nsid(f).safe_then([this, f](auto nsid) {
return seastar::do_with(
nvme_admin_command_t(),
nvme_identify_namespace_data_t(),
[this, nsid, f](auto &admin_command, auto &data) {
admin_command.common.opcode = nvme_admin_command_t::OPCODE_IDENTIFY;
admin_command.common.addr = (uint64_t)&data;
admin_command.common.data_len = sizeof(data);
admin_command.common.nsid = nsid;
admin_command.identify.cns = nvme_identify_command_t::CNS_NAMESPACE;
return pass_admin(admin_command, f).safe_then([&data](auto status){
return seastar::make_ready_future<nvme_identify_namespace_data_t>(
std::move(data));
});
});
});
}
nvme_command_ertr::future<int> NVMeBlockDevice::get_nsid(seastar::file f) {
return f.ioctl(NVME_IOCTL_ID, nullptr).handle_exception(
[](auto e)->nvme_command_ertr::future<int> {
logger().error("pass_admin: ioctl failed");
return crimson::ct_error::input_output_error::make();
});
}
nvme_command_ertr::future<int> NVMeBlockDevice::pass_admin(
nvme_admin_command_t& admin_cmd, seastar::file f) {
return f.ioctl(NVME_IOCTL_ADMIN_CMD, &admin_cmd).handle_exception(
[](auto e)->nvme_command_ertr::future<int> {
logger().error("pass_admin: ioctl failed");
return crimson::ct_error::input_output_error::make();
});
}
nvme_command_ertr::future<int> NVMeBlockDevice::pass_through_io(
nvme_io_command_t& io_cmd) {
return device.ioctl(NVME_IOCTL_IO_CMD, &io_cmd);
}
}
ceph-main/src/crimson/os/seastore/random_block_manager/nvme_block_device.h
//-*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <memory>
#include <vector>
#include <seastar/core/file.hh>
#include <linux/nvme_ioctl.h>
#include "crimson/osd/exceptions.h"
#include "crimson/common/layout.h"
#include "rbm_device.h"
namespace ceph {
namespace buffer {
class bufferptr;
}
}
namespace crimson::os::seastore::random_block_device::nvme {
/*
* NVMe protocol structures (nvme_XX, identify_XX)
*
 * All structures related to the NVMe protocol follow NVMe protocol v1.4.
 * NVMe is a protocol for fast interfacing between the host and an SSD.
 * Only a subset of NVMe features is adopted here to ease implementation.
 * NVMeBlockDevice also provides generic command submission APIs for IO and
 * Admin commands; use pass_through_io() and pass_admin() for that.
*
* For more information about NVMe protocol, refer https://nvmexpress.org/
*/
struct nvme_identify_command_t {
uint32_t common_dw[10];
uint32_t cns : 8;
uint32_t reserved : 8;
uint32_t cnt_id : 16;
static const uint8_t CNS_NAMESPACE = 0x00;
static const uint8_t CNS_CONTROLLER = 0x01;
};
struct nvme_admin_command_t {
union {
nvme_passthru_cmd common;
nvme_identify_command_t identify;
};
static const uint8_t OPCODE_IDENTIFY = 0x06;
};
// Optional Admin Command Support (OACS)
// Indicates which optional admin commands the SSD supports
struct oacs_t {
uint16_t unused : 5;
uint16_t support_directives : 1; // Support multi-stream
uint16_t unused2 : 10;
};
struct nvme_identify_controller_data_t {
union {
struct {
uint8_t unused[256]; // [255:0]
oacs_t oacs; // [257:256]
uint8_t unused2[270]; // [527:258]
uint16_t awupf; // [529:528]
};
uint8_t raw[4096];
};
};
// End-to-end Data Protection Capabilities (DPC)
// Indicates type of E2E data protection supported by SSD
struct dpc_t {
uint8_t support_type1 : 1;
uint8_t support_type2 : 1;
uint8_t support_type3 : 1;
uint8_t support_first_meta : 1;
uint8_t support_last_meta : 1;
uint8_t reserved : 3;
};
// End-to-end Data Protection Type Settings (DPS)
// Indicates enabled type of E2E data protection
struct dps_t {
uint8_t protection_type : 3;
uint8_t protection_info : 1;
uint8_t reserved : 4;
};
// Namespace Features (NSFEAT)
// Indicates features of namespace
struct nsfeat_t {
uint8_t thinp : 1;
uint8_t nsabp : 1;
uint8_t dae : 1;
uint8_t uid_reuse : 1;
uint8_t opterf : 1; // Support NPWG, NPWA
uint8_t reserved : 3;
};
// LBA Format (LBAF)
// Indicates LBA format (metadata size, data size, performance)
struct lbaf_t {
uint32_t ms : 16;
uint32_t lbads : 8;
uint32_t rp : 2;
uint32_t reserved : 6;
};
struct nvme_identify_namespace_data_t {
union {
struct {
uint8_t unused[24]; // [23:0]
nsfeat_t nsfeat; // [24]
uint8_t unused2[3]; // [27:25]
dpc_t dpc; // [28]
dps_t dps; // [29]
uint8_t unused3[34]; // [63:30]
uint16_t npwg; // [65:64]
uint16_t npwa; // [67:66]
uint8_t unused4[60]; // [127:68]
lbaf_t lbaf0; // [131:128]
};
uint8_t raw[4096];
};
};
struct nvme_rw_command_t {
uint32_t common_dw[10];
uint64_t s_lba;
  uint32_t nlb : 16; // 0-based value
uint32_t reserved : 4;
uint32_t d_type : 4;
uint32_t reserved2 : 2;
uint32_t prinfo_prchk : 3;
uint32_t prinfo_pract : 1;
uint32_t fua : 1;
uint32_t lr : 1;
uint32_t reserved3 : 16;
uint32_t dspec : 16;
static const uint32_t DTYPE_STREAM = 1;
};
struct nvme_io_command_t {
union {
nvme_passthru_cmd common;
nvme_rw_command_t rw;
};
static const uint8_t OPCODE_WRITE = 0x01;
  static const uint8_t OPCODE_READ = 0x02; // NVM command set: write is 0x01, read is 0x02
};
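// A minimal sketch of filling a raw read command for pass_through_io(),
// mirroring the way identify_controller()/identify_namespace() fill
// nvme_admin_command_t in nvme_block_device.cc. The namespace id, buffer and
// the single 4 KiB logical-block transfer are illustrative assumptions; any
// further field requirements are device/kernel specific and the caller owns
// the buffer for the duration of the command.
inline nvme_io_command_t make_example_read_command(void *buf, uint32_t nsid)
{
  nvme_io_command_t cmd{};
  cmd.common.opcode = nvme_io_command_t::OPCODE_READ;
  cmd.common.nsid = nsid;
  cmd.common.addr = reinterpret_cast<uint64_t>(buf);
  cmd.common.data_len = 4096;  // assumes a 4 KiB logical block
  cmd.rw.s_lba = 0;            // starting LBA
  cmd.rw.nlb = 0;              // 0-based count: 0 means one logical block
  return cmd;
}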
/*
* Implementation of NVMeBlockDevice with POSIX APIs
*
 * NVMeBlockDevice provides NVMe SSD interfaces through POSIX APIs, which are
 * generally available in most operating environments.
*/
class NVMeBlockDevice : public RBMDevice {
public:
/*
   * Size-related attributes reported by the NVMe device
   *
   * size : total size of the device in bytes.
   *
   * block_size : IO unit size in bytes. Every IO command must be aligned
   * to the block size.
   *
   * preferred_write_granularity (PWG), preferred_write_alignment (PWA) :
   * preferred IO unit size and alignment for writes, in bytes. Callers should
   * issue writes sized in multiples of PWG with the starting address aligned
   * to PWA. Available only if the NVMe device supports NVMe protocol 1.4 or
   * later.
   *
   * atomic_write_unit : the maximum write size whose atomicity is guaranteed
   * by the SSD even on power failure. Writes equal to or smaller than
   * atomic_write_unit do not require fsync().
*/
NVMeBlockDevice(std::string device_path) : device_path(device_path) {}
~NVMeBlockDevice() = default;
open_ertr::future<> open(
const std::string &in_path,
seastar::open_flags mode) override;
write_ertr::future<> write(
uint64_t offset,
bufferptr &&bptr,
uint16_t stream = 0) override;
using RBMDevice::read;
read_ertr::future<> read(
uint64_t offset,
bufferptr &bptr) final;
close_ertr::future<> close() override;
discard_ertr::future<> discard(
uint64_t offset,
uint64_t len) override;
mount_ret mount() final;
mkfs_ret mkfs(device_config_t config) final;
write_ertr::future<> writev(
uint64_t offset,
ceph::bufferlist bl,
uint16_t stream = 0) final;
stat_device_ret stat_device() final {
return seastar::file_stat(device_path, seastar::follow_symlink::yes
).handle_exception([](auto e) -> stat_device_ret {
return crimson::ct_error::input_output_error::make();
}).then([this](auto stat) {
return seastar::open_file_dma(
device_path,
seastar::open_flags::rw | seastar::open_flags::dsync
).then([this, stat](auto file) mutable {
return file.size().then([this, stat, file](auto size) mutable {
stat.size = size;
return identify_namespace(file
).safe_then([stat] (auto id_namespace_data) mutable {
          // The LBA format provides the LBA size, which is a power of 2.
          // The LBA is the minimum unit of reads and writes.
stat.block_size = (1 << id_namespace_data.lbaf0.lbads);
if (stat.block_size < RBM_SUPERBLOCK_SIZE) {
stat.block_size = RBM_SUPERBLOCK_SIZE;
}
return stat_device_ret(
read_ertr::ready_future_marker{},
stat
);
}).handle_error(crimson::ct_error::input_output_error::handle(
[stat]{
return stat_device_ret(
read_ertr::ready_future_marker{},
stat
);
}), crimson::ct_error::pass_further_all{});
}).safe_then([file](auto st) mutable {
return file.close(
).then([st] {
return stat_device_ret(
read_ertr::ready_future_marker{},
st
);
});
});
});
});
}
std::string get_device_path() const final {
return device_path;
}
seastar::future<> start() final {
return shard_devices.start(device_path);
}
seastar::future<> stop() final {
return shard_devices.stop();
}
Device& get_sharded_device() final {
return shard_devices.local();
}
uint64_t get_preffered_write_granularity() const { return write_granularity; }
uint64_t get_preffered_write_alignment() const { return write_alignment; }
uint64_t get_atomic_write_unit() const { return atomic_write_unit; }
/*
* End-to-End Data Protection
*
* NVMe device keeps track of data integrity similar with checksum. Client can
* offload checksuming to NVMe device to reduce its CPU utilization. If data
* protection is enabled, checksum is calculated on every write and used to
* verify data on every read.
*/
bool is_data_protection_enabled() const { return data_protection_enabled; }
/*
* Data Health
*
   * Returns the list of LBAs whose data is about to become corrupted. The
   * caller can overwrite, unmap or refresh the data on those LBAs to protect
   * it.
*/
virtual nvme_command_ertr::future<std::list<uint64_t>> get_data_health() {
std::list<uint64_t> fragile_lbas;
return nvme_command_ertr::future<std::list<uint64_t>>(
nvme_command_ertr::ready_future_marker{},
fragile_lbas
);
}
/*
* Recovery Level
*
   * Regulates the magnitude of SSD-internal data recovery. A lower magnitude
   * gives the caller better read latency.
*/
virtual nvme_command_ertr::future<> set_data_recovery_level(
uint32_t level) { return nvme_command_ertr::now(); }
/*
   * For passing an NVMe IO or Admin command through to the SSD.
   * The caller can construct and execute its own NVMe command.
*/
nvme_command_ertr::future<int> pass_admin(
nvme_admin_command_t& admin_cmd, seastar::file f);
nvme_command_ertr::future<int> pass_through_io(
nvme_io_command_t& io_cmd);
bool support_multistream = false;
uint8_t data_protection_type = 0;
/*
* Predictable Latency
*
   * An NVMe device can guarantee IO latency within a pre-defined time window.
   * This functionality is not used yet and will be analyzed later.
*/
private:
// identify_controller/namespace are used to get SSD internal information such
// as supported features, NPWG and NPWA
nvme_command_ertr::future<nvme_identify_controller_data_t>
identify_controller(seastar::file f);
nvme_command_ertr::future<nvme_identify_namespace_data_t>
identify_namespace(seastar::file f);
nvme_command_ertr::future<int> get_nsid(seastar::file f);
open_ertr::future<> open_for_io(
const std::string& in_path,
seastar::open_flags mode);
seastar::file device;
std::vector<seastar::file> io_device;
uint32_t stream_index_to_open = WRITE_LIFE_NOT_SET;
  uint32_t stream_id_count = 1; // streams are disabled by default.
uint32_t awupf = 0;
uint64_t write_granularity = 4096;
uint64_t write_alignment = 4096;
uint32_t atomic_write_unit = 4096;
bool data_protection_enabled = false;
std::string device_path;
seastar::sharded<NVMeBlockDevice> shard_devices;
};
}
ceph-main/src/crimson/os/seastore/random_block_manager/rbm_device.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <sys/mman.h>
#include <string.h>
#include <fcntl.h>
#include "crimson/common/log.h"
#include "crimson/common/errorator-loop.h"
#include "include/buffer.h"
#include "rbm_device.h"
#include "nvme_block_device.h"
#include "block_rb_manager.h"
namespace crimson::os::seastore::random_block_device {
#include "crimson/os/seastore/logging.h"
SET_SUBSYS(seastore_device);
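// A sizing note for do_primary_mkfs() below (illustrative numbers only): the
// device is split evenly across shards, each shard getting size / shard_num
// rounded down to a block-size multiple and laid out back to back. For
// example, a 100 GiB device with 4 KiB blocks and 4 shards yields a 25 GiB
// shard size with start offsets at 0, 25, 50 and 75 GiB; every shard must
// still be larger than the configured journal_size.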
RBMDevice::mkfs_ret RBMDevice::do_primary_mkfs(device_config_t config,
int shard_num, size_t journal_size) {
LOG_PREFIX(RBMDevice::do_primary_mkfs);
return stat_device(
).handle_error(
mkfs_ertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error stat_device in RBMDevice::do_primary_mkfs"}
).safe_then(
[this, FNAME, config=std::move(config), shard_num, journal_size](auto st) {
super.block_size = st.block_size;
super.size = st.size;
super.feature |= RBM_BITMAP_BLOCK_CRC;
super.config = std::move(config);
super.journal_size = journal_size;
ceph_assert_always(super.journal_size > 0);
ceph_assert_always(super.size >= super.journal_size);
ceph_assert_always(shard_num > 0);
std::vector<rbm_shard_info_t> shard_infos(shard_num);
for (int i = 0; i < shard_num; i++) {
uint64_t aligned_size =
(super.size / shard_num) -
((super.size / shard_num) % super.block_size);
shard_infos[i].size = aligned_size;
shard_infos[i].start_offset = i * aligned_size;
assert(shard_infos[i].size > super.journal_size);
}
super.shard_infos = shard_infos;
super.shard_num = shard_num;
shard_info = shard_infos[seastar::this_shard_id()];
DEBUG("super {} ", super);
// write super block
return open(get_device_path(),
seastar::open_flags::rw | seastar::open_flags::dsync
).handle_error(
mkfs_ertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error open in RBMDevice::do_primary_mkfs"}
).safe_then([this] {
return write_rbm_header(
).safe_then([this] {
return close();
}).handle_error(
mkfs_ertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error write_rbm_header in RBMDevice::do_primary_mkfs"
});
});
});
}
write_ertr::future<> RBMDevice::write_rbm_header()
{
bufferlist meta_b_header;
super.crc = 0;
encode(super, meta_b_header);
// If NVMeDevice supports data protection, CRC for checksum is not required
// NVMeDevice is expected to generate and store checksum internally.
// CPU overhead for CRC might be saved.
if (is_data_protection_enabled()) {
super.crc = -1;
} else {
super.crc = meta_b_header.crc32c(-1);
}
bufferlist bl;
encode(super, bl);
auto iter = bl.begin();
auto bp = bufferptr(ceph::buffer::create_page_aligned(super.block_size));
assert(bl.length() < super.block_size);
iter.copy(bl.length(), bp.c_str());
return write(RBM_START_ADDRESS, std::move(bp));
}
read_ertr::future<rbm_metadata_header_t> RBMDevice::read_rbm_header(
rbm_abs_addr addr)
{
LOG_PREFIX(RBMDevice::read_rbm_header);
assert(super.block_size > 0);
return seastar::do_with(
bufferptr(ceph::buffer::create_page_aligned(super.block_size)),
[this, addr, FNAME](auto &bptr) {
return read(
addr,
bptr
).safe_then([length=bptr.length(), this, bptr, FNAME]()
-> read_ertr::future<rbm_metadata_header_t> {
bufferlist bl;
bl.append(bptr);
auto p = bl.cbegin();
rbm_metadata_header_t super_block;
try {
decode(super_block, p);
}
catch (ceph::buffer::error& e) {
DEBUG("read_rbm_header: unable to decode rbm super block {}",
e.what());
return crimson::ct_error::enoent::make();
}
checksum_t crc = super_block.crc;
bufferlist meta_b_header;
super_block.crc = 0;
encode(super_block, meta_b_header);
assert(ceph::encoded_sizeof<rbm_metadata_header_t>(super_block) <
super_block.block_size);
// Do CRC verification only if data protection is not supported.
if (is_data_protection_enabled() == false) {
if (meta_b_header.crc32c(-1) != crc) {
DEBUG("bad crc on super block, expected {} != actual {} ",
meta_b_header.crc32c(-1), crc);
return crimson::ct_error::input_output_error::make();
}
} else {
ceph_assert_always(crc == (checksum_t)-1);
}
super_block.crc = crc;
super = super_block;
DEBUG("got {} ", super);
return read_ertr::future<rbm_metadata_header_t>(
read_ertr::ready_future_marker{},
super_block
);
});
});
}
RBMDevice::mount_ret RBMDevice::do_shard_mount()
{
return open(get_device_path(),
seastar::open_flags::rw | seastar::open_flags::dsync
).safe_then([this] {
return stat_device(
).handle_error(
mount_ertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error stat_device in RBMDevice::do_shard_mount"}
).safe_then([this](auto st) {
assert(st.block_size > 0);
super.block_size = st.block_size;
return read_rbm_header(RBM_START_ADDRESS
).safe_then([this](auto s) {
LOG_PREFIX(RBMDevice::do_shard_mount);
shard_info = s.shard_infos[seastar::this_shard_id()];
INFO("{} read {}", device_id_printer_t{get_device_id()}, shard_info);
s.validate();
return seastar::now();
});
});
}).handle_error(
mount_ertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error mount in RBMDevice::do_shard_mount"}
);
}
EphemeralRBMDeviceRef create_test_ephemeral(uint64_t journal_size, uint64_t data_size) {
return EphemeralRBMDeviceRef(
new EphemeralRBMDevice(journal_size + data_size +
random_block_device::RBMDevice::get_shard_reserved_size(),
EphemeralRBMDevice::TEST_BLOCK_SIZE));
}
open_ertr::future<> EphemeralRBMDevice::open(
const std::string &in_path,
seastar::open_flags mode) {
LOG_PREFIX(EphemeralRBMDevice::open);
if (buf) {
return open_ertr::now();
}
DEBUG(
"Initializing test memory device {}",
size);
void* addr = ::mmap(
nullptr,
size,
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
-1,
0);
buf = (char*)addr;
::memset(buf, 0, size);
return open_ertr::now();
}
write_ertr::future<> EphemeralRBMDevice::write(
uint64_t offset,
bufferptr &&bptr,
uint16_t stream) {
LOG_PREFIX(EphemeralRBMDevice::write);
ceph_assert(buf);
DEBUG(
"EphemeralRBMDevice: write offset {} len {}",
offset,
bptr.length());
::memcpy(buf + offset, bptr.c_str(), bptr.length());
return write_ertr::now();
}
read_ertr::future<> EphemeralRBMDevice::read(
uint64_t offset,
bufferptr &bptr) {
LOG_PREFIX(EphemeralRBMDevice::read);
ceph_assert(buf);
DEBUG(
"EphemeralRBMDevice: read offset {} len {}",
offset,
bptr.length());
bptr.copy_in(0, bptr.length(), buf + offset);
return read_ertr::now();
}
Device::close_ertr::future<> EphemeralRBMDevice::close() {
LOG_PREFIX(EphemeralRBMDevice::close);
DEBUG(" close ");
return close_ertr::now();
}
write_ertr::future<> EphemeralRBMDevice::writev(
uint64_t offset,
ceph::bufferlist bl,
uint16_t stream) {
LOG_PREFIX(EphemeralRBMDevice::writev);
ceph_assert(buf);
DEBUG(
"EphemeralRBMDevice: write offset {} len {}",
offset,
bl.length());
bl.begin().copy(bl.length(), buf + offset);
return write_ertr::now();
}
EphemeralRBMDevice::mount_ret EphemeralRBMDevice::mount() {
return do_shard_mount();
}
EphemeralRBMDevice::mkfs_ret EphemeralRBMDevice::mkfs(device_config_t config) {
return do_primary_mkfs(config, 1, DEFAULT_TEST_CBJOURNAL_SIZE);
}
}
ceph-main/src/crimson/os/seastore/random_block_manager/rbm_device.h
//-*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/random_block_manager.h"
#include "crimson/os/seastore/device.h"
namespace ceph {
namespace buffer {
class bufferptr;
}
}
namespace crimson::os::seastore::random_block_device {
// from blk/BlockDevice.h
#if defined(__linux__)
#if !defined(F_SET_FILE_RW_HINT)
#define F_LINUX_SPECIFIC_BASE 1024
#define F_SET_FILE_RW_HINT (F_LINUX_SPECIFIC_BASE + 14)
#endif
// These values match Linux definition
// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/include/uapi/linux/fcntl.h#n56
#define WRITE_LIFE_NOT_SET 0 // No hint information set
#define WRITE_LIFE_NONE 1 // No hints about write life time
#define WRITE_LIFE_SHORT 2 // Data written has a short life time
#define WRITE_LIFE_MEDIUM 3 // Data written has a medium life time
#define WRITE_LIFE_LONG 4 // Data written has a long life time
#define WRITE_LIFE_EXTREME 5 // Data written has an extremely long life time
#define WRITE_LIFE_MAX 6
#else
// On systems that don't have WRITE_LIFE_*, only use one FD
// And all files are created equal
#define WRITE_LIFE_NOT_SET 0 // No hint information set
#define WRITE_LIFE_NONE 0 // No hints about write life time
#define WRITE_LIFE_SHORT 0 // Data written has a short life time
#define WRITE_LIFE_MEDIUM 0 // Data written has a medium life time
#define WRITE_LIFE_LONG 0 // Data written has a long life time
#define WRITE_LIFE_EXTREME 0 // Data written has an extremely long life time
#define WRITE_LIFE_MAX 1
#endif
using read_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::enoent,
crimson::ct_error::erange>;
using write_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::ebadf,
crimson::ct_error::enospc>;
using open_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
crimson::ct_error::enoent>;
using nvme_command_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
using discard_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
constexpr uint32_t RBM_SUPERBLOCK_SIZE = 4096;
enum {
// TODO: This allows the device to manage crc on a block by itself
RBM_NVME_END_TO_END_PROTECTION = 1,
RBM_BITMAP_BLOCK_CRC = 2,
};
class RBMDevice : public Device {
public:
using Device::read;
read_ertr::future<> read (
paddr_t addr,
size_t len,
ceph::bufferptr &out) final {
uint64_t rbm_addr = convert_paddr_to_abs_addr(addr);
return read(rbm_addr, out);
}
protected:
rbm_metadata_header_t super;
rbm_shard_info_t shard_info;
public:
RBMDevice() {}
virtual ~RBMDevice() = default;
template <typename T>
static std::unique_ptr<T> create() {
return std::make_unique<T>();
}
device_id_t get_device_id() const {
return super.config.spec.id;
}
magic_t get_magic() const final {
return super.config.spec.magic;
}
device_type_t get_device_type() const final {
return device_type_t::RANDOM_BLOCK_SSD;
}
backend_type_t get_backend_type() const final {
return backend_type_t::RANDOM_BLOCK;
}
const seastore_meta_t &get_meta() const final {
return super.config.meta;
}
secondary_device_set_t& get_secondary_devices() final {
return super.config.secondary_devices;
}
std::size_t get_available_size() const { return super.size; }
extent_len_t get_block_size() const { return super.block_size; }
virtual read_ertr::future<> read(
uint64_t offset,
bufferptr &bptr) = 0;
/*
* Multi-stream write
*
   * Gives the device a hint to classify data with similar lifetimes. Data
   * written with the same stream value is managed together inside the SSD
   * for better write performance.
*/
virtual write_ertr::future<> write(
uint64_t offset,
bufferptr &&bptr,
uint16_t stream = 0) = 0;
virtual discard_ertr::future<> discard(
uint64_t offset,
uint64_t len) { return seastar::now(); }
virtual open_ertr::future<> open(
const std::string& path,
seastar::open_flags mode) = 0;
virtual write_ertr::future<> writev(
uint64_t offset,
ceph::bufferlist bl,
uint16_t stream = 0) = 0;
bool is_data_protection_enabled() const { return false; }
mkfs_ret do_mkfs(device_config_t);
// shard 0 mkfs
mkfs_ret do_primary_mkfs(device_config_t, int shard_num, size_t journal_size);
mount_ret do_mount();
mount_ret do_shard_mount();
write_ertr::future<> write_rbm_header();
read_ertr::future<rbm_metadata_header_t> read_rbm_header(rbm_abs_addr addr);
using stat_device_ret =
read_ertr::future<seastar::stat_data>;
virtual stat_device_ret stat_device() = 0;
virtual std::string get_device_path() const = 0;
uint64_t get_journal_size() const {
return super.journal_size;
}
static rbm_abs_addr get_shard_reserved_size() {
return RBM_SUPERBLOCK_SIZE;
}
rbm_abs_addr get_shard_journal_start() {
return shard_info.start_offset + get_shard_reserved_size();
}
uint64_t get_shard_start() const {
return shard_info.start_offset;
}
uint64_t get_shard_end() const {
return shard_info.start_offset + shard_info.size;
}
};
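// A minimal sketch of a stream-hinted write; it assumes "dev" outlives the
// returned future and that "offset" and "bptr" are aligned to the device
// block size. Short-lived data is tagged WRITE_LIFE_SHORT so the device can
// group it separately from long-lived data.
inline write_ertr::future<> example_hinted_write(
  RBMDevice &dev, uint64_t offset, bufferptr bptr)
{
  return dev.write(offset, std::move(bptr), WRITE_LIFE_SHORT);
}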
using RBMDeviceRef = std::unique_ptr<RBMDevice>;
constexpr uint64_t DEFAULT_TEST_CBJOURNAL_SIZE = 1 << 26;
class EphemeralRBMDevice : public RBMDevice {
public:
uint64_t size = 0;
uint64_t block_size = 0;
constexpr static uint32_t TEST_BLOCK_SIZE = 4096;
EphemeralRBMDevice(size_t size, uint64_t block_size) :
size(size), block_size(block_size), buf(nullptr) {
}
~EphemeralRBMDevice() {
if (buf) {
::munmap(buf, size);
buf = nullptr;
}
}
std::size_t get_available_size() const final { return size; }
extent_len_t get_block_size() const final { return block_size; }
mount_ret mount() final;
mkfs_ret mkfs(device_config_t config) final;
open_ertr::future<> open(
const std::string &in_path,
seastar::open_flags mode) override;
write_ertr::future<> write(
uint64_t offset,
bufferptr &&bptr,
uint16_t stream = 0) override;
using RBMDevice::read;
read_ertr::future<> read(
uint64_t offset,
bufferptr &bptr) override;
close_ertr::future<> close() override;
write_ertr::future<> writev(
uint64_t offset,
ceph::bufferlist bl,
uint16_t stream = 0) final;
stat_device_ret stat_device() final {
seastar::stat_data stat;
stat.block_size = block_size;
stat.size = size;
return stat_device_ret(
read_ertr::ready_future_marker{},
stat
);
}
std::string get_device_path() const final {
return "";
}
char *buf;
};
using EphemeralRBMDeviceRef = std::unique_ptr<EphemeralRBMDevice>;
EphemeralRBMDeviceRef create_test_ephemeral(
uint64_t journal_size = DEFAULT_TEST_CBJOURNAL_SIZE,
uint64_t data_size = DEFAULT_TEST_CBJOURNAL_SIZE);
}
ceph-main/src/crimson/os/seastore/segment_manager/block.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <sys/mman.h>
#include <string.h>
#include <fmt/format.h>
#include <seastar/core/metrics.hh>
#include "include/buffer.h"
#include "crimson/common/config_proxy.h"
#include "crimson/common/errorator-loop.h"
#include "crimson/os/seastore/logging.h"
#include "crimson/os/seastore/segment_manager/block.h"
SET_SUBSYS(seastore_device);
/*
* format:
* - D<device-id> S<segment-id> offset=<off>~<len> poffset=<off> information
* - D<device-id> poffset=<off>~<len> information
*
* levels:
* - INFO: major initiation, closing and segment operations
* - DEBUG: INFO details, major read and write operations
* - TRACE: DEBUG details
*/
using segment_state_t = crimson::os::seastore::Segment::segment_state_t;
template <> struct fmt::formatter<segment_state_t>: fmt::formatter<std::string_view> {
// parse is inherited from formatter<string_view>.
template <typename FormatContext>
auto format(segment_state_t s, FormatContext& ctx) {
std::string_view name = "unknown";
switch (s) {
case segment_state_t::EMPTY:
name = "empty";
break;
case segment_state_t::OPEN:
name = "open";
break;
case segment_state_t::CLOSED:
name = "closed";
break;
}
return formatter<string_view>::format(name, ctx);
}
};
namespace crimson::os::seastore::segment_manager::block {
static write_ertr::future<> do_write(
device_id_t device_id,
seastar::file &device,
uint64_t offset,
bufferptr &bptr)
{
LOG_PREFIX(block_do_write);
auto len = bptr.length();
TRACE("{} poffset={}~{} ...",
device_id_printer_t{device_id}, offset, len);
return device.dma_write(
offset,
bptr.c_str(),
len
).handle_exception(
[FNAME, device_id, offset, len](auto e) -> write_ertr::future<size_t> {
ERROR("{} poffset={}~{} got error -- {}",
device_id_printer_t{device_id}, offset, len, e);
return crimson::ct_error::input_output_error::make();
}).then([FNAME, device_id, offset, len](auto result) -> write_ertr::future<> {
if (result != len) {
ERROR("{} poffset={}~{} write len={} inconsistent",
device_id_printer_t{device_id}, offset, len, result);
return crimson::ct_error::input_output_error::make();
}
TRACE("{} poffset={}~{} done", device_id_printer_t{device_id}, offset, len);
return write_ertr::now();
});
}
static write_ertr::future<> do_writev(
device_id_t device_id,
seastar::file &device,
uint64_t offset,
bufferlist&& bl,
size_t block_size)
{
LOG_PREFIX(block_do_writev);
TRACE("{} poffset={}~{}, {} buffers",
device_id_printer_t{device_id}, offset, bl.length(), bl.get_num_buffers());
// writev requires each buffer to be aligned to the disks' block
// size, we need to rebuild here
bl.rebuild_aligned(block_size);
return seastar::do_with(
bl.prepare_iovs(),
std::move(bl),
[&device, device_id, offset, FNAME](auto& iovs, auto& bl)
{
return write_ertr::parallel_for_each(
iovs,
[&device, device_id, offset, FNAME](auto& p) mutable
{
auto off = offset + p.offset;
auto len = p.length;
auto& iov = p.iov;
TRACE("{} poffset={}~{} dma_write ...",
device_id_printer_t{device_id}, off, len);
return device.dma_write(off, std::move(iov)
).handle_exception(
[FNAME, device_id, off, len](auto e) -> write_ertr::future<size_t>
{
ERROR("{} poffset={}~{} dma_write got error -- {}",
device_id_printer_t{device_id}, off, len, e);
return crimson::ct_error::input_output_error::make();
}).then([FNAME, device_id, off, len](size_t written) -> write_ertr::future<> {
if (written != len) {
ERROR("{} poffset={}~{} dma_write len={} inconsistent",
device_id_printer_t{device_id}, off, len, written);
return crimson::ct_error::input_output_error::make();
}
TRACE("{} poffset={}~{} dma_write done",
device_id_printer_t{device_id}, off, len);
return write_ertr::now();
});
});
});
}
static read_ertr::future<> do_read(
device_id_t device_id,
seastar::file &device,
uint64_t offset,
size_t len,
bufferptr &bptr)
{
LOG_PREFIX(block_do_read);
TRACE("{} poffset={}~{} ...", device_id_printer_t{device_id}, offset, len);
assert(len <= bptr.length());
return device.dma_read(
offset,
bptr.c_str(),
len
).handle_exception(
//FIXME: this is a little bit tricky, since seastar::future<T>::handle_exception
    // returns seastar::future<T>, to return a crimson::ct_error, we have to create
// a seastar::future<T> holding that crimson::ct_error. This is not necessary
// once seastar::future<T>::handle_exception() returns seastar::futurize_t<T>
[FNAME, device_id, offset, len](auto e) -> read_ertr::future<size_t>
{
ERROR("{} poffset={}~{} got error -- {}",
device_id_printer_t{device_id}, offset, len, e);
return crimson::ct_error::input_output_error::make();
}).then([FNAME, device_id, offset, len](auto result) -> read_ertr::future<> {
if (result != len) {
ERROR("{} poffset={}~{} read len={} inconsistent",
device_id_printer_t{device_id}, offset, len, result);
return crimson::ct_error::input_output_error::make();
}
TRACE("{} poffset={}~{} done", device_id_printer_t{device_id}, offset, len);
return read_ertr::now();
});
}
write_ertr::future<>
SegmentStateTracker::write_out(
device_id_t device_id,
seastar::file &device,
uint64_t offset)
{
LOG_PREFIX(SegmentStateTracker::write_out);
DEBUG("{} poffset={}~{}",
device_id_printer_t{device_id}, offset, bptr.length());
return do_write(device_id, device, offset, bptr);
}
write_ertr::future<>
SegmentStateTracker::read_in(
device_id_t device_id,
seastar::file &device,
uint64_t offset)
{
LOG_PREFIX(SegmentStateTracker::read_in);
DEBUG("{} poffset={}~{}",
device_id_printer_t{device_id}, offset, bptr.length());
return do_read(
device_id,
device,
offset,
bptr.length(),
bptr);
}
using std::vector;
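// make_superblock() below derives a per-shard layout from the device size
// (a sketch):
//
//   [ superblock (1 block) ][ shard 0 tracker ] ... [ shard N-1 tracker ]
//   [ shard 0 segments ] ... [ shard N-1 segments ]
//
// where N is seastar::smp::count, each tracker occupies shard_tracker_size
// bytes starting at its tracker_offset, and each shard owns
// segments_per_shard contiguous segments starting at first_segment_offset.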
static
block_sm_superblock_t make_superblock(
device_id_t device_id,
device_config_t sm_config,
const seastar::stat_data &data)
{
LOG_PREFIX(block_make_superblock);
using crimson::common::get_conf;
auto config_size = get_conf<Option::size_t>(
"seastore_device_size");
size_t size = (data.size == 0) ? config_size : data.size;
auto config_segment_size = get_conf<Option::size_t>(
"seastore_segment_size");
size_t raw_segments = size / config_segment_size;
size_t shard_tracker_size = SegmentStateTracker::get_raw_size(
raw_segments / seastar::smp::count,
data.block_size);
size_t total_tracker_size = shard_tracker_size * seastar::smp::count;
size_t tracker_off = data.block_size; //superblock
size_t segments = (size - tracker_off - total_tracker_size) / config_segment_size;
size_t segments_per_shard = segments / seastar::smp::count;
vector<block_shard_info_t> shard_infos(seastar::smp::count);
for (unsigned int i = 0; i < seastar::smp::count; i++) {
shard_infos[i].size = segments_per_shard * config_segment_size;
shard_infos[i].segments = segments_per_shard;
shard_infos[i].tracker_offset = tracker_off + i * shard_tracker_size;
shard_infos[i].first_segment_offset = tracker_off + total_tracker_size
+ i * segments_per_shard * config_segment_size;
}
INFO("{} disk_size={}, segment_size={}, block_size={}",
device_id_printer_t{device_id},
size,
uint64_t(config_segment_size),
data.block_size);
for (unsigned int i = 0; i < seastar::smp::count; i++) {
    INFO("shard {} infos: {}", i, shard_infos[i]);
}
return block_sm_superblock_t{
seastar::smp::count,
config_segment_size,
data.block_size,
shard_infos,
std::move(sm_config)
};
}
using check_create_device_ertr = BlockSegmentManager::access_ertr;
using check_create_device_ret = check_create_device_ertr::future<>;
static check_create_device_ret check_create_device(
const std::string &path,
size_t size)
{
LOG_PREFIX(block_check_create_device);
INFO("path={}, size={}", path, size);
return seastar::open_file_dma(
path,
seastar::open_flags::exclusive |
seastar::open_flags::rw |
seastar::open_flags::create
).then([size, FNAME, &path](auto file) {
return seastar::do_with(
file,
[size, FNAME, &path](auto &f) -> seastar::future<>
{
DEBUG("path={} created, truncating to {}", path, size);
ceph_assert(f);
return f.truncate(
size
).then([&f, size] {
return f.allocate(0, size);
}).finally([&f] {
return f.close();
});
});
}).then_wrapped([&path, FNAME](auto f) -> check_create_device_ret {
if (f.failed()) {
try {
f.get();
return seastar::now();
} catch (const std::system_error &e) {
if (e.code().value() == EEXIST) {
ERROR("path={} exists", path);
return seastar::now();
} else {
ERROR("path={} creation error -- {}", path, e);
return crimson::ct_error::input_output_error::make();
}
} catch (...) {
ERROR("path={} creation error", path);
return crimson::ct_error::input_output_error::make();
}
}
DEBUG("path={} complete", path);
std::ignore = f.discard_result();
return seastar::now();
});
}
using open_device_ret =
BlockSegmentManager::access_ertr::future<
std::pair<seastar::file, seastar::stat_data>
>;
static
open_device_ret open_device(
const std::string &path)
{
LOG_PREFIX(block_open_device);
return seastar::file_stat(path, seastar::follow_symlink::yes
).then([&path, FNAME](auto stat) mutable {
return seastar::open_file_dma(
path,
seastar::open_flags::rw | seastar::open_flags::dsync
).then([stat, &path, FNAME](auto file) mutable {
return file.size().then([stat, file, &path, FNAME](auto size) mutable {
stat.size = size;
INFO("path={} successful, size={}, block_size={}",
path, stat.size, stat.block_size);
return std::make_pair(file, stat);
});
});
}).handle_exception([FNAME, &path](auto e) -> open_device_ret {
ERROR("path={} got error -- {}", path, e);
return crimson::ct_error::input_output_error::make();
});
}
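// Encode the superblock into a single page-aligned block and write it at
// offset 0 of the device.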
static
BlockSegmentManager::access_ertr::future<>
write_superblock(
device_id_t device_id,
seastar::file &device,
block_sm_superblock_t sb)
{
LOG_PREFIX(block_write_superblock);
DEBUG("{} write {}", device_id_printer_t{device_id}, sb);
sb.validate();
assert(ceph::encoded_sizeof<block_sm_superblock_t>(sb) <
sb.block_size);
return seastar::do_with(
bufferptr(ceph::buffer::create_page_aligned(sb.block_size)),
[=, &device](auto &bp)
{
bufferlist bl;
encode(sb, bl);
auto iter = bl.begin();
assert(bl.length() < sb.block_size);
iter.copy(bl.length(), bp.c_str());
return do_write(device_id, device, 0, bp);
});
}
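// Read and decode the superblock from offset 0; a decode failure is fatal.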
static
BlockSegmentManager::access_ertr::future<block_sm_superblock_t>
read_superblock(seastar::file &device, seastar::stat_data sd)
{
LOG_PREFIX(block_read_superblock);
DEBUG("reading superblock ...");
return seastar::do_with(
bufferptr(ceph::buffer::create_page_aligned(sd.block_size)),
[=, &device](auto &bp)
{
return do_read(
DEVICE_ID_NULL, // unknown
device,
0,
bp.length(),
bp
).safe_then([=, &bp] {
bufferlist bl;
bl.push_back(bp);
block_sm_superblock_t ret;
auto bliter = bl.cbegin();
try {
decode(ret, bliter);
} catch (...) {
ERROR("got decode error!");
ceph_assert(0 == "invalid superblock");
}
assert(ceph::encoded_sizeof<block_sm_superblock_t>(ret) <
sd.block_size);
return BlockSegmentManager::access_ertr::future<block_sm_superblock_t>(
BlockSegmentManager::access_ertr::ready_future_marker{},
ret);
});
});
}
BlockSegment::BlockSegment(
BlockSegmentManager &manager, segment_id_t id)
: manager(manager), id(id) {}
segment_off_t BlockSegment::get_write_capacity() const
{
return manager.get_segment_size();
}
Segment::close_ertr::future<> BlockSegment::close()
{
return manager.segment_close(id, write_pointer);
}
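// Writes must be block aligned, must not regress the write pointer and must
// stay within the segment; the write pointer advances past the written data.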
Segment::write_ertr::future<> BlockSegment::write(
segment_off_t offset, ceph::bufferlist bl)
{
LOG_PREFIX(BlockSegment::write);
auto paddr = paddr_t::make_seg_paddr(id, offset);
DEBUG("{} offset={}~{} poffset={} ...",
id, offset, bl.length(), manager.get_offset(paddr));
if (offset < write_pointer ||
offset % manager.superblock.block_size != 0 ||
bl.length() % manager.superblock.block_size != 0) {
ERROR("{} offset={}~{} poffset={} invalid write",
id, offset, bl.length(), manager.get_offset(paddr));
return crimson::ct_error::invarg::make();
}
if (offset + bl.length() > manager.superblock.segment_size) {
ERROR("{} offset={}~{} poffset={} write out of the range {}",
id, offset, bl.length(), manager.get_offset(paddr),
manager.superblock.segment_size);
return crimson::ct_error::enospc::make();
}
write_pointer = offset + bl.length();
return manager.segment_write(paddr, bl);
}
Segment::write_ertr::future<> BlockSegment::advance_wp(
segment_off_t offset) {
return write_ertr::now();
}
Segment::close_ertr::future<> BlockSegmentManager::segment_close(
segment_id_t id, segment_off_t write_pointer)
{
LOG_PREFIX(BlockSegmentManager::segment_close);
auto s_id = id.device_segment_id();
int unused_bytes = get_segment_size() - write_pointer;
INFO("{} unused_bytes={} ...", id, unused_bytes);
assert(unused_bytes >= 0);
assert(id.device_id() == get_device_id());
assert(tracker);
tracker->set(s_id, segment_state_t::CLOSED);
++stats.closed_segments;
stats.closed_segments_unused_bytes += unused_bytes;
stats.metadata_write.increment(tracker->get_size());
return tracker->write_out(
get_device_id(), device,
shard_info.tracker_offset);
}
Segment::write_ertr::future<> BlockSegmentManager::segment_write(
paddr_t addr,
ceph::bufferlist bl,
bool ignore_check)
{
assert(addr.get_device_id() == get_device_id());
assert((bl.length() % superblock.block_size) == 0);
stats.data_write.increment(bl.length());
return do_writev(
get_device_id(),
device,
get_offset(addr),
std::move(bl),
superblock.block_size);
}
BlockSegmentManager::~BlockSegmentManager()
{
}
BlockSegmentManager::mount_ret BlockSegmentManager::mount()
{
return shard_devices.invoke_on_all([](auto &local_device) {
return local_device.shard_mount(
).handle_error(
crimson::ct_error::assert_all{
"Invalid error in BlockSegmentManager::mount"
});
});
}
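// Per-shard mount: open the device, read the superblock, load this shard's
// segment state tracker and demote any segment left OPEN by a previous
// instance to CLOSED before persisting the tracker again.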
BlockSegmentManager::mount_ret BlockSegmentManager::shard_mount()
{
LOG_PREFIX(BlockSegmentManager::shard_mount);
return open_device(
device_path
).safe_then([=, this](auto p) {
device = std::move(p.first);
auto sd = p.second;
return read_superblock(device, sd);
}).safe_then([=, this](auto sb) {
set_device_id(sb.config.spec.id);
shard_info = sb.shard_infos[seastar::this_shard_id()];
INFO("{} read {}", device_id_printer_t{get_device_id()}, shard_info);
sb.validate();
superblock = sb;
stats.data_read.increment(
ceph::encoded_sizeof<block_sm_superblock_t>(superblock));
tracker = std::make_unique<SegmentStateTracker>(
shard_info.segments,
superblock.block_size);
stats.data_read.increment(tracker->get_size());
return tracker->read_in(
get_device_id(),
device,
shard_info.tracker_offset
).safe_then([this] {
for (device_segment_id_t i = 0; i < tracker->get_capacity(); ++i) {
if (tracker->get(i) == segment_state_t::OPEN) {
tracker->set(i, segment_state_t::CLOSED);
}
}
stats.metadata_write.increment(tracker->get_size());
return tracker->write_out(
get_device_id(), device,
shard_info.tracker_offset);
});
}).safe_then([this, FNAME] {
INFO("{} complete", device_id_printer_t{get_device_id()});
register_metrics();
});
}
BlockSegmentManager::mkfs_ret BlockSegmentManager::mkfs(
device_config_t sm_config)
{
return shard_devices.local().primary_mkfs(sm_config
).safe_then([this] {
return shard_devices.invoke_on_all([](auto &local_device) {
return local_device.shard_mkfs(
).handle_error(
crimson::ct_error::assert_all{
"Invalid error in BlockSegmentManager::mkfs"
});
});
});
}
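// mkfs step run on shard 0 only: optionally create the backing file, then
// build and persist the superblock describing the per-shard layout.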
BlockSegmentManager::mkfs_ret BlockSegmentManager::primary_mkfs(
device_config_t sm_config)
{
LOG_PREFIX(BlockSegmentManager::primary_mkfs);
ceph_assert(sm_config.spec.dtype == superblock.config.spec.dtype);
set_device_id(sm_config.spec.id);
INFO("{} path={}, {}",
device_id_printer_t{get_device_id()}, device_path, sm_config);
return seastar::do_with(
seastar::file{},
seastar::stat_data{},
block_sm_superblock_t{},
std::unique_ptr<SegmentStateTracker>(),
[=, this](auto &device, auto &stat, auto &sb, auto &tracker)
{
check_create_device_ret maybe_create = check_create_device_ertr::now();
using crimson::common::get_conf;
if (get_conf<bool>("seastore_block_create")) {
auto size = get_conf<Option::size_t>("seastore_device_size");
maybe_create = check_create_device(device_path, size);
}
return maybe_create.safe_then([this] {
return open_device(device_path);
}).safe_then([&, sm_config](auto p) {
device = p.first;
stat = p.second;
sb = make_superblock(get_device_id(), sm_config, stat);
stats.metadata_write.increment(
ceph::encoded_sizeof<block_sm_superblock_t>(sb));
return write_superblock(get_device_id(), device, sb);
}).finally([&] {
return device.close();
}).safe_then([FNAME, this] {
INFO("{} complete", device_id_printer_t{get_device_id()});
return mkfs_ertr::now();
});
});
}
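// mkfs step run on every shard: read back the superblock written by
// primary_mkfs and write out a fresh (all EMPTY) tracker for this shard.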
BlockSegmentManager::mkfs_ret BlockSegmentManager::shard_mkfs()
{
LOG_PREFIX(BlockSegmentManager::shard_mkfs);
return open_device(
device_path
).safe_then([this](auto p) {
device = std::move(p.first);
auto sd = p.second;
return read_superblock(device, sd);
}).safe_then([this, FNAME](auto sb) {
set_device_id(sb.config.spec.id);
shard_info = sb.shard_infos[seastar::this_shard_id()];
INFO("{} read {}", device_id_printer_t{get_device_id()}, shard_info);
sb.validate();
tracker.reset(new SegmentStateTracker(
shard_info.segments, sb.block_size));
stats.metadata_write.increment(tracker->get_size());
return tracker->write_out(
get_device_id(), device,
shard_info.tracker_offset);
}).finally([this] {
return device.close();
}).safe_then([FNAME, this] {
INFO("{} complete", device_id_printer_t{get_device_id()});
return mkfs_ertr::now();
});
}
BlockSegmentManager::close_ertr::future<> BlockSegmentManager::close()
{
LOG_PREFIX(BlockSegmentManager::close);
INFO("{}", device_id_printer_t{get_device_id()});
metrics.clear();
return device.close();
}
SegmentManager::open_ertr::future<SegmentRef> BlockSegmentManager::open(
segment_id_t id)
{
LOG_PREFIX(BlockSegmentManager::open);
auto s_id = id.device_segment_id();
INFO("{} ...", id);
assert(id.device_id() == get_device_id());
if (s_id >= get_num_segments()) {
ERROR("{} segment-id out of range {}", id, get_num_segments());
return crimson::ct_error::invarg::make();
}
if (tracker->get(s_id) != segment_state_t::EMPTY) {
ERROR("{} invalid state {} != EMPTY", id, tracker->get(s_id));
return crimson::ct_error::invarg::make();
}
tracker->set(s_id, segment_state_t::OPEN);
stats.metadata_write.increment(tracker->get_size());
return tracker->write_out(
get_device_id(), device,
shard_info.tracker_offset
).safe_then([this, id, FNAME] {
++stats.opened_segments;
DEBUG("{} done", id);
return open_ertr::future<SegmentRef>(
open_ertr::ready_future_marker{},
SegmentRef(new BlockSegment(*this, id)));
});
}
SegmentManager::release_ertr::future<> BlockSegmentManager::release(
segment_id_t id)
{
LOG_PREFIX(BlockSegmentManager::release);
auto s_id = id.device_segment_id();
INFO("{} ...", id);
assert(id.device_id() == get_device_id());
if (s_id >= get_num_segments()) {
ERROR("{} segment-id out of range {}", id, get_num_segments());
return crimson::ct_error::invarg::make();
}
if (tracker->get(s_id) != segment_state_t::CLOSED) {
ERROR("{} invalid state {} != CLOSED", id, tracker->get(s_id));
return crimson::ct_error::invarg::make();
}
tracker->set(s_id, segment_state_t::EMPTY);
++stats.released_segments;
stats.metadata_write.increment(tracker->get_size());
return tracker->write_out(
get_device_id(), device,
shard_info.tracker_offset);
}
SegmentManager::read_ertr::future<> BlockSegmentManager::read(
paddr_t addr,
size_t len,
ceph::bufferptr &out)
{
LOG_PREFIX(BlockSegmentManager::read);
auto& seg_addr = addr.as_seg_paddr();
auto id = seg_addr.get_segment_id();
auto s_id = id.device_segment_id();
auto s_off = seg_addr.get_segment_off();
auto p_off = get_offset(addr);
DEBUG("{} offset={}~{} poffset={} ...", id, s_off, len, p_off);
assert(addr.get_device_id() == get_device_id());
if (s_off % superblock.block_size != 0 ||
len % superblock.block_size != 0) {
ERROR("{} offset={}~{} poffset={} invalid read", id, s_off, len, p_off);
return crimson::ct_error::invarg::make();
}
if (s_id >= get_num_segments()) {
ERROR("{} offset={}~{} poffset={} segment-id out of range {}",
id, s_off, len, p_off, get_num_segments());
return crimson::ct_error::invarg::make();
}
if (s_off + len > superblock.segment_size) {
ERROR("{} offset={}~{} poffset={} read out of range {}",
id, s_off, len, p_off, superblock.segment_size);
return crimson::ct_error::invarg::make();
}
if (tracker->get(s_id) == segment_state_t::EMPTY) {
// XXX: not an error during scanning,
// might need refactor to increase the log level
DEBUG("{} offset={}~{} poffset={} invalid state {}",
id, s_off, len, p_off, tracker->get(s_id));
return crimson::ct_error::enoent::make();
}
stats.data_read.increment(len);
return do_read(
get_device_id(),
device,
p_off,
len,
out);
}
void BlockSegmentManager::register_metrics()
{
LOG_PREFIX(BlockSegmentManager::register_metrics);
DEBUG("{}", device_id_printer_t{get_device_id()});
namespace sm = seastar::metrics;
std::vector<sm::label_instance> label_instances;
label_instances.push_back(sm::label_instance("device_id", get_device_id()));
stats.reset();
metrics.add_group(
"segment_manager",
{
sm::make_counter(
"data_read_num",
stats.data_read.num,
sm::description("total number of data read"),
label_instances
),
sm::make_counter(
"data_read_bytes",
stats.data_read.bytes,
sm::description("total bytes of data read"),
label_instances
),
sm::make_counter(
"data_write_num",
stats.data_write.num,
sm::description("total number of data write"),
label_instances
),
sm::make_counter(
"data_write_bytes",
stats.data_write.bytes,
sm::description("total bytes of data write"),
label_instances
),
sm::make_counter(
"metadata_write_num",
stats.metadata_write.num,
sm::description("total number of metadata write"),
label_instances
),
sm::make_counter(
"metadata_write_bytes",
stats.metadata_write.bytes,
sm::description("total bytes of metadata write"),
label_instances
),
sm::make_counter(
"opened_segments",
stats.opened_segments,
sm::description("total segments opened"),
label_instances
),
sm::make_counter(
"closed_segments",
stats.closed_segments,
sm::description("total segments closed"),
label_instances
),
sm::make_counter(
"closed_segments_unused_bytes",
stats.closed_segments_unused_bytes,
sm::description("total unused bytes of closed segments"),
label_instances
),
sm::make_counter(
"released_segments",
stats.released_segments,
sm::description("total segments released"),
label_instances
),
}
);
}
}
| 24,720 | 29.482121 | 86 | cc |
null | ceph-main/src/crimson/os/seastore/segment_manager/block.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/file.hh>
#include <seastar/core/future.hh>
#include <seastar/core/reactor.hh>
#include "crimson/common/layout.h"
#include "crimson/os/seastore/segment_manager.h"
namespace crimson::os::seastore::segment_manager::block {
using write_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
using read_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
/**
* SegmentStateTracker
*
* Tracks lifecycle state of each segment using space at the beginning
* of the drive.
*/
class SegmentStateTracker {
using segment_state_t = Segment::segment_state_t;
bufferptr bptr;
using L = absl::container_internal::Layout<uint8_t>;
const L layout;
public:
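  // One byte of state per segment; the total is rounded up to a whole
  // multiple of the device block size.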
static size_t get_raw_size(size_t segments, size_t block_size) {
return p2roundup(segments, block_size);
}
SegmentStateTracker(size_t segments, size_t block_size)
: bptr(ceph::buffer::create_page_aligned(
get_raw_size(segments, block_size))),
layout(bptr.length())
{
::memset(
bptr.c_str(),
static_cast<char>(segment_state_t::EMPTY),
bptr.length());
}
size_t get_size() const {
return bptr.length();
}
size_t get_capacity() const {
return bptr.length();
}
segment_state_t get(device_segment_id_t offset) const {
assert(offset < get_capacity());
return static_cast<segment_state_t>(
layout.template Pointer<0>(
bptr.c_str())[offset]);
}
void set(device_segment_id_t offset, segment_state_t state) {
assert(offset < get_capacity());
layout.template Pointer<0>(bptr.c_str())[offset] =
static_cast<uint8_t>(state);
}
write_ertr::future<> write_out(
device_id_t device_id,
seastar::file &device,
uint64_t offset);
read_ertr::future<> read_in(
device_id_t device_id,
seastar::file &device,
uint64_t offset);
};
class BlockSegmentManager;
class BlockSegment final : public Segment {
friend class BlockSegmentManager;
BlockSegmentManager &manager;
const segment_id_t id;
segment_off_t write_pointer = 0;
public:
BlockSegment(BlockSegmentManager &manager, segment_id_t id);
segment_id_t get_segment_id() const final { return id; }
segment_off_t get_write_capacity() const final;
segment_off_t get_write_ptr() const final { return write_pointer; }
close_ertr::future<> close() final;
write_ertr::future<> write(segment_off_t offset, ceph::bufferlist bl) final;
write_ertr::future<> advance_wp(segment_off_t offset) final;
~BlockSegment() {}
};
/**
* BlockSegmentManager
*
* Implements SegmentManager on a conventional block device.
* SegmentStateTracker uses space at the start of the device to store
 * state analogous to that of the segments of a ZNS device.
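 *
 * Typical call order (sketch only): mkfs() once to lay out the superblock
 * and trackers, then mount(); open() a segment, Segment::write() to it
 * sequentially, close() it, and eventually release() it for reuse.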
*/
class BlockSegmentManager final : public SegmentManager {
// interfaces used by Device
public:
seastar::future<> start() {
return shard_devices.start(device_path, superblock.config.spec.dtype);
}
seastar::future<> stop() {
return shard_devices.stop();
}
Device& get_sharded_device() final {
return shard_devices.local();
}
mount_ret mount() final;
mkfs_ret mkfs(device_config_t) final;
// interfaces used by each shard device
public:
close_ertr::future<> close();
BlockSegmentManager(
const std::string &path,
device_type_t dtype)
: device_path(path) {
ceph_assert(get_device_type() == device_type_t::NONE);
superblock.config.spec.dtype = dtype;
}
~BlockSegmentManager();
open_ertr::future<SegmentRef> open(segment_id_t id) final;
release_ertr::future<> release(segment_id_t id) final;
read_ertr::future<> read(
paddr_t addr,
size_t len,
ceph::bufferptr &out) final;
device_type_t get_device_type() const final {
return superblock.config.spec.dtype;
}
size_t get_available_size() const final {
return shard_info.size;
}
extent_len_t get_block_size() const {
return superblock.block_size;
}
segment_off_t get_segment_size() const {
return superblock.segment_size;
}
device_id_t get_device_id() const final {
assert(device_id <= DEVICE_ID_MAX_VALID);
return device_id;
}
secondary_device_set_t& get_secondary_devices() final {
return superblock.config.secondary_devices;
}
// public so tests can bypass segment interface when simpler
Segment::write_ertr::future<> segment_write(
paddr_t addr,
ceph::bufferlist bl,
bool ignore_check=false);
magic_t get_magic() const final {
return superblock.config.spec.magic;
}
private:
friend class BlockSegment;
using segment_state_t = Segment::segment_state_t;
struct effort_t {
uint64_t num = 0;
uint64_t bytes = 0;
void increment(uint64_t read_bytes) {
++num;
bytes += read_bytes;
}
};
struct {
effort_t data_read;
effort_t data_write;
effort_t metadata_write;
uint64_t opened_segments;
uint64_t closed_segments;
uint64_t closed_segments_unused_bytes;
uint64_t released_segments;
void reset() {
data_read = {};
data_write = {};
metadata_write = {};
opened_segments = 0;
closed_segments = 0;
closed_segments_unused_bytes = 0;
released_segments = 0;
}
} stats;
void register_metrics();
seastar::metrics::metric_group metrics;
std::string device_path;
std::unique_ptr<SegmentStateTracker> tracker;
block_shard_info_t shard_info;
block_sm_superblock_t superblock;
seastar::file device;
void set_device_id(device_id_t id) {
assert(id <= DEVICE_ID_MAX_VALID);
assert(device_id == DEVICE_ID_NULL ||
device_id == id);
device_id = id;
}
device_id_t device_id = DEVICE_ID_NULL;
size_t get_offset(paddr_t addr) {
auto& seg_addr = addr.as_seg_paddr();
return shard_info.first_segment_offset +
(seg_addr.get_segment_id().device_segment_id() * superblock.segment_size) +
seg_addr.get_segment_off();
}
const seastore_meta_t &get_meta() const {
return superblock.config.meta;
}
std::vector<segment_state_t> segment_state;
char *buffer = nullptr;
Segment::close_ertr::future<> segment_close(
segment_id_t id, segment_off_t write_pointer);
private:
// shard 0 mkfs
mkfs_ret primary_mkfs(device_config_t);
// all shards mkfs
mkfs_ret shard_mkfs();
// all shards mount
mount_ret shard_mount();
seastar::sharded<BlockSegmentManager> shard_devices;
};
}
| 6,606 | 24.121673 | 81 | h |
null | ceph-main/src/crimson/os/seastore/segment_manager/ephemeral.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <sys/mman.h>
#include <string.h>
#include "seastar/core/sleep.hh"
#include "crimson/common/log.h"
#include "include/buffer.h"
#include "crimson/os/seastore/segment_manager/ephemeral.h"
namespace {
seastar::logger& logger() {
return crimson::get_logger(ceph_subsys_seastore_device);
}
}
namespace crimson::os::seastore::segment_manager {
std::ostream &operator<<(std::ostream &lhs, const ephemeral_config_t &c) {
return lhs << "ephemeral_config_t(size=" << c.size << ", block_size=" << c.block_size
<< ", segment_size=" << c.segment_size << ")";
}
EphemeralSegmentManagerRef create_test_ephemeral() {
return EphemeralSegmentManagerRef(
new EphemeralSegmentManager(DEFAULT_TEST_EPHEMERAL));
}
device_config_t get_ephemeral_device_config(
std::size_t index,
std::size_t num_main_devices,
std::size_t num_cold_devices)
{
auto num_devices = num_main_devices + num_cold_devices;
assert(num_devices > index);
auto get_sec_dtype = [num_main_devices](std::size_t idx) {
if (idx < num_main_devices) {
return device_type_t::EPHEMERAL_MAIN;
} else {
return device_type_t::EPHEMERAL_COLD;
}
};
magic_t magic = 0xabcd;
bool is_major_device;
secondary_device_set_t secondary_devices;
if (index == 0) {
is_major_device = true;
for (std::size_t secondary_index = index + 1;
secondary_index < num_devices;
++secondary_index) {
device_id_t secondary_id = static_cast<device_id_t>(secondary_index);
secondary_devices.insert({
secondary_index,
device_spec_t{
magic,
get_sec_dtype(secondary_index),
secondary_id
}
});
}
} else { // index > 0
is_major_device = false;
}
device_id_t id = static_cast<device_id_t>(index);
seastore_meta_t meta = {};
return {is_major_device,
device_spec_t{
magic,
get_sec_dtype(index),
id
},
meta,
secondary_devices};
}
EphemeralSegment::EphemeralSegment(
EphemeralSegmentManager &manager, segment_id_t id)
: manager(manager), id(id) {}
segment_off_t EphemeralSegment::get_write_capacity() const
{
return manager.get_segment_size();
}
Segment::close_ertr::future<> EphemeralSegment::close()
{
return manager.segment_close(id).safe_then([] {
return seastar::sleep(std::chrono::milliseconds(1));
});
}
Segment::write_ertr::future<> EphemeralSegment::write(
segment_off_t offset, ceph::bufferlist bl)
{
if (offset < write_pointer || offset % manager.config.block_size != 0)
return crimson::ct_error::invarg::make();
if (offset + bl.length() > (size_t)manager.get_segment_size())
return crimson::ct_error::enospc::make();
return manager.segment_write(paddr_t::make_seg_paddr(id, offset), bl);
}
Segment::write_ertr::future<> EphemeralSegment::advance_wp(
segment_off_t offset)
{
return write_ertr::now();
}
Segment::close_ertr::future<> EphemeralSegmentManager::segment_close(segment_id_t id)
{
auto s_id = id.device_segment_id();
if (segment_state[s_id] != segment_state_t::OPEN)
return crimson::ct_error::invarg::make();
segment_state[s_id] = segment_state_t::CLOSED;
return Segment::close_ertr::now().safe_then([] {
return seastar::sleep(std::chrono::milliseconds(1));
});
}
EphemeralSegmentManager::mkfs_ret
EphemeralSegmentManager::mkfs(device_config_t _config)
{
logger().info(
"Mkfs ephemeral segment manager with {}",
_config);
device_config = _config;
return mkfs_ertr::now();
}
Segment::write_ertr::future<> EphemeralSegmentManager::segment_write(
paddr_t addr,
ceph::bufferlist bl,
bool ignore_check)
{
auto& seg_addr = addr.as_seg_paddr();
logger().debug(
"segment_write to segment {} at offset {}, physical offset {}, len {}, crc {}",
seg_addr.get_segment_id(),
seg_addr.get_segment_off(),
get_offset(addr),
bl.length(),
bl.crc32c(1));
if (!ignore_check && segment_state[seg_addr.get_segment_id().device_segment_id()]
!= segment_state_t::OPEN)
return crimson::ct_error::invarg::make();
bl.begin().copy(bl.length(), buffer + get_offset(addr));
return Segment::write_ertr::now().safe_then([] {
return seastar::sleep(std::chrono::milliseconds(1));
});
}
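// Allocate the in-memory backing store: an anonymous zero-filled mmap of
// config.size bytes plus one EMPTY state entry per segment.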
EphemeralSegmentManager::init_ertr::future<> EphemeralSegmentManager::init()
{
logger().info(
"Initing ephemeral segment manager with config {}",
config);
if (config.block_size % (4<<10) != 0) {
return crimson::ct_error::invarg::make();
}
if (config.segment_size % config.block_size != 0) {
return crimson::ct_error::invarg::make();
}
if (config.size % config.segment_size != 0) {
return crimson::ct_error::invarg::make();
}
void* addr = ::mmap(
nullptr,
config.size,
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
-1,
0);
segment_state.resize(config.size / config.segment_size, segment_state_t::EMPTY);
if (addr == MAP_FAILED)
return crimson::ct_error::enospc::make();
buffer = (char*)addr;
::memset(buffer, 0, config.size);
return init_ertr::now().safe_then([] {
return seastar::sleep(std::chrono::milliseconds(1));
});
}
EphemeralSegmentManager::~EphemeralSegmentManager()
{
if (buffer) {
::munmap(buffer, config.size);
}
}
void EphemeralSegmentManager::remount()
{
for (auto &i : segment_state) {
if (i == Segment::segment_state_t::OPEN)
i = Segment::segment_state_t::CLOSED;
}
}
SegmentManager::open_ertr::future<SegmentRef> EphemeralSegmentManager::open(
segment_id_t id)
{
auto s_id = id.device_segment_id();
if (s_id >= get_num_segments()) {
logger().error("EphemeralSegmentManager::open: invalid segment {}", id);
return crimson::ct_error::invarg::make();
}
if (segment_state[s_id] != segment_state_t::EMPTY) {
logger().error("EphemeralSegmentManager::open: segment {} not empty", id);
return crimson::ct_error::invarg::make();
}
segment_state[s_id] = segment_state_t::OPEN;
return open_ertr::make_ready_future<SegmentRef>(new EphemeralSegment(*this, id));
}
SegmentManager::release_ertr::future<> EphemeralSegmentManager::release(
segment_id_t id)
{
auto s_id = id.device_segment_id();
logger().debug("EphemeralSegmentManager::release: {}", id);
if (s_id >= get_num_segments()) {
logger().error(
"EphemeralSegmentManager::release: invalid segment {}",
id);
return crimson::ct_error::invarg::make();
}
if (segment_state[s_id] != segment_state_t::CLOSED) {
logger().error(
"EphemeralSegmentManager::release: segment id {} not closed",
id);
return crimson::ct_error::invarg::make();
}
::memset(buffer + get_offset(paddr_t::make_seg_paddr(id, 0)), 0, config.segment_size);
segment_state[s_id] = segment_state_t::EMPTY;
return release_ertr::now().safe_then([] {
return seastar::sleep(std::chrono::milliseconds(1));
});
}
SegmentManager::read_ertr::future<> EphemeralSegmentManager::read(
paddr_t addr,
size_t len,
ceph::bufferptr &out)
{
auto& seg_addr = addr.as_seg_paddr();
if (seg_addr.get_segment_id().device_segment_id() >= get_num_segments()) {
logger().error(
"EphemeralSegmentManager::read: invalid segment {}",
addr);
return crimson::ct_error::invarg::make();
}
if (seg_addr.get_segment_off() + len > config.segment_size) {
logger().error(
"EphemeralSegmentManager::read: invalid offset {}~{}!",
addr,
len);
return crimson::ct_error::invarg::make();
}
out.copy_in(0, len, buffer + get_offset(addr));
bufferlist bl;
bl.push_back(out);
logger().debug(
"segment_read to segment {} at offset {}, physical offset {}, length {}, crc {}",
seg_addr.get_segment_id().device_segment_id(),
seg_addr.get_segment_off(),
get_offset(addr),
len,
bl.begin().crc32c(len, 1));
return read_ertr::now().safe_then([] {
return seastar::sleep(std::chrono::milliseconds(1));
});
}
}
| 8,041 | 26.261017 | 88 | cc |
null | ceph-main/src/crimson/os/seastore/segment_manager/ephemeral.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/future.hh>
#include "crimson/os/seastore/segment_manager.h"
#include "crimson/os/seastore/segment_manager/ephemeral.h"
namespace crimson::os::seastore::segment_manager {
class EphemeralSegmentManager;
using EphemeralSegmentManagerRef = std::unique_ptr<EphemeralSegmentManager>;
struct ephemeral_config_t {
size_t size = 0;
size_t block_size = 0;
size_t segment_size = 0;
void validate() const {
ceph_assert_always(size > 0);
ceph_assert_always(size <= DEVICE_OFF_MAX);
ceph_assert_always(segment_size > 0);
ceph_assert_always(segment_size <= SEGMENT_OFF_MAX);
ceph_assert_always(size / segment_size > 0);
ceph_assert_always(size / segment_size <= DEVICE_SEGMENT_ID_MAX);
}
};
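// Default configuration for tests: 1 GiB device, 4 KiB blocks, 8 MiB segments.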
constexpr ephemeral_config_t DEFAULT_TEST_EPHEMERAL = {
1 << 30,
4 << 10,
8 << 20
};
std::ostream &operator<<(std::ostream &, const ephemeral_config_t &);
EphemeralSegmentManagerRef create_test_ephemeral();
device_config_t get_ephemeral_device_config(
std::size_t index,
std::size_t num_main_devices,
std::size_t num_cold_devices);
class EphemeralSegment final : public Segment {
friend class EphemeralSegmentManager;
EphemeralSegmentManager &manager;
const segment_id_t id;
segment_off_t write_pointer = 0;
public:
EphemeralSegment(EphemeralSegmentManager &manager, segment_id_t id);
segment_id_t get_segment_id() const final { return id; }
segment_off_t get_write_capacity() const final;
segment_off_t get_write_ptr() const final { return write_pointer; }
close_ertr::future<> close() final;
write_ertr::future<> write(segment_off_t offset, ceph::bufferlist bl) final;
write_ertr::future<> advance_wp(segment_off_t offset) final;
~EphemeralSegment() {}
};
class EphemeralSegmentManager final : public SegmentManager {
friend class EphemeralSegment;
using segment_state_t = Segment::segment_state_t;
const ephemeral_config_t config;
std::optional<device_config_t> device_config;
device_type_t get_device_type() const final {
assert(device_config);
return device_config->spec.dtype;
}
size_t get_offset(paddr_t addr) {
auto& seg_addr = addr.as_seg_paddr();
return (seg_addr.get_segment_id().device_segment_id() * config.segment_size) +
seg_addr.get_segment_off();
}
std::vector<segment_state_t> segment_state;
char *buffer = nullptr;
Segment::close_ertr::future<> segment_close(segment_id_t id);
public:
EphemeralSegmentManager(
ephemeral_config_t config)
: config(config) {
config.validate();
}
~EphemeralSegmentManager();
close_ertr::future<> close() final {
return close_ertr::now();
}
device_id_t get_device_id() const final {
assert(device_config);
return device_config->spec.id;
}
mount_ret mount() final {
return mount_ertr::now();
}
mkfs_ret mkfs(device_config_t) final;
open_ertr::future<SegmentRef> open(segment_id_t id) final;
release_ertr::future<> release(segment_id_t id) final;
read_ertr::future<> read(
paddr_t addr,
size_t len,
ceph::bufferptr &out) final;
size_t get_available_size() const final {
return config.size;
}
extent_len_t get_block_size() const final {
return config.block_size;
}
segment_off_t get_segment_size() const final {
return config.segment_size;
}
const seastore_meta_t &get_meta() const final {
assert(device_config);
return device_config->meta;
}
secondary_device_set_t& get_secondary_devices() final {
assert(device_config);
return device_config->secondary_devices;
}
magic_t get_magic() const final {
return device_config->spec.magic;
}
using init_ertr = crimson::errorator<
crimson::ct_error::enospc,
crimson::ct_error::invarg>;
init_ertr::future<> init();
void remount();
// public so tests can bypass segment interface when simpler
Segment::write_ertr::future<> segment_write(
paddr_t addr,
ceph::bufferlist bl,
bool ignore_check=false);
};
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::segment_manager::ephemeral_config_t> : fmt::ostream_formatter {};
#endif
| 4,354 | 25.077844 | 122 | h |
null | ceph-main/src/crimson/os/seastore/segment_manager/zbd.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <sys/mman.h>
#include <string.h>
#include <linux/blkzoned.h>
#include <fmt/format.h>
#include "crimson/os/seastore/segment_manager/zbd.h"
#include "crimson/common/config_proxy.h"
#include "crimson/os/seastore/logging.h"
#include "crimson/common/errorator-loop.h"
#include "include/buffer.h"
SET_SUBSYS(seastore_device);
#define SECT_SHIFT 9
#define RESERVED_ZONES 1
// limit the max padding buf size to 4MB
#define MAX_PADDING_SIZE 4194304
using z_op = crimson::os::seastore::segment_manager::zbd::zone_op;
template <> struct fmt::formatter<z_op>: fmt::formatter<std::string_view> {
template <typename FormatContext>
auto format(z_op s, FormatContext& ctx) {
std::string_view name = "Unknown";
switch (s) {
using enum z_op;
case OPEN:
name = "BLKOPENZONE";
break;
case FINISH:
name = "BLKFINISHZONE";
break;
case CLOSE:
name = "BLKCLOSEZONE";
break;
case RESET:
name = "BLKRESETZONE";
break;
}
return formatter<string_view>::format(name, ctx);
}
};
namespace crimson::os::seastore::segment_manager::zbd {
using open_device_ret = ZBDSegmentManager::access_ertr::future<
std::pair<seastar::file, seastar::stat_data>>;
static open_device_ret open_device(
const std::string &path,
seastar::open_flags mode)
{
LOG_PREFIX(ZBDSegmentManager::open_device);
return seastar::file_stat(
path, seastar::follow_symlink::yes
).then([FNAME, mode, &path](auto stat) mutable {
return seastar::open_file_dma(path, mode).then([=](auto file) {
DEBUG("open of device {} successful, size {}",
path,
stat.size);
return std::make_pair(file, stat);
});
}).handle_exception(
[FNAME](auto e) -> open_device_ret {
ERROR("got error {}",
e);
return crimson::ct_error::input_output_error::make();
}
);
}
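// Compute the layout for a zoned device: one segment per SWR zone (skipping
// the reserved and conventional zones), split evenly across seastar shards.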
static zbd_sm_metadata_t make_metadata(
uint64_t total_size,
seastore_meta_t meta,
const seastar::stat_data &data,
size_t zone_size_sectors,
size_t zone_capacity_sectors,
size_t nr_cnv_zones,
size_t num_zones)
{
LOG_PREFIX(ZBDSegmentManager::make_metadata);
  // Using only SWR zones in an SMR drive, for now
auto skipped_zones = RESERVED_ZONES + nr_cnv_zones;
assert(num_zones > skipped_zones);
// TODO: support Option::size_t seastore_segment_size
// to allow zones_per_segment > 1 with striping.
size_t zone_size = zone_size_sectors << SECT_SHIFT;
assert(total_size == num_zones * zone_size);
size_t zone_capacity = zone_capacity_sectors << SECT_SHIFT;
size_t segment_size = zone_size;
size_t zones_per_segment = segment_size / zone_size;
size_t segments = (num_zones - skipped_zones) / zones_per_segment;
size_t per_shard_segments = segments / seastar::smp::count;
size_t available_size = zone_capacity * segments;
size_t per_shard_available_size = zone_capacity * per_shard_segments;
WARN("Ignoring configuration values for device and segment size");
INFO(
"device size: {}, available size: {}, block size: {}, allocated size: {},"
" total zones {}, zone size: {}, zone capacity: {},"
" total segments: {}, zones per segment: {}, segment size: {}"
" conv zones: {}, swr zones: {}, per shard segments: {}"
" per shard available size: {}",
total_size,
available_size,
data.block_size,
data.allocated_size,
num_zones,
zone_size,
zone_capacity,
segments,
zones_per_segment,
zone_capacity * zones_per_segment,
nr_cnv_zones,
num_zones - nr_cnv_zones,
per_shard_segments,
per_shard_available_size);
std::vector<zbd_shard_info_t> shard_infos(seastar::smp::count);
for (unsigned int i = 0; i < seastar::smp::count; i++) {
shard_infos[i].size = per_shard_available_size;
shard_infos[i].segments = per_shard_segments;
shard_infos[i].first_segment_offset = zone_size * skipped_zones
+ i * segment_size * per_shard_segments;
INFO("First segment offset for shard {} is: {}",
i, shard_infos[i].first_segment_offset);
}
zbd_sm_metadata_t ret = zbd_sm_metadata_t{
seastar::smp::count,
segment_size,
zone_capacity * zones_per_segment,
zones_per_segment,
zone_capacity,
data.block_size,
zone_size,
shard_infos,
meta};
ret.validate();
return ret;
}
struct ZoneReport {
struct blk_zone_report *hdr;
ZoneReport(int nr_zones)
: hdr((blk_zone_report *)malloc(
        sizeof(struct blk_zone_report) + nr_zones * sizeof(struct blk_zone))) {}
~ZoneReport(){
free(hdr);
}
ZoneReport(const ZoneReport &) = delete;
ZoneReport(ZoneReport &&rhs) : hdr(rhs.hdr) {
rhs.hdr = nullptr;
}
};
static seastar::future<size_t> get_blk_dev_size(
seastar::file &device)
{
return seastar::do_with(
(uint64_t)0,
[&](auto& size_sects) {
return device.ioctl(
BLKGETSIZE,
(void *)&size_sects
).then([&](int ret) {
ceph_assert(size_sects);
size_t size = size_sects << SECT_SHIFT;
return seastar::make_ready_future<size_t>(size);
});
});
}
// zone_size should be in 512B sectors
static seastar::future<> reset_device(
seastar::file &device,
uint64_t zone_size_sects,
uint64_t nr_zones)
{
return seastar::do_with(
blk_zone_range{},
[&, nr_zones, zone_size_sects](auto &range) {
range.sector = 0;
range.nr_sectors = zone_size_sects * nr_zones;
return device.ioctl(
BLKRESETZONE,
&range
).then([&](int ret){
return seastar::now();
});
}
);
}
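// Query zone capacity via BLKREPORTZONE; the capacity reported for zone 0
// is used as representative for all sequential-write zones.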
static seastar::future<size_t> get_zone_capacity(
seastar::file &device,
uint32_t nr_zones)
{
return seastar::do_with(
ZoneReport(nr_zones),
[&](auto &zr) {
zr.hdr->sector = 0;
zr.hdr->nr_zones = nr_zones;
return device.ioctl(
BLKREPORTZONE,
zr.hdr
).then([&](int ret) {
return seastar::make_ready_future<size_t>(zr.hdr->zones[0].capacity);
});
}
);
}
// get the number of conventional zones of an SMR HDD;
// they are randomly writable and don't respond to zone operations
static seastar::future<size_t> get_nr_cnv_zones(
seastar::file &device,
uint32_t nr_zones)
{
return seastar::do_with(
ZoneReport(nr_zones),
[&](auto &zr) {
zr.hdr->sector = 0;
zr.hdr->nr_zones = nr_zones;
return device.ioctl(
BLKREPORTZONE,
zr.hdr
).then([&, nr_zones](int ret) {
size_t cnv_zones = 0;
for (uint32_t i = 0; i < nr_zones; i++) {
if (zr.hdr->zones[i].type == BLK_ZONE_TYPE_CONVENTIONAL)
cnv_zones++;
}
return seastar::make_ready_future<size_t>(cnv_zones);
});
}
);
}
static write_ertr::future<> do_write(
seastar::file &device,
uint64_t offset,
bufferptr &bptr)
{
LOG_PREFIX(ZBDSegmentManager::do_write);
DEBUG("offset {} len {}",
offset,
bptr.length());
return device.dma_write(
offset,
bptr.c_str(),
bptr.length()
).handle_exception(
[FNAME](auto e) -> write_ertr::future<size_t> {
ERROR("dma_write got error {}",
e);
return crimson::ct_error::input_output_error::make();
}
).then([length = bptr.length()](auto result) -> write_ertr::future<> {
if (result != length) {
return crimson::ct_error::input_output_error::make();
}
return write_ertr::now();
});
}
static write_ertr::future<> do_writev(
device_id_t device_id,
seastar::file &device,
uint64_t offset,
bufferlist&& bl,
size_t block_size)
{
LOG_PREFIX(ZBDSegmentManager::do_writev);
DEBUG("{} offset {} len {}",
device_id_printer_t{device_id}, offset, bl.length());
// writev requires each buffer to be aligned to the disks' block
// size, we need to rebuild here
bl.rebuild_aligned(block_size);
return seastar::do_with(
bl.prepare_iovs(),
std::move(bl),
[&device, device_id, offset, FNAME](auto& iovs, auto& bl)
{
return write_ertr::parallel_for_each(
iovs,
[&device, device_id, offset, FNAME](auto& p)
{
auto off = offset + p.offset;
auto len = p.length;
auto& iov = p.iov;
DEBUG("{} poffset={}~{} dma_write ...",
device_id_printer_t{device_id},
off, len);
return device.dma_write(off, std::move(iov)
).handle_exception(
[FNAME, device_id, off, len](auto e) -> write_ertr::future<size_t>
{
ERROR("{} poffset={}~{} dma_write got error -- {}",
device_id_printer_t{device_id}, off, len, e);
return crimson::ct_error::input_output_error::make();
}).then([FNAME, device_id, off, len](size_t written) -> write_ertr::future<> {
if (written != len) {
ERROR("{} poffset={}~{} dma_write len={} inconsistent",
device_id_printer_t{device_id}, off, len, written);
return crimson::ct_error::input_output_error::make();
}
DEBUG("{} poffset={}~{} dma_write done",
device_id_printer_t{device_id},
off, len);
return write_ertr::now();
});
});
});
}
static ZBDSegmentManager::access_ertr::future<>
write_metadata(seastar::file &device, zbd_sm_metadata_t sb)
{
assert(ceph::encoded_sizeof_bounded<zbd_sm_metadata_t>() <
sb.block_size);
return seastar::do_with(
bufferptr(ceph::buffer::create_page_aligned(sb.block_size)),
[=, &device](auto &bp) {
LOG_PREFIX(ZBDSegmentManager::write_metadata);
DEBUG("block_size {}", sb.block_size);
bufferlist bl;
encode(sb, bl);
auto iter = bl.begin();
assert(bl.length() < sb.block_size);
DEBUG("buffer length {}", bl.length());
iter.copy(bl.length(), bp.c_str());
DEBUG("doing writeout");
return do_write(device, 0, bp);
});
}
static read_ertr::future<> do_read(
seastar::file &device,
uint64_t offset,
size_t len,
bufferptr &bptr)
{
LOG_PREFIX(ZBDSegmentManager::do_read);
assert(len <= bptr.length());
DEBUG("offset {} len {}",
offset,
len);
return device.dma_read(
offset,
bptr.c_str(),
len
).handle_exception(
[FNAME](auto e) -> read_ertr::future<size_t> {
ERROR("dma_read got error {}",
e);
return crimson::ct_error::input_output_error::make();
}
).then([len](auto result) -> read_ertr::future<> {
if (result != len) {
return crimson::ct_error::input_output_error::make();
}
return read_ertr::now();
});
}
static
ZBDSegmentManager::access_ertr::future<zbd_sm_metadata_t>
read_metadata(seastar::file &device, seastar::stat_data sd)
{
assert(ceph::encoded_sizeof_bounded<zbd_sm_metadata_t>() <
sd.block_size);
return seastar::do_with(
bufferptr(ceph::buffer::create_page_aligned(sd.block_size)),
[=, &device](auto &bp) {
return do_read(
device,
0,
bp.length(),
bp
).safe_then([=, &bp] {
bufferlist bl;
bl.push_back(bp);
zbd_sm_metadata_t ret;
auto bliter = bl.cbegin();
decode(ret, bliter);
ret.validate();
return ZBDSegmentManager::access_ertr::future<zbd_sm_metadata_t>(
ZBDSegmentManager::access_ertr::ready_future_marker{},
ret);
});
});
}
ZBDSegmentManager::mount_ret ZBDSegmentManager::mount()
{
return shard_devices.invoke_on_all([](auto &local_device) {
return local_device.shard_mount(
).handle_error(
crimson::ct_error::assert_all{
"Invalid error in ZBDSegmentManager::mount"
});
});
}
ZBDSegmentManager::mount_ret ZBDSegmentManager::shard_mount()
{
return open_device(
device_path, seastar::open_flags::rw
).safe_then([=, this](auto p) {
device = std::move(p.first);
auto sd = p.second;
return read_metadata(device, sd);
}).safe_then([=, this](auto meta){
shard_info = meta.shard_infos[seastar::this_shard_id()];
metadata = meta;
return mount_ertr::now();
});
}
ZBDSegmentManager::mkfs_ret ZBDSegmentManager::mkfs(
device_config_t config)
{
return shard_devices.local().primary_mkfs(config
).safe_then([this] {
return shard_devices.invoke_on_all([](auto &local_device) {
return local_device.shard_mkfs(
).handle_error(
crimson::ct_error::assert_all{
"Invalid error in ZBDSegmentManager::mkfs"
});
});
});
}
ZBDSegmentManager::mkfs_ret ZBDSegmentManager::primary_mkfs(
device_config_t config)
{
LOG_PREFIX(ZBDSegmentManager::primary_mkfs);
INFO("starting, device_path {}", device_path);
return seastar::do_with(
seastar::file{},
seastar::stat_data{},
zbd_sm_metadata_t{},
size_t(),
size_t(),
size_t(),
size_t(),
[=, this]
(auto &device,
auto &stat,
auto &sb,
auto &zone_size_sects,
auto &nr_zones,
auto &size,
auto &nr_cnv_zones) {
return open_device(
device_path,
seastar::open_flags::rw
).safe_then([=, this, &device, &stat, &sb, &zone_size_sects, &nr_zones, &size, &nr_cnv_zones](auto p) {
device = p.first;
stat = p.second;
return device.ioctl(
BLKGETNRZONES,
(void *)&nr_zones
).then([&](int ret) {
if (nr_zones == 0) {
return seastar::make_exception_future<int>(
std::system_error(std::make_error_code(std::errc::io_error)));
}
return device.ioctl(BLKGETZONESZ, (void *)&zone_size_sects);
}).then([&](int ret) {
ceph_assert(zone_size_sects);
return reset_device(device, zone_size_sects, nr_zones);
}).then([&] {
return get_blk_dev_size(device);
}).then([&](auto devsize) {
size = devsize;
return get_nr_cnv_zones(device, nr_zones);
}).then([&](auto cnv_zones) {
DEBUG("Found {} conventional zones", cnv_zones);
nr_cnv_zones = cnv_zones;
return get_zone_capacity(device, nr_zones);
}).then([&, FNAME, config](auto zone_capacity_sects) {
ceph_assert(zone_capacity_sects);
DEBUG("zone_size in sectors {}, zone_capacity in sectors {}",
zone_size_sects, zone_capacity_sects);
sb = make_metadata(
size,
config.meta,
stat,
zone_size_sects,
zone_capacity_sects,
nr_cnv_zones,
nr_zones);
metadata = sb;
stats.metadata_write.increment(
ceph::encoded_sizeof_bounded<zbd_sm_metadata_t>());
DEBUG("Wrote to stats.");
return write_metadata(device, sb);
}).finally([&, FNAME] {
DEBUG("Closing device.");
return device.close();
}).safe_then([FNAME] {
DEBUG("Returning from mkfs.");
return mkfs_ertr::now();
});
});
});
}
ZBDSegmentManager::mkfs_ret ZBDSegmentManager::shard_mkfs()
{
LOG_PREFIX(ZBDSegmentManager::shard_mkfs);
INFO("starting, device_path {}", device_path);
return open_device(
device_path, seastar::open_flags::rw
).safe_then([=, this](auto p) {
device = std::move(p.first);
auto sd = p.second;
return read_metadata(device, sd);
}).safe_then([=, this](auto meta){
shard_info = meta.shard_infos[seastar::this_shard_id()];
metadata = meta;
return device.close();
}).safe_then([FNAME] {
DEBUG("Returning from shard_mkfs.");
return mkfs_ertr::now();
});
}
// Return range of sectors to operate on.
struct blk_zone_range make_range(
segment_id_t id,
size_t segment_size,
size_t first_segment_offset)
{
return blk_zone_range{
(id.device_segment_id() * (segment_size >> SECT_SHIFT)
+ (first_segment_offset >> SECT_SHIFT)),
(segment_size >> SECT_SHIFT)
};
}
using blk_zone_op_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
using blk_zone_op_ret = blk_zone_op_ertr::future<>;
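// Issue a zone management ioctl (open/finish/close/reset) over the given
// sector range, mapping any failure to input_output_error.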
blk_zone_op_ret blk_zone_op(seastar::file &device,
blk_zone_range &range,
zone_op op) {
LOG_PREFIX(ZBDSegmentManager::blk_zone_op);
unsigned long ioctl_op = 0;
switch (op) {
using enum zone_op;
case OPEN:
ioctl_op = BLKOPENZONE;
break;
case FINISH:
ioctl_op = BLKFINISHZONE;
break;
case RESET:
ioctl_op = BLKRESETZONE;
break;
case CLOSE:
ioctl_op = BLKCLOSEZONE;
break;
default:
ERROR("Invalid zone operation {}", op);
ceph_assert(ioctl_op);
}
return device.ioctl(
ioctl_op,
&range
).then_wrapped([=](auto f) -> blk_zone_op_ret {
if (f.failed()) {
ERROR("{} ioctl failed", op);
return crimson::ct_error::input_output_error::make();
} else {
int ret = f.get();
if (ret == 0) {
return seastar::now();
} else {
ERROR("{} ioctl failed with return code {}", op, ret);
return crimson::ct_error::input_output_error::make();
}
}
});
}
ZBDSegmentManager::open_ertr::future<SegmentRef> ZBDSegmentManager::open(
segment_id_t id)
{
LOG_PREFIX(ZBDSegmentManager::open);
return seastar::do_with(
blk_zone_range{},
[=, this](auto &range) {
range = make_range(
id,
metadata.segment_size,
shard_info.first_segment_offset);
return blk_zone_op(
device,
range,
zone_op::OPEN
);
}
).safe_then([=, this] {
DEBUG("segment {}, open successful", id);
return open_ertr::future<SegmentRef>(
open_ertr::ready_future_marker{},
SegmentRef(new ZBDSegment(*this, id))
);
});
}
ZBDSegmentManager::release_ertr::future<> ZBDSegmentManager::release(
segment_id_t id)
{
LOG_PREFIX(ZBDSegmentManager::release);
DEBUG("Resetting zone/segment {}", id);
return seastar::do_with(
blk_zone_range{},
[=, this](auto &range) {
range = make_range(
id,
metadata.segment_size,
shard_info.first_segment_offset);
return blk_zone_op(
device,
range,
zone_op::RESET
);
}
).safe_then([=] {
DEBUG("segment release successful");
return release_ertr::now();
});
}
SegmentManager::read_ertr::future<> ZBDSegmentManager::read(
paddr_t addr,
size_t len,
ceph::bufferptr &out)
{
LOG_PREFIX(ZBDSegmentManager::read);
auto& seg_addr = addr.as_seg_paddr();
if (seg_addr.get_segment_id().device_segment_id() >= get_num_segments()) {
ERROR("invalid segment {}",
seg_addr.get_segment_id().device_segment_id());
return crimson::ct_error::invarg::make();
}
if (seg_addr.get_segment_off() + len > metadata.segment_capacity) {
ERROR("invalid read offset {}, len {}",
addr,
len);
return crimson::ct_error::invarg::make();
}
return do_read(
device,
get_offset(addr),
len,
out);
}
Segment::close_ertr::future<> ZBDSegmentManager::segment_close(
segment_id_t id, segment_off_t write_pointer)
{
LOG_PREFIX(ZBDSegmentManager::segment_close);
return seastar::do_with(
blk_zone_range{},
[=, this](auto &range) {
range = make_range(
id,
metadata.segment_size,
shard_info.first_segment_offset);
return blk_zone_op(
device,
range,
zone_op::FINISH
);
}
).safe_then([=] {
DEBUG("zone finish successful");
return Segment::close_ertr::now();
});
}
Segment::write_ertr::future<> ZBDSegmentManager::segment_write(
paddr_t addr,
ceph::bufferlist bl,
bool ignore_check)
{
LOG_PREFIX(ZBDSegmentManager::segment_write);
assert(addr.get_device_id() == get_device_id());
assert((bl.length() % metadata.block_size) == 0);
auto& seg_addr = addr.as_seg_paddr();
DEBUG("write to segment {} at offset {}, physical offset {}, len {}",
seg_addr.get_segment_id(),
seg_addr.get_segment_off(),
get_offset(addr),
bl.length());
stats.data_write.increment(bl.length());
return do_writev(
get_device_id(),
device,
get_offset(addr),
std::move(bl),
metadata.block_size);
}
device_id_t ZBDSegmentManager::get_device_id() const
{
return metadata.device_id;
};
secondary_device_set_t& ZBDSegmentManager::get_secondary_devices()
{
return metadata.secondary_devices;
};
magic_t ZBDSegmentManager::get_magic() const
{
return metadata.magic;
};
segment_off_t ZBDSegment::get_write_capacity() const
{
return manager.get_segment_size();
}
SegmentManager::close_ertr::future<> ZBDSegmentManager::close()
{
if (device) {
return device.close();
}
return seastar::now();
}
Segment::close_ertr::future<> ZBDSegment::close()
{
return manager.segment_close(id, write_pointer);
}
Segment::write_ertr::future<> ZBDSegment::write(
segment_off_t offset, ceph::bufferlist bl)
{
LOG_PREFIX(ZBDSegment::write);
if (offset != write_pointer || offset % manager.metadata.block_size != 0) {
ERROR("Segment offset and zone write pointer mismatch. "
"segment {} segment-offset {} write pointer {}",
id, offset, write_pointer);
return crimson::ct_error::invarg::make();
}
if (offset + bl.length() > manager.metadata.segment_capacity) {
return crimson::ct_error::enospc::make();
}
write_pointer = offset + bl.length();
return manager.segment_write(paddr_t::make_seg_paddr(id, offset), bl);
}
Segment::write_ertr::future<> ZBDSegment::write_padding_bytes(
size_t padding_bytes)
{
LOG_PREFIX(ZBDSegment::write_padding_bytes);
DEBUG("Writing {} padding bytes to segment {} at wp {}",
padding_bytes, id, write_pointer);
return crimson::repeat([FNAME, padding_bytes, this] () mutable {
size_t bufsize = 0;
if (padding_bytes >= MAX_PADDING_SIZE) {
bufsize = MAX_PADDING_SIZE;
} else {
bufsize = padding_bytes;
}
padding_bytes -= bufsize;
bufferptr bp(ceph::buffer::create_page_aligned(bufsize));
bp.zero();
bufferlist padd_bl;
padd_bl.append(bp);
return write(write_pointer, padd_bl).safe_then([FNAME, padding_bytes, this]() {
if (padding_bytes == 0) {
return write_ertr::make_ready_future<seastar::stop_iteration>(seastar::stop_iteration::yes);
} else {
return write_ertr::make_ready_future<seastar::stop_iteration>(seastar::stop_iteration::no);
}
});
});
}
// Advance the write pointer to the given offset by writing zero padding.
Segment::write_ertr::future<> ZBDSegment::advance_wp(
segment_off_t offset)
{
LOG_PREFIX(ZBDSegment::advance_wp);
DEBUG("Advancing write pointer from {} to {}", write_pointer, offset);
if (offset < write_pointer) {
return crimson::ct_error::invarg::make();
}
size_t padding_bytes = offset - write_pointer;
if (padding_bytes == 0) {
return write_ertr::now();
}
assert(padding_bytes % manager.metadata.block_size == 0);
return write_padding_bytes(padding_bytes);
}
}
| 22,318 | 26.086165 | 109 | cc |
null | ceph-main/src/crimson/os/seastore/segment_manager/zbd.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <linux/blkzoned.h>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/file.hh>
#include <seastar/core/future.hh>
#include <seastar/core/reactor.hh>
#include "crimson/common/layout.h"
#include "crimson/os/seastore/segment_manager.h"
#include "include/uuid.h"
namespace crimson::os::seastore::segment_manager::zbd {
struct zbd_shard_info_t {
size_t size = 0;
size_t segments = 0;
size_t first_segment_offset = 0;
DENC(zbd_shard_info_t, v, p) {
DENC_START(1, 1, p);
denc(v.size, p);
denc(v.segments, p);
denc(v.first_segment_offset, p);
DENC_FINISH(p);
}
};
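// On-disk metadata for the zoned device; shard_infos records the slice of
// segments owned by each seastar shard.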
struct zbd_sm_metadata_t {
unsigned int shard_num = 0;
size_t segment_size = 0;
size_t segment_capacity = 0;
size_t zones_per_segment = 0;
size_t zone_capacity = 0;
size_t block_size = 0;
size_t zone_size = 0;
std::vector<zbd_shard_info_t> shard_infos;
seastore_meta_t meta;
bool major_dev = false;
magic_t magic = 0;
device_type_t dtype = device_type_t::NONE;
device_id_t device_id = 0;
secondary_device_set_t secondary_devices;
DENC(zbd_sm_metadata_t, v, p) {
DENC_START(1, 1, p);
denc(v.shard_num, p);
denc(v.segment_size, p);
denc(v.segment_capacity, p);
denc(v.zones_per_segment, p);
denc(v.zone_capacity, p);
denc(v.block_size, p);
denc(v.zone_size, p);
denc(v.shard_infos, p);
denc(v.meta, p);
denc(v.magic, p);
denc(v.dtype, p);
denc(v.device_id, p);
if (v.major_dev) {
denc(v.secondary_devices, p);
}
DENC_FINISH(p);
}
void validate() const {
ceph_assert_always(shard_num == seastar::smp::count);
for (unsigned int i = 0; i < seastar::smp::count; i++) {
ceph_assert_always(shard_infos[i].size > 0);
ceph_assert_always(shard_infos[i].size <= DEVICE_OFF_MAX);
ceph_assert_always(shard_infos[i].segments > 0);
ceph_assert_always(shard_infos[i].segments <= DEVICE_SEGMENT_ID_MAX);
}
ceph_assert_always(segment_capacity > 0);
ceph_assert_always(segment_capacity <= SEGMENT_OFF_MAX);
}
};
using write_ertr = crimson::errorator<crimson::ct_error::input_output_error>;
using read_ertr = crimson::errorator<crimson::ct_error::input_output_error>;
enum class zone_op {
OPEN,
FINISH,
CLOSE,
RESET,
};
class ZBDSegmentManager;
class ZBDSegment final : public Segment {
public:
ZBDSegment(ZBDSegmentManager &man, segment_id_t i) : manager(man), id(i){};
segment_id_t get_segment_id() const final { return id; }
segment_off_t get_write_capacity() const final;
segment_off_t get_write_ptr() const final { return write_pointer; }
close_ertr::future<> close() final;
write_ertr::future<> write(segment_off_t offset, ceph::bufferlist bl) final;
write_ertr::future<> advance_wp(segment_off_t offset) final;
~ZBDSegment() {}
private:
friend class ZBDSegmentManager;
ZBDSegmentManager &manager;
const segment_id_t id;
segment_off_t write_pointer = 0;
write_ertr::future<> write_padding_bytes(size_t padding_bytes);
};
class ZBDSegmentManager final : public SegmentManager{
// interfaces used by Device
public:
seastar::future<> start() {
return shard_devices.start(device_path);
}
seastar::future<> stop() {
return shard_devices.stop();
}
Device& get_sharded_device() final {
return shard_devices.local();
}
mount_ret mount() final;
mkfs_ret mkfs(device_config_t meta) final;
ZBDSegmentManager(const std::string &path) : device_path(path) {}
~ZBDSegmentManager() final = default;
  // interfaces used by each shard device
public:
open_ertr::future<SegmentRef> open(segment_id_t id) final;
close_ertr::future<> close() final;
release_ertr::future<> release(segment_id_t id) final;
read_ertr::future<> read(
paddr_t addr,
size_t len,
ceph::bufferptr &out) final;
device_type_t get_device_type() const final {
return device_type_t::ZBD;
}
size_t get_available_size() const final {
return shard_info.size;
};
extent_len_t get_block_size() const final {
return metadata.block_size;
};
segment_off_t get_segment_size() const final {
return metadata.segment_capacity;
};
const seastore_meta_t &get_meta() const {
return metadata.meta;
};
device_id_t get_device_id() const final;
secondary_device_set_t& get_secondary_devices() final;
magic_t get_magic() const final;
Segment::write_ertr::future<> segment_write(
paddr_t addr,
ceph::bufferlist bl,
bool ignore_check=false);
private:
friend class ZBDSegment;
std::string device_path;
zbd_shard_info_t shard_info;
zbd_sm_metadata_t metadata;
seastar::file device;
uint32_t nr_zones;
struct effort_t {
uint64_t num = 0;
uint64_t bytes = 0;
void increment(uint64_t read_bytes) {
++num;
bytes += read_bytes;
}
};
struct zbd_sm_stats {
effort_t data_read = {};
effort_t data_write = {};
effort_t metadata_write = {};
uint64_t opened_segments = 0;
uint64_t closed_segments = 0;
uint64_t closed_segments_unused_bytes = 0;
uint64_t released_segments = 0;
void reset() {
*this = zbd_sm_stats{};
}
} stats;
void register_metrics();
seastar::metrics::metric_group metrics;
Segment::close_ertr::future<> segment_close(
segment_id_t id, segment_off_t write_pointer);
uint64_t get_offset(paddr_t addr) {
auto& seg_addr = addr.as_seg_paddr();
return (shard_info.first_segment_offset +
(seg_addr.get_segment_id().device_segment_id() *
metadata.segment_size)) + seg_addr.get_segment_off();
}
private:
// shard 0 mkfs
mkfs_ret primary_mkfs(device_config_t meta);
// all shards mkfs
mkfs_ret shard_mkfs();
mount_ret shard_mount();
seastar::sharded<ZBDSegmentManager> shard_devices;
};
}
WRITE_CLASS_DENC_BOUNDED(
crimson::os::seastore::segment_manager::zbd::zbd_shard_info_t
)
WRITE_CLASS_DENC_BOUNDED(
crimson::os::seastore::segment_manager::zbd::zbd_sm_metadata_t
)
| 6,464 | 25.174089 | 80 | h |
null | ceph-main/src/crimson/osd/acked_peers.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <vector>
namespace crimson::osd {
struct peer_shard_t {
pg_shard_t shard;
eversion_t last_complete_ondisk;
};
using acked_peers_t = std::vector<peer_shard_t>;
}
| 298 | 18.933333 | 70 | h |
null | ceph-main/src/crimson/osd/backfill_facades.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/osd/backfill_state.h"
#include "crimson/osd/pg.h"
#include "osd/PeeringState.h"
namespace crimson::osd {
// PeeringFacade -- main implementation of the BackfillState::PeeringFacade
// interface. We have the abstraction to decuple BackfillState from Peering
// State, and thus cut depedencies in unit testing. The second implemention
// is BackfillFixture::PeeringFacade and sits in test_backfill.cc.
struct PeeringFacade final : BackfillState::PeeringFacade {
PeeringState& peering_state;
hobject_t earliest_backfill() const override {
return peering_state.earliest_backfill();
}
const std::set<pg_shard_t>& get_backfill_targets() const override {
return peering_state.get_backfill_targets();
}
const hobject_t& get_peer_last_backfill(pg_shard_t peer) const override {
return peering_state.get_peer_info(peer).last_backfill;
}
const eversion_t& get_last_update() const override {
return peering_state.get_info().last_update;
}
const eversion_t& get_log_tail() const override {
return peering_state.get_info().log_tail;
}
void scan_log_after(eversion_t v, scan_log_func_t f) const override {
peering_state.get_pg_log().get_log().scan_log_after(v, std::move(f));
}
bool is_backfill_target(pg_shard_t peer) const override {
return peering_state.is_backfill_target(peer);
}
void update_complete_backfill_object_stats(const hobject_t &hoid,
const pg_stat_t &stats) override {
peering_state.update_complete_backfill_object_stats(hoid, stats);
}
bool is_backfilling() const override {
return peering_state.is_backfilling();
}
PeeringFacade(PeeringState& peering_state)
: peering_state(peering_state) {
}
};
// PGFacade -- a facade (in the GoF-defined meaning) simplifying the huge
// interface of crimson's PG class. The motivation is to have an inventory
// of behaviour that must be provided by a unit test's mock.
struct PGFacade final : BackfillState::PGFacade {
PG& pg;
const eversion_t& get_projected_last_update() const override {
return pg.projected_last_update;
}
PGFacade(PG& pg) : pg(pg) {}
};
} // namespace crimson::osd
| 2,311 | 30.243243 | 79 | h |
null | ceph-main/src/crimson/osd/backfill_state.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <algorithm>
#include <boost/type_index.hpp>
#include <fmt/ranges.h>
#include "common/hobject_fmt.h"
#include "crimson/osd/backfill_state.h"
#include "osd/osd_types_fmt.h"
namespace {
seastar::logger& logger() {
return crimson::get_logger(ceph_subsys_osd);
}
}
namespace crimson::osd {
BackfillState::BackfillState(
BackfillState::BackfillListener& backfill_listener,
std::unique_ptr<BackfillState::PeeringFacade> peering_state,
std::unique_ptr<BackfillState::PGFacade> pg)
: backfill_machine(*this,
backfill_listener,
std::move(peering_state),
std::move(pg)),
progress_tracker(
std::make_unique<BackfillState::ProgressTracker>(backfill_machine))
{
logger().debug("{}:{}", __func__, __LINE__);
backfill_machine.initiate();
}
template <class S>
BackfillState::StateHelper<S>::StateHelper()
{
logger().debug("enter {}",
boost::typeindex::type_id<S>().pretty_name());
}
template <class S>
BackfillState::StateHelper<S>::~StateHelper()
{
logger().debug("exit {}",
boost::typeindex::type_id<S>().pretty_name());
}
BackfillState::~BackfillState() = default;
BackfillState::BackfillMachine::BackfillMachine(
BackfillState& backfill_state,
BackfillState::BackfillListener& backfill_listener,
std::unique_ptr<BackfillState::PeeringFacade> peering_state,
std::unique_ptr<BackfillState::PGFacade> pg)
: backfill_state(backfill_state),
backfill_listener(backfill_listener),
peering_state(std::move(peering_state)),
pg(std::move(pg))
{}
BackfillState::BackfillMachine::~BackfillMachine() = default;
BackfillState::Initial::Initial(my_context ctx)
: my_base(ctx)
{
backfill_state().last_backfill_started = peering_state().earliest_backfill();
logger().debug("{}: bft={} from {}",
__func__, peering_state().get_backfill_targets(),
backfill_state().last_backfill_started);
for (const auto& bt : peering_state().get_backfill_targets()) {
logger().debug("{}: target shard {} from {}",
__func__, bt, peering_state().get_peer_last_backfill(bt));
}
ceph_assert(peering_state().get_backfill_targets().size());
ceph_assert(!backfill_state().last_backfill_started.is_max());
}
boost::statechart::result
BackfillState::Initial::react(const BackfillState::Triggered& evt)
{
logger().debug("{}: backfill triggered", __func__);
ceph_assert(backfill_state().last_backfill_started == \
peering_state().earliest_backfill());
ceph_assert(peering_state().is_backfilling());
// initialize BackfillIntervals
for (const auto& bt : peering_state().get_backfill_targets()) {
backfill_state().peer_backfill_info[bt].reset(
peering_state().get_peer_last_backfill(bt));
}
backfill_state().backfill_info.reset(backfill_state().last_backfill_started);
if (Enqueuing::all_enqueued(peering_state(),
backfill_state().backfill_info,
backfill_state().peer_backfill_info)) {
logger().debug("{}: switching to Done state", __func__);
return transit<BackfillState::Done>();
} else {
logger().debug("{}: switching to Enqueuing state", __func__);
return transit<BackfillState::Enqueuing>();
}
}
// -- Enqueuing
void BackfillState::Enqueuing::maybe_update_range()
{
if (auto& primary_bi = backfill_state().backfill_info;
primary_bi.version >= pg().get_projected_last_update()) {
logger().info("{}: bi is current", __func__);
ceph_assert(primary_bi.version == pg().get_projected_last_update());
} else if (primary_bi.version >= peering_state().get_log_tail()) {
#if 0
if (peering_state().get_pg_log().get_log().empty() &&
pg().get_projected_log().empty()) {
/* Because we don't move log_tail on split, the log might be
* empty even if log_tail != last_update. However, the only
* way to get here with an empty log is if log_tail is actually
* eversion_t(), because otherwise the entry which changed
* last_update since the last scan would have to be present.
*/
ceph_assert(primary_bi.version == eversion_t());
return;
}
#endif
logger().debug("{}: bi is old, ({}) can be updated with log to {}",
__func__,
primary_bi.version,
pg().get_projected_last_update());
logger().debug("{}: scanning pg log first", __func__);
peering_state().scan_log_after(primary_bi.version,
[&](const pg_log_entry_t& e) {
logger().debug("maybe_update_range(lambda): updating from version {}",
e.version);
if (e.soid >= primary_bi.begin && e.soid < primary_bi.end) {
if (e.is_update()) {
logger().debug("maybe_update_range(lambda): {} updated to ver {}",
e.soid, e.version);
primary_bi.objects.erase(e.soid);
primary_bi.objects.insert(std::make_pair(e.soid,
e.version));
} else if (e.is_delete()) {
logger().debug("maybe_update_range(lambda): {} removed",
e.soid);
primary_bi.objects.erase(e.soid);
}
}
});
primary_bi.version = pg().get_projected_last_update();
} else {
ceph_abort_msg(
"scan_range should have raised primary_bi.version past log_tail");
}
}
void BackfillState::Enqueuing::trim_backfill_infos()
{
for (const auto& bt : peering_state().get_backfill_targets()) {
backfill_state().peer_backfill_info[bt].trim_to(
std::max(peering_state().get_peer_last_backfill(bt),
backfill_state().last_backfill_started));
}
backfill_state().backfill_info.trim_to(
backfill_state().last_backfill_started);
}
/* static */ bool BackfillState::Enqueuing::all_enqueued(
const PeeringFacade& peering_state,
const BackfillInterval& backfill_info,
const std::map<pg_shard_t, BackfillInterval>& peer_backfill_info)
{
const bool all_local_enqueued = \
backfill_info.extends_to_end() && backfill_info.empty();
const bool all_peer_enqueued = std::all_of(
std::begin(peer_backfill_info),
std::end(peer_backfill_info),
[] (const auto& kv) {
[[maybe_unused]] const auto& [ shard, peer_backfill_info ] = kv;
return peer_backfill_info.extends_to_end() && peer_backfill_info.empty();
});
return all_local_enqueued && all_peer_enqueued;
}
hobject_t BackfillState::Enqueuing::earliest_peer_backfill(
const std::map<pg_shard_t, BackfillInterval>& peer_backfill_info) const
{
hobject_t e = hobject_t::get_max();
for (const pg_shard_t& bt : peering_state().get_backfill_targets()) {
const auto iter = peer_backfill_info.find(bt);
ceph_assert(iter != peer_backfill_info.end());
e = std::min(e, iter->second.begin);
}
return e;
}
bool BackfillState::Enqueuing::should_rescan_replicas(
const std::map<pg_shard_t, BackfillInterval>& peer_backfill_info,
const BackfillInterval& backfill_info) const
{
const auto& targets = peering_state().get_backfill_targets();
return std::any_of(std::begin(targets), std::end(targets),
[&] (const auto& bt) {
return ReplicasScanning::replica_needs_scan(peer_backfill_info.at(bt),
backfill_info);
});
}
bool BackfillState::Enqueuing::should_rescan_primary(
const std::map<pg_shard_t, BackfillInterval>& peer_backfill_info,
const BackfillInterval& backfill_info) const
{
return backfill_info.begin <= earliest_peer_backfill(peer_backfill_info) &&
!backfill_info.extends_to_end();
}
void BackfillState::Enqueuing::trim_backfilled_object_from_intervals(
BackfillState::Enqueuing::result_t&& result,
hobject_t& last_backfill_started,
std::map<pg_shard_t, BackfillInterval>& peer_backfill_info)
{
std::for_each(std::begin(result.pbi_targets), std::end(result.pbi_targets),
[&peer_backfill_info] (const auto& bt) {
peer_backfill_info.at(bt).pop_front();
});
last_backfill_started = std::move(result.new_last_backfill_started);
}
BackfillState::Enqueuing::result_t
BackfillState::Enqueuing::remove_on_peers(const hobject_t& check)
{
// set `new_last_backfill_started` to `check`
result_t result { {}, check };
for (const auto& bt : peering_state().get_backfill_targets()) {
const auto& pbi = backfill_state().peer_backfill_info.at(bt);
if (pbi.begin == check) {
result.pbi_targets.insert(bt);
const auto& version = pbi.objects.begin()->second;
backfill_state().progress_tracker->enqueue_drop(pbi.begin);
backfill_listener().enqueue_drop(bt, pbi.begin, version);
}
}
logger().debug("{}: BACKFILL removing {} from peers {}",
__func__, check, result.pbi_targets);
ceph_assert(!result.pbi_targets.empty());
return result;
}
BackfillState::Enqueuing::result_t
BackfillState::Enqueuing::update_on_peers(const hobject_t& check)
{
logger().debug("{}: check={}", __func__, check);
const auto& primary_bi = backfill_state().backfill_info;
result_t result { {}, primary_bi.begin };
for (const auto& bt : peering_state().get_backfill_targets()) {
const auto& peer_bi = backfill_state().peer_backfill_info.at(bt);
// Find all check peers that have the wrong version
if (const eversion_t& obj_v = primary_bi.objects.begin()->second;
check == primary_bi.begin && check == peer_bi.begin) {
if(peer_bi.objects.begin()->second != obj_v &&
backfill_state().progress_tracker->enqueue_push(primary_bi.begin)) {
backfill_listener().enqueue_push(primary_bi.begin, obj_v);
} else {
// it's fine, keep it! OR already recovering
}
result.pbi_targets.insert(bt);
} else {
// Only include peers that we've caught up to their backfill line
// otherwise, they only appear to be missing this object
// because their peer_bi.begin > backfill_info.begin.
if (primary_bi.begin > peering_state().get_peer_last_backfill(bt) &&
backfill_state().progress_tracker->enqueue_push(primary_bi.begin)) {
backfill_listener().enqueue_push(primary_bi.begin, obj_v);
}
}
}
return result;
}
bool BackfillState::Enqueuing::all_emptied(
const BackfillInterval& local_backfill_info,
const std::map<pg_shard_t, BackfillInterval>& peer_backfill_info) const
{
const auto& targets = peering_state().get_backfill_targets();
const auto replicas_emptied =
std::all_of(std::begin(targets), std::end(targets),
[&] (const auto& bt) {
return peer_backfill_info.at(bt).empty();
});
return local_backfill_info.empty() && replicas_emptied;
}
BackfillState::Enqueuing::Enqueuing(my_context ctx)
: my_base(ctx)
{
auto& primary_bi = backfill_state().backfill_info;
// update our local interval to cope with recent changes
primary_bi.begin = backfill_state().last_backfill_started;
if (primary_bi.version < peering_state().get_log_tail()) {
    // it might be that the OSD is so flooded with modifying operations
    // that backfill will be spinning here over and over. For the sake
    // of performance and complexity we don't synchronize with the entire PG.
    // Something similar can happen in the classical OSD.
logger().warn("{}: bi is old, rescanning of local backfill_info",
__func__);
post_event(RequestPrimaryScanning{});
return;
} else {
maybe_update_range();
}
trim_backfill_infos();
while (!all_emptied(primary_bi, backfill_state().peer_backfill_info)) {
if (!backfill_listener().budget_available()) {
post_event(RequestWaiting{});
return;
} else if (should_rescan_replicas(backfill_state().peer_backfill_info,
primary_bi)) {
// Count simultaneous scans as a single op and let those complete
post_event(RequestReplicasScanning{});
return;
}
// Get object within set of peers to operate on and the set of targets
// for which that object applies.
if (const hobject_t check = \
earliest_peer_backfill(backfill_state().peer_backfill_info);
check < primary_bi.begin) {
// Don't increment ops here because deletions
// are cheap and not replied to unlike real recovery_ops,
// and we can't increment ops without requeueing ourself
// for recovery.
auto result = remove_on_peers(check);
trim_backfilled_object_from_intervals(std::move(result),
backfill_state().last_backfill_started,
backfill_state().peer_backfill_info);
} else {
auto result = update_on_peers(check);
trim_backfilled_object_from_intervals(std::move(result),
backfill_state().last_backfill_started,
backfill_state().peer_backfill_info);
primary_bi.pop_front();
}
backfill_listener().maybe_flush();
}
if (should_rescan_primary(backfill_state().peer_backfill_info,
primary_bi)) {
    // need to grab another chunk of the object namespace and restart
    // the queueing.
logger().debug("{}: reached end for current local chunk",
__func__);
post_event(RequestPrimaryScanning{});
} else if (backfill_state().progress_tracker->tracked_objects_completed()) {
post_event(RequestDone{});
} else {
logger().debug("{}: reached end for both local and all peers "
"but still has in-flight operations", __func__);
post_event(RequestWaiting{});
}
}
// -- PrimaryScanning
BackfillState::PrimaryScanning::PrimaryScanning(my_context ctx)
: my_base(ctx)
{
backfill_state().backfill_info.version = peering_state().get_last_update();
backfill_listener().request_primary_scan(
backfill_state().backfill_info.begin);
}
boost::statechart::result
BackfillState::PrimaryScanning::react(PrimaryScanned evt)
{
logger().debug("{}", __func__);
backfill_state().backfill_info = std::move(evt.result);
return transit<Enqueuing>();
}
boost::statechart::result
BackfillState::PrimaryScanning::react(ObjectPushed evt)
{
logger().debug("PrimaryScanning::react() on ObjectPushed; evt.object={}",
evt.object);
backfill_state().progress_tracker->complete_to(evt.object, evt.stat);
return discard_event();
}
// -- ReplicasScanning
bool BackfillState::ReplicasScanning::replica_needs_scan(
const BackfillInterval& replica_backfill_info,
const BackfillInterval& local_backfill_info)
{
return replica_backfill_info.empty() && \
replica_backfill_info.begin <= local_backfill_info.begin && \
!replica_backfill_info.extends_to_end();
}
BackfillState::ReplicasScanning::ReplicasScanning(my_context ctx)
: my_base(ctx)
{
for (const auto& bt : peering_state().get_backfill_targets()) {
if (const auto& pbi = backfill_state().peer_backfill_info.at(bt);
replica_needs_scan(pbi, backfill_state().backfill_info)) {
logger().debug("{}: scanning peer osd.{} from {}",
__func__, bt, pbi.end);
backfill_listener().request_replica_scan(bt, pbi.end, hobject_t{});
ceph_assert(waiting_on_backfill.find(bt) == \
waiting_on_backfill.end());
waiting_on_backfill.insert(bt);
}
}
ceph_assert(!waiting_on_backfill.empty());
// TODO: start_recovery_op(hobject_t::get_max()); // XXX: was pbi.end
}
#if 0
BackfillState::ReplicasScanning::~ReplicasScanning()
{
// TODO: finish_recovery_op(hobject_t::get_max());
}
#endif
boost::statechart::result
BackfillState::ReplicasScanning::react(ReplicaScanned evt)
{
logger().debug("{}: got scan result from osd={}, result={}",
__func__, evt.from, evt.result);
// TODO: maybe we'll be able to move waiting_on_backfill from
// the machine to the state.
ceph_assert(peering_state().is_backfill_target(evt.from));
if (waiting_on_backfill.erase(evt.from)) {
backfill_state().peer_backfill_info[evt.from] = std::move(evt.result);
if (waiting_on_backfill.empty()) {
ceph_assert(backfill_state().peer_backfill_info.size() == \
peering_state().get_backfill_targets().size());
return transit<Enqueuing>();
}
} else {
    // we canceled backfill for a while due to a too-full condition, and
    // this is an extra response from a non-too-full peer
logger().debug("{}: canceled backfill (too full?)", __func__);
}
return discard_event();
}
boost::statechart::result
BackfillState::ReplicasScanning::react(ObjectPushed evt)
{
logger().debug("ReplicasScanning::react() on ObjectPushed; evt.object={}",
evt.object);
backfill_state().progress_tracker->complete_to(evt.object, evt.stat);
return discard_event();
}
// -- Waiting
BackfillState::Waiting::Waiting(my_context ctx)
: my_base(ctx)
{
}
boost::statechart::result
BackfillState::Waiting::react(ObjectPushed evt)
{
logger().debug("Waiting::react() on ObjectPushed; evt.object={}",
evt.object);
backfill_state().progress_tracker->complete_to(evt.object, evt.stat);
if (!Enqueuing::all_enqueued(peering_state(),
backfill_state().backfill_info,
backfill_state().peer_backfill_info)) {
return transit<Enqueuing>();
} else if (backfill_state().progress_tracker->tracked_objects_completed()) {
return transit<Done>();
} else {
// we still have something to wait on
logger().debug("Waiting::react() on ObjectPushed; still waiting");
return discard_event();
}
}
// -- Done
BackfillState::Done::Done(my_context ctx)
: my_base(ctx)
{
logger().info("{}: backfill is done", __func__);
backfill_listener().backfilled();
}
// -- Crashed
BackfillState::Crashed::Crashed()
{
ceph_abort_msg("{}: this should not happen");
}
// ProgressTracker is an intermediary between the BackfillListener and
// BackfillMachine + its states. All requests to push or drop an object
// are directed through it. The same happens with notifications about
// completing given operations which are generated by BackfillListener
// and dispatched as i.e. ObjectPushed events.
// This allows ProgressTracker to track the list of in-flight operations,
// which is essential for deciding whether the entire machine should
// switch from Waiting to Done or keep waiting in Waiting.
// ProgressTracker also coordinates .last_backfill_started and stats
// updates.
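// A rough per-object lifecycle in the registry (illustrative only):
//
//   enqueue_push(obj)  -> registry[obj] = {enqueued_push, std::nullopt}
//   ObjectPushed(obj)  -> complete_to(obj, stats)
//                         registry[obj] = {completed_push, stats}
//
// complete_to() then erases the leading run of entries that are no longer
// in the enqueued_push stage, feeding their stats into
// update_complete_backfill_object_stats().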
bool BackfillState::ProgressTracker::tracked_objects_completed() const
{
return registry.empty();
}
bool BackfillState::ProgressTracker::enqueue_push(const hobject_t& obj)
{
[[maybe_unused]] const auto [it, first_seen] = registry.try_emplace(
obj, registry_item_t{op_stage_t::enqueued_push, std::nullopt});
return first_seen;
}
void BackfillState::ProgressTracker::enqueue_drop(const hobject_t& obj)
{
registry.try_emplace(
obj, registry_item_t{op_stage_t::enqueued_drop, pg_stat_t{}});
}
void BackfillState::ProgressTracker::complete_to(
const hobject_t& obj,
const pg_stat_t& stats)
{
logger().debug("{}: obj={}",
__func__, obj);
if (auto completion_iter = registry.find(obj);
completion_iter != std::end(registry)) {
completion_iter->second = \
registry_item_t{ op_stage_t::completed_push, stats };
} else {
ceph_abort_msg("completing untracked object shall not happen");
}
for (auto it = std::begin(registry);
it != std::end(registry) &&
it->second.stage != op_stage_t::enqueued_push;
it = registry.erase(it)) {
auto& [soid, item] = *it;
assert(item.stats);
peering_state().update_complete_backfill_object_stats(
soid,
*item.stats);
}
if (Enqueuing::all_enqueued(peering_state(),
backfill_state().backfill_info,
backfill_state().peer_backfill_info) &&
tracked_objects_completed()) {
backfill_state().last_backfill_started = hobject_t::get_max();
backfill_listener().update_peers_last_backfill(hobject_t::get_max());
} else {
backfill_listener().update_peers_last_backfill(obj);
}
}
} // namespace crimson::osd
| 20,224 | 35.18068 | 79 | cc |
null | ceph-main/src/crimson/osd/backfill_state.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <optional>
#include <boost/statechart/custom_reaction.hpp>
#include <boost/statechart/event.hpp>
#include <boost/statechart/event_base.hpp>
#include <boost/statechart/simple_state.hpp>
#include <boost/statechart/state.hpp>
#include <boost/statechart/state_machine.hpp>
#include <boost/statechart/transition.hpp>
#include "osd/recovery_types.h"
namespace crimson::osd {
namespace sc = boost::statechart;
struct BackfillState {
struct BackfillListener;
struct PeeringFacade;
struct PGFacade;
// events comes first
struct PrimaryScanned : sc::event<PrimaryScanned> {
BackfillInterval result;
PrimaryScanned(BackfillInterval&& result)
: result(std::move(result)) {
}
};
struct ReplicaScanned : sc::event<ReplicaScanned> {
pg_shard_t from;
BackfillInterval result;
ReplicaScanned(pg_shard_t from, BackfillInterval&& result)
: from(std::move(from)),
result(std::move(result)) {
}
};
struct ObjectPushed : sc::event<ObjectPushed> {
// TODO: implement replica management; I don't want to follow
// current convention where the backend layer is responsible
// for tracking replicas.
hobject_t object;
pg_stat_t stat;
ObjectPushed(hobject_t object)
: object(std::move(object)) {
}
};
struct Triggered : sc::event<Triggered> {
};
private:
// internal events
struct RequestPrimaryScanning : sc::event<RequestPrimaryScanning> {
};
struct RequestReplicasScanning : sc::event<RequestReplicasScanning> {
};
struct RequestWaiting : sc::event<RequestWaiting> {
};
struct RequestDone : sc::event<RequestDone> {
};
class ProgressTracker;
public:
struct Initial;
struct Enqueuing;
struct PrimaryScanning;
struct ReplicasScanning;
struct Waiting;
struct Done;
struct BackfillMachine : sc::state_machine<BackfillMachine, Initial> {
BackfillMachine(BackfillState& backfill_state,
BackfillListener& backfill_listener,
std::unique_ptr<PeeringFacade> peering_state,
std::unique_ptr<PGFacade> pg);
~BackfillMachine();
BackfillState& backfill_state;
BackfillListener& backfill_listener;
std::unique_ptr<PeeringFacade> peering_state;
std::unique_ptr<PGFacade> pg;
};
private:
template <class S>
struct StateHelper {
StateHelper();
~StateHelper();
BackfillState& backfill_state() {
return static_cast<S*>(this) \
->template context<BackfillMachine>().backfill_state;
}
BackfillListener& backfill_listener() {
return static_cast<S*>(this) \
->template context<BackfillMachine>().backfill_listener;
}
PeeringFacade& peering_state() {
return *static_cast<S*>(this) \
->template context<BackfillMachine>().peering_state;
}
PGFacade& pg() {
return *static_cast<S*>(this)->template context<BackfillMachine>().pg;
}
const PeeringFacade& peering_state() const {
return *static_cast<const S*>(this) \
->template context<BackfillMachine>().peering_state;
}
const BackfillState& backfill_state() const {
return static_cast<const S*>(this) \
->template context<BackfillMachine>().backfill_state;
}
};
public:
// states
struct Crashed : sc::simple_state<Crashed, BackfillMachine>,
StateHelper<Crashed> {
explicit Crashed();
};
struct Initial : sc::state<Initial, BackfillMachine>,
StateHelper<Initial> {
using reactions = boost::mpl::list<
sc::custom_reaction<Triggered>,
sc::transition<sc::event_base, Crashed>>;
explicit Initial(my_context);
// initialize after triggering backfill by on_activate_complete().
// transit to Enqueuing.
sc::result react(const Triggered&);
};
struct Enqueuing : sc::state<Enqueuing, BackfillMachine>,
StateHelper<Enqueuing> {
using reactions = boost::mpl::list<
sc::transition<RequestPrimaryScanning, PrimaryScanning>,
sc::transition<RequestReplicasScanning, ReplicasScanning>,
sc::transition<RequestWaiting, Waiting>,
sc::transition<RequestDone, Done>,
sc::transition<sc::event_base, Crashed>>;
explicit Enqueuing(my_context);
// indicate whether there is any remaining work to do when it comes
// to comparing the hobject_t namespace between primary and replicas.
// true doesn't necessarily mean backfill is done -- there could be
// in-flight pushes or drops which had been enqueued but aren't
// completed yet.
static bool all_enqueued(
const PeeringFacade& peering_state,
const BackfillInterval& backfill_info,
const std::map<pg_shard_t, BackfillInterval>& peer_backfill_info);
private:
void maybe_update_range();
void trim_backfill_infos();
// these methods take BackfillIntervals instead of extracting them from
// the state to emphasize the relationships across the main loop.
bool all_emptied(
const BackfillInterval& local_backfill_info,
const std::map<pg_shard_t, BackfillInterval>& peer_backfill_info) const;
hobject_t earliest_peer_backfill(
const std::map<pg_shard_t, BackfillInterval>& peer_backfill_info) const;
bool should_rescan_replicas(
const std::map<pg_shard_t, BackfillInterval>& peer_backfill_info,
const BackfillInterval& backfill_info) const;
    // indicate whether a particular acting primary needs to be scanned again
    // to process the next piece of the hobject_t namespace.
    // the logic is analogous to replica_needs_scan(). See comments there.
bool should_rescan_primary(
const std::map<pg_shard_t, BackfillInterval>& peer_backfill_info,
const BackfillInterval& backfill_info) const;
    // the result_t is an intermediary between {remove,update}_on_peers() and
// updating BackfillIntervals in trim_backfilled_object_from_intervals.
// This step is important because it affects the main loop's condition,
// and thus deserves to be exposed instead of being called deeply from
// {remove,update}_on_peers().
struct [[nodiscard]] result_t {
std::set<pg_shard_t> pbi_targets;
hobject_t new_last_backfill_started;
};
void trim_backfilled_object_from_intervals(
result_t&&,
hobject_t& last_backfill_started,
std::map<pg_shard_t, BackfillInterval>& peer_backfill_info);
result_t remove_on_peers(const hobject_t& check);
result_t update_on_peers(const hobject_t& check);
};
struct PrimaryScanning : sc::state<PrimaryScanning, BackfillMachine>,
StateHelper<PrimaryScanning> {
using reactions = boost::mpl::list<
sc::custom_reaction<ObjectPushed>,
sc::custom_reaction<PrimaryScanned>,
sc::transition<sc::event_base, Crashed>>;
explicit PrimaryScanning(my_context);
sc::result react(ObjectPushed);
// collect scanning result and transit to Enqueuing.
sc::result react(PrimaryScanned);
};
struct ReplicasScanning : sc::state<ReplicasScanning, BackfillMachine>,
StateHelper<ReplicasScanning> {
using reactions = boost::mpl::list<
sc::custom_reaction<ObjectPushed>,
sc::custom_reaction<ReplicaScanned>,
sc::transition<sc::event_base, Crashed>>;
explicit ReplicasScanning(my_context);
// collect scanning result; if all results are collected, transition
// to Enqueuing will happen.
sc::result react(ObjectPushed);
sc::result react(ReplicaScanned);
    // indicate whether a particular peer should be scanned to retrieve
    // the BackfillInterval for a new range of the hobject_t namespace.
    // true when bi.objects is exhausted, the replica bi's end is not MAX,
    // and the primary bi's begin is further than the replica's one.
static bool replica_needs_scan(
const BackfillInterval& replica_backfill_info,
const BackfillInterval& local_backfill_info);
private:
std::set<pg_shard_t> waiting_on_backfill;
};
struct Waiting : sc::state<Waiting, BackfillMachine>,
StateHelper<Waiting> {
using reactions = boost::mpl::list<
sc::custom_reaction<ObjectPushed>,
sc::transition<sc::event_base, Crashed>>;
explicit Waiting(my_context);
sc::result react(ObjectPushed);
};
struct Done : sc::state<Done, BackfillMachine>,
StateHelper<Done> {
using reactions = boost::mpl::list<
sc::transition<sc::event_base, Crashed>>;
explicit Done(my_context);
};
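  // Rough overview of the state transitions (any unlisted event moves the
  // machine to Crashed):
  //
  //   Initial --Triggered--> Enqueuing (or straight to Done if all enqueued)
  //   Enqueuing --RequestPrimaryScanning--> PrimaryScanning
  //   Enqueuing --RequestReplicasScanning--> ReplicasScanning
  //   Enqueuing --RequestWaiting--> Waiting
  //   Enqueuing --RequestDone--> Done
  //   PrimaryScanning --PrimaryScanned--> Enqueuing
  //   ReplicasScanning --ReplicaScanned (all replies in)--> Enqueuing
  //   Waiting --ObjectPushed--> Enqueuing / Done / stay in Waiting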
BackfillState(BackfillListener& backfill_listener,
std::unique_ptr<PeeringFacade> peering_state,
std::unique_ptr<PGFacade> pg);
~BackfillState();
void process_event(
boost::intrusive_ptr<const sc::event_base> evt) {
backfill_machine.process_event(*std::move(evt));
}
hobject_t get_last_backfill_started() const {
return last_backfill_started;
}
private:
hobject_t last_backfill_started;
BackfillInterval backfill_info;
std::map<pg_shard_t, BackfillInterval> peer_backfill_info;
BackfillMachine backfill_machine;
std::unique_ptr<ProgressTracker> progress_tracker;
};
// BackfillListener -- an interface used by the backfill FSM to request
// low-level services like issuing `MOSDPGPush` or `MOSDPGBackfillRemove`.
// The goals behind the interface are: 1) unittestability; 2) possibility
// to retrofit classical OSD with BackfillState. For the second reason we
// never use `seastar::future` -- instead responses to the requests are
// conveyed as events; see ObjectPushed as an example.
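// For instance, when a push completes, the completion is reported back as an
// ObjectPushed event fed to BackfillState::process_event() instead of being
// resolved through a future.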
struct BackfillState::BackfillListener {
virtual void request_replica_scan(
const pg_shard_t& target,
const hobject_t& begin,
const hobject_t& end) = 0;
virtual void request_primary_scan(
const hobject_t& begin) = 0;
virtual void enqueue_push(
const hobject_t& obj,
const eversion_t& v) = 0;
virtual void enqueue_drop(
const pg_shard_t& target,
const hobject_t& obj,
const eversion_t& v) = 0;
virtual void maybe_flush() = 0;
virtual void update_peers_last_backfill(
const hobject_t& new_last_backfill) = 0;
virtual bool budget_available() const = 0;
virtual void backfilled() = 0;
virtual ~BackfillListener() = default;
};
// PeeringFacade -- a facade (in the GoF-defined meaning) simplifying
// the interface of PeeringState. The motivation is to have an inventory
// of behaviour that must be provided by a unit test's mock.
struct BackfillState::PeeringFacade {
virtual hobject_t earliest_backfill() const = 0;
virtual const std::set<pg_shard_t>& get_backfill_targets() const = 0;
virtual const hobject_t& get_peer_last_backfill(pg_shard_t peer) const = 0;
virtual const eversion_t& get_last_update() const = 0;
virtual const eversion_t& get_log_tail() const = 0;
// the performance impact of `std::function` has not been considered yet.
// If there is any proof (from e.g. profiling) about its significance, we
// can switch back to the template variant.
using scan_log_func_t = std::function<void(const pg_log_entry_t&)>;
virtual void scan_log_after(eversion_t, scan_log_func_t) const = 0;
virtual bool is_backfill_target(pg_shard_t peer) const = 0;
virtual void update_complete_backfill_object_stats(const hobject_t &hoid,
const pg_stat_t &stats) = 0;
virtual bool is_backfilling() const = 0;
virtual ~PeeringFacade() {}
};
// PGFacade -- a facade (in the GoF-defined meaning) simplifying the huge
// interface of crimson's PG class. The motivation is to have an inventory
// of behaviour that must be provided by a unit test's mock.
struct BackfillState::PGFacade {
virtual const eversion_t& get_projected_last_update() const = 0;
virtual ~PGFacade() {}
};
class BackfillState::ProgressTracker {
// TODO: apply_stat,
enum class op_stage_t {
enqueued_push,
enqueued_drop,
completed_push,
};
struct registry_item_t {
op_stage_t stage;
std::optional<pg_stat_t> stats;
};
BackfillMachine& backfill_machine;
std::map<hobject_t, registry_item_t> registry;
BackfillState& backfill_state() {
return backfill_machine.backfill_state;
}
PeeringFacade& peering_state() {
return *backfill_machine.peering_state;
}
BackfillListener& backfill_listener() {
return backfill_machine.backfill_listener;
}
public:
ProgressTracker(BackfillMachine& backfill_machine)
: backfill_machine(backfill_machine) {
}
bool tracked_objects_completed() const;
bool enqueue_push(const hobject_t&);
void enqueue_drop(const hobject_t&);
void complete_to(const hobject_t&, const pg_stat_t&);
};
} // namespace crimson::osd
| 12,813 | 32.456919 | 78 | h |
null | ceph-main/src/crimson/osd/ec_backend.cc | #include "ec_backend.h"
#include "crimson/osd/shard_services.h"
ECBackend::ECBackend(shard_id_t shard,
ECBackend::CollectionRef coll,
crimson::osd::ShardServices& shard_services,
const ec_profile_t&,
uint64_t,
DoutPrefixProvider &dpp)
: PGBackend{shard, coll, shard_services, dpp}
{
// todo
}
ECBackend::ll_read_ierrorator::future<ceph::bufferlist>
ECBackend::_read(const hobject_t& hoid,
const uint64_t off,
const uint64_t len,
const uint32_t flags)
{
// todo
return seastar::make_ready_future<bufferlist>();
}
ECBackend::rep_op_fut_t
ECBackend::_submit_transaction(std::set<pg_shard_t>&& pg_shards,
const hobject_t& hoid,
ceph::os::Transaction&& txn,
osd_op_params_t&& osd_op_p,
epoch_t min_epoch, epoch_t max_epoch,
std::vector<pg_log_entry_t>&& log_entries)
{
// todo
return {seastar::now(),
seastar::make_ready_future<crimson::osd::acked_peers_t>()};
}
| 1,153 | 29.368421 | 68 | cc |
null | ceph-main/src/crimson/osd/ec_backend.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <boost/intrusive_ptr.hpp>
#include <seastar/core/future.hh>
#include "include/buffer_fwd.h"
#include "osd/osd_types.h"
#include "pg_backend.h"
class ECBackend : public PGBackend
{
public:
ECBackend(shard_id_t shard,
CollectionRef coll,
crimson::osd::ShardServices& shard_services,
const ec_profile_t& ec_profile,
uint64_t stripe_width,
DoutPrefixProvider &dpp);
seastar::future<> stop() final {
return seastar::now();
}
void on_actingset_changed(bool same_primary) final {}
private:
ll_read_ierrorator::future<ceph::bufferlist>
_read(const hobject_t& hoid, uint64_t off, uint64_t len, uint32_t flags) override;
rep_op_fut_t
_submit_transaction(std::set<pg_shard_t>&& pg_shards,
const hobject_t& hoid,
ceph::os::Transaction&& txn,
osd_op_params_t&& req,
epoch_t min_epoch, epoch_t max_epoch,
std::vector<pg_log_entry_t>&& log_entries) final;
CollectionRef coll;
crimson::os::FuturizedStore::Shard* store;
seastar::future<> request_committed(const osd_reqid_t& reqid,
const eversion_t& version) final {
return seastar::now();
}
};
| 1,260 | 29.02381 | 84 | h |
null | ceph-main/src/crimson/osd/exceptions.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <exception>
#include <system_error>
#include "crimson/common/errorator.h"
namespace crimson::osd {
class error : private std::system_error {
public:
error(const std::errc ec)
: system_error(std::make_error_code(ec)) {
}
using system_error::code;
using system_error::what;
friend error make_error(int ret);
private:
error(const int ret) noexcept
: system_error(ret, std::system_category()) {
}
};
inline error make_error(const int ret) {
return error{ret};
}
struct object_not_found : public error {
object_not_found() : error(std::errc::no_such_file_or_directory) {}
};
struct invalid_argument : public error {
invalid_argument() : error(std::errc::invalid_argument) {}
};
// FIXME: error handling
struct permission_denied : public error {
permission_denied() : error(std::errc::operation_not_permitted) {}
};
} // namespace crimson::osd
| 1,000 | 20.297872 | 70 | h |
null | ceph-main/src/crimson/osd/heartbeat.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "heartbeat.h"
#include <boost/range/join.hpp>
#include <fmt/chrono.h>
#include <fmt/os.h>
#include "messages/MOSDPing.h"
#include "messages/MOSDFailure.h"
#include "crimson/common/config_proxy.h"
#include "crimson/common/formatter.h"
#include "crimson/net/Connection.h"
#include "crimson/net/Messenger.h"
#include "crimson/osd/shard_services.h"
#include "crimson/mon/MonClient.h"
#include "osd/OSDMap.h"
using std::set;
using std::string;
using crimson::common::local_conf;
namespace {
seastar::logger& logger() {
return crimson::get_logger(ceph_subsys_osd);
}
}
Heartbeat::Heartbeat(osd_id_t whoami,
crimson::osd::ShardServices& service,
crimson::mon::Client& monc,
crimson::net::Messenger &front_msgr,
crimson::net::Messenger &back_msgr)
: whoami{whoami},
service{service},
monc{monc},
front_msgr{front_msgr},
back_msgr{back_msgr},
// do this in background
timer{[this] {
heartbeat_check();
(void)send_heartbeats();
}},
failing_peers{*this}
{}
seastar::future<> Heartbeat::start(entity_addrvec_t front_addrs,
entity_addrvec_t back_addrs)
{
logger().info("heartbeat: start front_addrs={}, back_addrs={}",
front_addrs, back_addrs);
// i only care about the address, so any unused port would work
for (auto& addr : boost::join(front_addrs.v, back_addrs.v)) {
addr.set_port(0);
}
using crimson::net::SocketPolicy;
front_msgr.set_policy(entity_name_t::TYPE_OSD,
SocketPolicy::lossy_client(0));
back_msgr.set_policy(entity_name_t::TYPE_OSD,
SocketPolicy::lossy_client(0));
return seastar::when_all_succeed(start_messenger(front_msgr,
front_addrs),
start_messenger(back_msgr,
back_addrs))
.then_unpack([this] {
timer.arm_periodic(
std::chrono::seconds(local_conf()->osd_heartbeat_interval));
});
}
seastar::future<>
Heartbeat::start_messenger(crimson::net::Messenger& msgr,
const entity_addrvec_t& addrs)
{
return msgr.bind(addrs).safe_then([this, &msgr]() mutable {
return msgr.start({this});
}, crimson::net::Messenger::bind_ertr::all_same_way(
[addrs] (const std::error_code& e) {
logger().error("heartbeat messenger bind({}): {}", addrs, e);
ceph_abort();
}));
}
seastar::future<> Heartbeat::stop()
{
logger().info("{}", __func__);
timer.cancel();
front_msgr.stop();
back_msgr.stop();
return gate.close().then([this] {
return seastar::when_all_succeed(front_msgr.shutdown(),
back_msgr.shutdown());
}).then_unpack([] {
return seastar::now();
});
}
const entity_addrvec_t& Heartbeat::get_front_addrs() const
{
return front_msgr.get_myaddrs();
}
const entity_addrvec_t& Heartbeat::get_back_addrs() const
{
return back_msgr.get_myaddrs();
}
crimson::net::Messenger& Heartbeat::get_front_msgr() const
{
return front_msgr;
}
crimson::net::Messenger& Heartbeat::get_back_msgr() const
{
return back_msgr;
}
void Heartbeat::add_peer(osd_id_t _peer, epoch_t epoch)
{
assert(whoami != _peer);
auto [iter, added] = peers.try_emplace(_peer, *this, _peer);
auto& peer = iter->second;
peer.set_epoch_added(epoch);
}
Heartbeat::osds_t Heartbeat::remove_down_peers()
{
osds_t old_osds; // osds not added in this epoch
for (auto i = peers.begin(); i != peers.end(); ) {
auto osdmap = service.get_map();
const auto& [osd, peer] = *i;
if (!osdmap->is_up(osd)) {
i = peers.erase(i);
} else {
if (peer.get_epoch_added() < osdmap->get_epoch()) {
old_osds.push_back(osd);
}
++i;
}
}
return old_osds;
}
void Heartbeat::add_reporter_peers(int whoami)
{
auto osdmap = service.get_map();
// include next and previous up osds to ensure we have a fully-connected set
set<int> want;
if (auto next = osdmap->get_next_up_osd_after(whoami); next >= 0) {
want.insert(next);
}
if (auto prev = osdmap->get_previous_up_osd_before(whoami); prev >= 0) {
want.insert(prev);
}
// make sure we have at least **min_down** osds coming from different
// subtree level (e.g., hosts) for fast failure detection.
auto min_down = local_conf().get_val<uint64_t>("mon_osd_min_down_reporters");
auto subtree = local_conf().get_val<string>("mon_osd_reporter_subtree_level");
osdmap->get_random_up_osds_by_subtree(
whoami, subtree, min_down, want, &want);
auto epoch = osdmap->get_epoch();
for (int osd : want) {
add_peer(osd, epoch);
};
}
void Heartbeat::update_peers(int whoami)
{
const auto min_peers = static_cast<size_t>(
local_conf().get_val<int64_t>("osd_heartbeat_min_peers"));
add_reporter_peers(whoami);
auto extra = remove_down_peers();
// too many?
for (auto& osd : extra) {
if (peers.size() <= min_peers) {
break;
}
remove_peer(osd);
}
// or too few?
auto osdmap = service.get_map();
auto epoch = osdmap->get_epoch();
for (auto next = osdmap->get_next_up_osd_after(whoami);
peers.size() < min_peers && next >= 0 && next != whoami;
next = osdmap->get_next_up_osd_after(next)) {
add_peer(next, epoch);
}
}
Heartbeat::osds_t Heartbeat::get_peers() const
{
osds_t osds;
osds.reserve(peers.size());
for (auto& peer : peers) {
osds.push_back(peer.first);
}
return osds;
}
void Heartbeat::remove_peer(osd_id_t peer)
{
assert(peers.count(peer) == 1);
peers.erase(peer);
}
std::optional<seastar::future<>>
Heartbeat::ms_dispatch(crimson::net::ConnectionRef conn, MessageRef m)
{
bool dispatched = true;
gate.dispatch_in_background(__func__, *this, [this, conn, &m, &dispatched] {
switch (m->get_type()) {
case MSG_OSD_PING:
return handle_osd_ping(conn, boost::static_pointer_cast<MOSDPing>(m));
default:
dispatched = false;
return seastar::now();
}
});
return (dispatched ? std::make_optional(seastar::now()) : std::nullopt);
}
void Heartbeat::ms_handle_reset(crimson::net::ConnectionRef conn, bool is_replace)
{
auto peer = conn->get_peer_id();
if (conn->get_peer_type() != entity_name_t::TYPE_OSD ||
peer == entity_name_t::NEW) {
return;
}
if (auto found = peers.find(peer);
found != peers.end()) {
found->second.handle_reset(conn, is_replace);
}
}
void Heartbeat::ms_handle_connect(
crimson::net::ConnectionRef conn,
seastar::shard_id new_shard)
{
ceph_assert_always(seastar::this_shard_id() == new_shard);
auto peer = conn->get_peer_id();
if (conn->get_peer_type() != entity_name_t::TYPE_OSD ||
peer == entity_name_t::NEW) {
return;
}
if (auto found = peers.find(peer);
found != peers.end()) {
found->second.handle_connect(conn);
}
}
void Heartbeat::ms_handle_accept(
crimson::net::ConnectionRef conn,
seastar::shard_id new_shard,
bool is_replace)
{
ceph_assert_always(seastar::this_shard_id() == new_shard);
auto peer = conn->get_peer_id();
if (conn->get_peer_type() != entity_name_t::TYPE_OSD ||
peer == entity_name_t::NEW) {
return;
}
if (auto found = peers.find(peer);
found != peers.end()) {
found->second.handle_accept(conn, is_replace);
}
}
seastar::future<> Heartbeat::handle_osd_ping(crimson::net::ConnectionRef conn,
Ref<MOSDPing> m)
{
switch (m->op) {
case MOSDPing::PING:
return handle_ping(conn, m);
case MOSDPing::PING_REPLY:
return handle_reply(conn, m);
case MOSDPing::YOU_DIED:
return handle_you_died();
default:
return seastar::now();
}
}
seastar::future<> Heartbeat::handle_ping(crimson::net::ConnectionRef conn,
Ref<MOSDPing> m)
{
auto min_message = static_cast<uint32_t>(
local_conf()->osd_heartbeat_min_size);
auto reply =
crimson::make_message<MOSDPing>(
m->fsid,
service.get_map()->get_epoch(),
MOSDPing::PING_REPLY,
m->ping_stamp,
m->mono_ping_stamp,
service.get_mnow(),
service.get_up_epoch(),
min_message);
return conn->send(std::move(reply)
).then([this, m, conn] {
return maybe_share_osdmap(conn, m);
});
}
seastar::future<> Heartbeat::maybe_share_osdmap(
crimson::net::ConnectionRef conn,
Ref<MOSDPing> m)
{
const osd_id_t from = m->get_source().num();
const epoch_t current_osdmap_epoch = service.get_map()->get_epoch();
auto found = peers.find(from);
if (found == peers.end()) {
return seastar::now();
}
auto& peer = found->second;
if (m->map_epoch > peer.get_projected_epoch()) {
logger().debug("{} updating peer {} session's projected_epoch"
"from {} to ping map epoch of {}",
__func__, from, peer.get_projected_epoch(),
m->map_epoch);
peer.set_projected_epoch(m->map_epoch);
}
if (current_osdmap_epoch <= peer.get_projected_epoch()) {
logger().debug("{} peer {} projected_epoch {} is already later "
"than our osdmap epoch of {}",
__func__ , from, peer.get_projected_epoch(),
current_osdmap_epoch);
return seastar::now();
}
const epoch_t send_from = peer.get_projected_epoch();
logger().debug("{} sending peer {} peer maps from projected epoch {} through "
"local osdmap epoch {}",
__func__,
from,
send_from,
current_osdmap_epoch);
peer.set_projected_epoch(current_osdmap_epoch);
return service.send_incremental_map_to_osd(from, send_from);
}
seastar::future<> Heartbeat::handle_reply(crimson::net::ConnectionRef conn,
Ref<MOSDPing> m)
{
const osd_id_t from = m->get_source().num();
auto found = peers.find(from);
if (found == peers.end()) {
// stale reply
return seastar::now();
}
auto& peer = found->second;
return peer.handle_reply(conn, m
).then([this, conn, m] {
return maybe_share_osdmap(conn, m);
});
}
seastar::future<> Heartbeat::handle_you_died()
{
// TODO: ask for newer osdmap
return seastar::now();
}
void Heartbeat::heartbeat_check()
{
failure_queue_t failure_queue;
const auto now = clock::now();
for (const auto& [osd, peer] : peers) {
auto failed_since = peer.failed_since(now);
if (!clock::is_zero(failed_since)) {
failure_queue.emplace(osd, failed_since);
}
}
if (!failure_queue.empty()) {
    // send_failures can run in background, because
    // 1. When send_failures returns, the messages may not have been
    //    sent yet, i.e. the sending operation is still in flight. This
    //    might look risky: if the OSD shuts down, the remaining part of
    //    the sending operation could reference OSD and Heartbeat
    //    instances that are already deleted. However, that remaining
    //    work holds no reference back to the OSD or Heartbeat
    //    instances, so the risk above does not apply.
    // 2. Messages are sent in order: if later checks find the
    //    previously "failed" peers to be healthy, the "still alive"
    //    messages are sent after the earlier "osd failure" messages,
    //    which is totally safe.
(void)send_failures(std::move(failure_queue));
}
}
seastar::future<> Heartbeat::send_heartbeats()
{
const auto mnow = service.get_mnow();
const auto now = clock::now();
std::vector<seastar::future<>> futures;
for (auto& [osd, peer] : peers) {
peer.send_heartbeat(now, mnow, futures);
}
return seastar::when_all_succeed(futures.begin(), futures.end());
}
seastar::future<> Heartbeat::send_failures(failure_queue_t&& failure_queue)
{
std::vector<seastar::future<>> futures;
const auto now = clock::now();
for (auto [osd, failed_since] : failure_queue) {
failing_peers.add_pending(osd, failed_since, now, futures);
}
return seastar::when_all_succeed(futures.begin(), futures.end());
}
void Heartbeat::print(std::ostream& out) const
{
out << "heartbeat";
}
Heartbeat::Connection::~Connection()
{
if (conn) {
conn->mark_down();
}
}
bool Heartbeat::Connection::matches(crimson::net::ConnectionRef _conn) const
{
return (conn && conn == _conn);
}
bool Heartbeat::Connection::accepted(
crimson::net::ConnectionRef accepted_conn,
bool is_replace)
{
ceph_assert(accepted_conn);
ceph_assert(accepted_conn != conn);
if (accepted_conn->get_peer_addr() != listener.get_peer_addr(type)) {
return false;
}
if (is_replace) {
logger().info("Heartbeat::Connection::accepted(): "
"{} racing", *this);
racing_detected = true;
}
if (conn) {
// there is no assumption about the ordering of the reset and accept
// events for the 2 racing connections.
if (is_connected) {
logger().warn("Heartbeat::Connection::accepted(): "
"{} is accepted while connected, is_replace={}",
*this, is_replace);
conn->mark_down();
set_unconnected();
}
}
conn = accepted_conn;
set_connected();
return true;
}
void Heartbeat::Connection::reset(bool is_replace)
{
if (is_replace) {
logger().info("Heartbeat::Connection::reset(): "
"{} racing, waiting for the replacing accept",
*this);
racing_detected = true;
}
if (is_connected) {
set_unconnected();
} else {
conn = nullptr;
}
if (is_replace) {
// waiting for the replacing accept event
} else if (!racing_detected || is_winner_side) {
connect();
} else { // racing_detected && !is_winner_side
logger().info("Heartbeat::Connection::reset(): "
"{} racing detected and lose, "
"waiting for peer connect me", *this);
}
}
seastar::future<> Heartbeat::Connection::send(MessageURef msg)
{
assert(is_connected);
return conn->send(std::move(msg));
}
void Heartbeat::Connection::validate()
{
assert(is_connected);
auto peer_addr = listener.get_peer_addr(type);
if (conn->get_peer_addr() != peer_addr) {
logger().info("Heartbeat::Connection::validate(): "
"{} has new address {} over {}, reset",
*this, peer_addr, conn->get_peer_addr());
conn->mark_down();
racing_detected = false;
reset();
}
}
void Heartbeat::Connection::retry()
{
racing_detected = false;
if (!is_connected) {
if (conn) {
conn->mark_down();
reset();
} else {
connect();
}
}
}
void Heartbeat::Connection::set_connected()
{
assert(conn);
assert(!is_connected);
ceph_assert(conn->is_connected());
is_connected = true;
listener.increase_connected();
}
void Heartbeat::Connection::set_unconnected()
{
assert(conn);
assert(is_connected);
conn = nullptr;
is_connected = false;
listener.decrease_connected();
}
void Heartbeat::Connection::connect()
{
assert(!conn);
auto addr = listener.get_peer_addr(type);
conn = msgr.connect(addr, entity_name_t(CEPH_ENTITY_TYPE_OSD, peer));
if (conn->is_connected()) {
set_connected();
}
}
Heartbeat::clock::time_point
Heartbeat::Session::failed_since(Heartbeat::clock::time_point now) const
{
if (do_health_screen(now) == health_state::UNHEALTHY) {
auto oldest_deadline = ping_history.begin()->second.deadline;
auto failed_since = std::min(last_rx_back, last_rx_front);
if (clock::is_zero(failed_since)) {
logger().error("Heartbeat::Session::failed_since(): no reply from osd.{} "
"ever on either front or back, first ping sent {} "
"(oldest deadline {})",
peer, first_tx, oldest_deadline);
failed_since = first_tx;
} else {
logger().error("Heartbeat::Session::failed_since(): no reply from osd.{} "
"since back {} front {} (oldest deadline {})",
peer, last_rx_back, last_rx_front, oldest_deadline);
}
return failed_since;
} else {
return clock::zero();
}
}
void Heartbeat::Session::set_inactive_history(clock::time_point now)
{
assert(!connected);
if (ping_history.empty()) {
const utime_t sent_stamp{now};
const auto deadline =
now + std::chrono::seconds(local_conf()->osd_heartbeat_grace);
ping_history.emplace(sent_stamp, reply_t{deadline, 0});
} else { // the entry is already added
assert(ping_history.size() == 1);
}
}
Heartbeat::Peer::Peer(Heartbeat& heartbeat, osd_id_t peer)
: ConnectionListener(2), heartbeat{heartbeat}, peer{peer}, session{peer},
con_front(peer, heartbeat.whoami > peer, Connection::type_t::front,
heartbeat.front_msgr, *this),
con_back(peer, heartbeat.whoami > peer, Connection::type_t::back,
heartbeat.back_msgr, *this)
{
logger().info("Heartbeat::Peer: osd.{} added", peer);
}
Heartbeat::Peer::~Peer()
{
logger().info("Heartbeat::Peer: osd.{} removed", peer);
}
void Heartbeat::Peer::send_heartbeat(
clock::time_point now, ceph::signedspan mnow,
std::vector<seastar::future<>>& futures)
{
session.set_tx(now);
if (session.is_started()) {
do_send_heartbeat(now, mnow, &futures);
for_each_conn([] (auto& conn) {
conn.validate();
});
} else {
// we should send MOSDPing but still cannot at this moment
if (pending_send) {
      // we have already been pending for an entire heartbeat interval
logger().warn("Heartbeat::Peer::send_heartbeat(): "
"heartbeat to osd.{} is still pending...", peer);
for_each_conn([] (auto& conn) {
conn.retry();
});
} else {
logger().info("Heartbeat::Peer::send_heartbeat(): "
"heartbeat to osd.{} is pending send...", peer);
session.set_inactive_history(now);
pending_send = true;
}
}
}
void Heartbeat::Peer::handle_reset(
crimson::net::ConnectionRef conn, bool is_replace)
{
int cnt = 0;
for_each_conn([&] (auto& _conn) {
if (_conn.matches(conn)) {
++cnt;
_conn.reset(is_replace);
}
});
if (cnt == 0) {
logger().info("Heartbeat::Peer::handle_reset(): {} ignores conn, is_replace={} -- {}",
*this, is_replace, *conn);
} else if (cnt > 1) {
logger().error("Heartbeat::Peer::handle_reset(): {} handles conn {} times -- {}",
*this, cnt, *conn);
}
}
void Heartbeat::Peer::handle_connect(crimson::net::ConnectionRef conn)
{
int cnt = 0;
for_each_conn([&] (auto& _conn) {
if (_conn.matches(conn)) {
++cnt;
_conn.connected();
}
});
if (cnt == 0) {
logger().error("Heartbeat::Peer::handle_connect(): {} ignores conn -- {}",
*this, *conn);
conn->mark_down();
} else if (cnt > 1) {
logger().error("Heartbeat::Peer::handle_connect(): {} handles conn {} times -- {}",
*this, cnt, *conn);
}
}
void Heartbeat::Peer::handle_accept(crimson::net::ConnectionRef conn, bool is_replace)
{
int cnt = 0;
for_each_conn([&] (auto& _conn) {
if (_conn.accepted(conn, is_replace)) {
++cnt;
}
});
if (cnt == 0) {
logger().warn("Heartbeat::Peer::handle_accept(): {} ignores conn -- {}",
*this, *conn);
} else if (cnt > 1) {
logger().error("Heartbeat::Peer::handle_accept(): {} handles conn {} times -- {}",
*this, cnt, *conn);
}
}
seastar::future<> Heartbeat::Peer::handle_reply(
crimson::net::ConnectionRef conn, Ref<MOSDPing> m)
{
if (!session.is_started()) {
// we haven't sent any ping yet
return seastar::now();
}
type_t type;
if (con_front.matches(conn)) {
type = type_t::front;
} else if (con_back.matches(conn)) {
type = type_t::back;
} else {
return seastar::now();
}
const auto now = clock::now();
if (session.on_pong(m->ping_stamp, type, now)) {
if (session.do_health_screen(now) == Session::health_state::HEALTHY) {
return heartbeat.failing_peers.cancel_one(peer);
}
}
return seastar::now();
}
entity_addr_t Heartbeat::Peer::get_peer_addr(type_t type)
{
const auto osdmap = heartbeat.service.get_map();
if (type == type_t::front) {
return osdmap->get_hb_front_addrs(peer).front();
} else {
return osdmap->get_hb_back_addrs(peer).front();
}
}
void Heartbeat::Peer::on_connected()
{
logger().info("Heartbeat::Peer: osd.{} connected (send={})",
peer, pending_send);
session.on_connected();
if (pending_send) {
pending_send = false;
do_send_heartbeat(clock::now(), heartbeat.service.get_mnow(), nullptr);
}
}
void Heartbeat::Peer::on_disconnected()
{
logger().info("Heartbeat::Peer: osd.{} disconnected", peer);
session.on_disconnected();
}
void Heartbeat::Peer::do_send_heartbeat(
Heartbeat::clock::time_point now,
ceph::signedspan mnow,
std::vector<seastar::future<>>* futures)
{
const utime_t sent_stamp{now};
const auto deadline =
now + std::chrono::seconds(local_conf()->osd_heartbeat_grace);
session.on_ping(sent_stamp, deadline);
for_each_conn([&, this] (auto& conn) {
auto min_message = static_cast<uint32_t>(
local_conf()->osd_heartbeat_min_size);
auto ping = crimson::make_message<MOSDPing>(
heartbeat.monc.get_fsid(),
heartbeat.service.get_map()->get_epoch(),
MOSDPing::PING,
sent_stamp,
mnow,
mnow,
heartbeat.service.get_up_epoch(),
min_message);
if (futures) {
futures->push_back(conn.send(std::move(ping)));
}
});
}
bool Heartbeat::FailingPeers::add_pending(
osd_id_t peer,
clock::time_point failed_since,
clock::time_point now,
std::vector<seastar::future<>>& futures)
{
if (failure_pending.count(peer)) {
return false;
}
auto failed_for = std::chrono::duration_cast<std::chrono::seconds>(
now - failed_since).count();
auto osdmap = heartbeat.service.get_map();
auto failure_report =
crimson::make_message<MOSDFailure>(heartbeat.monc.get_fsid(),
peer,
osdmap->get_addrs(peer),
static_cast<int>(failed_for),
osdmap->get_epoch());
failure_pending.emplace(peer, failure_info_t{failed_since,
osdmap->get_addrs(peer)});
futures.push_back(heartbeat.monc.send_message(std::move(failure_report)));
logger().info("{}: osd.{} failed for {}", __func__, peer, failed_for);
return true;
}
seastar::future<> Heartbeat::FailingPeers::cancel_one(osd_id_t peer)
{
if (auto pending = failure_pending.find(peer);
pending != failure_pending.end()) {
auto fut = send_still_alive(peer, pending->second.addrs);
failure_pending.erase(peer);
return fut;
}
return seastar::now();
}
seastar::future<>
Heartbeat::FailingPeers::send_still_alive(
osd_id_t osd, const entity_addrvec_t& addrs)
{
auto still_alive = crimson::make_message<MOSDFailure>(
heartbeat.monc.get_fsid(),
osd,
addrs,
0,
heartbeat.service.get_map()->get_epoch(),
MOSDFailure::FLAG_ALIVE);
logger().info("{}: osd.{}", __func__, osd);
return heartbeat.monc.send_message(std::move(still_alive));
}
| 23,284 | 27.396341 | 90 | cc |
null | ceph-main/src/crimson/osd/heartbeat.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <cstdint>
#include <seastar/core/future.hh>
#include "common/ceph_time.h"
#include "crimson/common/gated.h"
#include "crimson/net/Dispatcher.h"
#include "crimson/net/Fwd.h"
class MOSDPing;
namespace crimson::osd {
class ShardServices;
}
namespace crimson::mon {
class Client;
}
template<typename Message> using Ref = boost::intrusive_ptr<Message>;
class Heartbeat : public crimson::net::Dispatcher {
public:
using osd_id_t = int;
Heartbeat(osd_id_t whoami,
crimson::osd::ShardServices& service,
crimson::mon::Client& monc,
crimson::net::Messenger &front_msgr,
crimson::net::Messenger &back_msgr);
seastar::future<> start(entity_addrvec_t front,
entity_addrvec_t back);
seastar::future<> stop();
using osds_t = std::vector<osd_id_t>;
void add_peer(osd_id_t peer, epoch_t epoch);
void update_peers(int whoami);
void remove_peer(osd_id_t peer);
osds_t get_peers() const;
const entity_addrvec_t& get_front_addrs() const;
const entity_addrvec_t& get_back_addrs() const;
crimson::net::Messenger &get_front_msgr() const;
crimson::net::Messenger &get_back_msgr() const;
// Dispatcher methods
std::optional<seastar::future<>> ms_dispatch(
crimson::net::ConnectionRef conn, MessageRef m) override;
void ms_handle_reset(crimson::net::ConnectionRef conn, bool is_replace) override;
void ms_handle_connect(crimson::net::ConnectionRef conn, seastar::shard_id) override;
void ms_handle_accept(crimson::net::ConnectionRef conn, seastar::shard_id, bool is_replace) override;
void print(std::ostream&) const;
private:
seastar::future<> handle_osd_ping(crimson::net::ConnectionRef conn,
Ref<MOSDPing> m);
seastar::future<> handle_ping(crimson::net::ConnectionRef conn,
Ref<MOSDPing> m);
seastar::future<> handle_reply(crimson::net::ConnectionRef conn,
Ref<MOSDPing> m);
seastar::future<> handle_you_died();
/// remove down OSDs
/// @return peers not added in this epoch
osds_t remove_down_peers();
/// add enough reporters for fast failure detection
void add_reporter_peers(int whoami);
seastar::future<> start_messenger(crimson::net::Messenger& msgr,
const entity_addrvec_t& addrs);
seastar::future<> maybe_share_osdmap(crimson::net::ConnectionRef,
Ref<MOSDPing> m);
private:
const osd_id_t whoami;
crimson::osd::ShardServices& service;
crimson::mon::Client& monc;
crimson::net::Messenger &front_msgr;
crimson::net::Messenger &back_msgr;
seastar::timer<seastar::lowres_clock> timer;
// use real_clock so it can be converted to utime_t
using clock = ceph::coarse_real_clock;
class ConnectionListener;
class Connection;
class Session;
class Peer;
using peers_map_t = std::map<osd_id_t, Peer>;
peers_map_t peers;
// osds which are considered failed
// osd_id => when was the last time that both front and back pings were acked
// or sent.
  // used for calculating how long the OSD has been unresponsive
using failure_queue_t = std::map<osd_id_t, clock::time_point>;
seastar::future<> send_failures(failure_queue_t&& failure_queue);
seastar::future<> send_heartbeats();
void heartbeat_check();
  // osds we've reported to the monitor as failed, but which are not marked
  // down yet
crimson::common::Gated gate;
class FailingPeers {
public:
FailingPeers(Heartbeat& heartbeat) : heartbeat(heartbeat) {}
bool add_pending(osd_id_t peer,
clock::time_point failed_since,
clock::time_point now,
std::vector<seastar::future<>>& futures);
seastar::future<> cancel_one(osd_id_t peer);
private:
seastar::future<> send_still_alive(osd_id_t, const entity_addrvec_t&);
Heartbeat& heartbeat;
struct failure_info_t {
clock::time_point failed_since;
entity_addrvec_t addrs;
};
std::map<osd_id_t, failure_info_t> failure_pending;
} failing_peers;
};
inline std::ostream& operator<<(std::ostream& out, const Heartbeat& hb) {
hb.print(out);
return out;
}
/*
* Event driven interface for Heartbeat::Peer to be notified when both hb_front
* and hb_back are connected, or connection is lost.
*/
class Heartbeat::ConnectionListener {
public:
ConnectionListener(size_t connections) : connections{connections} {}
void increase_connected() {
assert(connected < connections);
++connected;
if (connected == connections) {
on_connected();
}
}
void decrease_connected() {
assert(connected > 0);
if (connected == connections) {
on_disconnected();
}
--connected;
}
enum class type_t { front, back };
virtual entity_addr_t get_peer_addr(type_t) = 0;
protected:
virtual void on_connected() = 0;
virtual void on_disconnected() = 0;
private:
const size_t connections;
size_t connected = 0;
};
class Heartbeat::Connection {
public:
using type_t = ConnectionListener::type_t;
Connection(osd_id_t peer, bool is_winner_side, type_t type,
crimson::net::Messenger& msgr,
ConnectionListener& listener)
: peer{peer}, type{type},
msgr{msgr}, listener{listener},
is_winner_side{is_winner_side} {
connect();
}
Connection(const Connection&) = delete;
Connection(Connection&&) = delete;
Connection& operator=(const Connection&) = delete;
Connection& operator=(Connection&&) = delete;
~Connection();
bool matches(crimson::net::ConnectionRef _conn) const;
void connected() {
set_connected();
}
bool accepted(crimson::net::ConnectionRef, bool is_replace);
void reset(bool is_replace=false);
seastar::future<> send(MessageURef msg);
void validate();
// retry connection if still pending
void retry();
private:
void set_connected();
void set_unconnected();
void connect();
const osd_id_t peer;
const type_t type;
crimson::net::Messenger& msgr;
ConnectionListener& listener;
/*
* Resolve the following racing when both me and peer are trying to connect
* each other symmetrically, under SocketPolicy::lossy_client:
*
* OSD.A OSD.B
* - -
* |-[1]----> <----[2]-|
* \ /
* \ /
* delay.. X delay..
* / \
* |-[1]x> / \ <x[2]-|
* |<-[2]--- ---[1]->|
* |(reset#1) (reset#2)|
* |(reconnectB) (reconnectA)|
* |-[2]---> <---[1]-|
* delay.. delay..
   *      (remote close propagated)
* |-[2]x> <x[1]-|
* |(reset#2) (reset#1)|
* | ... ... |
* (dead loop!)
*
   * Our solution is to remember whether such racing has happened recently, and
* establish connection asymmetrically only from the winner side whose osd-id
* is larger.
*/
const bool is_winner_side;
bool racing_detected = false;
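  // Illustrative example of the rule above (osd ids are made up): if osd.3
  // and osd.7 detect the crossed connects, both set racing_detected, but only
  // the side constructed with is_winner_side == true -- the osd with the
  // larger id, osd.7 here -- re-establishes the connection, breaking the loop.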
crimson::net::ConnectionRef conn;
bool is_connected = false;
friend std::ostream& operator<<(std::ostream& os, const Connection& c) {
if (c.type == type_t::front) {
return os << "con_front(osd." << c.peer << ")";
} else {
return os << "con_back(osd." << c.peer << ")";
}
}
};
/*
* Track the ping history and ping reply (the pong) from the same session, clean up
* history once hb_front or hb_back loses connection and restart the session once
* both connections are connected again.
*
* We cannot simply remove the entire Heartbeat::Peer once hb_front or hb_back
 * loses connection, because we would end up with the following dead loop:
*
* OSD.A OSD.B
* - -
* hb_front reset <--(network)--- hb_front close
* | ^
* | |
* remove Peer B (dead loop!) remove Peer A
* | |
* V |
* hb_back close ----(network)---> hb_back reset
*/
class Heartbeat::Session {
public:
Session(osd_id_t peer) : peer{peer} {}
void set_epoch_added(epoch_t epoch_) { epoch = epoch_; }
epoch_t get_epoch_added() const { return epoch; }
void set_projected_epoch(epoch_t epoch_) { projected_epoch = epoch_; }
epoch_t get_projected_epoch() const { return projected_epoch; }
bool is_started() const { return connected; }
bool pinged() const {
if (clock::is_zero(first_tx)) {
      // we can never receive a pong without having sent a ping first.
assert(clock::is_zero(last_rx_front) &&
clock::is_zero(last_rx_back));
return false;
} else {
return true;
}
}
enum class health_state {
UNKNOWN,
UNHEALTHY,
HEALTHY,
};
health_state do_health_screen(clock::time_point now) const {
if (!pinged()) {
      // we are neither healthy nor unhealthy because we haven't sent anything yet
return health_state::UNKNOWN;
} else if (!ping_history.empty() && ping_history.begin()->second.deadline < now) {
return health_state::UNHEALTHY;
} else if (!clock::is_zero(last_rx_front) &&
!clock::is_zero(last_rx_back)) {
      // only declare ourselves healthy once we have received the first
      // replies from both the front and back connections
return health_state::HEALTHY;
} else {
return health_state::UNKNOWN;
}
}
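  // Illustrative walk-through of the screening above (numbers are made up):
  // with osd_heartbeat_grace=20s, a ping sent at t=100s carries the deadline
  // t=120s (see Peer::do_send_heartbeat). Until any reply arrives we report
  // UNKNOWN; if t=120s passes while the entry is still in ping_history we
  // report UNHEALTHY; once both the front and back connections have replied
  // at least once and no deadline has expired we report HEALTHY.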
clock::time_point failed_since(clock::time_point now) const;
void set_tx(clock::time_point now) {
if (!pinged()) {
first_tx = now;
}
last_tx = now;
}
void on_connected() {
assert(!connected);
connected = true;
ping_history.clear();
}
void on_ping(const utime_t& sent_stamp,
const clock::time_point& deadline) {
assert(connected);
[[maybe_unused]] auto [reply, added] =
ping_history.emplace(sent_stamp, reply_t{deadline, 2});
}
bool on_pong(const utime_t& ping_stamp,
Connection::type_t type,
clock::time_point now) {
assert(connected);
auto ping = ping_history.find(ping_stamp);
if (ping == ping_history.end()) {
// old replies, deprecated by newly sent pings.
return false;
}
auto& unacked = ping->second.unacknowledged;
assert(unacked);
if (type == Connection::type_t::front) {
last_rx_front = now;
unacked--;
} else {
last_rx_back = now;
unacked--;
}
if (unacked == 0) {
ping_history.erase(ping_history.begin(), ++ping);
}
return true;
}
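  // Worked example of the bookkeeping above (timestamps are made up):
  // on_ping(T) records reply_t{deadline, 2}, since the ping is sent on both
  // the front and the back connection. The first pong for T (say, front)
  // drops unacknowledged to 1 and updates last_rx_front; the second (back)
  // drops it to 0 and erases the entry for T, together with any older,
  // now-superseded entries, from ping_history.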
void on_disconnected() {
assert(connected);
connected = false;
if (!ping_history.empty()) {
// we lost our ping_history of the last session, but still need to keep
// the oldest deadline for unhealthy check.
auto oldest = ping_history.begin();
auto sent_stamp = oldest->first;
auto deadline = oldest->second.deadline;
ping_history.clear();
ping_history.emplace(sent_stamp, reply_t{deadline, 0});
}
}
// maintain an entry in ping_history for unhealthy check
void set_inactive_history(clock::time_point);
private:
const osd_id_t peer;
bool connected = false;
// time we sent our first ping request
clock::time_point first_tx;
// last time we sent a ping request
clock::time_point last_tx;
// last time we got a ping reply on the front side
clock::time_point last_rx_front;
// last time we got a ping reply on the back side
clock::time_point last_rx_back;
// most recent epoch we wanted this peer
epoch_t epoch; // rename me to epoch_added
// epoch we expect peer to be at once our sent incrementals are processed
epoch_t projected_epoch = 0;
struct reply_t {
clock::time_point deadline;
// one sent over front conn, another sent over back conn
uint8_t unacknowledged = 0;
};
// history of inflight pings, arranging by timestamp we sent
std::map<utime_t, reply_t> ping_history;
};
class Heartbeat::Peer final : private Heartbeat::ConnectionListener {
public:
Peer(Heartbeat&, osd_id_t);
~Peer();
Peer(Peer&&) = delete;
Peer(const Peer&) = delete;
Peer& operator=(Peer&&) = delete;
Peer& operator=(const Peer&) = delete;
// set/get the epoch at which the peer was added
void set_epoch_added(epoch_t epoch) { session.set_epoch_added(epoch); }
epoch_t get_epoch_added() const { return session.get_epoch_added(); }
void set_projected_epoch(epoch_t epoch) { session.set_projected_epoch(epoch); }
epoch_t get_projected_epoch() const { return session.get_projected_epoch(); }
// if failure, return time_point since last active
// else, return clock::zero()
clock::time_point failed_since(clock::time_point now) const {
return session.failed_since(now);
}
void send_heartbeat(
clock::time_point, ceph::signedspan, std::vector<seastar::future<>>&);
seastar::future<> handle_reply(crimson::net::ConnectionRef, Ref<MOSDPing>);
void handle_reset(crimson::net::ConnectionRef conn, bool is_replace);
void handle_connect(crimson::net::ConnectionRef conn);
void handle_accept(crimson::net::ConnectionRef conn, bool is_replace);
private:
entity_addr_t get_peer_addr(type_t type) override;
void on_connected() override;
void on_disconnected() override;
void do_send_heartbeat(
clock::time_point, ceph::signedspan, std::vector<seastar::future<>>*);
template <typename Func>
void for_each_conn(Func&& f) {
f(con_front);
f(con_back);
}
Heartbeat& heartbeat;
const osd_id_t peer;
Session session;
  // whether a heartbeat needs to be sent once the session is connected
bool pending_send = false;
Connection con_front;
Connection con_back;
friend std::ostream& operator<<(std::ostream& os, const Peer& p) {
return os << "peer(osd." << p.peer << ")";
}
};
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<Heartbeat> : fmt::ostream_formatter {};
template <> struct fmt::formatter<Heartbeat::Connection> : fmt::ostream_formatter {};
template <> struct fmt::formatter<Heartbeat::Peer> : fmt::ostream_formatter {};
#endif
| 14,078 | 29.474026 | 103 | h |
null | ceph-main/src/crimson/osd/lsan_suppressions.cc | #ifndef _NDEBUG
// The callbacks we define here will be called from the sanitizer runtime, but
// aren't referenced from the executable itself. We must ensure that those
// callbacks are not sanitizer-instrumented, and that they aren't stripped by
// the linker.
#define SANITIZER_HOOK_ATTRIBUTE \
extern "C" \
__attribute__((no_sanitize("address", "thread", "undefined"))) \
__attribute__((visibility("default"))) \
__attribute__((used))
static char kLSanDefaultSuppressions[] =
"leak:InitModule\n"
"leak:MallocExtension::Initialize\n";
SANITIZER_HOOK_ATTRIBUTE const char *__lsan_default_suppressions() {
return kLSanDefaultSuppressions;
}
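// Additional known-benign leaks could be suppressed by appending entries to
// kLSanDefaultSuppressions above using the standard LSan suppression syntax,
// e.g. "leak:SomeSymbolOrPattern\n" (illustrative pattern only).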
#endif // ! _NDEBUG
| 826 | 40.35 | 78 | cc |
null | ceph-main/src/crimson/osd/main.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include <sys/types.h>
#include <unistd.h>
#include <iostream>
#include <fstream>
#include <random>
#include <seastar/core/app-template.hh>
#include <seastar/core/print.hh>
#include <seastar/core/prometheus.hh>
#include <seastar/core/thread.hh>
#include <seastar/http/httpd.hh>
#include <seastar/net/inet_address.hh>
#include <seastar/util/closeable.hh>
#include <seastar/util/defer.hh>
#include <seastar/util/std-compat.hh>
#include "auth/KeyRing.h"
#include "common/ceph_argparse.h"
#include "common/config_tracker.h"
#include "crimson/common/buffer_io.h"
#include "crimson/common/config_proxy.h"
#include "crimson/common/fatal_signal.h"
#include "crimson/mon/MonClient.h"
#include "crimson/net/Messenger.h"
#include "crimson/osd/stop_signal.h"
#include "crimson/osd/main_config_bootstrap_helpers.h"
#include "global/pidfile.h"
#include "osd.h"
using namespace std::literals;
namespace bpo = boost::program_options;
using crimson::common::local_conf;
using crimson::common::sharded_conf;
using crimson::common::sharded_perf_coll;
static seastar::logger& logger()
{
return crimson::get_logger(ceph_subsys_osd);
}
seastar::future<> make_keyring()
{
const auto path = local_conf().get_val<std::string>("keyring");
return seastar::file_exists(path).then([path](bool exists) {
KeyRing keyring;
EntityName name{local_conf()->name};
EntityAuth auth;
if (exists &&
keyring.load(nullptr, path) == 0 &&
keyring.get_auth(name, auth)) {
fmt::print(std::cerr, "already have key in keyring: {}\n", path);
return seastar::now();
} else {
CephContext temp_cct{};
auth.key.create(&temp_cct, CEPH_CRYPTO_AES);
keyring.add(name, auth);
bufferlist bl;
keyring.encode_plaintext(bl);
const auto permissions = (seastar::file_permissions::user_read |
seastar::file_permissions::user_write);
return crimson::write_file(std::move(bl), path, permissions);
}
}).handle_exception_type([path](const std::filesystem::filesystem_error& e) {
fmt::print(std::cerr, "FATAL: writing new keyring to {}: {}\n", path, e.what());
throw e;
});
}
static std::ofstream maybe_set_logger()
{
std::ofstream log_file_stream;
if (auto log_file = local_conf()->log_file; !log_file.empty()) {
log_file_stream.open(log_file, std::ios::app | std::ios::out);
try {
seastar::throw_system_error_on(log_file_stream.fail());
} catch (const std::system_error& e) {
ceph_abort_msg(fmt::format("unable to open log file: {}", e.what()));
}
logger().set_ostream(log_file_stream);
}
return log_file_stream;
}
int main(int argc, const char* argv[])
{
auto early_config_result = crimson::osd::get_early_config(argc, argv);
if (!early_config_result.has_value()) {
int r = early_config_result.error();
std::cerr << "do_early_config returned error: " << r << std::endl;
return r;
}
auto &early_config = early_config_result.value();
auto seastar_n_early_args = early_config.get_early_args();
auto config_proxy_args = early_config.get_ceph_args();
seastar::app_template::config app_cfg;
app_cfg.name = "Crimson";
app_cfg.auto_handle_sigint_sigterm = false;
seastar::app_template app(std::move(app_cfg));
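  // Illustrative invocations exercising the options added below (the id and
  // port values are examples only):
  //   crimson-osd -i 0 --mkkey --mkfs          # prepare key and data dir
  //   crimson-osd -i 0 --prometheus_port 9180  # run, exposing metrics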
app.add_options()
("mkkey", "generate a new secret key. "
"This is normally used in combination with --mkfs")
("mkfs", "create a [new] data directory")
("debug", "enable debug output on all loggers")
("trace", "enable trace output on all loggers")
("osdspec-affinity", bpo::value<std::string>()->default_value(std::string{}),
"set affinity to an osdspec")
("prometheus_port", bpo::value<uint16_t>()->default_value(0),
"Prometheus port. Set to zero to disable")
("prometheus_address", bpo::value<std::string>()->default_value("0.0.0.0"),
"Prometheus listening address")
("prometheus_prefix", bpo::value<std::string>()->default_value("osd"),
"Prometheus metrics prefix");
try {
return app.run(
seastar_n_early_args.size(),
const_cast<char**>(seastar_n_early_args.data()),
[&] {
auto& config = app.configuration();
return seastar::async([&] {
try {
FatalSignal fatal_signal;
seastar_apps_lib::stop_signal should_stop;
if (config.count("debug")) {
seastar::global_logger_registry().set_all_loggers_level(
seastar::log_level::debug
);
}
if (config.count("trace")) {
seastar::global_logger_registry().set_all_loggers_level(
seastar::log_level::trace
);
}
sharded_conf().start(
early_config.init_params.name, early_config.cluster_name).get();
local_conf().start().get();
auto stop_conf = seastar::deferred_stop(sharded_conf());
sharded_perf_coll().start().get();
auto stop_perf_coll = seastar::deferred_stop(sharded_perf_coll());
local_conf().parse_config_files(early_config.conf_file_list).get();
local_conf().parse_env().get();
local_conf().parse_argv(config_proxy_args).get();
auto log_file_stream = maybe_set_logger();
auto reset_logger = seastar::defer([] {
logger().set_ostream(std::cerr);
});
if (const auto ret = pidfile_write(local_conf()->pid_file);
ret == -EACCES || ret == -EAGAIN) {
ceph_abort_msg(
"likely there is another crimson-osd instance with the same id");
} else if (ret < 0) {
ceph_abort_msg(fmt::format("pidfile_write failed with {} {}",
ret, cpp_strerror(-ret)));
}
            // just ignore SIGHUP, we don't reread settings. Keep in mind that
            // signals handled by seastar must be blocked for alien threads
            // (see AlienStore).
seastar::engine().handle_signal(SIGHUP, [] {});
// start prometheus API server
seastar::httpd::http_server_control prom_server;
std::any stop_prometheus;
if (uint16_t prom_port = config["prometheus_port"].as<uint16_t>();
prom_port != 0) {
prom_server.start("prometheus").get();
stop_prometheus = seastar::make_shared(seastar::deferred_stop(prom_server));
seastar::prometheus::config prom_config;
prom_config.prefix = config["prometheus_prefix"].as<std::string>();
seastar::prometheus::start(prom_server, prom_config).get();
seastar::net::inet_address prom_addr(config["prometheus_address"].as<std::string>());
prom_server.listen(seastar::socket_address{prom_addr, prom_port})
.handle_exception([=] (auto ep) {
std::cerr << seastar::format("Could not start Prometheus API server on {}:{}: {}\n",
prom_addr, prom_port, ep);
return seastar::make_exception_future(ep);
}).get();
}
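          // With the server enabled above, metrics can be scraped from
          // http://<prometheus_address>:<prometheus_port>/metrics (assuming
          // seastar's default metrics route).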
const int whoami = std::stoi(local_conf()->name.get_id());
const auto nonce = crimson::osd::get_nonce();
crimson::net::MessengerRef cluster_msgr, client_msgr;
crimson::net::MessengerRef hb_front_msgr, hb_back_msgr;
for (auto [msgr, name] : {make_pair(std::ref(cluster_msgr), "cluster"s),
make_pair(std::ref(client_msgr), "client"s),
make_pair(std::ref(hb_front_msgr), "hb_front"s),
make_pair(std::ref(hb_back_msgr), "hb_back"s)}) {
msgr = crimson::net::Messenger::create(entity_name_t::OSD(whoami),
name,
nonce,
true);
}
auto store = crimson::os::FuturizedStore::create(
local_conf().get_val<std::string>("osd_objectstore"),
local_conf().get_val<std::string>("osd_data"),
local_conf().get_config_values());
crimson::osd::OSD osd(
whoami, nonce, std::ref(should_stop.abort_source()),
std::ref(*store), cluster_msgr, client_msgr,
hb_front_msgr, hb_back_msgr);
if (config.count("mkkey")) {
make_keyring().get();
}
if (local_conf()->no_mon_config) {
logger().info("bypassing the config fetch due to --no-mon-config");
} else {
crimson::osd::populate_config_from_mon().get();
}
if (config.count("mkfs")) {
auto osd_uuid = local_conf().get_val<uuid_d>("osd_uuid");
if (osd_uuid.is_zero()) {
// use a random osd uuid if not specified
osd_uuid.generate_random();
}
osd.mkfs(
*store,
whoami,
osd_uuid,
local_conf().get_val<uuid_d>("fsid"),
config["osdspec-affinity"].as<std::string>()).get();
}
if (config.count("mkkey") || config.count("mkfs")) {
return EXIT_SUCCESS;
} else {
osd.start().get();
}
logger().info("crimson startup completed");
should_stop.wait().get();
logger().info("crimson shutting down");
osd.stop().get();
// stop()s registered using defer() are called here
} catch (...) {
logger().error("startup failed: {}", std::current_exception());
return EXIT_FAILURE;
}
logger().info("crimson shutdown complete");
return EXIT_SUCCESS;
});
});
} catch (...) {
fmt::print(std::cerr, "FATAL: Exception during startup, aborting: {}\n", std::current_exception());
return EXIT_FAILURE;
}
}
/*
* Local Variables:
* compile-command: "make -j4 \
* -C ../../../build \
* crimson-osd"
* End:
*/
| 10,066 | 37.719231 | 103 | cc |
null | ceph-main/src/crimson/osd/main_config_bootstrap_helpers.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include "crimson/osd/main_config_bootstrap_helpers.h"
#include <seastar/core/print.hh>
#include <seastar/core/prometheus.hh>
#include <seastar/core/thread.hh>
#include <seastar/http/httpd.hh>
#include <seastar/net/inet_address.hh>
#include <seastar/util/closeable.hh>
#include <seastar/util/defer.hh>
#include <seastar/util/std-compat.hh>
#include "common/ceph_argparse.h"
#include "common/config_tracker.h"
#include "crimson/common/buffer_io.h"
#include "crimson/common/config_proxy.h"
#include "crimson/common/fatal_signal.h"
#include "crimson/mon/MonClient.h"
#include "crimson/net/Messenger.h"
#include "crimson/osd/main_config_bootstrap_helpers.h"
using namespace std::literals;
using crimson::common::local_conf;
using crimson::common::sharded_conf;
using crimson::common::sharded_perf_coll;
static seastar::logger& logger()
{
return crimson::get_logger(ceph_subsys_osd);
}
namespace crimson::osd {
void usage(const char* prog)
{
std::cout << "usage: " << prog << std::endl;
generic_server_usage();
}
seastar::future<> populate_config_from_mon()
{
logger().info("populating config from monitor");
  // we don't have any client before joining the cluster, so there is no need
  // for a proper auth handler
class DummyAuthHandler : public crimson::common::AuthHandler {
public:
void handle_authentication(const EntityName& name,
const AuthCapsInfo& caps)
{}
};
return seastar::async([] {
auto auth_handler = std::make_unique<DummyAuthHandler>();
auto msgr = crimson::net::Messenger::create(entity_name_t::CLIENT(),
"temp_mon_client",
get_nonce(),
true);
crimson::mon::Client monc{*msgr, *auth_handler};
msgr->set_auth_client(&monc);
msgr->start({&monc}).get();
auto stop_msgr = seastar::defer([&] {
msgr->stop();
msgr->shutdown().get();
});
monc.start().handle_exception([] (auto ep) {
fmt::print(std::cerr, "FATAL: unable to connect to cluster: {}\n", ep);
return seastar::make_exception_future<>(ep);
}).get();
auto stop_monc = seastar::defer([&] {
monc.stop().get();
});
monc.sub_want("config", 0, 0);
monc.renew_subs().get();
// wait for monmap and config
monc.wait_for_config().get();
auto fsid = monc.get_fsid().to_string();
local_conf().set_val("fsid", fsid).get();
logger().debug("{}: got config from monitor, fsid {}", __func__, fsid);
});
}
static tl::expected<early_config_t, int>
_get_early_config(int argc, const char *argv[])
{
early_config_t ret;
  // pull the ceph-specific early options (cluster name, conf file list,
  // etc.) off the command line
std::vector<const char *> early_args;
early_args.insert(
std::end(early_args),
argv, argv + argc);
ret.init_params = ceph_argparse_early_args(
early_args,
CEPH_ENTITY_TYPE_OSD,
&ret.cluster_name,
&ret.conf_file_list);
if (ceph_argparse_need_usage(early_args)) {
usage(argv[0]);
exit(0);
}
seastar::app_template::config app_cfg;
app_cfg.name = "Crimson-startup";
app_cfg.auto_handle_sigint_sigterm = false;
seastar::app_template app(std::move(app_cfg));
const char *bootstrap_args[] = { argv[0], "--smp", "1" };
int r = app.run(
sizeof(bootstrap_args) / sizeof(bootstrap_args[0]),
const_cast<char**>(bootstrap_args),
[argc, argv, &ret, &early_args] {
return seastar::async([argc, argv, &ret, &early_args] {
seastar::global_logger_registry().set_all_loggers_level(
seastar::log_level::debug);
sharded_conf().start(
ret.init_params.name, ret.cluster_name).get();
local_conf().start().get();
auto stop_conf = seastar::deferred_stop(sharded_conf());
sharded_perf_coll().start().get();
auto stop_perf_coll = seastar::deferred_stop(sharded_perf_coll());
local_conf().parse_env().get();
local_conf().parse_argv(early_args).get();
local_conf().parse_config_files(ret.conf_file_list).get();
if (local_conf()->no_mon_config) {
logger().info("bypassing the config fetch due to --no-mon-config");
} else {
populate_config_from_mon().get();
}
// get ceph configs
std::set_difference(
argv, argv + argc,
std::begin(early_args),
std::end(early_args),
std::back_inserter(ret.ceph_args));
ret.early_args.insert(
std::end(ret.early_args),
std::begin(early_args),
std::end(early_args));
if (auto found = std::find_if(
std::begin(early_args),
std::end(early_args),
[](auto* arg) { return "--smp"sv == arg; });
found == std::end(early_args)) {
// Set --smp based on crimson_seastar_smp config option
ret.early_args.emplace_back("--smp");
auto smp_config = local_conf().get_val<uint64_t>(
"crimson_seastar_smp");
ret.early_args.emplace_back(fmt::format("{}", smp_config));
logger().info("get_early_config: set --smp {}", smp_config);
}
return 0;
});
});
if (r < 0) {
return tl::unexpected(r);
}
return ret;
}
/* get_early_config handles obtaining config parameters required prior
* to reactor startup. Most deployment mechanisms (cephadm for one)
* rely on pulling configs from the monitor rather than shipping around
* config files, so this process needs to support pulling config options
* from the monitors.
*
* Of particular interest are config params related to the seastar
* reactor itself which can't be modified after the reactor has been
* started -- like the number of cores to use (smp::count). Contacting
* the monitors, however, requires a MonClient, which in turn needs a
* running reactor.
*
* Unfortunately, seastar doesn't clean up thread local state
* associated with seastar::smp task queues etc, so we can't
* start a reactor, stop it, and restart it in the same thread
* without an impractical amount of cleanup in seastar.
*
 * More unfortunately, starting a reactor in a separate thread
 * and then joining the thread still doesn't avoid all global state;
 * I observed tasks from the previous reactor incarnation nevertheless
 * continuing to run in the new one, resulting in a crash as they access
 * freed memory.
*
* The approach taken here, therefore, is to actually fork, start a
* reactor in the child process, encode the resulting early_config_t,
* and send it back to the parent process.
*/
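/*
 * Concretely, the flow implemented below is (sketch only, no additional API):
 *   1. the parent creates a pipe with pipe2() and forks;
 *   2. the child runs _get_early_config() under its own single-core reactor,
 *      encodes the resulting early_config_t and writes it to the pipe;
 *   3. the parent reads the blob, waits for the child (ignoring its exit
 *      status) and decodes early_config_t, reporting errors based on the
 *      read/decode results instead.
 */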
tl::expected<early_config_t, int>
get_early_config(int argc, const char *argv[])
{
int pipes[2];
int r = pipe2(pipes, 0);
if (r < 0) {
std::cerr << "get_early_config: failed to create pipes: "
<< -errno << std::endl;
return tl::unexpected(-errno);
}
pid_t worker = fork();
if (worker < 0) {
close(pipes[0]);
close(pipes[1]);
std::cerr << "get_early_config: failed to fork: "
<< -errno << std::endl;
return tl::unexpected(-errno);
} else if (worker == 0) { // child
close(pipes[0]);
auto ret = _get_early_config(argc, argv);
if (ret.has_value()) {
bufferlist bl;
::encode(ret.value(), bl);
r = bl.write_fd(pipes[1]);
close(pipes[1]);
if (r < 0) {
std::cerr << "get_early_config: child failed to write_fd: "
<< r << std::endl;
exit(-r);
} else {
exit(0);
}
} else {
std::cerr << "get_early_config: child failed: "
<< -ret.error() << std::endl;
exit(-ret.error());
}
return tl::unexpected(-1);
} else { // parent
close(pipes[1]);
bufferlist bl;
early_config_t ret;
while ((r = bl.read_fd(pipes[0], 1024)) > 0);
close(pipes[0]);
    // ignore the error, we'll propagate errors based on the read and decode
waitpid(worker, nullptr, 0);
if (r < 0) {
std::cerr << "get_early_config: parent failed to read from pipe: "
<< r << std::endl;
return tl::unexpected(r);
}
try {
auto bliter = bl.cbegin();
::decode(ret, bliter);
return ret;
} catch (...) {
std::cerr << "get_early_config: parent failed to decode" << std::endl;
return tl::unexpected(-EINVAL);
}
}
}
}
| 8,135 | 29.586466 | 77 | cc |
null | ceph-main/src/crimson/osd/main_config_bootstrap_helpers.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <sys/types.h>
#include <unistd.h>
#include <iostream>
#include <fstream>
#include <random>
#include <seastar/core/future.hh>
#include "common/ceph_argparse.h"
#include "include/expected.hpp"
#include "include/random.h"
namespace crimson::osd {
void usage(const char* prog);
inline uint64_t get_nonce()
{
return ceph::util::generate_random_number<uint64_t>();
}
seastar::future<> populate_config_from_mon();
struct early_config_t {
std::vector<std::string> early_args;
std::vector<std::string> ceph_args;
std::string cluster_name{"ceph"};
std::string conf_file_list;
CephInitParameters init_params{CEPH_ENTITY_TYPE_OSD};
/// Returned vector must not outlive in
auto to_ptr_vector(const std::vector<std::string> &in) {
std::vector<const char *> ret;
ret.reserve(in.size());
std::transform(
std::begin(in), std::end(in),
std::back_inserter(ret),
[](const auto &str) { return str.c_str(); });
return ret;
}
std::vector<const char *> get_early_args() {
return to_ptr_vector(early_args);
}
std::vector<const char *> get_ceph_args() {
return to_ptr_vector(ceph_args);
}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(early_args, bl);
encode(ceph_args, bl);
encode(cluster_name, bl);
encode(conf_file_list, bl);
encode(init_params, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(early_args, bl);
decode(ceph_args, bl);
decode(cluster_name, bl);
decode(conf_file_list, bl);
decode(init_params, bl);
DECODE_FINISH(bl);
}
};
/**
* get_early_config
*
* Compile initial configuration information from command line arguments,
* config files, and monitors.
*
* This implementation forks off a worker process to do this work and must
* therefore be called very early in main(). (See implementation for an
* explanation).
*/
tl::expected<early_config_t, int>
get_early_config(int argc, const char *argv[]);
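//
// Typical call pattern, mirroring main() (sketch only; error handling and the
// seastar wiring are elided):
//
//   auto early = crimson::osd::get_early_config(argc, argv);
//   if (!early.has_value()) {
//     return early.error();
//   }
//   auto seastar_args = early->get_early_args();  // handed to seastar
//   auto ceph_args = early->get_ceph_args();      // handed to the ConfigProxy
//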
}
WRITE_CLASS_ENCODER(crimson::osd::early_config_t)
| 2,214 | 22.56383 | 74 | h |
null | ceph-main/src/crimson/osd/objclass.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <cstdarg>
#include <cstring>
#include <boost/container/small_vector.hpp>
#include "common/ceph_context.h"
#include "common/ceph_releases.h"
#include "common/config.h"
#include "crimson/common/config_proxy.h"
#include "common/debug.h"
#include "crimson/osd/exceptions.h"
#include "crimson/osd/ops_executer.h"
#include "crimson/osd/pg_backend.h"
#include "objclass/objclass.h"
#include "osd/ClassHandler.h"
#include "auth/Crypto.h"
#include "common/armor.h"
using std::map;
using std::string;
#define dout_context ClassHandler::get_instance().cct
static constexpr int dout_subsys = ceph_subsys_objclass;
static inline int execute_osd_op(cls_method_context_t hctx, OSDOp& op)
{
  // we can expect the memory under `ret` to still be valid after
  // executing the osd op, as we're running inside the `seastar::thread`
  // created for us by `seastar::async` in `::do_op_call()`.
int ret = 0;
using osd_op_errorator = crimson::osd::OpsExecuter::osd_op_errorator;
reinterpret_cast<crimson::osd::OpsExecuter*>(hctx)->execute_op(op)
.handle_error_interruptible(
osd_op_errorator::all_same_way([&ret] (const std::error_code& err) {
assert(err.value() > 0);
ret = -err.value();
return seastar::now();
})).get(); // we're blocking here which requires `seastar::thread`.
return ret;
}
int cls_call(cls_method_context_t hctx, const char *cls, const char *method,
char *indata, int datalen,
char **outdata, int *outdatalen)
{
  // FIXME, HACK: this is for testing only. Let's use the dynamic linker to
  // verify our dependencies
return 0;
}
int cls_getxattr(cls_method_context_t hctx,
const char *name,
char **outdata,
int *outdatalen)
{
return 0;
}
int cls_setxattr(cls_method_context_t hctx,
const char *name,
const char *value,
int val_len)
{
return 0;
}
int cls_read(cls_method_context_t hctx,
int ofs, int len,
char **outdata,
int *outdatalen)
{
return 0;
}
int cls_get_request_origin(cls_method_context_t hctx, entity_inst_t *origin)
{
assert(origin);
try {
const auto& message = \
reinterpret_cast<crimson::osd::OpsExecuter*>(hctx)->get_message();
*origin = message.get_orig_source_inst();
return 0;
} catch (crimson::osd::error& e) {
return -e.code().value();
}
}
int cls_cxx_create(cls_method_context_t hctx, const bool exclusive)
{
OSDOp op{CEPH_OSD_OP_CREATE};
op.op.flags = (exclusive ? CEPH_OSD_OP_FLAG_EXCL : 0);
return execute_osd_op(hctx, op);
}
int cls_cxx_remove(cls_method_context_t hctx)
{
OSDOp op{CEPH_OSD_OP_DELETE};
return execute_osd_op(hctx, op);
}
int cls_cxx_stat(cls_method_context_t hctx, uint64_t *size, time_t *mtime)
{
OSDOp op{CEPH_OSD_OP_STAT};
if (const auto ret = execute_osd_op(hctx, op); ret < 0) {
return ret;
}
utime_t ut;
uint64_t s;
try {
auto iter = op.outdata.cbegin();
decode(s, iter);
decode(ut, iter);
} catch (buffer::error& err) {
return -EIO;
}
if (size) {
*size = s;
}
if (mtime) {
*mtime = ut.sec();
}
return 0;
}
int cls_cxx_stat2(cls_method_context_t hctx,
uint64_t *size,
ceph::real_time *mtime)
{
OSDOp op{CEPH_OSD_OP_STAT};
if (const int ret = execute_osd_op(hctx, op); ret < 0) {
return ret;
}
uint64_t dummy_size;
real_time dummy_mtime;
uint64_t& out_size = size ? *size : dummy_size;
real_time& out_mtime = mtime ? *mtime : dummy_mtime;
try {
auto iter = op.outdata.cbegin();
decode(out_size, iter);
decode(out_mtime, iter);
return 0;
} catch (buffer::error& err) {
return -EIO;
}
}
int cls_cxx_read2(cls_method_context_t hctx,
int ofs,
int len,
bufferlist *outbl,
uint32_t op_flags)
{
OSDOp op{CEPH_OSD_OP_SYNC_READ};
op.op.extent.offset = ofs;
op.op.extent.length = len;
op.op.flags = op_flags;
if (const auto ret = execute_osd_op(hctx, op); ret < 0) {
return ret;
}
*outbl = std::move(op.outdata);
return outbl->length();
}
int cls_cxx_write2(cls_method_context_t hctx,
int ofs,
int len,
bufferlist *inbl,
uint32_t op_flags)
{
OSDOp op{CEPH_OSD_OP_WRITE};
op.op.extent.offset = ofs;
op.op.extent.length = len;
op.op.flags = op_flags;
op.indata = *inbl;
return execute_osd_op(hctx, op);
}
int cls_cxx_write_full(cls_method_context_t hctx, bufferlist * const inbl)
{
OSDOp op{CEPH_OSD_OP_WRITEFULL};
op.op.extent.offset = 0;
op.op.extent.length = inbl->length();
op.indata = *inbl;
return execute_osd_op(hctx, op);
}
int cls_cxx_replace(cls_method_context_t hctx,
int ofs,
int len,
bufferlist *inbl)
{
{
OSDOp top{CEPH_OSD_OP_TRUNCATE};
top.op.extent.offset = 0;
top.op.extent.length = 0;
if (const auto ret = execute_osd_op(hctx, top); ret < 0) {
return ret;
}
}
{
OSDOp wop{CEPH_OSD_OP_WRITE};
wop.op.extent.offset = ofs;
wop.op.extent.length = len;
wop.indata = *inbl;
if (const auto ret = execute_osd_op(hctx, wop); ret < 0) {
return ret;
}
}
return 0;
}
int cls_cxx_truncate(cls_method_context_t hctx, int ofs)
{
OSDOp op{CEPH_OSD_OP_TRUNCATE};
op.op.extent.offset = ofs;
op.op.extent.length = 0;
return execute_osd_op(hctx, op);
}
int cls_cxx_write_zero(cls_method_context_t hctx, int offset, int len)
{
OSDOp op{CEPH_OSD_OP_ZERO};
op.op.extent.offset = offset;
op.op.extent.length = len;
return execute_osd_op(hctx, op);
}
int cls_cxx_getxattr(cls_method_context_t hctx,
const char *name,
bufferlist *outbl)
{
OSDOp op{CEPH_OSD_OP_GETXATTR};
op.op.xattr.name_len = strlen(name);
op.indata.append(name, op.op.xattr.name_len);
if (const auto ret = execute_osd_op(hctx, op); ret < 0) {
return ret;
}
*outbl = std::move(op.outdata);
return outbl->length();
}
int cls_cxx_getxattrs(cls_method_context_t hctx,
map<string, bufferlist> *attrset)
{
OSDOp op{CEPH_OSD_OP_GETXATTRS};
if (const int ret = execute_osd_op(hctx, op); ret < 0) {
return ret;
}
try {
auto iter = op.outdata.cbegin();
decode(*attrset, iter);
} catch (buffer::error& err) {
return -EIO;
}
return 0;
}
int cls_cxx_setxattr(cls_method_context_t hctx,
const char *name,
bufferlist *inbl)
{
OSDOp op{CEPH_OSD_OP_SETXATTR};
op.op.xattr.name_len = std::strlen(name);
op.op.xattr.value_len = inbl->length();
op.indata.append(name, op.op.xattr.name_len);
op.indata.append(*inbl);
return execute_osd_op(hctx, op);
}
int cls_cxx_snap_revert(cls_method_context_t hctx, snapid_t snapid)
{
OSDOp op{CEPH_OSD_OP_ROLLBACK};
op.op.snap.snapid = snapid;
return execute_osd_op(hctx, op);
}
int cls_cxx_map_get_all_vals(cls_method_context_t hctx,
map<string, bufferlist>* vals,
bool *more)
{
return 0;
}
int cls_cxx_map_get_keys(cls_method_context_t hctx,
const std::string& start_obj,
const uint64_t max_to_get,
std::set<std::string>* const keys,
bool* const more)
{
OSDOp op{CEPH_OSD_OP_OMAPGETKEYS};
encode(start_obj, op.indata);
encode(max_to_get, op.indata);
if (const auto ret = execute_osd_op(hctx, op); ret < 0) {
return ret;
}
try {
auto iter = op.outdata.cbegin();
decode(*keys, iter);
decode(*more, iter);
} catch (buffer::error&) {
return -EIO;
}
return keys->size();
}
int cls_cxx_map_get_vals(cls_method_context_t hctx,
const std::string& start_obj,
const std::string& filter_prefix,
const uint64_t max_to_get,
std::map<std::string, ceph::bufferlist> *vals,
bool* const more)
{
OSDOp op{CEPH_OSD_OP_OMAPGETVALS};
encode(start_obj, op.indata);
encode(max_to_get, op.indata);
encode(filter_prefix, op.indata);
if (const auto ret = execute_osd_op(hctx, op); ret < 0) {
return ret;
}
try {
auto iter = op.outdata.cbegin();
decode(*vals, iter);
decode(*more, iter);
} catch (buffer::error&) {
return -EIO;
}
return vals->size();
}
int cls_cxx_map_get_vals_by_keys(cls_method_context_t hctx,
const std::set<std::string> &keys,
std::map<std::string, ceph::bufferlist> *vals)
{
OSDOp op{CEPH_OSD_OP_OMAPGETVALSBYKEYS};
encode(keys, op.indata);
if (const auto ret = execute_osd_op(hctx, op); ret < 0) {
return ret;
}
try {
auto iter = op.outdata.cbegin();
decode(*vals, iter);
} catch (buffer::error&) {
return -EIO;
}
return 0;
}
int cls_cxx_map_read_header(cls_method_context_t hctx, bufferlist *outbl)
{
OSDOp op{CEPH_OSD_OP_OMAPGETHEADER};
if (const auto ret = execute_osd_op(hctx, op); ret < 0) {
return ret;
}
*outbl = std::move(op.outdata);
return 0;
}
int cls_cxx_map_get_val(cls_method_context_t hctx,
const string &key,
bufferlist *outbl)
{
OSDOp op{CEPH_OSD_OP_OMAPGETVALSBYKEYS};
{
std::set<std::string> k{key};
encode(k, op.indata);
}
if (const auto ret = execute_osd_op(hctx, op); ret < 0) {
return ret;
}
std::map<std::string, ceph::bufferlist> m;
try {
auto iter = op.outdata.cbegin();
decode(m, iter);
} catch (buffer::error&) {
return -EIO;
}
if (auto iter = std::begin(m); iter != std::end(m)) {
*outbl = std::move(iter->second);
return 0;
} else {
return -ENOENT;
}
}
int cls_cxx_map_set_val(cls_method_context_t hctx,
const string &key,
bufferlist *inbl)
{
OSDOp op{CEPH_OSD_OP_OMAPSETVALS};
{
std::map<std::string, ceph::bufferlist> m;
m[key] = *inbl;
encode(m, op.indata);
}
return execute_osd_op(hctx, op);
}
int cls_cxx_map_set_vals(cls_method_context_t hctx,
const std::map<string, ceph::bufferlist> *map)
{
OSDOp op{CEPH_OSD_OP_OMAPSETVALS};
encode(*map, op.indata);
return execute_osd_op(hctx, op);
}
int cls_cxx_map_clear(cls_method_context_t hctx)
{
OSDOp op{CEPH_OSD_OP_OMAPCLEAR};
return execute_osd_op(hctx, op);
}
int cls_cxx_map_write_header(cls_method_context_t hctx, bufferlist *inbl)
{
OSDOp op{CEPH_OSD_OP_OMAPSETHEADER};
op.indata = std::move(*inbl);
return execute_osd_op(hctx, op);
}
int cls_cxx_map_remove_range(cls_method_context_t hctx,
const std::string& key_begin,
const std::string& key_end)
{
OSDOp op{CEPH_OSD_OP_OMAPRMKEYRANGE};
encode(key_begin, op.indata);
encode(key_end, op.indata);
return execute_osd_op(hctx, op);
}
int cls_cxx_map_remove_key(cls_method_context_t hctx, const string &key)
{
OSDOp op{CEPH_OSD_OP_OMAPRMKEYS};
std::vector<string> to_rm{key};
encode(to_rm, op.indata);
return execute_osd_op(hctx, op);
}
int cls_cxx_list_watchers(cls_method_context_t hctx,
obj_list_watch_response_t *watchers)
{
OSDOp op{CEPH_OSD_OP_LIST_WATCHERS};
if (const auto ret = execute_osd_op(hctx, op); ret < 0) {
return ret;
}
try {
auto iter = op.outdata.cbegin();
decode(*watchers, iter);
} catch (buffer::error&) {
return -EIO;
}
return 0;
}
uint64_t cls_current_version(cls_method_context_t hctx)
{
auto* ox = reinterpret_cast<crimson::osd::OpsExecuter*>(hctx);
return ox->get_last_user_version();
}
int cls_current_subop_num(cls_method_context_t hctx)
{
auto* ox = reinterpret_cast<crimson::osd::OpsExecuter*>(hctx);
  // in contrast to the classical OSD, crimson doesn't count OP_CALL and
  // OP_STAT, which seems fine given how the plugins we care about use
  // this part of the API.
return ox->get_processed_rw_ops_num();
}
uint64_t cls_get_features(cls_method_context_t hctx)
{
return 0;
}
uint64_t cls_get_client_features(cls_method_context_t hctx)
{
try {
const auto& message = \
reinterpret_cast<crimson::osd::OpsExecuter*>(hctx)->get_message();
return message.get_features();
} catch (crimson::osd::error& e) {
return -e.code().value();
}
}
uint64_t cls_get_pool_stripe_width(cls_method_context_t hctx)
{
auto* ox = reinterpret_cast<crimson::osd::OpsExecuter*>(hctx);
return ox->get_pool_stripe_width();
}
ceph_release_t cls_get_required_osd_release(cls_method_context_t hctx)
{
// FIXME
return ceph_release_t::nautilus;
}
ceph_release_t cls_get_min_compatible_client(cls_method_context_t hctx)
{
// FIXME
return ceph_release_t::nautilus;
}
const ConfigProxy& cls_get_config(cls_method_context_t hctx)
{
return crimson::common::local_conf();
}
const object_info_t& cls_get_object_info(cls_method_context_t hctx)
{
return reinterpret_cast<crimson::osd::OpsExecuter*>(hctx)->get_object_info();
}
int cls_get_snapset_seq(cls_method_context_t hctx, uint64_t *snap_seq)
{
auto* ox = reinterpret_cast<crimson::osd::OpsExecuter*>(hctx);
auto obc = ox->get_obc();
if (!obc->obs.exists ||
(obc->obs.oi.is_whiteout() &&
obc->ssc->snapset.clones.empty())) {
return -ENOENT;
}
*snap_seq = obc->ssc->snapset.seq;
return 0;
}
int cls_cxx_chunk_write_and_set(cls_method_context_t hctx,
int ofs,
int len,
bufferlist *write_inbl,
uint32_t op_flags,
bufferlist *set_inbl,
int set_len)
{
return 0;
}
int cls_get_manifest_ref_count(cls_method_context_t hctx, string fp_oid)
{
return 0;
}
uint64_t cls_get_osd_min_alloc_size(cls_method_context_t hctx) {
// FIXME
return 4096;
}
int cls_cxx_gather(cls_method_context_t hctx, const std::set<std::string> &src_objs, const std::string& pool,
const char *cls, const char *method, bufferlist& inbl)
{
return 0;
}
int cls_cxx_get_gathered_data(cls_method_context_t hctx, std::map<std::string, bufferlist> *results)
{
return 0;
}
// although at first glance the implementation looks the same as in
// the classical OSD, it's different because of how the dout macro expands.
int cls_log(int level, const char *format, ...)
{
size_t size = 256;
va_list ap;
while (1) {
boost::container::small_vector<char, 256> buf(size);
va_start(ap, format);
int n = vsnprintf(buf.data(), size, format, ap);
va_end(ap);
#define MAX_SIZE 8196UL
if ((n > -1 && static_cast<size_t>(n) < size) || size > MAX_SIZE) {
dout(ceph::dout::need_dynamic(level)) << buf.data() << dendl;
return n;
}
size *= 2;
}
}
| 15,070 | 24.762393 | 109 | cc |
null | ceph-main/src/crimson/osd/object_context.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "crimson/osd/object_context.h"
#include <fmt/ranges.h>
#include "common/Formatter.h"
#include "crimson/common/config_proxy.h"
namespace {
seastar::logger& logger() {
return crimson::get_logger(ceph_subsys_osd);
}
}
namespace crimson::osd {
ObjectContextRegistry::ObjectContextRegistry(crimson::common::ConfigProxy &conf)
{
obc_lru.set_target_size(conf.get_val<uint64_t>("crimson_osd_obc_lru_size"));
conf.add_observer(this);
}
ObjectContextRegistry::~ObjectContextRegistry()
{
  // purge the cache to avoid leaks and complaints from LSan
obc_lru.set_target_size(0UL);
}
const char** ObjectContextRegistry::get_tracked_conf_keys() const
{
static const char* KEYS[] = {
"crimson_osd_obc_lru_size",
nullptr
};
return KEYS;
}
void ObjectContextRegistry::handle_conf_change(
const crimson::common::ConfigProxy& conf,
const std::set <std::string> &changed)
{
obc_lru.set_target_size(conf.get_val<uint64_t>("crimson_osd_obc_lru_size"));
}
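// A worked example of the resolution implemented below (SnapSet contents are
// made up): clones=[2,4], seq=4, clone_snaps={2:[2,1], 4:[4,3]}.
// - reading snap 5 (> seq) resolves to the head object;
// - reading snap 3 lower-bounds to clone 4, and since 3 is listed among
//   clone 4's snaps the result is the oid with snap=4;
// - if clone 4's snaps were just [4], reading snap 3 would return
//   std::nullopt, i.e. the object did not exist at that snap.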
std::optional<hobject_t> resolve_oid(
const SnapSet &ss,
const hobject_t &oid)
{
logger().debug("{} oid.snap={},head snapset.seq={}",
__func__, oid.snap, ss.seq);
if (oid.snap > ss.seq) {
// Because oid.snap > ss.seq, we are trying to read from a snapshot
// taken after the most recent write to this object. Read from head.
return oid.get_head();
} else {
// which clone would it be?
auto clone = std::lower_bound(
begin(ss.clones), end(ss.clones),
oid.snap);
if (clone == end(ss.clones)) {
// Doesn't exist, > last clone, < ss.seq
return std::nullopt;
}
auto citer = ss.clone_snaps.find(*clone);
// TODO: how do we want to handle this kind of logic error?
ceph_assert(citer != ss.clone_snaps.end());
if (std::find(
citer->second.begin(),
citer->second.end(),
oid.snap) == citer->second.end()) {
logger().debug("{} {} does not contain {} -- DNE",
__func__, ss.clone_snaps, oid.snap);
return std::nullopt;
} else {
auto soid = oid;
soid.snap = *clone;
return std::optional<hobject_t>(soid);
}
}
}
}
| 2,260 | 25.290698 | 80 | cc |
null | ceph-main/src/crimson/osd/object_context.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <map>
#include <optional>
#include <utility>
#include <seastar/core/shared_future.hh>
#include <seastar/core/shared_ptr.hh>
#include "common/intrusive_lru.h"
#include "osd/object_state.h"
#include "crimson/common/exception.h"
#include "crimson/common/tri_mutex.h"
#include "crimson/osd/osd_operation.h"
namespace ceph {
class Formatter;
}
namespace crimson::common {
class ConfigProxy;
}
namespace crimson::osd {
class Watch;
struct SnapSetContext;
using SnapSetContextRef = boost::intrusive_ptr<SnapSetContext>;
template <typename OBC>
struct obc_to_hoid {
using type = hobject_t;
const type &operator()(const OBC &obc) {
return obc.obs.oi.soid;
}
};
struct SnapSetContext :
public boost::intrusive_ref_counter<SnapSetContext,
boost::thread_unsafe_counter>
{
hobject_t oid;
SnapSet snapset;
bool exists = false;
/**
* exists
*
   * Because ObjectContexts are cached, we need to be able to express the case
   * where the object to which a cached ObjectContext refers does not exist.
   * ObjectContexts for yet-to-be-created objects are initialized with exists=false.
* The ObjectContext for a deleted object will have exists set to false until it falls
* out of cache (or another write recreates the object).
*/
explicit SnapSetContext(const hobject_t& o) :
oid(o), exists(false) {}
};
class ObjectContext : public ceph::common::intrusive_lru_base<
ceph::common::intrusive_lru_config<
hobject_t, ObjectContext, obc_to_hoid<ObjectContext>>>
{
public:
ObjectState obs;
SnapSetContextRef ssc;
  // the watch / notify machinery stays away from the hot, frequently
  // used paths. std::map is used mostly for developer convenience.
using watch_key_t = std::pair<uint64_t, entity_name_t>;
std::map<watch_key_t, seastar::shared_ptr<crimson::osd::Watch>> watchers;
ObjectContext(hobject_t hoid) : obs(std::move(hoid)) {}
const hobject_t &get_oid() const {
return obs.oi.soid;
}
bool is_head() const {
return get_oid().is_head();
}
hobject_t get_head_oid() const {
return get_oid().get_head();
}
const SnapSet &get_head_ss() const {
ceph_assert(is_head());
ceph_assert(ssc);
return ssc->snapset;
}
void set_head_state(ObjectState &&_obs, SnapSetContextRef &&_ssc) {
ceph_assert(is_head());
obs = std::move(_obs);
ssc = std::move(_ssc);
}
void set_clone_state(ObjectState &&_obs) {
ceph_assert(!is_head());
obs = std::move(_obs);
}
/// pass the provided exception to any waiting consumers of this ObjectContext
template<typename Exception>
void interrupt(Exception ex) {
lock.abort(std::move(ex));
if (recovery_read_marker) {
drop_recovery_read();
}
}
private:
tri_mutex lock;
bool recovery_read_marker = false;
template <typename Lock, typename Func>
auto _with_lock(Lock&& lock, Func&& func) {
Ref obc = this;
return lock.lock().then([&lock, func = std::forward<Func>(func), obc]() mutable {
return seastar::futurize_invoke(func).finally([&lock, obc] {
lock.unlock();
});
});
}
boost::intrusive::list_member_hook<> list_hook;
uint64_t list_link_cnt = 0;
public:
template <typename ListType>
void append_to(ListType& list) {
if (list_link_cnt++ == 0) {
list.push_back(*this);
}
}
template <typename ListType>
void remove_from(ListType&& list) {
assert(list_link_cnt > 0);
if (--list_link_cnt == 0) {
list.erase(std::decay_t<ListType>::s_iterator_to(*this));
}
}
using obc_accessing_option_t = boost::intrusive::member_hook<
ObjectContext,
boost::intrusive::list_member_hook<>,
&ObjectContext::list_hook>;
template<RWState::State Type, typename InterruptCond = void, typename Func>
auto with_lock(Func&& func) {
if constexpr (!std::is_void_v<InterruptCond>) {
auto wrapper = ::crimson::interruptible::interruptor<InterruptCond>::wrap_function(std::forward<Func>(func));
switch (Type) {
case RWState::RWWRITE:
return _with_lock(lock.for_write(), std::move(wrapper));
case RWState::RWREAD:
return _with_lock(lock.for_read(), std::move(wrapper));
case RWState::RWEXCL:
return _with_lock(lock.for_excl(), std::move(wrapper));
case RWState::RWNONE:
return seastar::futurize_invoke(std::move(wrapper));
default:
assert(0 == "noop");
}
} else {
switch (Type) {
case RWState::RWWRITE:
return _with_lock(lock.for_write(), std::forward<Func>(func));
case RWState::RWREAD:
return _with_lock(lock.for_read(), std::forward<Func>(func));
case RWState::RWEXCL:
return _with_lock(lock.for_excl(), std::forward<Func>(func));
case RWState::RWNONE:
return seastar::futurize_invoke(std::forward<Func>(func));
default:
assert(0 == "noop");
}
}
}
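  // Illustrative call site (cf. ObjectContextLoader; do_read is a hypothetical
  // callback):
  //   obc->with_lock<RWState::RWREAD, IOInterruptCondition>(
  //     [obc] { return do_read(obc); });
  // The lock is acquired before the lambda runs and released once the future
  // it returns resolves.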
template<RWState::State Type, typename InterruptCond = void, typename Func>
auto with_promoted_lock(Func&& func) {
if constexpr (!std::is_void_v<InterruptCond>) {
auto wrapper = ::crimson::interruptible::interruptor<InterruptCond>::wrap_function(std::forward<Func>(func));
switch (Type) {
case RWState::RWWRITE:
return _with_lock(lock.excl_from_write(), std::move(wrapper));
case RWState::RWREAD:
return _with_lock(lock.excl_from_read(), std::move(wrapper));
case RWState::RWEXCL:
return _with_lock(lock.excl_from_excl(), std::move(wrapper));
case RWState::RWNONE:
return _with_lock(lock.for_excl(), std::move(wrapper));
default:
assert(0 == "noop");
}
} else {
switch (Type) {
case RWState::RWWRITE:
return _with_lock(lock.excl_from_write(), std::forward<Func>(func));
case RWState::RWREAD:
return _with_lock(lock.excl_from_read(), std::forward<Func>(func));
case RWState::RWEXCL:
return _with_lock(lock.excl_from_excl(), std::forward<Func>(func));
case RWState::RWNONE:
return _with_lock(lock.for_excl(), std::forward<Func>(func));
default:
assert(0 == "noop");
}
}
}
bool empty() const {
return !lock.is_acquired();
}
bool is_request_pending() const {
return lock.is_acquired();
}
bool get_recovery_read() {
if (lock.try_lock_for_read()) {
recovery_read_marker = true;
return true;
} else {
return false;
}
}
void wait_recovery_read() {
assert(lock.get_readers() > 0);
recovery_read_marker = true;
}
void drop_recovery_read() {
assert(recovery_read_marker);
recovery_read_marker = false;
}
bool maybe_get_excl() {
return lock.try_lock_for_excl();
}
};
using ObjectContextRef = ObjectContext::Ref;
class ObjectContextRegistry : public md_config_obs_t {
ObjectContext::lru_t obc_lru;
public:
ObjectContextRegistry(crimson::common::ConfigProxy &conf);
~ObjectContextRegistry();
std::pair<ObjectContextRef, bool> get_cached_obc(const hobject_t &hoid) {
return obc_lru.get_or_create(hoid);
}
ObjectContextRef maybe_get_cached_obc(const hobject_t &hoid) {
return obc_lru.get(hoid);
}
void clear_range(const hobject_t &from,
const hobject_t &to) {
obc_lru.clear_range(from, to);
}
template <class F>
void for_each(F&& f) {
obc_lru.for_each(std::forward<F>(f));
}
const char** get_tracked_conf_keys() const final;
void handle_conf_change(const crimson::common::ConfigProxy& conf,
const std::set <std::string> &changed) final;
};
std::optional<hobject_t> resolve_oid(const SnapSet &ss,
const hobject_t &oid);
} // namespace crimson::osd
| 7,762 | 27.025271 | 115 | h |
null | ceph-main/src/crimson/osd/object_context_loader.cc | #include "crimson/osd/object_context_loader.h"
#include "osd/osd_types_fmt.h"
SET_SUBSYS(osd);
namespace crimson::osd {
using crimson::common::local_conf;
template<RWState::State State>
ObjectContextLoader::load_obc_iertr::future<>
ObjectContextLoader::with_head_obc(ObjectContextRef obc,
bool existed,
with_obc_func_t&& func)
{
LOG_PREFIX(ObjectContextLoader::with_head_obc);
DEBUGDPP("object {}", dpp, obc->get_oid());
assert(obc->is_head());
obc->append_to(obc_set_accessing);
return obc->with_lock<State, IOInterruptCondition>(
[existed=existed, obc=obc, func=std::move(func), this] {
return get_or_load_obc<State>(obc, existed)
.safe_then_interruptible(
[func = std::move(func)](auto obc) {
return std::move(func)(std::move(obc));
});
}).finally([FNAME, this, obc=std::move(obc)] {
DEBUGDPP("released object {}", dpp, obc->get_oid());
obc->remove_from(obc_set_accessing);
});
}
template<RWState::State State>
ObjectContextLoader::load_obc_iertr::future<>
ObjectContextLoader::with_clone_obc(hobject_t oid,
with_obc_func_t&& func)
{
LOG_PREFIX(ObjectContextLoader::with_clone_obc);
assert(!oid.is_head());
return with_obc<RWState::RWREAD>(
oid.get_head(),
[FNAME, oid, func=std::move(func), this](auto head) mutable
-> load_obc_iertr::future<> {
if (!head->obs.exists) {
ERRORDPP("head doesn't exist for object {}", dpp, head->obs.oi.soid);
return load_obc_iertr::future<>{
crimson::ct_error::enoent::make()
};
}
return this->with_clone_obc_only<State>(std::move(head),
oid,
std::move(func));
});
}
template<RWState::State State>
ObjectContextLoader::load_obc_iertr::future<>
ObjectContextLoader::with_clone_obc_only(ObjectContextRef head,
hobject_t oid,
with_obc_func_t&& func)
{
LOG_PREFIX(ObjectContextLoader::with_clone_obc_only);
auto coid = resolve_oid(head->get_head_ss(), oid);
if (!coid) {
ERRORDPP("clone {} not found", dpp, oid);
return load_obc_iertr::future<>{
crimson::ct_error::enoent::make()
};
}
auto [clone, existed] = obc_registry.get_cached_obc(*coid);
return clone->template with_lock<State, IOInterruptCondition>(
[existed=existed, clone=std::move(clone),
func=std::move(func), head=std::move(head), this]()
-> load_obc_iertr::future<> {
auto loaded = get_or_load_obc<State>(clone, existed);
return loaded.safe_then_interruptible(
[func = std::move(func)](auto clone) {
return std::move(func)(std::move(clone));
});
});
}
template<RWState::State State>
ObjectContextLoader::load_obc_iertr::future<>
ObjectContextLoader::with_head_and_clone_obc(
hobject_t oid,
with_both_obc_func_t&& func)
{
LOG_PREFIX(ObjectContextLoader::with_head_and_clone_obc);
assert(!oid.is_head());
return with_obc<RWState::RWREAD>(
oid.get_head(),
[FNAME, oid, func=std::move(func), this](auto head) mutable
-> load_obc_iertr::future<> {
if (!head->obs.exists) {
ERRORDPP("head doesn't exist for object {}", dpp, head->obs.oi.soid);
return load_obc_iertr::future<>{
crimson::ct_error::enoent::make()
};
}
auto coid = resolve_oid(head->get_head_ss(), oid);
if (!coid) {
ERRORDPP("clone {} not found", dpp, oid);
return load_obc_iertr::future<>{
crimson::ct_error::enoent::make()
};
}
auto [clone, existed] = obc_registry.get_cached_obc(*coid);
return clone->template with_lock<State, IOInterruptCondition>(
[existed=existed, clone=std::move(clone),
func=std::move(func), head=std::move(head), this]()
-> load_obc_iertr::future<> {
auto loaded = get_or_load_obc<State>(clone, existed);
return loaded.safe_then_interruptible(
[func = std::move(func), head=std::move(head)](auto clone) {
return std::move(func)(std::move(head), std::move(clone));
});
});
});
}
template<RWState::State State>
ObjectContextLoader::load_obc_iertr::future<>
ObjectContextLoader::with_obc(hobject_t oid,
with_obc_func_t&& func)
{
if (oid.is_head()) {
auto [obc, existed] =
obc_registry.get_cached_obc(std::move(oid));
return with_head_obc<State>(std::move(obc),
existed,
std::move(func));
} else {
return with_clone_obc<State>(oid, std::move(func));
}
}
ObjectContextLoader::load_obc_iertr::future<ObjectContextRef>
ObjectContextLoader::load_obc(ObjectContextRef obc)
{
LOG_PREFIX(ObjectContextLoader::load_obc);
return backend.load_metadata(obc->get_oid())
.safe_then_interruptible(
[FNAME, this, obc=std::move(obc)](auto md)
-> load_obc_ertr::future<ObjectContextRef> {
const hobject_t& oid = md->os.oi.soid;
DEBUGDPP("loaded obs {} for {}", dpp, md->os.oi, oid);
if (oid.is_head()) {
if (!md->ssc) {
ERRORDPP("oid {} missing snapsetcontext", dpp, oid);
return crimson::ct_error::object_corrupted::make();
}
obc->set_head_state(std::move(md->os),
std::move(md->ssc));
} else {
obc->set_clone_state(std::move(md->os));
}
DEBUGDPP("returning obc {} for {}", dpp, obc->obs.oi, obc->obs.oi.soid);
return load_obc_ertr::make_ready_future<ObjectContextRef>(obc);
});
}
template<RWState::State State>
ObjectContextLoader::load_obc_iertr::future<ObjectContextRef>
ObjectContextLoader::get_or_load_obc(ObjectContextRef obc,
bool existed)
{
LOG_PREFIX(ObjectContextLoader::get_or_load_obc);
auto loaded =
load_obc_iertr::make_ready_future<ObjectContextRef>(obc);
if (existed) {
DEBUGDPP("cache hit on {}", dpp, obc->get_oid());
} else {
DEBUGDPP("cache miss on {}", dpp, obc->get_oid());
loaded =
obc->template with_promoted_lock<State, IOInterruptCondition>(
[obc, this] {
return load_obc(obc);
});
}
return loaded;
}
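// reload_obc re-reads the metadata of an already cached head obc from the
// backend and refreshes its ObjectState and SnapSetContext in place.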
ObjectContextLoader::load_obc_iertr::future<>
ObjectContextLoader::reload_obc(ObjectContext& obc) const
{
LOG_PREFIX(ObjectContextLoader::reload_obc);
assert(obc.is_head());
return backend.load_metadata(obc.get_oid())
.safe_then_interruptible<false>(
[FNAME, this, &obc](auto md)-> load_obc_ertr::future<> {
DEBUGDPP("reloaded obs {} for {}", dpp, md->os.oi, obc.get_oid());
if (!md->ssc) {
ERRORDPP("oid {} missing snapsetcontext", dpp, obc.get_oid());
return crimson::ct_error::object_corrupted::make();
}
obc.set_head_state(std::move(md->os), std::move(md->ssc));
return load_obc_ertr::now();
});
}
void ObjectContextLoader::notify_on_change(bool is_primary)
{
LOG_PREFIX(ObjectContextLoader::notify_on_change);
DEBUGDPP("is_primary: {}", dpp, is_primary);
for (auto& obc : obc_set_accessing) {
DEBUGDPP("interrupting obc: {}", dpp, obc.get_oid());
obc.interrupt(::crimson::common::actingset_changed(is_primary));
}
}
// explicitly instantiate the template specializations that are used
template ObjectContextLoader::load_obc_iertr::future<>
ObjectContextLoader::with_obc<RWState::RWNONE>(hobject_t,
with_obc_func_t&&);
template ObjectContextLoader::load_obc_iertr::future<>
ObjectContextLoader::with_obc<RWState::RWREAD>(hobject_t,
with_obc_func_t&&);
template ObjectContextLoader::load_obc_iertr::future<>
ObjectContextLoader::with_obc<RWState::RWWRITE>(hobject_t,
with_obc_func_t&&);
template ObjectContextLoader::load_obc_iertr::future<>
ObjectContextLoader::with_obc<RWState::RWEXCL>(hobject_t,
with_obc_func_t&&);
template ObjectContextLoader::load_obc_iertr::future<>
ObjectContextLoader::with_head_and_clone_obc<RWState::RWWRITE>(
hobject_t,
with_both_obc_func_t&&);
}
| 8,548 | 35.534188 | 78 | cc |
null | ceph-main/src/crimson/osd/object_context_loader.h | #pragma once
#include <seastar/core/future.hh>
#include "crimson/common/errorator.h"
#include "crimson/osd/object_context.h"
#include "crimson/osd/pg_backend.h"
namespace crimson::osd {
class ObjectContextLoader {
public:
using obc_accessing_list_t = boost::intrusive::list<
ObjectContext,
ObjectContext::obc_accessing_option_t>;
ObjectContextLoader(
ObjectContextRegistry& _obc_services,
PGBackend& _backend,
DoutPrefixProvider& dpp)
: obc_registry{_obc_services},
backend{_backend},
dpp{dpp}
{}
using load_obc_ertr = crimson::errorator<
crimson::ct_error::enoent,
crimson::ct_error::object_corrupted>;
using load_obc_iertr =
::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
load_obc_ertr>;
using with_obc_func_t =
std::function<load_obc_iertr::future<> (ObjectContextRef)>;
using with_both_obc_func_t =
std::function<load_obc_iertr::future<> (ObjectContextRef, ObjectContextRef)>;
// Use this variant by default
template<RWState::State State>
load_obc_iertr::future<> with_obc(hobject_t oid,
with_obc_func_t&& func);
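  // A minimal usage sketch (hypothetical caller, for illustration only):
  //   loader.with_obc<RWState::RWREAD>(oid, [](auto obc) {
  //     // inspect obc->obs / obc->ssc here while the lock is held
  //     return ObjectContextLoader::load_obc_iertr::now();
  //   });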
// Use this variant in the case where the head object
// obc is already locked and only the clone obc is needed.
// Avoid nesting with_head_obc() calls by using with_clone_obc()
// with an already locked head.
template<RWState::State State>
load_obc_iertr::future<> with_clone_obc_only(ObjectContextRef head,
hobject_t oid,
with_obc_func_t&& func);
// Use this variant in the case where both the head
// object *and* the matching clone object are being used
// in func.
template<RWState::State State>
load_obc_iertr::future<> with_head_and_clone_obc(
hobject_t oid,
with_both_obc_func_t&& func);
load_obc_iertr::future<> reload_obc(ObjectContext& obc) const;
void notify_on_change(bool is_primary);
private:
ObjectContextRegistry& obc_registry;
PGBackend& backend;
DoutPrefixProvider& dpp;
obc_accessing_list_t obc_set_accessing;
template<RWState::State State>
load_obc_iertr::future<> with_clone_obc(hobject_t oid,
with_obc_func_t&& func);
template<RWState::State State>
load_obc_iertr::future<> with_head_obc(ObjectContextRef obc,
bool existed,
with_obc_func_t&& func);
template<RWState::State State>
load_obc_iertr::future<ObjectContextRef>
get_or_load_obc(ObjectContextRef obc,
bool existed);
load_obc_iertr::future<ObjectContextRef>
load_obc(ObjectContextRef obc);
};
}
| 2,772 | 30.511364 | 81 | h |
null | ceph-main/src/crimson/osd/ops_executer.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "ops_executer.h"
#include <boost/range/adaptor/filtered.hpp>
#include <boost/range/adaptor/map.hpp>
#include <boost/range/adaptor/transformed.hpp>
#include <boost/range/algorithm_ext/push_back.hpp>
#include <boost/range/algorithm/max_element.hpp>
#include <boost/range/numeric.hpp>
#include <fmt/format.h>
#include <fmt/ostream.h>
#include <seastar/core/thread.hh>
#include "crimson/osd/exceptions.h"
#include "crimson/osd/pg.h"
#include "crimson/osd/watch.h"
#include "osd/ClassHandler.h"
#include "osd/SnapMapper.h"
namespace {
seastar::logger& logger() {
return crimson::get_logger(ceph_subsys_osd);
}
}
namespace crimson::osd {
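// do_op_call: runs an object-class (cls) method. The class name, method
// name and input payload are decoded from the op, the class is opened
// through ClassHandler (which may dlopen(), see the NOTE below) and the
// method is executed inside interruptor::async, i.e. on a seastar thread,
// since cls methods use a synchronous API. Afterwards the method's RD/WR
// flags are checked against the reads/writes it actually performed.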
OpsExecuter::call_ierrorator::future<> OpsExecuter::do_op_call(OSDOp& osd_op)
{
std::string cname, mname;
ceph::bufferlist indata;
try {
auto bp = std::begin(osd_op.indata);
bp.copy(osd_op.op.cls.class_len, cname);
bp.copy(osd_op.op.cls.method_len, mname);
bp.copy(osd_op.op.cls.indata_len, indata);
} catch (buffer::error&) {
logger().warn("call unable to decode class + method + indata");
return crimson::ct_error::invarg::make();
}
// NOTE: opening a class can actually result in dlopen(), and thus
  // blocking the entire reactor. Thanks to ClassHandler's cache
  // this is supposed to be extremely infrequent.
ClassHandler::ClassData* cls;
int r = ClassHandler::get_instance().open_class(cname, &cls);
if (r) {
logger().warn("class {} open got {}", cname, cpp_strerror(r));
if (r == -ENOENT) {
return crimson::ct_error::operation_not_supported::make();
} else if (r == -EPERM) {
// propagate permission errors
return crimson::ct_error::permission_denied::make();
}
return crimson::ct_error::input_output_error::make();
}
ClassHandler::ClassMethod* method = cls->get_method(mname);
if (!method) {
logger().warn("call method {}.{} does not exist", cname, mname);
return crimson::ct_error::operation_not_supported::make();
}
const auto flags = method->get_flags();
if (!obc->obs.exists && (flags & CLS_METHOD_WR) == 0) {
return crimson::ct_error::enoent::make();
}
#if 0
if (flags & CLS_METHOD_WR) {
ctx->user_modify = true;
}
#endif
logger().debug("calling method {}.{}, num_read={}, num_write={}",
cname, mname, num_read, num_write);
const auto prev_rd = num_read;
const auto prev_wr = num_write;
return interruptor::async(
[this, method, indata=std::move(indata)]() mutable {
ceph::bufferlist outdata;
auto cls_context = reinterpret_cast<cls_method_context_t>(this);
const auto ret = method->exec(cls_context, indata, outdata);
return std::make_pair(ret, std::move(outdata));
}
).then_interruptible(
[this, prev_rd, prev_wr, &osd_op, flags]
(auto outcome) -> call_errorator::future<> {
auto& [ret, outdata] = outcome;
osd_op.rval = ret;
logger().debug("do_op_call: method returned ret={}, outdata.length()={}"
" while num_read={}, num_write={}",
ret, outdata.length(), num_read, num_write);
if (num_read > prev_rd && !(flags & CLS_METHOD_RD)) {
logger().error("method tried to read object but is not marked RD");
osd_op.rval = -EIO;
return crimson::ct_error::input_output_error::make();
}
if (num_write > prev_wr && !(flags & CLS_METHOD_WR)) {
logger().error("method tried to update object but is not marked WR");
osd_op.rval = -EIO;
return crimson::ct_error::input_output_error::make();
}
// ceph-osd has this implemented in `PrimaryLogPG::execute_ctx`,
// grep for `ignore_out_data`.
using crimson::common::local_conf;
if (op_info.allows_returnvec() &&
op_info.may_write() &&
ret >= 0 &&
outdata.length() > local_conf()->osd_max_write_op_reply_len) {
      // the justification of this limit is to not inflate the pg log.
// that's the reason why we don't worry about pure reads.
logger().error("outdata overflow due to .length()={}, limit={}",
outdata.length(),
local_conf()->osd_max_write_op_reply_len);
osd_op.rval = -EOVERFLOW;
return crimson::ct_error::value_too_large::make();
}
    // for write calls we never return data except for errors or RETURNVEC.
    // please refer to cls/cls_hello.cc for details.
if (!op_info.may_write() || op_info.allows_returnvec() || ret < 0) {
osd_op.op.extent.length = outdata.length();
osd_op.outdata.claim_append(outdata);
}
if (ret < 0) {
return crimson::stateful_ec{
std::error_code(-ret, std::generic_category()) };
} else {
return seastar::now();
}
}
);
}
static watch_info_t create_watch_info(const OSDOp& osd_op,
const OpsExecuter::ExecutableMessage& msg,
entity_addr_t peer_addr)
{
using crimson::common::local_conf;
const uint32_t timeout =
osd_op.op.watch.timeout == 0 ? local_conf()->osd_client_watch_timeout
: osd_op.op.watch.timeout;
return {
osd_op.op.watch.cookie,
timeout,
peer_addr
};
}
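// The watch/notify handlers below follow the two-stage pattern described
// in ops_executer.h: the first lambda passed to with_effect_on_obc()
// mutates the ObjectState and the transaction right away, while the second
// one is stored as an op effect and runs later, only if the main stages of
// all ops succeed; that is where Watch/Notify objects are created,
// connected or removed.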
OpsExecuter::watch_ierrorator::future<> OpsExecuter::do_op_watch_subop_watch(
OSDOp& osd_op,
ObjectState& os,
ceph::os::Transaction& txn)
{
logger().debug("{}", __func__);
struct connect_ctx_t {
ObjectContext::watch_key_t key;
crimson::net::ConnectionRef conn;
watch_info_t info;
connect_ctx_t(
const OSDOp& osd_op,
const ExecutableMessage& msg,
crimson::net::ConnectionRef conn)
: key(osd_op.op.watch.cookie, msg.get_reqid().name),
conn(conn),
info(create_watch_info(osd_op, msg, conn->get_peer_addr())) {
}
};
return with_effect_on_obc(
connect_ctx_t{ osd_op, get_message(), conn },
[&](auto& ctx) {
const auto& entity = ctx.key.second;
auto [it, emplaced] =
os.oi.watchers.try_emplace(ctx.key, std::move(ctx.info));
if (emplaced) {
logger().info("registered new watch {} by {}", it->second, entity);
txn.nop();
} else {
logger().info("found existing watch {} by {}", it->second, entity);
}
return seastar::now();
},
[](auto&& ctx, ObjectContextRef obc, Ref<PG> pg) {
assert(pg);
auto [it, emplaced] = obc->watchers.try_emplace(ctx.key, nullptr);
if (emplaced) {
const auto& [cookie, entity] = ctx.key;
it->second = crimson::osd::Watch::create(
obc, ctx.info, entity, std::move(pg));
logger().info("op_effect: added new watcher: {}", ctx.key);
} else {
logger().info("op_effect: found existing watcher: {}", ctx.key);
}
return it->second->connect(std::move(ctx.conn), true /* will_ping */);
}
);
}
OpsExecuter::watch_ierrorator::future<> OpsExecuter::do_op_watch_subop_reconnect(
OSDOp& osd_op,
ObjectState& os,
ceph::os::Transaction& txn)
{
const entity_name_t& entity = get_message().get_reqid().name;
const auto& cookie = osd_op.op.watch.cookie;
if (!os.oi.watchers.count(std::make_pair(cookie, entity))) {
return crimson::ct_error::not_connected::make();
} else {
logger().info("found existing watch by {}", entity);
return do_op_watch_subop_watch(osd_op, os, txn);
}
}
OpsExecuter::watch_ierrorator::future<> OpsExecuter::do_op_watch_subop_unwatch(
OSDOp& osd_op,
ObjectState& os,
ceph::os::Transaction& txn)
{
logger().info("{}", __func__);
struct disconnect_ctx_t {
ObjectContext::watch_key_t key;
disconnect_ctx_t(const OSDOp& osd_op, const ExecutableMessage& msg)
: key(osd_op.op.watch.cookie, msg.get_reqid().name) {
}
};
return with_effect_on_obc(disconnect_ctx_t{ osd_op, get_message() },
[&] (auto& ctx) {
const auto& entity = ctx.key.second;
if (auto nh = os.oi.watchers.extract(ctx.key); !nh.empty()) {
logger().info("removed watch {} by {}", nh.mapped(), entity);
txn.nop();
} else {
logger().info("can't remove: no watch by {}", entity);
}
return seastar::now();
},
[] (auto&& ctx, ObjectContextRef obc, Ref<PG>) {
if (auto nh = obc->watchers.extract(ctx.key); !nh.empty()) {
return seastar::do_with(std::move(nh.mapped()),
[ctx](auto&& watcher) {
logger().info("op_effect: disconnect watcher {}", ctx.key);
return watcher->remove();
});
} else {
logger().info("op_effect: disconnect failed to find watcher {}", ctx.key);
return seastar::now();
}
});
}
OpsExecuter::watch_ierrorator::future<> OpsExecuter::do_op_watch_subop_ping(
OSDOp& osd_op,
ObjectState& os,
ceph::os::Transaction& txn)
{
const entity_name_t& entity = get_message().get_reqid().name;
const auto& cookie = osd_op.op.watch.cookie;
const auto key = std::make_pair(cookie, entity);
// Note: WATCH with PING doesn't cause may_write() to return true,
// so if there is nothing else in the transaction, this is going
  // to run do_osd_op_effects, but not write out a log entry
if (!os.oi.watchers.count(key)) {
return crimson::ct_error::not_connected::make();
}
auto it = obc->watchers.find(key);
if (it == std::end(obc->watchers) || !it->second->is_connected()) {
return crimson::ct_error::timed_out::make();
}
logger().info("found existing watch by {}", entity);
it->second->got_ping(ceph_clock_now());
return seastar::now();
}
OpsExecuter::watch_ierrorator::future<> OpsExecuter::do_op_watch(
OSDOp& osd_op,
ObjectState& os,
ceph::os::Transaction& txn)
{
logger().debug("{}", __func__);
if (!os.exists) {
return crimson::ct_error::enoent::make();
}
switch (osd_op.op.watch.op) {
case CEPH_OSD_WATCH_OP_WATCH:
return do_op_watch_subop_watch(osd_op, os, txn);
case CEPH_OSD_WATCH_OP_RECONNECT:
return do_op_watch_subop_reconnect(osd_op, os, txn);
case CEPH_OSD_WATCH_OP_PING:
return do_op_watch_subop_ping(osd_op, os, txn);
case CEPH_OSD_WATCH_OP_UNWATCH:
return do_op_watch_subop_unwatch(osd_op, os, txn);
case CEPH_OSD_WATCH_OP_LEGACY_WATCH:
logger().warn("ignoring CEPH_OSD_WATCH_OP_LEGACY_WATCH");
return crimson::ct_error::invarg::make();
}
logger().warn("unrecognized WATCH subop: {}", osd_op.op.watch.op);
return crimson::ct_error::invarg::make();
}
static uint64_t get_next_notify_id(epoch_t e)
{
// FIXME
static std::uint64_t next_notify_id = 0;
return (((uint64_t)e) << 32) | ((uint64_t)(next_notify_id++));
}
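// do_op_notify: decodes the client-provided timeout and payload (falling
// back to osd_default_notify_timeout), assigns a notify id that embeds the
// map epoch in the upper 32 bits (see get_next_notify_id above), returns
// that id in the op's outdata and, as an op effect, propagates the Notify
// to all watchers that are still alive.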
OpsExecuter::watch_ierrorator::future<> OpsExecuter::do_op_notify(
OSDOp& osd_op,
const ObjectState& os)
{
logger().debug("{}, msg epoch: {}", __func__, get_message().get_map_epoch());
if (!os.exists) {
return crimson::ct_error::enoent::make();
}
struct notify_ctx_t {
crimson::net::ConnectionRef conn;
notify_info_t ninfo;
const uint64_t client_gid;
const epoch_t epoch;
notify_ctx_t(const ExecutableMessage& msg,
crimson::net::ConnectionRef conn)
: conn(conn),
client_gid(msg.get_reqid().name.num()),
epoch(msg.get_map_epoch()) {
}
};
return with_effect_on_obc(
notify_ctx_t{ get_message(), conn },
[&](auto& ctx) {
try {
auto bp = osd_op.indata.cbegin();
uint32_t ver; // obsolete
ceph::decode(ver, bp);
ceph::decode(ctx.ninfo.timeout, bp);
ceph::decode(ctx.ninfo.bl, bp);
} catch (const buffer::error&) {
ctx.ninfo.timeout = 0;
}
if (!ctx.ninfo.timeout) {
using crimson::common::local_conf;
ctx.ninfo.timeout = local_conf()->osd_default_notify_timeout;
}
ctx.ninfo.notify_id = get_next_notify_id(ctx.epoch);
ctx.ninfo.cookie = osd_op.op.notify.cookie;
// return our unique notify id to the client
ceph::encode(ctx.ninfo.notify_id, osd_op.outdata);
return seastar::now();
},
[](auto&& ctx, ObjectContextRef obc, Ref<PG>) {
auto alive_watchers = obc->watchers | boost::adaptors::map_values
| boost::adaptors::filtered(
[] (const auto& w) {
// FIXME: filter as for the `is_ping` in `Watch::start_notify`
return w->is_alive();
});
return crimson::osd::Notify::create_n_propagate(
std::begin(alive_watchers),
std::end(alive_watchers),
std::move(ctx.conn),
ctx.ninfo,
ctx.client_gid,
obc->obs.oi.user_version);
}
);
}
OpsExecuter::watch_ierrorator::future<> OpsExecuter::do_op_list_watchers(
OSDOp& osd_op,
const ObjectState& os)
{
logger().debug("{}", __func__);
obj_list_watch_response_t response;
for (const auto& [key, info] : os.oi.watchers) {
logger().debug("{}: key cookie={}, entity={}",
__func__, key.first, key.second);
assert(key.first == info.cookie);
assert(key.second.is_client());
response.entries.emplace_back(watch_item_t{
key.second, info.cookie, info.timeout_seconds, info.addr});
}
response.encode(osd_op.outdata, get_message().get_features());
return watch_ierrorator::now();
}
OpsExecuter::watch_ierrorator::future<> OpsExecuter::do_op_notify_ack(
OSDOp& osd_op,
const ObjectState& os)
{
logger().debug("{}", __func__);
struct notifyack_ctx_t {
const entity_name_t entity;
uint64_t watch_cookie;
uint64_t notify_id;
ceph::bufferlist reply_bl;
notifyack_ctx_t(const ExecutableMessage& msg)
: entity(msg.get_reqid().name) {
}
};
return with_effect_on_obc(notifyack_ctx_t{ get_message() },
[&] (auto& ctx) -> watch_errorator::future<> {
try {
auto bp = osd_op.indata.cbegin();
ceph::decode(ctx.notify_id, bp);
ceph::decode(ctx.watch_cookie, bp);
if (!bp.end()) {
ceph::decode(ctx.reply_bl, bp);
}
} catch (const buffer::error&) {
// here we behave differently than ceph-osd. For historical reasons,
// it falls back to using `osd_op.op.watch.cookie` as `ctx.notify_id`.
// crimson just returns EINVAL if the data cannot be decoded.
return crimson::ct_error::invarg::make();
}
return watch_errorator::now();
},
[] (auto&& ctx, ObjectContextRef obc, Ref<PG>) {
logger().info("notify_ack watch_cookie={}, notify_id={}",
ctx.watch_cookie, ctx.notify_id);
return seastar::do_for_each(obc->watchers,
[ctx=std::move(ctx)] (auto& kv) {
const auto& [key, watchp] = kv;
static_assert(
std::is_same_v<std::decay_t<decltype(watchp)>,
seastar::shared_ptr<crimson::osd::Watch>>);
auto& [cookie, entity] = key;
if (ctx.entity != entity) {
logger().debug("skipping watch {}; entity name {} != {}",
key, entity, ctx.entity);
return seastar::now();
}
if (ctx.watch_cookie != cookie) {
logger().debug("skipping watch {}; cookie {} != {}",
key, ctx.watch_cookie, cookie);
return seastar::now();
}
logger().info("acking notify on watch {}", key);
return watchp->notify_ack(ctx.notify_id, ctx.reply_bl);
});
});
}
// Defined here because there is a circular dependency between OpsExecuter and PG
template <class Func>
auto OpsExecuter::do_const_op(Func&& f) {
// TODO: pass backend as read-only
return std::forward<Func>(f)(pg->get_backend(), std::as_const(obc->obs));
}
// Defined here because there is a circular dependency between OpsExecuter and PG
template <class Func>
auto OpsExecuter::do_write_op(Func&& f, OpsExecuter::modified_by m) {
++num_write;
if (!osd_op_params) {
osd_op_params.emplace();
fill_op_params_bump_pg_version();
}
user_modify = (m == modified_by::user);
return std::forward<Func>(f)(pg->get_backend(), obc->obs, txn);
}
OpsExecuter::call_errorator::future<> OpsExecuter::do_assert_ver(
OSDOp& osd_op,
const ObjectState& os)
{
if (!osd_op.op.assert_ver.ver) {
return crimson::ct_error::invarg::make();
} else if (osd_op.op.assert_ver.ver < os.oi.user_version) {
return crimson::ct_error::erange::make();
} else if (osd_op.op.assert_ver.ver > os.oi.user_version) {
return crimson::ct_error::value_too_large::make();
}
return seastar::now();
}
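// do_list_snaps: builds an obj_list_snap_response_t from the object's
// SnapSet: one clone_info per clone (its snap ids, clone_overlap intervals
// and clone size) plus a CEPH_NOSNAP entry for the head unless it is a
// whiteout. Missing per-clone metadata is treated as an inconsistency and
// reported as EINVAL.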
OpsExecuter::list_snaps_iertr::future<> OpsExecuter::do_list_snaps(
OSDOp& osd_op,
const ObjectState& os,
const SnapSet& ss)
{
obj_list_snap_response_t resp;
resp.clones.reserve(ss.clones.size() + 1);
for (auto &clone: ss.clones) {
clone_info ci;
ci.cloneid = clone;
{
auto p = ss.clone_snaps.find(clone);
if (p == ss.clone_snaps.end()) {
logger().error(
"OpsExecutor::do_list_snaps: {} has inconsistent "
"clone_snaps, missing clone {}",
os.oi.soid,
clone);
return crimson::ct_error::invarg::make();
}
ci.snaps.reserve(p->second.size());
ci.snaps.insert(ci.snaps.end(), p->second.rbegin(), p->second.rend());
}
{
auto p = ss.clone_overlap.find(clone);
if (p == ss.clone_overlap.end()) {
logger().error(
"OpsExecutor::do_list_snaps: {} has inconsistent "
"clone_overlap, missing clone {}",
os.oi.soid,
clone);
return crimson::ct_error::invarg::make();
}
ci.overlap.reserve(p->second.num_intervals());
ci.overlap.insert(ci.overlap.end(), p->second.begin(), p->second.end());
}
{
auto p = ss.clone_size.find(clone);
if (p == ss.clone_size.end()) {
logger().error(
"OpsExecutor::do_list_snaps: {} has inconsistent "
"clone_size, missing clone {}",
os.oi.soid,
clone);
return crimson::ct_error::invarg::make();
}
ci.size = p->second;
}
resp.clones.push_back(std::move(ci));
}
if (!os.oi.is_whiteout()) {
clone_info ci;
ci.cloneid = CEPH_NOSNAP;
ci.size = os.oi.size;
resp.clones.push_back(std::move(ci));
}
resp.seq = ss.seq;
logger().error(
"OpsExecutor::do_list_snaps: {}, resp.clones.size(): {}",
os.oi.soid,
resp.clones.size());
resp.encode(osd_op.outdata);
return read_ierrorator::now();
}
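// execute_op: dispatches to do_execute_op and records any error in the
// op's rval (except for CMPEXT, which sets its own rval). If the client
// asked for FAILOK the error is swallowed, except for EAGAIN and
// EINPROGRESS.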
OpsExecuter::interruptible_errorated_future<OpsExecuter::osd_op_errorator>
OpsExecuter::execute_op(OSDOp& osd_op)
{
return do_execute_op(osd_op).handle_error_interruptible(
osd_op_errorator::all_same_way([&osd_op](auto e, auto&& e_raw)
-> OpsExecuter::osd_op_errorator::future<> {
// All ops except for CMPEXT should have rval set to -e.value(),
// CMPEXT sets rval itself and shouldn't be overridden.
if (e.value() != ct_error::cmp_fail_error_value) {
osd_op.rval = -e.value();
}
if ((osd_op.op.flags & CEPH_OSD_OP_FLAG_FAILOK) &&
e.value() != EAGAIN && e.value() != EINPROGRESS) {
return osd_op_errorator::now();
} else {
return std::move(e_raw);
}
}));
}
OpsExecuter::interruptible_errorated_future<OpsExecuter::osd_op_errorator>
OpsExecuter::do_execute_op(OSDOp& osd_op)
{
// TODO: dispatch via call table?
// TODO: we might want to find a way to unify both input and output
// of each op.
logger().debug(
"handling op {} on object {}",
ceph_osd_op_name(osd_op.op.op),
get_target());
switch (const ceph_osd_op& op = osd_op.op; op.op) {
case CEPH_OSD_OP_SYNC_READ:
[[fallthrough]];
case CEPH_OSD_OP_READ:
return do_read_op([this, &osd_op](auto& backend, const auto& os) {
return backend.read(os, osd_op, delta_stats);
});
case CEPH_OSD_OP_SPARSE_READ:
return do_read_op([this, &osd_op](auto& backend, const auto& os) {
return backend.sparse_read(os, osd_op, delta_stats);
});
case CEPH_OSD_OP_CHECKSUM:
return do_read_op([&osd_op](auto& backend, const auto& os) {
return backend.checksum(os, osd_op);
});
case CEPH_OSD_OP_CMPEXT:
return do_read_op([&osd_op](auto& backend, const auto& os) {
return backend.cmp_ext(os, osd_op);
});
case CEPH_OSD_OP_GETXATTR:
return do_read_op([this, &osd_op](auto& backend, const auto& os) {
return backend.getxattr(os, osd_op, delta_stats);
});
case CEPH_OSD_OP_GETXATTRS:
return do_read_op([this, &osd_op](auto& backend, const auto& os) {
return backend.get_xattrs(os, osd_op, delta_stats);
});
case CEPH_OSD_OP_CMPXATTR:
return do_read_op([this, &osd_op](auto& backend, const auto& os) {
return backend.cmp_xattr(os, osd_op, delta_stats);
});
case CEPH_OSD_OP_RMXATTR:
return do_write_op([&osd_op](auto& backend, auto& os, auto& txn) {
return backend.rm_xattr(os, osd_op, txn);
});
case CEPH_OSD_OP_CREATE:
return do_write_op([this, &osd_op](auto& backend, auto& os, auto& txn) {
return backend.create(os, osd_op, txn, delta_stats);
});
case CEPH_OSD_OP_WRITE:
return do_write_op([this, &osd_op](auto& backend, auto& os, auto& txn) {
return backend.write(os, osd_op, txn, *osd_op_params, delta_stats);
});
case CEPH_OSD_OP_WRITESAME:
return do_write_op([this, &osd_op](auto& backend, auto& os, auto& txn) {
return backend.write_same(os, osd_op, txn, *osd_op_params, delta_stats);
});
case CEPH_OSD_OP_WRITEFULL:
return do_write_op([this, &osd_op](auto& backend, auto& os, auto& txn) {
return backend.writefull(os, osd_op, txn, *osd_op_params, delta_stats);
});
case CEPH_OSD_OP_ROLLBACK:
return do_write_op([this, &head=obc,
&osd_op](auto& backend, auto& os, auto& txn) {
return backend.rollback(os, osd_op, txn, *osd_op_params, delta_stats,
head, pg->obc_loader);
});
case CEPH_OSD_OP_APPEND:
return do_write_op([this, &osd_op](auto& backend, auto& os, auto& txn) {
return backend.append(os, osd_op, txn, *osd_op_params, delta_stats);
});
case CEPH_OSD_OP_TRUNCATE:
return do_write_op([this, &osd_op](auto& backend, auto& os, auto& txn) {
// FIXME: rework needed. Move this out to do_write_op(), introduce
// do_write_op_no_user_modify()...
return backend.truncate(os, osd_op, txn, *osd_op_params, delta_stats);
});
case CEPH_OSD_OP_ZERO:
return do_write_op([this, &osd_op](auto& backend, auto& os, auto& txn) {
return backend.zero(os, osd_op, txn, *osd_op_params, delta_stats);
});
case CEPH_OSD_OP_SETALLOCHINT:
return do_write_op([this, &osd_op](auto& backend, auto& os, auto& txn) {
return backend.set_allochint(os, osd_op, txn, delta_stats);
});
case CEPH_OSD_OP_SETXATTR:
return do_write_op([this, &osd_op](auto& backend, auto& os, auto& txn) {
return backend.setxattr(os, osd_op, txn, delta_stats);
});
case CEPH_OSD_OP_DELETE:
{
bool whiteout = false;
if (!obc->ssc->snapset.clones.empty() ||
(snapc.snaps.size() && // there are snaps
snapc.snaps[0] > obc->ssc->snapset.seq)) { // existing obj is old
logger().debug("{} has or will have clones, will whiteout {}",
__func__, obc->obs.oi.soid);
whiteout = true;
}
return do_write_op([this, whiteout](auto& backend, auto& os, auto& txn) {
return backend.remove(os, txn, delta_stats, whiteout);
});
}
case CEPH_OSD_OP_CALL:
return this->do_op_call(osd_op);
case CEPH_OSD_OP_STAT:
// note: stat does not require RD
return do_const_op([this, &osd_op] (/* const */auto& backend, const auto& os) {
return backend.stat(os, osd_op, delta_stats);
});
case CEPH_OSD_OP_TMAPPUT:
return do_write_op([this, &osd_op](auto& backend, auto& os, auto& txn) {
return backend.tmapput(os, osd_op, txn, delta_stats, *osd_op_params);
});
case CEPH_OSD_OP_TMAPUP:
return do_write_op([this, &osd_op](auto& backend, auto& os, auto &txn) {
return backend.tmapup(os, osd_op, txn, delta_stats, *osd_op_params);
});
case CEPH_OSD_OP_TMAPGET:
return do_read_op([this, &osd_op](auto& backend, const auto& os) {
return backend.tmapget(os, osd_op, delta_stats);
});
// OMAP
case CEPH_OSD_OP_OMAPGETKEYS:
return do_read_op([this, &osd_op](auto& backend, const auto& os) {
return backend.omap_get_keys(os, osd_op, delta_stats);
});
case CEPH_OSD_OP_OMAPGETVALS:
return do_read_op([this, &osd_op](auto& backend, const auto& os) {
return backend.omap_get_vals(os, osd_op, delta_stats);
});
case CEPH_OSD_OP_OMAP_CMP:
return do_read_op([this, &osd_op](auto& backend, const auto& os) {
return backend.omap_cmp(os, osd_op, delta_stats);
});
case CEPH_OSD_OP_OMAPGETHEADER:
return do_read_op([this, &osd_op](auto& backend, const auto& os) {
return backend.omap_get_header(os, osd_op, delta_stats);
});
case CEPH_OSD_OP_OMAPGETVALSBYKEYS:
return do_read_op([this, &osd_op](auto& backend, const auto& os) {
return backend.omap_get_vals_by_keys(os, osd_op, delta_stats);
});
case CEPH_OSD_OP_OMAPSETVALS:
#if 0
if (!pg.get_pgpool().info.supports_omap()) {
return crimson::ct_error::operation_not_supported::make();
}
#endif
return do_write_op([this, &osd_op](auto& backend, auto& os, auto& txn) {
return backend.omap_set_vals(os, osd_op, txn, *osd_op_params, delta_stats);
});
case CEPH_OSD_OP_OMAPSETHEADER:
#if 0
if (!pg.get_pgpool().info.supports_omap()) {
return crimson::ct_error::operation_not_supported::make();
}
#endif
return do_write_op([this, &osd_op](auto& backend, auto& os, auto& txn) {
return backend.omap_set_header(os, osd_op, txn, *osd_op_params,
delta_stats);
});
case CEPH_OSD_OP_OMAPRMKEYRANGE:
#if 0
if (!pg.get_pgpool().info.supports_omap()) {
return crimson::ct_error::operation_not_supported::make();
}
#endif
return do_write_op([this, &osd_op](auto& backend, auto& os, auto& txn) {
return backend.omap_remove_range(os, osd_op, txn, delta_stats);
});
case CEPH_OSD_OP_OMAPRMKEYS:
/** TODO: Implement supports_omap()
if (!pg.get_pgpool().info.supports_omap()) {
return crimson::ct_error::operation_not_supported::make();
}*/
return do_write_op([&osd_op](auto& backend, auto& os, auto& txn) {
return backend.omap_remove_key(os, osd_op, txn);
});
case CEPH_OSD_OP_OMAPCLEAR:
return do_write_op([this, &osd_op](auto& backend, auto& os, auto& txn) {
return backend.omap_clear(os, osd_op, txn, *osd_op_params, delta_stats);
});
// watch/notify
case CEPH_OSD_OP_WATCH:
return do_write_op([this, &osd_op](auto& backend, auto& os, auto& txn) {
return do_op_watch(osd_op, os, txn);
}, modified_by::sys);
case CEPH_OSD_OP_LIST_WATCHERS:
return do_read_op([this, &osd_op](auto&, const auto& os) {
return do_op_list_watchers(osd_op, os);
});
case CEPH_OSD_OP_NOTIFY:
return do_read_op([this, &osd_op](auto&, const auto& os) {
return do_op_notify(osd_op, os);
});
case CEPH_OSD_OP_NOTIFY_ACK:
return do_read_op([this, &osd_op](auto&, const auto& os) {
return do_op_notify_ack(osd_op, os);
});
case CEPH_OSD_OP_ASSERT_VER:
return do_read_op([this, &osd_op](auto&, const auto& os) {
return do_assert_ver(osd_op, os);
});
case CEPH_OSD_OP_LIST_SNAPS:
return do_snapset_op([this, &osd_op](const auto &os, const auto &ss) {
return do_list_snaps(osd_op, os, ss);
});
default:
logger().warn("unknown op {}", ceph_osd_op_name(op.op));
throw std::runtime_error(
fmt::format("op '{}' not supported", ceph_osd_op_name(op.op)));
}
}
void OpsExecuter::fill_op_params_bump_pg_version()
{
osd_op_params->req_id = msg->get_reqid();
osd_op_params->mtime = msg->get_mtime();
osd_op_params->at_version = pg->next_version();
osd_op_params->pg_trim_to = pg->get_pg_trim_to();
osd_op_params->min_last_complete_ondisk = pg->get_min_last_complete_ondisk();
osd_op_params->last_complete = pg->get_info().last_complete;
}
std::vector<pg_log_entry_t> OpsExecuter::prepare_transaction(
const std::vector<OSDOp>& ops)
{
// let's ensure we don't need to inform SnapMapper about this particular
// entry.
assert(obc->obs.oi.soid.snap >= CEPH_MAXSNAP);
std::vector<pg_log_entry_t> log_entries;
log_entries.emplace_back(
obc->obs.exists ?
pg_log_entry_t::MODIFY : pg_log_entry_t::DELETE,
obc->obs.oi.soid,
osd_op_params->at_version,
obc->obs.oi.version,
osd_op_params->user_modify ? osd_op_params->at_version.version : 0,
osd_op_params->req_id,
osd_op_params->mtime,
op_info.allows_returnvec() && !ops.empty() ? ops.back().rval.code : 0);
if (op_info.allows_returnvec()) {
// also the per-op values are recorded in the pg log
log_entries.back().set_op_returns(ops);
logger().debug("{} op_returns: {}",
__func__, log_entries.back().op_returns);
}
log_entries.back().clean_regions = std::move(osd_op_params->clean_regions);
return log_entries;
}
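// The snap_map_* helpers below drive the SnapMapper through an OSDriver
// transaction. The SnapMapper API is synchronous, so the calls are wrapped
// in interruptor::async and run on a seastar thread.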
OpsExecuter::interruptible_future<> OpsExecuter::snap_map_remove(
const hobject_t& soid,
SnapMapper& snap_mapper,
OSDriver& osdriver,
ceph::os::Transaction& txn)
{
logger().debug("{}: soid {}", __func__, soid);
return interruptor::async([soid, &snap_mapper,
_t=osdriver.get_transaction(&txn)]() mutable {
const auto r = snap_mapper.remove_oid(soid, &_t);
if (r) {
logger().error("{}: remove_oid {} failed with {}",
__func__, soid, r);
}
    // On removal, tolerate a missing key (possible corruption)
assert(r == 0 || r == -ENOENT);
});
}
OpsExecuter::interruptible_future<> OpsExecuter::snap_map_modify(
const hobject_t& soid,
const std::set<snapid_t>& snaps,
SnapMapper& snap_mapper,
OSDriver& osdriver,
ceph::os::Transaction& txn)
{
logger().debug("{}: soid {}, snaps {}", __func__, soid, snaps);
return interruptor::async([soid, snaps, &snap_mapper,
_t=osdriver.get_transaction(&txn)]() mutable {
assert(std::size(snaps) > 0);
[[maybe_unused]] const auto r = snap_mapper.update_snaps(
soid, snaps, 0, &_t);
assert(r == 0);
});
}
OpsExecuter::interruptible_future<> OpsExecuter::snap_map_clone(
const hobject_t& soid,
const std::set<snapid_t>& snaps,
SnapMapper& snap_mapper,
OSDriver& osdriver,
ceph::os::Transaction& txn)
{
logger().debug("{}: soid {}, snaps {}", __func__, soid, snaps);
return interruptor::async([soid, snaps, &snap_mapper,
_t=osdriver.get_transaction(&txn)]() mutable {
assert(std::size(snaps) > 0);
snap_mapper.add_oid(soid, snaps, &_t);
});
}
// Defined here because there is a circular dependency between OpsExecuter and PG
uint32_t OpsExecuter::get_pool_stripe_width() const {
return pg->get_pgpool().info.get_stripe_width();
}
// Defined here because there is a circular dependency between OpsExecuter and PG
version_t OpsExecuter::get_last_user_version() const
{
return pg->get_last_user_version();
}
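// execute_clone is documented in ops_executer.h. As a worked example with
// hypothetical values: for snapc = {seq=5, snaps={5, 4, 3, 2, 1}} and
// initial_snapset.seq = 2, the cloned_snaps computed below are {5, 4, 3},
// i.e. every snap newer than the last write recorded in the snapset, and
// the clone itself is created at coid.snap = snapc.seq = 5.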
std::unique_ptr<OpsExecuter::CloningContext> OpsExecuter::execute_clone(
const SnapContext& snapc,
const ObjectState& initial_obs,
const SnapSet& initial_snapset,
PGBackend& backend,
ceph::os::Transaction& txn)
{
const hobject_t& soid = initial_obs.oi.soid;
logger().debug("{} {} snapset={} snapc={}",
__func__, soid,
initial_snapset, snapc);
auto cloning_ctx = std::make_unique<CloningContext>();
cloning_ctx->new_snapset = initial_snapset;
  // the clone object's snap field is set to the seq of the SnapContext
// at its creation.
hobject_t coid = soid;
coid.snap = snapc.seq;
// existing snaps are stored in descending order in snapc,
// cloned_snaps vector will hold all the snaps stored until snapset.seq
const std::vector<snapid_t> cloned_snaps = [&] {
auto last = std::find_if(
std::begin(snapc.snaps), std::end(snapc.snaps),
[&](snapid_t snap_id) { return snap_id <= initial_snapset.seq; });
return std::vector<snapid_t>{std::begin(snapc.snaps), last};
}();
auto [snap_oi, clone_obc] = prepare_clone(coid);
// make clone
backend.clone(snap_oi, initial_obs, clone_obc->obs, txn);
delta_stats.num_objects++;
if (snap_oi.is_omap()) {
delta_stats.num_objects_omap++;
}
delta_stats.num_object_clones++;
  // the new snapset will become obc's ssc->snapset (see CloningContext::apply_to)
cloning_ctx->new_snapset.clones.push_back(coid.snap);
cloning_ctx->new_snapset.clone_size[coid.snap] = initial_obs.oi.size;
cloning_ctx->new_snapset.clone_snaps[coid.snap] = cloned_snaps;
// clone_overlap should contain an entry for each clone
// (an empty interval_set if there is no overlap)
auto &overlap = cloning_ctx->new_snapset.clone_overlap[coid.snap];
if (initial_obs.oi.size) {
overlap.insert(0, initial_obs.oi.size);
}
// log clone
logger().debug("cloning v {} to {} v {} snaps={} snapset={}",
initial_obs.oi.version, coid,
osd_op_params->at_version, cloned_snaps, cloning_ctx->new_snapset);
cloning_ctx->log_entry = {
pg_log_entry_t::CLONE,
coid,
snap_oi.version,
initial_obs.oi.version,
initial_obs.oi.user_version,
osd_reqid_t(),
initial_obs.oi.mtime, // will be replaced in `apply_to()`
0
};
encode(cloned_snaps, cloning_ctx->log_entry.snaps);
// TODO: update most recent clone_overlap and usage stats
return cloning_ctx;
}
void OpsExecuter::CloningContext::apply_to(
std::vector<pg_log_entry_t>& log_entries,
ObjectContext& processed_obc) &&
{
log_entry.mtime = processed_obc.obs.oi.mtime;
log_entries.emplace_back(std::move(log_entry));
processed_obc.ssc->snapset = std::move(new_snapset);
}
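// flush_clone_metadata: if execute_clone prepared a clone, this is where
// its log entry and the new SnapSet are applied to the obc (see
// CloningContext::apply_to) and where the clone -> snaps mapping is
// recorded through snap_map_clone. It also bumps the snapset's seq (and
// clears its snaps vector) when the writer's snap context is newer.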
OpsExecuter::interruptible_future<std::vector<pg_log_entry_t>>
OpsExecuter::flush_clone_metadata(
std::vector<pg_log_entry_t>&& log_entries,
SnapMapper& snap_mapper,
OSDriver& osdriver,
ceph::os::Transaction& txn)
{
assert(!txn.empty());
auto maybe_snap_mapped = interruptor::now();
if (cloning_ctx) {
std::move(*cloning_ctx).apply_to(log_entries, *obc);
const auto& coid = log_entries.back().soid;
const auto& cloned_snaps = obc->ssc->snapset.clone_snaps[coid.snap];
maybe_snap_mapped = snap_map_clone(
coid,
std::set<snapid_t>{std::begin(cloned_snaps), std::end(cloned_snaps)},
snap_mapper,
osdriver,
txn);
}
if (snapc.seq > obc->ssc->snapset.seq) {
// update snapset with latest snap context
obc->ssc->snapset.seq = snapc.seq;
obc->ssc->snapset.snaps.clear();
}
logger().debug("{} done, initial snapset={}, new snapset={}",
__func__, obc->obs.oi.soid, obc->ssc->snapset);
return std::move(
maybe_snap_mapped
).then_interruptible([log_entries=std::move(log_entries)]() mutable {
return interruptor::make_ready_future<std::vector<pg_log_entry_t>>(
std::move(log_entries));
});
}
// TODO: make this static
std::pair<object_info_t, ObjectContextRef> OpsExecuter::prepare_clone(
const hobject_t& coid)
{
object_info_t static_snap_oi(coid);
static_snap_oi.version = pg->next_version();
static_snap_oi.prior_version = obc->obs.oi.version;
static_snap_oi.copy_user_bits(obc->obs.oi);
if (static_snap_oi.is_whiteout()) {
// clone shouldn't be marked as whiteout
static_snap_oi.clear_flag(object_info_t::FLAG_WHITEOUT);
}
ObjectContextRef clone_obc;
if (pg->is_primary()) {
// lookup_or_create
auto [c_obc, existed] =
pg->obc_registry.get_cached_obc(std::move(coid));
assert(!existed);
c_obc->obs.oi = static_snap_oi;
c_obc->obs.exists = true;
c_obc->ssc = obc->ssc;
logger().debug("clone_obc: {}", c_obc->obs.oi);
clone_obc = std::move(c_obc);
}
return std::make_pair(std::move(static_snap_oi), std::move(clone_obc));
}
void OpsExecuter::apply_stats()
{
pg->get_peering_state().apply_op_stats(get_target(), delta_stats);
pg->publish_stats_to_osd();
}
OpsExecuter::OpsExecuter(Ref<PG> pg,
ObjectContextRef _obc,
const OpInfo& op_info,
abstracted_msg_t&& msg,
crimson::net::ConnectionRef conn,
const SnapContext& _snapc)
: pg(std::move(pg)),
obc(std::move(_obc)),
op_info(op_info),
msg(std::move(msg)),
conn(conn),
snapc(_snapc)
{
if (op_info.may_write() && should_clone(*obc, snapc)) {
do_write_op([this](auto& backend, auto& os, auto& txn) {
cloning_ctx = execute_clone(std::as_const(snapc),
std::as_const(obc->obs),
std::as_const(obc->ssc->snapset),
backend,
txn);
});
}
}
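// get_pgls_filter: constructs the optional filter used by the PGLS_FILTER
// and PGNLS_FILTER ops. The type is either "plain" (PGLSPlainFilter) or
// "<class>.<filter>", in which case the class is opened through
// ClassHandler and asked to build the named filter; the resulting filter is
// then initialized from the rest of the request payload via init().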
static inline std::unique_ptr<const PGLSFilter> get_pgls_filter(
const std::string& type,
bufferlist::const_iterator& iter)
{
// storing non-const PGLSFilter for the sake of ::init()
std::unique_ptr<PGLSFilter> filter;
if (type.compare("plain") == 0) {
filter = std::make_unique<PGLSPlainFilter>();
} else {
std::size_t dot = type.find(".");
if (dot == type.npos || dot == 0 || dot == type.size() - 1) {
throw crimson::osd::invalid_argument{};
}
const std::string class_name = type.substr(0, dot);
const std::string filter_name = type.substr(dot + 1);
ClassHandler::ClassData *cls = nullptr;
int r = ClassHandler::get_instance().open_class(class_name, &cls);
if (r != 0) {
logger().warn("can't open class {}: {}", class_name, cpp_strerror(r));
if (r == -EPERM) {
        // propagate permission error
throw crimson::osd::permission_denied{};
} else {
throw crimson::osd::invalid_argument{};
}
} else {
ceph_assert(cls);
}
ClassHandler::ClassFilter * const class_filter = cls->get_filter(filter_name);
if (class_filter == nullptr) {
logger().warn("can't find filter {} in class {}", filter_name, class_name);
throw crimson::osd::invalid_argument{};
}
filter.reset(class_filter->fn());
if (!filter) {
// Object classes are obliged to return us something, but let's
// give an error rather than asserting out.
logger().warn("buggy class {} failed to construct filter {}",
class_name, filter_name);
throw crimson::osd::invalid_argument{};
}
}
ceph_assert(filter);
int r = filter->init(iter);
if (r < 0) {
logger().warn("error initializing filter {}: {}", type, cpp_strerror(r));
throw crimson::osd::invalid_argument{};
}
// successfully constructed and initialized, return it.
return filter;
}
static PG::interruptible_future<hobject_t> pgls_filter(
const PGLSFilter& filter,
const PGBackend& backend,
const hobject_t& sobj)
{
if (const auto xattr = filter.get_xattr(); !xattr.empty()) {
logger().debug("pgls_filter: filter is interested in xattr={} for obj={}",
xattr, sobj);
return backend.getxattr(sobj, std::move(xattr)).safe_then_interruptible(
[&filter, sobj] (ceph::bufferlist val) {
logger().debug("pgls_filter: got xvalue for obj={}", sobj);
const bool filtered = filter.filter(sobj, val);
return seastar::make_ready_future<hobject_t>(filtered ? sobj : hobject_t{});
}, PGBackend::get_attr_errorator::all_same_way([&filter, sobj] {
logger().debug("pgls_filter: got error for obj={}", sobj);
if (filter.reject_empty_xattr()) {
return seastar::make_ready_future<hobject_t>();
}
ceph::bufferlist val;
const bool filtered = filter.filter(sobj, val);
return seastar::make_ready_future<hobject_t>(filtered ? sobj : hobject_t{});
}));
} else {
ceph::bufferlist empty_lvalue_bl;
const bool filtered = filter.filter(sobj, empty_lvalue_bl);
return seastar::make_ready_future<hobject_t>(filtered ? sobj : hobject_t{});
}
}
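// do_pgnls_common: the first stage lists objects starting at lower_bound,
// drops entries that are not in the requested namespace (objects in the
// internal hit-set namespace are always skipped, and librados::all_nspaces
// matches everything) and applies the optional PGLS filter per object; the
// second stage packs the surviving entries into a pg_nls_response_t whose
// handle is either the next object or pg_end once the listing is exhausted.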
static PG::interruptible_future<ceph::bufferlist> do_pgnls_common(
const hobject_t& pg_start,
const hobject_t& pg_end,
const PGBackend& backend,
const hobject_t& lower_bound,
const std::string& nspace,
const uint64_t limit,
const PGLSFilter* const filter)
{
if (!(lower_bound.is_min() ||
lower_bound.is_max() ||
(lower_bound >= pg_start && lower_bound < pg_end))) {
// this should only happen with a buggy client.
throw std::invalid_argument("outside of PG bounds");
}
return backend.list_objects(lower_bound, limit).then_interruptible(
[&backend, filter, nspace](auto&& ret)
-> PG::interruptible_future<std::tuple<std::vector<hobject_t>, hobject_t>> {
auto& [objects, next] = ret;
auto in_my_namespace = [&nspace](const hobject_t& obj) {
using crimson::common::local_conf;
if (obj.get_namespace() == local_conf()->osd_hit_set_namespace) {
return false;
} else if (nspace == librados::all_nspaces) {
return true;
} else {
return obj.get_namespace() == nspace;
}
};
auto to_pglsed = [&backend, filter] (const hobject_t& obj)
-> PG::interruptible_future<hobject_t> {
// this transformation looks costly. However, I don't have any
// reason to think PGLS* operations are critical for, let's say,
// general performance.
//
// from tchaikov: "another way is to use seastar::map_reduce(),
// to 1) save the effort to filter the already filtered objects
// 2) avoid the space to keep the tuple<bool, object> even if
// the object is filtered out".
if (filter) {
return pgls_filter(*filter, backend, obj);
} else {
return seastar::make_ready_future<hobject_t>(obj);
}
};
auto range = objects | boost::adaptors::filtered(in_my_namespace)
| boost::adaptors::transformed(to_pglsed);
logger().debug("do_pgnls_common: finishing the 1st stage of pgls");
return seastar::when_all_succeed(std::begin(range),
std::end(range)).then(
[next=std::move(next)] (auto items) mutable {
        // the sole purpose of this chaining is to pass `next` to the 2nd
        // stage along with the items
logger().debug("do_pgnls_common: 1st done");
return seastar::make_ready_future<
std::tuple<std::vector<hobject_t>, hobject_t>>(
std::move(items), std::move(next));
});
}).then_interruptible(
[pg_end] (auto&& ret) {
auto& [items, next] = ret;
auto is_matched = [] (const auto& obj) {
return !obj.is_min();
};
auto to_entry = [] (const auto& obj) {
return librados::ListObjectImpl{
obj.get_namespace(), obj.oid.name, obj.get_key()
};
};
pg_nls_response_t response;
boost::push_back(response.entries, items | boost::adaptors::filtered(is_matched)
| boost::adaptors::transformed(to_entry));
response.handle = next.is_max() ? pg_end : next;
ceph::bufferlist out;
encode(response, out);
logger().debug("do_pgnls_common: response.entries.size()= {}",
response.entries.size());
return seastar::make_ready_future<ceph::bufferlist>(std::move(out));
});
}
static PG::interruptible_future<> do_pgnls(
const PG& pg,
const std::string& nspace,
OSDOp& osd_op)
{
hobject_t lower_bound;
try {
ceph::decode(lower_bound, osd_op.indata);
} catch (const buffer::error&) {
throw std::invalid_argument("unable to decode PGNLS handle");
}
const auto pg_start = pg.get_pgid().pgid.get_hobj_start();
const auto pg_end = \
pg.get_pgid().pgid.get_hobj_end(pg.get_pgpool().info.get_pg_num());
return do_pgnls_common(pg_start,
pg_end,
pg.get_backend(),
lower_bound,
nspace,
osd_op.op.pgls.count,
nullptr /* no filter */)
.then_interruptible([&osd_op](bufferlist bl) {
osd_op.outdata = std::move(bl);
return seastar::now();
});
}
static PG::interruptible_future<> do_pgnls_filtered(
const PG& pg,
const std::string& nspace,
OSDOp& osd_op)
{
std::string cname, mname, type;
auto bp = osd_op.indata.cbegin();
try {
ceph::decode(cname, bp);
ceph::decode(mname, bp);
ceph::decode(type, bp);
} catch (const buffer::error&) {
throw crimson::osd::invalid_argument{};
}
auto filter = get_pgls_filter(type, bp);
hobject_t lower_bound;
try {
lower_bound.decode(bp);
} catch (const buffer::error&) {
throw std::invalid_argument("unable to decode PGNLS_FILTER description");
}
logger().debug("{}: cname={}, mname={}, type={}, lower_bound={}, filter={}",
__func__, cname, mname, type, lower_bound,
static_cast<const void*>(filter.get()));
return seastar::do_with(std::move(filter),
[&, lower_bound=std::move(lower_bound)](auto&& filter) {
const auto pg_start = pg.get_pgid().pgid.get_hobj_start();
const auto pg_end = pg.get_pgid().pgid.get_hobj_end(pg.get_pgpool().info.get_pg_num());
return do_pgnls_common(pg_start,
pg_end,
pg.get_backend(),
lower_bound,
nspace,
osd_op.op.pgls.count,
filter.get())
.then_interruptible([&osd_op](bufferlist bl) {
osd_op.outdata = std::move(bl);
return seastar::now();
});
});
}
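// do_pgls_common mirrors do_pgnls_common for the older PGLS family of ops:
// it only matches the exact namespace of the request and encodes a
// pg_ls_response_t (object id + locator key) instead of the namespace-aware
// pg_nls_response_t used above.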
static PG::interruptible_future<ceph::bufferlist> do_pgls_common(
const hobject_t& pg_start,
const hobject_t& pg_end,
const PGBackend& backend,
const hobject_t& lower_bound,
const std::string& nspace,
const uint64_t limit,
const PGLSFilter* const filter)
{
if (!(lower_bound.is_min() ||
lower_bound.is_max() ||
(lower_bound >= pg_start && lower_bound < pg_end))) {
// this should only happen with a buggy client.
throw std::invalid_argument("outside of PG bounds");
}
using entries_t = decltype(pg_ls_response_t::entries);
return backend.list_objects(lower_bound, limit).then_interruptible(
[&backend, filter, nspace](auto&& ret) {
auto& [objects, next] = ret;
return PG::interruptor::when_all(
PG::interruptor::map_reduce(std::move(objects),
[&backend, filter, nspace](const hobject_t& obj)
-> PG::interruptible_future<hobject_t>{
if (obj.get_namespace() == nspace) {
if (filter) {
return pgls_filter(*filter, backend, obj);
} else {
return seastar::make_ready_future<hobject_t>(obj);
}
} else {
return seastar::make_ready_future<hobject_t>();
}
},
entries_t{},
[](entries_t entries, hobject_t obj) {
if (!obj.is_min()) {
entries.emplace_back(obj.oid, obj.get_key());
}
return entries;
}),
seastar::make_ready_future<hobject_t>(next));
}).then_interruptible([pg_end](auto&& ret) {
auto entries = std::move(std::get<0>(ret).get0());
auto next = std::move(std::get<1>(ret).get0());
pg_ls_response_t response;
response.handle = next.is_max() ? pg_end : next;
response.entries = std::move(entries);
ceph::bufferlist out;
encode(response, out);
logger().debug("{}: response.entries.size()=",
__func__, response.entries.size());
return seastar::make_ready_future<ceph::bufferlist>(std::move(out));
});
}
static PG::interruptible_future<> do_pgls(
const PG& pg,
const std::string& nspace,
OSDOp& osd_op)
{
hobject_t lower_bound;
auto bp = osd_op.indata.cbegin();
try {
lower_bound.decode(bp);
} catch (const buffer::error&) {
throw std::invalid_argument{"unable to decode PGLS handle"};
}
const auto pg_start = pg.get_pgid().pgid.get_hobj_start();
const auto pg_end =
pg.get_pgid().pgid.get_hobj_end(pg.get_pgpool().info.get_pg_num());
return do_pgls_common(pg_start,
pg_end,
pg.get_backend(),
lower_bound,
nspace,
osd_op.op.pgls.count,
nullptr /* no filter */)
.then_interruptible([&osd_op](bufferlist bl) {
osd_op.outdata = std::move(bl);
return seastar::now();
});
}
static PG::interruptible_future<> do_pgls_filtered(
const PG& pg,
const std::string& nspace,
OSDOp& osd_op)
{
std::string cname, mname, type;
auto bp = osd_op.indata.cbegin();
try {
ceph::decode(cname, bp);
ceph::decode(mname, bp);
ceph::decode(type, bp);
} catch (const buffer::error&) {
throw crimson::osd::invalid_argument{};
}
auto filter = get_pgls_filter(type, bp);
hobject_t lower_bound;
try {
lower_bound.decode(bp);
} catch (const buffer::error&) {
throw std::invalid_argument("unable to decode PGLS_FILTER description");
}
logger().debug("{}: cname={}, mname={}, type={}, lower_bound={}, filter={}",
__func__, cname, mname, type, lower_bound,
static_cast<const void*>(filter.get()));
return seastar::do_with(std::move(filter),
[&, lower_bound=std::move(lower_bound)](auto&& filter) {
const auto pg_start = pg.get_pgid().pgid.get_hobj_start();
const auto pg_end = pg.get_pgid().pgid.get_hobj_end(pg.get_pgpool().info.get_pg_num());
return do_pgls_common(pg_start,
pg_end,
pg.get_backend(),
lower_bound,
nspace,
osd_op.op.pgls.count,
filter.get())
.then_interruptible([&osd_op](bufferlist bl) {
osd_op.outdata = std::move(bl);
return seastar::now();
});
});
}
PgOpsExecuter::interruptible_future<>
PgOpsExecuter::execute_op(OSDOp& osd_op)
{
logger().warn("handling op {}", ceph_osd_op_name(osd_op.op.op));
switch (const ceph_osd_op& op = osd_op.op; op.op) {
case CEPH_OSD_OP_PGLS:
return do_pgls(pg, nspace, osd_op);
case CEPH_OSD_OP_PGLS_FILTER:
return do_pgls_filtered(pg, nspace, osd_op);
case CEPH_OSD_OP_PGNLS:
return do_pgnls(pg, nspace, osd_op);
case CEPH_OSD_OP_PGNLS_FILTER:
return do_pgnls_filtered(pg, nspace, osd_op);
default:
logger().warn("unknown op {}", ceph_osd_op_name(op.op));
throw std::runtime_error(
fmt::format("op '{}' not supported", ceph_osd_op_name(op.op)));
}
}
} // namespace crimson::osd
| 50,976 | 33.867989 | 93 | cc |
null | ceph-main/src/crimson/osd/ops_executer.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <memory>
#include <type_traits>
#include <utility>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <fmt/os.h>
#include <seastar/core/chunked_fifo.hh>
#include <seastar/core/future.hh>
#include <seastar/core/shared_future.hh>
#include <seastar/core/shared_ptr.hh>
#include "common/dout.h"
#include "common/map_cacher.hpp"
#include "common/static_ptr.h"
#include "messages/MOSDOp.h"
#include "os/Transaction.h"
#include "osd/osd_types.h"
#include "crimson/common/errorator.h"
#include "crimson/common/interruptible_future.h"
#include "crimson/common/type_helpers.h"
#include "crimson/osd/osd_operations/client_request.h"
#include "crimson/osd/osd_operations/peering_event.h"
#include "crimson/osd/pg_backend.h"
#include "crimson/osd/pg_interval_interrupt_condition.h"
#include "crimson/osd/shard_services.h"
struct ObjectState;
struct OSDOp;
class OSDriver;
class SnapMapper;
namespace crimson::osd {
class PG;
// OpsExecuter -- a class for executing ops targeting a certain object.
class OpsExecuter : public seastar::enable_lw_shared_from_this<OpsExecuter> {
friend class SnapTrimObjSubEvent;
using call_errorator = crimson::errorator<
crimson::stateful_ec,
crimson::ct_error::enoent,
crimson::ct_error::eexist,
crimson::ct_error::enospc,
crimson::ct_error::edquot,
crimson::ct_error::cmp_fail,
crimson::ct_error::eagain,
crimson::ct_error::invarg,
crimson::ct_error::erange,
crimson::ct_error::ecanceled,
crimson::ct_error::enametoolong,
crimson::ct_error::permission_denied,
crimson::ct_error::operation_not_supported,
crimson::ct_error::input_output_error,
crimson::ct_error::value_too_large,
crimson::ct_error::file_too_large>;
using read_errorator = PGBackend::read_errorator;
using write_ertr = PGBackend::write_ertr;
using get_attr_errorator = PGBackend::get_attr_errorator;
using watch_errorator = crimson::errorator<
crimson::ct_error::enoent,
crimson::ct_error::invarg,
crimson::ct_error::not_connected,
crimson::ct_error::timed_out>;
using call_ierrorator =
::crimson::interruptible::interruptible_errorator<
IOInterruptCondition, call_errorator>;
using read_ierrorator =
::crimson::interruptible::interruptible_errorator<
IOInterruptCondition, read_errorator>;
using write_iertr =
::crimson::interruptible::interruptible_errorator<
IOInterruptCondition, write_ertr>;
using get_attr_ierrorator =
::crimson::interruptible::interruptible_errorator<
IOInterruptCondition, get_attr_errorator>;
using watch_ierrorator =
::crimson::interruptible::interruptible_errorator<
IOInterruptCondition, watch_errorator>;
template <typename Errorator, typename T = void>
using interruptible_errorated_future =
::crimson::interruptible::interruptible_errorated_future<
IOInterruptCondition, Errorator, T>;
using interruptor =
::crimson::interruptible::interruptor<IOInterruptCondition>;
template <typename T = void>
using interruptible_future =
::crimson::interruptible::interruptible_future<
IOInterruptCondition, T>;
public:
// ExecutableMessage -- an interface class to allow using OpsExecuter
// with other message types than just the `MOSDOp`. The type erasure
// happens in the ctor of `OpsExecuter`.
struct ExecutableMessage {
virtual osd_reqid_t get_reqid() const = 0;
virtual utime_t get_mtime() const = 0;
virtual epoch_t get_map_epoch() const = 0;
virtual entity_inst_t get_orig_source_inst() const = 0;
virtual uint64_t get_features() const = 0;
virtual bool has_flag(uint32_t flag) const = 0;
virtual entity_name_t get_source() const = 0;
};
template <class ImplT>
class ExecutableMessagePimpl final : ExecutableMessage {
const ImplT* pimpl;
// In crimson, conn is independently maintained outside Message.
const crimson::net::ConnectionRef conn;
public:
ExecutableMessagePimpl(const ImplT* pimpl,
const crimson::net::ConnectionRef conn)
: pimpl(pimpl), conn(conn) {
}
osd_reqid_t get_reqid() const final {
return pimpl->get_reqid();
}
bool has_flag(uint32_t flag) const final {
return pimpl->has_flag(flag);
}
utime_t get_mtime() const final {
return pimpl->get_mtime();
};
epoch_t get_map_epoch() const final {
return pimpl->get_map_epoch();
}
entity_inst_t get_orig_source_inst() const final {
      // We can't get the original source address from the message
      // since, in crimson, the connection is maintained
      // outside of the Message.
return entity_inst_t(get_source(), conn->get_peer_addr());
}
entity_name_t get_source() const final {
return pimpl->get_source();
}
uint64_t get_features() const final {
return pimpl->get_features();
}
};
  // because OpsExecuter is a pretty heavy-weight object, we want to ensure
  // it's not copied nor even moved by accident. Performance is the sole
  // reason for prohibiting that.
OpsExecuter(OpsExecuter&&) = delete;
OpsExecuter(const OpsExecuter&) = delete;
using osd_op_errorator = crimson::compound_errorator_t<
call_errorator,
read_errorator,
write_ertr,
get_attr_errorator,
watch_errorator,
PGBackend::stat_errorator>;
using osd_op_ierrorator =
::crimson::interruptible::interruptible_errorator<
IOInterruptCondition, osd_op_errorator>;
object_stat_sum_t delta_stats;
private:
  // an operation can be divided into two stages: the main one and an
  // effect-exposing one. The former is performed immediately on call to
  // `do_osd_op()` while the latter on `submit_changes()`, after
  // successfully processing the main stages of all involved operations.
  // When any stage fails, none of the scheduled effect-exposing stages
  // will be executed.
  // When an operation requires this division, some variant of
  // `with_effect()` should be used.
struct effect_t {
// an effect can affect PG, i.e. create a watch timeout
virtual osd_op_errorator::future<> execute(Ref<PG> pg) = 0;
virtual ~effect_t() = default;
};
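  // A rough usage sketch of the with_effect_on_obc() pattern declared
  // below (the context struct and its members are made up for
  // illustration):
  //   struct ctx_t { /* state shared by both stages */ };
  //   with_effect_on_obc(ctx_t{},
  //     [&](auto& ctx) {
  //       // main stage: mutate the ObjectState/transaction right away
  //       return seastar::now();
  //     },
  //     [](auto&& ctx, ObjectContextRef obc, Ref<PG> pg) {
  //       // effect stage: deferred, see effect_t above
  //       return seastar::now();
  //     });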
Ref<PG> pg; // for the sake of object class
ObjectContextRef obc;
const OpInfo& op_info;
using abstracted_msg_t =
ceph::static_ptr<ExecutableMessage,
sizeof(ExecutableMessagePimpl<void>)>;
abstracted_msg_t msg;
crimson::net::ConnectionRef conn;
std::optional<osd_op_params_t> osd_op_params;
bool user_modify = false;
ceph::os::Transaction txn;
size_t num_read = 0; ///< count read ops
size_t num_write = 0; ///< count update ops
SnapContext snapc; // writer snap context
struct CloningContext {
SnapSet new_snapset;
pg_log_entry_t log_entry;
void apply_to(
std::vector<pg_log_entry_t>& log_entries,
ObjectContext& processed_obc) &&;
};
std::unique_ptr<CloningContext> cloning_ctx;
/**
* execute_clone
*
* If snapc contains a snap which occurred logically after the last write
   * seen by this object (see OpsExecuter::should_clone()), we first need to
   * make a clone of the object at its current state. execute_clone primes
   * txn with that clone operation and returns an
   * OpsExecuter::CloningContext which will allow us to fill in the corresponding
* metadata and log_entries once the operations have been processed.
*
* Note that this strategy differs from classic, which instead performs this
* work at the end and reorders the transaction. See
* PrimaryLogPG::make_writeable
*
* @param snapc [in] snapc for this operation (from the client if from the
* client, from the pool otherwise)
* @param initial_obs [in] objectstate for the object at operation start
* @param initial_snapset [in] snapset for the object at operation start
* @param backend [in,out] interface for generating mutations
* @param txn [out] transaction for the operation
*/
std::unique_ptr<CloningContext> execute_clone(
const SnapContext& snapc,
const ObjectState& initial_obs,
const SnapSet& initial_snapset,
PGBackend& backend,
ceph::os::Transaction& txn);
/**
* should_clone
*
* Predicate returning whether a user write with snap context snapc
* contains a snap which occurred prior to the most recent write
* on the object reflected in initial_obc.
*
* @param initial_obc [in] obc for object to be mutated
* @param snapc [in] snapc for this operation (from the client if from the
* client, from the pool otherwise)
*/
static bool should_clone(
const ObjectContext& initial_obc,
const SnapContext& snapc) {
// clone?
return initial_obc.obs.exists // both nominally and...
&& !initial_obc.obs.oi.is_whiteout() // ... logically exists
&& snapc.snaps.size() // there are snaps
&& snapc.snaps[0] > initial_obc.ssc->snapset.seq; // existing obj is old
}
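  // A minimal worked example of the predicate above (numbers are purely
  // illustrative): if the object's snapset.seq is 5 and a user write arrives
  // with snapc.snaps = [7, 3] (newest snap first), then snaps[0] == 7 > 5 and
  // the write has to be preceded by a clone preserving the current object
  // state; with snapc.snaps = [5, 3] the comparison fails and no clone is
  // made.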
interruptible_future<std::vector<pg_log_entry_t>> flush_clone_metadata(
std::vector<pg_log_entry_t>&& log_entries,
SnapMapper& snap_mapper,
OSDriver& osdriver,
ceph::os::Transaction& txn);
static interruptible_future<> snap_map_remove(
const hobject_t& soid,
SnapMapper& snap_mapper,
OSDriver& osdriver,
ceph::os::Transaction& txn);
static interruptible_future<> snap_map_modify(
const hobject_t& soid,
const std::set<snapid_t>& snaps,
SnapMapper& snap_mapper,
OSDriver& osdriver,
ceph::os::Transaction& txn);
static interruptible_future<> snap_map_clone(
const hobject_t& soid,
const std::set<snapid_t>& snaps,
SnapMapper& snap_mapper,
OSDriver& osdriver,
ceph::os::Transaction& txn);
  // this gizmo could be wrapped in std::optional for the sake of lazy
  // initialization. We don't need it for ops that don't have effects.
// TODO: verify the init overhead of chunked_fifo
seastar::chunked_fifo<std::unique_ptr<effect_t>> op_effects;
template <class Context, class MainFunc, class EffectFunc>
auto with_effect_on_obc(
Context&& ctx,
MainFunc&& main_func,
EffectFunc&& effect_func);
call_ierrorator::future<> do_op_call(OSDOp& osd_op);
watch_ierrorator::future<> do_op_watch(
OSDOp& osd_op,
ObjectState& os,
ceph::os::Transaction& txn);
watch_ierrorator::future<> do_op_watch_subop_watch(
OSDOp& osd_op,
ObjectState& os,
ceph::os::Transaction& txn);
watch_ierrorator::future<> do_op_watch_subop_reconnect(
OSDOp& osd_op,
ObjectState& os,
ceph::os::Transaction& txn);
watch_ierrorator::future<> do_op_watch_subop_unwatch(
OSDOp& osd_op,
ObjectState& os,
ceph::os::Transaction& txn);
watch_ierrorator::future<> do_op_watch_subop_ping(
OSDOp& osd_op,
ObjectState& os,
ceph::os::Transaction& txn);
watch_ierrorator::future<> do_op_list_watchers(
OSDOp& osd_op,
const ObjectState& os);
watch_ierrorator::future<> do_op_notify(
OSDOp& osd_op,
const ObjectState& os);
watch_ierrorator::future<> do_op_notify_ack(
OSDOp& osd_op,
const ObjectState& os);
call_errorator::future<> do_assert_ver(
OSDOp& osd_op,
const ObjectState& os);
using list_snaps_ertr = read_errorator::extend<
crimson::ct_error::invarg>;
using list_snaps_iertr = ::crimson::interruptible::interruptible_errorator<
::crimson::osd::IOInterruptCondition,
list_snaps_ertr>;
list_snaps_iertr::future<> do_list_snaps(
OSDOp& osd_op,
const ObjectState& os,
const SnapSet& ss);
template <class Func>
auto do_const_op(Func&& f);
template <class Func>
auto do_read_op(Func&& f) {
++num_read;
// TODO: pass backend as read-only
return do_const_op(std::forward<Func>(f));
}
template <class Func>
auto do_snapset_op(Func&& f) {
++num_read;
return std::invoke(
std::forward<Func>(f),
std::as_const(obc->obs),
std::as_const(obc->ssc->snapset));
}
enum class modified_by {
user,
sys,
};
template <class Func>
auto do_write_op(Func&& f, modified_by m = modified_by::user);
decltype(auto) dont_do_legacy_op() {
return crimson::ct_error::operation_not_supported::make();
}
interruptible_errorated_future<osd_op_errorator>
do_execute_op(OSDOp& osd_op);
OpsExecuter(Ref<PG> pg,
ObjectContextRef obc,
const OpInfo& op_info,
abstracted_msg_t&& msg,
crimson::net::ConnectionRef conn,
const SnapContext& snapc);
public:
template <class MsgT>
OpsExecuter(Ref<PG> pg,
ObjectContextRef obc,
const OpInfo& op_info,
const MsgT& msg,
crimson::net::ConnectionRef conn,
const SnapContext& snapc)
: OpsExecuter(
std::move(pg),
std::move(obc),
op_info,
abstracted_msg_t{
std::in_place_type_t<ExecutableMessagePimpl<MsgT>>{},
&msg,
conn},
conn,
snapc) {
}
template <class Func>
struct RollbackHelper;
template <class Func>
RollbackHelper<Func> create_rollbacker(Func&& func);
interruptible_errorated_future<osd_op_errorator>
execute_op(OSDOp& osd_op);
using rep_op_fut_tuple =
std::tuple<interruptible_future<>, osd_op_ierrorator::future<>>;
using rep_op_fut_t =
interruptible_future<rep_op_fut_tuple>;
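  // flush_changes_n_do_ops_effects() hands the accumulated transaction,
  // together with any clone metadata and log entries, to mut_func and yields
  // a pair of futures: `submitted` and `all_completed`. Judging from the
  // wiring in the definition below, the first tracks submission of the
  // mutation while the second additionally covers running the registered
  // op_effects; the exact completion semantics, however, are those of the
  // supplied mut_func.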
template <typename MutFunc>
rep_op_fut_t flush_changes_n_do_ops_effects(
const std::vector<OSDOp>& ops,
SnapMapper& snap_mapper,
OSDriver& osdriver,
MutFunc&& mut_func) &&;
std::vector<pg_log_entry_t> prepare_transaction(
const std::vector<OSDOp>& ops);
void fill_op_params_bump_pg_version();
ObjectContextRef get_obc() const {
return obc;
}
const object_info_t &get_object_info() const {
return obc->obs.oi;
}
const hobject_t &get_target() const {
return get_object_info().soid;
}
const auto& get_message() const {
return *msg;
}
size_t get_processed_rw_ops_num() const {
return num_read + num_write;
}
uint32_t get_pool_stripe_width() const;
bool has_seen_write() const {
return num_write > 0;
}
object_stat_sum_t& get_stats(){
return delta_stats;
}
version_t get_last_user_version() const;
std::pair<object_info_t, ObjectContextRef> prepare_clone(
const hobject_t& coid);
void apply_stats();
};
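// Typical flow, as suggested by the interface above: a caller constructs an
// OpsExecuter for a single client message, invokes execute_op() for every
// OSDOp carried by that message (the "main" stages), and finally calls
// flush_changes_n_do_ops_effects() on the rvalue executer to persist the
// accumulated transaction and run any registered effects. This is only a
// summary of the contract visible here, not a description of the actual
// callers.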
template <class Context, class MainFunc, class EffectFunc>
auto OpsExecuter::with_effect_on_obc(
Context&& ctx,
MainFunc&& main_func,
EffectFunc&& effect_func)
{
using context_t = std::decay_t<Context>;
  // the language offers implicit conversion to pointer-to-function only
  // for capture-less lambdas. We enforce this restriction due to the fact
  // that `flush_changes()` std::moves many of the executer's parts.
using allowed_effect_func_t =
seastar::future<> (*)(context_t&&, ObjectContextRef, Ref<PG>);
static_assert(std::is_convertible_v<EffectFunc, allowed_effect_func_t>,
"with_effect function is not allowed to capture");
struct task_t final : effect_t {
context_t ctx;
EffectFunc effect_func;
ObjectContextRef obc;
task_t(Context&& ctx, EffectFunc&& effect_func, ObjectContextRef obc)
: ctx(std::move(ctx)),
effect_func(std::move(effect_func)),
obc(std::move(obc)) {
}
osd_op_errorator::future<> execute(Ref<PG> pg) final {
return std::move(effect_func)(std::move(ctx),
std::move(obc),
std::move(pg));
}
};
auto task =
std::make_unique<task_t>(std::move(ctx), std::move(effect_func), obc);
auto& ctx_ref = task->ctx;
op_effects.emplace_back(std::move(task));
return std::forward<MainFunc>(main_func)(ctx_ref);
}
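// A minimal, hypothetical use of with_effect_on_obc() from inside OpsExecuter
// (the context type and lambda bodies are invented for illustration). Note
// that the effect lambda must be capture-less, as enforced by the
// static_assert above:
//
//   struct notify_ctx_t { /* state needed after the main stage */ };
//   with_effect_on_obc(
//     notify_ctx_t{},
//     [&] (auto& ctx) { /* main stage: fill ctx, mutate txn */ },
//     [] (notify_ctx_t&& ctx, ObjectContextRef obc, Ref<PG> pg) {
//       /* effect stage: run via effect_t::execute() once ops complete */
//       return seastar::now();
//     });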
template <typename MutFunc>
OpsExecuter::rep_op_fut_t
OpsExecuter::flush_changes_n_do_ops_effects(
const std::vector<OSDOp>& ops,
SnapMapper& snap_mapper,
OSDriver& osdriver,
MutFunc&& mut_func) &&
{
const bool want_mutate = !txn.empty();
// osd_op_params are instantiated by every wr-like operation.
assert(osd_op_params || !want_mutate);
assert(obc);
rep_op_fut_t maybe_mutated =
interruptor::make_ready_future<rep_op_fut_tuple>(
seastar::now(),
interruptor::make_interruptible(osd_op_errorator::now()));
if (cloning_ctx) {
ceph_assert(want_mutate);
}
if (want_mutate) {
if (user_modify) {
osd_op_params->user_at_version = osd_op_params->at_version.version;
}
maybe_mutated = flush_clone_metadata(
prepare_transaction(ops),
snap_mapper,
osdriver,
txn
).then_interruptible([mut_func=std::move(mut_func),
this](auto&& log_entries) mutable {
auto [submitted, all_completed] =
std::forward<MutFunc>(mut_func)(std::move(txn),
std::move(obc),
std::move(*osd_op_params),
std::move(log_entries));
return interruptor::make_ready_future<rep_op_fut_tuple>(
std::move(submitted),
osd_op_ierrorator::future<>(std::move(all_completed)));
});
}
apply_stats();
if (__builtin_expect(op_effects.empty(), true)) {
return maybe_mutated;
} else {
return maybe_mutated.then_unpack_interruptible(
      // need an extra ref to pg due to apply_stats(), which can be executed
      // after informing the snap mapper
[this, pg=this->pg](auto&& submitted, auto&& all_completed) mutable {
return interruptor::make_ready_future<rep_op_fut_tuple>(
std::move(submitted),
all_completed.safe_then_interruptible([this, pg=std::move(pg)] {
          // let's do the cleanup of `op_effects` in the destructor
return interruptor::do_for_each(op_effects,
[pg=std::move(pg)](auto& op_effect) {
return op_effect->execute(pg);
});
}));
});
}
}
template <class Func>
struct OpsExecuter::RollbackHelper {
interruptible_future<> rollback_obc_if_modified(const std::error_code& e);
ObjectContextRef get_obc() const {
assert(ox);
return ox->obc;
}
seastar::lw_shared_ptr<OpsExecuter> ox;
Func func;
};
template <class Func>
inline OpsExecuter::RollbackHelper<Func>
OpsExecuter::create_rollbacker(Func&& func) {
return {shared_from_this(), std::forward<Func>(func)};
}
template <class Func>
OpsExecuter::interruptible_future<>
OpsExecuter::RollbackHelper<Func>::rollback_obc_if_modified(
const std::error_code& e)
{
  // Oops, an operation has failed. do_osd_ops(), together with
  // OpsExecuter, has already dropped the ObjectStore::Transaction if
  // there was any. However, this is not enough to completely roll
  // back, as we gave OpsExecuter the one and only copy of `obc` we
  // maintain, and we did that for both reading and writing.
  // Now all modifications must be reverted.
  //
  // Let's just reload from the store. Evicting from the shared
  // LRU would be tricky, as the next MOSDOp (the one at the `get_obc`
  // phase) could actually have already finished the lookup. Fortunately,
  // this is supposed to live on cold paths, so performance is not
  // a concern -- simplicity wins.
  //
  // The conditional's purpose is to efficiently handle hot errors
  // which may appear as a result of e.g. CEPH_OSD_OP_CMPXATTR or
  // CEPH_OSD_OP_OMAP_CMP. These are read-like ops and clients
  // typically place them before any write. If OpsExecuter hasn't
  // seen any modifying operation, `obc` is supposed to be kept
  // unchanged.
assert(ox);
const auto need_rollback = ox->has_seen_write();
crimson::get_logger(ceph_subsys_osd).debug(
"{}: object {} got error {}, need_rollback={}",
__func__,
ox->obc->get_oid(),
e,
need_rollback);
return need_rollback ? func(*ox->obc) : interruptor::now();
}
// PgOpsExecuter -- a class for executing ops targeting a certain PG.
class PgOpsExecuter {
template <typename T = void>
using interruptible_future =
::crimson::interruptible::interruptible_future<
IOInterruptCondition, T>;
public:
PgOpsExecuter(const PG& pg, const MOSDOp& msg)
: pg(pg), nspace(msg.get_hobj().nspace) {
}
interruptible_future<> execute_op(OSDOp& osd_op);
private:
const PG& pg;
const std::string& nspace;
};
} // namespace crimson::osd
| 20,498 | 31.538095 | 81 | h |
null | ceph-main/src/crimson/osd/osd.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "osd.h"
#include <sys/utsname.h>
#include <boost/iterator/counting_iterator.hpp>
#include <boost/range/join.hpp>
#include <fmt/format.h>
#include <fmt/os.h>
#include <fmt/ostream.h>
#include <seastar/core/timer.hh>
#include "common/pick_address.h"
#include "include/util.h"
#include "messages/MCommand.h"
#include "messages/MOSDBeacon.h"
#include "messages/MOSDBoot.h"
#include "messages/MOSDMap.h"
#include "messages/MOSDMarkMeDown.h"
#include "messages/MOSDOp.h"
#include "messages/MOSDPeeringOp.h"
#include "messages/MOSDPGCreate2.h"
#include "messages/MOSDPGUpdateLogMissing.h"
#include "messages/MOSDPGUpdateLogMissingReply.h"
#include "messages/MOSDRepOpReply.h"
#include "messages/MOSDScrub2.h"
#include "messages/MPGStats.h"
#include "os/Transaction.h"
#include "osd/ClassHandler.h"
#include "osd/OSDCap.h"
#include "osd/PGPeeringEvent.h"
#include "osd/PeeringState.h"
#include "crimson/admin/osd_admin.h"
#include "crimson/admin/pg_commands.h"
#include "crimson/common/buffer_io.h"
#include "crimson/common/exception.h"
#include "crimson/mon/MonClient.h"
#include "crimson/net/Connection.h"
#include "crimson/net/Messenger.h"
#include "crimson/os/futurized_collection.h"
#include "crimson/os/futurized_store.h"
#include "crimson/osd/heartbeat.h"
#include "crimson/osd/osd_meta.h"
#include "crimson/osd/pg.h"
#include "crimson/osd/pg_backend.h"
#include "crimson/osd/pg_meta.h"
#include "crimson/osd/osd_operations/client_request.h"
#include "crimson/osd/osd_operations/peering_event.h"
#include "crimson/osd/osd_operations/pg_advance_map.h"
#include "crimson/osd/osd_operations/recovery_subrequest.h"
#include "crimson/osd/osd_operations/replicated_request.h"
#include "crimson/osd/osd_operation_external_tracking.h"
#include "crimson/crush/CrushLocation.h"
namespace {
seastar::logger& logger() {
return crimson::get_logger(ceph_subsys_osd);
}
static constexpr int TICK_INTERVAL = 1;
}
using std::make_unique;
using std::map;
using std::pair;
using std::string;
using std::unique_ptr;
using std::vector;
using crimson::common::local_conf;
using crimson::os::FuturizedStore;
namespace crimson::osd {
OSD::OSD(int id, uint32_t nonce,
seastar::abort_source& abort_source,
crimson::os::FuturizedStore& store,
crimson::net::MessengerRef cluster_msgr,
crimson::net::MessengerRef public_msgr,
crimson::net::MessengerRef hb_front_msgr,
crimson::net::MessengerRef hb_back_msgr)
: whoami{id},
nonce{nonce},
abort_source{abort_source},
// do this in background
beacon_timer{[this] { (void)send_beacon(); }},
cluster_msgr{cluster_msgr},
public_msgr{public_msgr},
hb_front_msgr{hb_front_msgr},
hb_back_msgr{hb_back_msgr},
monc{new crimson::mon::Client{*public_msgr, *this}},
mgrc{new crimson::mgr::Client{*public_msgr, *this}},
store{store},
// do this in background -- continuation rearms timer when complete
tick_timer{[this] {
std::ignore = update_heartbeat_peers(
).then([this] {
update_stats();
tick_timer.arm(
std::chrono::seconds(TICK_INTERVAL));
});
}},
asok{seastar::make_lw_shared<crimson::admin::AdminSocket>()},
log_client(cluster_msgr.get(), LogClient::NO_FLAGS),
clog(log_client.create_channel())
{
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
for (auto msgr : {std::ref(cluster_msgr), std::ref(public_msgr),
std::ref(hb_front_msgr), std::ref(hb_back_msgr)}) {
msgr.get()->set_auth_server(monc.get());
msgr.get()->set_auth_client(monc.get());
}
if (local_conf()->osd_open_classes_on_start) {
const int r = ClassHandler::get_instance().open_all_classes();
if (r) {
logger().warn("{} warning: got an error loading one or more classes: {}",
__func__, cpp_strerror(r));
}
}
logger().info("{}: nonce is {}", __func__, nonce);
monc->set_log_client(&log_client);
clog->set_log_to_monitors(true);
}
OSD::~OSD() = default;
namespace {
// Initial features in new superblock.
// Features here are also automatically upgraded
CompatSet get_osd_initial_compat_set()
{
CompatSet::FeatureSet ceph_osd_feature_compat;
CompatSet::FeatureSet ceph_osd_feature_ro_compat;
CompatSet::FeatureSet ceph_osd_feature_incompat;
ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_BASE);
ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_PGINFO);
ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_OLOC);
ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_LEC);
ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_CATEGORIES);
ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_HOBJECTPOOL);
ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_BIGINFO);
ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_LEVELDBINFO);
ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_LEVELDBLOG);
ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_SNAPMAPPER);
ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_HINTS);
ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_PGMETA);
ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_MISSING);
ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_FASTINFO);
ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_RECOVERY_DELETES);
return CompatSet(ceph_osd_feature_compat,
ceph_osd_feature_ro_compat,
ceph_osd_feature_incompat);
}
}
seastar::future<> OSD::open_meta_coll()
{
return store.get_sharded_store().open_collection(
coll_t::meta()
).then([this](auto ch) {
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
get_pg_shard_manager().init_meta_coll(ch, store.get_sharded_store());
return seastar::now();
});
}
seastar::future<OSDMeta> OSD::open_or_create_meta_coll(FuturizedStore &store)
{
return store.get_sharded_store().open_collection(coll_t::meta()).then([&store](auto ch) {
if (!ch) {
return store.get_sharded_store().create_new_collection(
coll_t::meta()
).then([&store](auto ch) {
return OSDMeta(ch, store.get_sharded_store());
});
} else {
return seastar::make_ready_future<OSDMeta>(ch, store.get_sharded_store());
}
});
}
seastar::future<> OSD::mkfs(
FuturizedStore &store,
unsigned whoami,
uuid_d osd_uuid,
uuid_d cluster_fsid,
std::string osdspec_affinity)
{
return store.start().then([&store, osd_uuid] {
return store.mkfs(osd_uuid).handle_error(
crimson::stateful_ec::handle([] (const auto& ec) {
logger().error("error creating empty object store in {}: ({}) {}",
local_conf().get_val<std::string>("osd_data"),
ec.value(), ec.message());
std::exit(EXIT_FAILURE);
}));
}).then([&store] {
return store.mount().handle_error(
crimson::stateful_ec::handle([](const auto& ec) {
logger().error("error mounting object store in {}: ({}) {}",
local_conf().get_val<std::string>("osd_data"),
ec.value(), ec.message());
std::exit(EXIT_FAILURE);
}));
}).then([&store] {
return open_or_create_meta_coll(store);
}).then([&store, whoami, cluster_fsid](auto meta_coll) {
OSDSuperblock superblock;
superblock.cluster_fsid = cluster_fsid;
superblock.osd_fsid = store.get_fsid();
superblock.whoami = whoami;
superblock.compat_features = get_osd_initial_compat_set();
return _write_superblock(
store, std::move(meta_coll), std::move(superblock));
}).then([&store, cluster_fsid] {
return store.write_meta("ceph_fsid", cluster_fsid.to_string());
}).then([&store] {
return store.write_meta("magic", CEPH_OSD_ONDISK_MAGIC);
}).then([&store, whoami] {
return store.write_meta("whoami", std::to_string(whoami));
}).then([&store] {
return _write_key_meta(store);
}).then([&store, osdspec_affinity=std::move(osdspec_affinity)] {
return store.write_meta("osdspec_affinity", osdspec_affinity);
}).then([&store] {
return store.write_meta("ready", "ready");
}).then([&store, whoami, cluster_fsid] {
fmt::print("created object store {} for osd.{} fsid {}\n",
local_conf().get_val<std::string>("osd_data"),
whoami, cluster_fsid);
return store.umount();
}).then([&store] {
return store.stop();
});
}
seastar::future<> OSD::_write_superblock(
FuturizedStore &store,
OSDMeta meta_coll,
OSDSuperblock superblock)
{
return seastar::do_with(
std::move(meta_coll),
std::move(superblock),
[&store](auto &meta_coll, auto &superblock) {
return meta_coll.load_superblock(
).safe_then([&superblock](OSDSuperblock&& sb) {
if (sb.cluster_fsid != superblock.cluster_fsid) {
logger().error("provided cluster fsid {} != superblock's {}",
sb.cluster_fsid, superblock.cluster_fsid);
throw std::invalid_argument("mismatched fsid");
}
if (sb.whoami != superblock.whoami) {
logger().error("provided osd id {} != superblock's {}",
sb.whoami, superblock.whoami);
throw std::invalid_argument("mismatched osd id");
}
}).handle_error(
crimson::ct_error::enoent::handle([&store, &meta_coll, &superblock] {
        // meta collection does not exist yet; create it and store the superblock
logger().info(
"{} writing superblock cluster_fsid {} osd_fsid {}",
"_write_superblock",
superblock.cluster_fsid,
superblock.osd_fsid);
ceph::os::Transaction t;
meta_coll.create(t);
meta_coll.store_superblock(t, superblock);
logger().debug("OSD::_write_superblock: do_transaction...");
return store.get_sharded_store().do_transaction(
meta_coll.collection(),
std::move(t));
}),
crimson::ct_error::assert_all("_write_superbock error")
);
});
}
// this `to_string` sits in the `crimson::osd` namespace, so we don't break
// the language rule about not overloading in `std::`.
static std::string to_string(const seastar::temporary_buffer<char>& temp_buf)
{
return {temp_buf.get(), temp_buf.size()};
}
seastar::future<> OSD::_write_key_meta(FuturizedStore &store)
{
if (auto key = local_conf().get_val<std::string>("key"); !std::empty(key)) {
return store.write_meta("osd_key", key);
} else if (auto keyfile = local_conf().get_val<std::string>("keyfile");
!std::empty(keyfile)) {
return read_file(keyfile).then([&store](const auto& temp_buf) {
// it's on a truly cold path, so don't worry about memcpy.
return store.write_meta("osd_key", to_string(temp_buf));
}).handle_exception([keyfile] (auto ep) {
logger().error("_write_key_meta: failed to handle keyfile {}: {}",
keyfile, ep);
ceph_abort();
});
} else {
return seastar::now();
}
}
namespace {
entity_addrvec_t pick_addresses(int what) {
entity_addrvec_t addrs;
crimson::common::CephContext cct;
// we're interested solely in v2; crimson doesn't do v1
const auto flags = what | CEPH_PICK_ADDRESS_MSGR2;
if (int r = ::pick_addresses(&cct, flags, &addrs, -1); r < 0) {
throw std::runtime_error("failed to pick address");
}
for (auto addr : addrs.v) {
logger().info("picked address {}", addr);
}
return addrs;
}
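// Replaces blank ("unknown") IPs in maybe_unknowns with the IP of a known
// address of the same family, keeping the original nonce, type and port.
// Illustrative example (addresses invented): an unknown v2:0.0.0.0:6802
// entry combined with a known v2:10.0.0.5:6800 yields v2:10.0.0.5:6802 and
// `changed` is reported as true.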
std::pair<entity_addrvec_t, bool>
replace_unknown_addrs(entity_addrvec_t maybe_unknowns,
const entity_addrvec_t& knowns) {
bool changed = false;
auto maybe_replace = [&](entity_addr_t addr) {
if (!addr.is_blank_ip()) {
return addr;
}
for (auto& b : knowns.v) {
if (addr.get_family() == b.get_family()) {
auto a = b;
a.set_nonce(addr.get_nonce());
a.set_type(addr.get_type());
a.set_port(addr.get_port());
changed = true;
return a;
}
}
throw std::runtime_error("failed to replace unknown address");
};
entity_addrvec_t replaced;
std::transform(maybe_unknowns.v.begin(),
maybe_unknowns.v.end(),
std::back_inserter(replaced.v),
maybe_replace);
return {replaced, changed};
}
}
seastar::future<> OSD::start()
{
logger().info("start");
startup_time = ceph::mono_clock::now();
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
return store.start().then([this] {
return pg_to_shard_mappings.start(0, seastar::smp::count
).then([this] {
return osd_singleton_state.start_single(
whoami, std::ref(*cluster_msgr), std::ref(*public_msgr),
std::ref(*monc), std::ref(*mgrc));
}).then([this] {
return osd_states.start();
}).then([this] {
ceph::mono_time startup_time = ceph::mono_clock::now();
return shard_services.start(
std::ref(osd_singleton_state),
std::ref(pg_to_shard_mappings),
whoami,
startup_time,
osd_singleton_state.local().perf,
osd_singleton_state.local().recoverystate_perf,
std::ref(store),
std::ref(osd_states));
}).then([this] {
return shard_dispatchers.start(
std::ref(*this),
std::ref(pg_to_shard_mappings));
});
}).then([this] {
heartbeat.reset(new Heartbeat{
whoami, get_shard_services(),
*monc, *hb_front_msgr, *hb_back_msgr});
return store.mount().handle_error(
crimson::stateful_ec::handle([] (const auto& ec) {
logger().error("error mounting object store in {}: ({}) {}",
local_conf().get_val<std::string>("osd_data"),
ec.value(), ec.message());
std::exit(EXIT_FAILURE);
}));
}).then([this] {
return open_meta_coll();
}).then([this] {
return get_pg_shard_manager().get_meta_coll().load_superblock(
).handle_error(
crimson::ct_error::assert_all("open_meta_coll error")
);
}).then([this](OSDSuperblock&& sb) {
superblock = std::move(sb);
get_pg_shard_manager().set_superblock(superblock);
return get_pg_shard_manager().get_local_map(superblock.current_epoch);
}).then([this](OSDMapService::local_cached_map_t&& map) {
osdmap = make_local_shared_foreign(OSDMapService::local_cached_map_t(map));
return get_pg_shard_manager().update_map(std::move(map));
}).then([this] {
return shard_services.invoke_on_all([this](auto &local_service) {
local_service.local_state.osdmap_gate.got_map(osdmap->get_epoch());
});
}).then([this] {
bind_epoch = osdmap->get_epoch();
return get_pg_shard_manager().load_pgs(store);
}).then([this] {
uint64_t osd_required =
CEPH_FEATURE_UID |
CEPH_FEATURE_PGID64 |
CEPH_FEATURE_OSDENC;
using crimson::net::SocketPolicy;
public_msgr->set_default_policy(SocketPolicy::stateless_server(0));
public_msgr->set_policy(entity_name_t::TYPE_MON,
SocketPolicy::lossy_client(osd_required));
public_msgr->set_policy(entity_name_t::TYPE_MGR,
SocketPolicy::lossy_client(osd_required));
public_msgr->set_policy(entity_name_t::TYPE_OSD,
SocketPolicy::stateless_server(0));
cluster_msgr->set_default_policy(SocketPolicy::stateless_server(0));
cluster_msgr->set_policy(entity_name_t::TYPE_MON,
SocketPolicy::lossy_client(0));
cluster_msgr->set_policy(entity_name_t::TYPE_OSD,
SocketPolicy::lossless_peer(osd_required));
cluster_msgr->set_policy(entity_name_t::TYPE_CLIENT,
SocketPolicy::stateless_server(0));
crimson::net::dispatchers_t dispatchers{this, monc.get(), mgrc.get()};
return seastar::when_all_succeed(
cluster_msgr->bind(pick_addresses(CEPH_PICK_ADDRESS_CLUSTER))
.safe_then([this, dispatchers]() mutable {
return cluster_msgr->start(dispatchers);
}, crimson::net::Messenger::bind_ertr::all_same_way(
[] (const std::error_code& e) {
logger().error("cluster messenger bind(): {}", e);
ceph_abort();
})),
public_msgr->bind(pick_addresses(CEPH_PICK_ADDRESS_PUBLIC))
.safe_then([this, dispatchers]() mutable {
return public_msgr->start(dispatchers);
}, crimson::net::Messenger::bind_ertr::all_same_way(
[] (const std::error_code& e) {
logger().error("public messenger bind(): {}", e);
ceph_abort();
})));
}).then_unpack([this] {
return seastar::when_all_succeed(monc->start(),
mgrc->start());
}).then_unpack([this] {
return _add_me_to_crush();
}).then([this] {
monc->sub_want("osd_pg_creates", last_pg_create_epoch, 0);
monc->sub_want("mgrmap", 0, 0);
monc->sub_want("osdmap", 0, 0);
return monc->renew_subs();
}).then([this] {
if (auto [addrs, changed] =
replace_unknown_addrs(cluster_msgr->get_myaddrs(),
public_msgr->get_myaddrs()); changed) {
logger().debug("replacing unkwnown addrs of cluster messenger");
cluster_msgr->set_myaddrs(addrs);
}
return heartbeat->start(pick_addresses(CEPH_PICK_ADDRESS_PUBLIC),
pick_addresses(CEPH_PICK_ADDRESS_CLUSTER));
}).then([this] {
// create the admin-socket server, and the objects that register
// to handle incoming commands
return start_asok_admin();
}).then([this] {
return log_client.set_fsid(monc->get_fsid());
}).then([this] {
return start_boot();
});
}
seastar::future<> OSD::start_boot()
{
get_pg_shard_manager().set_preboot();
return monc->get_version("osdmap").then([this](auto&& ret) {
auto [newest, oldest] = ret;
return _preboot(oldest, newest);
});
}
seastar::future<> OSD::_preboot(version_t oldest, version_t newest)
{
logger().info("osd.{}: _preboot", whoami);
if (osdmap->get_epoch() == 0) {
logger().info("waiting for initial osdmap");
} else if (osdmap->is_destroyed(whoami)) {
logger().warn("osdmap says I am destroyed");
// provide a small margin so we don't livelock seeing if we
// un-destroyed ourselves.
if (osdmap->get_epoch() > newest - 1) {
throw std::runtime_error("i am destroyed");
}
} else if (osdmap->is_noup(whoami)) {
logger().warn("osdmap NOUP flag is set, waiting for it to clear");
} else if (!osdmap->test_flag(CEPH_OSDMAP_SORTBITWISE)) {
logger().error("osdmap SORTBITWISE OSDMap flag is NOT set; please set it");
} else if (osdmap->require_osd_release < ceph_release_t::octopus) {
logger().error("osdmap require_osd_release < octopus; please upgrade to octopus");
} else if (false) {
// TODO: update mon if current fullness state is different from osdmap
} else if (version_t n = local_conf()->osd_map_message_max;
osdmap->get_epoch() >= oldest - 1 &&
osdmap->get_epoch() + n > newest) {
return _send_boot();
}
// get all the latest maps
if (osdmap->get_epoch() + 1 >= oldest) {
return get_shard_services().osdmap_subscribe(osdmap->get_epoch() + 1, false);
} else {
return get_shard_services().osdmap_subscribe(oldest - 1, true);
}
}
seastar::future<> OSD::_send_boot()
{
get_pg_shard_manager().set_booting();
entity_addrvec_t public_addrs = public_msgr->get_myaddrs();
entity_addrvec_t cluster_addrs = cluster_msgr->get_myaddrs();
entity_addrvec_t hb_back_addrs = heartbeat->get_back_addrs();
entity_addrvec_t hb_front_addrs = heartbeat->get_front_addrs();
if (cluster_msgr->set_addr_unknowns(public_addrs)) {
cluster_addrs = cluster_msgr->get_myaddrs();
}
if (heartbeat->get_back_msgr().set_addr_unknowns(cluster_addrs)) {
hb_back_addrs = heartbeat->get_back_addrs();
}
if (heartbeat->get_front_msgr().set_addr_unknowns(public_addrs)) {
hb_front_addrs = heartbeat->get_front_addrs();
}
logger().info("hb_back_msgr: {}", hb_back_addrs);
logger().info("hb_front_msgr: {}", hb_front_addrs);
logger().info("cluster_msgr: {}", cluster_addrs);
auto m = crimson::make_message<MOSDBoot>(superblock,
osdmap->get_epoch(),
boot_epoch,
hb_back_addrs,
hb_front_addrs,
cluster_addrs,
CEPH_FEATURES_ALL);
collect_sys_info(&m->metadata, NULL);
  // See OSDMonitor::preprocess_boot; this prevents booting without the
  // allow_crimson OSDMap flag
m->metadata["osd_type"] = "crimson";
return monc->send_message(std::move(m));
}
seastar::future<> OSD::_add_me_to_crush()
{
if (!local_conf().get_val<bool>("osd_crush_update_on_start")) {
return seastar::now();
}
auto get_weight = [this] {
if (auto w = local_conf().get_val<double>("osd_crush_initial_weight");
w >= 0) {
return seastar::make_ready_future<double>(w);
} else {
return store.stat().then([](auto st) {
auto total = st.total;
return seastar::make_ready_future<double>(
std::max(.00001,
double(total) / double(1ull << 40))); // TB
});
}
};
return get_weight().then([this](auto weight) {
const crimson::crush::CrushLocation loc;
return seastar::do_with(
std::move(loc),
[this, weight] (crimson::crush::CrushLocation& loc) {
return loc.init_on_startup().then([this, weight, &loc]() {
logger().info("crush location is {}", loc);
string cmd = fmt::format(R"({{
"prefix": "osd crush create-or-move",
"id": {},
"weight": {:.4f},
"args": [{}]
}})", whoami, weight, loc);
return monc->run_command(std::move(cmd), {});
});
});
}).then([](auto&& command_result) {
[[maybe_unused]] auto [code, message, out] = std::move(command_result);
if (code) {
logger().warn("fail to add to crush: {} ({})", message, code);
throw std::runtime_error("fail to add to crush");
} else {
logger().info("added to crush: {}", message);
}
return seastar::now();
});
}
seastar::future<> OSD::ShardDispatcher::handle_command(
crimson::net::ConnectionRef conn,
Ref<MCommand> m)
{
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
return osd.asok->handle_command(conn, std::move(m));
}
/*
  The OSD's Admin Socket object created here has two servers (i.e. blocks of
  commands to handle) registered to it:
  - OSD-specific commands are handled by the OSD object;
  - some common commands are registered to be handled directly by the
    AdminSocket object itself.
*/
seastar::future<> OSD::start_asok_admin()
{
auto asok_path = local_conf().get_val<std::string>("admin_socket");
using namespace crimson::admin;
return asok->start(asok_path).then([this] {
asok->register_admin_commands();
asok->register_command(make_asok_hook<OsdStatusHook>(std::as_const(*this)));
asok->register_command(make_asok_hook<SendBeaconHook>(*this));
asok->register_command(make_asok_hook<FlushPgStatsHook>(*this));
asok->register_command(
make_asok_hook<DumpPGStateHistory>(std::as_const(get_pg_shard_manager())));
asok->register_command(make_asok_hook<DumpMetricsHook>());
asok->register_command(make_asok_hook<DumpPerfCountersHook>());
asok->register_command(make_asok_hook<InjectDataErrorHook>(get_shard_services()));
asok->register_command(make_asok_hook<InjectMDataErrorHook>(get_shard_services()));
// PG commands
asok->register_command(make_asok_hook<pg::QueryCommand>(*this));
asok->register_command(make_asok_hook<pg::MarkUnfoundLostCommand>(*this));
// ops commands
asok->register_command(
make_asok_hook<DumpInFlightOpsHook>(
std::as_const(get_pg_shard_manager())));
asok->register_command(
make_asok_hook<DumpHistoricOpsHook>(
std::as_const(get_shard_services().get_registry())));
asok->register_command(
make_asok_hook<DumpSlowestHistoricOpsHook>(
std::as_const(get_shard_services().get_registry())));
asok->register_command(
make_asok_hook<DumpRecoveryReservationsHook>(get_shard_services()));
});
}
seastar::future<> OSD::stop()
{
logger().info("stop");
beacon_timer.cancel();
tick_timer.cancel();
// see also OSD::shutdown()
return prepare_to_stop().then([this] {
return get_pg_shard_manager().set_stopping();
}).then([this] {
logger().debug("prepared to stop");
public_msgr->stop();
cluster_msgr->stop();
auto gate_close_fut = gate.close();
return asok->stop().then([this] {
return heartbeat->stop();
}).then([this] {
return get_pg_shard_manager().stop_registries();
}).then([this] {
return store.umount();
}).then([this] {
return store.stop();
}).then([this] {
return get_pg_shard_manager().stop_pgs();
}).then([this] {
return monc->stop();
}).then([this] {
return mgrc->stop();
}).then([this] {
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
return shard_dispatchers.stop();
}).then([this] {
return shard_services.stop();
}).then([this] {
return osd_states.stop();
}).then([this] {
return osd_singleton_state.stop();
}).then([this] {
return pg_to_shard_mappings.stop();
}).then([fut=std::move(gate_close_fut)]() mutable {
return std::move(fut);
}).then([this] {
return when_all_succeed(
public_msgr->shutdown(),
cluster_msgr->shutdown()).discard_result();
}).handle_exception([](auto ep) {
logger().error("error while stopping osd: {}", ep);
});
});
}
void OSD::dump_status(Formatter* f) const
{
f->dump_stream("cluster_fsid") << superblock.cluster_fsid;
f->dump_stream("osd_fsid") << superblock.osd_fsid;
f->dump_unsigned("whoami", superblock.whoami);
f->dump_string("state", get_pg_shard_manager().get_osd_state_string());
f->dump_unsigned("oldest_map", superblock.oldest_map);
f->dump_unsigned("cluster_osdmap_trim_lower_bound",
superblock.cluster_osdmap_trim_lower_bound);
f->dump_unsigned("newest_map", superblock.newest_map);
f->dump_unsigned("num_pgs", get_pg_shard_manager().get_num_pgs());
}
void OSD::print(std::ostream& out) const
{
out << "{osd." << superblock.whoami << " "
<< superblock.osd_fsid << " [" << superblock.oldest_map
<< "," << superblock.newest_map << "] "
<< "tlb:" << superblock.cluster_osdmap_trim_lower_bound
<< " pgs:" << get_pg_shard_manager().get_num_pgs()
<< "}";
}
void OSD::ShardDispatcher::print(std::ostream& out) const
{
out << "{osd." << osd.superblock.whoami << " "
<< osd.superblock.osd_fsid << " [" << osd.superblock.oldest_map
<< "," << osd.superblock.newest_map << "] "
<< " pgs:" << get_pg_shard_manager().get_num_pgs()
<< "}";
}
std::optional<seastar::future<>>
OSD::ms_dispatch(crimson::net::ConnectionRef conn, MessageRef m)
{
if (get_pg_shard_manager().is_stopping()) {
return seastar::now();
}
bool dispatched = true;
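  // Note: `dispatched` is captured by reference and read right after
  // dispatch_in_background() returns. This works on the assumption that the
  // lambda runs synchronously up to its first suspension point, so the
  // default branch below flips the flag before we inspect it at the end of
  // this function.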
gate.dispatch_in_background(__func__, *this, [this, conn=std::move(conn),
m=std::move(m), &dispatched]() mutable {
switch (m->get_type()) {
case CEPH_MSG_OSD_MAP:
case CEPH_MSG_OSD_OP:
case MSG_OSD_PG_CREATE2:
case MSG_COMMAND:
case MSG_OSD_MARK_ME_DOWN:
case MSG_OSD_PG_PULL:
case MSG_OSD_PG_PUSH:
case MSG_OSD_PG_PUSH_REPLY:
case MSG_OSD_PG_RECOVERY_DELETE:
case MSG_OSD_PG_RECOVERY_DELETE_REPLY:
case MSG_OSD_PG_SCAN:
case MSG_OSD_PG_BACKFILL:
case MSG_OSD_PG_BACKFILL_REMOVE:
case MSG_OSD_PG_LEASE:
case MSG_OSD_PG_LEASE_ACK:
case MSG_OSD_PG_NOTIFY2:
case MSG_OSD_PG_INFO2:
case MSG_OSD_PG_QUERY2:
case MSG_OSD_BACKFILL_RESERVE:
case MSG_OSD_RECOVERY_RESERVE:
case MSG_OSD_PG_LOG:
case MSG_OSD_REPOP:
case MSG_OSD_REPOPREPLY:
case MSG_OSD_SCRUB2:
case MSG_OSD_PG_UPDATE_LOG_MISSING:
case MSG_OSD_PG_UPDATE_LOG_MISSING_REPLY:
{
return shard_dispatchers.local().ms_dispatch(conn, std::move(m));
}
default:
{
dispatched = false;
return seastar::now();
}
}
});
return (dispatched ? std::make_optional(seastar::now()) : std::nullopt);
}
seastar::future<>
OSD::ShardDispatcher::ms_dispatch(
crimson::net::ConnectionRef conn,
MessageRef m)
{
if (seastar::this_shard_id() != PRIMARY_CORE) {
switch (m->get_type()) {
case CEPH_MSG_OSD_MAP:
case MSG_COMMAND:
case MSG_OSD_MARK_ME_DOWN:
// FIXME: order is not guaranteed in this path
return conn.get_foreign(
).then([this, m=std::move(m)](auto f_conn) {
return container().invoke_on(PRIMARY_CORE,
[f_conn=std::move(f_conn), m=std::move(m)]
(auto& local_dispatcher) mutable {
auto conn = make_local_shared_foreign(std::move(f_conn));
return local_dispatcher.ms_dispatch(conn, std::move(m));
});
});
}
}
switch (m->get_type()) {
case CEPH_MSG_OSD_MAP:
return handle_osd_map(boost::static_pointer_cast<MOSDMap>(m));
case CEPH_MSG_OSD_OP:
return handle_osd_op(conn, boost::static_pointer_cast<MOSDOp>(m));
case MSG_OSD_PG_CREATE2:
return handle_pg_create(
conn, boost::static_pointer_cast<MOSDPGCreate2>(m));
case MSG_COMMAND:
return handle_command(conn, boost::static_pointer_cast<MCommand>(m));
case MSG_OSD_MARK_ME_DOWN:
return handle_mark_me_down(conn, boost::static_pointer_cast<MOSDMarkMeDown>(m));
case MSG_OSD_PG_PULL:
[[fallthrough]];
case MSG_OSD_PG_PUSH:
[[fallthrough]];
case MSG_OSD_PG_PUSH_REPLY:
[[fallthrough]];
case MSG_OSD_PG_RECOVERY_DELETE:
[[fallthrough]];
case MSG_OSD_PG_RECOVERY_DELETE_REPLY:
[[fallthrough]];
case MSG_OSD_PG_SCAN:
[[fallthrough]];
case MSG_OSD_PG_BACKFILL:
[[fallthrough]];
case MSG_OSD_PG_BACKFILL_REMOVE:
return handle_recovery_subreq(conn, boost::static_pointer_cast<MOSDFastDispatchOp>(m));
case MSG_OSD_PG_LEASE:
[[fallthrough]];
case MSG_OSD_PG_LEASE_ACK:
[[fallthrough]];
case MSG_OSD_PG_NOTIFY2:
[[fallthrough]];
case MSG_OSD_PG_INFO2:
[[fallthrough]];
case MSG_OSD_PG_QUERY2:
[[fallthrough]];
case MSG_OSD_BACKFILL_RESERVE:
[[fallthrough]];
case MSG_OSD_RECOVERY_RESERVE:
[[fallthrough]];
case MSG_OSD_PG_LOG:
return handle_peering_op(conn, boost::static_pointer_cast<MOSDPeeringOp>(m));
case MSG_OSD_REPOP:
return handle_rep_op(conn, boost::static_pointer_cast<MOSDRepOp>(m));
case MSG_OSD_REPOPREPLY:
return handle_rep_op_reply(conn, boost::static_pointer_cast<MOSDRepOpReply>(m));
case MSG_OSD_SCRUB2:
return handle_scrub(conn, boost::static_pointer_cast<MOSDScrub2>(m));
case MSG_OSD_PG_UPDATE_LOG_MISSING:
return handle_update_log_missing(conn, boost::static_pointer_cast<
MOSDPGUpdateLogMissing>(m));
case MSG_OSD_PG_UPDATE_LOG_MISSING_REPLY:
return handle_update_log_missing_reply(conn, boost::static_pointer_cast<
MOSDPGUpdateLogMissingReply>(m));
default:
return seastar::now();
}
}
void OSD::ms_handle_reset(crimson::net::ConnectionRef conn, bool is_replace)
{
// TODO: cleanup the session attached to this connection
logger().warn("ms_handle_reset");
}
void OSD::ms_handle_remote_reset(crimson::net::ConnectionRef conn)
{
logger().warn("ms_handle_remote_reset");
}
void OSD::handle_authentication(const EntityName& name,
const AuthCapsInfo& caps_info)
{
// TODO: store the parsed cap and associate it with the connection
if (caps_info.allow_all) {
logger().debug("{} {} has all caps", __func__, name);
return;
}
if (caps_info.caps.length() > 0) {
auto p = caps_info.caps.cbegin();
string str;
try {
decode(str, p);
} catch (ceph::buffer::error& e) {
logger().warn("{} {} failed to decode caps string", __func__, name);
return;
}
OSDCap caps;
if (caps.parse(str)) {
logger().debug("{} {} has caps {}", __func__, name, str);
} else {
logger().warn("{} {} failed to parse caps {}", __func__, name, str);
}
}
}
void OSD::update_stats()
{
osd_stat_seq++;
osd_stat.up_from = get_shard_services().get_up_epoch();
osd_stat.hb_peers = heartbeat->get_peers();
osd_stat.seq = (
static_cast<uint64_t>(get_shard_services().get_up_epoch()) << 32
) | osd_stat_seq;
gate.dispatch_in_background("statfs", *this, [this] {
(void) store.stat().then([this](store_statfs_t&& st) {
osd_stat.statfs = st;
});
});
}
seastar::future<MessageURef> OSD::get_stats() const
{
// MPGStats::had_map_for is not used since PGMonitor was removed
auto m = crimson::make_message<MPGStats>(monc->get_fsid(), osdmap->get_epoch());
m->osd_stat = osd_stat;
return get_pg_shard_manager().get_pg_stats(
).then([m=std::move(m)](auto &&stats) mutable {
m->pg_stat = std::move(stats);
return seastar::make_ready_future<MessageURef>(std::move(m));
});
}
uint64_t OSD::send_pg_stats()
{
// mgr client sends the report message in background
mgrc->report();
return osd_stat.seq;
}
bool OSD::ShardDispatcher::require_mon_peer(
crimson::net::Connection *conn,
Ref<Message> m)
{
if (!conn->peer_is_mon()) {
logger().info("{} received from non-mon {}, {}",
__func__,
conn->get_peer_addr(),
*m);
return false;
}
return true;
}
seastar::future<> OSD::ShardDispatcher::handle_osd_map(Ref<MOSDMap> m)
{
/* Ensure that only one MOSDMap is processed at a time. Allowing concurrent
* processing may eventually be worthwhile, but such an implementation would
* need to ensure (among other things)
* 1. any particular map is only processed once
* 2. PGAdvanceMap operations are processed in order for each PG
* As map handling is not presently a bottleneck, we stick to this
* simpler invariant for now.
* See https://tracker.ceph.com/issues/59165
*/
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
return osd.handle_osd_map_lock.lock().then([this, m] {
return _handle_osd_map(m);
}).finally([this] {
return osd.handle_osd_map_lock.unlock();
});
}
seastar::future<> OSD::ShardDispatcher::_handle_osd_map(Ref<MOSDMap> m)
{
logger().info("handle_osd_map {}", *m);
if (m->fsid != osd.superblock.cluster_fsid) {
logger().warn("fsid mismatched");
return seastar::now();
}
if (pg_shard_manager.is_initializing()) {
logger().warn("i am still initializing");
return seastar::now();
}
const auto first = m->get_first();
const auto last = m->get_last();
logger().info("handle_osd_map epochs [{}..{}], i have {}, src has [{}..{}]",
first, last, osd.superblock.newest_map,
m->cluster_osdmap_trim_lower_bound, m->newest_map);
// make sure there is something new, here, before we bother flushing
// the queues and such
if (last <= osd.superblock.newest_map) {
return seastar::now();
}
// missing some?
bool skip_maps = false;
epoch_t start = osd.superblock.newest_map + 1;
if (first > start) {
logger().info("handle_osd_map message skips epochs {}..{}",
start, first - 1);
if (m->cluster_osdmap_trim_lower_bound <= start) {
return get_shard_services().osdmap_subscribe(start, false);
}
// always try to get the full range of maps--as many as we can. this
// 1- is good to have
// 2- is at present the only way to ensure that we get a *full* map as
// the first map!
if (m->cluster_osdmap_trim_lower_bound < first) {
return get_shard_services().osdmap_subscribe(
m->cluster_osdmap_trim_lower_bound - 1, true);
}
skip_maps = true;
start = first;
}
return seastar::do_with(ceph::os::Transaction{},
[=, this](auto& t) {
return pg_shard_manager.store_maps(t, start, m).then([=, this, &t] {
// even if this map isn't from a mon, we may have satisfied our subscription
osd.monc->sub_got("osdmap", last);
if (!osd.superblock.oldest_map || skip_maps) {
osd.superblock.oldest_map = first;
}
osd.superblock.newest_map = last;
osd.superblock.current_epoch = last;
// note in the superblock that we were clean thru the prior epoch
if (osd.boot_epoch && osd.boot_epoch >= osd.superblock.mounted) {
osd.superblock.mounted = osd.boot_epoch;
osd.superblock.clean_thru = last;
}
pg_shard_manager.get_meta_coll().store_superblock(t, osd.superblock);
pg_shard_manager.set_superblock(osd.superblock);
logger().debug("OSD::handle_osd_map: do_transaction...");
return osd.store.get_sharded_store().do_transaction(
pg_shard_manager.get_meta_coll().collection(),
std::move(t));
});
}).then([=, this] {
// TODO: write to superblock and commit the transaction
return committed_osd_maps(start, last, m);
});
}
seastar::future<> OSD::ShardDispatcher::committed_osd_maps(
version_t first,
version_t last,
Ref<MOSDMap> m)
{
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
logger().info("osd.{}: committed_osd_maps({}, {})", osd.whoami, first, last);
// advance through the new maps
return seastar::do_for_each(boost::make_counting_iterator(first),
boost::make_counting_iterator(last + 1),
[this](epoch_t cur) {
return pg_shard_manager.get_local_map(
cur
).then([this](OSDMapService::local_cached_map_t&& o) {
osd.osdmap = make_local_shared_foreign(OSDMapService::local_cached_map_t(o));
return pg_shard_manager.update_map(std::move(o));
}).then([this] {
if (get_shard_services().get_up_epoch() == 0 &&
osd.osdmap->is_up(osd.whoami) &&
osd.osdmap->get_addrs(osd.whoami) == osd.public_msgr->get_myaddrs()) {
return pg_shard_manager.set_up_epoch(
osd.osdmap->get_epoch()
).then([this] {
if (!osd.boot_epoch) {
osd.boot_epoch = osd.osdmap->get_epoch();
}
});
} else {
return seastar::now();
}
});
}).then([m, this] {
auto fut = seastar::now();
if (osd.osdmap->is_up(osd.whoami)) {
const auto up_from = osd.osdmap->get_up_from(osd.whoami);
logger().info("osd.{}: map e {} marked me up: up_from {}, bind_epoch {}, state {}",
osd.whoami, osd.osdmap->get_epoch(), up_from, osd.bind_epoch,
pg_shard_manager.get_osd_state_string());
if (osd.bind_epoch < up_from &&
osd.osdmap->get_addrs(osd.whoami) == osd.public_msgr->get_myaddrs() &&
pg_shard_manager.is_booting()) {
logger().info("osd.{}: activating...", osd.whoami);
fut = pg_shard_manager.set_active().then([this] {
osd.beacon_timer.arm_periodic(
std::chrono::seconds(local_conf()->osd_beacon_report_interval));
// timer continuation rearms when complete
osd.tick_timer.arm(
std::chrono::seconds(TICK_INTERVAL));
});
}
} else {
if (pg_shard_manager.is_prestop()) {
osd.got_stop_ack();
return seastar::now();
}
}
return fut.then([this] {
return check_osdmap_features().then([this] {
// yay!
logger().info("osd.{}: committed_osd_maps: broadcasting osdmaps up"
" to {} epoch to pgs", osd.whoami, osd.osdmap->get_epoch());
return pg_shard_manager.broadcast_map_to_pgs(osd.osdmap->get_epoch());
});
});
}).then([m, this] {
if (pg_shard_manager.is_active()) {
logger().info("osd.{}: now active", osd.whoami);
if (!osd.osdmap->exists(osd.whoami) ||
osd.osdmap->is_stop(osd.whoami)) {
return osd.shutdown();
}
if (osd.should_restart()) {
return osd.restart();
} else {
return seastar::now();
}
} else if (pg_shard_manager.is_preboot()) {
logger().info("osd.{}: now preboot", osd.whoami);
if (m->get_source().is_mon()) {
return osd._preboot(
m->cluster_osdmap_trim_lower_bound, m->newest_map);
} else {
logger().info("osd.{}: start_boot", osd.whoami);
return osd.start_boot();
}
} else {
logger().info("osd.{}: now {}", osd.whoami,
pg_shard_manager.get_osd_state_string());
// XXX
return seastar::now();
}
});
}
seastar::future<> OSD::ShardDispatcher::handle_osd_op(
crimson::net::ConnectionRef conn,
Ref<MOSDOp> m)
{
return pg_shard_manager.start_pg_operation<ClientRequest>(
get_shard_services(),
conn,
std::move(m)).second;
}
seastar::future<> OSD::ShardDispatcher::handle_pg_create(
crimson::net::ConnectionRef conn,
Ref<MOSDPGCreate2> m)
{
return seastar::do_for_each(m->pgs, [this, conn, m](auto& pg) {
auto& [pgid, when] = pg;
const auto &[created, created_stamp] = when;
auto q = m->pg_extra.find(pgid);
ceph_assert(q != m->pg_extra.end());
auto& [history, pi] = q->second;
logger().debug(
"{}: {} e{} @{} "
"history {} pi {}",
__func__, pgid, created, created_stamp,
history, pi);
if (!pi.empty() &&
m->epoch < pi.get_bounds().second) {
logger().error(
"got pg_create on {} epoch {} "
"unmatched past_intervals {} (history {})",
pgid, m->epoch,
pi, history);
return seastar::now();
} else {
return pg_shard_manager.start_pg_operation<RemotePeeringEvent>(
conn,
pg_shard_t(),
pgid,
m->epoch,
m->epoch,
NullEvt(),
true,
new PGCreateInfo(pgid, m->epoch, history, pi, true)).second;
}
});
}
seastar::future<> OSD::ShardDispatcher::handle_update_log_missing(
crimson::net::ConnectionRef conn,
Ref<MOSDPGUpdateLogMissing> m)
{
m->decode_payload();
return pg_shard_manager.start_pg_operation<LogMissingRequest>(
std::move(conn),
std::move(m)).second;
}
seastar::future<> OSD::ShardDispatcher::handle_update_log_missing_reply(
crimson::net::ConnectionRef conn,
Ref<MOSDPGUpdateLogMissingReply> m)
{
m->decode_payload();
return pg_shard_manager.start_pg_operation<LogMissingRequestReply>(
std::move(conn),
std::move(m)).second;
}
seastar::future<> OSD::ShardDispatcher::handle_rep_op(
crimson::net::ConnectionRef conn,
Ref<MOSDRepOp> m)
{
m->finish_decode();
return pg_shard_manager.start_pg_operation<RepRequest>(
std::move(conn),
std::move(m)).second;
}
seastar::future<> OSD::ShardDispatcher::handle_rep_op_reply(
crimson::net::ConnectionRef conn,
Ref<MOSDRepOpReply> m)
{
spg_t pgid = m->get_spg();
return pg_shard_manager.with_pg(
pgid,
[m=std::move(m)](auto &&pg) {
if (pg) {
m->finish_decode();
pg->handle_rep_op_reply(*m);
} else {
logger().warn("stale reply: {}", *m);
}
return seastar::now();
});
}
seastar::future<> OSD::ShardDispatcher::handle_scrub(
crimson::net::ConnectionRef conn,
Ref<MOSDScrub2> m)
{
if (m->fsid != osd.superblock.cluster_fsid) {
logger().warn("fsid mismatched");
return seastar::now();
}
return seastar::parallel_for_each(std::move(m->scrub_pgs),
[m, conn, this](spg_t pgid) {
pg_shard_t from_shard{static_cast<int>(m->get_source().num()),
pgid.shard};
PeeringState::RequestScrub scrub_request{m->deep, m->repair};
return pg_shard_manager.start_pg_operation<RemotePeeringEvent>(
conn,
from_shard,
pgid,
PGPeeringEvent{m->epoch, m->epoch, scrub_request}).second;
});
}
seastar::future<> OSD::ShardDispatcher::handle_mark_me_down(
crimson::net::ConnectionRef conn,
Ref<MOSDMarkMeDown> m)
{
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
if (pg_shard_manager.is_prestop()) {
osd.got_stop_ack();
}
return seastar::now();
}
seastar::future<> OSD::ShardDispatcher::handle_recovery_subreq(
crimson::net::ConnectionRef conn,
Ref<MOSDFastDispatchOp> m)
{
return pg_shard_manager.start_pg_operation<RecoverySubRequest>(
conn, std::move(m)).second;
}
bool OSD::should_restart() const
{
if (!osdmap->is_up(whoami)) {
logger().info("map e {} marked osd.{} down",
osdmap->get_epoch(), whoami);
return true;
} else if (osdmap->get_addrs(whoami) != public_msgr->get_myaddrs()) {
logger().error("map e {} had wrong client addr ({} != my {})",
osdmap->get_epoch(),
osdmap->get_addrs(whoami),
public_msgr->get_myaddrs());
return true;
} else if (osdmap->get_cluster_addrs(whoami) != cluster_msgr->get_myaddrs()) {
logger().error("map e {} had wrong cluster addr ({} != my {})",
osdmap->get_epoch(),
osdmap->get_cluster_addrs(whoami),
cluster_msgr->get_myaddrs());
return true;
} else {
return false;
}
}
seastar::future<> OSD::restart()
{
beacon_timer.cancel();
tick_timer.cancel();
return get_pg_shard_manager().set_up_epoch(
0
).then([this] {
bind_epoch = osdmap->get_epoch();
    // TODO: promote to shutdown if we get marked down multiple times
// rebind messengers
return start_boot();
});
}
seastar::future<> OSD::shutdown()
{
logger().info("shutting down per osdmap");
abort_source.request_abort();
return seastar::now();
}
seastar::future<> OSD::send_beacon()
{
if (!get_pg_shard_manager().is_active()) {
return seastar::now();
}
// FIXME: min lec should be calculated from pg_stat
// and should set m->pgs
epoch_t min_last_epoch_clean = osdmap->get_epoch();
auto m = crimson::make_message<MOSDBeacon>(osdmap->get_epoch(),
min_last_epoch_clean,
superblock.last_purged_snaps_scrub,
local_conf()->osd_beacon_report_interval);
return monc->send_message(std::move(m));
}
seastar::future<> OSD::update_heartbeat_peers()
{
if (!get_pg_shard_manager().is_active()) {
    return seastar::now();
}
get_pg_shard_manager().for_each_pgid([this](auto &pgid) {
vector<int> up, acting;
osdmap->pg_to_up_acting_osds(pgid.pgid,
&up, nullptr,
&acting, nullptr);
for (int osd : boost::join(up, acting)) {
if (osd == CRUSH_ITEM_NONE || osd == whoami) {
continue;
} else {
heartbeat->add_peer(osd, osdmap->get_epoch());
}
}
});
heartbeat->update_peers(whoami);
return seastar::now();
}
seastar::future<> OSD::ShardDispatcher::handle_peering_op(
crimson::net::ConnectionRef conn,
Ref<MOSDPeeringOp> m)
{
const int from = m->get_source().num();
logger().debug("handle_peering_op on {} from {}", m->get_spg(), from);
m->set_features(conn->get_features());
std::unique_ptr<PGPeeringEvent> evt(m->get_event());
return pg_shard_manager.start_pg_operation<RemotePeeringEvent>(
conn,
pg_shard_t{from, m->get_spg().shard},
m->get_spg(),
std::move(*evt)).second;
}
seastar::future<> OSD::ShardDispatcher::check_osdmap_features()
{
return osd.store.write_meta(
"require_osd_release",
stringify((int)osd.osdmap->require_osd_release));
}
seastar::future<> OSD::prepare_to_stop()
{
if (osdmap && osdmap->is_up(whoami)) {
get_pg_shard_manager().set_prestop();
const auto timeout =
std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::duration<double>(
local_conf().get_val<double>("osd_mon_shutdown_timeout")));
return seastar::with_timeout(
seastar::timer<>::clock::now() + timeout,
monc->send_message(
crimson::make_message<MOSDMarkMeDown>(
monc->get_fsid(),
whoami,
osdmap->get_addrs(whoami),
osdmap->get_epoch(),
true)).then([this] {
return stop_acked.get_future();
})
).handle_exception_type(
[](seastar::timed_out_error&) {
return seastar::now();
});
}
return seastar::now();
}
}
| 48,062 | 32.990806 | 94 | cc |
null | ceph-main/src/crimson/osd/osd.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <seastar/core/abort_source.hh>
#include <seastar/core/future.hh>
#include <seastar/core/shared_future.hh>
#include <seastar/core/gate.hh>
#include <seastar/core/shared_ptr.hh>
#include <seastar/core/shared_future.hh>
#include <seastar/core/timer.hh>
#include "crimson/common/logclient.h"
#include "crimson/common/type_helpers.h"
#include "crimson/common/auth_handler.h"
#include "crimson/common/gated.h"
#include "crimson/admin/admin_socket.h"
#include "crimson/common/simple_lru.h"
#include "crimson/mgr/client.h"
#include "crimson/net/Dispatcher.h"
#include "crimson/osd/osdmap_service.h"
#include "crimson/osd/pg_shard_manager.h"
#include "crimson/osd/osdmap_gate.h"
#include "crimson/osd/pg_map.h"
#include "crimson/osd/osd_operations/peering_event.h"
#include "crimson/osd/state.h"
#include "messages/MOSDOp.h"
#include "osd/PeeringState.h"
#include "osd/osd_types.h"
#include "osd/osd_perf_counters.h"
#include "osd/PGPeeringEvent.h"
class MCommand;
class MOSDMap;
class MOSDRepOpReply;
class MOSDRepOp;
class MOSDScrub2;
class OSDMeta;
class Heartbeat;
namespace ceph::os {
class Transaction;
}
namespace crimson::mon {
class Client;
}
namespace crimson::net {
class Messenger;
}
namespace crimson::os {
class FuturizedStore;
}
namespace crimson::osd {
class PG;
class OSD final : public crimson::net::Dispatcher,
private crimson::common::AuthHandler,
private crimson::mgr::WithStats {
public:
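  // ShardDispatcher is instantiated on every reactor shard (see the
  // seastar::sharded<ShardDispatcher> member below) and owns that shard's
  // PGShardManager. Its ms_dispatch() handles PG-targeted messages locally,
  // while a few message types touching OSD-global state (osdmap, commands,
  // mark-me-down) are forwarded to PRIMARY_CORE; see
  // ShardDispatcher::ms_dispatch() in osd.cc.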
class ShardDispatcher
: public seastar::peering_sharded_service<ShardDispatcher> {
friend class OSD;
public:
ShardDispatcher(
OSD& osd,
PGShardMapping& pg_to_shard_mapping)
: pg_shard_manager(osd.osd_singleton_state,
osd.shard_services, pg_to_shard_mapping),
osd(osd) {}
~ShardDispatcher() = default;
// Dispatcher methods
seastar::future<> ms_dispatch(crimson::net::ConnectionRef, MessageRef);
private:
bool require_mon_peer(crimson::net::Connection *conn, Ref<Message> m);
seastar::future<> handle_osd_map(Ref<MOSDMap> m);
seastar::future<> _handle_osd_map(Ref<MOSDMap> m);
seastar::future<> handle_pg_create(crimson::net::ConnectionRef conn,
Ref<MOSDPGCreate2> m);
seastar::future<> handle_osd_op(crimson::net::ConnectionRef conn,
Ref<MOSDOp> m);
seastar::future<> handle_rep_op(crimson::net::ConnectionRef conn,
Ref<MOSDRepOp> m);
seastar::future<> handle_rep_op_reply(crimson::net::ConnectionRef conn,
Ref<MOSDRepOpReply> m);
seastar::future<> handle_peering_op(crimson::net::ConnectionRef conn,
Ref<MOSDPeeringOp> m);
seastar::future<> handle_recovery_subreq(crimson::net::ConnectionRef conn,
Ref<MOSDFastDispatchOp> m);
seastar::future<> handle_scrub(crimson::net::ConnectionRef conn,
Ref<MOSDScrub2> m);
seastar::future<> handle_mark_me_down(crimson::net::ConnectionRef conn,
Ref<MOSDMarkMeDown> m);
seastar::future<> committed_osd_maps(version_t first,
version_t last,
Ref<MOSDMap> m);
seastar::future<> check_osdmap_features();
seastar::future<> handle_command(crimson::net::ConnectionRef conn,
Ref<MCommand> m);
seastar::future<> handle_update_log_missing(crimson::net::ConnectionRef conn,
Ref<MOSDPGUpdateLogMissing> m);
seastar::future<> handle_update_log_missing_reply(
crimson::net::ConnectionRef conn,
Ref<MOSDPGUpdateLogMissingReply> m);
public:
void print(std::ostream&) const;
auto &get_pg_shard_manager() {
return pg_shard_manager;
}
auto &get_pg_shard_manager() const {
return pg_shard_manager;
}
ShardServices &get_shard_services() {
return pg_shard_manager.get_shard_services();
}
private:
crimson::osd::PGShardManager pg_shard_manager;
OSD& osd;
};
const int whoami;
const uint32_t nonce;
seastar::abort_source& abort_source;
seastar::timer<seastar::lowres_clock> beacon_timer;
// talk with osd
crimson::net::MessengerRef cluster_msgr;
// talk with client/mon/mgr
crimson::net::MessengerRef public_msgr;
// HB Messengers
crimson::net::MessengerRef hb_front_msgr;
crimson::net::MessengerRef hb_back_msgr;
std::unique_ptr<crimson::mon::Client> monc;
std::unique_ptr<crimson::mgr::Client> mgrc;
// TODO: use a wrapper for ObjectStore
OSDMapService::cached_map_t osdmap;
crimson::os::FuturizedStore& store;
/// _first_ epoch we were marked up (after this process started)
epoch_t boot_epoch = 0;
  /// epoch we last did a bind to new ip:ports
epoch_t bind_epoch = 0;
  /// epoch since which there have been no pending pg creates from the mon
epoch_t last_pg_create_epoch = 0;
ceph::mono_time startup_time;
seastar::shared_mutex handle_osd_map_lock;
OSDSuperblock superblock;
// Dispatcher methods
std::optional<seastar::future<>> ms_dispatch(crimson::net::ConnectionRef, MessageRef) final;
void ms_handle_reset(crimson::net::ConnectionRef conn, bool is_replace) final;
void ms_handle_remote_reset(crimson::net::ConnectionRef conn) final;
// mgr::WithStats methods
// pg statistics including osd ones
osd_stat_t osd_stat;
uint32_t osd_stat_seq = 0;
void update_stats();
seastar::future<MessageURef> get_stats() const final;
// AuthHandler methods
void handle_authentication(const EntityName& name,
const AuthCapsInfo& caps) final;
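  // sharded services: seastar keeps one instance of each per reactor shard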
seastar::sharded<PGShardMapping> pg_to_shard_mappings;
seastar::sharded<OSDSingletonState> osd_singleton_state;
seastar::sharded<OSDState> osd_states;
seastar::sharded<ShardServices> shard_services;
seastar::sharded<ShardDispatcher> shard_dispatchers;
std::unique_ptr<Heartbeat> heartbeat;
seastar::timer<seastar::lowres_clock> tick_timer;
// admin-socket
seastar::lw_shared_ptr<crimson::admin::AdminSocket> asok;
public:
OSD(int id, uint32_t nonce,
seastar::abort_source& abort_source,
crimson::os::FuturizedStore& store,
crimson::net::MessengerRef cluster_msgr,
crimson::net::MessengerRef client_msgr,
crimson::net::MessengerRef hb_front_msgr,
crimson::net::MessengerRef hb_back_msgr);
~OSD() final;
seastar::future<> open_meta_coll();
static seastar::future<OSDMeta> open_or_create_meta_coll(
crimson::os::FuturizedStore &store
);
static seastar::future<> mkfs(
crimson::os::FuturizedStore &store,
unsigned whoami,
uuid_d osd_uuid,
uuid_d cluster_fsid,
std::string osdspec_affinity);
seastar::future<> start();
seastar::future<> stop();
void dump_status(Formatter*) const;
void print(std::ostream&) const;
/// @return the seq id of the pg stats being sent
uint64_t send_pg_stats();
auto &get_shard_services() {
ceph_assert(seastar::this_shard_id() == PRIMARY_CORE);
return shard_services.local();
}
auto &get_pg_shard_manager() {
return shard_dispatchers.local().get_pg_shard_manager();
}
auto &get_pg_shard_manager() const {
return shard_dispatchers.local().get_pg_shard_manager();
}
private:
static seastar::future<> _write_superblock(
crimson::os::FuturizedStore &store,
OSDMeta meta,
OSDSuperblock superblock);
static seastar::future<> _write_key_meta(
crimson::os::FuturizedStore &store
);
seastar::future<> start_boot();
seastar::future<> _preboot(version_t oldest_osdmap, version_t newest_osdmap);
seastar::future<> _send_boot();
seastar::future<> _add_me_to_crush();
seastar::future<> osdmap_subscribe(version_t epoch, bool force_request);
seastar::future<> start_asok_admin();
void write_superblock(ceph::os::Transaction& t);
seastar::future<> read_superblock();
private:
crimson::common::Gated gate;
seastar::promise<> stop_acked;
void got_stop_ack() {
stop_acked.set_value();
}
seastar::future<> prepare_to_stop();
bool should_restart() const;
seastar::future<> restart();
seastar::future<> shutdown();
seastar::future<> update_heartbeat_peers();
friend class PGAdvanceMap;
public:
seastar::future<> send_beacon();
private:
LogClient log_client;
LogChannelRef clog;
};
inline std::ostream& operator<<(std::ostream& out, const OSD& osd) {
osd.print(out);
return out;
}
inline std::ostream& operator<<(std::ostream& out,
const OSD::ShardDispatcher& shard_dispatcher) {
shard_dispatcher.print(out);
return out;
}
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::osd::OSD> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::osd::OSD::ShardDispatcher> : fmt::ostream_formatter {};
#endif
| 9,074 | 29.45302 | 98 | h |
null | ceph-main/src/crimson/osd/osd_connection_priv.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "crimson/net/Connection.h"
#include "crimson/osd/osd_operation.h"
#include "crimson/osd/osd_operations/client_request.h"
#include "crimson/osd/osd_operations/peering_event.h"
#include "crimson/osd/osd_operations/replicated_request.h"
namespace crimson::osd {
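// Per-connection private state hung off crimson::net::Connection: the ordering
// pipelines that client, peering and replicated requests pass through.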
struct OSDConnectionPriv : public crimson::net::Connection::user_private_t {
ConnectionPipeline client_request_conn_pipeline;
ConnectionPipeline peering_request_conn_pipeline;
ConnectionPipeline replicated_request_conn_pipeline;
};
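// Return the connection's OSDConnectionPriv, creating it on first access.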
static OSDConnectionPriv &get_osd_priv(crimson::net::Connection *conn) {
if (!conn->has_user_private()) {
conn->set_user_private(std::make_unique<OSDConnectionPriv>());
}
return static_cast<OSDConnectionPriv&>(conn->get_user_private());
}
}
| 874 | 30.25 | 76 | h |
null | ceph-main/src/crimson/osd/osd_meta.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "osd_meta.h"
#include <fmt/format.h>
#include <fmt/ostream.h>
#include "crimson/os/futurized_collection.h"
#include "crimson/os/futurized_store.h"
#include "os/Transaction.h"
using std::string;
using read_errorator = crimson::os::FuturizedStore::Shard::read_errorator;
void OSDMeta::create(ceph::os::Transaction& t)
{
t.create_collection(coll->get_cid(), 0);
}
void OSDMeta::store_map(ceph::os::Transaction& t,
epoch_t e, const bufferlist& m)
{
t.write(coll->get_cid(), osdmap_oid(e), 0, m.length(), m);
}
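// Read the encoded OSDMap blob for the given epoch; a missing map is fatal.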
seastar::future<bufferlist> OSDMeta::load_map(epoch_t e)
{
return store.read(coll,
osdmap_oid(e), 0, 0,
CEPH_OSD_OP_FLAG_FADVISE_WILLNEED).handle_error(
read_errorator::all_same_way([e] {
ceph_abort_msg(fmt::format("{} read gave enoent on {}",
__func__, osdmap_oid(e)));
}));
}
void OSDMeta::store_superblock(ceph::os::Transaction& t,
const OSDSuperblock& superblock)
{
bufferlist bl;
encode(superblock, bl);
t.write(coll->get_cid(), superblock_oid(), 0, bl.length(), bl);
}
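// Read and decode the OSD superblock stored in the meta collection.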
OSDMeta::load_superblock_ret OSDMeta::load_superblock()
{
return store.read(
coll, superblock_oid(), 0, 0
).safe_then([] (bufferlist&& bl) {
auto p = bl.cbegin();
OSDSuperblock superblock;
decode(superblock, p);
return seastar::make_ready_future<OSDSuperblock>(std::move(superblock));
});
}
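// Load the preserved "final" info for a pool: pg_pool_t, pool name and
// erasure-code profile.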
seastar::future<std::tuple<pg_pool_t,
std::string,
OSDMeta::ec_profile_t>>
OSDMeta::load_final_pool_info(int64_t pool) {
return store.read(coll, final_pool_info_oid(pool),
0, 0).safe_then([] (bufferlist&& bl) {
auto p = bl.cbegin();
pg_pool_t pi;
string name;
ec_profile_t ec_profile;
decode(pi, p);
decode(name, p);
decode(ec_profile, p);
return seastar::make_ready_future<std::tuple<pg_pool_t,
string,
ec_profile_t>>(
std::make_tuple(std::move(pi),
std::move(name),
std::move(ec_profile)));
  }, read_errorator::all_same_way([pool] {
throw std::runtime_error(fmt::format("read gave enoent on {}",
final_pool_info_oid(pool)));
}));
}
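// Object names for the entries kept in the meta collection.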
ghobject_t OSDMeta::osdmap_oid(epoch_t epoch)
{
string name = fmt::format("osdmap.{}", epoch);
return ghobject_t(hobject_t(sobject_t(object_t(name), 0)));
}
ghobject_t OSDMeta::final_pool_info_oid(int64_t pool)
{
string name = fmt::format("final_pool_{}", pool);
return ghobject_t(hobject_t(sobject_t(object_t(name), CEPH_NOSNAP)));
}
ghobject_t OSDMeta::superblock_oid()
{
return ghobject_t(hobject_t(sobject_t(object_t("osd_superblock"), 0)));
}
| 2,814 | 27.434343 | 76 | cc |